id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7 values |
|---|---|---|
/Flask-Storage-Helpers-0.1.5.tar.gz/Flask-Storage-Helpers-0.1.5/flask_storage_helpers/cloudfiles.py | from __future__ import absolute_import
import mimetypes
import cloudfiles
from cloudfiles.errors import NoSuchObject, ResponseError, NoSuchContainer
from flask import current_app, request
from werkzeug.utils import cached_property
from .base import Storage, StorageFile, reraise
__all__ = ('CloudFilesStorage',)
class CloudFilesStorage(Storage):
    """Storage backend that keeps files in a Rackspace Cloud Files container.

    Connection credentials and container settings come from the constructor
    arguments when given, otherwise from the active Flask application's
    configuration (the ``CLOUDFILES_*`` keys).
    """

    def __init__(self, folder_name=None, username=None, api_key=None,
                 timeout=None):
        """
        Initialize the settings for the connection and container.
        """
        self.username = username or current_app.config.get(
            'CLOUDFILES_USERNAME', None)
        self.api_key = api_key or current_app.config.get(
            'CLOUDFILES_API_KEY', None)
        self.container_name = folder_name or \
            current_app.config.get('CLOUDFILES_CONTAINER', None)
        self.timeout = timeout or current_app.config.get(
            'CLOUDFILES_TIMEOUT', 5)
        self.use_servicenet = current_app.config.get(
            'CLOUDFILES_SERVICENET', False)
        self.auto_create_container = current_app.config.get(
            'CLOUDFILES_AUTO_CREATE_CONTAINER', False)
        self.secure_uris = current_app.config.get(
            'CLOUDFILES_SECURE_URIS', False)

    @property
    def folder_name(self):
        # Generic alias expected by the Storage interface.
        return self.container_name

    @property
    def folder(self):
        # Generic alias expected by the Storage interface.
        return self.container

    @cached_property
    def connection(self):
        # Created lazily and memoized for the lifetime of this instance.
        return cloudfiles.get_connection(
            username=self.username,
            api_key=self.api_key,
            timeout=self.timeout,
            servicenet=self.use_servicenet
        )

    @property
    def container(self):
        if not hasattr(self, '_container'):
            self._container = self._get_or_create_container(self.container_name)
            # Publish over the CDN on first access so url() resolves.
            if not self._container.is_public():
                self._container.make_public()
        return self._container

    @cached_property
    def container_url(self):
        # An explicit per-container URI from the config wins over the CDN URI.
        container_uris = current_app.config.get(
            'CLOUDFILES_CONTAINER_URIS', {})
        if self.container_name in container_uris:
            return container_uris[self.container_name]
        if self.secure_uris or request.is_secure:
            return self.container.public_ssl_uri()
        else:
            return self.container.public_uri()

    def _get_or_create_container(self, name):
        """Retrieve the container if it exists, otherwise optionally create it.

        Raises RuntimeError when the container is missing and automatic
        creation is disabled.
        """
        try:
            return self.connection.get_container(name)
        except NoSuchContainer:
            if self.auto_create_container:
                return self.connection.create_container(name)
            else:
                # Bug fix: the message used to reference a non-existent
                # CLOUDFILES_BUCKET_NAME setting; the key actually read by
                # __init__ is CLOUDFILES_CONTAINER.
                raise RuntimeError(
                    "Container specified by "
                    "CLOUDFILES_CONTAINER does not exist. "
                    "Containers can be automatically created by setting "
                    "CLOUDFILES_AUTO_CREATE_CONTAINER=True")

    def _save(self, name, content):
        """
        Use the Cloud Files service to write a `werkzeug.FileStorage`
        (called ``content``) to a remote file (called ``name``).
        """
        cloud_obj = self.container.create_object(name)
        # guess_type() may return None for unknown extensions; the object is
        # then stored without an explicit content type.
        mimetype, _ = mimetypes.guess_type(name)
        cloud_obj.content_type = mimetype
        cloud_obj.send(content)
        return self.open(name)

    def _open(self, name, mode='rb'):
        return self.file_class(self, name)

    def delete(self, name):
        """
        Deletes the specified file from the storage system.
        """
        try:
            self.container.delete_object(name)
        except ResponseError as e:
            # 'except X, e' is Python-2-only syntax; 'as' works on 2.6+ and 3.
            reraise(e)

    def exists(self, name):
        """
        Returns True if a file referenced by the given name already exists in
        the storage system, or False if the name is available for a new file.
        """
        try:
            self.container.get_object(name)
            return True
        except NoSuchObject:
            return False

    def url(self, name):
        """
        Returns an absolute URL where the file's contents can be accessed
        directly by a web browser.
        """
        return '%s/%s' % (self.container_url, name)

    def get_object(self, name):
        """Fetch the raw cloudfiles object, translating service errors."""
        try:
            return self.container.get_object(name)
        except NoSuchObject as e:
            reraise(e)
        except ResponseError as e:
            reraise(e)

    @property
    def file_class(self):
        return CloudFilesStorageFile
class CloudFilesStorageFile(StorageFile):
    # Cached cloudfiles object; populated lazily by the `file` property.
    _file = None

    def __init__(self, storage, name=None, prefix=''):
        """File handle bound to *storage*.

        Assigning ``self.name`` goes through the StorageFile base class
        (not visible here) -- presumably it sets ``self._name``; TODO confirm.
        """
        self._storage = storage
        self.prefix = prefix
        if name is not None:
            self.name = name
        # Touching `self.file` eagerly fetches the remote object so that a
        # missing file fails here rather than on first read.
        if self._name:
            self.file
        self._pos = 0
@property
def file(self):
    """Return the remote cloudfiles object, fetching it on first access."""
    cached = self._file
    if not cached:
        cached = self._storage.get_object(self.name)
        self._file = cached
    return cached
def read(self, size=-1, **kw):
    """Read up to *size* bytes from the tracked position and advance it."""
    kw['offset'] = self._pos
    chunk = self.file.read(size, **kw)
    self._pos = self._pos + len(chunk)
    return chunk
/Circuitscape-4.0.5.tar.gz/Circuitscape-4.0.5/circuitscape/cfg.py | import ConfigParser, os, copy, ast, codecs
class CSConfig:
    """Represents a Circuitscape configuration object"""

    # Options whose values are filesystem paths; used by as_dict() for
    # relative->absolute conversion and by are_all_paths_relative().
    FILE_PATH_PROPS = ['polygon_file', 'source_file', 'ground_file', 'mask_file', 'output_file', 'habitat_file', 'point_file', 'reclass_file']

    # Default value for every option, grouped by the INI section it is
    # written to by write().  The "(Browse ...)" strings double as GUI
    # placeholder text and as "not yet set" sentinels for check().
    DEFAULTS = {
        'Version': {
            'version': 'unknown'
        },
        'Connection scheme for raster habitat data': {
            'connect_four_neighbors_only': False,
            'connect_using_avg_resistances': False,
        },
        'Short circuit regions (aka polygons)': {
            'use_polygons': False,
            'polygon_file': '(Browse for a short-circuit region file)'
        },
        'Options for advanced mode': {
            'source_file': '(Browse for a current source file)',
            'ground_file': '(Browse for a ground point file)',
            'ground_file_is_resistances': True,
            'use_unit_currents': False,
            'use_direct_grounds': False,
            'remove_src_or_gnd': 'keepall'
        },
        'Mask file': {
            'mask_file': 'None',
            'use_mask': False
        },
        'Calculation options': {
            'preemptive_memory_release': False,
            'low_memory_mode': False,
            'parallelize': False,  # can parallelize if True. It may be overridden internally to False based on os support and suitability of the problem.
            'max_parallel': 0,  # passing 0 results in using all available cpus
            'print_timings': False,
            'print_rusages': False,
            'solver': 'cg+amg'
        },
        'Options for one-to-all and all-to-one modes': {
            'use_variable_source_strengths': False,
            'variable_source_file': 'None'
        },
        'Output options': {
            'set_null_currents_to_nodata': False,
            'output_file': '(Choose a base name for output files)',
            'write_cum_cur_map_only': False,
            'log_transform_maps': False,
            'write_max_cur_maps': False,
            'compress_grids': False,
            'set_null_voltages_to_nodata': False,
            'set_focal_node_currents_to_zero': False,
            'write_volt_maps': False,
            'write_cur_maps': False
        },
        'Habitat raster or graph': {
            'habitat_map_is_resistances': True,
            'habitat_file': '(Browse for a resistance file)'
        },
        'Circuitscape mode': {
            'scenario': 'not entered',
            'data_type': 'raster'
        },
        'Options for pairwise and one-to-all and all-to-one modes': {
            'use_included_pairs': False,
            'included_pairs_file': '(Browse for a file with pairs to include or exclude)',
            'point_file': '(Browse for file with locations of focal points or regions)'
        },
        'Options for reclassification of habitat data': {
            'use_reclass_table': False,
            'reclass_file': '(Browse for file with reclassification data)'
        },
        'Logging Options': {
            'profiler_log_file': None,  # file to log timing and rusage profiling results
            'log_file': None,  # file to log regular log messages
            'log_level': 'INFO',  # one of FATAL, ERROR, WARN, INFO, DEBUG
            'screenprint_log': False  # whether to print logs to console (stdout)
        }
    }

    # Message shown when the corresponding required option is still at its
    # default; entries are pruned by check() when a scenario/setting makes
    # them unnecessary.
    CHECKS_AND_MESSAGES = {
        'scenario': 'Please choose a scenario',
        'habitat_file': 'Please choose a resistance file',
        'output_file': 'Please choose an output file name',
        'point_file': 'Please choose a focal node file',
        'source_file': 'Please enter a current source file',
        'ground_file': 'Ground point file does not exist!',
        'reclass_file': 'Please choose a file with reclassification data',
        'polygon_file': 'Please enter a short-circuit region file or uncheck this option in the Options menu'
    }
def __init__(self, cfgfile=None):
    """Load the defaults, then (optionally) override them from *cfgfile*.

    Raises RuntimeError when *cfgfile* is given but does not exist.
    """
    o = {}
    for olist in CSConfig.DEFAULTS.values():
        o.update(olist)
    self.options = o
    if cfgfile is None:
        return
    if not os.path.isfile(cfgfile):
        raise RuntimeError('File %s does not exist' % (cfgfile,))
    config = ConfigParser.ConfigParser()
    try:
        config.read(cfgfile)
    except Exception:
        # Narrowed from a bare except (which also swallowed
        # KeyboardInterrupt/SystemExit).
        # try again with utf8 bom markers
        with codecs.open(cfgfile, 'r', encoding='utf_8_sig') as fp:
            config.readfp(fp)
    for section in config.sections():
        for option, raw in config.items(section):
            try:
                # Prefer the typed value (bool/int/list/...) when the raw
                # string parses as a Python literal.
                self.options[option] = ast.literal_eval(raw)
            except (ValueError, SyntaxError):
                # literal_eval raises these for non-literal strings; keep
                # the raw text in that case.
                self.options[option] = raw
def as_dict(self, rel_to_abs=None):
    """Flatten all options into one dict, normalising path-valued options.

    Path options still at their default (or None) become ''; relative
    paths are joined onto *rel_to_abs* when it is supplied.
    """
    flat = {}
    for section, defaults in CSConfig.DEFAULTS.items():
        for option, default in defaults.items():
            value = self.options[option]
            if option in CSConfig.FILE_PATH_PROPS:
                if value is None or value == default:
                    value = ''
                elif (not os.path.isabs(value)) and (rel_to_abs is not None):
                    value = os.path.join(rel_to_abs, value)
            flat[option] = value
    return flat
def are_all_paths_relative(self):
    """Return True when every configured file path is unset or relative."""
    defaults = {}
    for group in CSConfig.DEFAULTS.values():
        defaults.update(group)
    for prop in CSConfig.FILE_PATH_PROPS:
        # Skip options that are missing, still at their default, or None:
        # only genuinely-configured paths are examined.
        if prop not in self.options:
            continue
        value = self.options[prop]
        if value is None or value == defaults[prop]:
            continue
        if os.path.isabs(value):
            return False
    return True
def write(self, cfg_filename, is_filename_template=False):
    """Serialize the options to *cfg_filename* in INI format.

    When *is_filename_template* is True, the extension of *cfg_filename*
    is replaced with '.ini'.  A missing output directory is created.

    Raises RuntimeError if the output directory cannot be created.
    """
    if is_filename_template:
        out_base, _out_extn = os.path.splitext(cfg_filename)
        cfg_filename = out_base + '.ini'
    out_dir = os.path.split(cfg_filename)[0]
    if (len(out_dir) > 0) and (not os.path.isdir(out_dir)):
        try:
            os.makedirs(out_dir)
        except OSError:
            # Narrowed from a bare except: only filesystem failures are
            # expected from makedirs; anything else should propagate.
            raise RuntimeError('Cannot create output directory: ' + out_dir + '.')
    config = ConfigParser.ConfigParser()
    for section in CSConfig.DEFAULTS.keys():
        config.add_section(section)
        for option in CSConfig.DEFAULTS[section].keys():
            config.set(section, option, self.options[option])
    with open(cfg_filename, 'w') as f:
        config.write(f)
def check(self):
    """Checks to make sure sufficient options are passed to Circuitscape to complete a run."""
    # Flattened defaults let us detect options still at their sentinel value.
    defaults = {}
    for group in CSConfig.DEFAULTS.values():
        defaults.update(group)
    # Start from the full set of checks, then prune the ones that the
    # configured scenario/settings make unnecessary.
    required = copy.copy(CSConfig.CHECKS_AND_MESSAGES)
    if self.options['scenario'] not in ['pairwise', 'one-to-all']:
        del required['point_file']
    if self.options['scenario'] != 'advanced':
        del required['source_file']
        del required['ground_file']
    if self.options['use_polygons'] == False:
        del required['polygon_file']
    if self.options['use_reclass_table'] == False:
        del required['reclass_file']
    # Any remaining required option still at its default fails the check.
    for name, message in required.items():
        if self.options[name] == defaults[name]:
            return False, message
    return True, 'None'
def __getattr__(self, name):
    # Fallback attribute access: unknown attributes resolve to entries in
    # the options dict.  NOTE(review): a missing option raises KeyError
    # rather than AttributeError, which confuses hasattr()/copy -- confirm
    # no caller relies on this before changing it.
    return self.options[name]
def __setattr__(self, name, value):
    """Route attribute writes into the options dict.

    The 'options' attribute itself is stored on the instance so that
    __getattr__ can find it without recursing.
    """
    if name != 'options':
        self.options[name] = value
    else:
        self.__dict__[name] = value
    # The attribute protocol ignores this return value; kept for parity
    # with the original implementation.
    return value
/GB2260-v2-0.2.1.tar.gz/GB2260-v2-0.2.1/gb2260_v2/data/curated/revision_198512.py | from __future__ import unicode_literals
name = '198512'
division_schema = {
'110000': '北京市',
'110101': '东城区',
'110102': '西城区',
'110103': '崇文区',
'110104': '宣武区',
'110105': '朝阳区',
'110106': '丰台区',
'110107': '石景山区',
'110108': '海淀区',
'110109': '门头沟区',
'110110': '燕山区',
'110221': '昌平县',
'110222': '顺义县',
'110223': '通县',
'110224': '大兴县',
'110225': '房山县',
'110226': '平谷县',
'110227': '怀柔县',
'110228': '密云县',
'110229': '延庆县',
'120000': '天津市',
'120101': '和平区',
'120102': '河东区',
'120103': '河西区',
'120104': '南开区',
'120105': '河北区',
'120106': '红桥区',
'120107': '塘沽区',
'120108': '汉沽区',
'120109': '大港区',
'120110': '东郊区',
'120111': '西郊区',
'120112': '南郊区',
'120113': '北郊区',
'120221': '宁河县',
'120222': '武清县',
'120223': '静海县',
'120224': '宝坻县',
'120225': '蓟县',
'130000': '河北省',
'130100': '石家庄市',
'130102': '长安区',
'130103': '桥东区',
'130104': '桥西区',
'130105': '新华区',
'130106': '郊区',
'130107': '井陉矿区',
'130121': '井陉县',
'130122': '获鹿县',
'130200': '唐山市',
'130202': '路南区',
'130203': '路北区',
'130204': '东矿区',
'130205': '开平区',
'130206': '新区',
'130221': '丰润县',
'130222': '丰南县',
'130223': '滦县',
'130224': '滦南县',
'130225': '乐亭县',
'130226': '迁安县',
'130227': '迁西县',
'130228': '遵化县',
'130229': '玉田县',
'130230': '唐海县',
'130300': '秦皇岛市',
'130302': '海港区',
'130303': '山海关区',
'130304': '北戴河区',
'130321': '青龙县',
'130322': '昌黎县',
'130323': '抚宁县',
'130324': '卢龙县',
'130400': '邯郸市',
'130402': '邯山区',
'130403': '丛台区',
'130404': '复兴区',
'130405': '郊区',
'130406': '峰峰矿区',
'130421': '邯郸县',
'130500': '邢台市',
'130502': '桥东区',
'130503': '桥西区',
'130504': '郊区',
'130600': '保定市',
'130602': '新市区',
'130603': '北市区',
'130604': '南市区',
'130605': '郊区',
'130621': '满城县',
'130700': '张家口市',
'130702': '桥东区',
'130703': '桥西区',
'130704': '茶坊区',
'130705': '宣化区',
'130706': '下花园区',
'130707': '庞家堡区',
'130721': '宣化县',
'130800': '承德市',
'130802': '双桥区',
'130803': '双滦区',
'130804': '鹰手营子矿区',
'130821': '承德县',
'130900': '沧州市',
'130902': '新华区',
'130903': '运河区',
'130904': '郊区',
'130921': '沧县',
'132100': '邯郸地区',
'132121': '大名县',
'132122': '魏县',
'132123': '曲周县',
'132124': '丘县',
'132125': '鸡泽县',
'132126': '肥乡县',
'132127': '广平县',
'132128': '成安县',
'132129': '临漳县',
'132130': '磁县',
'132131': '武安县',
'132132': '涉县',
'132133': '永年县',
'132135': '馆陶县',
'132200': '邢台地区',
'132221': '邢台县',
'132222': '沙河县',
'132223': '临城县',
'132224': '内丘县',
'132225': '柏乡县',
'132226': '隆尧县',
'132227': '任县',
'132228': '南和县',
'132229': '宁晋县',
'132230': '南宫县',
'132231': '巨鹿县',
'132232': '新河县',
'132233': '广宗县',
'132234': '平乡县',
'132235': '威县',
'132236': '清河县',
'132237': '临西县',
'132300': '石家庄地区',
'132321': '束鹿县',
'132322': '晋县',
'132323': '深泽县',
'132324': '无极县',
'132325': '藁城县',
'132326': '赵县',
'132327': '栾城县',
'132328': '正定县',
'132329': '新乐县',
'132330': '高邑县',
'132331': '元氏县',
'132332': '赞皇县',
'132335': '平山县',
'132336': '灵寿县',
'132337': '行唐县',
'132400': '保定地区',
'132421': '易县',
'132423': '徐水县',
'132424': '涞源县',
'132425': '定兴县',
'132426': '完县',
'132427': '唐县',
'132428': '望都县',
'132429': '涞水县',
'132430': '涿县',
'132431': '清苑县',
'132432': '高阳县',
'132433': '安新县',
'132434': '雄县',
'132435': '容城县',
'132436': '新城县',
'132437': '曲阳县',
'132438': '阜平县',
'132439': '定县',
'132440': '安国县',
'132441': '博野县',
'132442': '蠡县',
'132500': '张家口地区',
'132521': '张北县',
'132522': '康保县',
'132523': '沽源县',
'132524': '尚义县',
'132525': '蔚县',
'132526': '阳原县',
'132527': '怀安县',
'132528': '万全县',
'132529': '怀来县',
'132530': '涿鹿县',
'132532': '赤城县',
'132533': '崇礼县',
'132600': '承德地区',
'132622': '宽城县',
'132623': '兴隆县',
'132624': '平泉县',
'132626': '滦平县',
'132627': '丰宁县',
'132628': '隆化县',
'132629': '围场县',
'132800': '廊坊地区',
'132801': '廊坊市',
'132821': '三河县',
'132822': '大厂回族自治县',
'132823': '香河县',
'132825': '永清县',
'132826': '固安县',
'132827': '霸县',
'132828': '文安县',
'132829': '大城县',
'132900': '沧州地区',
'132902': '泊头市',
'132922': '河间县',
'132923': '肃宁县',
'132924': '献县',
'132926': '吴桥县',
'132927': '东光县',
'132928': '南皮县',
'132929': '盐山县',
'132930': '黄骅县',
'132931': '孟村回族自治县',
'132932': '青县',
'132933': '任丘县',
'132934': '海兴县',
'133000': '衡水地区',
'133001': '衡水市',
'133022': '冀县',
'133023': '枣强县',
'133024': '武邑县',
'133025': '深县',
'133026': '武强县',
'133027': '饶阳县',
'133028': '安平县',
'133029': '故城县',
'133030': '景县',
'133031': '阜城县',
'140000': '山西省',
'140100': '太原市',
'140102': '南城区',
'140103': '北城区',
'140104': '河西区',
'140111': '古交工矿区',
'140112': '南郊区',
'140113': '北郊区',
'140121': '清徐县',
'140122': '阳曲县',
'140123': '娄烦县',
'140200': '大同市',
'140202': '城区',
'140203': '矿区',
'140211': '南郊区',
'140212': '新荣区',
'140300': '阳泉市',
'140302': '城区',
'140303': '矿区',
'140311': '郊区',
'140321': '平定县',
'140322': '盂县',
'140400': '长治市',
'140402': '城区',
'140411': '郊区',
'140421': '长治县',
'140422': '潞城县',
'140423': '襄垣县',
'140424': '屯留县',
'140425': '平顺县',
'140426': '黎城县',
'140427': '壶关县',
'140428': '长子县',
'140429': '武乡县',
'140430': '沁县',
'140431': '沁源县',
'140500': '晋城市',
'140502': '城区',
'140503': '郊区',
'140521': '沁水县',
'140522': '阳城县',
'140523': '高平县',
'140524': '陵川县',
'142100': '雁北地区',
'142121': '阳高县',
'142122': '天镇县',
'142123': '广灵县',
'142124': '灵丘县',
'142125': '浑源县',
'142126': '应县',
'142127': '山阴县',
'142128': '朔县',
'142129': '平鲁县',
'142130': '左云县',
'142131': '右玉县',
'142132': '大同县',
'142133': '怀仁县',
'142200': '忻州地区',
'142201': '忻州市',
'142222': '定襄县',
'142223': '五台县',
'142224': '原平县',
'142225': '代县',
'142226': '繁峙县',
'142227': '宁武县',
'142228': '静乐县',
'142229': '神池县',
'142230': '五寨县',
'142231': '岢岚县',
'142232': '河曲县',
'142233': '保德县',
'142234': '偏关县',
'142300': '吕梁地区',
'142321': '汾阳县',
'142322': '文水县',
'142323': '交城县',
'142324': '孝义县',
'142325': '兴县',
'142326': '临县',
'142327': '柳林县',
'142328': '石楼县',
'142329': '岚县',
'142330': '方山县',
'142331': '离石县',
'142332': '中阳县',
'142333': '交口县',
'142400': '晋中地区',
'142401': '榆次市',
'142421': '榆社县',
'142422': '左权县',
'142423': '和顺县',
'142424': '昔阳县',
'142427': '寿阳县',
'142429': '太谷县',
'142430': '祁县',
'142431': '平遥县',
'142432': '介休县',
'142433': '灵石县',
'142600': '临汾地区',
'142601': '临汾市',
'142602': '侯马市',
'142621': '曲沃县',
'142622': '翼城县',
'142623': '襄汾县',
'142625': '洪洞县',
'142626': '霍县',
'142627': '古县',
'142628': '安泽县',
'142629': '浮山县',
'142630': '吉县',
'142631': '乡宁县',
'142632': '蒲县',
'142633': '大宁县',
'142634': '永和县',
'142635': '隰县',
'142636': '汾西县',
'142700': '运城地区',
'142701': '运城市',
'142722': '永济县',
'142723': '芮城县',
'142724': '临猗县',
'142725': '万荣县',
'142726': '新绛县',
'142727': '稷山县',
'142728': '河津县',
'142729': '闻喜县',
'142730': '夏县',
'142731': '绛县',
'142732': '平陆县',
'142733': '垣曲县',
'150000': '内蒙古自治区',
'150100': '呼和浩特市',
'150102': '新城区',
'150103': '回民区',
'150104': '玉泉区',
'150105': '郊区',
'150121': '土默特左旗',
'150122': '托克托县',
'150200': '包头市',
'150202': '东河区',
'150203': '昆都仑区',
'150204': '青山区',
'150205': '石拐矿区',
'150206': '白云矿区',
'150207': '郊区',
'150221': '土默特右旗',
'150222': '固阳县',
'150300': '乌海市',
'150302': '海勃湾区',
'150303': '海南区',
'150304': '乌达区',
'150400': '赤峰市',
'150402': '红山区',
'150403': '元宝山区',
'150404': '郊区',
'150421': '阿鲁科尔沁旗',
'150422': '巴林左旗',
'150423': '巴林右旗',
'150424': '林西县',
'150425': '克什克腾旗',
'150426': '翁牛特旗',
'150428': '喀喇沁旗',
'150429': '宁城县',
'150430': '敖汉旗',
'152100': '呼伦贝尔盟',
'152101': '海拉尔市',
'152102': '满洲里市',
'152103': '扎兰屯市',
'152104': '牙克石市',
'152122': '阿荣旗',
'152123': '莫力达瓦达斡尔族自治旗',
'152125': '额尔古纳右旗',
'152126': '额尔古纳左旗',
'152127': '鄂伦春自治旗',
'152128': '鄂温克族自治旗',
'152129': '新巴尔虎右旗',
'152130': '新巴尔虎左旗',
'152131': '陈巴尔虎旗',
'152200': '兴安盟',
'152201': '乌兰浩特市',
'152221': '科尔沁右翼前旗',
'152222': '科尔沁右翼中旗',
'152223': '扎赉特旗',
'152224': '突泉县',
'152300': '哲里木盟',
'152301': '通辽市',
'152302': '霍林郭勒市',
'152321': '通辽县',
'152322': '科尔沁左翼中旗',
'152323': '科尔沁左翼后旗',
'152324': '开鲁县',
'152325': '库伦旗',
'152326': '奈曼旗',
'152327': '扎鲁特旗',
'152500': '锡林郭勒盟',
'152501': '二连浩特市',
'152502': '锡林浩特市',
'152522': '阿巴嘎旗',
'152523': '苏尼特左旗',
'152524': '苏尼特右旗',
'152525': '东乌珠穆沁旗',
'152526': '西乌珠穆沁旗',
'152527': '太仆寺旗',
'152528': '镶黄旗',
'152529': '正镶白旗',
'152530': '正蓝旗',
'152531': '多伦县',
'152600': '乌兰察布盟',
'152601': '集宁市',
'152621': '武川县',
'152622': '和林格尔县',
'152623': '清水河县',
'152624': '卓资县',
'152625': '化德县',
'152626': '商都县',
'152627': '兴和县',
'152628': '丰镇县',
'152629': '凉城县',
'152630': '察哈尔右翼前旗',
'152631': '察哈尔右翼中旗',
'152632': '察哈尔右翼后旗',
'152633': '达尔罕茂明安联合旗',
'152634': '四子王旗',
'152700': '伊克昭盟',
'152701': '东胜市',
'152722': '达拉特旗',
'152723': '准格尔旗',
'152724': '鄂托克前旗',
'152725': '鄂托克旗',
'152726': '杭锦旗',
'152727': '乌审旗',
'152728': '伊金霍洛旗',
'152800': '巴彦淖尔盟',
'152801': '临河市',
'152822': '五原县',
'152823': '磴口县',
'152824': '乌拉特前旗',
'152825': '乌拉特中旗',
'152826': '乌拉特后旗',
'152827': '杭锦后旗',
'152900': '阿拉善盟',
'152921': '阿拉善左旗',
'152922': '阿拉善右旗',
'152923': '额济纳旗',
'210000': '辽宁省',
'210100': '沈阳市',
'210102': '和平区',
'210103': '沈河区',
'210104': '大东区',
'210105': '皇姑区',
'210106': '铁西区',
'210111': '苏家屯区',
'210112': '东陵区',
'210113': '新城子区',
'210114': '于洪区',
'210121': '新民县',
'210122': '辽中县',
'210200': '大连市',
'210202': '中山区',
'210203': '西岗区',
'210204': '沙河口区',
'210211': '甘井子区',
'210212': '旅顺口区',
'210219': '瓦房店市',
'210221': '金县',
'210222': '新金县',
'210224': '长海县',
'210225': '庄河县',
'210300': '鞍山市',
'210302': '铁东区',
'210303': '铁西区',
'210304': '立山区',
'210311': '旧堡区',
'210319': '海城市',
'210321': '台安县',
'210400': '抚顺市',
'210402': '新抚区',
'210403': '露天区',
'210404': '望花区',
'210411': '郊区',
'210421': '抚顺县',
'210422': '新宾满族自治县',
'210423': '清原县',
'210500': '本溪市',
'210502': '平山区',
'210503': '溪湖区',
'210504': '明山区',
'210511': '南芬区',
'210521': '本溪县',
'210522': '桓仁县',
'210600': '丹东市',
'210602': '元宝区',
'210603': '振兴区',
'210604': '振安区',
'210621': '凤城满族自治县',
'210622': '岫岩满族自治县',
'210623': '东沟县',
'210624': '宽甸县',
'210700': '锦州市',
'210702': '古塔区',
'210703': '凌河区',
'210704': '南票区',
'210705': '葫芦岛区',
'210711': '太和区',
'210719': '锦西市',
'210722': '兴城县',
'210723': '绥中县',
'210724': '锦县',
'210725': '北镇县',
'210726': '黑山县',
'210727': '义县',
'210800': '营口市',
'210802': '站前区',
'210803': '西市区',
'210804': '鲅鱼圈区',
'210811': '老边区',
'210821': '营口县',
'210824': '盖县',
'210900': '阜新市',
'210902': '海州区',
'210903': '新邱区',
'210904': '太平区',
'210905': '清河门区',
'210911': '细河区',
'210921': '阜新蒙古族自治县',
'210922': '彰武县',
'211000': '辽阳市',
'211002': '白塔区',
'211003': '文圣区',
'211004': '宏伟区',
'211005': '弓长岭区',
'211011': '太子河区',
'211021': '辽阳县',
'211022': '灯塔县',
'211100': '盘锦市',
'211102': '盘山区',
'211103': '兴隆台区',
'211111': '郊区',
'211121': '大洼县',
'211200': '铁岭市',
'211202': '银州区',
'211203': '铁法区',
'211204': '清河区',
'211221': '铁岭县',
'211222': '开原县',
'211223': '西丰县',
'211224': '昌图县',
'211225': '康平县',
'211226': '法库县',
'211300': '朝阳市',
'211302': '双塔区',
'211303': '龙城区',
'211319': '北票市',
'211321': '朝阳县',
'211322': '建平县',
'211323': '凌源县',
'211324': '喀喇沁左翼蒙古族自治县',
'211325': '建昌县',
'220000': '吉林省',
'220100': '长春市',
'220102': '南关区',
'220103': '宽城区',
'220104': '朝阳区',
'220105': '二道河子区',
'220111': '郊区',
'220121': '榆树县',
'220122': '农安县',
'220123': '九台县',
'220124': '德惠县',
'220125': '双阳县',
'220200': '吉林市',
'220202': '昌邑区',
'220203': '龙潭区',
'220204': '船营区',
'220211': '郊区',
'220221': '永吉县',
'220222': '舒兰县',
'220223': '磐石县',
'220224': '蛟河县',
'220225': '桦甸县',
'220300': '四平市',
'220302': '铁西区',
'220303': '铁东区',
'220319': '公主岭市',
'220322': '梨树县',
'220323': '伊通县',
'220324': '双辽县',
'220400': '辽源市',
'220402': '龙山区',
'220403': '西安区',
'220421': '东丰县',
'220422': '东辽县',
'220500': '通化市',
'220519': '梅河口市',
'220521': '通化县',
'220522': '集安县',
'220523': '辉南县',
'220524': '柳河县',
'220600': '浑江市',
'220621': '抚松县',
'220622': '靖宇县',
'220623': '长白朝鲜族自治县',
'222300': '白城地区',
'222301': '白城市',
'222321': '扶余县',
'222322': '洮安县',
'222323': '长岭县',
'222324': '前郭尔罗斯蒙古族自治县',
'222325': '大安县',
'222326': '镇赉县',
'222327': '通榆县',
'222328': '乾安县',
'222400': '延边朝鲜族自治州',
'222401': '延吉市',
'222402': '图们市',
'222403': '敦化市',
'222421': '龙井县',
'222423': '和龙县',
'222424': '汪清县',
'222425': '珲春县',
'222426': '安图县',
'230000': '黑龙江省',
'230100': '哈尔滨市',
'230102': '道里区',
'230103': '南岗区',
'230104': '道外区',
'230105': '太平区',
'230106': '香坊区',
'230107': '动力区',
'230108': '平房区',
'230121': '呼兰县',
'230122': '阿城县',
'230200': '齐齐哈尔市',
'230202': '龙沙区',
'230203': '建华区',
'230204': '铁锋区',
'230205': '昂昂溪区',
'230206': '富拉尔基区',
'230207': '碾子山区',
'230208': '梅里斯区',
'230221': '龙江县',
'230222': '讷河县',
'230223': '依安县',
'230224': '泰来县',
'230225': '甘南县',
'230226': '杜尔伯特蒙古族自治县',
'230227': '富裕县',
'230228': '林甸县',
'230229': '克山县',
'230230': '克东县',
'230231': '拜泉县',
'230300': '鸡西市',
'230302': '鸡冠区',
'230303': '恒山区',
'230304': '滴道区',
'230305': '梨树区',
'230306': '城子河区',
'230307': '麻山区',
'230321': '鸡东县',
'230400': '鹤岗市',
'230402': '向阳区',
'230403': '工农区',
'230404': '南山区',
'230405': '兴安区',
'230406': '东山区',
'230407': '兴山区',
'230500': '双鸭山市',
'230502': '尖山区',
'230503': '岭东区',
'230504': '岭西区',
'230505': '四方台区',
'230506': '宝山区',
'230600': '大庆市',
'230602': '萨尔图区',
'230603': '龙凤区',
'230604': '让胡路区',
'230605': '红岗区',
'230606': '大同区',
'230700': '伊春市',
'230702': '伊春区',
'230703': '南岔区',
'230704': '友好区',
'230705': '西林区',
'230706': '翠峦区',
'230707': '新青区',
'230708': '美溪区',
'230709': '金山屯区',
'230710': '五营区',
'230711': '乌马河区',
'230712': '汤旺河区',
'230713': '带岭区',
'230714': '乌伊岭区',
'230715': '红星区',
'230716': '上甘岭区',
'230721': '铁力县',
'230722': '嘉荫县',
'230800': '佳木斯市',
'230802': '永红区',
'230803': '向阳区',
'230804': '前进区',
'230805': '东风区',
'230811': '郊区',
'230821': '富锦县',
'230822': '桦南县',
'230823': '依兰县',
'230824': '友谊县',
'230825': '集贤县',
'230826': '桦川县',
'230827': '宝清县',
'230828': '汤原县',
'230829': '绥滨县',
'230830': '萝北县',
'230831': '同江县',
'230832': '饶河县',
'230833': '抚远县',
'230900': '七台河市',
'230902': '新兴区',
'230903': '桃山区',
'230904': '茄子河区',
'230921': '勃利县',
'231000': '牡丹江市',
'231002': '东安区',
'231003': '阳明区',
'231004': '爱民区',
'231005': '西安区',
'231011': '郊区',
'231120': '绥芬河市',
'231021': '宁安县',
'231022': '海林县',
'231023': '穆棱县',
'231024': '东宁县',
'231025': '林口县',
'231026': '密山县',
'231027': '虎林县',
'232100': '松花江地区',
'232122': '宾县',
'232124': '双城县',
'232125': '五常县',
'232126': '巴彦县',
'232127': '木兰县',
'232128': '通河县',
'232129': '尚志县',
'232130': '方正县',
'232131': '延寿县',
'232300': '绥化地区',
'232301': '绥化市',
'232302': '安达市',
'232321': '海伦县',
'232322': '肇东县',
'232324': '望奎县',
'232325': '兰西县',
'232326': '青冈县',
'232328': '肇源县',
'232329': '肇州县',
'232330': '庆安县',
'232331': '明水县',
'232332': '绥棱县',
'232600': '黑河地区',
'232601': '黑河市',
'232602': '北安市',
'232603': '五大连池市',
'232622': '嫩江县',
'232623': '德都县',
'232625': '逊克县',
'232626': '孙吴县',
'232700': '大兴安岭地区',
'232721': '呼玛县',
'232722': '塔河县',
'232723': '漠河县',
'310000': '上海市',
'310101': '黄浦区',
'310102': '南市区',
'310103': '卢湾区',
'310104': '徐汇区',
'310105': '长宁区',
'310106': '静安区',
'310107': '普陀区',
'310108': '闸北区',
'310109': '虹口区',
'310110': '杨浦区',
'310111': '吴淞区',
'310112': '闵行区',
'310221': '上海县',
'310222': '嘉定县',
'310223': '宝山县',
'310224': '川沙县',
'310225': '南汇县',
'310226': '奉贤县',
'310227': '松江县',
'310228': '金山县',
'310229': '青浦县',
'310230': '崇明县',
'320000': '江苏省',
'320100': '南京市',
'320102': '玄武区',
'320103': '白下区',
'320104': '秦淮区',
'320105': '建邺区',
'320106': '鼓楼区',
'320107': '下关区',
'320111': '浦口区',
'320112': '大厂区',
'320113': '栖霞区',
'320114': '雨花台区',
'320121': '江宁县',
'320122': '江浦县',
'320123': '六合县',
'320124': '溧水县',
'320125': '高淳县',
'320200': '无锡市',
'320202': '崇安区',
'320203': '南长区',
'320204': '北塘区',
'320211': '郊区',
'320221': '江阴县',
'320222': '无锡县',
'320223': '宜兴县',
'320300': '徐州市',
'320302': '鼓楼区',
'320303': '云龙区',
'320304': '矿区',
'320305': '贾汪区',
'320311': '郊区',
'320321': '丰县',
'320322': '沛县',
'320323': '铜山县',
'320324': '睢宁县',
'320325': '邳县',
'320326': '新沂县',
'320400': '常州市',
'320402': '天宁区',
'320403': '广化区',
'320404': '钟楼区',
'320405': '戚墅堰区',
'320411': '郊区',
'320421': '武进县',
'320422': '金坛县',
'320423': '溧阳县',
'320500': '苏州市',
'320502': '沧浪区',
'320503': '平江区',
'320504': '金阊区',
'320511': '郊区',
'320521': '沙洲县',
'320522': '太仓县',
'320523': '昆山县',
'320524': '吴县',
'320525': '吴江县',
'320600': '南通市',
'320602': '城区',
'320611': '郊区',
'320621': '海安县',
'320622': '如皋县',
'320623': '如东县',
'320624': '南通县',
'320625': '海门县',
'320626': '启东县',
'320700': '连云港市',
'320702': '新海区',
'320703': '连云区',
'320704': '云台区',
'320721': '赣榆县',
'320722': '东海县',
'320723': '灌云县',
'320800': '淮阴市',
'320802': '清河区',
'320811': '清浦区',
'320821': '淮阴县',
'320822': '灌南县',
'320823': '沭阳县',
'320824': '宿迁县',
'320825': '泗阳县',
'320826': '涟水县',
'320827': '泗洪县',
'320828': '淮安县',
'320829': '洪泽县',
'320830': '盱眙县',
'320831': '金湖县',
'320900': '盐城市',
'320902': '城区',
'320911': '郊区',
'320921': '响水县',
'320922': '滨海县',
'320923': '阜宁县',
'320924': '射阳县',
'320925': '建湖县',
'320926': '大丰县',
'320927': '东台县',
'321000': '扬州市',
'321002': '广陵区',
'321011': '郊区',
'321021': '兴化县',
'321022': '高邮县',
'321023': '宝应县',
'321024': '靖江县',
'321025': '泰兴县',
'321026': '江都县',
'321027': '邗江县',
'321028': '泰县',
'321029': '仪征县',
'321100': '镇江市',
'321102': '京口区',
'321111': '润州区',
'321121': '丹徒县',
'321122': '丹阳县',
'321123': '句容县',
'321124': '扬中县',
'329001': '泰州市',
'329003': '常熟市',
'330000': '浙江省',
'330100': '杭州市',
'330102': '上城区',
'330103': '下城区',
'330104': '江干区',
'330105': '拱墅区',
'330106': '西湖区',
'330107': '半山区',
'330121': '萧山县',
'330122': '桐庐县',
'330123': '富阳县',
'330124': '临安县',
'330125': '余杭县',
'330126': '建德县',
'330127': '淳安县',
'330200': '宁波市',
'330203': '海曙区',
'330204': '江东区',
'330205': '江北区',
'330206': '滨海区',
'330211': '镇海区',
'330219': '余姚市',
'330222': '慈溪县',
'330224': '奉化县',
'330225': '象山县',
'330226': '宁海县',
'330227': '鄞县',
'330300': '温州市',
'330302': '鹿城区',
'330303': '龙湾区',
'330321': '瓯海县',
'330322': '洞头县',
'330323': '乐清县',
'330324': '永嘉县',
'330325': '瑞安县',
'330326': '平阳县',
'330327': '苍南县',
'330328': '文成县',
'330329': '泰顺县',
'330400': '嘉兴市',
'330402': '城区',
'330411': '郊区',
'330421': '嘉善县',
'330422': '平湖县',
'330423': '海宁县',
'330424': '海盐县',
'330425': '桐乡县',
'330500': '湖州市',
'330502': '城区',
'330511': '郊区',
'330521': '德清县',
'330522': '长兴县',
'330523': '安吉县',
'330600': '绍兴市',
'330602': '越城区',
'330621': '绍兴县',
'330622': '上虞县',
'330623': '嵊县',
'330624': '新昌县',
'330625': '诸暨县',
'330700': '金华市',
'330702': '婺城区',
'330701': '兰溪市',
'330721': '金华县',
'330722': '永康县',
'330723': '武义县',
'330724': '东阳县',
'330725': '义乌县',
'330726': '浦江县',
'330727': '磐安县',
'330800': '衢州市',
'330802': '柯城区',
'330821': '衢县',
'330822': '常山县',
'330823': '江山县',
'330824': '开化县',
'330825': '龙游县',
'332500': '丽水地区',
'332521': '丽水县',
'332522': '青田县',
'332523': '云和县',
'332524': '龙泉县',
'332525': '庆元县',
'332526': '缙云县',
'332527': '遂昌县',
'332528': '松阳县',
'332529': '景宁畲族自治县',
'332600': '台州地区',
'332601': '椒江市',
'332621': '临海县',
'332622': '黄岩县',
'332623': '温岭县',
'332624': '仙居县',
'332625': '天台县',
'332626': '三门县',
'332627': '玉环县',
'332700': '舟山地区',
'332721': '定海县',
'332722': '普陀县',
'332723': '岱山县',
'332724': '嵊泗县',
'340000': '安徽省',
'340100': '合肥市',
'340102': '东市区',
'340103': '中市区',
'340104': '西市区',
'340111': '郊区',
'340121': '长丰县',
'340122': '肥东县',
'340123': '肥西县',
'340200': '芜湖市',
'340202': '镜湖区',
'340203': '马塘区',
'340204': '新芜区',
'340205': '裕溪口区',
'340206': '四褐山区',
'340211': '郊区',
'340221': '芜湖县',
'340222': '繁昌县',
'340223': '南陵县',
'340224': '青阳县',
'340300': '蚌埠市',
'340302': '东市区',
'340303': '中市区',
'340304': '西市区',
'340311': '郊区',
'340321': '怀远县',
'340322': '五河县',
'340323': '固镇县',
'340400': '淮南市',
'340402': '大通区',
'340403': '田家庵区',
'340404': '谢家集区',
'340405': '八公山区',
'340406': '潘集区',
'340421': '凤台县',
'340500': '马鞍山市',
'340502': '金家庄区',
'340503': '花山区',
'340504': '雨山区',
'340505': '向山区',
'340521': '当涂县',
'340600': '淮北市',
'340602': '杜集区',
'340603': '相山区',
'340604': '烈山区',
'340621': '濉溪县',
'340700': '铜陵市',
'340702': '铜官山区',
'340703': '狮子山区',
'340704': '铜山区',
'340711': '郊区',
'340721': '铜陵县',
'340800': '安庆市',
'340802': '迎江区',
'340803': '大观区',
'340811': '郊区',
'340901': '黄山市',
'342100': '阜阳地区',
'342101': '阜阳市',
'342121': '阜阳县',
'342122': '临泉县',
'342123': '太和县',
'342124': '涡阳县',
'342125': '蒙城县',
'342126': '亳县',
'342127': '阜南县',
'342128': '颍上县',
'342129': '界首县',
'342130': '利辛县',
'342200': '宿县地区',
'342201': '宿州市',
'342221': '砀山县',
'342222': '萧县',
'342223': '宿县',
'342224': '灵璧县',
'342225': '泗县',
'342300': '滁县地区',
'342301': '滁州市',
'342321': '天长县',
'342322': '来安县',
'342324': '全椒县',
'342325': '定远县',
'342326': '凤阳县',
'342327': '嘉山县',
'342400': '六安地区',
'342401': '六安市',
'342421': '六安县',
'342422': '寿县',
'342423': '霍邱县',
'342425': '舒城县',
'342426': '金寨县',
'342427': '霍山县',
'342500': '宣城地区',
'342521': '宣城县',
'342522': '郎溪县',
'342523': '广德县',
'342524': '宁国县',
'342529': '泾县',
'342600': '巢湖地区',
'342601': '巢湖市',
'342622': '庐江县',
'342623': '无为县',
'342625': '含山县',
'342626': '和县',
'342700': '徽州地区',
'342701': '屯溪市',
'342721': '绩溪县',
'342722': '旌德县',
'342723': '歙县',
'342724': '休宁县',
'342725': '黟县',
'342726': '祁门县',
'342728': '石台县',
'342800': '安庆地区',
'342821': '怀宁县',
'342822': '桐城县',
'342823': '枞阳县',
'342824': '潜山县',
'342825': '太湖县',
'342826': '宿松县',
'342827': '望江县',
'342828': '岳西县',
'342829': '东至县',
'342830': '贵池县',
'350000': '福建省',
'350100': '福州市',
'350102': '鼓楼区',
'350103': '台江区',
'350104': '仓山区',
'350105': '马尾区',
'350111': '郊区',
'350121': '闽侯县',
'350122': '连江县',
'350123': '罗源县',
'350124': '闽清县',
'350125': '永泰县',
'350126': '长乐县',
'350127': '福清县',
'350128': '平潭县',
'350200': '厦门市',
'350202': '鼓浪屿区',
'350203': '思明区',
'350204': '开元区',
'350205': '杏林区',
'350211': '郊区',
'350221': '同安县',
'350300': '莆田市',
'350302': '城厢区',
'350303': '涵江区',
'350321': '莆田县',
'350322': '仙游县',
'350400': '三明市',
'350402': '梅列区',
'350403': '三元区',
'350420': '永安市',
'350421': '明溪县',
'350423': '清流县',
'350424': '宁化县',
'350425': '大田县',
'350426': '尤溪县',
'350427': '沙县',
'350428': '将乐县',
'350429': '泰宁县',
'350430': '建宁县',
'350500': '泉州市',
'350502': '鲤城区',
'350521': '惠安县',
'350522': '晋江县',
'350523': '南安县',
'350524': '安溪县',
'350525': '永春县',
'350526': '德化县',
'350527': '金门县',
'350600': '漳州市',
'350602': '芗城区',
'350621': '龙海县',
'350622': '云霄县',
'350623': '漳浦县',
'350624': '诏安县',
'350625': '长泰县',
'350626': '东山县',
'350627': '南靖县',
'350628': '平和县',
'350629': '华安县',
'352100': '建阳地区',
'352101': '南平市',
'352102': '邵武市',
'352121': '顺昌县',
'352122': '建阳县',
'352123': '建瓯县',
'352124': '浦城县',
'352126': '崇安县',
'352127': '光泽县',
'352128': '松溪县',
'352129': '政和县',
'352200': '宁德地区',
'352221': '宁德县',
'352224': '福鼎县',
'352225': '霞浦县',
'352226': '福安县',
'352227': '古田县',
'352228': '屏南县',
'352229': '寿宁县',
'352230': '周宁县',
'352231': '柘荣县',
'352600': '龙岩地区',
'352601': '龙岩市',
'352622': '长汀县',
'352623': '永定县',
'352624': '上杭县',
'352625': '武平县',
'352626': '漳平县',
'352627': '连城县',
'360000': '江西省',
'360100': '南昌市',
'360102': '东湖区',
'360103': '西湖区',
'360104': '青云谱区',
'360105': '湾里区',
'360111': '郊区',
'360121': '南昌县',
'360122': '新建县',
'360123': '安义县',
'360124': '进贤县',
'360200': '景德镇市',
'360202': '昌江区',
'360203': '珠山区',
'360211': '鹅湖区',
'360212': '蛟潭区',
'360221': '乐平县',
'360300': '萍乡市',
'360302': '城关区',
'360311': '上栗区',
'360312': '芦溪区',
'360313': '湘东区',
'360400': '九江市',
'360402': '庐山区',
'360403': '浔阳区',
'360421': '九江县',
'360422': '瑞昌县',
'360423': '武宁县',
'360424': '修水县',
'360425': '永修县',
'360426': '德安县',
'360427': '星子县',
'360428': '都昌县',
'360429': '湖口县',
'360430': '彭泽县',
'360500': '新余市',
'360502': '渝水区',
'360521': '分宜县',
'360600': '鹰潭市',
'360602': '月湖区',
'360621': '贵溪县',
'360622': '余江县',
'362100': '赣州地区',
'362101': '赣州市',
'362121': '赣县',
'362122': '南康县',
'362123': '信丰县',
'362124': '大余县',
'362125': '上犹县',
'362126': '崇义县',
'362127': '安远县',
'362128': '龙南县',
'362129': '定南县',
'362130': '全南县',
'362131': '宁都县',
'362132': '于都县',
'362133': '兴国县',
'362134': '瑞金县',
'362135': '会昌县',
'362136': '寻乌县',
'362137': '石城县',
'362200': '宜春地区',
'362201': '宜春市',
'362221': '丰城县',
'362222': '高安县',
'362223': '清江县',
'362226': '奉新县',
'362227': '万载县',
'362228': '上高县',
'362229': '宜丰县',
'362232': '靖安县',
'362233': '铜鼓县',
'362300': '上饶地区',
'362301': '上饶市',
'362321': '上饶县',
'362322': '广丰县',
'362323': '玉山县',
'362324': '铅山县',
'362325': '横峰县',
'362326': '弋阳县',
'362329': '余干县',
'362330': '波阳县',
'362331': '万年县',
'362333': '德兴县',
'362334': '婺源县',
'362400': '吉安地区',
'362401': '吉安市',
'362402': '井冈山市',
'362421': '吉安县',
'362422': '吉水县',
'362423': '峡江县',
'362424': '新干县',
'362425': '永丰县',
'362426': '泰和县',
'362427': '遂川县',
'362428': '万安县',
'362429': '安福县',
'362430': '永新县',
'362431': '莲花县',
'362432': '宁冈县',
'362500': '抚州地区',
'362501': '抚州市',
'362521': '临川县',
'362522': '南城县',
'362523': '黎川县',
'362524': '南丰县',
'362525': '崇仁县',
'362526': '乐安县',
'362527': '宜黄县',
'362528': '金溪县',
'362529': '资溪县',
'362531': '东乡县',
'362532': '广昌县',
'370000': '山东省',
'370100': '济南市',
'370102': '历下区',
'370103': '市中区',
'370104': '槐荫区',
'370105': '天桥区',
'370111': '郊区',
'370121': '历城县',
'370122': '章丘县',
'370123': '长清县',
'370124': '平阴县',
'370200': '青岛市',
'370202': '市南区',
'370203': '市北区',
'370204': '台东区',
'370205': '四方区',
'370206': '沧口区',
'370211': '黄岛区',
'370221': '崂山县',
'370222': '即墨县',
'370223': '胶南县',
'370224': '胶县',
'370225': '莱西县',
'370226': '平度县',
'370300': '淄博市',
'370302': '淄川区',
'370303': '张店区',
'370304': '博山区',
'370305': '临淄区',
'370306': '周村区',
'370321': '桓台县',
'370400': '枣庄市',
'370402': '市中区',
'370403': '薛城区',
'370404': '峄城区',
'370405': '台儿庄区',
'370406': '山亭区',
'370421': '滕县',
'370500': '东营市',
'370502': '东营区',
'370503': '河口区',
'370504': '牛庄区',
'370521': '垦利县',
'370522': '利津县',
'370523': '广饶县',
'370600': '烟台市',
'370602': '芝罘区',
'370611': '福山区',
'370620': '威海市',
'370622': '蓬莱县',
'370623': '黄县',
'370624': '招远县',
'370625': '掖县',
'370627': '莱阳县',
'370628': '栖霞县',
'370629': '海阳县',
'370630': '乳山县',
'370631': '牟平县',
'370632': '文登县',
'370633': '荣成县',
'370634': '长岛县',
'370700': '潍坊市',
'370702': '潍城区',
'370703': '寒亭区',
'370704': '坊子区',
'370721': '益都县',
'370722': '安丘县',
'370723': '寿光县',
'370724': '临朐县',
'370725': '昌乐县',
'370726': '昌邑县',
'370727': '高密县',
'370728': '诸城县',
'370729': '五莲县',
'370800': '济宁市',
'370802': '市中区',
'370811': '市郊区',
'370822': '兖州县',
'370823': '曲阜县',
'370825': '邹县',
'370826': '微山县',
'370827': '鱼台县',
'370828': '金乡县',
'370829': '嘉祥县',
'370830': '汶上县',
'370831': '泗水县',
'370900': '泰安市',
'370902': '泰山区',
'370911': '郊区',
'370901': '莱芜市',
'370902': '新泰市',
'370921': '宁阳县',
'370922': '肥城县',
'370923': '东平县',
'372300': '惠民地区',
'372301': '滨州市',
'372321': '惠民县',
'372322': '滨县',
'372323': '阳信县',
'372324': '无棣县',
'372325': '沾化县',
'372328': '博兴县',
'372330': '邹平县',
'372331': '高青县',
'372400': '德州地区',
'372401': '德州市',
'372421': '陵县',
'372422': '平原县',
'372423': '夏津县',
'372424': '武城县',
'372425': '齐河县',
'372426': '禹城县',
'372427': '乐陵县',
'372428': '临邑县',
'372429': '商河县',
'372430': '济阳县',
'372431': '宁津县',
'372432': '庆云县',
'372500': '聊城地区',
'372501': '聊城市',
'372502': '临清市',
'372522': '阳谷县',
'372523': '莘县',
'372524': '茌平县',
'372525': '东阿县',
'372526': '冠县',
'372527': '高唐县',
'372800': '临沂地区',
'372801': '临沂市',
'372822': '郯城县',
'372823': '苍山县',
'372824': '莒南县',
'372825': '日照县',
'372826': '莒县',
'372827': '沂水县',
'372828': '沂源县',
'372829': '蒙阴县',
'372830': '平邑县',
'372831': '费县',
'372832': '沂南县',
'372833': '临沭县',
'372900': '菏泽地区',
'372901': '菏泽市',
'372922': '曹县',
'372923': '定陶县',
'372924': '成武县',
'372925': '单县',
'372926': '巨野县',
'372927': '梁山县',
'372928': '郓城县',
'372929': '鄄城县',
'372930': '东明县',
'410000': '河南省',
'410100': '郑州市',
'410102': '中原区',
'410103': '二七区',
'410104': '管城回族区',
'410105': '金水区',
'410106': '上街区',
'410107': '新密区',
'410111': '金海区',
'410112': '郊区',
'410121': '荥阳县',
'410122': '中牟县',
'410123': '新郑县',
'410124': '巩县',
'410125': '登封县',
'410126': '密县',
'410200': '开封市',
'410202': '龙亭区',
'410203': '顺河回族区',
'410204': '古楼区',
'410205': '南关区',
'410211': '郊区',
'410221': '杞县',
'410222': '通许县',
'410223': '尉氏县',
'410224': '开封县',
'410225': '兰考县',
'410300': '洛阳市',
'410302': '老城区',
'410303': '西工区',
'410304': '瀍河回族区',
'410305': '涧西区',
'410306': '吉利区',
'410311': '郊区',
'410321': '偃师县',
'410322': '孟津县',
'410323': '新安县',
'410400': '平顶山市',
'410402': '新华区',
'410403': '卫东区',
'410411': '郊区',
'410412': '舞钢区',
'410421': '宝丰县',
'410422': '叶县',
'410423': '鲁山县',
'410500': '安阳市',
'410502': '文峰区',
'410503': '北关区',
'410504': '铁西区',
'410511': '郊区',
'410521': '林县',
'410522': '安阳县',
'410523': '汤阴县',
'410524': '淇县',
'410525': '浚县',
'410600': '鹤壁市',
'410602': '鹤山区',
'410603': '山城区',
'410611': '郊区',
'410700': '新乡市',
'410702': '红旗区',
'410703': '新华区',
'410704': '北站区',
'410711': '郊区',
'410721': '新乡县',
'410722': '汲县',
'410800': '焦作市',
'410802': '解放区',
'410803': '中站区',
'410804': '马村区',
'410811': '郊区',
'410821': '修武县',
'410822': '博爱县',
'410900': '濮阳市',
'410902': '市区',
'410911': '郊区',
'410921': '滑县',
'410922': '清丰县',
'410923': '南乐县',
'410924': '内黄县',
'410925': '长垣县',
'410926': '范县',
'410927': '台前县',
'412200': '新乡地区',
'412221': '沁阳县',
'412223': '济源县',
'412224': '孟县',
'412225': '温县',
'412226': '武陟县',
'412228': '获嘉县',
'412230': '辉县',
'412232': '原阳县',
'412233': '延津县',
'412234': '封丘县',
'412300': '商丘地区',
'412301': '商丘市',
'412321': '虞城县',
'412322': '商丘县',
'412323': '民权县',
'412324': '宁陵县',
'412325': '睢县',
'412326': '夏邑县',
'412327': '柘城县',
'412328': '永城县',
'412500': '洛阳地区',
'412501': '三门峡市',
'412502': '义马市',
'412524': '渑池县',
'412525': '陕县',
'412526': '灵宝县',
'412527': '伊川县',
'412528': '汝阳县',
'412529': '嵩县',
'412530': '洛宁县',
'412531': '卢氏县',
'412532': '栾川县',
'412533': '临汝县',
'412534': '宜阳县',
'412600': '许昌地区',
'412601': '许昌市',
'412602': '漯河市',
'412621': '长葛县',
'412622': '禹县',
'412623': '鄢陵县',
'412624': '许昌县',
'412625': '郏县',
'412626': '临颍县',
'412627': '襄城县',
'412629': '郾城县',
'412632': '舞阳县',
'412700': '周口地区',
'412701': '周口市',
'412721': '扶沟县',
'412722': '西华县',
'412723': '商水县',
'412724': '太康县',
'412725': '鹿邑县',
'412726': '郸城县',
'412727': '淮阳县',
'412728': '沈丘县',
'412729': '项城县',
'412800': '驻马店地区',
'412801': '驻马店市',
'412821': '确山县',
'412822': '泌阳县',
'412823': '遂平县',
'412824': '西平县',
'412825': '上蔡县',
'412826': '汝南县',
'412827': '平舆县',
'412828': '新蔡县',
'412829': '正阳县',
'412900': '南阳地区',
'412901': '南阳市',
'412921': '南召县',
'412922': '方城县',
'412923': '西峡县',
'412924': '南阳县',
'412925': '镇平县',
'412926': '内乡县',
'412927': '淅川县',
'412928': '社旗县',
'412929': '唐河县',
'412930': '邓县',
'412931': '新野县',
'412932': '桐柏县',
'413000': '信阳地区',
'413001': '信阳市',
'413021': '息县',
'413022': '淮滨县',
'413023': '信阳县',
'413024': '潢川县',
'413025': '光山县',
'413026': '固始县',
'413027': '商城县',
'413028': '罗山县',
'413029': '新县',
'420000': '湖北省',
'420100': '武汉市',
'420102': '江岸区',
'420103': '江汉区',
'420104': '硚口区',
'420105': '汉阳区',
'420106': '武昌区',
'420107': '青山区',
'420111': '洪山区',
'420112': '东西湖区',
'420113': '汉南区',
'420121': '汉阳县',
'420122': '武昌县',
'420123': '黄陂县',
'420124': '新洲县',
'420200': '黄石市',
'420202': '黄石港区',
'420203': '石灰窑区',
'420204': '下陆区',
'420205': '铁山区',
'420221': '大冶县',
'420300': '十堰市',
'420302': '茅箭区',
'420303': '张湾区',
'420400': '沙市市',
'420500': '宜昌市',
'420600': '襄樊市',
'420602': '襄城区',
'420603': '樊东区',
'420604': '樊西区',
'420611': '郊区',
'420621': '襄阳县',
'420622': '枣阳县',
'420623': '宜城县',
'420624': '南漳县',
'420625': '谷城县',
'420626': '保康县',
'420700': '鄂州市',
'420703': '黄州区',
'420704': '鄂城区',
'420800': '荆门市',
'420802': '鄂城区',
'420803': '黄州区',
'422100': '黄冈地区',
'422121': '黄冈县',
'422123': '红安县',
'422124': '麻城县',
'422125': '罗田县',
'422126': '英山县',
'422127': '浠水县',
'422128': '蕲春县',
'422129': '广济县',
'422130': '黄梅县',
'422200': '孝感地区',
'422201': '孝感市',
'422223': '大悟县',
'422224': '应山县',
'422225': '安陆县',
'422226': '云梦县',
'422227': '应城县',
'422228': '汉川县',
'422300': '咸宁地区',
'422301': '咸宁市',
'422322': '嘉鱼县',
'422323': '蒲圻县',
'422324': '通城县',
'422325': '崇阳县',
'422326': '通山县',
'422327': '阳新县',
'422400': '荆州地区',
'422421': '江陵县',
'422422': '松滋县',
'422423': '公安县',
'422424': '石首县',
'422425': '监利县',
'422426': '洪湖县',
'422427': '沔阳县',
'422428': '天门县',
'422429': '潜江县',
'422431': '钟祥县',
'422432': '京山县',
'422600': '郧阳地区',
'422601': '丹江口市',
'422622': '郧县',
'422623': '郧西县',
'422624': '竹山县',
'422625': '竹溪县',
'422626': '房县',
'422700': '宜昌地区',
'422721': '宜昌县',
'422722': '宜都县',
'422723': '枝江县',
'422724': '当阳县',
'422725': '远安县',
'422726': '兴山县',
'422727': '秭归县',
'422728': '长阳土家族自治县',
'422729': '五峰土家族自治县',
'422800': '鄂西土家族苗族自治州',
'422801': '恩施市',
'422822': '建始县',
'422823': '巴东县',
'422824': '利川县',
'422825': '宣恩县',
'422826': '咸丰县',
'422827': '来凤县',
'422828': '鹤峰县',
'429001': '随州市',
'429002': '老河口市',
'429021': '神农架林区',
'430000': '湖南省',
'430100': '长沙市',
'430102': '东区',
'430103': '南区',
'430104': '西区',
'430105': '北区',
'430111': '郊区',
'430121': '长沙县',
'430122': '望城县',
'430123': '浏阳县',
'430124': '宁乡县',
'430200': '株洲市',
'430202': '东区',
'430203': '北区',
'430204': '南区',
'430211': '郊区',
'430219': '醴陵市',
'430221': '株洲县',
'430223': '攸县',
'430224': '茶陵县',
'430225': '酃县',
'430300': '湘潭市',
'430302': '雨湖区',
'430303': '湘江区',
'430304': '岳塘区',
'430305': '板塘区',
'430311': '郊区',
'430321': '湘潭县',
'430322': '湘乡县',
'430400': '衡阳市',
'430402': '江东区',
'430403': '城南区',
'430404': '城北区',
'430411': '郊区',
'430421': '衡阳县',
'430422': '衡南县',
'430423': '衡山县',
'430424': '衡东县',
'430425': '常宁县',
'430426': '祁东县',
'430427': '耒阳县',
'430500': '邵阳市',
'430502': '东区',
'430503': '西区',
'430504': '桥头区',
'430511': '郊区',
'430521': '邵东县',
'430522': '新邵县',
'430600': '岳阳市',
'430602': '南区',
'430603': '北区',
'430611': '郊区',
'430621': '岳阳县',
'432200': '岳阳地区',
'432222': '平江县',
'432223': '湘阴县',
'432224': '汨罗县',
'432225': '临湘县',
'432226': '华容县',
'432300': '益阳地区',
'432301': '益阳市',
'432321': '益阳县',
'432322': '南县',
'432323': '沅江县',
'432325': '桃江县',
'432326': '安化县',
'432400': '常德地区',
'432401': '常德市',
'432402': '津市市',
'432421': '常德县',
'432422': '安乡县',
'432423': '汉寿县',
'432424': '澧县',
'432425': '临澧县',
'432426': '桃源县',
'432427': '石门县',
'432428': '慈利县',
'432500': '娄底地区',
'432501': '娄底市',
'432502': '冷水江市',
'432521': '涟源县',
'432522': '双峰县',
'432524': '新化县',
'432600': '邵阳地区',
'432621': '邵阳县',
'432622': '隆回县',
'432623': '武冈县',
'432624': '洞口县',
'432625': '新宁县',
'432626': '绥宁县',
'432627': '城步苗族自治县',
'432800': '郴州地区',
'432801': '郴州市',
'432802': '资兴市',
'432821': '郴县',
'432822': '桂阳县',
'432823': '永兴县',
'432824': '宜章县',
'432826': '嘉禾县',
'432827': '临武县',
'432828': '汝城县',
'432829': '桂东县',
'432830': '安仁县',
'432900': '零陵地区',
'432901': '永州市',
'432902': '冷水滩市',
'432922': '东安县',
'432923': '道县',
'432924': '宁远县',
'432925': '江永县',
'432926': '江华瑶族自治县',
'432927': '蓝山县',
'432928': '新田县',
'432929': '双牌县',
'432930': '祁阳县',
'433000': '怀化地区',
'433001': '怀化市',
'433002': '洪江市',
'433021': '黔阳县',
'433022': '沅陵县',
'433023': '辰溪县',
'433024': '溆浦县',
'433025': '麻阳县',
'433026': '新晃侗族自治县',
'433027': '芷江县',
'433029': '会同县',
'433030': '靖县',
'433031': '通道侗族自治县',
'433100': '湘西土家族苗族自治州',
'433101': '吉首市',
'433102': '大庸市',
'433122': '泸溪县',
'433123': '凤凰县',
'433124': '花垣县',
'433125': '保靖县',
'433126': '古丈县',
'433127': '永顺县',
'433129': '桑植县',
'433130': '龙山县',
'440000': '广东省',
'440100': '广州市',
'440102': '东山区',
'440103': '荔湾区',
'440104': '越秀区',
'440105': '海珠区',
'440106': '芳村区',
'440107': '天河区',
'440111': '郊区',
'440112': '黄埔区',
'440121': '花县',
'440122': '从化县',
'440123': '新丰县',
'440124': '龙门县',
'440125': '增城县',
'440126': '番禺县',
'440127': '清远县',
'440128': '佛冈县',
'440200': '韶关市',
'440202': '北江区',
'440203': '浈江区',
'440204': '武江区',
'440221': '曲江县',
'440222': '始兴县',
'440223': '南雄县',
'440224': '仁化县',
'440225': '乐昌县',
'440226': '连县',
'440227': '阳山县',
'440228': '英德县',
'440229': '翁源县',
'440230': '连山壮族瑶族自治县',
'440231': '连南瑶族自治县',
'440232': '乳源瑶族自治县',
'440300': '深圳市',
'440321': '宝安县',
'440400': '珠海市',
'440402': '香洲区',
'440421': '斗门县',
'440500': '汕头市',
'440502': '同平区',
'440503': '安平区',
'440504': '公园区',
'440505': '金沙区',
'440506': '达豪区',
'440511': '郊区',
'440521': '澄海县',
'440522': '饶平县',
'440523': '南澳县',
'440524': '潮阳县',
'440525': '揭阳县',
'440526': '揭西县',
'440527': '普宁县',
'440528': '惠来县',
'440600': '佛山市',
'440602': '汾江区',
'440603': '石湾区',
'440621': '三水县',
'440622': '南海县',
'440623': '顺德县',
'440624': '高明县',
'440700': '江门市',
'440702': '城区',
'440711': '郊区',
'440721': '新会县',
'440722': '台山县',
'440723': '恩平县',
'440724': '开平县',
'440725': '鹤山县',
'440726': '阳江县',
'440727': '阳春县',
'440800': '湛江市',
'440802': '赤坎区',
'440803': '霞山区',
'440804': '坡头区',
'440811': '郊区',
'440821': '吴川县',
'440822': '廉江县',
'440823': '遂溪县',
'440824': '海康县',
'440825': '徐闻县',
'440900': '茂名市',
'440902': '茂南区',
'440921': '信宜县',
'440922': '高州县',
'440923': '电白县',
'440924': '化州县',
'449001': '潮州市',
'449002': '中山市',
'442100': '海南行政区',
'442101': '海口市',
'442121': '琼山县',
'442122': '文昌县',
'442123': '琼海县',
'442124': '万宁县',
'442125': '定安县',
'442126': '屯昌县',
'442127': '澄迈县',
'442128': '临高县',
'442129': '儋县',
'442200': '海南黎族苗族自治州',
'442201': '三亚市',
'442222': '东方县',
'442223': '乐东县',
'442224': '琼中县',
'442225': '保亭县',
'442226': '陵水县',
'442227': '白沙县',
'442228': '昌江县',
'442400': '梅县地区',
'442401': '梅县市',
'442422': '大埔县',
'442423': '丰顺县',
'442424': '五华县',
'442425': '兴宁县',
'442426': '平远县',
'442427': '蕉岭县',
'442500': '惠阳地区',
'442501': '惠州市',
'442502': '东莞市',
'442521': '惠阳县',
'442522': '紫金县',
'442523': '和平县',
'442524': '连平县',
'442525': '河源县',
'442526': '博罗县',
'442528': '惠东县',
'442529': '龙川县',
'442530': '陆丰县',
'442531': '海丰县',
'442800': '肇庆地区',
'442801': '肇庆市',
'442821': '高要县',
'442822': '四会县',
'442823': '广宁县',
'442824': '怀集县',
'442825': '封开县',
'442826': '德庆县',
'442827': '云浮县',
'442828': '新兴县',
'442829': '郁南县',
'442830': '罗定县',
'450000': '广西壮族自治区',
'450100': '南宁市',
'450102': '兴宁区',
'450103': '新城区',
'450104': '城北区',
'450105': '江南区',
'450106': '永新区',
'450107': '市郊区',
'450121': '邕宁县',
'450122': '武鸣县',
'450200': '柳州市',
'450202': '城中区',
'450203': '鱼峰区',
'450204': '柳南区',
'450205': '柳北区',
'450206': '市郊区',
'450221': '柳江县',
'450222': '柳城县',
'450300': '桂林市',
'450302': '秀峰区',
'450303': '叠彩区',
'450304': '象山区',
'450305': '七星区',
'450306': '市郊区',
'450321': '阳朔县',
'450322': '临桂县',
'450400': '梧州市',
'450403': '万秀区',
'450404': '蝶山区',
'450405': '市郊区',
'450421': '苍梧县',
'450500': '北海市',
'450502': '海城区',
'450503': '市郊区',
'452100': '南宁地区',
'452101': '凭祥市',
'452122': '横县',
'452123': '宾阳县',
'452124': '上林县',
'452126': '隆安县',
'452127': '马山县',
'452128': '扶绥县',
'452129': '崇左县',
'452130': '大新县',
'452131': '天等县',
'452132': '宁明县',
'452133': '龙州县',
'452200': '柳州地区',
'452201': '合山市',
'452223': '鹿寨县',
'452224': '象州县',
'452225': '武宣县',
'452226': '来宾县',
'452227': '融安县',
'452228': '三江侗族自治县',
'452229': '融水苗族自治县',
'452230': '金秀瑶族自治县',
'452231': '忻城县',
'452300': '桂林地区',
'452322': '灵川县',
'452323': '全州县',
'452324': '兴安县',
'452325': '永福县',
'452327': '灌阳县',
'452328': '龙胜各族自治县',
'452329': '资源县',
'452330': '平乐县',
'452331': '荔浦县',
'452332': '恭城县',
'452400': '梧州地区',
'452421': '岑溪县',
'452423': '藤县',
'452424': '昭平县',
'452425': '蒙山县',
'452426': '贺县',
'452427': '钟山县',
'452428': '富川瑶族自治县',
'452500': '玉林地区',
'452501': '玉林市',
'452522': '贵县',
'452523': '桂平县',
'452524': '平南县',
'452525': '容县',
'452526': '北流县',
'452527': '陆川县',
'452528': '博白县',
'452600': '百色地区',
'452601': '百色市',
'452622': '田阳县',
'452623': '田东县',
'452624': '平果县',
'452625': '德保县',
'452626': '靖西县',
'452627': '那坡县',
'452628': '凌云县',
'452629': '乐业县',
'452630': '田林县',
'452631': '隆林各族自治县',
'452632': '西林县',
'452700': '河池地区',
'452701': '河池市',
'452722': '宜山县',
'452723': '罗城仫佬族自治县',
'452724': '环江县',
'452725': '南丹县',
'452726': '天峨县',
'452727': '凤山县',
'452728': '东兰县',
'452729': '巴马瑶族自治县',
'452730': '都安瑶族自治县',
'452800': '钦州地区',
'452801': '钦州市',
'452821': '上思县',
'452822': '防城各族自治县',
'452824': '灵山县',
'452825': '合浦县',
'452826': '浦北县',
'510000': '四川省',
'510100': '成都市',
'510102': '东城区',
'510103': '西城区',
'510111': '金牛区',
'510112': '龙泉驿区',
'510113': '青白江区',
'510121': '金堂县',
'510122': '双流县',
'510123': '温江县',
'510124': '郫县',
'510125': '新都县',
'510126': '彭县',
'510127': '灌县',
'510128': '崇庆县',
'510129': '大邑县',
'510130': '邛崃县',
'510131': '蒲江县',
'510132': '新津县',
'510200': '重庆市',
'510202': '市中区',
'510203': '大渡口区',
'510211': '江北区',
'510212': '沙坪坝区',
'510213': '九龙坡区',
'510214': '南岸区',
'510215': '北碚区',
'510216': '南桐矿区',
'510217': '双桥区',
'510221': '长寿县',
'510222': '巴县',
'510223': '綦江县',
'510224': '江北县',
'510225': '江津县',
'510226': '合川县',
'510227': '潼南县',
'510228': '铜梁县',
'510229': '永川县',
'510230': '大足县',
'510231': '荣昌县',
'510232': '璧山县',
'510300': '自贡市',
'510302': '自流井区',
'510303': '贡井区',
'510304': '大安区',
'510311': '沿滩区',
'510321': '荣县',
'510322': '富顺县',
'510400': '渡口市',
'510402': '东区',
'510403': '西区',
'510411': '仁和区',
'510421': '米易县',
'510422': '盐边县',
'510500': '泸州市',
'510502': '市中区',
'510521': '泸县',
'510522': '合江县',
'510523': '纳溪县',
'510524': '叙永县',
'510525': '古蔺县',
'510600': '德阳市',
'510602': '市中区',
'510622': '绵竹县',
'510623': '中江县',
'510624': '广汉县',
'510625': '什邡县',
'510700': '绵阳市',
'510702': '市中区',
'510721': '江油县',
'510722': '三台县',
'510723': '盐亭县',
'510724': '安县',
'510725': '梓潼县',
'510726': '北川县',
'510727': '平武县',
'510800': '广元市',
'510802': '市中区',
'510821': '旺苍县',
'510822': '青川县',
'510823': '剑阁县',
'510824': '苍溪县',
'510900': '遂宁市',
'510902': '市中区',
'510921': '蓬溪县',
'510922': '射洪县',
'511000': '内江市',
'511002': '市中区',
'511021': '内江县',
'511022': '乐至县',
'511023': '安岳县',
'511024': '威远县',
'511025': '资中县',
'511026': '资阳县',
'511027': '简阳县',
'511028': '隆昌县',
'511100': '乐山市',
'511102': '市中区',
'511103': '沙湾区',
'511104': '金口河区',
'511112': '五通桥区',
'511121': '仁寿县',
'511122': '眉山县',
'511123': '犍为县',
'511124': '井研县',
'511125': '峨眉县',
'511126': '夹江县',
'511127': '洪雅县',
'511128': '彭山县',
'511129': '沐川县',
'511130': '青神县',
'511131': '丹棱县',
'511132': '峨边彝族自治县',
'511133': '马边彝族自治县',
'512200': '万县地区',
'512201': '万县市',
'512221': '万县',
'512222': '开县',
'512223': '忠县',
'512224': '梁平县',
'512225': '云阳县',
'512226': '奉节县',
'512227': '巫山县',
'512228': '巫溪县',
'512229': '城口县',
'512300': '涪陵地区',
'512301': '涪陵市',
'512322': '垫江县',
'512323': '南川县',
'512324': '丰都县',
'512325': '石柱土家族自治县',
'512326': '武隆县',
'512327': '彭水苗族土家族自治县',
'512328': '黔江土家族苗族自治县',
'512329': '酉阳土家族苗族自治县',
'512330': '秀山土家族苗族自治县',
'512500': '宜宾地区',
'512501': '宜宾市',
'512527': '宜宾县',
'512528': '南溪县',
'512529': '江安县',
'512530': '长宁县',
'512531': '高县',
'512532': '筠连县',
'512533': '珙县',
'512534': '兴文县',
'512535': '屏山县',
'512900': '南充地区',
'512901': '南充市',
'512902': '华蓥市',
'512921': '南充县',
'512922': '南部县',
'512923': '岳池县',
'512924': '营山县',
'512925': '广安县',
'512926': '蓬安县',
'512927': '仪陇县',
'512928': '武胜县',
'512929': '西充县',
'512930': '阆中县',
'513000': '达县地区',
'513001': '达县市',
'513021': '达县',
'513022': '宣汉县',
'513023': '开江县',
'513024': '万源县',
'513025': '通江县',
'513026': '南江县',
'513027': '巴中县',
'513028': '平昌县',
'513029': '大竹县',
'513030': '渠县',
'513031': '邻水县',
'513032': '白沙工农区',
'513100': '雅安地区',
'513101': '雅安市',
'513122': '名山县',
'513123': '荥经县',
'513124': '汉源县',
'513125': '石棉县',
'513126': '天全县',
'513127': '芦山县',
'513128': '宝兴县',
'513200': '阿坝藏族自治州',
'513221': '汶川县',
'513222': '理县',
'513223': '茂汶羌族自治县',
'513224': '松潘县',
'513225': '南坪县',
'513226': '金川县',
'513227': '小金县',
'513228': '黑水县',
'513229': '马尔康县',
'513230': '壤塘县',
'513231': '阿坝县',
'513232': '若尔盖县',
'513233': '红原县',
'513300': '甘孜藏族自治州',
'513321': '康定县',
'513322': '泸定县',
'513323': '丹巴县',
'513324': '九龙县',
'513325': '雅江县',
'513326': '道孚县',
'513327': '炉霍县',
'513328': '甘孜县',
'513329': '新龙县',
'513330': '德格县',
'513331': '白玉县',
'513332': '石渠县',
'513333': '色达县',
'513334': '理塘县',
'513335': '巴塘县',
'513336': '乡城县',
'513337': '稻城县',
'513338': '得荣县',
'513400': '凉山彝族自治州',
'513401': '西昌市',
'513421': '西昌县',
'513422': '木里藏族自治县',
'513423': '盐源县',
'513424': '德昌县',
'513425': '会理县',
'513426': '会东县',
'513427': '宁南县',
'513428': '普格县',
'513429': '布拖县',
'513430': '金阳县',
'513431': '昭觉县',
'513432': '喜德县',
'513433': '冕宁县',
'513434': '越西县',
'513435': '甘洛县',
'513436': '美姑县',
'513437': '雷波县',
'520000': '贵州省',
'520100': '贵阳市',
'520102': '南明区',
'520103': '云岩区',
'520111': '花溪区',
'520112': '乌当区',
'520113': '白云区',
'520200': '六盘水市',
'520201': '水城特区',
'520202': '盘县特区',
'520203': '六枝特区',
'522100': '遵义地区',
'522101': '遵义市',
'522121': '遵义县',
'522122': '桐梓县',
'522123': '绥阳县',
'522124': '正安县',
'522125': '道真县',
'522126': '务川县',
'522127': '凤冈县',
'522128': '湄潭县',
'522129': '余庆县',
'522130': '仁怀县',
'522131': '赤水县',
'522132': '习水县',
'522200': '铜仁地区',
'522221': '铜仁县',
'522222': '江口县',
'522223': '玉屏侗族自治县',
'522224': '石阡县',
'522225': '思南县',
'522226': '印江县',
'522227': '德江县',
'522228': '沿河县',
'522229': '松桃苗族自治县',
'522230': '万山特区',
'522300': '黔西南布依族苗族自治州',
'522321': '兴义县',
'522322': '兴仁县',
'522323': '普安县',
'522324': '晴隆县',
'522325': '贞丰县',
'522326': '望谟县',
'522327': '册亨县',
'522328': '安龙县',
'522400': '毕节地区',
'522421': '毕节县',
'522422': '大方县',
'522423': '黔西县',
'522424': '金沙县',
'522425': '织金县',
'522426': '纳雍县',
'522427': '威宁彝族回族苗族自治县',
'522428': '赫章县',
'522500': '安顺地区',
'522501': '安顺市',
'522521': '安顺县',
'522522': '开阳县',
'522523': '息烽县',
'522524': '修文县',
'522525': '清镇县',
'522526': '平坝县',
'522527': '普定县',
'522528': '关岭布依族苗族自治县',
'522529': '镇宁布依族苗族自治县',
'522530': '紫云苗族布依族自治县',
'522600': '黔东南苗族侗族自治州',
'522601': '凯里市',
'522622': '黄平县',
'522623': '施秉县',
'522624': '三穗县',
'522625': '镇远县',
'522626': '岑巩县',
'522627': '天柱县',
'522628': '锦屏县',
'522629': '剑河县',
'522630': '台江县',
'522631': '黎平县',
'522632': '榕江县',
'522633': '从江县',
'522634': '雷山县',
'522635': '麻江县',
'522636': '丹寨县',
'522700': '黔南布依族苗族自治州',
'522701': '都匀市',
'522722': '荔波县',
'522723': '贵定县',
'522724': '福泉县',
'522725': '瓮安县',
'522726': '独山县',
'522727': '平塘县',
'522728': '罗甸县',
'522729': '长顺县',
'522730': '龙里县',
'522731': '惠水县',
'522732': '三都水族自治县',
'530000': '云南省',
'530100': '昆明市',
'530102': '五华区',
'530103': '盘龙区',
'530111': '官渡区',
'530112': '西山区',
'530121': '呈贡县',
'530122': '晋宁县',
'530123': '安宁县',
'530124': '富民县',
'530125': '宜良县',
'530126': '路南彝族自治县',
'530127': '嵩明县',
'530128': '禄劝彝族苗族自治县',
'530200': '东川市',
'532100': '昭通地区',
'532101': '邵通市',
'532122': '鲁甸县',
'532123': '巧家县',
'532124': '盐津县',
'532125': '大关县',
'532126': '永善县',
'532127': '绥江县',
'532128': '镇雄县',
'532129': '彝良县',
'532130': '威信县',
'532131': '水富县',
'532200': '曲靖地区',
'532201': '曲靖市',
'532223': '马龙县',
'532224': '宣威县',
'532225': '富源县',
'532226': '罗平县',
'532227': '师宗县',
'532228': '陆良县',
'532231': '寻甸回族彝族自治县',
'532233': '会泽县',
'532300': '楚雄彝族自治州',
'532301': '楚雄市',
'532322': '双柏县',
'532323': '牟定县',
'532324': '南华县',
'532325': '姚安县',
'532326': '大姚县',
'532327': '永仁县',
'532328': '元谋县',
'532329': '武定县',
'532331': '禄丰县',
'532400': '玉溪地区',
'532401': '玉溪市',
'532422': '江川县',
'532423': '澄江县',
'532424': '通海县',
'532425': '华宁县',
'532426': '易门县',
'532427': '峨山彝族自治县',
'532428': '新平彝族傣族自治县',
'532429': '元江哈尼族彝族傣族自治县',
'532500': '红河哈尼族彝族自治州',
'532501': '个旧市',
'532502': '开远市',
'532522': '蒙自县',
'532523': '屏边苗族自治县',
'532524': '建水县',
'532525': '石屏县',
'532526': '弥勒县',
'532527': '泸西县',
'532528': '元阳县',
'532529': '红河县',
'532530': '金平苗族瑶族傣族自治县',
'532531': '绿春县',
'532532': '河口瑶族自治县',
'532600': '文山壮族苗族自治州',
'532621': '文山县',
'532622': '砚山县',
'532623': '西畴县',
'532624': '麻栗坡县',
'532625': '马关县',
'532626': '丘北县',
'532627': '广南县',
'532628': '富宁县',
'532700': '思茅地区',
'532721': '思茅县',
'532722': '普洱哈尼族彝族自治县',
'532723': '墨江哈尼族自治县',
'532724': '景东彝族自治县',
'532725': '景谷傣族彝族自治县',
'532726': '镇沅县',
'532727': '江城哈尼族彝族自治县',
'532728': '孟连傣族拉祜族佤族自治县',
'532729': '澜沧拉祜族自治县',
'532730': '西盟佤族自治县',
'532800': '西双版纳傣族自治州',
'532821': '景洪县',
'532822': '勐海县',
'532823': '勐腊县',
'532900': '大理白族自治州',
'532901': '大理市',
'532922': '漾濞彝族自治县',
'532923': '祥云县',
'532924': '宾川县',
'532925': '弥渡县',
'532926': '南涧彝族自治县',
'532927': '巍山彝族回族自治县',
'532928': '永平县',
'532929': '云龙县',
'532930': '洱源县',
'532931': '剑川县',
'532932': '鹤庆县',
'533000': '保山地区',
'533001': '保山市',
'533022': '施甸县',
'533023': '腾冲县',
'533024': '龙陵县',
'533025': '昌宁县',
'533100': '德宏傣族景颇族自治州',
'533101': '畹町市',
'533121': '潞西县',
'533122': '梁河县',
'533123': '盈江县',
'533124': '陇川县',
'533125': '瑞丽县',
'533200': '丽江地区',
'533221': '丽江纳西族自治县',
'533222': '永胜县',
'533223': '华坪县',
'533224': '宁蒗彝族自治县',
'533300': '怒江傈僳族自治州',
'533321': '泸水县',
'533322': '碧江县',
'533323': '福贡县',
'533324': '贡山独龙族怒族自治县',
'533325': '兰坪县',
'533400': '迪庆藏族自治州',
'533421': '中甸县',
'533422': '德钦县',
'533423': '维西傈僳族自治县',
'533500': '临沧地区',
'533521': '临沧县',
'533522': '凤庆县',
'533523': '云县',
'533524': '永德县',
'533525': '镇康县',
'533526': '双江拉祜族佤族布朗族傣族自治县',
'533527': '耿马傣族佤族自治县',
'533528': '沧源佤族自治县',
'540000': '西藏自治区',
'540100': '拉萨市',
'540102': '城关区',
'540121': '林周县',
'540122': '当雄县',
'540123': '尼木县',
'540124': '曲水县',
'540125': '堆龙德庆县',
'540126': '达孜县',
'540127': '墨竹工卡县',
'542100': '昌都地区',
'542121': '昌都县',
'542122': '江达县',
'542123': '贡觉县',
'542124': '类乌齐县',
'542125': '丁青县',
'542126': '察雅县',
'542127': '八宿县',
'542128': '左贡县',
'542129': '芒康县',
'542132': '洛隆县',
'542133': '边坝县',
'542134': '盐井县',
'542135': '碧土县',
'542136': '妥坝县',
'542137': '生达县',
'542200': '山南地区',
'542221': '乃东县',
'542222': '扎囊县',
'542223': '贡嘎县',
'542224': '桑日县',
'542225': '穷结县',
'542226': '曲松县',
'542227': '措美县',
'542228': '洛扎县',
'542229': '加查县',
'542231': '隆子县',
'542232': '错那县',
'542300': '日喀则地区',
'542321': '日喀则县',
'542322': '南木林县',
'542324': '定日县',
'542325': '萨迦县',
'542326': '拉孜县',
'542327': '昂仁县',
'542328': '谢通门县',
'542332': '定结县',
'542333': '仲巴县',
'542335': '吉隆县',
'542336': '聂拉木县',
'542337': '萨嘎县',
'542400': '那曲地区',
'542421': '那曲县',
'542422': '嘉黎县',
'542423': '比如县',
'542424': '聂荣县',
'542425': '安多县',
'542426': '申扎县',
'542427': '索县',
'542428': '班戈县',
'542429': '巴青县',
'542430': '尼玛县',
'542500': '阿里地区',
'542521': '普兰县',
'542522': '札达县',
'542523': '噶尔县',
'542524': '日土县',
'542525': '革吉县',
'542526': '改则县',
'542527': '措勤县',
'542528': '隆格尔县',
'542600': '林芝地区',
'542621': '林芝县',
'542622': '工布江达县',
'542623': '米林县',
'542624': '墨脱县',
'542625': '波密县',
'542626': '察隅县',
'542627': '朗县',
'542700': '江孜地区',
'542721': '江孜县',
'542722': '浪卡子县',
'542723': '白朗县',
'542724': '仁布县',
'542725': '康马县',
'542726': '亚东县',
'542727': '岗巴县',
'610000': '陕西省',
'610100': '西安市',
'610102': '新城区',
'610103': '碑林区',
'610104': '莲湖区',
'610111': '灞桥区',
'610112': '未央区',
'610113': '雁塔区',
'610114': '阎良区',
'610121': '长安县',
'610122': '蓝田县',
'610123': '临潼县',
'610124': '周至县',
'610125': '户县',
'610126': '高陵县',
'610200': '铜川市',
'610202': '城区',
'610203': '郊区',
'610221': '耀县',
'610222': '宜君县',
'610300': '宝鸡市',
'610302': '渭滨区',
'610303': '金台区',
'610321': '宝鸡县',
'610322': '凤翔县',
'610323': '岐山县',
'610324': '扶风县',
'610326': '眉县',
'610327': '陇县',
'610328': '千阳县',
'610329': '麟游县',
'610330': '凤县',
'610331': '太白县',
'610400': '咸阳市',
'610402': '秦都区',
'610403': '杨陵区',
'610421': '兴平县',
'610422': '三原县',
'610423': '泾阳县',
'610424': '乾县',
'610425': '礼泉县',
'610426': '永寿县',
'610427': '彬县',
'610428': '长武县',
'610429': '旬邑县',
'610430': '淳化县',
'610431': '武功县',
'612100': '渭南地区',
'612101': '渭南市',
'612102': '韩城市',
'612124': '华县',
'612125': '华阴县',
'612126': '潼关县',
'612127': '大荔县',
'612128': '蒲城县',
'612129': '澄城县',
'612130': '白水县',
'612132': '合阳县',
'612133': '富平县',
'612300': '汉中地区',
'612301': '汉中市',
'612321': '南郑县',
'612322': '城固县',
'612323': '洋县',
'612324': '西乡县',
'612325': '勉县',
'612326': '宁强县',
'612327': '略阳县',
'612328': '镇巴县',
'612329': '留坝县',
'612330': '佛坪县',
'612400': '安康地区',
'612421': '安康县',
'612422': '汉阴县',
'612423': '石泉县',
'612424': '宁陕县',
'612425': '紫阳县',
'612426': '岚皋县',
'612427': '平利县',
'612428': '镇坪县',
'612429': '旬阳县',
'612430': '白河县',
'612500': '商洛地区',
'612521': '商县',
'612522': '洛南县',
'612523': '丹凤县',
'612524': '商南县',
'612525': '山阳县',
'612526': '镇安县',
'612527': '柞水县',
'612600': '延安地区',
'612601': '延安市',
'612621': '延长县',
'612622': '延川县',
'612623': '子长县',
'612624': '安塞县',
'612625': '志丹县',
'612626': '吴旗县',
'612627': '甘泉县',
'612628': '富县',
'612629': '洛川县',
'612630': '宜川县',
'612631': '黄龙县',
'612632': '黄陵县',
'612700': '榆林地区',
'612721': '榆林县',
'612722': '神木县',
'612723': '府谷县',
'612724': '横山县',
'612725': '靖边县',
'612726': '定边县',
'612727': '绥德县',
'612728': '米脂县',
'612729': '佳县',
'612730': '吴堡县',
'612731': '清涧县',
'612732': '子洲县',
'620000': '甘肃省',
'620100': '兰州市',
'620102': '城关区',
'620103': '七里河区',
'620104': '西固区',
'620105': '安宁区',
'620111': '红古区',
'620121': '永登县',
'620122': '皋兰县',
'620123': '榆中县',
'620200': '嘉峪关市',
'620300': '金昌市',
'620302': '金川区',
'620321': '永昌县',
'620400': '白银市',
'620402': '白银区',
'620403': '平川区',
'620421': '靖远县',
'620422': '会宁县',
'620423': '景泰县',
'620500': '天水市',
'620502': '秦城区',
'620503': '北道区',
'620521': '清水县',
'620522': '秦安县',
'620523': '甘谷县',
'620524': '武山县',
'620525': '张家川回族自治县',
'622100': '酒泉地区',
'622101': '玉门市',
'622102': '酒泉市',
'622122': '敦煌县',
'622123': '金塔县',
'622124': '肃北蒙古族自治县',
'622125': '阿克塞哈萨克族自治县',
'622126': '安西县',
'622200': '张掖地区',
'622201': '张掖市',
'622222': '肃南裕固族自治县',
'622223': '民乐县',
'622224': '临泽县',
'622225': '高台县',
'622226': '山丹县',
'622300': '武威地区',
'622301': '武威市',
'622322': '民勤县',
'622323': '古浪县',
'622326': '天祝藏族自治县',
'622400': '定西地区',
'622421': '定西县',
'622424': '通渭县',
'622425': '陇西县',
'622426': '渭源县',
'622427': '临洮县',
'622428': '漳县',
'622429': '岷县',
'622600': '陇南地区',
'622621': '武都县',
'622623': '宕昌县',
'622624': '成县',
'622625': '康县',
'622626': '文县',
'622627': '西和县',
'622628': '礼县',
'622629': '两当县',
'622630': '徽县',
'622700': '平凉地区',
'622701': '平凉市',
'622722': '泾川县',
'622723': '灵台县',
'622724': '崇信县',
'622725': '华亭县',
'622726': '庄浪县',
'622727': '静宁县',
'622800': '庆阳地区',
'622801': '西峰市',
'622821': '庆阳县',
'622822': '环县',
'622823': '华池县',
'622824': '合水县',
'622825': '正宁县',
'622826': '宁县',
'622827': '镇原县',
'622900': '临夏回族自治州',
'622901': '临夏市',
'622921': '临夏县',
'622922': '康乐县',
'622923': '永靖县',
'622924': '广河县',
'622925': '和政县',
'622926': '东乡族自治县',
'622927': '积石山保安族东乡族撒拉族自治县',
'623000': '甘南藏族自治州',
'623021': '临潭县',
'623022': '卓尼县',
'623023': '舟曲县',
'623024': '迭部县',
'623025': '玛曲县',
'623026': '碌曲县',
'623027': '夏河县',
'630000': '青海省',
'630100': '西宁市',
'630102': '城东区',
'630103': '城中区',
'630104': '城西区',
'630111': '郊区',
'630121': '大通回族土族自治县',
'632100': '海东地区',
'632121': '平安县',
'632122': '民和回族土族自治县',
'632123': '乐都县',
'632124': '湟中县',
'632125': '湟源县',
'632126': '互助土族自治县',
'632127': '化隆回族自治县',
'632128': '循化撒拉族自治县',
'632200': '海北藏族自治州',
'632221': '门源回族自治县',
'632222': '祁连县',
'632223': '海晏县',
'632224': '刚察县',
'632300': '黄南藏族自治州',
'632321': '同仁县',
'632322': '尖扎县',
'632323': '泽库县',
'632324': '河南蒙古族自治县',
'632500': '海南藏族自治州',
'632521': '共和县',
'632522': '同德县',
'632523': '贵德县',
'632524': '兴海县',
'632525': '贵南县',
'632600': '果洛藏族自治州',
'632621': '玛沁县',
'632622': '班玛县',
'632623': '甘德县',
'632624': '达日县',
'632625': '久治县',
'632626': '玛多县',
'632700': '玉树藏族自治州',
'632721': '玉树县',
'632722': '杂多县',
'632723': '称多县',
'632724': '治多县',
'632725': '囊谦县',
'632726': '曲麻莱县',
'632800': '海西蒙古族藏族自治州',
'632801': '格尔木市',
'632821': '乌兰县',
'632822': '都兰县',
'632823': '天峻县',
'640000': '宁夏回族自治区',
'640100': '银川市',
'640102': '城区',
'640103': '新城区',
'640104': '郊区',
'640121': '永宁县',
'640122': '贺兰县',
'640200': '石咀山市',
'640202': '大武口区',
'640204': '石炭井区',
'640205': '石咀山区',
'640211': '郊区',
'640221': '平罗县',
'640222': '陶乐县',
'642100': '银南地区',
'642101': '吴忠市',
'642102': '青铜峡市',
'642123': '中卫县',
'642124': '中宁县',
'642125': '灵武县',
'642126': '盐池县',
'642127': '同心县',
'642200': '固原地区',
'642221': '固原县',
'642222': '海原县',
'642223': '西吉县',
'642224': '隆德县',
'642225': '泾源县',
'642226': '彭阳县',
'650000': '新疆维吾尔自治区',
'650100': '乌鲁木齐市',
'650102': '天山区',
'650103': '沙依巴克区',
'650104': '新市区',
'650105': '水磨沟区',
'650106': '头屯河区',
'650107': '南山区',
'650121': '乌鲁木齐县',
'650200': '克拉玛依市',
'650202': '独山子区',
'650203': '克拉玛依区',
'650204': '白碱滩区',
'650205': '乌尔禾区',
'652100': '吐鲁番地区',
'652101': '吐鲁番市',
'652122': '鄯善县',
'652123': '托克逊县',
'652200': '哈密地区',
'652201': '哈密市',
'652222': '巴里坤哈萨克自治县',
'652223': '伊吾县',
'652300': '昌吉回族自治州',
'652301': '昌吉市',
'652322': '米泉县',
'652323': '呼图壁县',
'652324': '玛纳斯县',
'652325': '奇台县',
'652326': '阜康县',
'652327': '吉木萨尔县',
'652328': '木垒哈萨克自治县',
'652700': '博尔塔拉蒙古自治州',
'652701': '博乐市',
'652722': '精河县',
'652723': '温泉县',
'652800': '巴音郭楞蒙古自治州',
'652801': '库尔勒市',
'652822': '轮台县',
'652823': '尉犁县',
'652824': '若羌县',
'652825': '且末县',
'652826': '焉耆回族自治县',
'652827': '和静县',
'652828': '和硕县',
'652829': '博湖县',
'652900': '阿克苏地区',
'652901': '阿克苏市',
'652922': '温宿县',
'652923': '库车县',
'652924': '沙雅县',
'652925': '新和县',
'652926': '拜城县',
'652927': '乌什县',
'652928': '阿瓦提县',
'652929': '柯坪县',
'653000': '克孜勒苏柯尔克孜自治州',
'653021': '阿图什县',
'653022': '阿克陶县',
'653023': '阿合奇县',
'653024': '乌恰县',
'653100': '喀什地区',
'653101': '喀什市',
'653121': '疏附县',
'653122': '疏勒县',
'653123': '英吉沙县',
'653124': '泽普县',
'653125': '莎车县',
'653126': '叶城县',
'653127': '麦盖提县',
'653128': '岳普湖县',
'653129': '伽师县',
'653130': '巴楚县',
'653131': '塔什库尔干塔吉克自治县',
'653200': '和田地区',
'653201': '和田市',
'653221': '和田县',
'653222': '墨玉县',
'653223': '皮山县',
'653224': '洛浦县',
'653225': '策勒县',
'653226': '于田县',
'653227': '民丰县',
'654000': '伊犁哈萨克自治州',
'654001': '奎屯市',
'654100': '伊犁地区',
'654101': '伊宁市',
'654121': '伊宁县',
'654122': '察布查尔锡伯自治县',
'654123': '霍城县',
'654124': '巩留县',
'654125': '新源县',
'654126': '昭苏县',
'654127': '特克斯县',
'654128': '尼勒克县',
'654200': '塔城地区',
'654201': '塔城市',
'654221': '额敏县',
'654222': '乌苏县',
'654223': '沙湾县',
'654224': '托里县',
'654225': '裕民县',
'654226': '和布克赛尔蒙古自治县',
'654300': '阿勒泰地区',
'654301': '阿勒泰市',
'654321': '布尔津县',
'654322': '富蕴县',
'654323': '福海县',
'654324': '哈巴河县',
'654325': '青河县',
'654326': '吉木乃县',
'659001': '石河子市',
} | PypiClean |
/EasyDeL-0.0.29-py3-none-any.whl/examples/training/causal-lm/llama.py | from EasyDel import TrainArguments, CausalLMTrainer
from datasets import load_dataset
from huggingface_hub import HfApi
from EasyDel import configs
from jax import numpy as jnp
import EasyDel
from absl import flags, app
from fjutils import get_float_dtype_by_name
FLAGS = flags.FLAGS
# --- data / checkpoint flags (required) --------------------------------------
flags.DEFINE_string(
    name='dataset_name',
    required=True,
    help='dataset from huggingface and must contains input_ids and attention_mask'
         ' or other things that model might need to be passed into',
    default=None
)
flags.DEFINE_string(
    name='ckpt_path',
    required=True,
    help='path to model weights for example (ckpt/llama_easydel_format)',
    default=None
)
flags.DEFINE_string(
    name='model_type',
    default='7b',
    help='which model type of llama 1 to train example [13b , 7b , 3b ,...] (default is 7b model)'
)
# --- model behaviour switches ------------------------------------------------
flags.DEFINE_bool(
    name='use_flash_attention',
    default=False,
    help='use_flash_attention or no'
)
flags.DEFINE_bool(
    name='use_sacn_mlp',
    default=False,
    help='use_sacn_mlp or no'
)
flags.DEFINE_bool(
    name='remove_ckpt_after_load',
    default=False,
    help='remove_ckpt_after_load or no'
)
# --- which phases to run -----------------------------------------------------
flags.DEFINE_bool(
    name='do_train',
    default=True,
    help='do_train or no'
)
flags.DEFINE_bool(
    name='do_eval',
    default=False,
    help='do_eval or no'
)
flags.DEFINE_bool(
    name='do_test',
    default=False,
    help='do_test or no'
)
# --- training hyper-parameters ----------------------------------------------
flags.DEFINE_integer(
    name='max_sequence_length',
    default=2048,
    help='max sequence length for model to train'
)
flags.DEFINE_integer(
    name='batch_size',
    default=10,
    help='the batch size to use to train model (will be multiply to gradient_accumulation_steps)'
)
flags.DEFINE_integer(
    name='gradient_accumulation_steps',
    default=8,
    help='the gradient accumulation steps to use to train model (will be multiply to batch_size)'
)
flags.DEFINE_integer(
    name='num_train_epochs',
    default=10,
    help='number of training epochs'
)
flags.DEFINE_integer(
    name='max_steps',
    default=None,
    help='number of max_steps (have been set to None for max number of steps)'
)
flags.DEFINE_string(
    name="optimizer",
    default='adamw',
    help='which optimizer to use (available Optimizers are lion adamw adafactor )'
)
flags.DEFINE_string(
    name="scheduler",
    default='cosine',
    help='which scheduler to use (available schedulers are cosine linear none warm_up_cosine)'
)
flags.DEFINE_string(
    name="rotary_type",
    default='complex',
    help='what kind of implementation of rotary embedding to be used for model (available are lm2, open, complex) '
)
# --- bookkeeping / environment -----------------------------------------------
flags.DEFINE_string(
    name="project_name",
    default='LLama',
    help='name for project and model (be used for model naming and wandb logging)'
)
flags.DEFINE_string(
    name='config_repo',
    default=None,
    help='in case that you want to load configs from an huggingface repo'
)
flags.DEFINE_string(
    name='dtype',
    default='bf16',
    help='dtype for model (bf16,fp16,fp32,fp64)'
)
flags.DEFINE_string(
    name='backend',
    default='tpu',
    help='which backend to use supported backends are (tpu ,gpu ,cpu)'
)
flags.DEFINE_float(
    name='learning_rate',
    default=4e-5,
    help='start of learning_rate'
)
flags.DEFINE_float(
    name='learning_rate_end',
    default=4e-6,
    help='end of learning_rate in case of using scheduler linear'
)
# Hugging Face Hub client; not referenced in main() below — presumably kept
# for manual uploads. TODO confirm it is still needed.
api = HfApi()
def main(argv):
    """absl entry point: load the dataset, build a LLama config and trainer.

    NOTE: the actual training call (``trainer.train()``) is left commented
    out at the bottom; running this script as-is only constructs the trainer.
    """
    dataset_train = load_dataset(FLAGS.dataset_name)
    if FLAGS.config_repo is not None:
        # Load a ready-made config from the Hugging Face Hub.
        conf = None
        config = EasyDel.LlamaConfig.from_pretrained(FLAGS.config_repo, trust_remote_code=True)
        config.use_flash_attention = FLAGS.use_flash_attention
        config.use_sacn_mlp = FLAGS.use_sacn_mlp
    else:
        # Build a config from EasyDel's built-in size presets (7b, 13b, ...).
        conf = EasyDel.configs.configs.llama_configs[FLAGS.model_type]
        config = EasyDel.LlamaConfig(**conf, rotary_type=FLAGS.rotary_type)
        config.use_flash_attention = FLAGS.use_flash_attention
        config.use_sacn_mlp = FLAGS.use_sacn_mlp
    config.max_sequence_length = FLAGS.max_sequence_length
    config.rope_scaling = None
    train_args = TrainArguments(
        model_class=EasyDel.FlaxLlamaForCausalLM,
        configs_to_init_model_class={'config': config, 'dtype': get_float_dtype_by_name(FLAGS.dtype),
                                     'param_dtype': get_float_dtype_by_name(FLAGS.dtype)},
        custom_rule=config.get_partition_rules(True),
        model_name=FLAGS.project_name,
        num_train_epochs=FLAGS.num_train_epochs,
        learning_rate=FLAGS.learning_rate,
        learning_rate_end=FLAGS.learning_rate_end,
        optimizer=FLAGS.optimizer,
        scheduler=FLAGS.scheduler,
        weight_decay=0.01,
        total_batch_size=FLAGS.batch_size,
        max_steps=FLAGS.max_steps,
        do_train=FLAGS.do_train,
        do_eval=FLAGS.do_eval,
        do_test=FLAGS.do_test,
        backend=FLAGS.backend,
        max_length=FLAGS.max_sequence_length,
        gradient_checkpointing='nothing_saveable',
        sharding_array=(1, -1, 1),
        use_pjit_attention_force=False,
        extra_configs=conf,
        gradient_accumulation_steps=FLAGS.gradient_accumulation_steps,
        remove_ckpt_after_load=FLAGS.remove_ckpt_after_load,
    )
    # Evaluation split is only attached when --do_eval was requested.
    trainer = CausalLMTrainer(train_args,
                              dataset_train=dataset_train['train'],
                              dataset_eval=dataset_train['eval'] if FLAGS.do_eval else None,
                              ckpt_path=FLAGS.ckpt_path)
    # output = trainer.train()
    # Done You can simply train any llama LLM that you want in less than 50 lines of code
# Script entry point: absl parses the FLAGS defined above before calling main().
if __name__ == "__main__":
    app.run(main)
/ELO_utils-1.3.1.tar.gz/ELO_utils-1.3.1/ELOutils/ELO.py | import random
import math
def approxRollingAverage(avg, new_sample, window=100):
    """Update an approximate rolling average with one new sample.

    This is the standard streaming approximation of an N-sample rolling
    average (an exponential moving average with alpha = 1/window): the
    old average loses 1/window of its weight and the new sample enters
    with weight 1/window.

    :param avg: current (approximate) average
    :param new_sample: newly observed value
    :param window: effective window size; defaults to 100, which matches
        the previously hard-coded behaviour
    :return: the updated approximate average
    """
    avg -= avg / window
    avg += new_sample / window
    return avg
class Player(object):
    """A named participant with an ELO rating.

    Equality requires name, rating and the random ``verification`` value
    to all match; orderings compare (rating, name, verification), so the
    random verification acts as a final tie-breaker.
    """

    def __init__(self, name, rating):
        self.rating = int(rating)
        self.name = name
        self.verification = random.randint(0, 10000)
        self.win_ratio = 1

    def _sort_key(self):
        # Comparison order: rating first, then name, then verification.
        return (self.rating, self.name, self.verification)

    # used for locating players
    def __eq__(self, other):
        return (other.name == self.name
                and other.rating == self.rating
                and other.verification == self.verification)

    # used for comparisons/orderings
    def __lt__(self, other):
        return self._sort_key() < other._sort_key()

    def __gt__(self, other):
        return self._sort_key() > other._sort_key()

    def __le__(self, other):
        return self._sort_key() <= other._sort_key()

    def __ge__(self, other):
        return self._sort_key() >= other._sort_key()

    def __str__(self):
        return 'Player({0},{1})'.format(self.name, self.rating)

    def __repr__(self):
        return str(self)
def get_exp_score(rating_a, rating_b):
    """Expected score of a player rated *rating_a* against *rating_b*.

    Standard ELO logistic expectation on a 400-point scale: 0.5 for equal
    ratings, approaching 1.0 as rating_a grows past rating_b.
    """
    rating_gap = (rating_b - rating_a) / 400.0
    return 1.0 / (1 + 10 ** rating_gap)
def rating_adj(rating, exp_score, score, k=32):
    """Return *rating* adjusted by the ELO update rule.

    The rating moves by *k* times the difference between the achieved
    *score* and the expected score *exp_score*.
    """
    surprise = score - exp_score
    return rating + k * surprise
def match_result(player, challenger, result, floor = None):
    """Apply the outcome of a match between *player* and *challenger*.

    :param player: first Player; *result* is expressed from their side
    :param challenger: opposing Player
    :param result: achieved score for *player* (1 win, 0 loss, 0.5 draw)
    :param floor: optional minimum rating; ratings never drop below it

    Both ratings are updated (floored to integers) and both players'
    approximate rolling win ratios are refreshed with 1 for a rating
    gain and 0 otherwise.
    """
    exp_score_a = get_exp_score(player.rating, challenger.rating)
    old_player = player.rating
    old_challenger = challenger.rating
    player.rating = math.floor(rating_adj(player.rating, exp_score_a, result))
    # BUG FIX: the challenger's update must use the *complementary*
    # expected score and result (1 - exp_score_a, 1 - result).  The
    # previous code applied the player's values to both ratings, so both
    # moved in the same direction and the system was not zero-sum.
    challenger.rating = math.floor(
        rating_adj(challenger.rating, 1 - exp_score_a, 1 - result))
    player_ratio = 0 if player.rating - old_player <= 0 else 1
    challenger_ratio = 0 if challenger.rating - old_challenger <= 0 else 1
    player.win_ratio = approxRollingAverage(player.win_ratio, player_ratio)
    challenger.win_ratio = approxRollingAverage(challenger.win_ratio, challenger_ratio)
    if floor:
        if player.rating < floor:
            player.rating = floor
        if challenger.rating < floor:
            challenger.rating = floor
def create_match(players, player, fairness= 0.5, margin= 0.01):
    """Pick an opponent for *player* from *players* (sorted in place).

    :param players: list of Player objects; will be sorted here
    :param player: the Player to match; must be present in *players*
    :param fairness: target expected score for *player* (0.5 = even match)
    :param margin: allowed deviation around *fairness*
    :return: the chosen opponent
    :raises ValueError: if *players* or *player* is empty/None
    """
    if not players or not player:
        raise ValueError("There must be a list of players and a player to have as reference")
    # Guarantee that the list is sorted
    players.sort()
    # Helper variables
    index = players.index(player)
    # Players on a losing streak (win_ratio < 0.5) search downwards in the
    # sorted list for a weaker rival, others search upwards.
    weaker = True if player.win_ratio < 0.5 else False
    rival = index-1 if weaker else index+1
    change_rate = -1 if weaker else 1
    # limits of fairness
    lower_bound = (fairness-margin)
    higher_bound = (fairness+margin)
    # search for a rival
    # NOTE(review): this loop advances past candidates *while* the expected
    # score is inside the fairness window, i.e. it stops at the first
    # candidate outside it — this looks inverted; confirm the intent.
    while lower_bound <= get_exp_score(player.rating, players[rival].rating) <= higher_bound:
        rival += change_rate
        if not -1 < rival < len(players):
            break
    # Fixing in case of small list or quirks of search
    if not -1 < rival < len(players) or not lower_bound < get_exp_score(player.rating, players[rival].rating) < higher_bound:
        rival -= change_rate
    if rival == index:
        rival += change_rate
    # NOTE(review): when searching downwards rival < index+1, so
    # random.randint(index+1, rival) would raise ValueError — verify.
    rival = random.randint(index+1,rival)
    return players[rival]
/FEV_KEGG-1.1.4.tar.gz/FEV_KEGG-1.1.4/FEV_KEGG/Experiments/52.py | from FEV_KEGG.KEGG.File import cache
from FEV_KEGG.Evolution.Clade import Clade
from FEV_KEGG.Statistics import Percent
from FEV_KEGG.Robustness.Topology.Redundancy import RedundancyType, Redundancy, RedundancyContribution
from FEV_KEGG import settings
from FEV_KEGG.Util.Util import dictToHtmlFile
@cache(folder_path='experiments', file_name='deltaproteobacteria_clade')
def getCladeA():
    """Build (and disk-cache) the Deltaproteobacteria clade.

    Both collective metabolism graphs are pre-fetched so that later
    accesses hit memory instead of recomputing from KEGG data.
    """
    clade = Clade('Deltaproteobacteria')
    # pre-fetch collective metabolism into memory
    clade.collectiveMetabolism(excludeMultifunctionalEnzymes=settings.defaultNoMultifunctional)
    # pre-fetch collective enzyme metabolism into memory
    clade.collectiveMetabolismEnzymes(excludeMultifunctionalEnzymes=settings.defaultNoMultifunctional)
    return clade
if __name__ == '__main__':
    # Collected report lines; printed at the very end.
    output = ['']
    #- get clade
    clade = getCladeA()
    # An EC is "core" if present in at least this % of the clade's organisms.
    majorityPercentageCoreMetabolism = 80
    # 0% majority: one organism with a gene duplication suffices.
    majorityPercentageNeofunctionalisation = 0
    redundancyType = RedundancyType.ROBUSTNESS
    output.append( 'core metabolism majority: ' + str(majorityPercentageCoreMetabolism) + '%' )
    output.append( 'neofunctionalisation majority: ' + str(majorityPercentageNeofunctionalisation) + '% (this means that gene duplication within a single organism is enough)' )
    output.append('')
    output.append(', '.join(clade.ncbiNames) + ':')
    output.append('')
    #- get core metabolism
    cladeEcGraph = clade.coreMetabolism(majorityPercentageCoreMetabolism)
    cladeEcCount = len(cladeEcGraph.getECs())
    output.append( 'core metabolism ECs: ' + str(cladeEcCount) )
    output.append('')
    #- calculate "neofunctionalised" ECs
    cladeNeofunctionalisedMetabolismSet = clade.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation).getECs()
    cladeNeofunctionalisationsForFunctionChange = clade.neofunctionalisationsForFunctionChange(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation)
    #- calculate redundancy
    cladeRedundancy = Redundancy(cladeEcGraph)
    cladeRedundancyContribution = RedundancyContribution(cladeRedundancy, cladeNeofunctionalisedMetabolismSet)
    # Map: "neofunctionalised" EC -> set of ECs whose robustness it contributes to.
    cladeRobustnessContributedECsForContributingNeofunctionalisedEC = cladeRedundancyContribution.getContributedKeysForSpecial(redundancyType)
    cladeRobustnessContributingNeofunctionalisedECs = set(cladeRobustnessContributedECsForContributingNeofunctionalisedEC.keys())
    #- REPEAT for each function change consisting of "neofunctionalised" ECs, which also contribute to redundancy
    output.append( '"neofunctionalised" ECs: ' + str(len(cladeNeofunctionalisedMetabolismSet)) + ' (' + str(Percent.getPercentStringShort(len(cladeNeofunctionalisedMetabolismSet), cladeEcCount, 0)) + '%)' )
    # Map: neofunctionalisation -> set of ECs it helps make robust.
    robustnessContributingNeofunctionalisations = dict()
    for functionChange, neofunctionalisations in cladeNeofunctionalisationsForFunctionChange.items():
        #- report enzyme pairs of neofunctionalisations, which caused the EC to be considered "neofunctionalised", and are in return contributing to redundancy
        if functionChange.ecA in cladeRobustnessContributingNeofunctionalisedECs or functionChange.ecB in cladeRobustnessContributingNeofunctionalisedECs: # function change contributes to robustness
            for neofunctionalisation in neofunctionalisations:
                currentSetOfContributedECs = robustnessContributingNeofunctionalisations.get(neofunctionalisation, None)
                if currentSetOfContributedECs is None:
                    currentSetOfContributedECs = set()
                    robustnessContributingNeofunctionalisations[neofunctionalisation] = currentSetOfContributedECs
                for ec in functionChange.ecPair:
                    contributedECs = cladeRobustnessContributedECsForContributingNeofunctionalisedEC.get(ec, None)
                    if contributedECs is not None:
                        currentSetOfContributedECs.update(contributedECs)
    output.append('')
    output.append( 'Neofunctionalisations contributing to robustness: ' + str(len(robustnessContributingNeofunctionalisations)) )
    # Invert the mapping: contributed EC -> set of neofunctionalisations.
    neofunctionalisationsForContributedEC = dict()
    for neofunctionalisation, contributedECs in robustnessContributingNeofunctionalisations.items():
        for contributedEC in contributedECs:
            currentSetOfNeofunctionalisations = neofunctionalisationsForContributedEC.get(contributedEC, None)
            if currentSetOfNeofunctionalisations is None:
                currentSetOfNeofunctionalisations = set()
                neofunctionalisationsForContributedEC[contributedEC] = currentSetOfNeofunctionalisations
            currentSetOfNeofunctionalisations.add(neofunctionalisation)
    # Collect the EC numbers so the HTML report can show their descriptions.
    ecNumbers = set()
    for contributedEC in neofunctionalisationsForContributedEC.keys():
        ecNumbers.add( contributedEC )
    dictToHtmlFile(neofunctionalisationsForContributedEC, clade.ncbiNames[0] + '_' + redundancyType.name + '_Neofunctionalisations-For-Contributed-EC.html', byValueFirst=False, inCacheFolder=True, addEcDescriptions = ecNumbers)
    for line in output:
        print( line )
/dragonflow-4.0.0.tar.gz/dragonflow-4.0.0/dragonflow/controller/common/cookies.py |
import collections
from oslo_log import log
from dragonflow._i18n import _
from dragonflow.common import exceptions
# Module-level logger.
LOG = log.getLogger(__name__)
# Pseudo application name used to key globally shared cookie bits.
GLOBAL_APP_NAME = 'global cookie namespace'
"""Dictionary to hold a map from a task name to its cookie info"""
_cookies = {}
# Maximum number of bits that can be encoded. Taken from OVS
_cookie_max_bits = 64
# Maximum number of bits allocated to global cookies
_cookie_max_bits_global = 32
# Turn on all bits in the cookie mask. There are 64 (_cookie_max_bits)
# bits. -1 is all (infinite) bits on. Shift right and left again to have all
# bits but the least 64 bits on. Bitwise not to have only the 64 LSBits on.
_cookie_mask_all = ~((-1 >> _cookie_max_bits) << _cookie_max_bits)
# Maximum number of bits allocated to local cookies (total bits - global bits)
_cookie_max_bits_local = _cookie_max_bits - _cookie_max_bits_global
# Number of allocated bits for a given application (including global)
_cookies_used_bits = collections.defaultdict(int)
# Global cookie modifiers, which modify the global cookie space automatically
_cookie_modifiers = {}
# A class holding the cookie's offset and bit-mask
CookieBitPair = collections.namedtuple('CookieBitPair', ('offset', 'mask'))
def register_cookie_bits(name, length, is_local=False, app_name=None):
    """Register this many cookie bits for the given 'task'.

    There are two types of cookies: global and local.

    Global cookies are global accross all applications. All applications share
    the information, and the cookie bits can only be assigned once.

    Local cookies are local to a specific application. That application is
    responsible to the data encoded in the cookie. Therefore, local cookie
    bits can be reused between applications, i.e. different applications can
    use the same local cookie bits to write different things.

    This function raises an error if there are not enough bits to allocate.

    :param name:     The name of the 'task'
    :type name:      string
    :param length:   The length of the cookie to allocate
    :type length:    int
    :param is_local: The cookie space is local, as defined above.
    :type is_local:  bool
    :param app_name: Owner application of the cookie (None for global)
    :type app_name:  string
    :raises OutOfCookieSpaceException: if fewer than *length* bits remain
        in the relevant (global or per-application local) cookie space
    """
    if not is_local:
        # Global bits live in the low _cookie_max_bits_global bits and are
        # shared under the GLOBAL_APP_NAME key.
        app_name = GLOBAL_APP_NAME
        shift = 0
        max_bits = _cookie_max_bits_global
    else:
        # Local bits start above the global region.
        shift = _cookie_max_bits_global
        max_bits = _cookie_max_bits_local
        if not app_name:
            raise TypeError(_(
                "app_name must be provided if is_local is True"))
    # Registration is idempotent per (app_name, name).
    if (app_name, name) in _cookies:
        LOG.info("Cookie for %(app_name)s/%(name)s already registered.",
                 {"app_name": app_name, "name": name})
        return
    start = _cookies_used_bits[app_name]
    if start + length > max_bits:
        LOG.error("Out of cookie space: "
                  "offset: %(offset)d length: %(length)d",
                  {"offset": start, "length": length})
        raise exceptions.OutOfCookieSpaceException()
    _cookies_used_bits[app_name] = start + length
    start += shift
    # `length` ones shifted up to the allocated offset.
    mask = (_cookie_mask_all >> (_cookie_max_bits - length)) << start
    _cookies[(app_name, name)] = CookieBitPair(start, mask)
    LOG.info("Registered cookie for %(app_name)s/%(name)s, "
             "mask: %(mask)x, offset: %(offset)d, length: %(length)d",
             {"app_name": app_name, "name": name,
              "mask": mask, "offset": start, "length": length})
def get_cookie(name, value, old_cookie=0, old_mask=0,
               is_local=False, app_name=None):
    """Encode the given cookie value as the registered cookie. i.e. shift
    it to the correct location, and verify there are no overflows.

    :param name:       The name of the 'task'
    :type name:        string
    :param value:      The value of the cookie to encode
    :type value:       int
    :param old_cookie: Encode this cookie alongside other cookie values
    :type old_cookie:  int
    :param old_mask:   The mask (i.e. encoded relevant bits) in old_cookie
    :type old_mask:    int
    :param is_local:   The cookie space is local, as defined in
                       register_cookie_bits
    :type is_local:    bool
    :param app_name:   Owner application of the cookie (None for global)
    :type app_name:    string
    :return: a (cookie, mask) pair with *value* merged into *old_cookie*
    """
    if not is_local:
        app_name = GLOBAL_APP_NAME
    else:
        if not app_name:
            raise TypeError(
                _("app_name must be provided if is_local is True"))
    pair = _cookies[(app_name, name)]
    mask_overlap = old_mask & pair.mask
    if mask_overlap != 0:
        # These bits were already encoded in old_cookie.  A partial
        # overlap means two registrations clash; a full overlap means the
        # value is already present, so return the inputs unchanged.
        if mask_overlap != pair.mask:
            raise exceptions.MaskOverlapException(app_name=app_name, name=name)
        return old_cookie, old_mask
    result_unmasked = (value << pair.offset)
    result = (result_unmasked & pair.mask)
    # If masking lost bits, the value does not fit into the allocation.
    if result != result_unmasked:
        raise exceptions.CookieOverflowExcpetion(
            cookie=value, offset=pair.offset, mask=pair.mask)
    return result | (old_cookie & ~pair.mask), pair.mask | old_mask
def extract_value_from_cookie(name, cookie_value,
                              is_local=False, app_name=None):
    """This method is the inverse of get_cookie. i.e. if cookie_value was
    encoded with get_cookie, this method extracts the value encoded in it.

    :param name:         The name of the 'task'
    :type name:          string
    :param cookie_value: The value of the cookie to encode
    :type cookie_value:  int
    :param is_local:     The cookie space is local, as defined in
                         register_cookie_bits
    :type is_local:      bool
    :param app_name:     Owner application of the cookie (None for global)
    :type app_name:      string
    :return: the integer value stored in this task's bits of cookie_value
    """
    if not is_local:
        app_name = GLOBAL_APP_NAME
    else:
        if not app_name:
            raise TypeError(_("app_name must be provided if is_local is True"))
    pair = _cookies[(app_name, name)]
    # Select this task's bits, then shift them back down to a plain value.
    masked_value = (cookie_value & pair.mask)
    extracted_value = masked_value >> pair.offset
    return extracted_value
def add_global_cookie_modifier(name, length, modifier):
    """Allocate `length` global cookie bits, and add a modifier function
    that sets these cookie bits for all applications. The modifier
    accepts an opaque value (passed to apply_global_cookie_modifiers below),
    and should return the cookie value it wants to set.

    It is an error for the modifier to return a cookie value that requires
    more than length bits.

    This function raises an error if there are not enough bits to allocate.

    :param name:     The name of the global cookie bits
    :type name:      string
    :param length:   The number of global cookie bits to allocate
    :type length:    int
    :param modifier: A function returning the cookie value
    :type modifier:  function, accepting opaque, returning int
    """
    register_cookie_bits(name, length)
    _cookie_modifiers[name] = modifier
def apply_global_cookie_modifiers(cookie, mask, opaque):
    """For each modifier registered with `add_global_cookie_modifier` above,
    get the value, and encode it onto the cookie.

    :param cookie: The current cookie value
    :type cookie:  int
    :param mask:   The current cookie's mask
    :type mask:    int
    :param opaque: parameter to pass to the modifier
    :return: the (cookie, mask) pair with all modifier values encoded
    """
    for name, modifier in _cookie_modifiers.items():
        value = modifier(opaque)
        # get_cookie merges each modifier's value into the running cookie.
        cookie, mask = get_cookie(name, value, cookie, mask)
    return cookie, mask
/KolejkaJudge-0.1.202211021413-py3-none-any.whl/kolejka/judge/systems/local.py |
import datetime
import math
import os
import pathlib
import pwd
import resource
import signal
import tempfile
import time
import threading
import traceback
import kolejka.common.subprocess
from kolejka.common.gpu import gpu_stats
from kolejka.judge import config
from kolejka.judge.result import Result
from kolejka.judge.systems.base import *
from kolejka.judge.parse import *
__all__ = [ 'LocalSystem' ]
def __dir__():
    """Restrict dir() of this module to the public API."""
    return __all__
# System memory page size in bytes; converts /proc statm page counts to bytes.
page_size = int(os.sysconf("SC_PAGE_SIZE"))
# Kernel clock ticks per second; converts /proc time fields to seconds.
clock_ticks = int(os.sysconf("SC_CLK_TCK"))
def proc_info(pid):
    """Collect resource-usage data for *pid* from /proc.

    Returns a dict with ppid, cpu_user/cpu_sys (seconds), rss (bytes),
    thread count, read/write (characters through read()/write() style
    syscalls, from /proc/<pid>/io rchar/wchar) and real_time (seconds
    since the process started), or None when the process has vanished or
    its /proc files could not be parsed.
    """
    proc = pathlib.Path('/proc/'+str(pid))
    with pathlib.Path('/proc/uptime').open() as uptime_file:
        # First field of /proc/uptime: seconds since boot.
        uptime = float(uptime_file.read().strip().split()[0])
    try:
        # NOTE(review): naive whitespace split of /proc/<pid>/stat
        # misparses when the comm field contains spaces (see proc(5)).
        with ( proc / 'stat' ).open() as stat_file:
            stat = stat_file.read().strip().split()
        with ( proc / 'statm' ).open() as statm_file:
            statm = statm_file.read().strip().split()
        with ( proc / 'io' ).open() as io_file:
            io = dict( [ (k.strip().lower(), int(v.strip())) for k,v in [ l.split(':') for l in io_file.read().strip().split('\n') ] ] )
        result = dict()
        # Field indices below follow proc(5) for /proc/<pid>/stat.
        result['ppid'] = int(stat[3])
        result['cpu_user'] = int(stat[13]) / clock_ticks
        result['cpu_sys'] = int(stat[14]) / clock_ticks
        result['rss'] = int(statm[1]) * page_size
        result['threads'] = int(stat[19])
        result['read'] = io['rchar']
        result['write'] = io['wchar']
        # stat[21] is the process start time in clock ticks since boot.
        result['real_time'] = uptime - int(stat[21]) / clock_ticks
        return result
    except:
        # Best effort: the process may have exited between reads.
        return None
def proc_ppid(pid):
    """Return the parent PID of *pid*, or None if it cannot be read
    (e.g. the process already exited)."""
    proc = pathlib.Path('/proc/'+str(pid))
    try:
        with ( proc / 'stat' ).open() as stat_file:
            stat = stat_file.read().strip().split()
        # Field 4 of /proc/<pid>/stat is the parent PID (proc(5)).
        return int(stat[3])
    except:
        return None
def proc_pids():
    """Return the numeric PIDs of all processes currently listed in /proc."""
    proc = pathlib.Path('/proc')
    return [ int(p.name) for p in proc.iterdir() if p.is_dir() and not p.is_symlink() and p.name.isdigit() ]
def proc_ppids():
    """Map every currently visible PID to its parent PID.

    PIDs whose parent cannot be read (process exited in between) are
    left out of the mapping.
    """
    mapping = dict()
    for child in proc_pids():
        parent = proc_ppid(child)
        if parent is not None:
            mapping[child] = parent
    return mapping
def proc_children(pid):
    """Return the direct children of *pid* among currently visible processes."""
    children = []
    for candidate in proc_pids():
        if proc_ppid(candidate) == pid:
            children.append(candidate)
    return children
def proc_descendants(pid):
    """Return all (transitive) descendant PIDs of *pid*, breadth-first."""
    parent_of = proc_ppids()
    # Build parent -> children lists from the child -> parent snapshot.
    kids = dict([ (parent, list()) for parent in parent_of.values() ])
    for child, parent in parent_of.items():
        kids[parent].append(child)
    found = []
    frontier = [ pid ]
    while frontier:
        # Expand one BFS level at a time.
        next_frontier = []
        for current in frontier:
            next_frontier += kids.get(current, [])
        found += next_frontier
        frontier = next_frontier
    return found
def monitor_safe_process(process, limits, result):
    """Poll a single process and enforce its limits.

    Repeatedly reads /proc/<pid>, records memory / real time / cpu time
    into *result*, and kills the process as soon as any configured limit
    is exceeded.  Returns when the process disappears from /proc.
    Descendant processes are NOT tracked here (see monitor_process).
    """
    while True:
        info = proc_info(process.pid)
        if info is None:
            # Process has exited; its /proc entry is gone.
            break
        result.update_memory(info['rss'])
        result.update_real_time(info['real_time'])
        result.update_cpu_time(info['cpu_user'] + info['cpu_sys'])
        if limits.cpu_time and result.cpu_time > limits.cpu_time:
            process.kill()
        if limits.real_time and result.real_time > limits.real_time:
            process.kill()
        if limits.memory and result.memory > limits.memory:
            process.kill()
        time.sleep(0.05)
def end_process(process):
    """Terminate *process* and forcibly kill all of its descendants.

    Best effort: every individual step may fail silently, because any
    process in the tree can exit on its own at any moment.
    """
    try:
        # Snapshot the descendants while the tree is still rooted at a
        # live process.
        pids = proc_descendants(process.pid)
        try:
            process.terminate()
            time.sleep(0.1)
        except:
            pass
        for pid in pids:
            try:
                # BUG FIX: os.kill() requires a signal argument; the old
                # call os.kill(pid) always raised TypeError (silently
                # swallowed), so descendants were never killed and the
                # retry loop below could spin forever.
                os.kill(pid, signal.SIGKILL)
            except:
                pass
        while True:
            pids = proc_descendants(process.pid)
            if pids:
                for pid in pids:
                    try:
                        os.kill(pid, signal.SIGKILL)
                    except:
                        pass
            else:
                break
    except:
        pass
def monitor_process(process, limits, result):
    """Poll *process* and its whole descendant tree, enforcing limits.

    Per-PID maxima of real/cpu time are kept in local dicts so that a PID
    whose /proc entry momentarily fails to read does not lower the
    totals.  GPU memory is sampled via kolejka's gpu_stats.  When any
    limit is exceeded the whole tree is ended with end_process().
    """
    real_time = dict()
    cpu_time = dict()
    while True:
        info = proc_info(process.pid)
        if info is None:
            # Root process gone; stop monitoring.
            break
        memory = info['rss']
        real_time[process.pid] = info['real_time']
        cpu_time[process.pid] = info['cpu_user'] + info['cpu_sys']
        infos = dict([ (pid, proc_info(pid)) for pid in proc_descendants(process.pid) ])
        for pid, info in infos.items():
            if info is None:
                continue
            memory += info['rss']
            real_time[pid] = max(real_time.get(pid,0), info['real_time'])
            cpu_time[pid] = max(cpu_time.get(pid,0), info['cpu_user'] + info['cpu_sys'])
        result.update_memory(memory)
        # NOTE(review): real time is *summed* over all PIDs, so concurrent
        # children inflate it beyond wall-clock time — confirm intended.
        result.update_real_time(sum(real_time.values()))
        result.update_cpu_time(sum(cpu_time.values()))
        gpu_memory = 0
        for gpu, stats in gpu_stats().dump().get('gpus').items():
            usage = parse_memory(stats.get('memory_usage'))
            if limits.gpu_memory:
                # With a limit set, charge the process for whatever part of
                # the limit is no longer free on the card.
                total = parse_memory(stats.get('memory_total'))
                gpu_memory = max(gpu_memory, limits.gpu_memory - (total - usage))
            else:
                gpu_memory = max(gpu_memory, usage)
        result.update_gpu_memory(gpu_memory)
        if limits.cpu_time and result.cpu_time > limits.cpu_time:
            end_process(process)
        if limits.real_time and result.real_time > limits.real_time:
            end_process(process)
        if limits.memory and result.memory > limits.memory:
            end_process(process)
        if limits.gpu_memory and result.gpu_memory > limits.gpu_memory:
            end_process(process)
        time.sleep(0.05)
class LocalSystem(SystemBase):
    """Judge system that executes commands directly on the local machine.

    CPU/memory limits are enforced through setrlimit plus a polling
    monitor thread; GPU memory limits are enforced by pre-allocating
    "ballast" arrays on every GPU so that only the allowed amount of
    device memory stays free for the judged process.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.output_directory.mkdir(parents=True, exist_ok=True)
        # gpu index -> ballast device array, see preserve_gpu_memory().
        self.preserved_gpu_memory = {}
    def get_superuser(self):
        """Return True when running as root."""
        return os.getuid() == 0
    def get_current_user(self):
        """Return the login name of the current real user."""
        return pwd.getpwuid(os.getuid()).pw_name
    def get_resources(self, limits):
        """Translate judge *limits* into setrlimit (soft, hard) pairs."""
        resources = dict()
        for limit in [
                resource.RLIMIT_CORE,
                resource.RLIMIT_CPU,
                # resource.RLIMIT_FSIZE,
                resource.RLIMIT_DATA,
                resource.RLIMIT_STACK,
                # resource.RLIMIT_RSS,
                # resource.RLIMIT_NPROC,
                # resource.RLIMIT_NOFILE,
                # resource.RLIMIT_MEMLOCK,
                # resource.RLIMIT_AS,
                # resource.RLIMIT_MSGQUEUE,
                # resource.RLIMIT_NICE,
                # resource.RLIMIT_RTPRIO,
                # resource.RLIMIT_RTTIME,
                # resource.RLIMIT_SIGPENDING,
                ]:
            resources[limit] = (resource.RLIM_INFINITY, resource.RLIM_INFINITY)
        # Disable core dumps.
        resources[resource.RLIMIT_CORE] = (0,0)
        if limits.cpu_time:
            # One second of headroom so the monitor thread normally ends
            # the process before the kernel does.
            seconds = int(math.ceil((limits.cpu_time + parse_time('1s')).total_seconds()))
            resources[resource.RLIMIT_CPU] = (seconds, seconds)
        if limits.memory:
            # One megabyte of headroom, analogous to the cpu_time padding.
            memory = int(math.ceil(limits.memory + parse_memory('1mb')))
            # BUG FIX: the padded value was computed but the raw
            # limits.memory was applied, leaving the monitor no headroom.
            resources[resource.RLIMIT_DATA] = (memory, memory)
        return resources
    def preserve_gpu_memory(self, memory_limit: int) -> None:
        """
        Preserves every GPU to have at most desired memory free
        """
        try:
            import numpy as np
            from numba import cuda
            from numba.cuda.cudadrv.driver import CudaAPIError, Device
        except ImportError:
            raise RuntimeError("Numba is required to limit GPU memory")
        ARRAY_ELEMENT_DTYPE = np.uint8
        ARRAY_ELEMENT_SIZE = np.dtype(ARRAY_ELEMENT_DTYPE).itemsize
        self.preserved_gpu_memory = {}
        for gpu_index, gpu in enumerate(cuda.gpus.lst):
            with gpu:
                # Initialize CUDA context preserves minor amount of memory to be allocated
                _ = cuda.device_array((1,))
                # Retrieve current device free memory space (in bytes)
                bytes_free, bytes_total = cuda.current_context().get_memory_info()
                bytes_to_preserve = bytes_free - memory_limit
                if bytes_to_preserve < 0:
                    # BUG FIX: report the requested limit, not the negative
                    # remainder, in the error message.
                    raise RuntimeError(f"Not enough memory on {repr(gpu)}: requested {memory_limit} bytes free, only {bytes_free} available")
                if bytes_to_preserve > 0:
                    try:
                        self.preserved_gpu_memory[gpu_index] = cuda.device_array(
                            (bytes_to_preserve // ARRAY_ELEMENT_SIZE,),
                            dtype=ARRAY_ELEMENT_DTYPE
                        )
                    except CudaAPIError as e:
                        raise RuntimeError(f"CUDA operation failure: {e}")
    def release_gpu_memory(self):
        """Release the ballast arrays allocated by preserve_gpu_memory()."""
        # BUG FIX: the previous ``del memory`` inside a loop only unbound
        # the loop variable; the dict kept referencing every device array,
        # so GPU memory was never actually released.  Dropping the dict
        # removes the references and lets the arrays be freed.
        self.preserved_gpu_memory = {}
    def execute_safe_command(self, command, stdin_path, stdout_path, stdout_append, stdout_max_bytes, stderr_path, stderr_append, stderr_max_bytes, environment, work_path, user, group, limits, result):
        """Run *command* to completion with limits enforced, filling *result*."""
        stdin_file = self.read_file(stdin_path)
        stdout_file, stdout_writer = self.file_writer(stdout_path, stdout_append, max_bytes=stdout_max_bytes)
        stderr_file, stderr_writer = self.file_writer(stderr_path, stderr_append, max_bytes=stderr_max_bytes)
        writers = (stdout_writer, stderr_writer)
        change_user, change_group, change_groups = self.get_user_group_groups(user, group)
        resources = self.get_resources(limits)
        #resources[resource.RLIMIT_NPROC] = (1,1) #This is a very bad idea, read notes in man execv on EAGAIN
        process = kolejka.common.subprocess.start(
                command,
                user=change_user,
                group=change_group,
                groups=change_groups,
                resources=resources,
                stdin=stdin_file,
                stdout=stdout_file,
                stderr=stderr_file,
                env=environment,
                cwd=work_path,
            )
        # The child holds its own copies of the descriptors now.
        stdin_file.close()
        stdout_file.close()
        stderr_file.close()
        monitoring_thread = threading.Thread(target=monitor_safe_process, args=(process, limits, result))
        monitoring_thread.start()
        returncode = process.wait()
        monitoring_thread.join()
        for writer in writers:
            writer.join()
        result.set_returncode(returncode)
    def start_command(self, command, stdin_path, stdout_path, stdout_append, stdout_max_bytes, stderr_path, stderr_append, stderr_max_bytes, environment, work_path, user, group, limits):
        """Start *command* asynchronously; returns an opaque handle for
        wait_command() / terminate_command()."""
        stdin_file = self.read_file(stdin_path)
        stdout_file, stdout_writer = self.file_writer(stdout_path, stdout_append, max_bytes=stdout_max_bytes)
        stderr_file, stderr_writer = self.file_writer(stderr_path, stderr_append, max_bytes=stderr_max_bytes)
        writers = (stdout_writer, stderr_writer)
        change_user, change_group, change_groups = self.get_user_group_groups(user, group)
        resources = self.get_resources(limits)
        if limits.gpu_memory:
            # Fill the GPUs so only limits.gpu_memory stays free.
            self.preserve_gpu_memory(limits.gpu_memory)
        process = kolejka.common.subprocess.start(
                command,
                user=change_user,
                group=change_group,
                groups=change_groups,
                resources=resources,
                stdin=stdin_file,
                stdout=stdout_file,
                stderr=stderr_file,
                env=environment,
                cwd=work_path,
            )
        stdin_file.close()
        stdout_file.close()
        stderr_file.close()
        result = Result()
        monitoring_thread = threading.Thread(target=monitor_process, args=(process, limits, result))
        monitoring_thread.start()
        return (process, monitoring_thread, result, writers)
    def terminate_command(self, process):
        """Terminate a command started with start_command()."""
        process, monitoring_thread, monitor_result, writers = process
        process.terminate()
        for writer in writers:
            writer.join()
        self.release_gpu_memory()
    def wait_command(self, process, result):
        """Wait for a start_command() handle to finish and merge its
        monitored usage into *result*."""
        process, monitoring_thread, monitor_result, writers = process
        completed = kolejka.common.subprocess.wait(process)
        monitoring_thread.join()
        for writer in writers:
            writer.join()
        result.update_memory(monitor_result.memory)
        result.update_real_time(monitor_result.real_time)
        result.update_cpu_time(monitor_result.cpu_time)
        result.update_gpu_memory(monitor_result.gpu_memory)
        result.update_real_time(completed.time)
        result.set_returncode(completed.returncode)
        self.release_gpu_memory()
/Diofant-0.14.0a2.tar.gz/Diofant-0.14.0a2/diofant/polys/polyoptions.py |
from __future__ import annotations
import re
import typing
from ..core import Basic, I
from ..core.sympify import sympify
from ..utilities import has_dups, numbered_symbols, topological_sort
from .polyerrors import FlagError, GeneratorsError, OptionError
__all__ = 'Options', 'Order'
class Option:
    """Base class for all kinds of options.

    Concrete option classes are recorded in ``Options.__options__`` and
    looked up by their ``option`` name when an ``Options`` dict is built
    (see ``OptionType``).
    """
    # Keyword name under which this option appears in an Options dict.
    option: str
    is_Flag = False
    # Names of options that must / must not be given together with this
    # one, plus ordering constraints for post-processing.
    requires: list[str] = []
    excludes: list[str] = []
    after: list[str] = []
    before: list[str] = []
    @classmethod
    def default(cls):
        # No default value unless a subclass overrides this.
        return

    @classmethod
    def preprocess(cls, option):
        # Validate/convert a user-supplied value; overridden in subclasses.
        return  # pragma: no cover

    @classmethod
    def postprocess(cls, options):
        # Hook run after all options are collected; default is a no-op.
        return
class Flag(Option):
    """Base class for all kinds of flags.

    Flags are options that are only accepted when explicitly allowed by
    the caller (see the strict handling in ``Options.__init__``).
    """
    is_Flag = True
class BooleanOption(Option):
    """An option that must have a boolean value or equivalent assigned."""

    @classmethod
    def preprocess(cls, option):
        # ``in (True, False)`` also accepts 1 and 0, which compare equal
        # to the booleans; bool() then normalizes them.
        if option not in (True, False):
            raise OptionError(f"'{cls.option}' must have a boolean value "
                              f'assigned, got {option}')
        return bool(option)
class OptionType(type):
    """Metaclass that registers every new option class.

    When an option class is created, a property named after
    ``cls.option`` is installed on ``Options`` (falling back to the
    option's default when the key is absent) and the class is recorded
    in ``Options.__options__``.
    """
    def __init__(cls, *args, **kwargs):
        super().__init__(cls)
        @property
        def getter(a):
            # ``a`` is the Options dict instance; prefer the stored
            # value, otherwise fall back to the option's default.
            try:
                return a[cls.option]
            except KeyError:
                return cls.default()
        setattr(Options, cls.option, getter)
        Options.__options__[cls.option] = cls
class Options(dict):
    """
    Options manager for polynomial manipulation module.

    Examples
    ========

    >>> Options((x, y, z), {'domain': 'ZZ'})
    {'auto': False, 'domain': ZZ, 'gens': (x, y, z)}

    >>> build_options((x, y, z), {'domain': 'ZZ'})
    {'auto': False, 'domain': ZZ, 'gens': (x, y, z)}

    **Options**

    * Expand --- boolean option
    * Gens --- option
    * Wrt --- option
    * Sort --- option
    * Order --- option
    * Field --- boolean option
    * Greedy --- boolean option
    * Domain --- option
    * Split --- boolean option
    * Gaussian --- boolean option
    * Extension --- option
    * Modulus --- option
    * Symmetric --- boolean option
    * Strict --- boolean option

    **Flags**

    * Auto --- boolean flag
    * Frac --- boolean flag
    * Formal --- boolean flag
    * Polys --- boolean flag
    * Include --- boolean flag
    * All --- boolean flag
    * Gen --- flag

    """

    # Topologically sorted processing order of option names; filled in
    # lazily by _init_dependencies_order() at module import time.
    __order__: typing.Optional[list[str]] = None
    # Registry of option classes, populated by the OptionType metaclass.
    __options__: dict[str, type[Option]] = {}

    def __init__(self, gens, args, flags=None, strict=False):
        """Build the option mapping from generators and keyword values.

        Parameters
        ==========

        gens : tuple
            Positional generators; mutually exclusive with ``args['gens']``.
        args : dict
            Raw keyword arguments to validate and store.
        flags : list of str, optional
            Flag names permitted in this context (checked when *strict*).
        strict : bool, optional
            If True, reject flags not listed in *flags*.

        Raises
        ======

        OptionError
            On unknown options, disallowed flags or conflicting options.
        """
        dict.__init__(self)

        if gens and args.get('gens', ()):
            raise OptionError("both '*gens' and keyword "
                              "argument 'gens' supplied")
        if gens:
            args = dict(args)
            args['gens'] = gens

        defaults = args.pop('defaults', {})

        def preprocess_options(args):
            # Validate each keyword against the registry and store the
            # preprocessed value; None values are skipped entirely.
            for option, value in args.items():
                try:
                    cls = self.__options__[option]
                except KeyError as exc:
                    raise OptionError(f"'{option}' is not a "
                                      'valid option') from exc

                if issubclass(cls, Flag):
                    if strict and (flags is None or option not in flags):
                        raise OptionError(f"'{option}' flag is not "
                                          'allowed in this context')

                if value is not None:
                    self[option] = cls.preprocess(value)

        preprocess_options(args)

        # Drop defaults that were supplied explicitly or that conflict
        # with (are excluded by) an option already present.
        for key in dict(defaults):
            if key in self:
                del defaults[key]
            else:
                for option in self:
                    cls = self.__options__[option]

                    if key in cls.excludes:
                        del defaults[key]
                        break

        preprocess_options(defaults)

        # Enforce mutual exclusion between the stored options.
        for option in self:
            cls = self.__options__[option]

            for exclude_option in cls.excludes:
                if self.get(exclude_option) is not None:
                    raise OptionError(f"'{option}' option is not allowed together with '{exclude_option}'")

        # Run each option's postprocess hook in dependency order.
        for option in self.__order__:  # pylint: disable=not-an-iterable
            self.__options__[option].postprocess(self)

    @classmethod
    def _init_dependencies_order(cls):
        """Resolve the order of options' processing."""
        if cls.__order__ is None:
            vertices, edges = [], set()

            for name, option in cls.__options__.items():
                vertices.append(name)

                # 'after'/'before' lists become edges of a dependency DAG.
                for _name in option.after:
                    edges.add((_name, name))
                for _name in option.before:
                    edges.add((name, _name))

            try:
                cls.__order__ = topological_sort((vertices, list(edges)))
            except ValueError as exc:
                raise RuntimeError('cycle detected in diofant.polys'
                                   ' options framework') from exc

    def clone(self, updates=None):
        """Clone ``self`` and update specified options.

        Parameters
        ==========

        updates : dict, optional
            Option values overriding those of ``self``.
        """
        # BUG FIX: the default used to be a shared mutable ``{}``;
        # ``None`` is safer and behaviorally equivalent for callers.
        obj = dict.__new__(self.__class__)

        for option, value in self.items():
            obj[option] = value
        for option, value in (updates or {}).items():
            obj[option] = value

        return obj

    def __setattr__(self, attr, value):
        # Registered option names are stored as dict entries; anything
        # else behaves like a normal attribute.
        if attr in self.__options__:
            self[attr] = value
        else:
            super().__setattr__(attr, value)

    @property
    def args(self):
        """Non-flag options with non-None values (excluding 'gens')."""
        args = {}

        for option, value in self.items():
            if value is not None and option != 'gens':
                cls = self.__options__[option]

                if not issubclass(cls, Flag):
                    args[option] = value

        return args

    @property
    def options(self):
        """All non-flag options (stored value or default)."""
        options = {}

        for option, cls in self.__options__.items():
            if not issubclass(cls, Flag):
                options[option] = getattr(self, option)

        return options

    @property
    def flags(self):
        """All flags (stored value or default)."""
        flags = {}

        for option, cls in self.__options__.items():
            if issubclass(cls, Flag):
                flags[option] = getattr(self, option)

        return flags
class Expand(BooleanOption, metaclass=OptionType):
    """``expand`` option to polynomial manipulation functions."""

    option = 'expand'

    @classmethod
    def default(cls):
        """Expressions are expanded by default."""
        return True
class Gens(Option, metaclass=OptionType):
    """``gens`` option to polynomial manipulation functions."""

    option = 'gens'

    @classmethod
    def default(cls):
        """No generators by default."""
        return ()

    @classmethod
    def preprocess(cls, option):
        """Normalize to a tuple of distinct, commutative generators."""
        if isinstance(option, Basic):
            # A single generator is wrapped into a 1-tuple.
            option = option,

        if option == (None,):
            return ()
        if has_dups(option):
            raise GeneratorsError(f'duplicated generators: {option}')
        if any(gen.is_commutative is False for gen in option):
            raise GeneratorsError(f'non-commutative generators: {option}')
        return tuple(option)
class Wrt(Option, metaclass=OptionType):
    """``wrt`` option to polynomial manipulation functions."""

    option = 'wrt'

    # Splits on commas (with optional surrounding spaces) or whitespace.
    _re_split = re.compile(r'\s*,\s*|\s+')

    @classmethod
    def preprocess(cls, option):
        """Return the list of variable names given by *option*."""
        if isinstance(option, Basic):
            return [str(option)]

        if isinstance(option, str):
            stripped = option.strip()
            if stripped.endswith(','):
                raise OptionError('Bad input: missing parameter.')
            return list(cls._re_split.split(stripped)) if stripped else []

        if hasattr(option, '__getitem__'):
            return [str(item) for item in option]

        raise OptionError("invalid argument for 'wrt' option")
class Sort(Option, metaclass=OptionType):
    """``sort`` option to polynomial manipulation functions."""

    option = 'sort'

    @classmethod
    def default(cls):
        """No sorting specification by default."""
        return []

    @classmethod
    def preprocess(cls, option):
        """Parse an 'a > b > c' string or a sequence into a list of names."""
        if isinstance(option, str):
            return [part.strip() for part in option.split('>')]
        if hasattr(option, '__getitem__'):
            return [str(item) for item in option]
        raise OptionError("invalid argument for 'sort' option")
class Order(Option, metaclass=OptionType):
    """``order`` option to polynomial manipulation functions."""

    option = 'order'

    @classmethod
    def default(cls):
        """Lexicographic monomial order by default."""
        from .orderings import lex
        return lex

    @classmethod
    def preprocess(cls, option):
        """Convert *option* into a monomial key function."""
        from .orderings import monomial_key
        return monomial_key(option)
class Field(BooleanOption, metaclass=OptionType):
    """``field`` option to polynomial manipulation functions."""

    option = 'field'

    # Incompatible with an explicit ground domain specification.
    excludes = ['domain', 'split', 'gaussian']
class Greedy(BooleanOption, metaclass=OptionType):
    """``greedy`` option to polynomial manipulation functions."""

    option = 'greedy'

    # Incompatible with any explicit domain selection mechanism.
    excludes = ['domain', 'split', 'gaussian', 'extension', 'modulus']
class Composite(BooleanOption, metaclass=OptionType):
    """``composite`` option to polynomial manipulation functions."""

    option = 'composite'

    excludes = ['domain', 'split', 'gaussian', 'modulus']

    @classmethod
    def default(cls):
        """Unset by default (resolved later from context)."""
        return None
class Domain(Option, metaclass=OptionType):
    """``domain`` option to polynomial manipulation functions."""

    option = 'domain'

    excludes = ['field', 'greedy', 'split', 'gaussian', 'extension']
    # Generators must already be parsed before the domain is validated.
    after = ['gens']

    # Recognized textual domain specifications:
    _re_realfield = re.compile(r'^(R|RR)(_(\d+))?$')       # e.g. 'RR', 'RR_53'
    _re_complexfield = re.compile(r'^(C|CC)(_(\d+))?$')    # e.g. 'CC', 'CC_100'
    _re_finitefield = re.compile(r'^(FF|GF)\((\d+)\)$')    # e.g. 'GF(7)'
    _re_polynomial = re.compile(r'^(Z|ZZ|Q|QQ)\[(.+)\]$')  # e.g. 'ZZ[x,y]'
    _re_fraction = re.compile(r'^(Z|ZZ|Q|QQ)\((.+)\)$')    # e.g. 'QQ(x)'
    _re_algebraic = re.compile(r'^(Q|QQ)\<(.+)\>$')        # e.g. 'QQ<sqrt(2)>'

    @classmethod
    def preprocess(cls, option):
        """Convert *option* (a Domain instance or string spec) to a Domain."""
        from .. import domains
        if isinstance(option, domains.Domain):
            return option
        elif isinstance(option, str):
            # Exact shorthand names first.
            if option in ['Z', 'ZZ']:
                return domains.ZZ
            if option in ['Q', 'QQ']:
                return domains.QQ
            if option == 'EX':
                return domains.EX

            # Real/complex fields with optional precision suffix.
            r = cls._re_realfield.match(option)
            if r is not None:
                _, _, prec = r.groups()
                if prec is None:
                    return domains.RR
                else:
                    return domains.RealField(int(prec))

            r = cls._re_complexfield.match(option)
            if r is not None:
                _, _, prec = r.groups()
                if prec is None:
                    return domains.CC
                else:
                    return domains.ComplexField(int(prec))

            r = cls._re_finitefield.match(option)
            if r is not None:
                return domains.FF(int(r.groups()[1]))

            # Polynomial ring over ZZ or QQ.
            r = cls._re_polynomial.match(option)
            if r is not None:
                ground, gens = r.groups()
                gens = list(map(sympify, gens.split(',')))
                if ground in ['Z', 'ZZ']:
                    return domains.ZZ.inject(*gens)
                else:
                    return domains.QQ.inject(*gens)

            # Fraction field over ZZ or QQ.
            r = cls._re_fraction.match(option)
            if r is not None:
                ground, gens = r.groups()
                gens = list(map(sympify, gens.split(',')))
                if ground in ['Z', 'ZZ']:
                    return domains.ZZ.inject(*gens).field
                else:
                    return domains.QQ.inject(*gens).field

            # Algebraic number field over QQ.
            r = cls._re_algebraic.match(option)
            if r is not None:
                gens = list(map(sympify, r.groups()[1].split(',')))
                return domains.QQ.algebraic_field(*gens)

        raise OptionError('expected a valid domain specification, '
                          f'got {option}')

    @classmethod
    def postprocess(cls, options):
        """Check domain/generators compatibility after all parsing."""
        from .. import domains
        from ..domains.compositedomain import CompositeDomain
        # A composite ground domain may not share symbols with the gens.
        if 'gens' in options and 'domain' in options and isinstance(options['domain'], CompositeDomain) and \
                (set(options['domain'].symbols) & set(options['gens'])):
            raise GeneratorsError('ground domain and generators '
                                  'interfere together')
        # The EX domain requires explicit generators.
        if ('gens' not in options or not options['gens']) and \
                'domain' in options and options['domain'] == domains.EX:
            raise GeneratorsError('you have to provide generators because'
                                  ' EX domain was requested')
class Split(BooleanOption, metaclass=OptionType):
    """``split`` option to polynomial manipulation functions."""

    option = 'split'

    excludes = ['field', 'greedy', 'domain', 'gaussian', 'extension', 'modulus']

    @classmethod
    def postprocess(cls, options):
        """Reject the option: splitting extensions are not supported yet."""
        if 'split' in options:
            raise NotImplementedError("'split' option is not implemented yet")
class Gaussian(BooleanOption, metaclass=OptionType):
    """``gaussian`` option to polynomial manipulation functions."""

    option = 'gaussian'

    excludes = ['field', 'greedy', 'domain', 'split', 'extension', 'modulus']

    @classmethod
    def postprocess(cls, options):
        """Translate ``gaussian=True`` into the ``extension={I}`` request."""
        if options.get('gaussian') is True:
            options['extension'] = {I}
            Extension.postprocess(options)
class Extension(Option, metaclass=OptionType):
    """``extension`` option to polynomial manipulation functions."""

    option = 'extension'

    excludes = ['greedy', 'domain', 'split', 'gaussian', 'modulus']

    @classmethod
    def preprocess(cls, option):
        """Normalize to True/False, ``None`` (empty) or a set of elements."""
        if option == 1 or option == 0:
            # Boolean-like request (also catches True/False themselves).
            return bool(option)
        if not hasattr(option, '__iter__'):
            # A single extension element.
            return {option}
        # An iterable of elements; an empty one means "no extension".
        return set(option) if option else None

    @classmethod
    def postprocess(cls, options):
        """Replace a concrete extension set by the algebraic field over QQ."""
        from .. import domains
        if 'extension' in options and options['extension'] not in (True, False):
            options['domain'] = domains.QQ.algebraic_field(
                *options['extension'])
class Modulus(Option, metaclass=OptionType):
    """``modulus`` option to polynomial manipulation functions."""

    option = 'modulus'

    excludes = ['greedy', 'split', 'domain', 'gaussian', 'extension']

    @classmethod
    def preprocess(cls, option):
        """Validate *option* as a positive integer modulus.

        Raises
        ======

        OptionError
            If *option* does not sympify to a positive Integer.
        """
        option = sympify(option)

        if option.is_Integer and option > 0:
            return int(option)
        else:
            # BUG FIX: error message previously read "must a positive
            # integer" -- missing "be".
            raise OptionError(
                f"'modulus' must be a positive integer, got {option}")

    @classmethod
    def postprocess(cls, options):
        """Set the ground domain to GF(modulus)."""
        from .. import domains

        if 'modulus' in options:
            modulus = options['modulus']
            options['domain'] = domains.FF(modulus)
class Strict(BooleanOption, metaclass=OptionType):
    """``strict`` option to polynomial manipulation functions."""

    option = 'strict'

    @classmethod
    def default(cls):
        """Strict checking is enabled by default."""
        return True
class Auto(BooleanOption, Flag, metaclass=OptionType):
    """``auto`` flag to polynomial manipulation functions."""

    option = 'auto'

    # Must run after the ground-domain related options are settled.
    after = ['field', 'domain', 'extension', 'gaussian']

    @classmethod
    def default(cls):
        """Enabled by default."""
        return True

    @classmethod
    def postprocess(cls, options):
        """Disable auto when an explicit domain/field request is present."""
        if 'auto' not in options and ('domain' in options or 'field' in options):
            options['auto'] = False
class Frac(BooleanOption, Flag, metaclass=OptionType):
    """``frac`` flag to polynomial manipulation functions."""

    option = 'frac'

    @classmethod
    def default(cls):
        """Disabled by default."""
        return False
class Formal(BooleanOption, Flag, metaclass=OptionType):
    """``formal`` flag to polynomial manipulation functions."""

    option = 'formal'

    @classmethod
    def default(cls):
        """Disabled by default."""
        return False
class Polys(BooleanOption, Flag, metaclass=OptionType):
    """``polys`` flag to polynomial manipulation functions."""

    # No default() override: falls back to Option.default() -> None.
    option = 'polys'
class Include(BooleanOption, Flag, metaclass=OptionType):
    """``include`` flag to polynomial manipulation functions."""

    option = 'include'

    @classmethod
    def default(cls):
        """Disabled by default."""
        return False
class All(BooleanOption, Flag, metaclass=OptionType):
    """``all`` flag to polynomial manipulation functions."""

    option = 'all'

    @classmethod
    def default(cls):
        """Disabled by default."""
        return False
class Gen(Flag, metaclass=OptionType):
    """``gen`` flag to polynomial manipulation functions."""

    option = 'gen'

    @classmethod
    def default(cls):
        """Index of the first generator by default."""
        return 0

    @classmethod
    def preprocess(cls, option):
        """Accept a generator expression or an integer index."""
        if not isinstance(option, (Basic, int)):
            raise OptionError("invalid argument for 'gen' option")
        return option
class Symbols(Flag, metaclass=OptionType):
    """``symbols`` flag to polynomial manipulation functions."""

    option = 'symbols'

    @classmethod
    def default(cls):
        """Fresh numbered symbols (see utilities.numbered_symbols)."""
        return numbered_symbols('s', start=1)

    @classmethod
    def preprocess(cls, option):
        """Return an iterator over the user-supplied symbols."""
        if not hasattr(option, '__iter__'):
            raise OptionError('expected an iterator or '
                              f'iterable container, got {option}')
        return iter(option)
class Method(Flag, metaclass=OptionType):
    """``method`` flag to polynomial manipulation functions."""

    option = 'method'

    @classmethod
    def preprocess(cls, option):
        """Normalize the method name to lower case."""
        if not isinstance(option, str):
            raise OptionError(f'expected a string, got {option}')
        return option.lower()
def build_options(gens, args=None):
    """Construct options from keyword arguments or pre-built options."""
    if args is None:
        # Single-argument form: the first argument is really `args`.
        gens, args = (), gens

    # A lone 'opt' keyword (and no generators) is passed through untouched.
    if len(args) != 1 or 'opt' not in args or gens:
        return Options(gens, args)
    return args['opt']
def allowed_flags(args, flags):
    """
    Allow specified flags to be used in the given context.

    Examples
    ========

    >>> allowed_flags({'domain': ZZ}, [])

    >>> allowed_flags({'domain': ZZ, 'frac': True}, [])
    Traceback (most recent call last):
    ...
    FlagError: 'frac' flag is not allowed in this context

    >>> allowed_flags({'domain': ZZ, 'frac': True}, ['frac'])

    """
    allowed = set(flags)

    for name in args:
        try:
            cls = Options.__options__[name]
        except KeyError as exc:
            raise OptionError(f"'{name}' is not a valid option") from exc
        if cls.is_Flag and name not in allowed:
            raise FlagError(f"'{name}' flag is not allowed "
                            'in this context')
def set_defaults(options, **defaults):
    """Update options with default values."""
    # If defaults were already attached, return the mapping untouched.
    if 'defaults' in options:
        return options

    enriched = dict(options)
    enriched['defaults'] = defaults
    return enriched
# Compute the topological processing order now that every option class
# has been defined and registered via the OptionType metaclass.
Options._init_dependencies_order()
/AtomPy-0.5.1.1.zip/AtomPy-0.5.1.1/atompy/__init__.py | print 'Initializing AtomPy...'
import DownloadAPI as API
import pandas
import os, sys
import matplotlib.pyplot as plt
import refs
import xlrd
from scipy import constants
#Global Refs class for element, ion, isotope data
Refs = refs.Refs()
# Wrapper functions: thin module-level shortcuts delegating to the
# shared ``Refs`` instance above (see the refs module for semantics).
def element(Z):
    # Element data for atomic number Z.
    return Refs.element(Z)
def elementaw(Z):
    # Element atomic weight (presumably; confirm in refs module).
    return Refs.elementaw(Z)
def elementryd(Z):
    # Element Rydberg-related value (presumably; confirm in refs module).
    return Refs.elementryd(Z)
def ion(Z, N):
    # Ion data for atomic number Z and electron count N.
    return Refs.ion(Z, N)
def ionip(Z, N):
    # Ion ionization potential (presumably; confirm in refs module).
    return Refs.ionip(Z, N)
def isotope(Z, M):
    # Isotope data for atomic number Z and mass number M.
    return Refs.isotope(Z, M)
def isotopeaw(Z, M):
    # Isotope atomic weight (presumably; confirm in refs module).
    return Refs.isotopeaw(Z, M)
def isotopecomp(Z, M):
    # Isotope composition/abundance (presumably; confirm in refs module).
    return Refs.isotopecomp(Z, M)
class IonAttribute():
    """Container for one worksheet of ion data."""

    def __init__(self):
        # Worksheet title.
        self.title = None
        # Pandas dataframe holding the worksheet data.
        self.data = None
        # Source citations as a string.
        self.sources = ''
class Ion:
def __init__(self, _Z, _N):
self.Z = _Z
self.N = _N
self.name = 'None'
self.levels = []
self.avalues = []
self.collisions = []
self.object = []
def Name(self):
return self.name
def Z(self):
return self.Z
def N(self):
return self.N
def E(self, index=0, sources=False):
if sources:
print self.levels[index].sources
else:
return self.levels[index].data
def A(self, index=0, sources=False):
if sources:
print self.avalues[index].sources
else:
return self.avalues[index].data
def U(self, index=0, sources=False):
if sources:
print self.collisions[index].sources
else:
return self.collisions[index].data
def O(self, index=0, sources=False):
if sources:
print self.object[index].sources
else:
return self.object[index].data
def generateName(self):
name = ''
if self.Z < 10:
name += '0' + str(self.Z)
else:
name += str(self.Z)
name += '_'
if self.N < 10:
name += '0' + str(self.N)
else:
name += str(self.N)
self.name = name
def __str__(self):
myString = ''
myString += 'Ion: Z = ' + self.name.split('_')[0] + ', N = ' + self.name.split('_')[0] + '\n'
#E Sheet Count
if len(self.levels) == 0:
myString += ' No E sheets found...\n'
else:
for num in range(len(self.levels)):
myString += ' E' + str(num) + ': ' + self.levels[num].title + '\n'
#A Sheet Count
if len(self.avalues) == 0:
myString += ' No A sheets found...\n'
else:
for num in range(len(self.avalues)):
myString += ' A' + str(num) + ': ' + self.avalues[num].title + '\n'
#U Sheet Count
if len(self.collisions) == 0:
myString += ' No U sheets found...\n'
else:
for num in range(len(self.collisions)):
myString += ' U' + str(num) + ': ' + self.collisions[num].title + '\n'
#O Sheet Count
if len(self.object) == 0:
myString += ' No O sheets found...\n'
else:
for num in range(len(self.object)):
myString += ' O' + str(num) + ': ' + self.object[num].title + '\n'
#Return
return myString
def getE(Z1, N1):
    # Convenience: download ion (Z1, N1) and return its first E sheet.
    return getdata(Z1, N1).E()
def getA(Z1, N1):
    # Convenience: first A-value sheet of ion (Z1, N1).
    return getdata(Z1, N1).A()
def getU(Z1, N1):
    # Convenience: first collision (U) sheet of ion (Z1, N1).
    return getdata(Z1, N1).U()
def getO(Z1, N1):
    # Convenience: first O sheet of ion (Z1, N1).
    return getdata(Z1, N1).O()
def listcontent():
    # Delegate to the download API (presumably lists available data sets).
    API.listContent()
def getdata(Z, N):
    #Downloads various atomic data files and stores
    #them in Panda dataframes
    #Takes: Single data set
    #Returns: Single ion (or None on download error)
    #Make sure Z and N are INTS
    Z = int(Z)
    N = int(N)
    myIon = Ion(Z, N)
    #Generate our name
    myIon.generateName()
    #Build the filename
    filename = myIon.name
    #Get the file
    # NOTE(review): API.getFile apparently returns either an error string
    # or a dict with a 'worksheets' key -- confirm in DownloadAPI.
    file = API.getFile(filename)
    #Error may have occurred, print error
    if 'ERROR' in file:
        print file
        return None
    #If no error, continue with ion appending
    else:
        for attribute in range(len(file['worksheets'])):
            newAttribute = IonAttribute()
            newAttribute.title = file['worksheets'][attribute]['title']
            newAttribute.data = file['worksheets'][attribute]['data']
            newAttribute.sources = file['worksheets'][attribute]['sources']
            # Route the sheet into the matching list(s) by its type code.
            if 'E' in file['worksheets'][attribute]['type']:
                myIon.levels.append(newAttribute)
            if 'A' in file['worksheets'][attribute]['type']:
                myIon.avalues.append(newAttribute)
            if 'U' in file['worksheets'][attribute]['type']:
                myIon.collisions.append(newAttribute)
            if 'O' in file['worksheets'][attribute]['type']:
                myIon.object.append(newAttribute)
        #Return the ion
        print myIon
        return myIon
def EUnit(x, unit='cm-1'):
    """Convert an energy *x* given in cm^-1 to the requested unit.

    Supported units: 'cm-1' (no-op, default), 'Ryd', 'eV'.  Any
    unrecognized unit string falls through and returns *x* unchanged.
    """
    if unit == 'Ryd':
        # Rydberg constant expressed in cm^-1.
        return x / 109737.31568539
    if unit == 'eV':
        # eV per cm^-1.
        return x * 1.239841930e-4
    return x
def plot(df, xAxis, yAxis, scatter=True, line=True, color='blue'):
    """Plot df[yAxis] against df[xAxis] and show the figure.

    scatter/line toggle the point marker and connecting line; color is
    passed straight through to matplotlib.
    """
    # '== False' kept deliberately: only an explicit False disables.
    marker = ' ' if scatter == False else 'o'
    linestyle = ' ' if line == False else '-'
    #Graph data
    plt.plot(df[xAxis], df[yAxis], marker=marker, linestyle=linestyle, color=color)
    plt.xlabel(xAxis)
    plt.ylabel(yAxis)
    plt.show()
def plotall(df, xAxis, yAxis, fileloc=None, scatter=True, line=True, color='blue', color2='red'):
    # Plot every transition of a multi-indexed dataframe; optionally save
    # each figure as '<indices>.png' under fileloc.
    myMarker = 'o'
    if scatter == False:
        myMarker = ' '
    myLine = '-'
    if line == False:
        myLine = ' '
    #Check to make sure the axis values are correct
    if len(xAxis) < 1:
        print 'XAxis not given...'
        return None
    if len(yAxis) < 1:
        print 'YAxis not given...'
        return None
    if len(xAxis) > len(yAxis) or len(yAxis) > len(xAxis):
        print 'Axis arrays don\'t match in length...'
        return None
    #Go through all of the possible indexes of the data
    for x in range(len(df.index)):
        #Create a dataframe to hold the data for this particular transition
        myDF = df
        #Index and build filename
        filename = ''
        for y in range(len(df.index[x])-1):
            # Drill one level into the MultiIndex and extend the filename.
            myDF = myDF.loc[df.index[x][y]]
            filename += str(int(df.index[x][y])) + '.'
        filename = filename[:-1]
        #Now graph the data
        for z in range(len(xAxis)):
            plt.plot(myDF[xAxis[z]],myDF[yAxis[z]], marker=myMarker, linestyle=myLine, color=color)
            # NOTE(review): the same series is drawn twice (color, then
            # color2 overdrawing it) -- confirm whether color2 was meant
            # for a different series.
            plt.plot(myDF[xAxis[z]],myDF[yAxis[z]], marker=myMarker, linestyle=myLine, color=color2)
            # NOTE(review): each label compares an axis with itself;
            # likely meant xAxis[z] + ' vs ' + yAxis[z].
            plt.xlabel(xAxis[z] + ' vs ' + xAxis[z])
            plt.ylabel(yAxis[z] + ' vs ' + yAxis[z])
        #Save the graph and close
        if(fileloc != None):
            plt.savefig(fileloc + filename + '.png')
        plt.close()
#Dev tools for debugging purposes
def printexpanded(df):
    # Print the whole dataframe without pandas row truncation, then
    # restore the user's previous display settings.
    #Get the current settings
    # NOTE(review): 'display.height' was removed in modern pandas; this
    # relies on an old pandas version.
    before_height = pandas.get_option('display.height')
    before_max_rows = pandas.get_option('display.max_rows')
    #Set to the new settings to print the dataframe correctly
    pandas.set_option('display.height', len(df)+1)
    pandas.set_option('display.max_rows', len(df)+1)
    #Print the dataframe
    print df
    #Set the settings back to the previous values
    pandas.set_option('display.height', before_height)
    pandas.set_option('display.max_rows', before_max_rows)
def printstats(df):
    #Prints some statistical data of the dataframe provided
    print df.describe()
def clear():
    # Clear the console ('cls' is Windows-only).
    os.system('cls')
# Signal that module initialization completed.
print 'AtomPy ready!'
/ClueDojo-1.4.3-1.tar.gz/ClueDojo-1.4.3-1/src/cluedojo/static/dojox/form/DateTextBox.js | if(!dojo._hasResource["dojox.form.DateTextBox"]){
dojo._hasResource["dojox.form.DateTextBox"]=true;
dojo.provide("dojox.form.DateTextBox");
dojo.experimental("dojox.form.DateTextBox");
dojo.require("dojox.widget.Calendar");
dojo.require("dojox.widget.CalendarViews");
dojo.require("dijit.form._DateTimeTextBox");
dojo.declare("dojox.form.DateTextBox",dijit.form._DateTimeTextBox,{popupClass:"dojox.widget.Calendar",_selector:"date",_open:function(){
this.inherited(arguments);
dojo.style(this._picker.domNode.parentNode,"position","absolute");
}});
dojo.declare("dojox.form.DayTextBox",dojox.form.DateTextBox,{popupClass:"dojox.widget.DailyCalendar",parse:function(_1){
return _1;
},format:function(_2){
return _2.getDate?_2.getDate():_2;
},validator:function(_3){
var _4=Number(_3);
var _5=/(^-?\d\d*$)/.test(String(_3));
return _3==""||_3==null||(_5&&_4>=1&&_4<=31);
},_open:function(){
this.inherited(arguments);
this._picker.onValueSelected=dojo.hitch(this,function(_6){
this.focus();
setTimeout(dojo.hitch(this,"_close"),1);
dijit.form.TextBox.prototype._setValueAttr.call(this,String(_6.getDate()),true,String(_6.getDate()));
});
}});
dojo.declare("dojox.form.MonthTextBox",dojox.form.DateTextBox,{popupClass:"dojox.widget.MonthlyCalendar",selector:"date",postMixInProperties:function(){
this.inherited(arguments);
this.constraints.datePattern="MM";
},format:function(_7){
return Number(_7)+1;
},parse:function(_8,_9){
return Number(_8)-1;
},serialize:function(_a,_b){
return String(_a);
},validator:function(_c){
var _d=Number(_c);
var _e=/(^-?\d\d*$)/.test(String(_c));
return _c==""||_c==null||(_e&&_d>=1&&_d<=12);
},_open:function(){
this.inherited(arguments);
this._picker.onValueSelected=dojo.hitch(this,function(_f){
this.focus();
setTimeout(dojo.hitch(this,"_close"),1);
dijit.form.TextBox.prototype._setValueAttr.call(this,_f,true,_f);
});
}});
dojo.declare("dojox.form.YearTextBox",dojox.form.DateTextBox,{popupClass:"dojox.widget.YearlyCalendar",format:function(_10){
if(typeof _10=="string"){
return _10;
}else{
if(_10.getFullYear){
return _10.getFullYear();
}
}
return _10;
},validator:function(_11){
return _11==""||_11==null||/(^-?\d\d*$)/.test(String(_11));
},_open:function(){
this.inherited(arguments);
this._picker.onValueSelected=dojo.hitch(this,function(_12){
this.focus();
setTimeout(dojo.hitch(this,"_close"),1);
dijit.form.TextBox.prototype._setValueAttr.call(this,_12,true,_12);
});
},parse:function(_13,_14){
return _13||(this._isEmpty(_13)?null:undefined);
},filter:function(val){
if(val&&val.getFullYear){
return val.getFullYear().toString();
}
return this.inherited(arguments);
}});
} | PypiClean |
/Djaloha-0.4.2.tar.gz/Djaloha-0.4.2/djaloha/static/aloha.0.20/lib/util/dom.js | * Aloha Editor is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.*
*
* Aloha Editor is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
// Ensure the GENTICS and GENTICS.Utils namespaces exist without
// clobbering members defined by previously loaded scripts.
GENTICS = window.GENTICS || {};
GENTICS.Utils = GENTICS.Utils || {};
define(
['aloha/jquery', 'util/class', 'aloha/ecma5shims'],
function(jQuery, Class, $_) {
var
GENTICS = window.GENTICS,
// Class = window.Class,
// http://www.w3.org/TR/DOM-Level-3-Core/core.html#ID-1841493061
Node = {
'ELEMENT_NODE' : 1,
'ATTRIBUTE_NODE': 2,
'TEXT_NODE': 3,
'CDATA_SECTION_NODE': 4,
'ENTITY_REFERENCE_NODE': 5,
'ENTITY_NODE': 6,
'PROCESSING_INSTRUCTION_NODE': 7,
'COMMENT_NODE': 8,
'DOCUMENT_NODE': 9,
'DOCUMENT_TYPE_NODE': 10,
'DOCUMENT_FRAGMENT_NODE': 11,
'NOTATION_NODE': 12,
//The two nodes are disconnected. Order between disconnected nodes is always implementation-specific.
'DOCUMENT_POSITION_DISCONNECTED': 0x01,
//The second node precedes the reference node.
'DOCUMENT_POSITION_PRECEDING': 0x02,
//The node follows the reference node.
'DOCUMENT_POSITION_FOLLOWING': 0x04,
//The node contains the reference node. A node which contains is always preceding, too.
'DOCUMENT_POSITION_CONTAINS': 0x08,
//The node is contained by the reference node. A node which is contained is always following, too.
'DOCUMENT_POSITION_CONTAINED_BY': 0x10,
//The determination of preceding versus following is implementation-specific.
'DOCUMENT_POSITION_IMPLEMENTATION_SPECIFIC': 0x20
};
/**
* @namespace GENTICS.Utils
* @class Dom provides methods to get information about the DOM and to manipulate it
* @singleton
*/
var Dom = Class.extend({
/**
* Regex to find word characters.
*/
wordRegex: /[\u0041-\u005A\u0061-\u007A\u00AA\u00B5\u00BA\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02C1\u02C6-\u02D1\u02E0-\u02E4\u02EC\u02EE\u0370-\u0374\u0376\u0377\u037A-\u037D\u0386\u0388-\u038A\u038C\u038E-\u03A1\u03A3-\u03F5\u03F7-\u0481\u048A-\u0525\u0531-\u0556\u0559\u0561-\u0587\u05D0-\u05EA\u05F0-\u05F2\u0621-\u064A\u066E\u066F\u0671-\u06D3\u06D5\u06E5\u06E6\u06EE\u06EF\u06FA-\u06FC\u06FF\u0710\u0712-\u072F\u074D-\u07A5\u07B1\u07CA-\u07EA\u07F4\u07F5\u07FA\u0800-\u0815\u081A\u0824\u0828\u0904-\u0939\u093D\u0950\u0958-\u0961\u0971\u0972\u0979-\u097F\u0985-\u098C\u098F\u0990\u0993-\u09A8\u09AA-\u09B0\u09B2\u09B6-\u09B9\u09BD\u09CE\u09DC\u09DD\u09DF-\u09E1\u09F0\u09F1\u0A05-\u0A0A\u0A0F\u0A10\u0A13-\u0A28\u0A2A-\u0A30\u0A32\u0A33\u0A35\u0A36\u0A38\u0A39\u0A59-\u0A5C\u0A5E\u0A72-\u0A74\u0A85-\u0A8D\u0A8F-\u0A91\u0A93-\u0AA8\u0AAA-\u0AB0\u0AB2\u0AB3\u0AB5-\u0AB9\u0ABD\u0AD0\u0AE0\u0AE1\u0B05-\u0B0C\u0B0F\u0B10\u0B13-\u0B28\u0B2A-\u0B30\u0B32\u0B33\u0B35-\u0B39\u0B3D\u0B5C\u0B5D\u0B5F-\u0B61\u0B71\u0B83\u0B85-\u0B8A\u0B8E-\u0B90\u0B92-\u0B95\u0B99\u0B9A\u0B9C\u0B9E\u0B9F\u0BA3\u0BA4\u0BA8-\u0BAA\u0BAE-\u0BB9\u0BD0\u0C05-\u0C0C\u0C0E-\u0C10\u0C12-\u0C28\u0C2A-\u0C33\u0C35-\u0C39\u0C3D\u0C58\u0C59\u0C60\u0C61\u0C85-\u0C8C\u0C8E-\u0C90\u0C92-\u0CA8\u0CAA-\u0CB3\u0CB5-\u0CB9\u0CBD\u0CDE\u0CE0\u0CE1\u0D05-\u0D0C\u0D0E-\u0D10\u0D12-\u0D28\u0D2A-\u0D39\u0D3D\u0D60\u0D61\u0D7A-\u0D7F\u0D85-\u0D96\u0D9A-\u0DB1\u0DB3-\u0DBB\u0DBD\u0DC0-\u0DC6\u0E01-\u0E30\u0E32\u0E33\u0E40-\u0E46\u0E81\u0E82\u0E84\u0E87\u0E88\u0E8A\u0E8D\u0E94-\u0E97\u0E99-\u0E9F\u0EA1-\u0EA3\u0EA5\u0EA7\u0EAA\u0EAB\u0EAD-\u0EB0\u0EB2\u0EB3\u0EBD\u0EC0-\u0EC4\u0EC6\u0EDC\u0EDD\u0F00\u0F40-\u0F47\u0F49-\u0F6C\u0F88-\u0F8B\u1000-\u102A\u103F\u1050-\u1055\u105A-\u105D\u1061\u1065\u1066\u106E-\u1070\u1075-\u1081\u108E\u10A0-\u10C5\u10D0-\u10FA\u10FC\u1100-\u1248\u124A-\u124D\u1250-\u1256\u1258\u125A-\u125D\u1260-\u1288\u128A-\u128D\u1290-\u12B0\u12B2-\u12B5\u12B8-\u12BE\u12C0\u12C2-\u12C5\u12C8-\u12D6\u12D8-\u1310\u
1312-\u1315\u1318-\u135A\u1380-\u138F\u13A0-\u13F4\u1401-\u166C\u166F-\u167F\u1681-\u169A\u16A0-\u16EA\u1700-\u170C\u170E-\u1711\u1720-\u1731\u1740-\u1751\u1760-\u176C\u176E-\u1770\u1780-\u17B3\u17D7\u17DC\u1820-\u1877\u1880-\u18A8\u18AA\u18B0-\u18F5\u1900-\u191C\u1950-\u196D\u1970-\u1974\u1980-\u19AB\u19C1-\u19C7\u1A00-\u1A16\u1A20-\u1A54\u1AA7\u1B05-\u1B33\u1B45-\u1B4B\u1B83-\u1BA0\u1BAE\u1BAF\u1C00-\u1C23\u1C4D-\u1C4F\u1C5A-\u1C7D\u1CE9-\u1CEC\u1CEE-\u1CF1\u1D00-\u1DBF\u1E00-\u1F15\u1F18-\u1F1D\u1F20-\u1F45\u1F48-\u1F4D\u1F50-\u1F57\u1F59\u1F5B\u1F5D\u1F5F-\u1F7D\u1F80-\u1FB4\u1FB6-\u1FBC\u1FBE\u1FC2-\u1FC4\u1FC6-\u1FCC\u1FD0-\u1FD3\u1FD6-\u1FDB\u1FE0-\u1FEC\u1FF2-\u1FF4\u1FF6-\u1FFC\u2071\u207F\u2090-\u2094\u2102\u2107\u210A-\u2113\u2115\u2119-\u211D\u2124\u2126\u2128\u212A-\u212D\u212F-\u2139\u213C-\u213F\u2145-\u2149\u214E\u2183\u2184\u2C00-\u2C2E\u2C30-\u2C5E\u2C60-\u2CE4\u2CEB-\u2CEE\u2D00-\u2D25\u2D30-\u2D65\u2D6F\u2D80-\u2D96\u2DA0-\u2DA6\u2DA8-\u2DAE\u2DB0-\u2DB6\u2DB8-\u2DBE\u2DC0-\u2DC6\u2DC8-\u2DCE\u2DD0-\u2DD6\u2DD8-\u2DDE\u2E2F\u3005\u3006\u3031-\u3035\u303B\u303C\u3041-\u3096\u309D-\u309F\u30A1-\u30FA\u30FC-\u30FF\u3105-\u312D\u3131-\u318E\u31A0-\u31B7\u31F0-\u31FF\u3400-\u4DB5\u4E00-\u9FCB\uA000-\uA48C\uA4D0-\uA4FD\uA500-\uA60C\uA610-\uA61F\uA62A\uA62B\uA640-\uA65F\uA662-\uA66E\uA67F-\uA697\uA6A0-\uA6E5\uA717-\uA71F\uA722-\uA788\uA78B\uA78C\uA7FB-\uA801\uA803-\uA805\uA807-\uA80A\uA80C-\uA822\uA840-\uA873\uA882-\uA8B3\uA8F2-\uA8F7\uA8FB\uA90A-\uA925\uA930-\uA946\uA960-\uA97C\uA984-\uA9B2\uA9CF\uAA00-\uAA28\uAA40-\uAA42\uAA44-\uAA4B\uAA60-\uAA76\uAA7A\uAA80-\uAAAF\uAAB1\uAAB5\uAAB6\uAAB9-\uAABD\uAAC0\uAAC2\uAADB-\uAADD\uABC0-\uABE2\uAC00-\uD7A3\uD7B0-\uD7C6\uD7CB-\uD7FB\uF900-\uFA2D\uFA30-\uFA6D\uFA70-\uFAD9\uFB00-\uFB06\uFB13-\uFB17\uFB1D\uFB1F-\uFB28\uFB2A-\uFB36\uFB38-\uFB3C\uFB3E\uFB40\uFB41\uFB43\uFB44\uFB46-\uFBB1\uFBD3-\uFD3D\uFD50-\uFD8F\uFD92-\uFDC7\uFDF0-\uFDFB\uFE70-\uFE74\uFE76-\uFEFC\uFF21-\uFF3A\uFF41-\uFF5A\uFF66-\uFFBE\uFFC2-\uFFC7\uF
FCA-\uFFCF\uFFD2-\uFFD7\uFFDA-\uFFDC]/,
/**
* Regex to find non-word characters.
*/
nonWordRegex: /[^\u0041-\u005A\u0061-\u007A\u00AA\u00B5\u00BA\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02C1\u02C6-\u02D1\u02E0-\u02E4\u02EC\u02EE\u0370-\u0374\u0376\u0377\u037A-\u037D\u0386\u0388-\u038A\u038C\u038E-\u03A1\u03A3-\u03F5\u03F7-\u0481\u048A-\u0525\u0531-\u0556\u0559\u0561-\u0587\u05D0-\u05EA\u05F0-\u05F2\u0621-\u064A\u066E\u066F\u0671-\u06D3\u06D5\u06E5\u06E6\u06EE\u06EF\u06FA-\u06FC\u06FF\u0710\u0712-\u072F\u074D-\u07A5\u07B1\u07CA-\u07EA\u07F4\u07F5\u07FA\u0800-\u0815\u081A\u0824\u0828\u0904-\u0939\u093D\u0950\u0958-\u0961\u0971\u0972\u0979-\u097F\u0985-\u098C\u098F\u0990\u0993-\u09A8\u09AA-\u09B0\u09B2\u09B6-\u09B9\u09BD\u09CE\u09DC\u09DD\u09DF-\u09E1\u09F0\u09F1\u0A05-\u0A0A\u0A0F\u0A10\u0A13-\u0A28\u0A2A-\u0A30\u0A32\u0A33\u0A35\u0A36\u0A38\u0A39\u0A59-\u0A5C\u0A5E\u0A72-\u0A74\u0A85-\u0A8D\u0A8F-\u0A91\u0A93-\u0AA8\u0AAA-\u0AB0\u0AB2\u0AB3\u0AB5-\u0AB9\u0ABD\u0AD0\u0AE0\u0AE1\u0B05-\u0B0C\u0B0F\u0B10\u0B13-\u0B28\u0B2A-\u0B30\u0B32\u0B33\u0B35-\u0B39\u0B3D\u0B5C\u0B5D\u0B5F-\u0B61\u0B71\u0B83\u0B85-\u0B8A\u0B8E-\u0B90\u0B92-\u0B95\u0B99\u0B9A\u0B9C\u0B9E\u0B9F\u0BA3\u0BA4\u0BA8-\u0BAA\u0BAE-\u0BB9\u0BD0\u0C05-\u0C0C\u0C0E-\u0C10\u0C12-\u0C28\u0C2A-\u0C33\u0C35-\u0C39\u0C3D\u0C58\u0C59\u0C60\u0C61\u0C85-\u0C8C\u0C8E-\u0C90\u0C92-\u0CA8\u0CAA-\u0CB3\u0CB5-\u0CB9\u0CBD\u0CDE\u0CE0\u0CE1\u0D05-\u0D0C\u0D0E-\u0D10\u0D12-\u0D28\u0D2A-\u0D39\u0D3D\u0D60\u0D61\u0D7A-\u0D7F\u0D85-\u0D96\u0D9A-\u0DB1\u0DB3-\u0DBB\u0DBD\u0DC0-\u0DC6\u0E01-\u0E30\u0E32\u0E33\u0E40-\u0E46\u0E81\u0E82\u0E84\u0E87\u0E88\u0E8A\u0E8D\u0E94-\u0E97\u0E99-\u0E9F\u0EA1-\u0EA3\u0EA5\u0EA7\u0EAA\u0EAB\u0EAD-\u0EB0\u0EB2\u0EB3\u0EBD\u0EC0-\u0EC4\u0EC6\u0EDC\u0EDD\u0F00\u0F40-\u0F47\u0F49-\u0F6C\u0F88-\u0F8B\u1000-\u102A\u103F\u1050-\u1055\u105A-\u105D\u1061\u1065\u1066\u106E-\u1070\u1075-\u1081\u108E\u10A0-\u10C5\u10D0-\u10FA\u10FC\u1100-\u1248\u124A-\u124D\u1250-\u1256\u1258\u125A-\u125D\u1260-\u1288\u128A-\u128D\u1290-\u12B0\u12B2-\u12B5\u12B8-\u12BE\u12C0\u12C2-\u12C5\u12C8-\u12D6\u12D8-\u13
10\u1312-\u1315\u1318-\u135A\u1380-\u138F\u13A0-\u13F4\u1401-\u166C\u166F-\u167F\u1681-\u169A\u16A0-\u16EA\u1700-\u170C\u170E-\u1711\u1720-\u1731\u1740-\u1751\u1760-\u176C\u176E-\u1770\u1780-\u17B3\u17D7\u17DC\u1820-\u1877\u1880-\u18A8\u18AA\u18B0-\u18F5\u1900-\u191C\u1950-\u196D\u1970-\u1974\u1980-\u19AB\u19C1-\u19C7\u1A00-\u1A16\u1A20-\u1A54\u1AA7\u1B05-\u1B33\u1B45-\u1B4B\u1B83-\u1BA0\u1BAE\u1BAF\u1C00-\u1C23\u1C4D-\u1C4F\u1C5A-\u1C7D\u1CE9-\u1CEC\u1CEE-\u1CF1\u1D00-\u1DBF\u1E00-\u1F15\u1F18-\u1F1D\u1F20-\u1F45\u1F48-\u1F4D\u1F50-\u1F57\u1F59\u1F5B\u1F5D\u1F5F-\u1F7D\u1F80-\u1FB4\u1FB6-\u1FBC\u1FBE\u1FC2-\u1FC4\u1FC6-\u1FCC\u1FD0-\u1FD3\u1FD6-\u1FDB\u1FE0-\u1FEC\u1FF2-\u1FF4\u1FF6-\u1FFC\u2071\u207F\u2090-\u2094\u2102\u2107\u210A-\u2113\u2115\u2119-\u211D\u2124\u2126\u2128\u212A-\u212D\u212F-\u2139\u213C-\u213F\u2145-\u2149\u214E\u2183\u2184\u2C00-\u2C2E\u2C30-\u2C5E\u2C60-\u2CE4\u2CEB-\u2CEE\u2D00-\u2D25\u2D30-\u2D65\u2D6F\u2D80-\u2D96\u2DA0-\u2DA6\u2DA8-\u2DAE\u2DB0-\u2DB6\u2DB8-\u2DBE\u2DC0-\u2DC6\u2DC8-\u2DCE\u2DD0-\u2DD6\u2DD8-\u2DDE\u2E2F\u3005\u3006\u3031-\u3035\u303B\u303C\u3041-\u3096\u309D-\u309F\u30A1-\u30FA\u30FC-\u30FF\u3105-\u312D\u3131-\u318E\u31A0-\u31B7\u31F0-\u31FF\u3400-\u4DB5\u4E00-\u9FCB\uA000-\uA48C\uA4D0-\uA4FD\uA500-\uA60C\uA610-\uA61F\uA62A\uA62B\uA640-\uA65F\uA662-\uA66E\uA67F-\uA697\uA6A0-\uA6E5\uA717-\uA71F\uA722-\uA788\uA78B\uA78C\uA7FB-\uA801\uA803-\uA805\uA807-\uA80A\uA80C-\uA822\uA840-\uA873\uA882-\uA8B3\uA8F2-\uA8F7\uA8FB\uA90A-\uA925\uA930-\uA946\uA960-\uA97C\uA984-\uA9B2\uA9CF\uAA00-\uAA28\uAA40-\uAA42\uAA44-\uAA4B\uAA60-\uAA76\uAA7A\uAA80-\uAAAF\uAAB1\uAAB5\uAAB6\uAAB9-\uAABD\uAAC0\uAAC2\uAADB-\uAADD\uABC0-\uABE2\uAC00-\uD7A3\uD7B0-\uD7C6\uD7CB-\uD7FB\uF900-\uFA2D\uFA30-\uFA6D\uFA70-\uFAD9\uFB00-\uFB06\uFB13-\uFB17\uFB1D\uFB1F-\uFB28\uFB2A-\uFB36\uFB38-\uFB3C\uFB3E\uFB40\uFB41\uFB43\uFB44\uFB46-\uFBB1\uFBD3-\uFD3D\uFD50-\uFD8F\uFD92-\uFDC7\uFDF0-\uFDFB\uFE70-\uFE74\uFE76-\uFEFC\uFF21-\uFF3A\uFF41-\uFF5A\uFF66-\uFFBE\uFFC2-\uFFC
7\uFFCA-\uFFCF\uFFD2-\uFFD7\uFFDA-\uFFDC]/,
		/**
		 * Tags which can safely be merged with an adjacent sibling of the same type
		 * @hide
		 */
		mergeableTags: ['b', 'code', 'del', 'em', 'i', 'ins', 'strong', 'sub', 'sup', '#text'],
		/**
		 * Tags which do not mark word boundaries
		 * @hide
		 */
		nonWordBoundaryTags: ['a', 'b', 'code', 'del', 'em', 'i', 'ins', 'span', 'strong', 'sub', 'sup', '#text'],
		/**
		 * Tags which are considered 'nonempty', even if they have no children (or no data)
		 * TODO: finish this list
		 * @hide
		 */
		nonEmptyTags: ['br'],
		/**
		 * Tags which make up Flow Content or Phrasing Content, according to the HTML 5 specification.
		 * Used by allowsNesting to expand a content category into concrete tag names.
		 * @see http://dev.w3.org/html5/spec/Overview.html#flow-content
		 * @see http://dev.w3.org/html5/spec/Overview.html#phrasing-content
		 * @hide
		 */
		tags: {
			'flow' : [ 'a', 'abbr', 'address', 'area', 'article', 'aside', 'audio',
				'b', 'bdi','bdo', 'blockquote', 'br', 'button', 'canvas', 'cite', 'code',
				'command', 'datalist', 'del', 'details', 'dfn', 'div', 'dl', 'em',
				'embed', 'fieldset', 'figure', 'footer', 'form', 'h1', 'h2', 'h3',
				'h4', 'h5', 'h6', 'header', 'hgroup', 'hr', 'i', 'iframe', 'img',
				'input', 'ins', 'kbd', 'keygen', 'label', 'map', 'mark', 'math',
				'menu', 'meter', 'nav', 'noscript', 'object', 'ol', 'output', 'p',
				'pre', 'progress', 'q', 'ruby', 's', 'samp', 'script', 'section',
				'select', 'small', 'span', 'strong', 'style', 'sub', 'sup', 'svg',
				'table', 'textarea', 'time', 'u', 'ul', 'var', 'video', 'wbr', '#text' ],
			'phrasing' : [ 'a', 'abbr', 'area', 'audio', 'b', 'bdi', 'bdo', 'br', 'button',
				'canvas', 'cite', 'code', 'command', 'datalist', 'del', 'dfn',
				'em', 'embed', 'i', 'iframe', 'img', 'input', 'ins', 'kbd',
				'keygen', 'label', 'map', 'mark', 'math', 'meter', 'noscript',
				'object', 'output', 'progress', 'q', 'ruby', 'samp', 'script',
				'select', 'small', 'span', 'strong', 'sub', 'sup', 'svg',
				'textarea', 'time', 'u', 'var', 'video', 'wbr', '#text' ]
		},
/**
* Possible children of tags, according to the HTML 5
* specification.
* See http://dev.w3.org/html5/spec/Overview.html#elements-1
* Moved to http://www.whatwg.org/specs/web-apps/current-work/#elements-1
* @hide
*/
children: {
'a' : 'phrasing', // transparent
'abbr' : 'phrasing',
'address' : 'flow',
'area' : 'empty',
'article' : 'flow',
'aside' : 'flow',
'audio' : 'source', // transparent
'b' : 'phrasing',
'base' : 'empty',
'bdo' : 'phrasing',
'blockquote' : 'phrasing',
'body' : 'flow',
'br' : 'empty',
'button' : 'phrasing',
'canvas' : 'phrasing', // transparent
'caption' : 'flow',
'cite' : 'phrasing',
'code' : 'phrasing',
'col' : 'empty',
'colgroup' : 'col',
'command' : 'empty',
'datalist' : ['phrasing', 'option'],
'dd' : 'flow',
'del' : 'phrasing',
'div' : 'flow',
'details' : ['summary', 'flow'],
'dfn' : 'flow',
'dl' : ['dt','dd'],
'dt' : 'phrasing', // varies
'em' : 'phrasing',
'embed' : 'empty',
'fieldset' : ['legend', 'flow'],
'figcaption': 'flow',
'figure' : ['figcaption', 'flow'],
'footer' : 'flow',
'form' : 'flow',
'h1' : 'phrasing',
'h2' : 'phrasing',
'h3' : 'phrasing',
'h4' : 'phrasing',
'h5' : 'phrasing',
'h6' : 'phrasing',
//head
'header' : 'flow',
'hgroup' : ['h1','h2','h3','h4','h5','h6'],
'hr' : 'empty',
//html :)
'i' : 'phrasing',
'iframe' : '#text',
'img' : 'empty',
'input' : 'empty',
'ins' : 'phrasing', // transparent
'kbd' : 'phrasing',
'keygen' : 'empty',
'label' : 'phrasing',
'legend' : 'phrasing',
'li' : 'flow',
'link' : 'empty',
'map' : 'area', // transparent
'mark' : 'phrasing',
'menu' : ['li', 'flow'],
'meta' : 'empty',
'meter' : 'phrasing',
'nav' : 'flow',
'noscript' : 'phrasing', // varies
'object' : 'param', // transparent
'ol' : 'li',
'optgroup' : 'option',
'option' : '#text',
'output' : 'phrasing',
'p' : 'phrasing',
'param' : 'empty',
'pre' : 'phrasing',
'progress' : 'phrasing',
'q' : 'phrasing',
'rp' : 'phrasing',
'rt' : 'phrasing',
'ruby' : ['phrasing', 'rt', 'rp'],
's' : 'phrasing',
'samp' : 'pharsing',
'script' : '#script', //script
'section' : 'flow',
'select' : ['option', 'optgroup'],
'small' : 'phrasing',
'source' : 'empty',
'span' : 'phrasing',
'strong' : 'phrasing',
'style' : 'phrasing', // varies
'sub' : 'phrasing',
'summary' : 'phrasing',
'sup' : 'phrasing',
'table' : ['caption', 'colgroup', 'thead', 'tbody', 'tfoot', 'tr'],
'tbody' : 'tr',
'td' : 'flow',
'textarea' : '#text',
'tfoot' : 'tr',
'th' : 'phrasing',
'thead' : 'tr',
'time' : 'phrasing',
'title' : '#text',
'tr' : ['th', 'td'],
'track' : 'empty',
'u' : 'phrasing',
'ul' : 'li',
'var' : 'phrasing',
'video' : 'source', // transparent
'wbr' : 'empty'
},
		/**
		 * List of node names of block-level elements
		 * TODO: finish this list
		 * @hide
		 */
		blockLevelElements: ['p', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'blockquote', 'div', 'pre'],
		/**
		 * List of node names of list elements
		 * @hide
		 */
		listElements: ['li', 'ol', 'ul'],
/**
* Splits a DOM element at the given position up until the limiting object(s), so that it is valid HTML again afterwards.
* @param {RangeObject} range Range object that indicates the position of the splitting.
* This range will be updated, so that it represents the same range as before the split.
* @param {jQuery} limit Limiting node(s) for the split.
* The limiting node will not be included in the split itself.
* If no limiting object is set, the document body will be the limiting object.
	 * @param {boolean} atEnd If set to true, the DOM will be split at the end of the range, otherwise at the start.
* @return {object} jQuery object containing the two root DOM objects of the split, true if the DOM did not need to be split or false if the DOM could not be split
* @method
*/
		split: function (range, limit, atEnd) {
			var
				splitElement = jQuery(range.startContainer),
				// for text nodes this is a character offset, otherwise a child index
				splitPosition = range.startOffset,
				updateRange, path, parents,
				newDom, insertElement, secondPart,
				i, pathLength, element, jqelement, children, newElement,
				next, prev, offset;
			if (atEnd) {
				splitElement = jQuery(range.endContainer);
				splitPosition = range.endOffset;
			}
			if (limit.length < 1) {
				limit = jQuery(document.body);
			}
			// we may have to update the range if it is not collapsed and we are splitting at the start
			updateRange = (!range.isCollapsed() && !atEnd);
			// find the path up to the highest object that will be split
			parents = splitElement.parents().get();
			parents.unshift(splitElement.get(0));
			// walk up from the split element; `path` becomes the chain of
			// ancestors strictly below the first limit node encountered
			jQuery.each(parents, function(index, element) {
				var isLimit = limit.filter(
					function(){
						return this == element;
					}).length;
				if (isLimit) {
					// NOTE: when index === 0 (the split element itself is a
					// limit) `path` stays undefined and we report "no split"
					if (index > 0) {
						path = parents.slice(0, index);
					}
					return false;
				}
			});
			// nothing found to split -> return here
			if (! path) {
				return true;
			}
			// reverse so we iterate top-most ancestor first, split element last
			path = path.reverse();
			// iterate over the path, create new dom nodes for every element and move
			// the contents right of the split to the new element
			for( i=0, pathLength = path.length; i < pathLength; ++i) {
				element = path[i];
				if (i === pathLength - 1) {
					// last element in the path -> we have to split it
					// split the last part into two parts
					if (element.nodeType === 3) {
						// text node: split the character data at splitPosition
						secondPart = document.createTextNode(element.data.substring(splitPosition, element.data.length));
						element.data = element.data.substring(0, splitPosition);
					} else {
						// other nodes: clone the element and move the children
						// from splitPosition onwards into the clone
						jqelement = jQuery(element);
						children = jqelement.contents();
						newElement = jqelement.clone(false).empty();
						secondPart = newElement.append(children.slice(splitPosition, children.length)).get(0);
					}
					// update the range if necessary
					if (updateRange && range.endContainer === element) {
						range.endContainer = secondPart;
						range.endOffset -= splitPosition;
						range.clearCaches();
					}
					// add the second part
					if (insertElement) {
						insertElement.prepend(secondPart);
					} else {
						jQuery(element).after(secondPart);
					}
				} else {
					// create the new element of the same type and prepend it to the previously created element
					newElement = jQuery(element).clone(false).empty();
					if (!newDom) {
						newDom = newElement;
					} else {
						insertElement.prepend(newElement);
					}
					insertElement = newElement;
					// move all contents right of the split to the new element
					while ( true ) {
						next = path[i+1].nextSibling;
						if ( !next ) { break; }
						insertElement.append(next);
					}
					// update the range if necessary
					if (updateRange && range.endContainer === element) {
						range.endContainer = newElement.get(0);
						// the new end offset is the child index of path[i+1]
						prev = path[i+1];
						offset = 0;
						while ( true ) {
							prev = prev.previousSibling;
							if ( !prev ) { break; }
							offset++;
						}
						range.endOffset -= offset;
						range.clearCaches();
					}
				}
			}
			// append the new dom (the second half) after the first half
			jQuery(path[0]).after(newDom);
			return jQuery([path[0], newDom ? newDom.get(0) : secondPart]);
		},
/**
* Check whether the HTML 5 specification allows direct nesting of the given DOM
* objects.
* @param {object} outerDOMObject
* outer (nesting) DOM Object
* @param {object} innerDOMObject
* inner (nested) DOM Object
* @return {boolean} true when the nesting is allowed, false if not
* @method
*/
allowsNesting: function (outerDOMObject, innerDOMObject) {
if (!outerDOMObject || !outerDOMObject.nodeName || !innerDOMObject
|| !innerDOMObject.nodeName) {
return false;
}
var outerNodeName = outerDOMObject.nodeName.toLowerCase(),
innerNodeName = innerDOMObject.nodeName.toLowerCase();
if (!this.children[outerNodeName]) {
return false;
}
// check whether the nesting is configured by node names (like for table)
if (this.children[outerNodeName] == innerNodeName) {
return true;
}
if (jQuery.isArray(this.children[outerNodeName])
&& jQuery.inArray(innerNodeName, this.children[outerNodeName]) >= 0) {
return true;
}
if (jQuery.isArray(this.tags[this.children[outerNodeName]])
&& jQuery.inArray(innerNodeName,
this.tags[this.children[outerNodeName]]) >= 0) {
return true;
}
return false;
},
/**
* Apply the given markup additively to the given range. The given rangeObject will be modified if necessary
* @param {GENTICS.Utils.RangeObject} rangeObject range to which the markup shall be added
* @param {jQuery} markup markup to be applied as jQuery object
	 * @param {boolean} nesting true when nesting of the added markup is allowed, false if not (default: false)
* @method
*/
		addMarkup: function (rangeObject, markup, nesting) {
			// split partially contained text nodes at the start and end of the range,
			// so the range afterwards covers whole nodes only
			if (rangeObject.startContainer.nodeType === 3 && rangeObject.startOffset > 0
				&& rangeObject.startOffset < rangeObject.startContainer.data.length) {
				this.split(rangeObject, jQuery(rangeObject.startContainer).parent(),
					false);
			}
			if (rangeObject.endContainer.nodeType === 3 && rangeObject.endOffset > 0
				&& rangeObject.endOffset < rangeObject.endContainer.data.length) {
				this.split(rangeObject, jQuery(rangeObject.endContainer).parent(),
					true);
			}
			// get the range tree and wrap the markup around each fully
			// contained subtree
			var rangeTree = rangeObject.getRangeTree();
			this.recursiveAddMarkup(rangeTree, markup, rangeObject, nesting);
			// cleanup DOM: merge equal siblings, drop empties left by the split
			this.doCleanup({'merge' : true, 'removeempty' : true}, rangeObject);
		},
/**
* Recursive helper method to add the given markup to the range
* @param rangeTree rangetree at the current level
* @param markup markup to be applied
* @param rangeObject range object, which eventually is updated
* @param nesting true when nesting of the added markup is allowed, false if not
* @hide
*/
recursiveAddMarkup: function (rangeTree, markup, rangeObject, nesting) {
var i, innerRange, rangeLength;
// iterate through all rangetree objects of that level
for ( i = 0, rangeLength = rangeTree.length; i < rangeLength; ++i) {
// check whether the rangetree object is fully contained and the markup may be wrapped around the object
if (rangeTree[i].type == 'full' && this.allowsNesting(markup.get(0), rangeTree[i].domobj)) {
// we wrap the object, when
// 1. nesting of markup is allowed or the node is not of the markup to be added
// 2. the node an element node or a non-empty text node
if ((nesting || rangeTree[i].domobj.nodeName != markup.get(0).nodeName)
&& (rangeTree[i].domobj.nodeType !== 3 || jQuery
.trim(rangeTree[i].domobj.data).length !== 0)) {
// wrap the object
jQuery(rangeTree[i].domobj).wrap(markup);
// TODO eventually update the range (if it changed)
// when nesting is not allowed, we remove the markup from the inner element
if (!nesting && rangeTree[i].domobj.nodeType !== 3) {
innerRange = new GENTICS.Utils.RangeObject();
innerRange.startContainer = innerRange.endContainer = rangeTree[i].domobj.parentNode;
innerRange.startOffset = 0;
innerRange.endOffset = innerRange.endContainer.childNodes.length;
this.removeMarkup(innerRange, markup, jQuery(rangeTree[i].domobj.parentNode));
}
}
} else {
// TODO check whether the object may be replaced by the given markup
if (false) {
// TODO replace
} else {
// recurse into the children (if any), but not if nesting is not
// allowed and the object is of the markup to be added
if ((nesting || (rangeTree[i].domobj && rangeTree[i].domobj.nodeName !== markup.get(0).nodeName))
&& rangeTree[i].children && rangeTree[i].children.length > 0) {
this.recursiveAddMarkup(rangeTree[i].children, markup);
}
}
}
}
},
/**
* Find the highest occurrence of a node with given nodename within the parents
* of the start. When limit objects are given, the search stops there.
	 * If a limiting object itself is of the searched type, it will not be considered.
* @param {DOMObject} start start object
* @param {String} nodeName name of the node to search for (case-insensitive)
* @param {jQuery} limit Limiting node(s) as jQuery object (if none given, the search will stop when there are no more parents)
* @return {DOMObject} the found DOM object or undefined
* @method
*/
findHighestElement: function (start, nodeName, limit) {
nodeName = nodeName.toLowerCase();
// this will be the highest found markup object (up to a limit object)
var highestObject,
// blah
testObject = start,
// helper function to stop when we reach a limit object
isLimit = limit ? function () {
return limit.filter(
function() {
return testObject == this;
}
).length;
} : function () {
return false;
};
// now get the highest parent that has the given markup (until we reached
// one of the limit objects or there are no more parent nodes)
while (!isLimit() && testObject) {
if (testObject.nodeName.toLowerCase() === nodeName) {
highestObject = testObject;
}
testObject = testObject.parentNode;
}
return highestObject;
},
/**
* Remove the given markup from the given range. The given rangeObject will be modified if necessary
* TODO: add parameter deep/shallow
* @param {GENTICS.Utils.RangeObject} rangeObject range from which the markup shall be removed
* @param {jQuery} markup markup to be removed as jQuery object
* @param {jQuery} limit Limiting node(s) as jQuery object
* @method
*/
removeMarkup: function (rangeObject, markup, limit) {
var nodeName = markup.get(0).nodeName,
startSplitLimit = this.findHighestElement(rangeObject.startContainer, nodeName, limit),
endSplitLimit = this.findHighestElement(rangeObject.endContainer, nodeName, limit),
didSplit = false,
highestObject, root, rangeTree;
if (startSplitLimit && rangeObject.startOffset > 0) {
// when the start is in the start of its container, we don't split
this.split(rangeObject, jQuery(startSplitLimit).parent(), false);
didSplit = true;
}
if (endSplitLimit) {
// when the end is in the end of its container, we don't split
if (rangeObject.endContainer.nodeType === 3 && rangeObject.endOffset < rangeObject.endContainer.data.length) {
this.split(rangeObject, jQuery(endSplitLimit).parent(), true);
didSplit = true;
}
if (rangeObject.endContainer.nodeType === 1 && rangeObject.endOffset < rangeObject.childNodes.length) {
this.split(rangeObject, jQuery(endSplitLimit).parent(), true);
didSplit = true;
}
}
// when we split the DOM, we maybe need to correct the range
if (didSplit) {
rangeObject.correctRange();
}
// find the highest occurrence of the markup
highestObject = this.findHighestElement(rangeObject.getCommonAncestorContainer(), nodeName, limit);
root = highestObject ? highestObject.parentNode : rangeObject.getCommonAncestorContainer();
if (root) {
// construct the range tree
rangeTree = rangeObject.getRangeTree(root);
// remove the markup from the range tree
this.recursiveRemoveMarkup(rangeTree, markup);
// cleanup DOM
this.doCleanup({'merge' : true, 'removeempty' : true}, rangeObject, root);
}
},
/**
* TODO: pass the range itself and eventually update it if necessary
* Recursive helper method to remove the given markup from the range
* @param rangeTree rangetree at the current level
* @param markup markup to be applied
* @hide
*/
recursiveRemoveMarkup: function (rangeTree, markup) {
var i, rangeLength, content;
// iterate over the rangetree objects of this level
for (i = 0, rangeLength = rangeTree.length; i < rangeLength; ++i) {
// check whether the object is the markup to be removed and is fully into the range
if (rangeTree[i].type == 'full' && rangeTree[i].domobj.nodeName == markup.get(0).nodeName) {
// found the markup, so remove it
content = jQuery(rangeTree[i].domobj).contents();
if (content.length > 0) {
// when the object has children, we unwrap them
content.first().unwrap();
} else {
// obj has no children, so just remove it
jQuery(rangeTree[i].domobj).remove();
}
}
// if the object has children, we do the recursion now
if (rangeTree[i].children) {
this.recursiveRemoveMarkup(rangeTree[i].children, markup);
}
}
},
/**
* Cleanup the DOM, starting with the given startobject (or the common ancestor container of the given range)
* ATTENTION: If range is a selection you need to update the selection after doCleanup
* Cleanup modes (given as properties in 'cleanup'):
* <pre>
* - merge: merges multiple successive nodes of same type, if this is allowed, starting at the children of the given node (defaults to false)
* - removeempty: removes empty element nodes (defaults to false)
* </pre>
* Example for calling this method:<br/>
* <code>GENTICS.Utils.Dom.doCleanup({merge:true,removeempty:false}, range)</code>
* @param {object} cleanup type of cleanup to be done
* @param {GENTICS.Utils.RangeObject} rangeObject range which is eventually updated
	 * @param {DOMObject} start start object, if not given, the commonancestorcontainer is used as start object instead
* @return {boolean} true when the range (startContainer/startOffset/endContainer/endOffset) was modified, false if not
* @method
*/
		doCleanup: function(cleanup, rangeObject, start) {
			var that = this, prevNode, modifiedRange, startObject, startOffset, endOffset;
			// default all cleanup modes to off
			if (typeof cleanup === 'undefined') {
				cleanup = {};
			}
			if (typeof cleanup.merge === 'undefined') {
				cleanup.merge = false;
			}
			if (typeof cleanup.removeempty === 'undefined') {
				cleanup.removeempty = false;
			}
			if (typeof start === 'undefined' && rangeObject) {
				start = rangeObject.getCommonAncestorContainer();
			}
			// remember the previous node here (successive nodes of same type will be merged into this)
			prevNode = false;
			// check whether the range needed to be modified during merging
			modifiedRange = false;
			// get the start object
			startObject = jQuery(start);
			startOffset = rangeObject.startOffset;
			endOffset = rangeObject.endOffset;
			// iterate through all sub nodes
			startObject.contents().each(function() {
				var index;
				// Try to read the nodeType property and return if we do not have permission
				// ie.: frame document to an external URL
				var nodeType;
				try {
					nodeType = this.nodeType;
					index = that.getIndexInParent(this);
				}
				catch (e) {
					return;
				}
				// decide further actions by node type
				switch(nodeType) {
				// found a non-text node
				case 1:
					if (prevNode && prevNode.nodeName == this.nodeName) {
						// found a successive node of same type
						// now we check whether the selection starts or ends in the mother node after the current node
						if (rangeObject.startContainer === startObject && startOffset > index) {
							// there will be one less object, so reduce the startOffset by one
							rangeObject.startOffset -= 1;
							// set the flag for range modification
							modifiedRange = true;
						}
						if (rangeObject.endContainer === startObject && endOffset > index) {
							// there will be one less object, so reduce the endOffset by one
							rangeObject.endOffset -= 1;
							// set the flag for range modification
							modifiedRange = true;
						}
						// merge the contents of this node into the previous one
						jQuery(prevNode).append(jQuery(this).contents());
						// after merging, we eventually need to cleanup the prevNode again
						// NOTE: |= coerces modifiedRange to a number (0/1); the
						// truthiness check at the end of doCleanup still works
						modifiedRange |= that.doCleanup(cleanup, rangeObject, prevNode);
						// remove this node
						jQuery(this).remove();
					} else {
						// do the recursion step here
						modifiedRange |= that.doCleanup(cleanup, rangeObject, this);
						// eventually remove empty elements
						var removed = false;
						if (cleanup.removeempty) {
							if (GENTICS.Utils.Dom.isBlockLevelElement(this) && this.childNodes.length === 0) {
								// actual removal happens below, after the range offsets were corrected
								// jQuery(this).remove();
								removed = true;
							}
							if (jQuery.inArray(this.nodeName.toLowerCase(), that.mergeableTags) >= 0
									&& jQuery(this).text().length === 0 && this.childNodes.length === 0) {
								// actual removal happens below, after the range offsets were corrected
								// jQuery(this).remove();
								removed = true;
							}
						}
						// when the current node was not removed, we eventually store it as previous (mergeable) tag
						if (!removed) {
							if (jQuery.inArray(this.nodeName.toLowerCase(), that.mergeableTags) >= 0) {
								prevNode = this;
							} else {
								prevNode = false;
							}
						} else {
							// now we check whether the selection starts or ends in the mother node of this
							if (rangeObject.startContainer === this.parentNode && startOffset > index) {
								// there will be one less object, so reduce the startOffset by one
								rangeObject.startOffset = rangeObject.startOffset - 1;
								// set the flag for range modification
								modifiedRange = true;
							}
							if (rangeObject.endContainer === this.parentNode && endOffset > index) {
								// there will be one less object, so reduce the endOffset by one
								rangeObject.endOffset = rangeObject.endOffset - 1;
								// set the flag for range modification
								modifiedRange = true;
							}
							// remove this (empty) element node
							jQuery(this).remove();
						}
					}
					break;
				// found a text node
				case 3:
					// found a text node
					if (prevNode && prevNode.nodeType === 3 && cleanup.merge) {
						// the current text node will be merged into the last one, so
						// check whether the selection starts or ends in the current
						// text node
						if (rangeObject.startContainer === this) {
							// selection starts in the current text node
							// update the start container to the last node
							rangeObject.startContainer = prevNode;
							// update the start offset
							rangeObject.startOffset += prevNode.nodeValue.length;
							// set the flag for range modification
							modifiedRange = true;
						} else if (rangeObject.startContainer === prevNode.parentNode
								&& rangeObject.startOffset === that.getIndexInParent(prevNode) + 1) {
							// selection starts right between the previous and current text nodes (which will be merged)
							// update the start container to the previous node
							rangeObject.startContainer = prevNode;
							// set the start offset
							rangeObject.startOffset = prevNode.nodeValue.length;
							// set the flag for range modification
							modifiedRange = true;
						}
						if (rangeObject.endContainer === this) {
							// selection ends in the current text node
							// update the end container to be the last node
							rangeObject.endContainer = prevNode;
							// update the end offset
							rangeObject.endOffset += prevNode.nodeValue.length;
							// set the flag for range modification
							modifiedRange = true;
						} else if (rangeObject.endContainer === prevNode.parentNode
								&& rangeObject.endOffset === that.getIndexInParent(prevNode) + 1) {
							// selection ends right between the previous and current text nodes (which will be merged)
							// update the end container to the previous node
							rangeObject.endContainer = prevNode;
							// set the end offset
							rangeObject.endOffset = prevNode.nodeValue.length;
							// set the flag for range modification
							modifiedRange = true;
						}
						// now append the contents of the current text node into the previous
						prevNode.data += this.data;
						// NOTE: deliberate fall-through to the offset fixup and
						// removal code below — the merged-away node gets removed
						// remove empty text nodes
					} else if ( this.nodeValue === '' && cleanup.removeempty ) {
						// do nothing here; fall through so the empty text node
						// is removed and the range offsets are corrected below
						// remember it as the last text node if not empty
					} else if ( !(this.nodeValue === '' && cleanup.removeempty) ) {
						prevNode = this;
						// we are finish here don't delete this node
						break;
					}
					// now we check whether the selection starts or ends in the mother node of this
					if (rangeObject.startContainer === this.parentNode && rangeObject.startOffset > index) {
						// there will be one less object, so reduce the startOffset by one
						rangeObject.startOffset = rangeObject.startOffset - 1;
						// set the flag for range modification
						modifiedRange = true;
					}
					if (rangeObject.endContainer === this.parentNode && rangeObject.endOffset > index) {
						// there will be one less object, so reduce the endOffset by one
						rangeObject.endOffset = rangeObject.endOffset - 1;
						// set the flag for range modification
						modifiedRange = true;
					}
					// remove this text node
					jQuery(this).remove();
					break;
				}
			});
			// eventually remove the startnode itself
			// if (cleanup.removeempty
			// && GENTICS.Utils.Dom.isBlockLevelElement(start)
			// && (!start.childNodes || start.childNodes.length === 0)) {
			// if (rangeObject.startContainer == start) {
			// rangeObject.startContainer = start.parentNode;
			// rangeObject.startOffset = GENTICS.Utils.Dom.getIndexInParent(start);
			// }
			// if (rangeObject.endContainer == start) {
			// rangeObject.endContainer = start.parentNode;
			// rangeObject.endOffset = GENTICS.Utils.Dom.getIndexInParent(start);
			// }
			// startObject.remove();
			// modifiedRange = true;
			// }
			if (modifiedRange) {
				rangeObject.clearCaches();
			}
			return modifiedRange;
		},
/**
* Get the index of the given node within its parent node
* @param {DOMObject} node node to check
* @return {Integer} index in the parent node or false if no node given or node has no parent
* @method
*/
getIndexInParent: function (node) {
if (!node) {
return false;
}
var
index = 0,
check = node.previousSibling;
while(check) {
index++;
check = check.previousSibling;
}
return index;
},
/**
* Check whether the given node is a blocklevel element
* @param {DOMObject} node node to check
* @return {boolean} true if yes, false if not (or null)
* @method
*/
isBlockLevelElement: function (node) {
if (!node) {
return false;
}
if (node.nodeType === 1 && jQuery.inArray(node.nodeName.toLowerCase(), this.blockLevelElements) >= 0) {
return true;
} else {
return false;
}
},
/**
* Check whether the given node is a linebreak element
* @param {DOMObject} node node to check
* @return {boolean} true for linebreak elements, false for everything else
* @method
*/
isLineBreakElement: function (node) {
if (!node) {
return false;
}
return node.nodeType === 1 && node.nodeName.toLowerCase() == 'br';
},
/**
* Check whether the given node is a list element
* @param {DOMObject} node node to check
* @return {boolean} true for list elements (li, ul, ol), false for everything else
* @method
*/
isListElement: function (node) {
if (!node) {
return false;
}
return node.nodeType === 1 && jQuery.inArray(node.nodeName.toLowerCase(), this.listElements) >= 0;
},
/**
* This method checks, whether the passed dom object is a dom object, that would
* be split in cases of pressing enter. This currently is true for paragraphs
* and headings
* @param {DOMObject} el
* dom object to check
* @return {boolean} true for split objects, false for other
* @method
*/
isSplitObject: function(el) {
if (el.nodeType === 1){
switch(el.nodeName.toLowerCase()) {
case 'p':
case 'h1':
case 'h2':
case 'h3':
case 'h4':
case 'h5':
case 'h6':
case 'li':
return true;
}
}
return false;
},
/**
* Starting with the given position (between nodes), search in the given direction to an adjacent notempty text node
* @param {DOMObject} parent parent node containing the position
* @param {Integer} index index of the position within the parent node
* @param {boolean} searchleft true when search direction is 'left' (default), false for 'right'
* @param {object} stopat define at which types of element we shall stop, may contain the following properties
* <pre>
* - blocklevel (default: true)
* - list (default: true)
* - linebreak (default: true)
* </pre>
* @return {DOMObject} the found text node or false if none found
* @method
*/
searchAdjacentTextNode: function (parent, index, searchleft, stopat) {
if (!parent || parent.nodeType !== 1 || index < 0 || index > parent.childNodes.length) {
return false;
}
if (typeof stopat === 'undefined') {
stopat = {'blocklevel' : true, 'list' : true, 'linebreak' : true};
}
if (typeof stopat.blocklevel === 'undefined') {
stopat.blocklevel = true;
}
if (typeof stopat.list === 'undefined') {
stopat.list = true;
}
if (typeof stopat.linebreak === 'undefined') {
stopat.linebreak = true;
}
if (typeof searchleft === 'undefined') {
searchleft = true;
}
var
nextNode,
currentParent = parent;
// start at the node left/right of the given position
if (searchleft && index > 0) {
nextNode = parent.childNodes[index - 1];
}
if (!searchleft && index < parent.childNodes.length) {
nextNode = parent.childNodes[index];
}
//currentParent is not a number therefore it is sufficient to directly test for it with while(currentParent)
//otherwise there would be an error if the object is null
while (currentParent) {
//while (typeof currentParent !== 'undefined') {
if (!nextNode) {
// no next node found, check whether the parent is a blocklevel element
if (stopat.blocklevel && this.isBlockLevelElement(currentParent)) {
// do not leave block level elements
return false;
} else if (stopat.list && this.isListElement(currentParent)) {
// do not leave list elements
return false;
} else {
// continue with the parent
nextNode = searchleft ? currentParent.previousSibling : currentParent.nextSibling;
currentParent = currentParent.parentNode;
}
} else if (nextNode.nodeType === 3 && jQuery.trim(nextNode.data).length > 0) {
// we are lucky and found a notempty text node
return nextNode;
} else if (stopat.blocklevel && this.isBlockLevelElement(nextNode)) {
// we found a blocklevel element, stop here
return false;
} else if (stopat.linebreak && this.isLineBreakElement(nextNode)) {
// we found a linebreak, stop here
return false;
} else if (stopat.list && this.isListElement(nextNode)) {
// we found a linebreak, stop here
return false;
} else if (nextNode.nodeType === 3) {
// we found an empty text node, so step to the next
nextNode = searchleft ? nextNode.previousSibling : nextNode.nextSibling;
} else {
// we found a non-blocklevel element, step into
currentParent = nextNode;
nextNode = searchleft ? nextNode.lastChild : nextNode.firstChild;
}
}
},
/**
 * Insert the given DOM Object into the start/end of the given range. The method
 * will find the appropriate place in the DOM tree for inserting the given
 * object, and will eventually split elements in between. The given range will
 * be updated if necessary. The updated range will NOT embrace the inserted
 * object, which means that the object is actually inserted before or after the
 * given range (depending on the atEnd parameter)
 *
 * @param {jQuery}
 *            object object to insert into the DOM
 * @param {GENTICS.Utils.RangeObject}
 *            range range where to insert the object (at start or end)
 * @param {jQuery}
 *            limit limiting object(s) of the DOM modification
 * @param {boolean}
 *            atEnd true when the object shall be inserted at the end, false for
 *            insertion at the start (default)
 * @param {boolean}
 *            force true when the insertion shall be done, even if inserting the element
 *            would not be allowed, false to deny inserting unallowed elements (default)
 * @return true if the object could be inserted, false if not.
 * @method
 */
insertIntoDOM: function (object, range, limit, atEnd, force) {
	// first find the appropriate place to insert the given object
	var parentElements = range.getContainerParents(limit, atEnd),
		that = this,
		newParent,
		container, offset, splitParts, contents;
	// NOTE(review): the default limit is applied only AFTER
	// range.getContainerParents(limit, atEnd) was already called with the
	// possibly undefined limit — confirm getContainerParents tolerates a
	// missing limit itself.
	if (!limit) {
		limit = jQuery(document.body);
	}
	// if no parent elements exist (up to the limit), the new parent will be the
	// limiter itself
	if (parentElements.length === 0) {
		newParent = limit.get(0);
	} else {
		// pick the first ancestor (inside the limit) that may legally
		// contain the object
		jQuery.each(parentElements, function (index, parent) {
			if (that.allowsNesting(parent, object.get(0))) {
				newParent = parent;
				// returning false stops jQuery.each
				return false;
			}
		});
	}
	if (typeof newParent === 'undefined' && limit.length > 0) {
		// found no possible new parent, so split up to the limit object
		newParent = limit.get(0);
	}
	// check whether it is allowed to insert the element at all
	if (!this.allowsNesting(newParent, object.get(0)) && !force) {
		return false;
	}
	if (typeof newParent !== 'undefined') {
		// we found a possible new parent, so we split the DOM up to the new parent
		splitParts = this.split(range, jQuery(newParent), atEnd);
		if (splitParts === true) {
			// DOM was not split (there was no need to split it), insert the new object anyway
			container = range.startContainer;
			offset = range.startOffset;
			if (atEnd) {
				container = range.endContainer;
				offset = range.endOffset;
			}
			if (offset === 0) {
				// insert right before the first element in the container
				contents = jQuery(container).contents();
				if (contents.length > 0) {
					contents.eq(0).before(object);
				} else {
					// container is empty: just append
					jQuery(container).append(object);
				}
				return true;
			} else {
				// insert right after the element at offset-1
				jQuery(container).contents().eq(offset-1).after(object);
				return true;
			}
		} else if (splitParts) {
			// if the DOM could be split, we insert the new object in between the split parts
			splitParts.eq(0).after(object);
			return true;
		} else {
			// could not split, so could not insert
			return false;
		}
	} else {
		// found no possible new parent, so we shall not insert
		return false;
	}
},
/**
 * Remove the given DOM object from the DOM and modify the given range to reflect the user expected range after the object was removed
 * TODO: finish this — only the preserveContent == true branch is implemented;
 * the other branch currently does nothing.
 * @param {DOMObject} object DOM object to remove
 * @param {GENTICS.Utils.RangeObject} range range which eventually be modified
 * @param {boolean} preserveContent true if the contents of the removed DOM object shall be preserved, false if not (default: false)
 * @return true if the DOM object could be removed, false if not
 *         (NOTE(review): nothing is actually returned yet — callers must
 *         not rely on the return value until the TODO is resolved)
 * @hide
 */
removeFromDOM: function (object, range, preserveContent) {
	if (preserveContent) {
		// check whether the range will need modification
		var indexInParent = this.getIndexInParent(object),
			numChildren = jQuery(object).contents().length,
			parent = object.parentNode;
		// start point lies behind the removed object inside the parent:
		// it shifts by the number of unwrapped children minus the removed wrapper
		if (range.startContainer == parent && range.startOffset > indexInParent) {
			range.startOffset += numChildren - 1;
		} else if (range.startContainer == object) {
			// start point lies inside the removed object: rebase onto the parent
			range.startContainer = parent;
			range.startOffset = indexInParent + range.startOffset;
		}
		// same adjustments for the end point
		if (range.endContainer == parent && range.endOffset > indexInParent) {
			range.endOffset += numChildren - 1;
		} else if (range.endContainer == object) {
			range.endContainer = parent;
			range.endOffset = indexInParent + range.endOffset;
		}
		// we simply unwrap the children of the object
		jQuery(object).contents().unwrap();
		// optionally do cleanup
		this.doCleanup({'merge' : true}, range, parent);
	} else {
		// TODO removal without preserving the contents is not implemented yet
	}
},
/**
* Remove the content defined by the given range from the DOM. Update the given
* range object to be a collapsed selection at the place of the previous
* selection.
* @param rangeObject range object
* @return true if the range could be removed, false if not
*/
removeRange: function (rangeObject) {
if (!rangeObject) {
// no range given
return false;
}
if (rangeObject.isCollapsed()) {
// the range is collapsed, nothing to delete
return false;
}
// split partially contained text nodes at the start and end of the range
if (rangeObject.startContainer.nodeType == 3 && rangeObject.startOffset > 0
&& rangeObject.startOffset < rangeObject.startContainer.data.length) {
this.split(rangeObject, jQuery(rangeObject.startContainer).parent(),
false);
}
if (rangeObject.endContainer.nodeType == 3 && rangeObject.endOffset > 0
&& rangeObject.endOffset < rangeObject.endContainer.data.length) {
this.split(rangeObject, jQuery(rangeObject.endContainer).parent(),
true);
}
// construct the range tree
var rangeTree = rangeObject.getRangeTree();
// collapse the range
rangeObject.endContainer = rangeObject.startContainer;
rangeObject.endOffset = rangeObject.startOffset;
// remove the markup from the range tree
this.recursiveRemoveRange(rangeTree, rangeObject);
// do some cleanup
this.doCleanup({'merge' : true}, rangeObject);
// this.doCleanup({'merge' : true, 'removeempty' : true}, rangeObject);
// clear the caches of the range object
rangeObject.clearCaches();
},
/**
 * Recursive helper for removeRange: remove every node that is fully
 * contained in the range (as marked in the range tree) and recurse into
 * partially contained nodes. Updates the (collapsed) rangeObject when a
 * removed node contained its start container.
 * @param {Array} rangeTree range tree as returned by rangeObject.getRangeTree()
 * @param {GENTICS.Utils.RangeObject} rangeObject collapsed range to be updated
 * @hide
 */
recursiveRemoveRange: function (rangeTree, rangeObject) {
	// iterate over the rangetree objects of this level
	for (var i = 0; i < rangeTree.length; ++i) {
		// check for nodes fully in the range
		if (rangeTree[i].type == 'full') {
			// if the domobj is the startcontainer, or the startcontainer is inside the domobj, we need to update the rangeObject
			if (jQuery(rangeObject.startContainer).parents().andSelf().filter(rangeTree[i].domobj).length > 0) {
				// move the collapsed range to the position of the removed node within its parent
				rangeObject.startContainer = rangeObject.endContainer = rangeTree[i].domobj.parentNode;
				rangeObject.startOffset = rangeObject.endOffset = this.getIndexInParent(rangeTree[i].domobj);
			}
			// remove the object from the DOM
			jQuery(rangeTree[i].domobj).remove();
		} else if (rangeTree[i].type == 'partial' && rangeTree[i].children) {
			// node partially selected and has children, so do recursion
			this.recursiveRemoveRange(rangeTree[i].children, rangeObject);
		}
	}
},
/**
 * Extend the given range to have start and end at the nearest word boundaries to the left (start) and right (end)
 * @param {GENTICS.Utils.RangeObject} range range to be extended
 * @param {boolean} fromBoundaries true if extending will also be done, if one or both ends of the range already are at a word boundary, false if not, default: false
 * @method
 */
extendToWord: function (range, fromBoundaries) {
	// search the word boundaries to the left and right
	var leftBoundary = this.searchWordBoundary(range.startContainer, range.startOffset, true),
		rightBoundary = this.searchWordBoundary(range.endContainer, range.endOffset, false);
	// check whether we must not extend the range from word boundaries
	if (!fromBoundaries) {
		// we only extend the range if BOTH ends would change: if either end
		// already sits exactly at its word boundary, leave the range alone
		if (range.startContainer == leftBoundary.container && range.startOffset == leftBoundary.offset) {
			return;
		}
		if (range.endContainer == rightBoundary.container && range.endOffset == rightBoundary.offset) {
			return;
		}
	}
	// set the new boundaries
	range.startContainer = leftBoundary.container;
	range.startOffset = leftBoundary.offset;
	range.endContainer = rightBoundary.container;
	range.endOffset = rightBoundary.offset;
	// correct the range
	range.correctRange();
	// clear caches
	range.clearCaches();
},
/**
* Helper method to check whether the given DOM object is a word boundary.
* @param {DOMObject} object DOM object in question
* @return {boolean} true when the DOM object is a word boundary, false if not
* @hide
*/
isWordBoundaryElement: function (object) {
if (!object || !object.nodeName) {
return false;
}
return jQuery.inArray(object.nodeName.toLowerCase(), this.nonWordBoundaryTags) == -1;
},
/**
 * Search for the next word boundary, starting at the given position
 * @param {DOMObject} container container of the start position
 * @param {Integer} offset offset of the start position
 * @param {boolean} searchleft true for searching to the left, false for searching to the right (default: true)
 * @return {object} object with properties 'container' and 'offset' marking the found word boundary
 * @method
 */
searchWordBoundary: function (container, offset, searchleft) {
	if (typeof searchleft === 'undefined') {
		searchleft = true;
	}
	var boundaryFound = false, wordBoundaryPos, tempWordBoundaryPos, textNode;
	while (!boundaryFound) {
		// check the node type
		if (container.nodeType === 3) {
			// we are currently in a text node
			// find the nearest word boundary character
			if (!searchleft) {
				// search right
				wordBoundaryPos = container.data.substring(offset).search(this.nonWordRegex);
				if (wordBoundaryPos != -1) {
					// found a word boundary
					offset = offset + wordBoundaryPos;
					boundaryFound = true;
				} else {
					// found no word boundary, so we set the position after the container
					offset = this.getIndexInParent(container) + 1;
					container = container.parentNode;
				}
			} else {
				// search left: find the LAST boundary character before the
				// offset by repeatedly searching behind the previous match
				// until no further match is found
				wordBoundaryPos = container.data.substring(0, offset).search(this.nonWordRegex);
				tempWordBoundaryPos = wordBoundaryPos;
				while (tempWordBoundaryPos != -1) {
					wordBoundaryPos = tempWordBoundaryPos;
					tempWordBoundaryPos = container.data.substring(
						wordBoundaryPos + 1, offset).search(this.nonWordRegex);
					if (tempWordBoundaryPos != -1) {
						// rebase the match position onto the full string
						tempWordBoundaryPos = tempWordBoundaryPos + wordBoundaryPos + 1;
					}
				}
				if (wordBoundaryPos != -1) {
					// found a word boundary
					offset = wordBoundaryPos + 1;
					boundaryFound = true;
				} else {
					// found no word boundary, so we set the position before the container
					offset = this.getIndexInParent(container);
					container = container.parentNode;
				}
			}
		} else if (container.nodeType === 1) {
			// we are currently in an element node (between nodes)
			if (!searchleft) {
				// check whether there is an element to the right
				if (offset < container.childNodes.length) {
					// there is an element to the right, check whether it is a word boundary element
					if (this.isWordBoundaryElement(container.childNodes[offset])) {
						// we are done
						boundaryFound = true;
					} else {
						// element to the right is no word boundary, so enter it
						container = container.childNodes[offset];
						offset = 0;
					}
				} else {
					// no element to the right, check whether the element itself is a boundary element
					if (this.isWordBoundaryElement(container)) {
						// we are done
						boundaryFound = true;
					} else {
						// element itself is no boundary element, so go to parent
						offset = this.getIndexInParent(container) + 1;
						container = container.parentNode;
					}
				}
			} else {
				// check whether there is an element to the left
				if (offset > 0) {
					// there is an element to the left, check whether it is a word boundary element
					if (this.isWordBoundaryElement(container.childNodes[offset - 1])) {
						// we are done
						boundaryFound = true;
					} else {
						// element to the left is no word boundary, so enter it
						container = container.childNodes[offset - 1];
						offset = container.nodeType === 3 ? container.data.length : container.childNodes.length;
					}
				} else {
					// no element to the left, check whether the element itself is a boundary element
					if (this.isWordBoundaryElement(container)) {
						// we are done
						boundaryFound = true;
					} else {
						// element itself is no boundary element, so go to parent
						offset = this.getIndexInParent(container);
						container = container.parentNode;
					}
				}
			}
		}
	}
	// if the boundary ended up between nodes, try to move it into an
	// adjacent non-empty text node for a more precise position
	if (container.nodeType !== 3) {
		textNode = this.searchAdjacentTextNode(container, offset, !searchleft);
		if (textNode) {
			container = textNode;
			offset = searchleft ? 0 : container.data.length;
		}
	}
	return {'container' : container, 'offset' : offset};
},
/**
* Check whether the given dom object is empty
* @param {DOMObject} domObject object to check
* @return {boolean} true when the object is empty, false if not
* @method
*/
isEmpty: function (domObject) {
// a non dom object is considered empty
if (!domObject) {
return true;
}
// some tags are considered to be non-empty
if (jQuery.inArray(domObject.nodeName.toLowerCase(), this.nonEmptyTags) != -1) {
return false;
}
// text nodes are not empty, if they contain non-whitespace characters
if (domObject.nodeType === 3) {
return domObject.data.search(/\S/) == -1;
}
// all other nodes are not empty if they contain at least one child which is not empty
for (var i = 0, childNodes = domObject.childNodes.length; i < childNodes; ++i) {
if (!this.isEmpty(domObject.childNodes[i])) {
return false;
}
}
// found no contents, so the element is empty
return true;
},
/**
* Set the cursor (collapsed selection) right after the given DOM object
* @param domObject DOM object
* @method
*/
setCursorAfter: function (domObject) {
var
newRange = new GENTICS.Utils.RangeObject(),
index = this.getIndexInParent(domObject),
targetNode,
offset;
// selection cannot be set between to TEXT_NODEs
// if domOject is a Text node set selection at last position in that node
if ( domObject.nodeType == 3) {
targetNode = domObject;
offset = targetNode.nodeValue.length;
// if domOject is a Text node set selection at last position in that node
} else if ( domObject.nextSibling && domObject.nextSibling.nodeType == 3) {
targetNode = domObject.nextSibling;
offset = 0;
} else {
targetNode = domObject.parentNode;
offset = this.getIndexInParent(domObject) + 1;
}
newRange.startContainer = newRange.endContainer = targetNode;
newRange.startOffset = newRange.endOffset = offset;
// select the range
newRange.select();
return newRange;
},
/**
* Select a DOM node
* will create a new range which spans the provided dom node and selects it afterwards
* @param domObject DOM object
* @method
*/
selectDomNode: function (domObject) {
var newRange = new GENTICS.Utils.RangeObject();
newRange.startContainer = newRange.endContainer = domObject.parentNode;
newRange.startOffset = this.getIndexInParent(domObject);
newRange.endOffset = newRange.startOffset + 1;
newRange.select();
},
/**
* Set the cursor (collapsed selection) at the start into the given DOM object
* @param domObject DOM object
* @method
*/
setCursorInto: function (domObject) {
// set a new range into the given dom object
var newRange = new GENTICS.Utils.RangeObject();
newRange.startContainer = newRange.endContainer = domObject;
newRange.startOffset = newRange.endOffset = 0;
// select the range
newRange.select();
},
/**
 * "An editing host is a node that is either an Element with a contenteditable
 * attribute set to the true state, or the Element child of a Document whose
 * designMode is enabled."
 * @param node DOM node to check
 * @return truthy when the node is an editing host, falsy otherwise
 * @method
 */
isEditingHost: function (node) {
	return node
		&& node.nodeType == 1 //ELEMENT_NODE
		&& (node.contentEditable == "true"
			|| (node.parentNode
				&& node.parentNode.nodeType == 9 //DOCUMENT_NODE
				&& node.parentNode.designMode == "on"));
},
/**
* "Something is editable if it is a node which is not an editing host, does
* not have a contenteditable attribute set to the false state, and whose
* parent is an editing host or editable."
* @param domObject DOM object
* @method
*/
isEditable: function (node) {
// This is slightly a lie, because we're excluding non-HTML elements with
// contentEditable attributes.
return node
&& !this.isEditingHost(node)
&& (node.nodeType != 1 || node.contentEditable != "false") // ELEMENT_NODE
&& (this.isEditingHost(node.parentNode) || this.isEditable(node.parentNode));
},
/**
* "The editing host of node is null if node is neither editable nor an editing
* host; node itself, if node is an editing host; or the nearest ancestor of
* node that is an editing host, if node is editable."
* @param domObject DOM object
* @method
*/
getEditingHostOf: function(node) {
if (this.isEditingHost(node)) {
return node;
} else if (this.isEditable(node)) {
var ancestor = node.parentNode;
while (!this.isEditingHost(ancestor)) {
ancestor = ancestor.parentNode;
}
return ancestor;
} else {
return null;
}
},
/**
*
* "Two nodes are in the same editing host if the editing host of the first is
* non-null and the same as the editing host of the second."
* @param node1 DOM object
* @param node2 DOM object
* @method
*/
inSameEditingHost: function (node1, node2) {
return this.getEditingHostOf(node1)
&& this.getEditingHostOf(node1) == this.getEditingHostOf(node2);
},
// "A block node is either an Element whose "display" property does not have
// resolved value "inline" or "inline-block" or "inline-table" or "none", or a
// Document, or a DocumentFragment."
isBlockNode: function (node) {
return node
&& ((node.nodeType == $_.Node.ELEMENT_NODE && $_( ["inline", "inline-block", "inline-table", "none"] ).indexOf($_.getComputedStyle(node).display) == -1)
|| node.nodeType == $_.Node.DOCUMENT_NODE
|| node.nodeType == $_.Node.DOCUMENT_FRAGMENT_NODE);
},
/**
* Get the first visible child of the given node.
* @param node node
* @param includeNode when set to true, the node itself may be returned, otherwise only children are allowed
* @return first visible child or null if none found
*/
getFirstVisibleChild: function (node, includeNode) {
// no node -> no child
if (!node) {
return null;
}
// check whether the node itself is visible
if ((node.nodeType == $_.Node.TEXT_NODE && this.isEmpty(node))
|| (node.nodeType == $_.Node.ELEMENT_NODE && node.offsetHeight == 0 && jQuery.inArray(node.nodeName.toLowerCase(), this.nonEmptyTags) === -1)) {
return null;
}
// if the node is a text node, or does not have children, or is not editable, it is the first visible child
if (node.nodeType == $_.Node.TEXT_NODE
|| (node.nodeType == $_.Node.ELEMENT_NODE && node.childNodes.length == 0)
|| !jQuery(node).contentEditable()) {
return includeNode ? node : null;
}
// otherwise traverse through the children
for (var i = 0; i < node.childNodes.length; ++i) {
var visibleChild = this.getFirstVisibleChild(node.childNodes[i], true);
if (visibleChild != null) {
return visibleChild;
}
}
return null;
},
/**
 * Get the last visible child of the given node.
 * @param node node
 * @param includeNode when set to true, the node itself may be returned, otherwise only children are allowed
 * @return last visible child or null if none found
 */
getLastVisibleChild: function (node, includeNode) {
	// no node -> no child
	if (!node) {
		return null;
	}
	// check whether the node itself is visible
	if ((node.nodeType == $_.Node.TEXT_NODE && this.isEmpty(node))
			|| (node.nodeType == $_.Node.ELEMENT_NODE && node.offsetHeight == 0 && jQuery.inArray(node.nodeName.toLowerCase(), this.nonEmptyTags) === -1)) {
		return null;
	}
	// if the node is a text node, or does not have children, or is not editable, it is the last visible child
	if (node.nodeType == $_.Node.TEXT_NODE
			|| (node.nodeType == $_.Node.ELEMENT_NODE && node.childNodes.length == 0)
			|| !jQuery(node).contentEditable()) {
		return includeNode ? node : null;
	}
	// otherwise traverse through the children, back to front
	for (var i = node.childNodes.length - 1; i >= 0; --i) {
		var visibleChild = this.getLastVisibleChild(node.childNodes[i], true);
		if (visibleChild != null) {
			return visibleChild;
		}
	}
	return null;
}
});
/**
* Create the singleton object
* @hide
*/
GENTICS.Utils.Dom = new Dom();
return GENTICS.Utils.Dom;
}); | PypiClean |
/MCRAMP-0.0.3-py3-none-any.whl/mcramp/scat/collimator_lin.py | from .sprim import SPrim
import numpy as np
import pyopencl as cl
import pyopencl.array as clarr
import os
import re
class SLinearCollimator(SPrim):
    """Scattering kernel for the Linear Collimator component.

    Recreates the functionality of the Collimator_linear component in
    McStas: neutrons with divergence exceeding that permitted by the
    collimator are terminated.

    Parameters
    ----------
    length : float
        Flight path length of the collimator.
    divergence_H : float
        Maximum horizontal divergence accepted by the collimator
        (given in arc minutes, converted internally to a slope).
    divergence_V : float
        Maximum vertical divergence accepted by the collimator
        (given in arc minutes, converted internally to a slope).
    transmission : float
        Transmission coefficient of the collimator.

    Methods
    -------
    Data
        None
    Plot
        None
    Save
        None
    """

    def __init__(self, length=0.0, divergence_H=0.0, divergence_V=0.0,
                 transmission=1.0, idx=0, ctx=0, **kwargs):
        # divergences are given in arc minutes: convert to radians before
        # turning them into slopes for the kernel
        arcmin_to_rad = np.pi / (60.0 * 180.0)

        self.length = np.float32(length)
        self.slope_H = np.float32(np.tan(divergence_H * arcmin_to_rad))
        self.slope_V = np.float32(np.tan(divergence_V * arcmin_to_rad))
        self.transmission = np.float32(transmission)
        self.idx = np.uint32(idx)

        # compile the OpenCL kernel that lives next to this module
        this_dir = os.path.dirname(os.path.abspath(__file__))
        with open(os.path.join(this_dir, 'collimator_lin.cl'), mode='r') as f:
            self.prg = cl.Program(ctx, f.read()).build(options=r'-I "{}/include"'.format(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))

    def scatter_prg(self, queue, N, neutron_buf, intersection_buf, iidx_buf):
        """Enqueue the collimator kernel for N neutrons on the given queue."""
        self.prg.collimator(queue, (N, ), None,
                            neutron_buf, intersection_buf, iidx_buf,
                            self.idx, self.length, self.slope_H,
                            self.slope_V, self.transmission)
/Flask-Cache-0.13.1.tar.gz/Flask-Cache-0.13.1/docs/_themes/flask_theme_support.py | from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace, Punctuation, Other, Literal
class FlaskyStyle(Style):
    """Pygments syntax-highlighting style for the Flask Sphinx theme.

    Light colour scheme on an off-white background. Token types without an
    entry fall back to the Pygments defaults; notes on individual choices
    are kept inline next to each token.
    """
    # page background shown behind highlighted code blocks
    background_color = "#f8f8f8"
    # no extra style applied to tokens that are not listed below
    default_style = ""

    styles = {
        # No corresponding class for the following:
        #Text:                     "", # class:  ''
        Whitespace:                "underline #f8f8f8",      # class: 'w'
        Error:                     "#a40000 border:#ef2929", # class: 'err'
        Other:                     "#000000",                # class 'x'

        Comment:                   "italic #8f5902", # class: 'c'
        Comment.Preproc:           "noitalic",       # class: 'cp'

        Keyword:                   "bold #004461",   # class: 'k'
        Keyword.Constant:          "bold #004461",   # class: 'kc'
        Keyword.Declaration:       "bold #004461",   # class: 'kd'
        Keyword.Namespace:         "bold #004461",   # class: 'kn'
        Keyword.Pseudo:            "bold #004461",   # class: 'kp'
        Keyword.Reserved:          "bold #004461",   # class: 'kr'
        Keyword.Type:              "bold #004461",   # class: 'kt'

        Operator:                  "#582800",        # class: 'o'
        Operator.Word:             "bold #004461",   # class: 'ow' - like keywords

        Punctuation:               "bold #000000",   # class: 'p'

        # because special names such as Name.Class, Name.Function, etc.
        # are not recognized as such later in the parsing, we choose them
        # to look the same as ordinary variables.
        Name:                      "#000000",        # class: 'n'
        Name.Attribute:            "#c4a000",        # class: 'na' - to be revised
        Name.Builtin:              "#004461",        # class: 'nb'
        Name.Builtin.Pseudo:       "#3465a4",        # class: 'bp'
        Name.Class:                "#000000",        # class: 'nc' - to be revised
        Name.Constant:             "#000000",        # class: 'no' - to be revised
        Name.Decorator:            "#888",           # class: 'nd' - to be revised
        Name.Entity:               "#ce5c00",        # class: 'ni'
        Name.Exception:            "bold #cc0000",   # class: 'ne'
        Name.Function:             "#000000",        # class: 'nf'
        Name.Property:             "#000000",        # class: 'py'
        Name.Label:                "#f57900",        # class: 'nl'
        Name.Namespace:            "#000000",        # class: 'nn' - to be revised
        Name.Other:                "#000000",        # class: 'nx'
        Name.Tag:                  "bold #004461",   # class: 'nt' - like a keyword
        Name.Variable:             "#000000",        # class: 'nv' - to be revised
        Name.Variable.Class:       "#000000",        # class: 'vc' - to be revised
        Name.Variable.Global:      "#000000",        # class: 'vg' - to be revised
        Name.Variable.Instance:    "#000000",        # class: 'vi' - to be revised

        Number:                    "#990000",        # class: 'm'

        Literal:                   "#000000",        # class: 'l'
        Literal.Date:              "#000000",        # class: 'ld'

        String:                    "#4e9a06",        # class: 's'
        String.Backtick:           "#4e9a06",        # class: 'sb'
        String.Char:               "#4e9a06",        # class: 'sc'
        String.Doc:                "italic #8f5902", # class: 'sd' - like a comment
        String.Double:             "#4e9a06",        # class: 's2'
        String.Escape:             "#4e9a06",        # class: 'se'
        String.Heredoc:            "#4e9a06",        # class: 'sh'
        String.Interpol:           "#4e9a06",        # class: 'si'
        String.Other:              "#4e9a06",        # class: 'sx'
        String.Regex:              "#4e9a06",        # class: 'sr'
        String.Single:             "#4e9a06",        # class: 's1'
        String.Symbol:             "#4e9a06",        # class: 'ss'

        Generic:                   "#000000",        # class: 'g'
        Generic.Deleted:           "#a40000",        # class: 'gd'
        Generic.Emph:              "italic #000000", # class: 'ge'
        Generic.Error:             "#ef2929",        # class: 'gr'
        Generic.Heading:           "bold #000080",   # class: 'gh'
        Generic.Inserted:          "#00A000",        # class: 'gi'
        Generic.Output:            "#888",           # class: 'go'
        Generic.Prompt:            "#745334",        # class: 'gp'
        Generic.Strong:            "bold #000000",   # class: 'gs'
        Generic.Subheading:        "bold #800080",   # class: 'gu'
        Generic.Traceback:         "bold #a40000",   # class: 'gt'
    }
/GeoNode-3.2.0-py3-none-any.whl/geonode/monitoring/frontend/monitoring/src/components/cels/geonode-status/index.js | import React from 'react';
import PropTypes from 'prop-types';
import { connect } from 'react-redux';
import SelectField from 'material-ui/SelectField';
import MenuItem from 'material-ui/MenuItem';
import AverageCPU from '../../molecules/average-cpu';
import AverageMemory from '../../molecules/average-memory';
import styles from './styles';
import actions from './actions';
// expose only the slices of the redux store that this component reads
const mapStateToProps = (state) => {
  return {
    cpu: state.geonodeCpuStatus.response,
    interval: state.interval.interval,
    mem: state.geonodeMemStatus.response,
    services: state.services.hostgeonode,
    timestamp: state.interval.timestamp,
  };
};
@connect(mapStateToProps, actions)
class GeonodeStatus extends React.Component {
static propTypes = {
cpu: PropTypes.object,
getCpu: PropTypes.func.isRequired,
getMem: PropTypes.func.isRequired,
interval: PropTypes.number,
mem: PropTypes.object,
resetCpu: PropTypes.func.isRequired,
resetMem: PropTypes.func.isRequired,
services: PropTypes.array,
timestamp: PropTypes.instanceOf(Date),
}
constructor(props) {
super(props);
this.state = {
host: '',
};
this.get = (
host = this.state.host,
interval = this.props.interval,
) => {
this.props.getCpu(host, interval);
this.props.getMem(host, interval);
};
this.handleChange = (event, target, host) => {
this.setState({ host });
this.get();
};
}
componentWillReceiveProps(nextProps) {
if (nextProps && nextProps.services && nextProps.timestamp) {
let host = nextProps.services[0].name;
let firstTime = false;
if (this.state.host === '') {
firstTime = true;
this.setState({ host });
} else {
host = this.state.host;
}
if (firstTime || nextProps.timestamp !== this.props.timestamp) {
this.get(host, nextProps.interval);
}
}
}
componentWillUnmount() {
this.props.resetCpu();
this.props.resetMem();
}
render() {
let cpu = 0;
if (this.props.cpu) {
cpu = undefined;
const data = this.props.cpu.data.data;
if (data.length > 0) {
if (data[0].data.length > 0) {
const metric = data[0].data[0];
const value = Number(metric.val);
if (value > 1) {
cpu = Math.floor(value);
} else {
cpu = Number(value.toFixed(2));
}
}
}
}
let mem = 0;
if (this.props.mem) {
mem = undefined;
const data = this.props.mem.data.data;
if (data.length > 0) {
if (data[0].data.length > 0) {
const metric = data[0].data[0];
const value = Number(metric.val);
if (value > 1) {
mem = Math.floor(value);
} else {
mem = Number(value.toFixed(2));
}
}
}
}
const hosts = this.props.services
? this.props.services.map((host) =>
<MenuItem
key={host.name}
value={host.name}
primaryText={ `${host.name} [${host.host}]` }
/>
)
: undefined;
return (
<div style={styles.content}>
<SelectField
floatingLabelText="Host"
value={this.state.host}
onChange={this.handleChange}
>
{hosts}
</SelectField>
<h5>GeoNode HW Status</h5>
<div style={styles.geonode}>
<AverageCPU cpu={cpu} />
<AverageMemory mem={mem} />
</div>
</div>
);
}
}
export default GeonodeStatus; | PypiClean |
/CoWIN_API_by_Kunal_Kumar_Sahoo-1.0.0-py3-none-any.whl/cowin_api/api.py | from typing import Union, List
from cowin_api.base_api import BaseApi
from cowin_api.constants import Constants
from cowin_api.utils import today, filter_centers_by_age_limit
class CoWinAPI(BaseApi):
    """Thin client for the public CoWIN vaccination-slot APIs."""

    def get_states(self):
        """Return the list of Indian states known to CoWIN."""
        url = Constants.states_list_url
        return self._call_api(url)

    def get_districts(self, state_id: str):
        """Return the districts of the state identified by ``state_id``."""
        url = f"{Constants.districts_list_url}/{state_id}"
        return self._call_api(url)

    def get_availability_by_base(self, caller: str,
                                 areas: Union[str, List[str]],
                                 date: str, min_age_limt: int):
        """Shared implementation behind the public availability helpers.

        ``caller`` selects the endpoint ('district' or anything else for
        pincode), ``areas`` is one id or a list of ids, ``date`` is the
        query date and ``min_age_limt`` optionally filters centers by
        minimum age.  Kept separate so the parent functions share one
        structure.
        """
        area_type, base_url = 'pincode', Constants.availability_by_pin_code_url
        if caller == 'district':
            area_type, base_url = 'district_id', Constants.availability_by_district_url
        # Accept a single area id as well as a list of ids.
        if isinstance(areas, str):
            areas = [areas]
        # Query each area separately and merge the resulting centers.
        results = []
        for area_id in areas:
            url = f"{base_url}?{area_type}={area_id}&date={date}"
            if min_age_limt:
                curr_result = filter_centers_by_age_limit(self._call_api(url),
                                                          min_age_limt)
            else:
                curr_result = self._call_api(url)
            if curr_result:
                results += curr_result['centers']
        # Return the results in the same format as returned by the API.
        return {'centers': results}

    def get_availability_by_district(self, district_id: Union[str, List[str]],
                                     date: str = None,
                                     min_age_limt: int = None):
        """Vaccination center availability for one or more districts.

        BUG FIX: the default used to be ``date=today()``, which is
        evaluated only once at class-definition time — a long-running
        process would keep querying a stale date.  Resolve it per call.
        """
        if date is None:
            date = today()
        return self.get_availability_by_base(caller='district', areas=district_id,
                                             date=date, min_age_limt=min_age_limt)

    def get_availability_by_pincode(self, pin_code: Union[str, List[str]],
                                    date: str = None,
                                    min_age_limt: int = None):
        """Vaccination center availability for one or more PIN codes.

        ``date`` defaults to today's date, resolved at call time (see
        get_availability_by_district for the rationale).
        """
        if date is None:
            date = today()
        return self.get_availability_by_base(caller='pincode', areas=pin_code,
                                             date=date, min_age_limt=min_age_limt)
/Nuitka_fixed-1.1.2-cp310-cp310-win_amd64.whl/nuitka/build/inline_copy/lib/scons-4.4.0/SCons/Tool/PharLapCommon.py |
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import os
import os.path
import SCons.Errors
import SCons.Util
import re
def getPharLapPath():
    """Locate the Phar Lap ETS development kit via the Windows registry.

    Returns the normalized installation directory.

    Raises SCons.Errors.InternalError when no registry module is
    available, and SCons.Errors.UserError when Phar Lap ETS is not
    installed.
    """
    if not SCons.Util.can_read_reg:
        raise SCons.Errors.InternalError("No Windows registry module was found")
    try:
        key = SCons.Util.RegOpenKeyEx(SCons.Util.HKEY_LOCAL_MACHINE,
                                      'SOFTWARE\\Pharlap\\ETS')
        base_dir, _ = SCons.Util.RegQueryValueEx(key, 'BaseDir')
    except SCons.Util.RegError:
        raise SCons.Errors.UserError("Cannot find Phar Lap ETS path in the registry. Is it installed properly?")
    # The Phar Lap plug-in writes junk bytes into the registry after the
    # real path; keep only the part before the first NUL character.
    nul_pos = base_dir.find('\0')
    if nul_pos >= 0:
        base_dir = base_dir[:nul_pos]
    return os.path.normpath(base_dir)
REGEX_ETS_VER = re.compile(r'#define\s+ETS_VER\s+([0-9]+)')


def getPharLapVersion():
    """Return the installed ETS Tool Suite version as a decimal number.

    The value comes from the ETS_VER #define in the embkern.h header;
    for example Phar Lap 10.1 defines ``#define ETS_VER 1010`` and this
    function returns 1010.  Phar Lap 9.1 has no such #define, so 910 is
    returned as the default.

    Raises UserError if no installed version of Phar Lap can be found.
    """
    include_path = os.path.join(getPharLapPath(), os.path.normpath("include/embkern.h"))
    if not os.path.exists(include_path):
        raise SCons.Errors.UserError("Cannot find embkern.h in ETS include directory.\nIs Phar Lap ETS installed properly?")
    with open(include_path, 'r') as header:
        match = REGEX_ETS_VER.search(header.read())
    # Fall back to 910 (Phar Lap 9.1) when the #define is absent.
    return int(match.group(1)) if match else 910
def addPharLapPaths(env):
    """Add the Phar Lap binary, include and library paths to ``env['ENV']``
    if they are not already there, and record the installation path and
    version on the environment as PHARLAP_PATH / PHARLAP_VERSION."""
    ph_path = getPharLapPath()

    try:
        env_dict = env['ENV']
    except KeyError:
        env_dict = {}
        env['ENV'] = env_dict
    SCons.Util.AddPathIfNotExists(env_dict, 'PATH',
                                  os.path.join(ph_path, 'bin'))
    SCons.Util.AddPathIfNotExists(env_dict, 'INCLUDE',
                                  os.path.join(ph_path, 'include'))
    SCons.Util.AddPathIfNotExists(env_dict, 'LIB',
                                  os.path.join(ph_path, 'lib'))
    SCons.Util.AddPathIfNotExists(env_dict, 'LIB',
                                  os.path.join(ph_path, os.path.normpath('lib/vclib')))

    # Reuse the path computed above instead of performing a second,
    # redundant registry lookup (the original called getPharLapPath() again).
    env['PHARLAP_PATH'] = ph_path
    env['PHARLAP_VERSION'] = str(getPharLapVersion())
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4: | PypiClean |
/models/MLP/flag_reader.py | import argparse
import pickle
import os
# Libs
# Own module
from parameters import *
# Torch
def read_flag():
    """
    Build the command-line argument parser, with defaults taken from the
    constants defined in ``parameters.py`` (imported at module level via
    ``from parameters import *``), and parse the command line.
    :return: flags: an argparse.Namespace where all the input params are stored
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--data-set', default=DATA_SET, type=str, help='which data set you are chosing')
    # Model Architectural Params
    # NOTE(review): argparse's type=bool does NOT parse 'False' as falsy —
    # any non-empty string becomes True.  Verify these flags are only ever
    # driven by their defaults from parameters.py.
    parser.add_argument('--skip-connection', type=bool, default=SKIP_CONNECTION, help='The boolean flag indicates whether use skip connections')
    parser.add_argument('--use-conv', type=bool, default=USE_CONV, help='The boolean flag that indicate whether we use upconv layer if not using lorentz')
    parser.add_argument('--linear', type=list, default=LINEAR, help='The fc layers units')
    parser.add_argument('--conv-out-channel', type=list, default=CONV_OUT_CHANNEL, help='The output channel of your 1d conv')
    parser.add_argument('--conv-kernel-size', type=list, default=CONV_KERNEL_SIZE, help='The kernel size of your 1d conv')
    parser.add_argument('--conv-stride', type=list, default=CONV_STRIDE, help='The strides of your 1d conv')
    # Optimization params
    parser.add_argument('--optim', default=OPTIM, type=str, help='the type of optimizer that you want to use')
    parser.add_argument('--reg-scale', type=float, default=REG_SCALE, help='#scale for regularization of dense layers')
    parser.add_argument('--x-range', type=list, default=X_RANGE, help='columns of input parameters')
    parser.add_argument('--y-range', type=list, default=Y_RANGE, help='columns of output parameters')
    parser.add_argument('--batch-size', default=BATCH_SIZE, type=int, help='batch size (100)')
    parser.add_argument('--eval-step', default=EVAL_STEP, type=int, help='# steps between evaluations')
    parser.add_argument('--train-step', default=TRAIN_STEP, type=int, help='# steps to train on the dataSet')
    parser.add_argument('--lr', default=LEARN_RATE, type=float, help='learning rate')
    # parser.add_argument('--decay-step', default=DECAY_STEP, type=int,
    #                     help='decay learning rate at this number of steps')
    parser.add_argument('--lr-decay-rate', default=LR_DECAY_RATE, type=float,
                        help='decay learn rate by multiplying this factor')
    parser.add_argument('--stop_threshold', default=STOP_THRESHOLD, type=float,
                        help='The threshold below which training should stop')
    parser.add_argument('--dropout', default=DROPOUT, type=float,
                        help='dropout rate')
    parser.add_argument('--skip_head', default=SKIP_HEAD, type=int,
                        help='skip head')
    parser.add_argument('--skip_tail', default=SKIP_TAIL, type=list,
                        help='skip tail')
    # Data specific Params
    parser.add_argument('--geoboundary', default=GEOBOUNDARY, type=tuple, help='the boundary of the geometric data')
    parser.add_argument('--data-dir', default=DATA_DIR, type=str, help='data directory')
    parser.add_argument('--normalize-input', default=NORMALIZE_INPUT, type=bool,
                        help='whether we should normalize the input or not')
    parser.add_argument('--test-ratio', default=TEST_RATIO, type=float, help='the ratio of test case')
    parser.add_argument('--rand-seed', default=RAND_SEED, type=float, help='Random seed for train/val split')
    # Running specific
    parser.add_argument('--eval-model', default=EVAL_MODEL, type=str,
                        help='the folder name of the model that you want to evaluate')
    parser.add_argument('--use-cpu-only', type=bool, default=USE_CPU_ONLY,
                        help='The boolean flag that indicate use CPU only')
    parser.add_argument('--num-plot-compare', type=int, default=NUM_COM_PLOT_TENSORBOARD,
                        help='#Plots to store in tensorboard during training for spectra compare')
    parser.add_argument('--model-name', default=MODEL_NAME, type=str, help='name of the model')
    flags = parser.parse_args()  # This is for command line version of the code
    # flags = parser.parse_args(args = [])#This is for jupyter notebook version of the code
    # flagsVar = vars(flags)
    return flags
def save_flags(flags, save_file="flags.obj"):
    """Serialize the flags object to *save_file* with pickle so it can be
    retrieved later (typically at inference time) via load_flags().

    :param flags: the flags object to save
    :param save_file: destination path for the pickled object
    :return: None
    """
    with open(save_file, 'wb') as handle:
        pickle.dump(flags, handle)
def load_flags(save_dir, save_file="flags.obj"):
    """Inflate a pickled flags object back into memory, typically during
    evaluation after training has finished.

    :param save_dir: directory containing the pickled object
    :param save_file: file name of the pickle, usually flags.obj
    :return: the restored flags object
    """
    with open(os.path.join(save_dir, save_file), 'rb') as handle:
        return pickle.load(handle)
def write_flags_and_BVE(flags, best_validation_loss):
    """Record the run configuration together with its best validation loss.

    Usually executed at the end of training: writes a human-readable
    'parameters.txt' into the current working directory (this file is also
    attached to the generated email) and pickles the flags object via
    save_flags().

    :param flags: the flags struct containing all the parameters
    :param best_validation_loss: the best validation loss recorded in training
    :return: None
    """
    yrange = flags.y_range
    # Collapse the (potentially long) y_range list to its two endpoints so
    # the summary stays readable.
    summary = vars(flags).copy()
    summary['y_range'] = [yrange[0], yrange[-1]]
    summary['best_validation_loss'] = best_validation_loss
    print(summary)
    with open('parameters.txt', 'w') as param_file:
        print(summary, file=param_file)
    # Serialize the flags object for later reuse.
    save_flags(flags)
/Easykiwi-1.3-py3-none-any.whl/easykiwi/core.py |
import logging
import time
import sys
import kiwipy
from ._singleton import Singleton
class Kiwi(Singleton):
    """Convenience wrapper around kiwipy.

    Collects RPC, task and broadcast subscribers via decorators, then wires
    them all to a RabbitMQ communicator when :meth:`run` is called.
    """

    def __init__(self):
        self.remote = '127.0.0.1'
        self._rpcs = list()        # (callable, rpc name) pairs
        self._tasks = list()       # task subscriber callables
        self._broadcasts = list()  # (callable, subject filters) pairs

    def add_rpc(self, name=None):
        """Decorator registering the function as an RPC subscriber.

        ``name`` defaults to the decorated function's ``__name__``.
        """
        def decorator(f):
            # BUG FIX: the original assigned to the closed-over ``name``
            # inside this closure, which made it a local variable and
            # raised UnboundLocalError on ``if not name``.  Bind the
            # resolved name to a fresh variable instead.
            rpc_name = name or f.__name__
            self._rpcs.append((f, rpc_name))
            return f
        return decorator

    def add_task(self):
        """Decorator registering the function as a task subscriber."""
        def decorator(f):
            self._tasks.append(f)
            return f
        return decorator

    def add_broadcast(self, filters=None):
        """Decorator registering the function as a broadcast subscriber.

        ``filters`` is an optional list of subject filters; with no
        filters the function receives every broadcast.
        """
        def decorator(f):
            # Avoid the original mutable default argument (filters=[]);
            # ``filters or []`` preserves the old behaviour exactly.
            self._broadcasts.append((f, filters or []))
            return f
        return decorator

    def _add_rpcs(self):
        for f, name in self._rpcs:
            self.comm.add_rpc_subscriber(f, name)

    def _add_tasks(self):
        for task in self._tasks:
            self.comm.add_task_subscriber(task)

    def _add_broadcasts(self):
        for f, filters in self._broadcasts:
            if filters:
                filtered = kiwipy.BroadcastFilter(f)
                for subject in filters:
                    filtered.add_subject_filter(subject)
                self.comm.add_broadcast_subscriber(filtered)
            else:
                self.comm.add_broadcast_subscriber(f)

    def run(self, remote='localhost', secured=False):
        """Connect to RabbitMQ at ``remote``, attach all registered
        subscribers and block until interrupted (Ctrl-C / SystemExit).
        """
        self.remote = remote
        # Accept a bare host, an amqp:// URL or an amqps:// URL.
        if "amqp://" in self.remote or "amqps://" in self.remote:
            url = self.remote
        else:
            url = 'amqp://{}'.format(self.remote)
        if secured:
            url = url.replace("amqp://", "amqps://")
        self.comm = kiwipy.connect(url)
        self._add_rpcs()
        self._add_tasks()
        self._add_broadcasts()
        try:
            while True:
                time.sleep(0.25)
        except (KeyboardInterrupt, SystemExit):
            logging.warning("Manual Shutting down!!!")
            self.comm.close()
            time.sleep(0.25)
            sys.exit()
/jpfreq-0.3.0.tar.gz/jpfreq-0.3.0/README.md | # JPFreq


[](https://opensource.org/licenses/MIT)


<!-- TOC -->
* [JPFreq](#jpfreq)
* [Installation](#installation)
* [Usage](#usage)
* [Getting the most frequent words](#getting-the-most-frequent-words)
* [Reading from a file](#reading-from-a-file)
<!-- TOC -->
JPFreq is a word-frequency processor for Japanese text. It uses [Fugashi](https://github.com/polm/fugashi),
a Cython wrapper for the MeCab morphological analyser, to tokenise and analyse Japanese text.
## Installation
1. Install Fugashi and Unidic
```bash
pip install fugashi[unidic]
python3 -m unidic download
```
2. Install JPFreq
```bash
pip install jpfreq
```
## Usage
For detailed usage, see the [documentation](https://marley-mulvin-broome.github.io/JapaneseFrequencyProcessor/).
### Getting the most frequent words
```python
from jpfreq.jp_frequency_list import JapaneseFrequencyList
freq_list = JapaneseFrequencyList()
freq_list.process_line("私は猫です。")
print(freq_list.get_most_frequent())
```
### Reading from a file
```python
from jpfreq.jp_frequency_list import JapaneseFrequencyList
freq_list = JapaneseFrequencyList()
freq_list.process_file("path/to/file.txt")
print(freq_list.get_most_frequent())
```
| PypiClean |
/CubeLang-0.1.4-py3-none-any.whl/cubelang/scrambler.py | import random
from argparse import ArgumentParser
from typing import List
from cubelang.actions import Turn
from cubelang.cli.options import integer_type
from cubelang.cube import Cube
from cubelang.orientation import Orientation, Side
# noinspection PyTypeChecker
# All six cube faces a scramble may turn, in Side declaration order.
SIDES = tuple(Side)
def main():
    """Generate a random scramble and print either the turn formula or the
    resulting cube state (with -a) as command-line arguments."""
    arg_parser = ArgumentParser()
    arg_parser.add_argument("-d", dest="dimension", help="dimensions of a cube",
                            default=3, metavar="N", type=integer_type(2))
    arg_parser.add_argument("-n", dest="turns_num", help="number of turns",
                            type=integer_type(1), default=20)
    arg_parser.add_argument("-a", dest="output_args", action="store_true",
                            help="display the state of the cube after the turns instead of the formula")
    arg_parser.add_argument("-s", dest="seed", help="the seed for the pseudorandom number generator")
    args = arg_parser.parse_args()
    dim = args.dimension
    if args.seed is not None:
        random.seed(args.seed)
    actions: List[Turn] = []
    prev_side = None
    for i in range(args.turns_num):
        # Never turn the same face twice in a row.
        if prev_side is None:
            sides = SIDES
        else:
            sides = [x for x in SIDES if x != prev_side]
        # NOTE: the exact order of the random.* calls below is observable
        # behaviour when -s/--seed is given; do not reorder them.
        prev_side = random.choice(sides)
        first_index = random.randint(1, dim // 2)
        last_index = random.randint(1, first_index)
        if first_index == last_index:
            indices = [first_index]
        else:
            # [a, ..., b] — presumably Turn interprets the Ellipsis as an
            # inclusive layer-range marker; confirm in cubelang.actions.
            indices = [last_index, ..., first_index]
        turn = Turn(prev_side, indices, random.randint(1, 3))
        actions.append(turn)
    if not args.output_args:
        # Print the scramble formula as one concatenated string.
        for action in actions:
            print(str(action), end="")
        print()
    else:
        # Apply the scramble to a fresh cube and dump each face's colors
        # in a form consumable as command-line arguments.
        cube = Cube((dim,) * 3)
        orientation = Orientation()
        for action in actions:
            action.perform(cube, orientation)
        print("--front", repr(cube.get_side(orientation).colors))
        print("--right", repr(cube.get_side(orientation.to_right).colors))
        print("--left", repr(cube.get_side(orientation.to_left).colors))
        print("--back", repr(cube.get_side(orientation.to_right.to_right).colors))
        print("--top", repr(cube.get_side(orientation.to_top).colors))
        print("--bottom", repr(cube.get_side(orientation.to_bottom).colors))
if __name__ == "__main__":
main() | PypiClean |
/MacroPy-1.0.3.zip/MacroPy-1.0.3/macropy/core/walkers.py | from macropy.core import *
from ast import *
class Walker(object):
    """Depth-first AST transformer/collector.

    Wraps ``func``, a callback invoked on every node visited.  The callback
    receives the node plus control callables (``collect``, ``set_ctx``,
    ``stop``) and may return a replacement node; see ``recurse_collect``.
    """
    def __init__(self, func):
        self.func = func
    def walk_children(self, tree, ctx=None):
        # Recurse into every child of ``tree`` (AST fields or list items),
        # splice transformed children back in place, and return the values
        # collected along the way.
        if isinstance(tree, AST):
            aggregates = []
            for field, old_value in iter_fields(tree):
                # NOTE(review): this getattr re-fetch shadows the value
                # already yielded by iter_fields; redundant but harmless.
                old_value = getattr(tree, field, None)
                new_value, new_aggregate = self.recurse_collect(old_value, ctx)
                aggregates.extend(new_aggregate)
                setattr(tree, field, new_value)
            return aggregates
        elif isinstance(tree, list) and len(tree) > 0:
            aggregates = []
            new_tree = []
            for t in tree:
                new_t, new_a = self.recurse_collect(t, ctx)
                # If the callback turned one node into a list of nodes,
                # splice it flat into the parent list.
                if type(new_t) is list:
                    new_tree.extend(new_t)
                else:
                    new_tree.append(new_t)
                aggregates.extend(new_a)
            # Mutate the original list in place so the parent sees the change.
            tree[:] = new_tree
            return aggregates
        else:
            # Leaves (strings, numbers, None, empty lists): nothing to do.
            return []
    def recurse(self, tree, ctx=None):
        """Traverse the given AST and return the transformed tree."""
        return self.recurse_collect(tree, ctx)[0]
    def collect(self, tree, ctx=None):
        """Traverse the given AST and return the values collected by the
        walker function along the way."""
        return self.recurse_collect(tree, ctx)[1]
    def recurse_collect(self, tree, ctx=None):
        """Traverse the given AST and return the transformed tree together
        with any values which were collected along the way."""
        if isinstance(tree, AST) or type(tree) is Literal or type(tree) is Captured:
            aggregates = []
            stop_now = [False]
            def stop():
                stop_now[0] = True
            new_ctx = [ctx]
            def set_ctx(new):
                new_ctx[0] = new
            # Provide the function with a bunch of controls, in addition to
            # the tree itself.
            new_tree = self.func(
                tree=tree,
                ctx=ctx,
                collect=aggregates.append,
                set_ctx=set_ctx,
                stop=stop
            )
            if new_tree is not None:
                tree = new_tree
            # Unless the callback called stop(), descend into the (possibly
            # replaced) node with the (possibly replaced) context.
            if not stop_now[0]:
                aggregates.extend(self.walk_children(tree, new_ctx[0]))
        else:
            aggregates = self.walk_children(tree, ctx)
        return tree, aggregates
/* Minified vendor bundle: TinyMCE "importcss" plugin (auto-generated).
 * Do not hand-edit — regenerate from the unminified TinyMCE plugin source.
 * It scans the editor's content CSS stylesheets and turns class selectors
 * into entries in the Formats menu. */
!function(){"use strict";var e=tinymce.util.Tools.resolve("tinymce.PluginManager"),t=tinymce.util.Tools.resolve("tinymce.dom.DOMUtils"),n=tinymce.util.Tools.resolve("tinymce.EditorManager"),r=tinymce.util.Tools.resolve("tinymce.Env"),i=tinymce.util.Tools.resolve("tinymce.util.Tools"),c=function(e){return e.getParam("importcss_merge_classes")},o=function(e){return e.getParam("importcss_exclusive")},s=function(e){return e.getParam("importcss_selector_converter")},u=function(e){return e.getParam("importcss_selector_filter")},l=function(e){return e.getParam("importcss_groups")},a=function(e){return e.getParam("importcss_append")},f=function(e){return e.getParam("importcss_file_filter")},m=function(e){var t=r.cacheSuffix;return"string"==typeof e&&(e=e.replace("?"+t,"").replace("&"+t,"")),e},g=function(e,t){var r=e.settings,i=!1!==r.skin&&(r.skin||"lightgray");return!!i&&t===(r.skin_url?e.documentBaseURI.toAbsolute(r.skin_url):n.baseURL+"/skins/"+i)+"/content"+(e.inline?".inline":"")+".min.css"},p=function(e){return"string"==typeof e?function(t){return-1!==t.indexOf(e)}:e instanceof RegExp?function(t){return e.test(t)}:e},v=function(e,t,n){var r=[],c={};i.each(e.contentCSS,function(e){c[e]=!0}),n||(n=function(e,t){return t||c[e]});try{i.each(t.styleSheets,function(t){!function c(t,o){var s,u=t.href;if((u=m(u))&&n(u,o)&&!g(e,u)){i.each(t.imports,function(e){c(e,!0)});try{s=t.cssRules||t.rules}catch(l){}i.each(s,function(e){e.styleSheet?c(e.styleSheet,!0):e.selectorText&&i.each(e.selectorText.split(","),function(e){r.push(i.trim(e))})})}}(t)})}catch(o){}return r},h=function(e,t){var n,r=/^(?:([a-z0-9\-_]+))?(\.[a-z0-9_\-\.]+)$/i.exec(t);if(r){var o=r[1],s=r[2].substr(1).split(".").join(" "),u=i.makeMap("a,img");return 
r[1]?(n={title:t},e.schema.getTextBlockElements()[o]?n.block=o:e.schema.getBlockElements()[o]||u[o.toLowerCase()]?n.selector=o:n.inline=o):r[2]&&(n={inline:"span",title:t.substr(1),classes:s}),!1!==c(e)?n.classes=s:n.attributes={"class":s},n}},d=function(e,t){return null===t||!1!==o(e)},y=h,_=function(e){e.on("renderFormatsMenu",function(n){var r,c={},o=p(u(e)),m=n.control,g=(r=l(e),i.map(r,function(e){return i.extend({},e,{original:e,selectors:{},filter:p(e.filter),item:{text:e.title,menu:[]}})})),y=function(n,r){if(_=n,T=c,!(d(e,x=r)?_ in T:_ in x.selectors)){p=n,y=c,d(e,v=r)?y[p]=!0:v.selectors[p]=!0;var o=(l=e,a=e.plugins.importcss,f=n,((g=r)&&g.selector_converter?g.selector_converter:s(l)?s(l):function(){return h(l,f)}).call(a,f,g));if(o){var u=o.name||t.DOM.uniqueId();return e.formatter.register(u,o),i.extend({},m.settings.itemDefaults,{text:o.title,format:u})}}var l,a,f,g,p,v,y,_,x,T;return null};a(e)||m.items().remove(),i.each(v(e,n.doc||e.getDoc(),p(f(e))),function(e){if(-1===e.indexOf(".mce-")&&(!o||o(e))){var t=(r=g,c=e,i.grep(r,function(e){return!e.filter||e.filter(c)}));if(t.length>0)i.each(t,function(t){var n=y(e,t);n&&t.item.menu.push(n)});else{var n=y(e,null);n&&m.add(n)}}var r,c}),i.each(g,function(e){e.item.menu.length>0&&m.add(e.item)}),n.control.renderNew()})},x=function(e){return{convertSelectorToFormat:function(t){return y(e,t)}}};e.add("importcss",function(e){return _(e),x(e)})}();
/Flask-AuthOOB-0.0.34.tar.gz/Flask-AuthOOB-0.0.34/flask_authoob/routes.py | import datetime
from uuid import uuid4
from flask import abort, jsonify, make_response, redirect, request
from flask_security.core import current_user
from flask_security.decorators import auth_token_required
from flask_security.utils import logout_user
from password_strength import PasswordPolicy
from validate_email import validate_email
class FlaskOOBRoutes:
    """Mixin providing every HTTP route for Flask-AuthOOB.

    The composing class is expected to supply ``User``, ``UserSchema``,
    ``user_datastore``, ``prefix``, ``mail_provider``, ``updatable_fields``
    and the ``hook`` extension mechanism.
    """

    def register_routes(self, app, db, authoob):
        """Register all auth endpoints on ``app`` under ``self.prefix``.

        ``db`` is the SQLAlchemy instance; ``authoob`` supplies the
        password hashing/verification helpers.
        """
        User = self.User
        UserSchema = self.UserSchema
        # Password strength rules enforced on registration and reset.
        policy = PasswordPolicy.from_names(
            length=8,  # min length: 8
            uppercase=1,  # need min. 1 uppercase letter
            numbers=1,  # need min. 1 digit
            special=0,  # special characters not required
            nonletters=0,  # non-letter characters not required
        )

        def fail(code=401, message="Authentication failed", data={}):
            # Abort the current request with a JSON error body.
            abort(make_response(jsonify(message=message, data=data), code))

        @app.route(f"{self.prefix}/logout", methods=["POST"])
        @auth_token_required
        def logout():
            self.hook("pre_logout", {"user": current_user})
            logout_user()
            self.hook("post_logout", None)
            return "", 204

        @app.route(f"{self.prefix}/login", methods=["POST"])
        def login():
            self.hook("pre_login", {"payload": request.json})
            try:
                user = User.query.filter_by(email=request.json.get("email", None)).one()
            except Exception:
                fail()
            self.hook("before_login", {"payload": request.json, "user": user})
            if authoob.verify_password(request.json.get("password", ""), user.password):
                user.login_count += 1
                db.session.add(user)
                db.session.commit()
                self.hook("post_login", {"payload": request.json, "user": user})
                return jsonify({"token": user.get_auth_token()})
            else:
                fail()

        @app.route(f"{self.prefix}/profile")
        @auth_token_required
        def profile():
            # Serialized profile of the authenticated user.
            self.hook("pre_profile", {"user": current_user})
            response = UserSchema().jsonify(current_user)
            self.hook("post_profile", {"user": current_user, "response": response})
            return response

        @app.route(f"{self.prefix}/profile", methods=["PUT"])
        @auth_token_required
        def update_profile():
            self.hook(
                "pre_update_profile", {"payload": request.json, "user": current_user}
            )
            # TODO Fix & enhence
            # try:
            #     data = UserSchema(load_only=self.updatable_fields).load(request.json)
            # except ValidationError as errors:
            #     fail(
            #         code=400, message="Invalid parameters for user update", data=errors.messages
            #     )
            # for field in self.updatable_fields:
            #     setattr(current_user, field, data.get(field, None))
            # db.session.add(current_user)
            # db.session.commit()
            response = UserSchema().jsonify(current_user)
            self.hook(
                "post_update_profile",
                {"payload": request.json, "response": response, "user": current_user},
            )
            return response

        @app.route(f"{self.prefix}/profile/<int:user_id>")
        def user_profile(user_id):
            # Public, reduced profile of any user.
            self.hook("pre_user_profile", {"user_id": user_id})
            try:
                user = User.query.get(user_id)
                response = UserSchema(only=["username", "id", "created_at"]).jsonify(
                    user
                )
                self.hook("post_user_profile", {"user": user, "response": response})
                return response
            except Exception:
                fail(code=404, message="User not found")

        @app.route(f"{self.prefix}/token")
        @auth_token_required
        def token():
            # Issue a fresh authentication token for the current user.
            self.hook("pre_token", {"user": current_user})
            response = jsonify({"token": current_user.get_auth_token()})
            self.hook("post_token", {"user": current_user, "response": response})
            return response

        @app.route(f"{self.prefix}/activate/<string:token>")
        def activate(token):
            self.hook("pre_activate", {"token": token})
            try:
                user = User.query.filter_by(activation_token=token).one()
            except Exception:
                fail(code=404, message="No token match")
            if not user.active and user.confirmed_at is None:
                user.active = True
                user.confirmed_at = datetime.datetime.now()
                db.session.add(user)
                db.session.commit()
                self.hook("post_activate", {"user": user})
                default_redirect = "{}?validated_user={}".format(
                    app.config["APP_URL"], user.id
                )
                # A hook may supply a custom redirect target.
                hook_url = self.hook(
                    "mail_activate_redirect",
                    {"user": user, "app_url": app.config["APP_URL"]},
                )
                return redirect(hook_url if hook_url else default_redirect)
            else:
                response = self.hook("already_activated", {"user": user})
                if response is None:
                    fail(code=409, message="Unable to activate")
                else:
                    return response

        def do_reset(payload, user):
            # Shared password-update logic for both reset endpoints.
            password1 = payload.get("password1")
            password2 = payload.get("password2")
            if password1 != password2:
                fail(code=400, message="Password mismatch")
            if policy.test(password1):
                fail(code=400, message="Passwords strength policy invalid")
            user.password = authoob.hash_password(password1)
            user.reset_password_token = None
            db.session.add(user)
            db.session.commit()
            return "", 204

        @app.route(f"{self.prefix}/password/ask", methods=["POST"])
        def ask_reset_password():
            self.hook("pre_ask_reset", {"payload": request.json})
            if request.json is None:
                fail(code=400, message="Missing data")
            email = request.json.get("email", None)
            if email is None:
                fail(code=400, message="Missing data")
            try:
                user = User.query.filter_by(email=email).one()
            except Exception:
                fail(code=400, message="Missing data")  # This prevents email scans
            user.reset_password_token = str(uuid4())
            db.session.add(user)
            db.session.commit()
            if not self.hook(
                "mail_ask_reset_password",
                {"user": user, "mail_provider": self.mail_provider},
            ):
                link = (
                    f'<a href="{app.config["APP_URL"]}?reset_password_token'
                    f'={user.reset_password_token}">this link</a>'
                )
                self.mail_provider.send_mail(
                    to_emails=user.email,
                    subject="Email reset link",
                    html=(f"You can reset your password by following {link}."),
                )
            self.hook("post_ask_reset", {"payload": request.json, "user": user})
            return "", 204

        @app.route(f"{self.prefix}/password/reset", methods=["PUT"])
        @auth_token_required
        def reset_password_auth():
            self.hook("pre_reset_auth", {"payload": request.json, "user": current_user})
            if request.json is None:
                fail(code=400, message="Missing data")
            response = do_reset(request.json, current_user)
            self.hook(
                "post_reset_auth",
                {"payload": request.json, "user": current_user, "response": response},
            )
            return response

        @app.route(f"{self.prefix}/password/reset/token", methods=["PUT"])
        def reset_password_token():
            # BUG FIX: the hooks previously referenced the bare name
            # ``token``, which resolved to the sibling ``token()`` route
            # function in this enclosing scope — not the submitted reset
            # token.  Read the token from the payload once and pass that
            # value to both hooks and the query.
            reset_token = request.json.get("token") if request.json else None
            self.hook("pre_reset_token", {"token": reset_token})
            if not reset_token:
                fail(code=400, message="Missing data")
            try:
                user = User.query.filter_by(
                    reset_password_token=reset_token
                ).one()
            except Exception:
                fail(code=404, message="No token match")
            response = do_reset(request.json, user)
            user.reset_password_token = None
            db.session.add(user)
            db.session.commit()
            self.hook("post_reset_token", {"token": reset_token, "user": user})
            return response

        @app.route(f"{self.prefix}/register", methods=["POST"])
        def register():
            if self.mail_provider is None:
                fail(
                    code=500, message="No email provider defined, cannot register user"
                )
            if request.json is None:
                fail(code=400, message="Missing data")
            self.hook("pre_register", {"payload": request.json})
            password = request.json.get("password1")
            email = request.json.get("email")
            # Reject missing/mismatching passwords before testing strength,
            # so policy.test() is never fed None (previously a 500 risk).
            if password is None or password != request.json.get("password2"):
                fail(code=400, message="Mismatching passwords")
            if policy.test(password):
                fail(code=400, message="Passwords strength policy invalid")
            if not validate_email(email):
                fail(code=400, message="Invalid email given")
            if User.query.filter_by(email=email).count():
                fail(code=409, message="User already registered")
            # New accounts start inactive until the activation email is used.
            self.user_datastore.create_user(
                email=email,
                password=authoob.hash_password(password),
                firstname=request.json.get("firstname", None),
                lastname=request.json.get("lastname", None),
                active=False,
            )
            db.session.commit()
            user = User.query.filter_by(email=email).one()
            if not self.hook(
                "mail_register", {"user": user, "mail_provider": self.mail_provider}
            ):
                # This is the default registration text.
                link = (
                    f'<a href="{app.config["API_URL"]}/authoob/'
                    f'activate/{user.activation_token}">this link</a>'
                )
                self.mail_provider.send_mail(
                    to_emails=user.email,
                    subject="Email confirmation",
                    html=(
                        "Please activate your account by following "
                        f"{link} to confirm your account creation"
                    ),
                )
            self.hook("post_register", {"user": user, "payload": request.json})
            return jsonify({"token": user.get_auth_token()})
/FlexGet-3.9.6-py3-none-any.whl/flexget/plugins/input/my_anime_list.py | from loguru import logger
from flexget import plugin
from flexget.config_schema import one_or_more
from flexget.entry import Entry
from flexget.event import event
from flexget.utils.cached_input import cached
from flexget.utils.requests import RequestException
# Module-scoped logger for this plugin.
logger = logger.bind(name='my_anime_list')
# Numeric codes used by the MAL animelist endpoint for a user's watch status.
STATUS = {'watching': 1, 'completed': 2, 'on_hold': 3, 'dropped': 4, 'plan_to_watch': 6, 'all': 7}
# Numeric codes for a series' airing status.
AIRING_STATUS = {'airing': 1, 'finished': 2, 'planned': 3, 'all': 6}
# Recognised media type names (compared lowercase).
ANIME_TYPE = ['all', 'tv', 'ova', 'movie', 'special', 'ona', 'music', 'unknown']
class MyAnimeList:
    """Creates entries for series and movies from a MyAnimeList list.

    Syntax:

      my_anime_list:
        username: <value>
        status:
          - <watching|completed|on_hold|dropped|plan_to_watch>
          ...
        airing_status:
          - <airing|finished|planned>
          ...
        type:
          - <series|ova...>
    """

    schema = {
        'type': 'object',
        'properties': {
            'username': {'type': 'string'},
            'status': one_or_more(
                {'type': 'string', 'enum': list(STATUS.keys()), 'default': 'all'},
                unique_items=True,
            ),
            'airing_status': one_or_more(
                {'type': 'string', 'enum': list(AIRING_STATUS.keys()), 'default': 'all'},
                unique_items=True,
            ),
            'type': one_or_more(
                {'type': 'string', 'enum': list(ANIME_TYPE), 'default': 'all'}, unique_items=True
            ),
        },
        'required': ['username'],
        'additionalProperties': False,
    }

    @cached('my_anime_list', persist='2 hours')
    def on_task_input(self, task, config):
        """Yield one entry per anime on the user's list matching the filters."""
        def as_list(value):
            # one_or_more allows either a scalar or a list in the config.
            return value if isinstance(value, list) else [value]

        status_names = as_list(config.get('status', ['all']))
        airing_names = as_list(config.get('airing_status', ['all']))
        type_names = as_list(config.get('type', ['all']))

        # BUG FIX: the old filters compared ``config['status'] == 'all'``,
        # which raised KeyError when the option was omitted and was always
        # False when it was given as a list — so the 'all' wildcard never
        # matched.  Resolve the wildcard up front instead.
        any_status = 'all' in status_names
        any_airing = 'all' in airing_names
        any_type = 'all' in type_names

        selected_status = [STATUS[s] for s in status_names]
        selected_airing_status = [AIRING_STATUS[s] for s in airing_names]
        selected_types = [t.lower() for t in type_names]

        list_json = []
        for status in selected_status:
            # The JSON endpoint pages its results, 300 entries at a time.
            offset = 0
            while True:
                try:
                    page = task.requests.get(
                        f'https://myanimelist.net/animelist/{config.get("username")}/load.json',
                        params={'status': status, 'offset': offset},
                    ).json()
                except RequestException as e:
                    logger.error(f'Error finding list on url: {e.request.url}')
                    break
                except (ValueError, TypeError):
                    logger.error('Invalid JSON response')
                    break
                list_json += page
                # BUG FIX: pagination used to compare the *accumulated*
                # list length against 300, so it stopped after at most two
                # pages.  A short page means this status is exhausted.
                if len(page) < 300:
                    break
                offset += len(page)

        for anime in list_json:
            has_selected_status = any_status or anime['status'] in selected_status
            has_selected_airing_status = (
                any_airing or anime['anime_airing_status'] in selected_airing_status
            )
            has_selected_type = (
                any_type or anime['anime_media_type_string'].lower() in selected_types
            )
            if has_selected_status and has_selected_type and has_selected_airing_status:
                # MAL sometimes returns the title as an integer.
                anime['anime_title'] = str(anime['anime_title'])
                entry = Entry()
                entry['title'] = anime['anime_title']
                entry['url'] = f'https://myanimelist.net{anime["anime_url"]}'
                entry['mal_name'] = anime['anime_title']
                entry['mal_poster'] = anime['anime_image_path']
                entry['mal_type'] = anime['anime_media_type_string']
                entry['mal_tags'] = anime['tags']
                if entry.isvalid():
                    yield entry
@event('plugin.register')
def register_plugin():
    """Register the my_anime_list input plugin with FlexGet."""
    plugin.register(MyAnimeList, 'my_anime_list', api_ver=2)
/Electrum-VTC-2.9.3.3.tar.gz/Electrum-VTC-2.9.3.3/lib/websockets.py |
import threading, Queue, os, json, time
from collections import defaultdict
try:
from SimpleWebSocketServer import WebSocket, SimpleSSLWebSocketServer
except ImportError:
import sys
sys.exit("install SimpleWebSocketServer")
import util
# Shared queue of (websocket, request_id) pairs: filled by ElectrumWebSocket
# message handlers, drained by WsClientThread.reading_thread.
request_queue = Queue.Queue()
class ElectrumWebSocket(WebSocket):
    """Per-connection handler: accepts 'id:<request_id>' messages and hands
    them to the client thread through the shared request_queue."""

    def _log(self, *args):
        # Route all connection diagnostics through Electrum's error printer.
        util.print_error(*args)

    def handleMessage(self):
        msg = self.data
        # Messages are expected to look like 'id:<request_id>'.
        assert msg[0:3] == 'id:'
        self._log("message received", msg)
        request_queue.put((self, msg[3:]))

    def handleConnected(self):
        self._log("connected", self.address)

    def handleClose(self):
        self._log("closed", self.address)
class WsClientThread(util.DaemonThread):
    """Daemon thread that matches stored payment requests against incoming
    address notifications and tells websocket clients when a request is paid."""

    def __init__(self, config, network):
        util.DaemonThread.__init__(self)
        self.network = network
        self.config = config
        self.response_queue = Queue.Queue()
        # address -> list of (websocket, expected amount) awaiting payment
        self.subscriptions = defaultdict(list)

    def make_request(self, request_id):
        """Read the stored payment request JSON file for `request_id` and
        return its (address, amount) pair."""
        rdir = self.config.get('requests_dir')
        n = os.path.join(rdir, 'req', request_id[0], request_id[1], request_id, request_id + '.json')
        with open(n) as f:
            s = f.read()
        d = json.loads(s)
        addr = d.get('address')
        amount = d.get('amount')
        return addr, amount

    def reading_thread(self):
        """Consume (websocket, request_id) pairs from request_queue and
        subscribe to each request's address on the Electrum network."""
        while self.is_running():
            try:
                # NOTE(review): get() without a timeout blocks forever, so the
                # Queue.Empty branch below is effectively dead code — kept to
                # preserve existing behavior.
                ws, request_id = request_queue.get()
            except Queue.Empty:
                continue
            try:
                addr, amount = self.make_request(request_id)
            except Exception as e:
                # Unknown request id or unreadable request file: log and skip.
                # (The previous bare `except:` swallowed every error silently,
                # including KeyboardInterrupt/SystemExit.)
                util.print_error('cannot read request', request_id, e)
                continue
            l = self.subscriptions.get(addr, [])
            l.append((ws, amount))
            self.subscriptions[addr] = l
            self.network.send([('blockchain.address.subscribe', [addr])], self.response_queue.put)

    def run(self):
        threading.Thread(target=self.reading_thread).start()
        while self.is_running():
            try:
                r = self.response_queue.get(timeout=0.1)
            except Queue.Empty:
                continue
            util.print_error('response', r)
            method = r.get('method')
            params = r.get('params')
            result = r.get('result')
            if result is None:
                continue
            if method == 'blockchain.address.subscribe':
                # Address status changed: follow up with a balance query.
                self.network.send([('blockchain.address.get_balance', params)], self.response_queue.put)
            elif method == 'blockchain.address.get_balance':
                addr = params[0]
                l = self.subscriptions.get(addr, [])
                for ws, amount in l:
                    if not ws.closed:
                        # Sum of confirmed + unconfirmed balance covers the
                        # requested amount: notify the client.
                        if sum(result.values()) >= amount:
                            ws.sendMessage(unicode('paid'))
class WebSocketServer(threading.Thread):
    """Daemon thread that runs the SSL websocket endpoint used for payment
    notifications, plus the worker thread that watches the network."""

    def __init__(self, config, ns):
        threading.Thread.__init__(self)
        self.config = config
        self.net_server = ns
        self.daemon = True

    def run(self):
        # Start the worker that subscribes to addresses and detects payments.
        client_thread = WsClientThread(self.config, self.net_server)
        client_thread.start()
        conf = self.config.get
        self.server = SimpleSSLWebSocketServer(
            conf('websocket_server'),      # host
            conf('websocket_port', 9999),  # port
            ElectrumWebSocket,
            conf('ssl_chain'),             # certificate chain file
            conf('ssl_privkey'),           # private key file
        )
        self.server.serveforever()
// NOTE(review): vendored, minified build of OSM Buildings (Leaflet adapter),
// version 0.2.2b per the embedded `u.VERSION` string at the end of the file.
// This is a generated artifact — do not hand-edit; fix issues upstream and
// re-vendor the built file.
/Flask-KQMaps-0.4.2.tar.gz/flask_kqmaps/static/kqwebclient/leaflet/3rd_libs/leaflet.osmbuildings/OSMBuildings-Leaflet.js | (function(ca){function S(b,a){var c=b.x-a.x,d=b.y-a.y;return c*c+d*d}function va(b){var a=b.length;if(16>a)return!1;var c,d=Infinity,f=-Infinity,e=Infinity,g=-Infinity;for(c=0;c<a-1;c+=2)d=Math.min(d,b[c]),f=Math.max(f,b[c]),e=Math.min(e,b[c+1]),g=Math.max(g,b[c+1]);c=f-d;g-=e;f=c/g;if(0.85>f||1.15<f)return!1;d={x:d+c/2,y:e+g/2};c=(c+g)/4;e=c*c;for(c=0;c<a-1;c+=2)if(g=S({x:b[c],y:b[c+1]},d),0.8>g/e||1.2<g/e)return!1;return!0}function ja(b,a){var c={};b/=T;a/=T;var d=wa,f;f=0>=a?90:1<=a?-90:(2*xa(ya(E*
(1-2*a)))-J)/E*180;c[d]=f;c[za]=360*(1===b?1:(b%1+1)%1)-180;return c}function da(b,a){var c=U(1,K(0,0.5-Aa(ka(Ba+J*b/180))/E/2));return{x:(a/360+0.5)*T<<0,y:c*T<<0}}function V(b){for(var a=B+p,c=v+n,d=0,f=b.length-3;d<f;d+=2)if(b[d]>p&&b[d]<a&&b[d+1]>n&&b[d+1]<c)return!0;return!1}function Ca(){$||($=setInterval(function(){for(var b=F.items,a=!1,c=0,d=b.length;c<d;c++)1>b[c].scale&&(b[c].scale+=0.1,1<b[c].scale&&(b[c].scale=1),a=!0);A.render();a||(clearInterval($),$=null)},33))}function ea(b){M=W+
b.x;N=v+b.y;A.render(!0)}function la(b){B=b.width;v=b.height;W=B/2<<0;fa=v/2<<0;M=W;N=v;A.setSize(B,v);ga=q-50}function ma(b){x=b;T=Da<<x;b=ja(p+W,n+fa);var a=da(b.latitude,0);na=da(b.latitude,1).x-a.x;C=oa(0.95,x-G);ha=""+H.alpha(C);aa=""+ba.alpha(C);X=""+O.alpha(C)}var u=Math,ya=u.exp,Aa=u.log,Ea=u.sin,Fa=u.cos,ka=u.tan,xa=u.atan,P=u.atan2,U=u.min,K=u.max,pa=u.sqrt,qa=u.ceil,oa=u.pow,ra=ra||Array,sa=sa||Array,u=/iP(ad|hone|od)/g.test(navigator.userAgent),t=!!~navigator.userAgent.indexOf("Trident"),
Ga=!ca.requestAnimationFrame||u||t?function(b){b()}:ca.requestAnimationFrame,I=function(b){function a(a,b,c){0>c&&(c+=1);1<c&&(c-=1);return c<1/6?a+6*(b-a)*c:0.5>c?b:c<2/3?a+(b-a)*(2/3-c)*6:a}var c={aqua:"#00ffff",black:"#000000",blue:"#0000ff",fuchsia:"#ff00ff",gray:"#808080",grey:"#808080",green:"#008000",lime:"#00ff00",maroon:"#800000",navy:"#000080",olive:"#808000",orange:"#ffa500",purple:"#800080",red:"#ff0000",silver:"#c0c0c0",teal:"#008080",white:"#ffffff",yellow:"#ffff00"},d=function(a,b,
c,d){this.H=a;this.S=b;this.L=c;this.A=d};d.parse=function(a){var b=0,d=0,h=0,k=1,m;a=(""+a).toLowerCase();a=c[a]||a;if(m=a.match(/^#(\w{2})(\w{2})(\w{2})$/))b=parseInt(m[1],16),d=parseInt(m[2],16),h=parseInt(m[3],16);else if(m=a.match(/rgba?\((\d+)\D+(\d+)\D+(\d+)(\D+([\d.]+))?\)/))b=parseInt(m[1],10),d=parseInt(m[2],10),h=parseInt(m[3],10),k=m[4]?parseFloat(m[5]):1;else return;return this.fromRGBA(b,d,h,k)};d.fromRGBA=function(a,b,c,h){"object"===typeof a?(b=a.g/255,c=a.b/255,h=a.a,a=a.r/255):(a/=
255,b/=255,c/=255);var k=Math.max(a,b,c),m=Math.min(a,b,c),l,y=(k+m)/2,s=k-m;if(s){m=0.5<y?s/(2-k-m):s/(k+m);switch(k){case a:l=(b-c)/s+(b<c?6:0);break;case b:l=(c-a)/s+2;break;case c:l=(a-b)/s+4}l*=60}else l=m=0;return new d(l,m,y,h)};d.prototype={toRGBA:function(){var b=Math.min(360,Math.max(0,this.H)),c=Math.min(1,Math.max(0,this.S)),d=Math.min(1,Math.max(0,this.L)),h=Math.min(1,Math.max(0,this.A)),k;if(0===c)b=k=c=d;else{var m=0.5>d?d*(1+c):d+c-d*c,d=2*d-m,b=b/360,c=a(d,m,b+1/3);k=a(d,m,b);b=
a(d,m,b-1/3)}return{r:Math.round(255*c),g:Math.round(255*k),b:Math.round(255*b),a:h}},toString:function(){var a=this.toRGBA();return 1===a.a?"#"+(16777216+(a.r<<16)+(a.g<<8)+a.b).toString(16).slice(1,7):"rgba("+[a.r,a.g,a.b,a.a.toFixed(2)].join()+")"},hue:function(a){return new d(this.H*a,this.S,this.L,this.A)},saturation:function(a){return new d(this.H,this.S*a,this.L,this.A)},lightness:function(a){return new d(this.H,this.S,this.L*a,this.A)},alpha:function(a){return new d(this.H,this.S,this.L,this.A*
a)}};return d}(this),Ha=function(){var b=Math,a=b.PI,c=b.sin,d=b.cos,f=b.tan,e=b.asin,g=b.atan2,h=a/180,k=23.4397*h;return function(b,l,y){y=h*-y;l*=h;b=b.valueOf()/864E5-0.5+2440588-2451545;var s=h*(357.5291+0.98560028*b),D;D=h*(1.9148*c(s)+0.02*c(2*s)+3E-4*c(3*s));D=s+D+102.9372*h+a;s=e(c(0)*d(k)+d(0)*c(k)*c(D));D=g(c(D)*d(k)-f(0)*c(k),d(D));b=h*(280.16+360.9856235*b)-y-D;y=e(c(l)*c(s)+d(l)*d(s)*d(b));l=g(c(b),d(b)*c(l)-f(s)*d(l));return{altitude:y,azimuth:l-a/2}}}(),Ja=function(){function b(a){a=
a.toLowerCase();return"#"===a[0]?a:d[f[a]||a]||null}function a(a,b){var c,d,f,s,D=0,p,n;p=0;for(n=a.length-3;p<n;p+=2)c=a[p],d=a[p+1],f=a[p+2],s=a[p+3],D+=c*s-f*d;if((0<D/2?e:g)===b)return a;c=[];for(d=a.length-2;0<=d;d-=2)c.push(a[d],a[d+1]);return c}function c(b){var d,f,l=[],y;switch(b.type){case "GeometryCollection":l=[];d=0;for(f=b.geometries.length;d<f;d++)(y=c(b.geometries[d]))&&l.push.apply(l,y);return l;case "MultiPolygon":l=[];d=0;for(f=b.coordinates.length;d<f;d++)(y=c({type:"Polygon",
coordinates:b.coordinates[d]}))&&l.push.apply(l,y);return l;case "Polygon":b=b.coordinates;break;default:return[]}var s,p=[],n=[];s=b[0];d=0;for(f=s.length;d<f;d++)p.push(s[d][1],s[d][0]);p=a(p,e);d=0;for(f=b.length-1;d<f;d++){s=b[d+1];n[d]=[];l=0;for(y=s.length;l<y;l++)n[d].push(s[l][1],s[l][0]);n[d]=a(n[d],g)}return[{outer:p,inner:n.length?n:null}]}var d={brick:"#cc7755",bronze:"#ffeecc",canvas:"#fff8f0",concrete:"#999999",copper:"#a0e0d0",glass:"#e8f8f8",gold:"#ffcc00",plants:"#009933",metal:"#aaaaaa",
panel:"#fff8f0",plaster:"#999999",roof_tiles:"#f08060",silver:"#cccccc",slate:"#666666",stone:"#996666",tar_paper:"#333333",wood:"#deb887"},f={asphalt:"tar_paper",bitumen:"tar_paper",block:"stone",bricks:"brick",glas:"glass",glassfront:"glass",grass:"plants",masonry:"stone",granite:"stone",panels:"panel",paving_stones:"stone",plastered:"plaster",rooftiles:"roof_tiles",roofingfelt:"tar_paper",sandstone:"stone",sheet:"canvas",sheets:"canvas",shingle:"tar_paper",shingles:"tar_paper",slates:"slate",steel:"metal",
tar:"tar_paper",tent:"canvas",thatch:"plants",tile:"roof_tiles",tiles:"roof_tiles"},e="CW",g="CCW";return{read:function(a){if(!a||"FeatureCollection"!==a.type)return[];a=a.features;var d,f,e,g,p=[],n,q,t,r;d=0;for(f=a.length;d<f;d++)if(n=a[d],"Feature"===n.type&&!1!==ta(n)){e=n.properties;g={};e=e||{};g.height=e.height||(e.levels?3*e.levels:Ia);g.minHeight=e.minHeight||(e.minLevel?3*e.minLevel:0);if(q=e.material?b(e.material):e.wallColor||e.color)g.wallColor=q;if(q=e.roofMaterial?b(e.roofMaterial):
e.roofColor)g.roofColor=q;switch(e.shape){case "cylinder":case "cone":case "dome":case "sphere":g.shape=e.shape;g.isRotational=!0;break;case "pyramid":g.shape=e.shape}switch(e.roofShape){case "cone":case "dome":g.roofShape=e.roofShape;g.isRotational=!0;break;case "pyramid":g.roofShape=e.roofShape}g.roofShape&&e.roofHeight?(g.roofHeight=e.roofHeight,g.height=K(0,g.height-g.roofHeight)):g.roofHeight=0;t=g;q=c(n.geometry);e=0;for(g=q.length;e<g;e++){r=t;var w={},u=void 0;for(u in r)r.hasOwnProperty(u)&&
(w[u]=r[u]);r=w;r.footprint=q[e].outer;if(r.isRotational){for(var w=r,u=r.footprint,x=180,z=-180,v=0,A=u.length;v<A;v+=2)x=U(x,u[v+1]),z=K(z,u[v+1]);w.radius=(z-x)/2}q[e].inner&&(r.holes=q[e].inner);if(n.id||n.properties.id)r.id=n.id||n.properties.id;n.properties.relationId&&(r.relationId=n.properties.relationId);p.push(r)}}return p}}}(),E=Math.PI,J=E/2,Ba=E/4,Da=256,x,T,G=15,wa="latitude",za="longitude",B=0,v=0,W=0,fa=0,p=0,n=0,H=I.parse("rgba(200, 190, 180)"),ba=H.lightness(0.8),O=H.lightness(1.2),
ha=""+H,aa=""+ba,X=""+O,na=0,C=1,ga,Ia=5,M,N,q=450,Q,Ka=function(){function b(b,g){if(a[b])g&&g(a[b]);else{var h=new XMLHttpRequest;h.onreadystatechange=function(){if(4===h.readyState&&h.status&&!(200>h.status||299<h.status)&&g&&h.responseText){var k=h.responseText;a[b]=k;c.push({url:b,size:k.length});d+=k.length;for(g(k);d>f;)k=c.shift(),d-=k.size,delete a[k.url]}};h.open("GET",b);h.send(null);return h}}var a={},c=[],d=0,f=5242880;return{loadJSON:function(a,c){return b(a,function(a){var b;try{b=
JSON.parse(a)}catch(d){}c(b)})}}}(),F={loadedItems:{},items:[],getPixelFootprint:function(b){for(var a=new ra(b.length),c,d=0,f=b.length-1;d<f;d+=2)c=da(b[d],b[d+1]),a[d]=c.x,a[d+1]=c.y;b=a;a=b.length/2;c=new sa(a);var d=0,f=a-1,e,g,h,k,m=[],l=[],n=[];for(c[d]=c[f]=1;f;){g=0;for(e=d+1;e<f;e++){h=b[2*e];var p=b[2*e+1],q=b[2*d],r=b[2*d+1],u=b[2*f],w=b[2*f+1],t=u-q,v=w-r,x=void 0;if(0!==t||0!==v)x=((h-q)*t+(p-r)*v)/(t*t+v*v),1<x?(q=u,r=w):0<x&&(q+=t*x,r+=v*x);t=h-q;v=p-r;h=t*t+v*v;h>g&&(k=e,g=h)}2<g&&
(c[k]=1,m.push(d),l.push(k),m.push(k),l.push(f));d=m.pop();f=l.pop()}for(e=0;e<a;e++)c[e]&&n.push(b[2*e],b[2*e+1]);a=n;if(!(8>a.length))return a},resetItems:function(){this.items=[];this.loadedItems={};Y.reset()},addRenderItems:function(b,a){for(var c,d,f,e=Ja.read(b),g=0,h=e.length;g<h;g++)c=e[g],f=c.id||[c.footprint[0],c.footprint[1],c.height,c.minHeight].join(),!this.loadedItems[f]&&(d=this.scale(c))&&(d.scale=a?0:1,this.items.push(d),this.loadedItems[f]=1);Ca()},scale:function(b){var a={},c=6/
oa(2,x-G);b.id&&(a.id=b.id);a.height=U(b.height/c,ga);a.minHeight=isNaN(b.minHeight)?0:b.minHeight/c;if(!(a.minHeight>ga)&&(a.footprint=this.getPixelFootprint(b.footprint),a.footprint)){for(var d=a.footprint,f=Infinity,e=-Infinity,g=Infinity,h=-Infinity,k=0,m=d.length-3;k<m;k+=2)f=U(f,d[k]),e=K(e,d[k]),g=U(g,d[k+1]),h=K(h,d[k+1]);a.center={x:f+(e-f)/2<<0,y:g+(h-g)/2<<0};b.radius&&(a.radius=b.radius*na);b.shape&&(a.shape=b.shape);b.roofShape&&(a.roofShape=b.roofShape);"cone"!==a.roofShape&&"dome"!==
a.roofShape||a.shape||!va(a.footprint)||(a.shape="cylinder");if(b.holes){a.holes=[];for(var l,d=0,f=b.holes.length;d<f;d++)(l=this.getPixelFootprint(b.holes[d]))&&a.holes.push(l)}var n;b.wallColor&&(n=I.parse(b.wallColor))&&(n=n.alpha(C),a.altColor=""+n.lightness(0.8),a.wallColor=""+n);b.roofColor&&(n=I.parse(b.roofColor))&&(a.roofColor=""+n.alpha(C));b.relationId&&(a.relationId=b.relationId);a.hitColor=Y.idToColor(b.relationId||b.id);a.roofHeight=isNaN(b.roofHeight)?0:b.roofHeight/c;if(!(a.height+
a.roofHeight<=a.minHeight))return a}},set:function(b){this.isStatic=!0;this.resetItems();this._staticData=b;this.addRenderItems(this._staticData,!0)},load:function(b,a){this.src=b||"http://{s}.data.osmbuildings.org/0.2/{k}/tile/{z}/{x}/{y}.json".replace("{k}",a||"anonymous");this.update()},update:function(){function b(a){g.addRenderItems(a)}this.resetItems();if(!(x<G))if(this.isStatic&&this._staticData)this.addRenderItems(this._staticData);else if(this.src){var a=16<x?256<<x-16:256>>16-x,c=p/a<<0,
d=n/a<<0,f=qa((p+B)/a),a=qa((n+v)/a),e,g=this;for(e=d;e<=a;e++)for(d=c;d<=f;d++)this.loadTile(d,e,16,b)}},loadTile:function(b,a,c,d){b=this.src.replace("{s}","abcd"[(b+a)%4]).replace("{x}",b).replace("{y}",a).replace("{z}",c);return Ka.loadJSON(b,d)}},Z={draw:function(b,a,c,d,f,e,g,h){var k,m=this._extrude(b,a,d,f,e,g),l=[];if(c)for(a=0,k=c.length;a<k;a++)l[a]=this._extrude(b,c[a],d,f,e,g);b.fillStyle=h;b.beginPath();this._ring(b,m);if(c)for(a=0,k=l.length;a<k;a++)this._ring(b,l[a]);b.closePath();
b.stroke();b.fill()},_extrude:function(b,a,c,d,f,e){c=q/(q-c);for(var g=q/(q-d),h={x:0,y:0},k={x:0,y:0},m,l,y=[],s=0,t=a.length-3;s<t;s+=2)h.x=a[s]-p,h.y=a[s+1]-n,k.x=a[s+2]-p,k.y=a[s+3]-n,m=r.project(h,c),l=r.project(k,c),d&&(h=r.project(h,g),k=r.project(k,g)),(k.x-h.x)*(m.y-h.y)>(m.x-h.x)*(k.y-h.y)&&(b.fillStyle=h.x<k.x&&h.y<k.y||h.x>k.x&&h.y>k.y?e:f,b.beginPath(),this._ring(b,[k.x,k.y,h.x,h.y,m.x,m.y,l.x,l.y]),b.closePath(),b.fill()),y[s]=m.x,y[s+1]=m.y;return y},_ring:function(b,a){b.moveTo(a[0],
a[1]);for(var c=2,d=a.length-1;c<d;c+=2)b.lineTo(a[c],a[c+1])},simplified:function(b,a,c){b.beginPath();this._ringAbs(b,a);if(c){a=0;for(var d=c.length;a<d;a++)this._ringAbs(b,c[a])}b.closePath();b.stroke();b.fill()},_ringAbs:function(b,a){b.moveTo(a[0]-p,a[1]-n);for(var c=2,d=a.length-1;c<d;c+=2)b.lineTo(a[c]-p,a[c+1]-n)},shadow:function(b,a,c,d,f){for(var e=null,g={x:0,y:0},h={x:0,y:0},k,m,l=0,q=a.length-3;l<q;l+=2)g.x=a[l]-p,g.y=a[l+1]-n,h.x=a[l+2]-p,h.y=a[l+3]-n,k=z.project(g,d),m=z.project(h,
d),f&&(g=z.project(g,f),h=z.project(h,f)),(h.x-g.x)*(k.y-g.y)>(k.x-g.x)*(h.y-g.y)?(1===e&&b.lineTo(g.x,g.y),e=0,l||b.moveTo(g.x,g.y),b.lineTo(h.x,h.y)):(0===e&&b.lineTo(k.x,k.y),e=1,l||b.moveTo(k.x,k.y),b.lineTo(m.x,m.y));if(c)for(l=0,q=c.length;l<q;l++)this._ringAbs(b,c[l])},shadowMask:function(b,a,c){this._ringAbs(b,a);if(c){a=0;for(var d=c.length;a<d;a++)this._ringAbs(b,c[a])}},hitArea:function(b,a,c,d,f,e){c=null;var g={x:0,y:0},h={x:0,y:0};d=q/(q-d);var k=q/(q-f),m;b.fillStyle=e;b.beginPath();
for(var l=0,t=a.length-3;l<t;l+=2)g.x=a[l]-p,g.y=a[l+1]-n,h.x=a[l+2]-p,h.y=a[l+3]-n,e=r.project(g,d),m=r.project(h,d),f&&(g=r.project(g,k),h=r.project(h,k)),(h.x-g.x)*(e.y-g.y)>(e.x-g.x)*(h.y-g.y)?(1===c&&b.lineTo(g.x,g.y),c=0,l||b.moveTo(g.x,g.y),b.lineTo(h.x,h.y)):(0===c&&b.lineTo(e.x,e.y),c=1,l||b.moveTo(e.x,e.y),b.lineTo(m.x,m.y));b.closePath();b.fill()}},w={draw:function(b,a,c,d,f,e,g,h,k){a={x:a.x-p,y:a.y-n};var m=q/(q-f),l=q/(q-e);f=r.project(a,m);d*=m;e&&(a=r.project(a,l),c*=l);(m=this._tangents(a,
c,f,d))?(e=P(m[0].y1-a.y,m[0].x1-a.x),m=P(m[1].y1-a.y,m[1].x1-a.x)):(e=1.5*E,m=1.5*E);b.fillStyle=g;b.beginPath();b.arc(f.x,f.y,d,J,e,!0);b.arc(a.x,a.y,c,e,J);b.closePath();b.fill();b.fillStyle=h;b.beginPath();b.arc(f.x,f.y,d,m,J,!0);b.arc(a.x,a.y,c,J,m);b.closePath();b.fill();b.fillStyle=k;this._circle(b,f,d)},simplified:function(b,a,c){this._circle(b,{x:a.x-p,y:a.y-n},c)},shadow:function(b,a,c,d,f,e){a={x:a.x-p,y:a.y-n};f=z.project(a,f);var g;e&&(a=z.project(a,e));var h=this._tangents(a,c,f,d);
h?(e=P(h[0].y1-a.y,h[0].x1-a.x),g=P(h[1].y1-a.y,h[1].x1-a.x),b.moveTo(h[1].x2,h[1].y2),b.arc(f.x,f.y,d,g,e),b.arc(a.x,a.y,c,e,g)):(b.moveTo(a.x+c,a.y),b.arc(a.x,a.y,c,0,2*E))},shadowMask:function(b,a,c){var d=a.x-p;a=a.y-n;b.moveTo(d+c,a);b.arc(d,a,c,0,2*E)},hitArea:function(b,a,c,d,f,e,g){a={x:a.x-p,y:a.y-n};var h=q/(q-f),k=q/(q-e);f=r.project(a,h);d*=h;e&&(a=r.project(a,k),c*=k);e=this._tangents(a,c,f,d);b.fillStyle=g;b.beginPath();e?(g=P(e[0].y1-a.y,e[0].x1-a.x),h=P(e[1].y1-a.y,e[1].x1-a.x),b.moveTo(e[1].x2,
e[1].y2),b.arc(f.x,f.y,d,h,g),b.arc(a.x,a.y,c,g,h)):(b.moveTo(a.x+c,a.y),b.arc(a.x,a.y,c,0,2*E));b.closePath();b.fill()},_circle:function(b,a,c){b.beginPath();b.arc(a.x,a.y,c,0,2*E);b.stroke();b.fill()},_tangents:function(b,a,c,d){var f=b.x-c.x,e=b.y-c.y,g=a-d,h=f*f+e*e;if(!(h<=g*g)){var h=pa(h),f=-f/h,e=-e/h,g=g/h,h=[],k,m,l;k=pa(K(0,1-g*g));for(var n=1;-1<=n;n-=2)m=f*g-n*k*e,l=e*g+n*k*f,h.push({x1:b.x+a*m<<0,y1:b.y+a*l<<0,x2:c.x+d*m<<0,y2:c.y+d*l<<0});return h}}},R={draw:function(b,a,c,d,f,e,g){var h=
q/(q-f);c=r.project({x:c.x-p,y:c.y-n},q/(q-d));d={x:0,y:0};for(var k={x:0,y:0},m=0,l=a.length-3;m<l;m+=2)d.x=a[m]-p,d.y=a[m+1]-n,k.x=a[m+2]-p,k.y=a[m+3]-n,f&&(d=r.project(d,h),k=r.project(k,h)),(k.x-d.x)*(c.y-d.y)>(c.x-d.x)*(k.y-d.y)&&(b.fillStyle=d.x<k.x&&d.y<k.y||d.x>k.x&&d.y>k.y?g:e,b.beginPath(),this._triangle(b,d,k,c),b.closePath(),b.fill())},_triangle:function(b,a,c,d){b.moveTo(a.x,a.y);b.lineTo(c.x,c.y);b.lineTo(d.x,d.y)},_ring:function(b,a){b.moveTo(a[0]-p,a[1]-n);for(var c=2,d=a.length-1;c<
d;c+=2)b.lineTo(a[c]-p,a[c+1]-n)},shadow:function(b,a,c,d,f){var e={x:0,y:0},g={x:0,y:0};c=z.project({x:c.x-p,y:c.y-n},d);d=0;for(var h=a.length-3;d<h;d+=2)e.x=a[d]-p,e.y=a[d+1]-n,g.x=a[d+2]-p,g.y=a[d+3]-n,f&&(e=z.project(e,f),g=z.project(g,f)),(g.x-e.x)*(c.y-e.y)>(c.x-e.x)*(g.y-e.y)&&this._triangle(b,e,g,c)},shadowMask:function(b,a){this._ring(b,a)},hitArea:function(b,a,c,d,f,e){var g=q/(q-f);c=r.project({x:c.x-p,y:c.y-n},q/(q-d));d={x:0,y:0};var h={x:0,y:0};b.fillStyle=e;b.beginPath();e=0;for(var k=
a.length-3;e<k;e+=2)d.x=a[e]-p,d.y=a[e+1]-n,h.x=a[e+2]-p,h.y=a[e+3]-n,f&&(d=r.project(d,g),h=r.project(h,g)),(h.x-d.x)*(c.y-d.y)>(c.x-d.x)*(h.y-d.y)&&this._triangle(b,d,h,c);b.closePath();b.fill()}},r={project:function(b,a){return{x:(b.x-M)*a+M<<0,y:(b.y-N)*a+N<<0}},render:function(){var b=this.context;b.clearRect(0,0,B,v);if(!(x<G||Q)){var a,c,d,f={x:M+p,y:N+n},e,g,h,k,m=F.items;m.sort(function(a,b){return a.minHeight-b.minHeight||S(b.center,f)-S(a.center,f)||b.height-a.height});for(var l=0,q=m.length;l<
q;l++)if(a=m[l],!ia.isSimple(a)&&(e=a.footprint,V(e))){c=1>a.scale?a.height*a.scale:a.height;d=0;a.minHeight&&(d=1>a.scale?a.minHeight*a.scale:a.minHeight);g=a.wallColor||ha;h=a.altColor||aa;k=a.roofColor||X;b.strokeStyle=h;switch(a.shape){case "cylinder":w.draw(b,a.center,a.radius,a.radius,c,d,g,h,k);break;case "cone":w.draw(b,a.center,a.radius,0,c,d,g,h);break;case "dome":w.draw(b,a.center,a.radius,a.radius/2,c,d,g,h);break;case "sphere":w.draw(b,a.center,a.radius,a.radius,c,d,g,h,k);break;case "pyramid":R.draw(b,
e,a.center,c,d,g,h);break;default:Z.draw(b,e,a.holes,c,d,g,h,k)}switch(a.roofShape){case "cone":w.draw(b,a.center,a.radius,0,c+a.roofHeight,c,k,""+I.parse(k).lightness(0.9));break;case "dome":w.draw(b,a.center,a.radius,a.radius/2,c+a.roofHeight,c,k,""+I.parse(k).lightness(0.9));break;case "pyramid":R.draw(b,e,a.center,c+a.roofHeight,c,k,I.parse(k).lightness(0.9))}}}},ia={maxZoom:G+2,maxHeight:5,isSimple:function(b){return x<=this.maxZoom&&b.height+b.roofHeight<this.maxHeight},render:function(){var b=
this.context;b.clearRect(0,0,B,v);if(!(x<G||Q||x>this.maxZoom))for(var a,c,d=F.items,f=0,e=d.length;f<e;f++)if(a=d[f],!(a.height>=this.maxHeight)&&(c=a.footprint,V(c)))switch(b.strokeStyle=a.altColor||aa,b.fillStyle=a.roofColor||X,a.shape){case "cylinder":case "cone":case "dome":case "sphere":w.simplified(b,a.center,a.radius);break;default:Z.simplified(b,c,a.holes)}}},z={enabled:!0,color:"#666666",blurColor:"#000000",blurSize:15,date:new Date,direction:{x:0,y:0},project:function(b,a){return{x:b.x+
this.direction.x*a,y:b.y+this.direction.y*a}},render:function(){var b=this.context,a,c,d;b.clearRect(0,0,B,v);if(!(!this.enabled||x<G||Q||(a=ja(W+p,fa+n),a=Ha(this.date,a.latitude,a.longitude),0>=a.altitude))){c=1/ka(a.altitude);d=5>c?0.75:1/c*5;this.direction.x=Fa(a.azimuth)*c;this.direction.y=Ea(a.azimuth)*c;var f,e,g,h;a=F.items;b.canvas.style.opacity=d/(2*C);b.shadowColor=this.blurColor;b.shadowBlur=C/2*this.blurSize;b.fillStyle=this.color;b.beginPath();d=0;for(c=a.length;d<c;d++)if(f=a[d],h=
f.footprint,V(h)){e=1>f.scale?f.height*f.scale:f.height;g=0;f.minHeight&&(g=1>f.scale?f.minHeight*f.scale:f.minHeight);switch(f.shape){case "cylinder":w.shadow(b,f.center,f.radius,f.radius,e,g);break;case "cone":w.shadow(b,f.center,f.radius,0,e,g);break;case "dome":w.shadow(b,f.center,f.radius,f.radius/2,e,g);break;case "sphere":w.shadow(b,f.center,f.radius,f.radius,e,g);break;case "pyramid":R.shadow(b,h,f.center,e,g);break;default:Z.shadow(b,h,f.holes,e,g)}switch(f.roofShape){case "cone":w.shadow(b,
f.center,f.radius,0,e+f.roofHeight,e);break;case "dome":w.shadow(b,f.center,f.radius,f.radius/2,e+f.roofHeight,e);break;case "pyramid":R.shadow(b,h,f.center,e+f.roofHeight,e)}}b.closePath();b.fill();b.shadowBlur=null;b.globalCompositeOperation="destination-out";b.beginPath();d=0;for(c=a.length;d<c;d++)if(f=a[d],h=f.footprint,V(h)&&!f.minHeight)switch(f.shape){case "cylinder":case "cone":case "dome":w.shadowMask(b,f.center,f.radius);break;default:Z.shadowMask(b,h,f.holes)}b.fillStyle="#00ff00";b.fill();
b.globalCompositeOperation="source-over"}}},Y={_idMapping:[null],reset:function(){this._idMapping=[null]},render:function(){if(!this._timer){var b=this;this._timer=setTimeout(function(){b._timer=null;b._render()},500)}},_render:function(){var b=this.context;b.clearRect(0,0,B,v);if(!(x<G||Q)){var a,c,d,f={x:M+p,y:N+n},e,g,h=F.items;h.sort(function(a,b){return a.minHeight-b.minHeight||S(b.center,f)-S(a.center,f)||b.height-a.height});for(var k=0,m=h.length;k<m;k++)if(a=h[k],g=a.hitColor)if(e=a.footprint,
V(e)){c=a.height;d=0;a.minHeight&&(d=a.minHeight);switch(a.shape){case "cylinder":w.hitArea(b,a.center,a.radius,a.radius,c,d,g);break;case "cone":w.hitArea(b,a.center,a.radius,0,c,d,g);break;case "dome":w.hitArea(b,a.center,a.radius,a.radius/2,c,d,g);break;case "sphere":w.hitArea(b,a.center,a.radius,a.radius,c,d,g);break;case "pyramid":R.hitArea(b,e,a.center,c,d,g);break;default:Z.hitArea(b,e,a.holes,c,d,g)}switch(a.roofShape){case "cone":w.hitArea(b,a.center,a.radius,0,c+a.roofHeight,c,g);break;
case "dome":w.hitArea(b,a.center,a.radius,a.radius/2,c+a.roofHeight,c,g);break;case "pyramid":R.hitArea(b,e,a.center,c+a.roofHeight,c,g)}}B&&v&&(this._imageData=this.context.getImageData(0,0,B,v).data)}},getIdFromXY:function(b,a){var c=this._imageData;if(c){var d=4*((a|0)*B+(b|0));return this._idMapping[c[d]|c[d+1]<<8|c[d+2]<<16]}},idToColor:function(b){var a=this._idMapping.indexOf(b);-1===a&&(this._idMapping.push(b),a=this._idMapping.length-1);return"rgb("+[a&255,a>>8&255,a>>16&255].join()+")"}},
$,A={container:document.createElement("DIV"),items:[],init:function(){this.container.style.pointerEvents="none";this.container.style.position="absolute";this.container.style.left=0;this.container.style.top=0;z.context=this.createContext(this.container);ia.context=this.createContext(this.container);r.context=this.createContext(this.container);Y.context=this.createContext()},render:function(b){Ga(function(){b||(z.render(),ia.render(),Y.render());r.render()})},createContext:function(b){var a=document.createElement("CANVAS");
a.style.transform="translate3d(0, 0, 0)";a.style.imageRendering="optimizeSpeed";a.style.position="absolute";a.style.left=0;a.style.top=0;var c=a.getContext("2d");c.lineCap="round";c.lineJoin="round";c.lineWidth=1;c.imageSmoothingEnabled=!1;this.items.push(a);b&&b.appendChild(a);return c},appendTo:function(b){b.appendChild(this.container)},remove:function(){this.container.parentNode.removeChild(this.container)},setSize:function(b,a){for(var c=0,d=this.items.length;c<d;c++)this.items[c].width=b,this.items[c].height=
a},setPosition:function(b,a){this.container.style.left=b+"px";this.container.style.top=a+"px"}};A.init();u=function(b){this.offset={x:0,y:0};b&&b.addLayer(this)};t=u.prototype=L.Layer?new L.Layer:{};t.addTo=function(b){b.addLayer(this);return this};t.onAdd=function(b){this.map=b;A.appendTo(b._panes.overlayPane);var a=this.getOffset(),c=b.getPixelOrigin();la({width:b._size.x,height:b._size.y});var d=c.y-a.y;p=c.x-a.x;n=d;ma(b._zoom);A.setPosition(-a.x,-a.y);b.on({move:this.onMove,moveend:this.onMoveEnd,
zoomstart:this.onZoomStart,zoomend:this.onZoomEnd,resize:this.onResize,viewreset:this.onViewReset,click:this.onClick},this);if(b.options.zoomAnimation)b.on("zoomanim",this.onZoom,this);b.attributionControl&&b.attributionControl.addAttribution('© <a href="http://osmbuildings.org">OSM Buildings</a>');F.update()};t.onRemove=function(){var b=this.map;b.attributionControl&&b.attributionControl.removeAttribution('© <a href="http://osmbuildings.org">OSM Buildings</a>');b.off({move:this.onMove,
moveend:this.onMoveEnd,zoomstart:this.onZoomStart,zoomend:this.onZoomEnd,resize:this.onResize,viewreset:this.onViewReset,click:this.onClick},this);b.options.zoomAnimation&&b.off("zoomanim",this.onZoom,this);A.remove()};t.onMove=function(b){b=this.getOffset();ea({x:this.offset.x-b.x,y:this.offset.y-b.y})};t.onMoveEnd=function(b){if(this.noMoveEnd)this.noMoveEnd=!1;else{var a=this.map;b=this.getOffset();var c=a.getPixelOrigin();this.offset=b;A.setPosition(-b.x,-b.y);ea({x:0,y:0});la({width:a._size.x,
height:a._size.y});a=c.y-b.y;p=c.x-b.x;n=a;A.render();F.update()}};t.onZoomStart=function(b){Q=!0;A.render()};t.onZoom=function(b){};t.onZoomEnd=function(b){b=this.map;var a=this.getOffset(),c=b.getPixelOrigin(),d=c.y-a.y;p=c.x-a.x;n=d;b=b._zoom;Q=!1;ma(b);F.update();A.render();this.noMoveEnd=!0};t.onResize=function(){};t.onViewReset=function(){var b=this.getOffset();this.offset=b;A.setPosition(-b.x,-b.y);ea({x:0,y:0})};t.onClick=function(b){var a=Y.getIdFromXY(b.containerPoint.x,b.containerPoint.y);
a&&ua({feature:a,lat:b.latlng.lat,lon:b.latlng.lng})};t.getOffset=function(){return L.DomUtil.getPosition(this.map._mapPane)};t.style=function(b){b=b||{};var a;if(a=b.color||b.wallColor)H=I.parse(a),ha=""+H.alpha(C),ba=H.lightness(0.8),aa=""+ba.alpha(C),O=H.lightness(1.2),X=""+O.alpha(C);b.roofColor&&(O=I.parse(b.roofColor),X=""+O.alpha(C));void 0!==b.shadows&&(z.enabled=!!b.shadows);A.render();return this};t.date=function(b){z.date=b;z.render();return this};t.load=function(b){F.load(b);return this};
t.set=function(b){F.set(b);return this};var ta=function(){};t.each=function(b){ta=function(a){return b(a)};return this};var ua=function(){};t.click=function(b){ua=function(a){return b(a)};return this};u.VERSION="0.2.2b";u.ATTRIBUTION='© <a href="http://osmbuildings.org">OSM Buildings</a>';ca.OSMBuildings=u})(this); | PypiClean
/netket-3.9.2.tar.gz/netket-3.9.2/netket/experimental/hilbert/spin_orbital_fermions.py |
from typing import Optional, List, Union
from collections.abc import Iterable
import numpy as np
from fractions import Fraction
from netket.hilbert.fock import Fock
from netket.hilbert.tensor_hilbert_discrete import TensorDiscreteHilbert
from netket.hilbert.homogeneous import HomogeneousHilbert
class SpinOrbitalFermions(HomogeneousHilbert):
r"""
Hilbert space for 2nd quantization fermions with spin `s` distributed among
`n_orbital` orbitals.
The number of fermions can be fixed globally or fixed on a per spin projection.
Note:
This class is simply a convenient wrapper that creates a Fock or TensorHilbert
of Fock spaces with occupation numbers 0 or 1.
It is mainly useful to avoid needing to specify the n_max=1 each time, and adds
convenient functions such as _get_index and _spin_index, which allow one to
index the correct TensorHilbert corresponding to the right spin projection.
"""
def __init__(
self,
n_orbitals: int,
s: float = None,
n_fermions: Optional[Union[int, List[int]]] = None,
):
r"""
Constructs the hilbert space for spin-`s` fermions on `n_orbitals`.
Samples of this hilbert space represent occupation numbers (0,1) of the
orbitals. The number of fermions may be fixed to `n_fermions`.
If the spin is different from 0 or None, n_fermions can also be a list to fix
the number of fermions per spin component.
Using this class, one can generate a tensor product of fermionic hilbert spaces
that distinguish particles with different spin.
Args:
n_orbitals: number of orbitals we store occupation numbers for. If the
number of fermions per spin is conserved, the different spin
configurations are not counted as orbitals and are handled differently.
s: spin of the fermions.
n_fermions: (optional) fixed number of fermions per spin (conserved). In the
case n_fermions is an int, the total number of fermions is fixed, while
for lists, the number of fermions per spin component is fixed.
Returns:
A SpinOrbitalFermions object
"""
if s is None:
total_size = n_orbitals
else:
spin_size = round(2 * s + 1)
total_size = n_orbitals * spin_size
if n_fermions is None:
hilbert = Fock(n_max=1, N=total_size)
elif isinstance(n_fermions, int):
hilbert = Fock(n_max=1, N=total_size, n_particles=n_fermions)
else:
if not isinstance(n_fermions, Iterable):
raise TypeError(
f"n_fermions (whose type is {type(n_fermions)}) "
"must be None, an integer, or an iterable of integers"
)
if s is None:
raise TypeError(
"n_fermions cannot be a sequence if no spin is specified."
)
if len(n_fermions) != spin_size:
raise ValueError(
"list of number of fermions must equal number of spin components"
)
spin_hilberts = [
Fock(n_max=1, N=n_orbitals, n_particles=Nf) for Nf in n_fermions
]
hilbert = TensorDiscreteHilbert(*spin_hilberts)
self._fock = hilbert
"""Internal representation of this Hilbert space (Fock or TensorHilbert)."""
# local states are the occupation numbers (0, 1)
local_states = np.array((0.0, 1.0))
# we use the constraints from the Fock spaces, and override `constrained`
super().__init__(local_states, N=total_size, constraint_fn=None)
self._s = s
self.n_fermions = n_fermions
self.n_orbitals = n_orbitals
# we copy the respective functions, independent of what hilbert space they are
self._numbers_to_states = self._fock._numbers_to_states
self._states_to_numbers = self._fock._states_to_numbers
self.all_states = self._fock.all_states
def __repr__(self):
    """Return a readable one-line summary of this Hilbert space."""
    pieces = [f"n_orbitals={self.n_orbitals}"]
    if self.n_fermions is not None:
        pieces.append(f"n_fermions={self.n_fermions}")
    if self.spin is not None:
        pieces.append(f"s={Fraction(self.spin)}")
    return "SpinOrbitalFermions(" + ", ".join(pieces) + ")"
@property
def spin(self) -> float:
    """Returns the spin of the fermions"""
    return self._s

@property
def size(self) -> int:
    """Size of the hilbert space. In case the fermions have spin `s`, the size is
    (2*s+1)*n_orbitals"""
    # delegated to the internal Fock/TensorHilbert representation
    return self._fock.size

@property
def _attrs(self) -> tuple:
    # tuple of defining attributes, used for equality/hashing of Hilbert spaces
    return (self.spin, self.n_fermions, self.n_orbitals)

@property
def constrained(self) -> bool:
    # the space is constrained iff the number of fermions is fixed
    return self.n_fermions is not None

@property
def is_finite(self) -> bool:
    # finiteness is inherited from the internal representation
    return self._fock.is_finite

@property
def n_states(self) -> int:
    # total number of basis states, delegated to the internal representation
    return self._fock.n_states
@property
def _n_spin_states(self) -> int:
    """Number of spin projections, i.e. 2s + 1."""
    s = self.spin
    if s is None:
        raise Exception(
            "cannot request number of spin states for spinless fermions"
        )
    return round(2 * s + 1)
def _spin_index(self, sz: float) -> int:
    """return the index of the Fock block corresponding to the sz projection

    For spinless fermions only ``sz is None`` or ``sz == 0`` is a valid
    request and the index is always 0; otherwise blocks are ordered from
    ``-s`` to ``+s``.
    """
    if self.spin is None:
        # BUGFIX: the original condition used `or`, which raised for the
        # valid call sz=0 (first operand True) and evaluated
        # np.isclose(None, 0) -> TypeError for sz=None.
        if sz is not None and not np.isclose(sz, 0):
            raise Exception("cannot request spin index of spinless fermions")
        return 0
    else:
        # shift sz from [-s, +s] to a non-negative block index [0, 2s]
        return round(sz + self.spin)
def states_to_local_indices(self, x):
    """Convert state values to local indices by delegating to the
    underlying Fock representation."""
    fock = self._fock
    return fock.states_to_local_indices(x)
def _get_index(self, orb: int, sz: float = None):
    """go from (site, spin_projection) indices to index in the hilbert space"""
    if orb >= self.n_orbitals:
        raise IndexError("requested orbital index outside of the hilbert space")
    # each spin projection occupies a contiguous block of n_orbitals entries
    block = self._spin_index(sz)
    return block * self.n_orbitals + orb
/CmdUtils-0.1.tar.gz/CmdUtils-0.1/cmdutils/__init__.py | import optparse
import os
import sys
from cmdutils.log import Logger
class CommandError(Exception):
    """
    User-facing error: raised whenever there's some user error, so the
    message (rather than a traceback) is shown to the user.

    ``show_usage`` controls whether the command usage/help text should be
    printed along with the message.
    """

    def __init__(self, msg, show_usage=True):
        # keep the message on the Exception base so str(e) works as usual
        Exception.__init__(self, msg)
        self.show_usage = show_usage
class OptionParser(optparse.OptionParser):
    """
    Subclass of `optparse.OptionParser` which adds min/max positional arguments,
    version loading from Setuptools distribution, and logging initialization.

    Also see the `add_verbose` method for adding logging-related verbosity controls
    """

    def __init__(self,
                 usage=None,
                 option_list=None,
                 option_class=optparse.Option,
                 version=None,
                 version_package=None,
                 conflict_handler="error",
                 description=None,
                 formatter=None,
                 add_help_option=True,
                 prog=None,
                 max_args=None,
                 min_args=None,
                 use_logging=False):
        """Create the parser.

        `version_package` asks Setuptools for the installed distribution of
        that name and builds a version string from it; it is mutually
        exclusive with `version`.  `min_args`/`max_args` bound the number of
        positional arguments accepted by `parse_args` (None = unbounded).
        """
        if version_package:
            if version:
                raise TypeError(
                    "You may not give both a version and version_package argument")
            import pkg_resources
            dist = pkg_resources.get_distribution(version_package)
            # e.g. "pkg 1.0 from /path (python 3.9)"
            version='%s from %s (python %s)' % (
                dist, dist.location, '%s.%s' % (sys.version_info[:2]))
        self.max_args = max_args  # max number of positional args (None = unlimited)
        self.min_args = min_args  # min number of positional args (None = no minimum)
        self.use_logging = use_logging
        optparse.OptionParser.__init__(
            self, usage=usage, option_list=option_list, option_class=option_class,
            version=version, conflict_handler=conflict_handler,
            description=description, formatter=formatter,
            add_help_option=add_help_option, prog=prog)

    def add_verbose(self, add_quiet=True, add_log=False):
        """
        Adds a ``--verbose/-v`` option.  If `add_quiet` is true (the
        default) then ``--quiet/-q`` is also added.

        If `add_log` is true (default false) then we also add a
        ``--log/-l`` option.  Expects a log file argument, which will
        be logged to at maximum verbosity in addition to any normal
        (stdout) logging.
        """
        # counting options: repeating -v / -q increments the counters, which
        # CmdValues._create_logger later turns into a log level
        self.add_option(
            '-v', '--verbose',
            dest="verbosity",
            help="Make the command more verbose (use multiple times to increase verbosity)",
            default=0,
            action="count")
        if add_quiet:
            self.add_option(
                '-q', '--quiet',
                dest="quietness",
                default=0,
                help="Make the command quieter (use multiple times to increase quietness)",
                action="count")
        if add_log:
            self.add_log()

    def add_log(self, log_file=None):
        """
        Adds a ``--log/-l`` option.  Expects a log file argument, which
        will be logged to at VERBOSE in addition to any
        normal (stdout) logging.
        """
        self.add_option(
            '-l', '--log',
            dest="log_file",
            metavar="FILENAME",
            help="Log verbosely to the given file",
            default=log_file)

    def get_default_values(self):
        """
        Overridden to make ``options`` a `CmdValues` instance, with
        logger attributes.
        """
        values = optparse.OptionParser.get_default_values(self)
        # re-wrap the plain Values object so options gains the lazy `logger`
        values = CmdValues(values.__dict__)
        return values

    def parse_args(self, args=None, values=None):
        """
        Overridden to do min/max argument checking.
        """
        options, args = optparse.OptionParser.parse_args(self, args, values)
        error = None
        if self.min_args is not None and len(args) < self.min_args:
            error = 'You must provide at least %s arguments (%s given)' % (
                self.min_args, len(args))
        # NOTE: if both bounds are violated only the max-args message survives
        if self.max_args is not None and len(args) > self.max_args:
            error = 'You must provide no more than %s arguments (%s given)' % (
                self.max_args, len(args))
        if error is not None:
            logger = getattr(options, 'logger', None)
            if logger:
                logger.debug('Arguments given: %s' % args)
            # OptionParser.error prints the message and exits with status 2
            self.error(error)
        return options, args
class CmdValues(optparse.Values):
    """
    ``options`` is typically an instance of this class.

    Behaves like a normal ``optparse.Values`` object but adds a lazily
    created ``logger`` attribute, built from the parsed verbosity/quietness
    counters (see `_create_logger`).
    """

    # cached Logger instance; created on first access of the `logger` property
    _logger = None

    def logger__get(self):
        # lazily build (and cache) the logger the first time it is requested
        if self._logger is not None:
            return self._logger
        self._logger = self._create_logger()
        return self._logger

    def logger__set(self, value):
        self._logger = value

    def logger__del(self):
        # "deleting" the logger just clears the cache; it is rebuilt on access
        self._logger = None

    logger = property(logger__get, logger__set, logger__del)

    def _create_logger(self):
        """Build a Logger whose console level is derived from the -v/-q
        counters and which optionally also logs verbosely to
        ``self.log_file``."""
        logger = Logger([])
        # start from the default NOTIFY level; each -v lowers the threshold
        # (more output), each -q raises it (less output)
        verbosity = Logger.LEVELS.index(Logger.NOTIFY)
        verbosity -= getattr(self, 'verbosity', 0)
        verbosity += getattr(self, 'quietness', 0)
        level = Logger.level_for_integer(verbosity)
        logger.consumers.append((level, sys.stdout))
        if getattr(self, 'log_file', None):
            log_file = self.log_file
            log_dir = os.path.dirname(os.path.abspath(log_file))
            if not os.path.exists(log_dir):
                logger.notify('Creating directory for log file: %s' % log_dir)
                os.makedirs(log_dir)
            # append mode so repeated runs accumulate in the same file
            f = open(log_file, 'a')
            # the file consumer is at least as verbose as DEBUG
            logfile_level = min(Logger.level_for_integer(verbosity),
                                Logger.DEBUG)
            logger.consumers.append((logfile_level, f))
        return logger
def run_main(main, parser, args=None):
    """
    Runs the `main` function, which should have a signature like
    ``main(options, args)``, and should return an exit code (0 or None means success).

    Also the `main` function can raise `CommandError`, in which case its
    message (and optionally the usage text) is printed and the process
    exits with status 3.
    """
    # NOTE(review): this module uses Python 2 syntax (``except CommandError, e``,
    # the ``print`` statement, the StringIO module) and will not run on Python 3.
    if args is None:
        args = sys.argv[1:]
    try:
        options, args = parser.parse_args(args)
        result = main(options, args)
    except CommandError, e:
        print str(e)
        if e.show_usage:
            parser.print_help()
        # NOTE(review): if parse_args itself raised CommandError, `options`
        # is unbound here and this getattr raises NameError — confirm intended.
        logger = getattr(options, 'logger', None)
        if logger:
            import traceback
            import StringIO
            out = StringIO.StringIO()
            traceback.print_exc(file=out)
            logger.debug('Failing exception:\n%s' % out.getvalue())
        result = 3
    # any truthy exit code terminates the process with that status
    if result:
        sys.exit(result)
def main_func(parser):
    """
    Decorator factory turning a ``main(options, args)`` function into a
    zero/one-argument console entry point that runs through `run_main`.

    Use like::

        @main_func(parser)
        def main(options, args):
            ...
    """
    def wrap(func):
        def main(args=None):
            run_main(func, parser, args)
        return main
    return wrap
/MetaCalls-0.0.5-cp310-cp310-manylinux2014_x86_64.whl/metacalls/node_modules/emoji-regex/README.md | # emoji-regex [](https://travis-ci.org/mathiasbynens/emoji-regex)
_emoji-regex_ offers a regular expression to match all emoji symbols (including textual representations of emoji) as per the Unicode Standard.
This repository contains a script that generates this regular expression based on [the data from Unicode v12](https://github.com/mathiasbynens/unicode-12.0.0). Because of this, the regular expression can easily be updated whenever new emoji are added to the Unicode standard.
## Installation
Via [npm](https://www.npmjs.com/):
```bash
npm install emoji-regex
```
In [Node.js](https://nodejs.org/):
```js
const emojiRegex = require('emoji-regex');
// Note: because the regular expression has the global flag set, this module
// exports a function that returns the regex rather than exporting the regular
// expression itself, to make it impossible to (accidentally) mutate the
// original regular expression.
const text = `
\u{231A}: ⌚ default emoji presentation character (Emoji_Presentation)
\u{2194}\u{FE0F}: ↔️ default text presentation character rendered as emoji
\u{1F469}: 👩 emoji modifier base (Emoji_Modifier_Base)
\u{1F469}\u{1F3FF}: 👩🏿 emoji modifier base followed by a modifier
`;
const regex = emojiRegex();
let match;
while (match = regex.exec(text)) {
const emoji = match[0];
console.log(`Matched sequence ${ emoji } — code points: ${ [...emoji].length }`);
}
```
Console output:
```
Matched sequence ⌚ — code points: 1
Matched sequence ⌚ — code points: 1
Matched sequence ↔️ — code points: 2
Matched sequence ↔️ — code points: 2
Matched sequence 👩 — code points: 1
Matched sequence 👩 — code points: 1
Matched sequence 👩🏿 — code points: 2
Matched sequence 👩🏿 — code points: 2
```
To match emoji in their textual representation as well (i.e. emoji that are not `Emoji_Presentation` symbols and that aren’t forced to render as emoji by a variation selector), `require` the other regex:
```js
const emojiRegex = require('emoji-regex/text.js');
```
Additionally, in environments which support ES2015 Unicode escapes, you may `require` ES2015-style versions of the regexes:
```js
const emojiRegex = require('emoji-regex/es2015/index.js');
const emojiRegexText = require('emoji-regex/es2015/text.js');
```
## Author
| [](https://twitter.com/mathias "Follow @mathias on Twitter") |
|---|
| [Mathias Bynens](https://mathiasbynens.be/) |
## License
_emoji-regex_ is available under the [MIT](https://mths.be/mit) license.
| PypiClean |
/Geoarchpy-1.0.2.tar.gz/Geoarchpy-1.0.2/ArchPy/base.py | import numpy as np
import matplotlib
from matplotlib import colors
import matplotlib.pyplot as plt
import pyvista as pv
import scipy
from scipy.ndimage import uniform_filter
import copy
import time
import shapely
import shapely.geometry
import sys
#geone
import geone
import geone.covModel as gcm
import geone.grf as grf
from geone import img
import geone.imgplot as imgplt
import geone.imgplot3d as imgplt3
import geone.deesseinterface as dsi
import geone.geosclassicinterface as gci
#ArchPy modules
from ArchPy.ineq import *
from ArchPy.data_transfo import *
from ArchPy.tpgs import * #truncated plurigraussian
from ArchPy.inputs import * # inputs utilities
##### functions ######
def Arr_replace(arr, dic):
    """
    Replace values in an array according to a mapping dictionary.

    Parameters
    ----------
    arr : np.ndarray
        Any numpy array that contains values; NaN entries are preserved.
    dic : dict
        A dictionary with existing arr values as keys and replacement
        values as dictionary values.

    Returns
    -------
    nd.array
        A new float array of the same shape with values replaced.
    """
    flat = arr.flatten()
    out = np.zeros(flat.shape[0])
    for idx, val in enumerate(flat):
        # NaN is the only value for which val != val; keep it as NaN
        out[idx] = dic[val] if val == val else np.nan
    return out.reshape(arr.shape)
def get_size(obj, seen=None):
    """Recursively finds size of objects

    Parameters
    ----------
    obj : any python object
        The object, variable or anything whose total size we want.
    seen : set, optional
        Set of already-visited object ids (used internally to handle
        shared and self-referential objects).

    Returns
    -------
    int
        Size of the object in bytes.

    Note
    ----
    Function originally taken from stack overflow, written by Aaron Hall
    """
    if seen is None:
        seen = set()
    obj_id = id(obj)
    if obj_id in seen:
        # already counted: avoid double counting and infinite recursion
        return 0
    # mark as seen *before* recursing to gracefully handle self-references
    seen.add(obj_id)
    total = sys.getsizeof(obj)
    if isinstance(obj, dict):
        total += sum(get_size(v, seen) for v in obj.values())
        total += sum(get_size(k, seen) for k in obj.keys())
    elif hasattr(obj, '__dict__'):
        total += get_size(obj.__dict__, seen)
    elif hasattr(obj, '__iter__') and not isinstance(obj, (str, bytes, bytearray)):
        total += sum(get_size(i, seen) for i in obj)
    return total
def resample_to_grid(xc, yc, rxc, ryc, raster_band, method="nearest"):
    """
    Function to resample the raster data to a
    user supplied grid of x, y coordinates.

    x, y coordinate arrays should correspond
    to grid vertices

    Parameters
    ----------
    xc: np.ndarray or list
        an array of x-cell centers
    yc: np.ndarray or list
        an array of y-cell centers
    rxc: ndarray
        raster xcell centers
    ryc: ndarray
        raster ycell centers
    raster_band: 2D ndarray
        raster band to re-sample
    method: str
        scipy interpolation method options

        "linear" for bi-linear interpolation
        "nearest" for nearest neighbor
        "cubic" for bi-cubic interpolation

    Returns
    -------
    np.array

    Note
    ----
    Function taken from flopy (3.3.4)
    """
    from scipy.interpolate import griddata

    out_shape = xc.shape
    # flatten both the target grid and the raster grid for griddata
    targets = (xc.flatten(), yc.flatten())
    sources = (rxc.flatten(), ryc.flatten())
    values = raster_band.flatten()
    resampled = griddata(sources, values, targets, method=method)
    return resampled.reshape(out_shape)
##### ArchPy functions #####
def interp2D(litho, xg, yg, xu, verbose=0, ncpu=1, mask2D=None, seed=123456789, **kwargs):
    """
    Function to realize a 2D interpolation based on a
    multitude of methods (scipy.interpolate, geone (kriging, MPS, ...))

    Parameters
    ----------
    litho: :class:`Surface` object
        ArchPy surface object on which we want to interpolate
        the surface
    xg: ndarray of size nx
        central coordinates in x direction
    yg: ndarray of size ny
        central coordinates in y direction
    xu: ndarray of size (n, 2)
        position at which we want to know
        the estimation (not used for every interp method)
    **kwargs
        Different parameters for surface interpolation

        - nit: int
            Number of iterations for gibbs sampler
            (depends on the number of data)
        - nmax: int
            Number of neighbours in grf
            with inequalities (to speed up the simulations)
        - krig_type: str
            Can be either "ordinary_kigring" or "simple kriging" method
            for kriging interpolation and grf with inequalities
        - mean: float or 2D array
            Mean value to use with grf methods
        - unco: bool
            Unconditional or not
        - All other MPS and GRF parameters (see geone documentation)

    Returns
    -------
    ndarray
        array of same size as x, interpolated values
    """
    # --- gather hard data (merge stochastic hard data when present) ---
    if hasattr(litho, "sto_x") and hasattr(litho, "sto_y") and hasattr(litho, "sto_z") and hasattr(litho, "sto_ineq"):
        # merge sto and real hd
        l_x = litho.x + litho.sto_x
        l_y = litho.y + litho.sto_y
        l_z = litho.z + litho.sto_z
        l_ineq = litho.ineq + litho.sto_ineq
    else:
        l_x = litho.x
        l_y = litho.y
        l_z = litho.z
        l_ineq = litho.ineq

    xp=np.array(l_x)
    yp=np.array(l_y)
    zp=np.array(l_z)
    ineq_data=l_ineq #inequality data
    method=litho.int_method

    ##grid
    xg.sort()
    yg.sort()
    nx=len(xg)-1
    ny=len(yg)-1
    sx=xg[1] - xg[0]
    sy=yg[1] - yg[0]
    ox=xg[0]
    oy=yg[0]

    ##kwargs
    kwargs_def_grf={"nit": 50, "nmax": 20, "krig_type": "simple_kriging", # number of gibbs sampler iterations, number of neigbors, krig type
                    "grf_method": "fft", "mean": None,"unco": False} # and grf method, mean and unconditional flag
    kwargs_def_MPS={"unco": False,
                    "xr": 1, "yr": 1, "zr": 1, "maxscan": 0.25, "neig": 24, "thresh": 0.05, "xloc": False, "yloc": False, "zloc": False,
                    "homo_usage": 1, "rot_usage": 1, "rotAziLoc": False, "rotAzi": 0, "rotDipLoc": False, "rotDip": 0, "rotPlungeLoc": False, "rotPlunge": 0,
                    "radiusMode": "large_default", "rx": nx*sx, "ry": ny*sy, "rz": 1, "anisotropyRatioMode": "one", "ax": 1, "ay": 1, "az": 1,
                    "angle1": 0, "angle2": 0, "angle3": 0,
                    "relativeDistanceFlag": False, "rescalingMode": 'min_max', "TargetMin": None, "TargetMax": None, "TargetMean": None, "TargetLength": None} #continous params

    kw={}
    #assign default values
    if method in ["kriging", "grf", "grf_ineq"]:
        kw=kwargs_def_grf
    elif method.lower() == "mps":
        kw=kwargs_def_MPS
    for k, v in kw.items():
        if k not in kwargs.keys():
            kwargs[k]=v

    #if no data --> unco set to True
    if len(xp)+len(ineq_data) == 0:
        kwargs["unco"]=True
    else:
        kwargs["unco"]=False

    #mask
    if "mask" in kwargs.keys():
        mask2D = kwargs["mask"] & mask2D

    ##DATA
    # handle inequalities (setup equality points to lower/upper bounds of inequalities
    # if krig ineq or GRF ineq are not used for the interpolation)
    if len(litho.ineq) == 0 and method == "grf_ineq":
        # no inequality data -> fall back to plain grf
        method = "grf"

    x_in=[]
    y_in=[]
    z_in=[]
    if method in ["kriging", "cubic", "linear", "nearest", "grf"]:
        if litho.get_surface_covmodel(vb=0) is not None and len(ineq_data) > 0:
            # gibbs sampler to estimate values at inequality point, requires a covmodel
            eq_d=np.array([xp, yp, zp]).T
            dmy=np.nan*np.ones([2, len(litho.x)+len(litho.sto_x)]).T
            eq_d=np.concatenate([eq_d, dmy], 1) # append all data together in right format
            all_data=np.concatenate([eq_d, np.array(ineq_data)])
            all_data=ArchPy.ineq.Gibbs_estimate(all_data, litho.get_surface_covmodel(), krig_type="simple_kriging", nit=50) # Gibbs sampler
            xp=all_data[:, 0]
            yp=all_data[:, 1]
            zp=all_data[:, 2]
        else:
            for in_data in ineq_data: # handle inequality with non ineq methods
                # NaN bound means "no bound" on that side (value != value test)
                if (in_data[3] == in_data[3]) & (in_data[4] != in_data[4]): # inf ineq
                    x_in.append(in_data[0])
                    y_in.append(in_data[1])
                    z_in.append(in_data[3])
                elif (in_data[3] != in_data[3]) & (in_data[4] == in_data[4]): # sup ineq
                    x_in.append(in_data[0])
                    y_in.append(in_data[1])
                    z_in.append(in_data[4])
                elif (in_data[3] == in_data[3]) & (in_data[4] == in_data[4]): # sup and inf ineq
                    x_in.append(in_data[0])
                    y_in.append(in_data[1])
                    z_in.append((in_data[3]+in_data[4])/2)

            # append data
            xp=np.concatenate((xp, x_in))
            yp=np.concatenate((yp, y_in))
            zp=np.concatenate((zp, z_in))

        data=np.concatenate([xp.reshape(-1, 1), yp.reshape(-1, 1)], axis=1)

    ## dealt with inequality data in the right format
    elif method.lower() in ["grf_ineq", "mps"]:
        # equality data
        x_eq=np.array([xp, yp]).T
        v_eq=zp

        if len(litho.ineq) == 0:
            xIneq_min = None
            vIneq_min = None
            xIneq_max = None
            vIneq_max = None
        elif len(litho.ineq) > 0:
            #ineq
            ineq_data=np.array(litho.ineq)
            mask=(ineq_data[:, 3] == ineq_data[:, 3]) # inf boundary
            xIneq_min=ineq_data[:,: 2][mask]
            vIneq_min=ineq_data[:, 3][mask]
            mask=(ineq_data[:, 4] == ineq_data[:, 4]) # sup boundary
            xIneq_max=ineq_data[:,: 2][mask]
            vIneq_max=ineq_data[:, 4][mask]

    ### interpolations methods ###
    if method.lower() in ["linear", "cubic", "nearest"]: # spline methods
        if kwargs["unco"] == False:
            s=scipy.interpolate.griddata(np.array([xp, yp]).T, zp, xu, method=method, fill_value=np.mean(zp))
            s=s.reshape(ny, nx)
        else:
            raise ValueError ("Error: No data point found or unconditional spline interpolation requested")

    ## MULTI-GAUSSIAN ### covmodel required
    elif method.lower() in ["kriging", "grf", "grf_ineq"]:
        covmodel=copy.deepcopy(litho.get_surface_covmodel())
        if method.lower() == "kriging":
            if kwargs["unco"] == False:
                s, var=gcm.krige(data, zp, xu, covmodel, method=kwargs["krig_type"])
                s=s.reshape(ny, nx)
            else:
                raise ValueError ("Error: No data point found or unconditional kriging requested")

        elif method.lower() == "grf":
            if kwargs["unco"] == False: #conditional
                #transform data into normal distr
                if litho.N_transfo:
                    if hasattr(litho, "distribution"):
                        di = litho.distribution
                    else:
                        # NOTE(review): kwargs["tau"] has no default — this branch
                        # requires a "tau" kwarg; confirm against callers
                        di=store_distri(zp, t=kwargs["tau"])
                    norm_zp=NScore_trsf(zp, di)
                    # need to recompute variogram TO DO
                    if kwargs["grf_method"] == "fft":
                        np.random.seed(int(seed)) # set seed for fft
                        sim=geone.grf.grf2D(covmodel, [nx, ny], [sx, sy], [ox, oy], x=data, v=norm_zp, nreal=1, mean=0, var=1, printInfo=False)
                        s=NScore_Btrsf(sim[0].flatten(), di)# back transform
                        s=s.reshape(ny, nx)
                    elif kwargs["grf_method"] == "sgs":
                        sim=gci.simulate2D(covmodel, [nx, ny], [sx, sy], [ox, oy], x=data, v=norm_zp, nreal=1, mean=0, var=1, verbose=verbose, nthreads=ncpu, seed=seed, mask=mask2D)
                        s=NScore_Btrsf(sim["image"].val[0,0].flatten(), di)# back transform
                        s=s.reshape(ny, nx)
                else: # no normal score
                    if "mean" not in kwargs.keys():
                        mean = np.mean(zp)
                    else:
                        mean = kwargs["mean"]
                    if kwargs["grf_method"] == "fft":
                        np.random.seed(int(seed)) # set seed for fft
                        sim=geone.grf.grf2D(covmodel, [nx, ny], [sx, sy], [ox, oy], x=data, v=zp, nreal=1, mean=mean, printInfo=False)
                        s=sim[0]
                    elif kwargs["grf_method"] == "sgs":
                        sim=gci.simulate2D(covmodel, [nx, ny], [sx, sy], [ox, oy], x=data, v=zp, nreal=1, mean=mean, verbose=verbose, nthreads=ncpu, seed=seed, mask=mask2D)
                        s=sim["image"].val[0,0]
            else: # unconditional
                if kwargs["grf_method"] == "fft":
                    np.random.seed(int(seed)) # set seed for fft
                    sim=geone.grf.grf2D(covmodel, [nx, ny], [sx, sy], [ox, oy], nreal=1, mean=kwargs["mean"], printInfo=False)
                    s=sim[0]
                elif kwargs["grf_method"] == "sgs":
                    sim=gci.simulate2D(covmodel, [nx, ny], [sx, sy], [ox, oy], nreal=1, mean=kwargs["mean"], verbose=verbose, nthreads=ncpu, seed=seed, mask=mask2D)
                    s=sim["image"].val[0,0]

        elif method.lower() == "grf_ineq":
            # Normal transform
            if litho.N_transfo:
                if hasattr(litho, "distribution"):
                    di = litho.distribution
                else:
                    di=store_distri(v_eq, t=kwargs["tau"])
                v_eq=NScore_trsf(v_eq, di)
                vIneq_min=NScore_trsf(vIneq_min, di)
                vIneq_max=NScore_trsf(vIneq_max, di)
                var=1
                mean=0
                # need to recompute variogram TO DO
            else:
                var=covmodel.sill()
                # define mean
                if "mean" not in kwargs:
                    if len(v_eq) == 0: # if only inequality data what to do ??
                        mean=np.mean(np.concatenate((vIneq_max, vIneq_min)))
                    else:
                        mean=(np.mean(zp))
                else:
                    mean = kwargs["mean"]

            sim=gci.simulate2D(covmodel, (nx, ny), (sx, sy), (ox, oy), method=kwargs["krig_type"], mean=mean,
                               x=x_eq, v=v_eq,
                               xIneqMin=xIneq_min, vIneqMin=vIneq_min,
                               xIneqMax=xIneq_max, vIneqMax=vIneq_max,
                               searchRadiusRelative=1, verbose=verbose,
                               nGibbsSamplerPathMin=kwargs["nit"],nGibbsSamplerPathMax=2*kwargs["nit"],
                               seed=seed, nneighborMax=kwargs["nmax"], nthreads=ncpu, mask=mask2D)["image"].val[0, 0]

            if litho.N_transfo:
                s=NScore_Btrsf(sim.flatten(), di)
                s=s.reshape(ny, nx)
            else:
                s=sim

    elif method.lower() == "mps":
        assert isinstance(kwargs["TI"], geone.img.Img), "TI is not a geone image object"

        #load parameters
        TI=kwargs["TI"] #get TI

        #extract hard data
        eq_d=np.concatenate([x_eq, 0.5*np.ones([v_eq.shape[0], 1]), v_eq.reshape(-1, 1), np.nan*np.ones([v_eq.shape[0], 2])], axis=1)
        if len(litho.ineq) == 0:
            sup_d = None
            inf_d = None
            all_data = eq_d
            varname=['x', 'y', 'z', 'code'] # list of variable names
            hd = all_data[:, :4].T
            pt = img.PointSet(npt=hd.shape[1], nv=4, val=hd, varname=varname)
        else:
            sup_d=np.concatenate([xIneq_max, 0.5*np.ones([vIneq_max.shape[0], 1]), np.nan*np.ones([vIneq_max.shape[0], 2]), vIneq_max.reshape(-1, 1)], axis=1)
            inf_d=np.concatenate([xIneq_min, 0.5*np.ones([vIneq_min.shape[0], 1]), np.nan*np.ones([vIneq_min.shape[0], 1]), vIneq_min.reshape(-1, 1), np.nan*np.ones([vIneq_min.shape[0], 1])], axis=1)
            all_data=np.concatenate([eq_d, sup_d, inf_d])
            varname=['x', 'y', 'z', 'code', 'code_min', 'code_max'] # list of variable names
            hd=all_data.T
            pt=img.PointSet(npt=hd.shape[1], nv=6, val=hd, varname=varname)

        #define mode (only rescaling min-max)
        if kwargs["TargetMin"] is None:
            kwargs["TargetMin"]=np.nanmin(all_data[:,3: ])
        if kwargs["TargetMax"] is None:
            # BUGFIX: this previously assigned kwargs["TargetMin"], clobbering
            # the minimum with the maximum value and leaving TargetMax at None.
            kwargs["TargetMax"]=np.nanmax(all_data[:,3: ])

        #DS research
        snp=dsi.SearchNeighborhoodParameters(
            radiusMode=kwargs["radiusMode"], rx=kwargs["rx"], ry=kwargs["ry"], rz=kwargs["rz"],
            anisotropyRatioMode=kwargs["anisotropyRatioMode"], ax=kwargs["ax"], ay=kwargs["ay"], az=kwargs["az"],
            angle1=kwargs["angle1"], angle2=kwargs["angle2"], angle3=kwargs["angle3"])

        #DS input
        deesse_input=dsi.DeesseInput(
            nx=nx, ny=ny, nz=1, # dimension of the simulation grid (number of cells)
            sx=sx, sy=sy, sz=1, # cells units in the simulation grid (here are the default values)
            ox=ox, oy=oy, oz=0.5, # origin of the simulation grid (here are the default values)
            nv=1, varname='code', # number of variable(s), name of the variable(s)
            nTI=1, TI=TI, # number of TI(s), TI (class dsi.Img)
            dataPointSet=pt, # hard data (optional)
            searchNeighborhoodParameters=snp,
            homothetyUsage=kwargs["homo_usage"],
            homothetyXLocal=kwargs["xloc"],
            homothetyXRatio=kwargs["xr"],
            homothetyYLocal=kwargs["yloc"],
            homothetyYRatio=kwargs["yr"],
            homothetyZLocal=kwargs["zloc"],
            homothetyZRatio=kwargs["zr"],
            rotationUsage=kwargs["rot_usage"], # tolerance or not
            rotationAzimuthLocal=kwargs["rotAziLoc"], # rotation according to azimuth: global
            rotationAzimuth=kwargs["rotAzi"],
            rotationDipLocal=kwargs["rotDipLoc"],
            rotationDip=kwargs["rotDip"],
            rotationPlungeLocal=kwargs["rotPlungeLoc"],
            rotationPlunge=kwargs["rotPlunge"],
            distanceType='continuous', # distance type: proportion of mismatching nodes (categorical var., default)
            relativeDistanceFlag=kwargs["relativeDistanceFlag"],
            rescalingMode=kwargs["rescalingMode"],
            rescalingTargetMin=kwargs["TargetMin"], # min of the target interval
            rescalingTargetMax=kwargs["TargetMax"], #max of the target interval
            rescalingTargetMean=kwargs["TargetMean"],
            rescalingTargetLength=kwargs["TargetLength"],
            nneighboringNode=kwargs["neig"], # max. number of neighbors (for the patterns)
            distanceThreshold=kwargs["thresh"], # acceptation threshold (for distance between patterns)
            maxScanFraction=kwargs["maxscan"], # max. scanned fraction of the TI (for simulation of each cell)
            seed=np.random.randint(1e6),
            npostProcessingPathMax=1, # number of post-processing path(s)
            nrealization=1) # number of realization(s))

        # Run deesse
        deesse_output=dsi.deesseRun(deesse_input, nthreads=ncpu, verbose=2)
        sim=deesse_output["sim"][0]
        s=sim.val[0,0]

    else:
        raise ValueError ("choose proper interpolation method")

    return s
def split_logs(bh):
    """
    Take a raw borehole with hierarchical units mixed
    and sort them by group and hierarchy.

    Parameters
    ----------
    bh: :py:class:`borehole` object

    Returns
    -------
    list
        A list of new boreholes, one per hierarchical group found in the log.
    """
    l_bhs=[]
    l_logs=[] #list of logs
    bhID=bh.ID
    bhx, bhy, bhz, depth=bh.x, bh.y, bh.z, bh.depth
    log_s=bh.log_strati
    l_logs=[]

    #logs stratis
    # one sub-log per hierarchy level (level 1 = master pile, deeper = sub-units)
    h_max=max([i[0].get_h_level() for i in log_s if i[0] is not None])
    for i in range(h_max):
        l_logs.append([])

    def bidule(s, h_lev):
        """
        Recursive operation to identify log strati at lower order level from higher levels.
        If a borehole contains a certain subsubunit (e.g. B11), this information must
        also be transferred to its parents (unit B1 and then B).
        """
        # level-1 entries are [unit, top]; deeper levels are [unit, top, bot]
        if h_lev > 1:
            if s in [i[0] for i in l_logs[h_lev-1]]: # sub_unit already present
                if s == l_logs[h_lev-1][-1][0]: # last unit added is the same --> adapt bot
                    l_logs[h_lev-1][-1][-1]=bot # change bot
                elif s.mummy_unit.SubPile.nature == "3d_categorical":
                    # unit present but not last: re-append for categorical sub-piles
                    l_logs[h_lev-1].append([s, top, bot])
            elif s not in [i[0] for i in l_logs[h_lev-1]]:
                l_logs[h_lev-1].append([s, top, bot])
            h_lev -= 1
            # propagate the information one level up (to the parent unit)
            bidule(s.mummy_unit, h_lev)
        elif h_lev == 1:
            if s not in [i[0] for i in l_logs[h_lev-1]]: # add unit only if it's not in log
                l_logs[h_lev-1].append([s, top])

    # walk the raw log and dispatch each interval to its hierarchy level
    for i in range(len(log_s)):
        s=log_s[i] #get unit and contact
        if i == len(log_s)-1: #if last unit
            bot = bhz - depth
        else:
            s_aft = log_s[i+1]
            # bottom of this interval = top of the next one
            bot = s_aft[1]
        unit=s[0] #get unit
        if unit is not None:
            h_lev=unit.get_h_level()
        else:
            h_lev=1
        top =s[1]
        if h_lev == 1:
            l_logs[h_lev-1].append([unit, top])
        elif h_lev > 1:
            bidule(unit, h_lev)

    #1st order log
    bh=borehole(bhID, bhID, bhx, bhy, bhz, depth, log_strati=l_logs[0], log_facies=bh.log_facies) # first borehole
    l_bhs.append(bh)

    #2 and more order logs
    for log in l_logs[1: ]: #loop over different logs hierarchy levels
        i_0=0
        if len(log) > 1: #more than 1 unit is present
            for i in range(1, len(log)):
                unit=log[i-1][0]
                unit_after=log[i][0]
                if unit.mummy_unit != unit_after.mummy_unit: # check that two successive units does not belong to the same hierarchic group
                    # close the current group: emit one borehole for it
                    new_log=log[i_0: i]
                    depth=new_log[0][1] - new_log[-1][2]
                    log_strati=[s[: -1] for s in new_log]
                    bh=borehole(str(bhID)+"_"+unit.name, str(bhID)+"_"+unit.name, bhx, bhy, log_strati[0][1], depth, log_strati=log_strati)
                    l_bhs.append(bh)
                    i_0=i
                if i == len(log)-1:
                    # flush the trailing group at the end of the log
                    new_log=log[i_0: ]
                    depth=new_log[0][1] - new_log[-1][2]
                    log_strati=[s[: -1] for s in new_log]
                    bh=borehole(str(bhID)+"_"+unit_after.name, str(bhID)+"_"+unit_after.name, bhx, bhy, log_strati[0][1],
                                depth, log_strati=log_strati)
                    l_bhs.append(bh)
        else:
            # single unit at this level -> one borehole covering it
            unit=log[0][0]
            depth=log[0][1] - log[-1][2]
            log_strati=[s[: -1] for s in log]
            bh=borehole(str(bhID)+"_"+unit.name, str(bhID)+"_"+unit.name, bhx, bhy, log_strati[0][1],
                        depth, log_strati=log_strati)
            l_bhs.append(bh)

    return l_bhs
def running_mean_2D(x, N):
    """
    Smooth a 2d surface with a uniform (moving-average) filter.

    Parameters
    ----------
    x: 2D ndarray
        Array to smooth
    N: int
        Window size passed to the uniform filter

    Returns
    -------
    2D ndarray
    """
    # reflect mode mirrors the array at the borders, avoiding edge artefacts
    return uniform_filter(x.copy(), size=N, mode="reflect")
####### CLASSES ########
class Arch_table():
"""
Major class of ArchPy. Arch_table is the central object
that can be assimilated as a "project".
Practically every operations are done using an Arch_table
Parameters
----------
name: str
Name of the project
working_directory: str
Path to the working directory
seed: int
Seed for random number generation
write_results: bool
If True, results will be written to disk
fill_flag: bool
If True, the top unit will be filled with the most common facies
verbose: int
Verbosity level
ncpu: int
Number of cpus to use for parallel operations, if -1, all cpus will be used
"""
def __init__(self, name, working_directory="ArchPy_workspace", seed=np.random.randint(1e6), write_results=False, fill_flag=False, verbose=1, ncpu=-1):
    """Create a new ArchPy project ("table")."""
    assert name is not None, "A name must be provided"
    # put assert seed
    self.name=name
    self.ws=working_directory #working directory where files will be created
    # registries of model objects and data
    self.list_all_units=[]
    self.list_all_facies=[]
    self.list_bhs=[] # list of boreholes for hd
    self.list_fake_bhs=[] # list of "fake" boreholes
    self.list_map_bhs=[]
    self.sto_hd = [] # stochastic hard data
    self.list_props=[]
    # NOTE(review): the seed is scaled by 1e6 here — confirm this is the
    # intended behaviour rather than using the given seed directly
    self.seed=int(1e6*seed)
    self.verbose=verbose # 0: print (quasi)-nothing, 1: print everything
    self.ncpu=ncpu
    # grid geometry: edges (xg/yg/zg), centers (xgc/ygc/zgc), cell sizes
    # (sx/sy/sz), origin (ox/oy/oz) and number of cells (nx/ny/nz); all
    # None until add_grid() is called
    self.xg=None
    self.yg=None
    self.zg=None
    self.xgc =None
    self.ygc=None
    self.zgc=None
    self.sx=None
    self.sy=None
    self.sz=None
    self.ox=None
    self.oy=None
    self.oz=None
    self.nx=None
    self.ny=None
    self.nz=None
    self.Pile_master=None
    self.geol_map=None
    self.write_results=write_results
    # processing-state flags (0 = not done yet)
    self.bhs_processed=0 # flag to know if boreholes have been processed
    self.surfaces_computed=0
    self.facies_computed=0
    self.prop_computed=0
    # realization counters, set once the corresponding simulations are run
    self.nreal_units=0
    self.nreal_fa=0
    self.nreal_prop=0
    self.fill_flag = fill_flag
    # container for geological results (surfaces, facies domains, ...)
    self.Geol=Geol()
#get functions
def get_pile_master(self):
    """
    Returns the Pile_master object

    Raises
    ------
    ValueError
        if no master pile has been defined on this Arch_table
    """
    pile = self.Pile_master
    if pile is None:
        raise ValueError ("No Pile master defined for Arch Table {}".format(self.name))
    return pile
# --- grid accessors -------------------------------------------------------
# NOTE(review): all these getters validate with `assert 0, ...`, which is
# stripped when Python runs with -O; raising an exception would be safer.
def get_xg(self):
    """Returns the edges of the grid in x direction"""
    if self.xg is None:
        assert 0, ('Error: Grid was not added')
    return self.xg

def get_yg(self):
    """Returns the edges of the grid in y direction"""
    if self.yg is None:
        assert 0, ('Error: Grid was not added')
    return self.yg

def get_zg(self):
    """Returns the edges of the grid in z direction"""
    if self.zg is None:
        assert 0, ('Error: Grid was not added')
    return self.zg

def get_xgc(self):
    """Returns the centers of the grid in x direction"""
    if self.xgc is None:
        assert 0, ('Error: Grid was not added')
    return self.xgc

def get_ygc(self):
    """Returns the centers of the grid in y direction"""
    if self.ygc is None:
        assert 0, ('Error: Grid was not added')
    return self.ygc

def get_zgc(self):
    """Returns the centers of the grid in z direction"""
    # NOTE(review): this checks self.zg but returns self.zgc — probably
    # should test self.zgc for consistency with the other accessors
    if self.zg is None:
        assert 0, ('Error: Grid was not added')
    return self.zgc

def get_nx(self):
    """Returns the number of cells in x direction"""
    if self.nx is None:
        assert 0, ('Error: Grid was not added')
    return self.nx

def get_ny(self):
    """Returns the number of cells in y direction"""
    if self.ny is None:
        assert 0, ('Error: Grid was not added')
    return self.ny

def get_nz(self):
    """Returns the number of cells in z direction"""
    if self.nz is None:
        assert 0, ('Error: Grid was not added')
    return self.nz

def get_sx(self):
    """Returns the size of the cells in x direction"""
    if self.sx is None:
        assert 0, ('Error: Grid was not added')
    return self.sx

def get_sy(self):
    """Returns the size of the cells in y direction"""
    if self.sy is None:
        assert 0, ('Error: Grid was not added')
    return self.sy

def get_sz(self):
    """Returns the size of the cells in z direction"""
    if self.sz is None:
        assert 0, ('Error: Grid was not added')
    return self.sz

def get_ox(self):
    """Returns the origin of the grid in x direction"""
    if self.ox is None:
        assert 0, ('Error: Grid was not added')
    return self.ox

def get_oy(self):
    """Returns the origin of the grid in y direction"""
    if self.oy is None:
        assert 0, ('Error: Grid was not added')
    return self.oy

def get_oz(self):
    """Returns the origin of the grid in z direction"""
    if self.oz is None:
        assert 0, ('Error: Grid was not added')
    return self.oz
# def get_facies(self):
# # if self.Geol.facies_domains is None:
# # assert 0, ('Error: facies domains not computed')
# facies =self.Geol.facies_domains.copy()
# return facies
    def get_facies(self, iu=0, ifa=0, all_data=True):
        """
        Return a numpy array of 1 or all facies realization(s).

        Parameters
        ----------
        iu: int
            unit realization index (ignored when all_data is True)
        ifa: int
            facies realization index (ignored when all_data is True)
        all_data: bool
            if True, return every unit/facies realization at once

        Returns
        -------
        ndarray
            facies domains; shape (nreal_units, nreal_fa, nz, ny, nx) when
            all_data is True, otherwise the single (nz, ny, nx) realization
            (iu, ifa)

        Raises
        ------
        ValueError
            if write_results is set but no ".fd" result file exists in ws
        """
        if self.write_results:
            # results live on disk as "<name>_<iu>_<ifa>.fd" pickle files in self.ws
            if "fd" not in [i.split(".")[-1] for i in os.listdir(self.ws)]:
                raise ValueError("Facies have not been computed yet")
        if all_data:
            # get all real
            if self.write_results:
                nreal_fa=self.nreal_fa
                nreal_u=self.nreal_units
                nx=self.get_nx()
                ny=self.get_ny()
                nz=self.get_nz()
                # one int8 block for every (unit, facies) realization pair
                fd=np.zeros([nreal_u, nreal_fa, nz, ny, nx], dtype=np.int8)
                for iu in range(nreal_u):
                    for ifa in range(nreal_fa):
                        fname=self.name+"_{}_{}.fd".format(iu, ifa)
                        fpath=os.path.join(self.ws, fname)
                        with open(fpath, "rb") as f:
                            fd[iu, ifa]=pickle.load(f)
            else:
                # everything is kept in memory on the Geol container
                fd=self.Geol.facies_domains.copy()
        else:
            if self.write_results:
                # load only the requested (iu, ifa) realization from disk
                fname=self.name+"_{}_{}.fd".format(iu, ifa)
                fpath=os.path.join(self.ws, fname)
                with open(fpath, "rb") as f:
                    fd=pickle.load(f)
            else:
                fd=self.Geol.facies_domains[iu, ifa].copy()
        return fd
    def get_surfaces_unit(self, unit, typ="top"):
        """
        Return a 3D array of computed surfaces for a specific unit.

        Parameters
        ----------
        unit: :class:`Unit` object
            a Unit object contained inside the master pile
            or another subunit
        typ: string, (top, bot or original)
            which surface to return: top, bot or original (surfaces
            before applying erosion and stratigraphic rules)

        Returns
        -------
        3D ndarray of size (nreal_units, ny, nx) or None
            all computed surfaces for that unit, or None when the pile
            holding the unit is not of nature "surfaces"
        """
        assert unit in self.get_all_units(), "Unit must be included in a pile related to the master pile"
        assert self.Geol.surfaces_by_piles is not None, "Surfaces not computed"
        assert typ in ("top", "bot", "original"), "Surface type {} doesnt exist".format(typ)
        # locate the pile the unit belongs to: the master pile at hierarchy
        # level 1, otherwise the sub-pile of its parent ("mummy") unit
        hl=unit.get_h_level()
        if hl == 1:
            P=self.get_pile_master()
        elif hl > 1:
            P=unit.mummy_unit.SubPile
        # NOTE(review): if hl < 1 neither branch binds P and the next line
        # raises NameError -- confirm get_h_level() always returns >= 1
        if P.nature == "surfaces":
            # unit.order is 1-based, hence the -1 when indexing the stack
            if typ =="top":
                s=self.Geol.surfaces_by_piles[P.name][:, unit.order-1].copy()
            elif typ =="bot":
                s=self.Geol.surfaces_bot_by_piles[P.name][:, unit.order-1].copy()
            elif typ == "original":
                s=self.Geol.org_surfaces_by_piles[P.name][:, unit.order-1].copy()
            return s
        else:
            return None
def get_surface(self, h_level="all"):
"""
Return a 4D array of multiple surfaces according to
the hierarchical level desired, by default ArchPy try
to return the highest hierarchical unit's surface
Parameters
----------
h_level: int or string,
maximum level of hierarchy
desired to return, "all" indicates to return
the highest hierarchical level possible for each unit
Returns
-------
4D ndarray of size (nlayer, nsim, ny, nx)
All computed surfaces in a nd.array of
size (nlayer, nsim, ny, nx)
list
a list (nlayer) of units name corresponding to the
surfaces to distinguish wich surface correspond to which unit
"""
if h_level == "all":
l=[]
def fun(pile):
for u in pile.list_units:
if u.f_method != "SubPile":
l.append(u)
else:
fun(u.SubPile)
elif isinstance(h_level, int) and h_level > 0:
l=[]
def fun(pile):
for u in pile.list_units:
if u.f_method == "SubPile":
if u.get_h_level() < h_level:
fun(u.SubPile)
elif u.get_h_level() == h_level:
l.append(u)
else:
pass
else:
l.append(u)
fun(self.get_pile_master())
nlay=len(l)
unit_names=[i.name for i in l]
for i in range(nlay):
u=l[i]
s=self.get_surfaces_unit(u)
if i == 0:
nreal, ny, nx=s.shape
surfs=np.zeros([nlay, nreal, ny, nx], dtype=np.float32)
surfs[0]=s
else:
surfs[i]=s
return surfs, unit_names
def get_unit(self, name="A", ID=1, type="name", all_strats=True, vb=1):
"""
Return the unit in the Pile with the associated name or ID.
Parameters
----------
name: string
name of the strati to retrieve
ID: int
ID of the strati to retrieve
type: str, (name or ID)
retrieving method
all_strats: bool
flag to indicate to also search in sub-units,
if false research will be restricted to units
directly in the Pile master
vb: int
verbosity level
Returns
-------
:class:`Unit` object
"""
assert isinstance(name, str), "Name must be a string"
if all_strats:
l=self.get_all_units()
else:
l=self.get_pile_master().list_units()
for s in l:
if type == "name":
if s.name == name:
return s
elif type == "ID":
if s.ID == ID:
return s
if type=="name":
var=name
elif type=="ID":
var=ID
if vb:
print ("No unit with that name/ID {}".format(var))
return None
def getbhindex(self, ID):
"""
Return the index corresponding to a certain borehole ID
Parameters
----------
ID: string
borehole ID
Returns
-------
int
index of the borehole in the pile
"""
interest=None
for i in range(len(self.list_bhs)):
if self.list_bhs[i].ID == ID:
interest=i
if interest is None:
assert 1, ('the propriety '+ID+' was not found')
else:
return interest
def getbh(self, ID):
"""
Return the borehole object given its ID
Parameters
----------
ID: string
borehole ID
Returns
-------
:class:`Borehole` object
"""
index=self.getbhindex(ID)
return self.list_bhs[index]
def get_facies_obj(self, name="A", ID=1, type="name", vb=1):
"""
Return the facies in the Pile with the associated name or ID.
Parameters
----------
name: string
name of the strati to retrieve
ID: int
ID of the strati to retrieve
type: str, (name or ID)
retrieving method
vb: int
verbosity level (0 or 1)
Returns
-------
:class:`Facies` object
"""
assert isinstance(name, str), "Name must be a string"
l=self.get_all_facies()
for s in l:
if type == "name":
if s.name == name:
return s
elif type == "ID":
if s.ID == ID:
return s
if type=="name":
var=name
elif type=="ID":
var=ID
if vb:
print ("No facies with that name/ID {}".format(var))
return None
def pointToIndex(self, x, y, z):
"""
Return the index of the cell containing the point (x,y,z)
Parameters
----------
x: float
x coordinate of the point
y: float
y coordinate of the point
z: float
z coordinate of the point
Returns
-------
3 int
index of the cell containing the point (x,y,z)
"""
cell_x=np.array((x-self.ox)/self.sx).astype(int)
cell_y=np.array((y-self.oy)/self.sy).astype(int)
cell_z=np.array((z-self.oz)/self.sz).astype(int)
return cell_x, cell_y, cell_z
    def get_all_units(self, recompute=True):
        """
        Return a list of all units, even sub-units.

        Parameters
        ----------
        recompute: bool
            if False, the cached list_all_units attribute
            will be simply retrieved, even if changes have
            been made.

        Returns
        -------
        list of :class:`Unit` objects

        Raises
        ------
        ValueError
            if two units share the same name
        """
        # an empty cache forces a recomputation regardless of the flag
        if len(self.list_all_units) == 0:
            recompute=True
        if self.list_all_units is None or recompute: # recompute if wanted of if there is no list_all_stratis
            def list_all_units(all_stratis, subpile_stratis):
                # depth-first accumulation, descending into each sub-pile
                all_stratis=all_stratis + subpile_stratis
                for s in subpile_stratis:
                    if s.f_method == "SubPile":
                        all_stratis=list_all_units(all_stratis, s.SubPile.list_units)
                return all_stratis
            lau=list_all_units([], self.get_pile_master().list_units)
            #check that stratis have different names
            l=[]
            for s in lau:
                if s.name not in l:
                    l.append(s.name)
                else:
                    raise ValueError("Some units have the same name (Unit {}".format(s.name))
            self.list_all_units=lau
        return self.list_all_units
    def get_all_facies(self, recompute=True):
        """
        Return a list of all facies.

        Parameters
        ----------
        recompute: bool
            if False, the cached list_all_facies attribute
            will be simply retrieved, even if changes have
            been made on the project.

        Returns
        -------
        list of :class:`Facies` objects
        """
        # an empty cache forces a recomputation regardless of the flag
        if len(self.list_all_facies) == 0:
            recompute=True
        if recompute:
            l=[]
            def l_fa(pile):
                # gather the facies of every unit, descending into sub-piles;
                # membership test keeps each facies only once
                for s in pile.list_units:
                    for fa in s.list_facies:
                        if fa not in l:
                            l.append(fa)
                    if s.f_method == "SubPile":
                        l_fa(s.SubPile)
            l_fa(self.get_pile_master())
            self.list_all_facies=l
            return l
        else:
            if self.list_all_facies is not None:
                return self.list_all_facies
            else:
                # cache missing: build it once, then return it
                self.get_all_facies(recompute=True)
                return self.list_all_facies
def get_piles(self):
"""
Return a list of all the subpiles
Returns
-------
list of :class:`Pile` objects
"""
l=[]
def func(pile):
l.append(pile)
for u in pile.list_units:
if u.f_method == "SubPile":
func(u.SubPile)
func(self.Pile_master)
return l
def getpropindex(self, name):
""" Return the index corresponding to a certain prop name
Parameters
----------
name: str
name of the property to retrieve
Returns
-------
int
index of the property
"""
interest=None
for i in range(len(self.list_props)):
if self.list_props[i].name == name:
interest=i
if interest is None:
raise ValueError('the propriety '+name+' was not found')
return interest
    def getprop(self, name, iu=None, ifa=None, ip=None, all_data=True):
        """
        Return a numpy array of 1 or all realization(s) of a property.

        Parameters
        ----------
        name: str
            name of the property (must match one of self.list_props)
        iu:int
            unit index (ignored when all_data is True)
        ifa:int
            facies index (ignored when all_data is True)
        ip:int
            property index (ignored when all_data is True)
        all_data:bool
            return all the realizations at once

        Returns
        -------
        numpy array
            shape (nreal_units, nreal_fa, nreal_prop, nz, ny, nx) when
            all_data is True, otherwise the single (iu, ifa, ip) realization

        Raises
        ------
        ValueError
            if no ".pro" result file exists yet, or the name is unknown
        """
        if self.write_results:
            # results live on disk as "<name><prop>_<iu>_<ifa>_<ip>.pro" pickles
            if "pro" not in [i.split(".")[-1] for i in os.listdir(self.ws)]:
                raise ValueError("Properties have not been computed yet")
        l=[i.name for i in self.list_props]
        if name not in l:
            raise ValueError('The propriety "'+name+'"" was not found \n available Property names are: {}'.format(l))
        nreal_prop=self.nreal_prop
        nreal_fa=self.nreal_fa
        nreal_u=self.nreal_units
        nx=self.get_nx()
        ny=self.get_ny()
        nz=self.get_nz()
        if all_data:
            # one float32 block for every (unit, facies, property) realization
            prop=np.zeros([nreal_u, nreal_fa, nreal_prop, nz, ny, nx], dtype=np.float32)
            if self.write_results:
                # get all real
                for iu in range(nreal_u):
                    for ifa in range(nreal_fa):
                        for ip in range(nreal_prop):
                            fname=self.name+"{}_{}_{}_{}.pro".format(name, iu, ifa, ip)
                            fpath=os.path.join(self.ws, fname)
                            with open(fpath, "rb") as f:
                                prop[iu, ifa, ip]=pickle.load(f)
            else:
                # in-memory results on the Geol container
                prop=self.Geol.prop_values[name]
        else:
            if self.write_results:
                # load only the requested realization from disk
                fname=self.name+"{}_{}_{}_{}.pro".format(name, iu, ifa, ip)
                fpath=os.path.join(self.ws, fname)
                with open(fpath, "rb") as f:
                    prop=pickle.load(f)
            else:
                prop=self.Geol.prop_values[name][iu, ifa, ip]
        return prop
def get_bounds(self):
"""Return bounds of the simulation domain
Returns
-------
tuple
(xmin, xmax, ymin, ymax, zmin, zmax)
"""
bounds=[self.get_ox(), self.get_xg()[-1], self.get_oy(), self.get_yg()[-1], self.get_oz(), self.get_zg()[-1]]
return bounds
def check_units_ID(self):
"""
check the IDs of units in order to be sure that they are different
Returns
-------
int
1 if all is ok
"""
l=[]
for i in self.get_all_units(recompute=True):
if i.ID not in l:
l.append(i.ID)
else:
print("Sorry, unit {} has the same ID {} than another unit. ID must be differents for each units".format(i.name, i.ID))
return None
return 1
def check_piles_name(self):
"""
check the names of piles in order to be sure that they are different
Returns
-------
int
1 if all is ok
"""
l_names=[]
l=self.get_piles()
for p in l:
if p.name not in l_names:
l_names.append(p.name)
else:
print("Pile name '{}' is attributed to more than one pile, verboten !".format(p.name))
return 0
return 1
def set_Pile_master(self, Pile_master):
"""
Define a pile object as the main pile of the project
Parameters
----------
Pile_master: :class:`Pile` object
pile object to set as the main pile of the project
"""
assert isinstance(Pile_master, Pile), "Pile master is not an ArchPy Pile object"
self.Pile_master=Pile_master
if self.check_piles_name():
if self.verbose:
print("Pile sets as Pile master")
else:
self.Pile_master=None
if self.verbose:
print("Pile not sets")
    def indextocell(self, x, y, z): #duplicate with pointoindex --> remove one
        '''Convert coordinates to cell indices (legacy variant of pointToIndex).

        x, y, z are expected to support ``.astype`` (numpy arrays/scalars).

        NOTE(review): the formula divides the coordinate by the cell size
        *before* subtracting min(cell centers), unlike pointToIndex which
        computes (coord - origin) / size -- the two disagree whenever the
        origin is non-zero. Confirm which one is intended before relying
        on this method.
        '''
        cell_x=(x/self.get_sx() - min(self.get_xgc())).astype(int)
        cell_y=(y/self.get_sy() - min(self.get_ygc())).astype(int)
        cell_z=(z/self.get_sz() - min(self.get_zgc())).astype(int)
        return cell_x, cell_y, cell_z
def celltoindex(self,cell_x,cell_y,cell_z):
'''cell index to cell position
Parameters
----------
cell_x : int
cell index in x direction
cell_y : int
cell index in y direction
cell_z : int
cell index in z direction
Returns
-------
tuple
(x,y,z) cell position
'''
x=self.get_sx()* cell_x + min(self.get_xgc())
y=self.get_sy()* cell_y + min(self.get_ygc())
z=self.get_sz()* cell_z + min(self.get_zgc())
return x,y,z
    def reprocess(self):
        """Reprocess the boreholes and erase the previous hard data.

        Resets the processed flag, removes the existing hard data and
        re-extracts it from the boreholes; the seed is then shifted so
        that subsequent simulations differ from the previous ones.
        """
        self.bhs_processed=0
        self.erase_hd()
        self.process_bhs()
        # shift the seed to decorrelate the next runs from the previous ones
        self.seed= int(self.seed + 1e6)
    def resample2grid(self, raster_path, band=None, rspl_method="nearest"):
        """
        Resample a raster to the size of the simulation grid.

        Parameters
        ----------
        raster_path : str
            path to the raster file
        band : int, optional
            raster band to use, if None 0 is used. The default is None.
        rspl_method : str, optional
            resampling method to use. availables are : nearest, linear, cubic.
            The default is "nearest".

        Returns
        -------
        2D array of size (self.ny, self.nx)
        """
        #import rasterio
        import rasterio
        #open raster and extract cell centers
        DEM=rasterio.open(raster_path)
        x0, y0, x1, y1=DEM.bounds
        rxlen=DEM.read().shape[2]
        rylen=DEM.read().shape[1]
        x=np.linspace(x0, x1, rxlen)
        # y axis runs from y1 down to y0: raster rows are stored top-first
        y=np.linspace(y1, y0, rylen)
        rxc, ryc=np.meshgrid(x, y)
        #take grid cell centers
        xc=self.xcellcenters
        yc=self.ycellcenters
        if band is None:
            ib = 0
        else:
            ib = band
        # resample_to_grid is a helper imported elsewhere in this module
        # (presumably flopy-style -- confirm); it interpolates the raster
        # band onto the model cell centers
        return resample_to_grid(xc, yc, rxc, ryc, DEM.read()[ib], method=rspl_method) # resampling
    def add_grid(self, dimensions, spacing, origin, top=None, bot=None, rspl_method="nearest", polygon=None, mask=None):
        """
        Method to add/change simulation grid, regular grid.

        Parameters
        ----------
        dimensions: sequence of size 3,
            number of cells in x, y and direction (nx, ny, nz)
        spacing: sequence of size 3,
            spacing of the cells in x, y and direction (sx, sy, sz)
        origin: sequence of size 3,
            origin of the simulation grid of the cells
            in x, y and direction (ox, oy, oz)
        top, bot: 2D ndarray of dimensions (ny, nx) or float or raster file,
            top and bottom of the simulation domain
        rspl_method: string
            scipy resampling method (nearest, linear
            and cubic --> nearest is generally sufficient)
        polygon: 2D ndarray of dimensions (ny, nx)
            boolean array to indicate where the simulation is active (1)
            or inactive (0). Polygon can also be a Shapely (Multi) - polygon
            or a path to a shapefile.
        mask: 3D ndarray of dimensions (nz, ny, nx)
            3D boolean array to indicate where the simulation
            is active (1) or inactive (0).
            If given, top, bot and polygon are ignored.
        """
        if self.verbose:
            print("## Adding Grid ##")
        ## cell centers ##
        sx=spacing[0]
        sy=spacing[1]
        sz=spacing[2]
        nx=dimensions[0]
        ny=dimensions[1]
        nz=dimensions[2]
        ox=origin[0]
        oy=origin[1]
        oz=origin[2]
        # cell-edge vectors (the "+s" in the stop makes the last edge included)
        xg=np.arange(ox, ox+nx*sx+sx, sx, dtype=np.float32)
        yg=np.arange(oy, oy+ny*sy+sy, sy, dtype=np.float32)
        zg=np.arange(oz, oz+nz*sz+sz, sz, dtype=np.float32)
        # cell centers = edges shifted by half a cell
        xgc=xg[: -1]+sx/2
        ygc=yg[: -1]+sy/2
        zgc=zg[: -1]+sz/2
        self.sx=sx
        self.sy=sy
        self.sz=sz
        self.nx=nx
        self.ny=ny
        self.nz=nz
        self.ox=ox
        self.oy=oy
        self.oz=oz
        self.xg=xg
        self.yg=yg
        self.zg=zg
        self.xgc=xgc # xg_cell_centers
        self.ygc=ygc # yg_cell_centers
        self.zgc=zgc # zg_cell_centers
        self.xcellcenters, self.ycellcenters = np.meshgrid(xgc, ygc) # cell centers coordinates
        # KD-trees for fast nearest-edge/center lookups along each axis
        z_tree=KDTree(zg.reshape(-1, 1))
        self.z_tree=z_tree
        self.zc_tree=KDTree(zgc.reshape(-1, 1))
        self.xc_tree=KDTree(xgc.reshape(-1, 1))
        self.yc_tree=KDTree(ygc.reshape(-1, 1))
        ## resample top and bot if needed
        if isinstance(top, str) or isinstance(bot, str):
            #import rasterio
            import rasterio
            #open raster and extract cell centers
            DEM=rasterio.open(top)
            x0, y0, x1, y1=DEM.bounds
            rxlen=DEM.read().shape[2]
            rylen=DEM.read().shape[1]
            x=np.linspace(x0, x1, rxlen)
            # raster rows are stored top-first, hence y1 -> y0
            y=np.linspace(y1, y0, rylen)
            rxc, ryc=np.meshgrid(x, y)
            #take grid cell centers
            xc=self.xcellcenters
            yc=self.ycellcenters
            if isinstance(top, str):
                if self.verbose:
                    print("Top is a raster - resampling activated")
                top=resample_to_grid(xc, yc, rxc, ryc, DEM.read()[0], method=rspl_method) # resampling
            if isinstance(bot, str):
                if self.verbose:
                    print("Bot is a raster - resampling activated")
                rast=rasterio.open(bot)
                x0, y0, x1, y1=rast.bounds
                rxlen=rast.read().shape[2]
                rylen=rast.read().shape[1]
                x=np.linspace(x0, x1, rxlen)
                y=np.linspace(y1, y0, rylen)
                rxc, ryc=np.meshgrid(x, y)
                bot=resample_to_grid(xc, yc, rxc, ryc, rast.read()[0], method=rspl_method) # resampling
        if top is not None:
            assert top.shape == (ny, nx), "Top shape is not adequat respectively to coordinate vectors. \n Must be have a size of -1 respectively to coordinate vectors xg and yg (which are the vectors of edge cells)"
        if bot is not None:
            assert bot.shape == (ny, nx), "Bot shape is not adequat respectively to coordinate vectors. \n Must be have a size of -1 respectively to coordinate vectors xg and yg (which are the vectors of edge cells)"
        #define top/bot
        if (top is None) and (mask is None):
            # no information: top is the upper grid edge everywhere
            top=np.ones([ny, nx], dtype=np.float32)*np.max(zg)
        elif (top is None) and (mask is not None): # if mask is provided but not top
            # derive the top from the highest active cell of each column
            top =np.zeros([ny, nx], dtype=np.float32)*np.nan
            for ix in range(nx):
                for iy in range(ny):
                    for iz in range(nz-1, -1, -1):
                        if mask[iz, iy, ix]:
                            top[iy, ix]=zgc[iz]
                            break
        # cut top
        # clamp the top inside the vertical extent of the grid
        top[top>zg[-1]]=zg[-1]
        top[top<zg[0]]=zg[0]
        self.top=top.astype(np.float32)
        if (bot is None) and (mask is None):
            # no information: bottom is the lower grid edge everywhere
            bot=np.ones([ny, nx], dtype=np.float32)*np.min(zg)
        elif (bot is None) and (mask is not None): # if mask is provided but not top
            # derive the bottom from the lowest active cell of each column
            bot =np.zeros([ny, nx], dtype=np.float32)*np.nan
            for ix in range(nx):
                for iy in range(ny):
                    for iz in range(nz):
                        if mask[iz, iy, ix]:
                            bot[iy, ix]=zgc[iz]
                            break
        # cut bot
        # clamp the bottom inside the vertical extent of the grid
        bot[bot>zg[-1]]=zg[-1]
        bot[bot<zg[0]]=zg[0]
        self.bot=bot.astype(np.float32)
        # create mask from top and bot if none
        if mask is None:
            mask=np.zeros([nz, ny, nx], dtype=bool)
            # nearest z-edge indices of top/bot for every column
            iu=z_tree.query(top.reshape(-1, 1), return_distance=False).reshape(ny, nx)
            il=z_tree.query(bot.reshape(-1, 1), return_distance=False).reshape(ny, nx)
            for ix in range(len(xgc)):
                for iy in range(len(ygc)):
                    # activate the cells between bottom and top of the column
                    mask[il[iy, ix]: iu[iy, ix], iy, ix]=1
        # list of coordinates 2D and 3D
        X, Y=np.meshgrid(xgc, ygc)
        self.xu2D=np.array([X.flatten(), Y.flatten()], dtype=np.float32).T
        #X, Y, Z=np.meshgrid(xgc, ygc, zgc)
        #self.xu3D=np.array([X.flatten(), Y.flatten(), Z.flatten()]).T
        #apply polygon
        # if polygon is a shapefile
        if isinstance(polygon, str): # if polygon is a shapefile
            import shapely
            from shapely.geometry import Polygon, MultiPolygon
            if polygon.split(".")[-1] == "shp":
                import geopandas as gp
                poly = gp.read_file(polygon)
                # single geometry -> Polygon, several -> MultiPolygon
                if poly.shape[0] == 1:
                    polygon = Polygon(poly.geometry.iloc[0])
                elif poly.shape[0] > 1:
                    polygon = MultiPolygon(poly.geometry.values)
        import shapely
        #if polygon is shapely Polygon
        if isinstance(polygon, (shapely.geometry.Polygon, shapely.geometry.MultiPolygon)):
            if self.verbose:
                print("Polygon is a shapely instance - discretization activated")
            polygon_array=np.zeros([ny*nx], dtype=bool) #2D array simulation domain
            cell_l=[]
            # build one rectangular shapely cell per grid cell
            for i,cell in enumerate(self.xu2D):
                xy=((cell[0]-sx/2, cell[1]-sy/2),(cell[0]-sx/2, cell[1]+sy/2),
                    (cell[0]+sx/2, cell[1]+sy/2),(cell[0]+sx/2, cell[1]-sy/2))
                p=shapely.geometry.Polygon(xy)
                p.name=i
                cell_l.append(p)
            l=[]#list of intersected cells
            for cell in cell_l:
                if cell.intersects(polygon):
                    l.append(cell.name)
            polygon_array[np.array(l)]=1
            polygon=polygon_array.reshape(ny, nx)
        if polygon is not None:
            polygon=polygon.astype(bool) #ensure polygon is a boolean array
            # deactivate every column outside the polygon
            for ix in range(nx):
                for iy in range(ny):
                    if ~polygon[iy, ix]:
                        mask[:, iy, ix]=0
        self.mask=mask
        # 2D footprint: columns with at least one active cell
        self.mask2d = mask.any(0)
        if self.verbose:
            print("## Grid added and is now simulation grid ##")
def hierarchy_relations(self, vb=1):
"""Method that sets the hierarchical relations between units
and sub-units
"""
def h_relations(pile):
"""
# calculate mummy unit for each sub unit (to which unit a sub-unit is related)
"""
for s in pile.list_units: # loop over units of the master pile
if s.f_method == "SubPile":
for ssub in s.SubPile.list_units: # loop over units of the sub pile if filling method is SubPile
ssub.mummy_unit=s
h_relations(s.SubPile)
h_relations(self.get_pile_master())
#set bb units
for unit in self.get_all_units():
unit.get_baby_units(recompute=True, vb=0)
if vb:
print("hierarchical relations set")
    def check_bh_inside(self, bh):
        """
        Check if a borehole is inside the simulation domain
        and cut boreholes if needed.

        The borehole object may be modified in place: its z (altitude),
        depth and logs are trimmed to the model limits.

        Parameters
        ----------
        bh: :class:`Borehole`
            borehole object

        Returns
        -------
        0 if borehole is outside the simulation domain
        1 if borehole is inside the simulation domain
        """
        if bh.depth <= 0:
            if self.verbose:
                print("borehole depth is not positive")
            return 0
        #fun to check botom
        def cut_botom(bh):
            # trim the borehole depth and logs to the model bottom surface
            z0_bh=bh.z
            zbot_bh=z0_bh - bh.depth
            bot_z = self.bot[self.coord2cell(bh.x, bh.y)]
            # if (zbot_bh < zg[0]): # modification to cut below bot and not below simulation grid
            if (zbot_bh < bot_z):
                # keep the borehole half a cell above the bottom surface
                bh.depth=(z0_bh - bot_z) - sz/2
                if self.verbose:
                    print("Borehole {} goes below model limits, borehole {} depth cut".format(bh.ID, bh.ID))
                if bh.log_strati is not None: # update log strati
                    # drop log entries that lie below the model bottom
                    new_log=[]
                    for s in bh.log_strati:
                        if s[1] > bot_z:
                            new_log.append(s)
                    bh.log_strati=new_log
                if bh.log_facies is not None: # update log facies
                    new_log=[]
                    for s in bh.log_facies:
                        if s[1] > bot_z:
                            new_log.append(s)
                    bh.log_facies=new_log
        zg=self.get_zg() # z vector
        sz=self.get_sz()
        #check botom
        # altitude of the top of the borehole, taken from the first log entry
        if bh.log_strati is not None:
            z0_bh=bh.log_strati[0][1] # borehole altitude top
        elif bh.log_facies is not None:
            z0_bh=bh.log_facies[0][1]
        else:
            if self.verbose:
                print("no log found in bh {}".format(bh.ID))
            return 0
        #check inside mask and adapt z borehole to DEM
        # walk down the borehole until a point inside the mask is found
        for iz in np.arange(z0_bh, z0_bh-bh.depth, max(-sz, (-bh.depth) / 2)):
            if self.coord2cell(bh.x, bh.y, iz) is not None: # check if inside mask
                if self.verbose:
                    #print("Borehole {} inside of the simulation zone".format(bh.ID))
                    pass
                if iz == bh.z:
                    # borehole top already inside: only the bottom may need cutting
                    cut_botom(bh)
                    if bh.depth < 0:
                        return 0
                    else:
                        return 1
                elif iz < bh.z: # if the borehole is above DEM and must be cut
                    bh.z=iz #update borehole altitude
                    # update log strati
                    # truncate the strati log at the new altitude iz
                    if bh.log_strati:
                        if len(bh.log_strati) > 1:
                            new_log=[]
                            for i in range(len(bh.log_strati)-1):
                                s=bh.log_strati[i]
                                s2=bh.log_strati[i+1]
                                if s[1] > iz and s2[1] < iz: # unit cut by the dem
                                    new_log.append((s[0], iz))
                                elif s[1] <= iz:
                                    new_log.append(s)
                                else:
                                    pass
                                #last unit in log
                                if s2[1] <= iz:
                                    new_log.append(s2)
                                elif s2[1] > iz:
                                    new_log.append((s2[0], iz))
                            bh.log_strati=new_log
                        else:
                            pass
                    # update log facies
                    # same truncation applied to the facies log
                    if bh.log_facies:
                        if len(bh.log_facies)>1:
                            new_log=[]
                            for i in range(len(bh.log_facies)-1):
                                s=bh.log_facies[i]
                                s2=bh.log_facies[i+1]
                                if s[1] > iz and s2[1] < iz: # unit cut by the dem
                                    new_log.append((s[0], iz))
                                elif s[1] <= iz:
                                    new_log.append(s)
                                else:
                                    pass
                                if s2[1] <= iz:
                                    new_log.append(s2)
                                elif s2[1] > iz:
                                    new_log.append((s2[0], iz))
                            bh.log_facies=new_log
                        else:
                            pass
                    cut_botom(bh)
                    if bh.depth < 0:
                        return 0
                    else:
                        return 1
        if self.verbose:
            print("Borehole {} outside of the simulation zone - not added -".format(bh.ID))
        return 0
    def coord2cell(self, x, y, z=None):
        """
        Method that returns the cell in which are the given coordinates.

        Parameters
        ----------
        x: float
            x coordinate
        y: float
            y coordinate
        z: float, optional
            z coordinate, if z is None, only y and x indexes are returned

        Returns
        -------
        cell: tuple or None
            cell indexes (iz, iy, ix), or (iy, ix) if z is None
            (note the z-y-x order used for indexing the mask);
            None when the point lies outside the grid or outside the mask
        """
        # `val == val` is False only for NaN
        assert y == y, "coordinates contain NaN"
        assert x == x, "coordinates contain NaN"
        if z is not None:
            assert z == z, "coordinates contain NaN"
        xg=self.get_xg()
        yg=self.get_yg()
        zg=self.get_zg()
        sx=self.get_sx()
        sy=self.get_sy()
        sz=self.get_sz()
        nz=self.get_nz()
        # check point inside simulation block
        if (x <= xg[0]) or (x >= xg[-1]):
            if self.verbose:
                print("point outside of the grid in x")
            return None
        if (y <= yg[0]) or (y >= yg[-1]):
            if self.verbose:
                print("point outside of the grid in y")
            return None
        if z is not None:
            if (z <= zg[0]) or (z > zg[-1]):
                if self.verbose:
                    print("point outside of the grid in z")
            return None
        # integer cell indices (x, y expected as numpy scalars: .astype is used)
        ix=((x-xg[0])//sx).astype(int)
        iy=((y-yg[0])//sy).astype(int)
        if z is not None:
            iz=((z-zg[0])//sz).astype(int)
            # clamp to the last layer (z exactly on the top edge)
            if iz > nz-1:
                iz=nz-1
            cell=(iz, iy, ix)
            # only return the cell if it is active in the simulation mask
            if self.mask[iz, iy, ix]:
                return cell
            else:
                #print("Point outside of the simulation domain")
                return None
        else:
            cell = (iy, ix)
            return cell
def add_prop(self, prop):
"""
Add a property to the Arch_Table
Parameters
----------
prop: :class:`Prop` object
property to add to the Arch_Table
Returns
-------
None
"""
try:
for i in prop:
if (isinstance(i, Prop)) and (i not in self.list_props):
self.list_props.append(i)
if self.verbose:
print("Property {} added".format(i.name))
else:
if self.verbose:
print("object isn't a Property object or it is already in the list")
except: # strati not in a list
if (isinstance(prop, Prop)) and (prop not in self.list_props):
self.list_props.append(prop)
if self.verbose:
print("Property {} added".format(prop.name))
else:
if self.verbose:
print("object isn't a Property object")
def add_bh(self, bhs):
"""
Method to add borehole, list of boreholes if multiples
Parameters
----------
bhs: :class:`borehole` object or list of :class:`borehole` objects
borehole(s) to add to the Arch_Table
"""
if hasattr(bhs, "__iter__"):
for i in bhs:
if (isinstance(i, borehole)) and (i not in self.list_bhs):
if self.check_bh_inside(i):
self.list_bhs.append(i)
self.bhs_processed = 0 # reset flag of boreholes already processed
if self.verbose:
print("Borehole {} added".format(i.ID))
else:
if self.verbose:
print("object isn't a borehole object or object is already in the list")
else: # boreholes not in a list
if (isinstance(bhs, borehole)) and (bhs not in self.list_bhs):
if self.check_bh_inside(bhs):
self.list_bhs.append(bhs)
self.bhs_processed = 0 # reset flag of boreholes already processed
if self.verbose:
print("Borehole {} added".format(bhs.ID))
else:
if self.verbose:
print("object isn't a borehole object or object is already in the list")
    def make_fake_bh(self, positions_x, positions_y, units=None, facies=None, stratIndex=0, faciesIndex=0, extractUnits=True, extractFacies=True, vb=1):
        """
        Create fake boreholes from realization of the Arch_table.

        Parameters
        ----------
        positions_x: sequence of numbers
            indicate the x positions of the borehole to create bhs
        positions_y: sequence of numbers
            indicate the y positions of the borehole to create bhs
        units: 3D array, optional
            unit array to use to create bhs
        facies: 3D array, optional
            facies array to use to create bhs
        stratIndex: int or sequence of int, optional
            unit index to sample
        faciesIndex: int or sequence of int, optional
            facies index to sample
        extractUnits: bool, optional
            flag to indicate to sample units or not
        extractFacies: bool, optional
            flag to indicate to sample facies or not
        vb: int, optional
            verbose level

        Returns
        -------
        list of :class:`borehole` objects
            nested as [stratIndex][faciesIndex][borehole]
        """
        # get 3D arrays to sample
        if units is None and extractUnits:
            units = self.get_units_domains_realizations()
            # units_data[np.isnan(units_data)]=-99 # default ID to indicate no data
        units_data=units.copy()
        if facies is None and extractFacies:
            facies = self.get_facies()
        facies_data=facies.copy()
        # facies_data[np.isnan(facies_data)]=-99 # same for facies
        #change into array
        # normalize scalar arguments into arrays so the loops below work
        if type(positions_x) is not np.ndarray:
            positions_x = np.array([positions_x])
        if type(positions_y) is not np.ndarray:
            positions_y = np.array([positions_y])
        if type(stratIndex) is int:
            stratIndex = np.array([stratIndex])
        if type(faciesIndex) is int:
            faciesIndex = np.array([faciesIndex])
        # precompute grid indices of every requested position
        l_pos = []
        for x,y in zip(positions_x,positions_y):
            cell_x, cell_y, z = self.pointToIndex(x,y,1)
            l_pos.append((cell_x, cell_y, z))
        u_list = []
        for iu in stratIndex:
            fa_list = []
            for ifa in faciesIndex:
                fake_bh = []
                for i in range(len(positions_x)):
                    cell_x, cell_y, z = l_pos[i]
                    x = positions_x[i]
                    y = positions_y[i]
                    if extractUnits:
                        #unit log
                        # build the log top-down from the vertical column of IDs:
                        # each entry is (unit object, altitude of its top)
                        unit_log = []
                        unit_idx = units_data[iu, :, cell_y, cell_x]
                        if unit_idx[-1] != 0:
                            unit_log.append((self.get_unit(ID = unit_idx[-1], type='ID', vb=0), np.round(self.zg[-1], 2)))
                        # one entry per ID transition along the column
                        for index_trans in reversed(np.where(np.diff(unit_idx) != 0)[0]):
                            unit_log.append((self.get_unit(ID =unit_idx[index_trans], type='ID', vb=0), np.round(self.zg[index_trans+1], 2)))
                    else:
                        unit_log=None
                    #facies log
                    if extractFacies:
                        facies_log = []
                        facies_idx = facies_data[iu, ifa, :, cell_y, cell_x]
                        #If model has no free space above, we need to add the transition Surface - first layer
                        if facies_idx[-1] != 0:
                            facies_log.append((self.get_facies_obj(ID = facies_idx[-1], type='ID', vb=0), np.round(self.zg[-1], 2)))
                        for index_trans in reversed(np.where(np.diff(facies_idx) != 0)[0]):
                            fa_obj=self.get_facies_obj(ID =facies_idx[index_trans], type='ID', vb=0)
                            iz=np.round(self.zg[index_trans+1], 2)
                            facies_log.append((fa_obj, iz))
                    else:
                        facies_log=None
                    # merge None if same unit above and below
                    # (IDs without a matching Unit object produce None entries;
                    # drop them when the surrounding units are identical)
                    if unit_log is not None:
                        c=0
                        for i in range(len(unit_log)):
                            if i == len(unit_log)-1 and unit_log[i][0] is None:
                                unit_log = unit_log[:-1]
                                break
                            i-=c
                            if unit_log[i][0] is None and i > 0:
                                if unit_log[i-1][0] == unit_log[i+1][0]:
                                    unit_log=unit_log[:i] + unit_log[(i+2):]
                                    c+=2
                    # if no info set logs to None
                    if facies_log is not None:
                        if len(facies_log) == 0:
                            facies_log=None
                    # anchor the first log entry slightly below the topography
                    if unit_log is not None:
                        if unit_log:
                            unit_log[0] = (unit_log[0][0], self.top[cell_y,cell_x]-self.get_sz()/10)
                        else:
                            unit_log = None
                    if facies_log is not None:
                        if facies_log:
                            facies_log[0] = (facies_log[0][0], self.top[cell_y,cell_x]-self.get_sz()/10)
                        else:
                            facies_log = None
                    if unit_log is not None or facies_log is not None:
                        fake_bh.append(borehole("fake","fake",x=x,y=y,z=self.top[cell_y,cell_x]-self.get_sz()/10,
                                                depth=self.top[cell_y,cell_x]-self.bot[cell_y,cell_x],log_strati=unit_log,log_facies=facies_log))
                    else:
                        if vb:
                            print("Borehole at positon ({}, {}) is outside of a simulation zone".format(x, y))
                fa_list.append(fake_bh)
            u_list.append(fa_list)
        return u_list
def add_fake_bh(self, bhs):
"""
Method to add a fake borehole, list if multiples
Use for inversion purposes
Parameters
----------
bhs : list of :class:`borehole` or :class:`borehole`
boreholes to add
"""
try:
for i in bhs:
if (isinstance(i, borehole)) and (i not in self.list_fake_bhs):
if self.check_bh_inside(i):
self.list_fake_bhs.append(i)
if self.verbose:
print("Borehole {} added".format(i.ID))
else:
if self.verbose:
print("object isn't a borehole object or object is already in the list")
except: # boreholes not in a list
if (isinstance(bhs, borehole)) and (bhs not in self.list_fake_bhs):
if self.check_bh_inside(bhs):
self.list_fake_bhs.append(bhs)
if self.verbose:
print("Borehole {} added".format(bhs.ID))
else:
if self.verbose:
print("object isn't a borehole object or object is already in the list")
## geological map functions
    def compute_geol_map(self, iu=0, color=False):
        """
        Compute and return the geological map for given unit realization.

        The map holds, for each column, the unit ID of the highest
        non-empty cell (i.e. the unit outcropping at the surface).

        Parameters
        ----------
        iu: int
            unit realization index
        color: bool
            if True return a 3D array with RGBA values

        Returns
        -------
        2D ndarray
            geological map (unit IDs), or an (ny, nx, 4) RGBA array
            when color is True
        """
        ny = self.get_ny()
        nx = self.get_nx()
        nz = self.get_nz()
        arr = self.get_units_domains_realizations(iu)
        geol_map = np.zeros([ny, nx], dtype=np.int8)
        # scan layers from top to bottom; each column keeps the first
        # non-zero unit ID encountered
        # NOTE(review): np.arange(nz-1, 0, -1) never inspects layer iz=0 --
        # confirm the bottom layer is intentionally skipped
        for iz in np.arange(nz-1, 0, -1):
            iy,ix=np.where(arr[iz] != 0)
            slic = geol_map[iy, ix]
            slic[slic == 0] = arr[iz, iy, ix][slic == 0]
            geol_map[iy, ix] = slic
        if color:
            # translate unit IDs to their display colors (white where empty)
            arr_plot = np.ones([ny, nx, 4])
            for iv in np.unique(geol_map):
                if iv != 0:
                    arr_plot[geol_map == iv, :] = matplotlib.colors.to_rgba(self.get_unit(ID = iv, type="ID").c)
                else:
                    arr_plot[geol_map == iv, :] = (1, 1, 1, 1)
            return arr_plot
        return geol_map
def add_geological_map(self, raster):
"""
Add a geological map to Arch_table
Parameters
----------
raster : 2D ndarray of size (ny, nx)
geological map to add. Values are units IDs
"""
if isinstance(raster, str):
geol_map = self.resample2grid(raster)
self.geol_map = geol_map
if self.verbose:
print("Geological map added")
return
elif isinstance(raster, np.ndarray):
self.geol_map = raster
if self.verbose:
print("Geological map added")
    def geol_contours(self, step = 5):
        """
        This function extract information at the boundaries
        between units from the given geological map.(see :meth:`add_geological_map`)
        Results are returned as a list of :class:`borehole` objects

        Parameters
        ----------
        step : int
            step between each cells where to add a contour information

        Returns
        -------
        list of :class:`borehole`
            short synthetic boreholes placed along unit contacts, each
            recording the upper and lower unit at that location
        """
        # some functions
        def unit_contact(u1_id, u2_id, geol_map):
            # locate the contact line between units u1 and u2 on the map:
            # cells of u1 -> 1, cells of u2 -> 2, everything else -> NaN,
            # then trace iso-contours on that field
            from skimage import measure
            arr = geol_map.astype(np.float32).copy()
            arr[geol_map == u1_id] = 1
            arr[geol_map == u2_id] = 2
            arr[(geol_map != u1_id) & (geol_map != u2_id)] = np.nan
            contours = measure.find_contours(arr, 1)
            return contours
        def combi_p2(lst):
            # all unordered pairs (i < j) of the given values
            res = []
            for i in range(len(lst) - 1):
                o2 = i+1
                for o in range(i, len(lst)-1):
                    res.append((lst[i], lst[o2]))
                    o2 += 1
            return np.array(res)
        # retrieve some things
        geol_map = self.geol_map
        top = self.top
        sz = self.get_sz()
        xgc = self.get_xgc()
        ygc = self.get_ygc()
        ids_in_geol_map = np.unique(geol_map)
        combis = combi_p2(ids_in_geol_map) # possible combinations of unit ids
        lst = []
        for u1_id, u2_id in combis:
            u1 = self.get_unit(ID=u1_id, type="ID", vb=0)
            u2 = self.get_unit(ID=u2_id, type="ID", vb=0)
            if u1 is not None and u2 is not None:
                conts_u1_u2 = unit_contact(u1_id, u2_id, geol_map) # extract contacts between u1 and u2
                # decide which unit lies on top at the contact
                if u1.mummy_unit != u2.mummy_unit: # units are not from the same (sub)-pile
                    if u1.get_big_mummy_unit() < u2.get_big_mummy_unit():
                        u1_above = True
                    else:
                        u1_above = False
                else: # units are from the same pile
                    if u1 < u2:
                        u1_above=True
                    else:
                        u1_above=False
                # sample every `step`-th point of each contact line and drop
                # a small two-entry borehole just below the topography there
                for cont in conts_u1_u2:
                    for iy, ix in cont[::step]:
                        iy = int(iy)
                        ix = int(ix)
                        z = top[iy ,ix]-1e-3
                        if u1_above:
                            bh = borehole("contact_bh", "contact_bh", xgc[ix], ygc[iy], z, sz/2, [(u1, z),(u2, z-sz/2)])
                        else :
                            bh = borehole("contact_bh", "contact_bh", xgc[ix], ygc[iy], z, sz/2, [(u2, z),(u1, z-sz/2)])
                        lst.append(bh)
        return lst
def process_geological_map(self, typ="all", step = 5):
"""
Process the geological map attributed to ArchTable model.
This function creates fake boreholes from a
given geological map (raster) and them to list_map_bhs
Parameters
----------
typ : str
flag to indicate what information to take:
- "uniform" for only superficial information (no contact or boundaries)
- "boundaries" for only the contact between the units
- "all" for both
step : int
step for sampling the geological map, small values implies
that much more data are sampled from the raster but this increases
the computational burden. Default is 5 (every 5th cell is sampled)
"""
xg = self.get_xg()
yg = self.get_yg()
xgc = self.get_xgc()
ygc = self.get_ygc()
sz = self.get_sz()
raster = self.geol_map
assert raster.shape == (self.get_ny(), self.get_nx()), "invalid shape for geological map, should be ({}, {})".format(self.get_ny(), self.get_nx())
self.list_map_bhs=[]
sample_raster = False
sample_boundaries = False
if typ=="all":
sample_raster = True
sample_boundaries = True
elif typ == "uniform":
sample_raster = True
elif typ == "boundaries":
sample_boundaries = True
if sample_raster:
mask2d = self.mask.any(0)
bhs_map = []
for ix in np.arange(0, len(xg)-1, step):
for iy in np.arange(0, len(yg)-1, step):
if mask2d[iy, ix]:
unit_id = raster[iy, ix]
unit = self.get_unit(ID=unit_id, type="ID", vb=0)
if unit is not None:
z = self.top[iy, ix]-1e-3
bh = borehole("raster_bh", "raster_bh", xgc[ix], ygc[iy], z, sz/4, [(unit, z)])
bhs_map.append(bh)
self.list_map_bhs += bhs_map
if sample_boundaries:
bhs = self.geol_contours(step=step)
self.list_map_bhs += bhs
if self.verbose:
print("Geological map extracted - processus ended successfully")
# remove boreholes
def rem_all_bhs(self, fake_only=False, geol_map_only=False):
"""Remove all boreholes from the list
Parameters
----------
fake_only : bool
if True, only fake boreholes are removed
geol_map_only : bool
if True, only boreholes from geological map are removed
"""
if fake_only:
self.list_fake_bhs=[]
if self.verbose:
print("Fake boreholes removed")
if geol_map_only:
self.list_map_bhs=[]
if self.verbose:
print("Boreholes from geological map removed")
else:
self.list_fake_bhs=[]
self.list_bhs=[]
self.list_map_bhs=[]
if self.verbose:
print("boreholes removed")
def rem_bh(self, bh):
"""
Remove a given bh from the list of boreholes
Parameters
----------
bh: :class:`borehole` object
borehole to remove
"""
if bh in self.list_bhs:
self.list_bhs.remove(bh)
if self.verbose:
print("Borehole {} removed".format(bh.ID))
else:
if self.verbose:
print("Borehole {} not in the list".format(bh.ID))
def rem_fake_bh(self, bh):
"""
Remove a given bh from the list of fake boreholes
Parameters
----------
bh: :class:`borehole` object
borehole to remove
"""
if bh in self.list_fake_bhs:
self.list_fake_bhs.remove(bh)
if self.verbose:
print("Borehole {} removed".format(bh.ID))
else:
if self.verbose:
print("Borehole {} not in the list".format(bh.ID))
def erase_hd(self):
"""
Erase the hard data from all surfaces and facies
"""
for unit in self.get_all_units():
s=unit.surface
s.x=[]
s.y=[]
s.z=[]
s.ineq=[]
for fa in self.get_all_facies():
fa.x=[]
fa.y=[]
fa.z=[]
if self.verbose:
print("Hard data reset")
def rem_all_facies_from_units(self):
"""
To remove facies from all units
"""
for unit in self.get_all_units():
unit.rem_facies(all_facies=True)
if self.verbose:
print("All facies have been removed from units")
def order_Piles(self):
"""
Order all the units in all the piles according to order attribute
"""
if self.verbose:
print("##### ORDERING UNITS ##### ")
def ord_fun(pile):
pile.order_units(vb=self.verbose) # organize the pile
for s in pile.list_units: #check subunits
if s.f_method == "SubPile":
ord_fun(s.SubPile)
ord_fun(self.get_pile_master())
def add_prop_hd(self,prop,x,v):
"""
Add Hard data to the property "prop"
Parameters
----------
prop : string
property name of the `~ArchPy.base.Prop` object
x : ndarray of size (ndata, 3)
x, y and z coordinates of hd points
v : array of size (ndata)
HD property values at x position
"""
prop=self.list_props(self.getpropindex(prop))
prop.add_hd(x,v)
def hd_un_in_unit(self, unit, iu=0):
"""
Extract sub-units hard data for a unit
Parameters
----------
unit : :class:`unit` object
unit to extract hard data
iu : int
index of the unit realization to extract hard data
Returns
-------
hd : list of tuples
list of hard data coordinates (x,y,z)
sub_units : list of int
list of sub-units ID
"""
mask = self.Geol.units_domains[iu] == unit.ID
hd=[]
sub_units=[]
for un in unit.SubPile.list_units:
for ix,iy,iz in zip(un.x, un.y,un.z):
cell=self.coord2cell(ix,iy,iz)
if cell is not None:
if mask[cell]:
hd.append((ix, iy, iz))
sub_units.append(un.ID)
else:
pass
return hd, sub_units
def hd_fa_in_unit(self, unit, iu=0):
"""
Extract facies hard data for a unit and send warning
if a hard data should not be in the unit
Parameters
----------
unit : :class:`unit` object
unit to extract hard data
iu : int
unit realization index to extract hard data
"""
mask=self.unit_mask(unit.name, iu=iu)
hd=[]
facies=[]
errors={}
for fa in self.get_all_facies():
fa_err=0
for ix,iy,iz in zip(fa.x, fa.y,fa.z):
cell=self.coord2cell(ix,iy,iz)
if mask[cell]:
if fa not in unit.list_facies:
fa_err += 1
#if self.verbose:
#print("Warning facies {} have been found inside unit {}".format(fa.name, unit.name))
else:
hd.append((ix, iy, iz))
facies.append(fa.ID)
else:
pass
errors[fa.name]=fa_err
#print errors
if np.sum(list(errors.values())) > 0:
if self.verbose:
print("Some errors have been found \nSome facies were found inside units where they shouldn't be \n\n### List of errors ####")
for k,v in errors.items():
if v > 0:
print("Facies {}: {} points".format(k,v))
print("\n")
return hd, facies
def compute_distribution(self):
"""
Compute the probability distribution of the hard data for each unit
For each unit, the distribution is computed using the Normal Score Transform
"""
if self.verbose:
print("\n ## Computing distributions for Normal Score Transform ##\n")
for unit in self.get_all_units(recompute=True):
if unit.surface.N_transfo:
data = np.array(unit.surface.z)
tau = unit.surface.dic_surf["tau"]
bandwidth_mult = unit.surface.dic_surf["bandwidth_mult"]
n = len(data)
if n > 10:
if bandwidth_mult > 0:
from sklearn.neighbors import KernelDensity
from scipy.stats import iqr
bandwidth = bandwidth_mult* 0.9 * min (np.std(data), iqr(data)) * n**(-1/5)
if bandwidth > 0:
kde = KernelDensity(kernel='gaussian', bandwidth=bandwidth).fit(data.reshape(-1, 1))
data_kern = kde.sample(1000)
di = store_distri(data_kern, t=0)
unit.surface.distribution = di
else:
pass
else:
di = store_distri(data_kern, t=0)
unit.surface.distribution = di
else:
if self.verbose:
print("Not enough data points to estimate a cdf and use Normal Score Transform for unit {} \n".format(unit.name))
unit.surface.N_transfo = False
unit.surface.dic_surf["N_transfo"] = False
def estimate_surf_params(self, default_covmodel=None, auto=False, **kwargs):
"""
Alias for infer surface in ArchPy.infer
Parameters
----------
default_covmodel : str, default None
default covariance model to use for surface estimation
auto : bool
to automatically infer parameter (True) or not (False)
kwargs :
various kwargs and parameters that can be passed to ArchPy.infer.infer_surface or ArchPy.infer.fit_surfaces
"""
import ArchPy.infer as api
# default surface covmodel
if auto:
print("### SURFACE PARAMETERS ESTIMATION ### \n")
for u in self.list_all_units:
print("### UNIT : {} ### \n".format(u.name))
api.infer_surface(self, u, default_covmodel=default_covmodel, **kwargs)
else:
api.fit_surfaces(self, default_covmodel=default_covmodel, **kwargs)
def get_proportions(self, type="units", depth_min=0, depth_max=np.inf, ignore_units=[], mask=None):
"""
Function that returns the proportions of the units in the boreholes
Parameters
----------
type : str
type of proportions to return
"units" : proportions of units
"facies" : proportions of facies
depth_max : float, default np.inf
maximum depth of investigation in the boreholes
depth_min : float, default 0
minimum depth of investigation in the boreholes
ignore_units : list of str, default []
units name to ignore during the analysis
mask : 2D ndarray of size (ny, nx), default None
mask where to analyse the boreholes
Returns
-------
dictionnary
"""
list_bhs = self.list_bhs
if mask is not None:
new_l_bhs = []
for bh in list_bhs:
iy, ix = self.coord2cell(bh.x, bh.y)
if mask[iy, ix]:
new_l_bhs.append(bh)
list_bhs = new_l_bhs
meters_units = {}
for bh in list_bhs[:]:
z_min = bh.z - depth_max # minimum altitude of investigation
z_max = bh.z - depth_min # maximum altitude of investigation
if type == "units":
log = bh.log_strati
elif type == "facies":
log = bh.log_facies
if log is not None:
thk = 0
n_units = len(log)
for i in range(n_units):
analysis = True
if i == n_units-1:
s = log[-1]
top = s[1]
bot = bh.z - bh.depth
else:
s2 = log[i+1]
s = log[i]
top = s[1]
bot = s2[1]
if bot >= z_max: # do nothing because interval above investigation altitudes
analysis = False
elif bot < z_max and top > z_max and bot >= z_min: # case where top is above depth min
top = z_max
elif top > z_max and bot < z_min: # case where top above depth min and bot below depth max
top = z_max
bot = z_min
elif bot < z_max and top < z_max and bot > z_min and top > z_min: # inside
pass
elif top <= z_max and bot < z_min and top > z_min: #
bot = z_min
elif top <= z_min:
analysis = False
else:
pass
if analysis:
if s[0] is not None:
if s[0].name not in meters_units.keys():
meters_units[s[0].name] = 0
thk += top - bot
meters_units[s[0].name] += top - bot
prop_units = {}
tot = 0
for k,v in meters_units.items():
if k not in ignore_units:
tot += v
for k,v in meters_units.items():
if k not in ignore_units:
prop_units[k] = v/tot
return prop_units
    def process_bhs(self, step=None, facies=True, stop_condition=False):
        """
        ArchPy Pre-processing algorithm.
        Extract hard data from boreholes for all units given
        the Piles defined in the Arch_table object.

        Stages: merge all borehole lists, order the piles, deduplicate
        boreholes sharing a grid cell (deepest kept), split logs by
        hierarchy, check consistency, then extract facies hard data and
        unit surface contacts (equality and inequality points).

        Parameters
        ----------
        step : float
            vertical interval for extracting borehole facies data
        facies: bool
            flag to indicate to process facies data or not
        stop_condition: bool
            if True the process is aborted when an inconsistency in bhs
            is found; if False inconsistent bhs are simply removed
        """
        # functions
        def check_bhs_consistency(bhs_lst, stop_condition=False):
            """
            Check that boreholes are not in the same cell,
            have correct units/facies info, etc.
            Inconsistent boreholes are removed from bhs_lst in place,
            or the whole process is aborted if stop_condition is True.
            """
            bhs_lst_cop=bhs_lst.copy()
            stop=0
            no_gap = True
            ## check consistency ##
            for bh in bhs_lst_cop:
                s_bef=0
                i=0
                if bh.log_strati is not None:
                    ## determine pile nature
                    try:
                        nature = bh.log_strati[0][0].mummy_unit.SubPile.nature
                    except:
                        nature = "surfaces"
                    for s in bh.log_strati:
                        # NOTE(review): the second test is redundant — "comf" is not None
                        if s[0] is not None or s[0] == "comf": # if there is unit info (not a gap)
                            if s_bef == 0 and no_gap:
                                s_bef=s
                                # first entry must start at the borehole top altitude
                                if s[1] != bh.z:
                                    if self.verbose:
                                        print("First altitude in log strati of bh {} is not set at the top of the borehole, altitude changed".format(bh.ID))
                                    bh.log_strati[0]=(s[0], bh.z)
                                    s_bef=s
                            elif s_bef != 0:
                                # check if unit appear only one time
                                c=0
                                for s2 in bh.log_strati:
                                    if s2[0] is not None:
                                        c+= (s2[0] == s[0])
                                #check consistency with pile (units must appear in increasing order)
                                if s[0].order < s_bef[0].order:
                                    if nature == "surfaces":
                                        if self.verbose:
                                            print("borehole {} not consistent with the pile".format(bh.ID)) # remove and not stop
                                        if stop_condition:
                                            stop=1
                                        else:
                                            #remove borehole from lists
                                            remove_bh(bh, bhs_lst)
                                        break
                                #check height (altitudes must decrease downwards)
                                elif s[1] > s_bef[1]:
                                    if self.verbose:
                                        print("Height in log_strati in borehole {} must decrease with depth".format(bh.ID))
                                    if stop_condition:
                                        stop=1
                                    else:
                                        #remove borehole from lists
                                        remove_bh(bh, bhs_lst)
                                    break
                                #check if unit appear only one time
                                elif c > 1:
                                    if nature == "surfaces":
                                        if self.verbose:
                                            print("Unit {} appear more than one time in log_strati of borehole {}".format(s[0].name, bh.ID))
                                        if stop_condition:
                                            stop=1
                                        else:
                                            #remove borehole from lists
                                            remove_bh(bh, bhs_lst)
                                        break
                                s_bef=s
                        else:
                            # gap at the very top of the log
                            if i == 0:
                                no_gap=False
                        i += 1
                if bh.log_facies is not None:
                    i=0
                    for fa in bh.log_facies:
                        if i == 0:
                            fa_bef=fa
                            # first facies entry must start at the borehole top altitude
                            if fa[1] != bh.z:
                                if self.verbose:
                                    print("First altitude in log facies of bh {} is not set at the top of the borehole, altitude changed".format(bh.ID))
                                bh.log_facies[0]=(fa[0], bh.z)
                                fa_bef=fa
                        else:
                            if fa[1] > fa_bef[1]:
                                if self.verbose:
                                    print("Height in log_facies in borehole {} must decrease with depth".format(bh.ID))
                                if stop_condition:
                                    stop=1
                                else:
                                    #remove borehole from lists
                                    remove_bh(bh, bhs_lst)
                            fa_bef=fa
                        i += 1
            if stop:
                if self.verbose:
                    print("Process bhs aborted")
                return None
        def remove_bh(bh, bhs_lst):
            # Remove a borehole from the working list only (the permanent
            # self.list_* lists are left untouched).
            #if bh in self.list_bhs:
            #    self.rem_bh(bh)
            #elif bh in self.list_fake_bhs:
            #    self.rem_fake_bh(bh)
            if bh in bhs_lst:
                bhs_lst.remove(bh) # remove borehole from current list
        #merge lists
        bhs_lst=self.list_bhs + self.list_fake_bhs + self.list_map_bhs
        #get grid
        xg=self.get_xg()
        yg=self.get_yg()
        zg=self.get_zg()
        #order units
        self.order_Piles()
        #set hierarchy_relations
        self.hierarchy_relations(vb=self.verbose)
        if len(self.list_bhs + self.list_fake_bhs + self.list_map_bhs) == 0:
            if self.verbose:
                print("No borehole found - no hd extracted")
            #self.bhs_processed=1
            return
        ### extract strati units and facies info from borehole data
        if self.bhs_processed == 0:
            self.sto_hd = []
            ### check multiple boreholes in the same cell ###
            t=KDTree(self.xu2D)
            xbh=[i.x for i in bhs_lst]
            ybh=[i.y for i in bhs_lst]
            c_bh=np.array([xbh, ybh]).T # boreholes coordinates
            idx=t.query(c_bh, return_distance=False)
            bh_arr=np.array(bhs_lst) # array of boreholes
            idx_removed=[]
            for idx1 in idx:
                if idx1 not in idx_removed:
                    mask=(idx1 == idx).reshape(-1)
                    bhs_cell=bh_arr[mask] #boreholes in the same cell
                    if len(bhs_cell) > 1: # more than one borehole in one cell
                        if self.verbose:
                            print("Multiples boreholes {} were found inside the same cell, the deepest will be kept".format([i.ID for i in bhs_cell]))
                        depths=(np.array([i.depth for i in bhs_cell]))
                        mask2=(depths == max(depths))
                        if sum(mask2) == 1:
                            # unique deepest borehole --> remove the others
                            for (bh, chk) in zip(bhs_cell, mask2):
                                if ~chk:
                                    #remove borehole from lists
                                    remove_bh(bh, bhs_lst)
                            idx_removed.append(idx1)
                        elif sum(mask2) > 1: #bhs have the same depths --> remove others
                            #remove borehole from lists
                            for i in range(1, len(bhs_cell)):
                                remove_bh(bhs_cell[i], bhs_lst)
                            idx_removed.append(idx1)
                        else:
                            if self.verbose:
                                print("Error this shouldn't happen :)")
            if len(bhs_lst) == 0:
                if self.verbose:
                    print("No valid borehole, processus aborted")
                return None
            ## SPLIT BOREHOLES ##
            # logs must be splitted by hierarchy group and linked with assigned Pile
            new_bh_lst=[]
            for bh in bhs_lst:
                if bh.log_strati is not None:
                    if len([i[0] for i in bh.log_strati if i[0] is not None]) > 0:
                        for new_bh in split_logs(bh):
                            new_bh_lst.append(new_bh)
                elif bh.log_facies is not None:
                    new_bh_lst.append(bh)
            #check consistency
            # NOTE(review): the outer stop_condition argument is not forwarded
            # here — check_bhs_consistency always runs with its default
            # (False, i.e. remove inconsistent bhs); confirm intended
            check_bhs_consistency(new_bh_lst)
            def add_contact(s, x, y, z, type="equality", z2=None): #function to add an hd point to a surface
                if self.coord2cell(x, y, z) is not None:
                    if type == "equality":
                        s.surface.x.append(x)
                        s.surface.y.append(y)
                        s.surface.z.append(z)
                    elif type == "ineq_inf":
                        s.surface.ineq.append([x, y, 0, z, np.nan])
                    elif type == "ineq_sup":
                        s.surface.ineq.append([x, y, 0, np.nan, z])
                    elif type == "double_ineq":
                        s.surface.ineq.append([x, y, 0, z, z2]) # inferior and upper ineq
            if len(new_bh_lst) == 0:
                if self.verbose:
                    print("No valid borehole, processus aborted")
                return None
            #### PROCESSING (ArchPy Algorithm) ######
            #### Facies ####
            if facies:
                if step is None:
                    step=np.abs(zg[1] - zg[0]) # interval spacing to sample data
                for bh in new_bh_lst:
                    if bh.log_facies is not None:
                        for zi in np.arange(bh.z+step/2, bh.z-bh.depth, -step): # loop over all the borehole from top to botom
                            if zi < bh.z:
                                # facies interval containing altitude zi
                                idx=np.where(zi < np.array(bh.log_facies)[:, 1])[0][-1]
                                fa=bh.log_facies[idx][0]
                                if fa is not None: # if there is data
                                    if self.coord2cell(bh.x, bh.y, zi) is not None:
                                        fa.x.append(bh.x)
                                        fa.y.append(bh.y)
                                        fa.z.append(zi)
            # # remove None at end of bh
            # while [bh for bh in new_bh_lst if bh.log_strati[-1][0] is None]:
            #     for bh in new_bh_lst:
            #         if bh.log_strati[-1][0] is None:
            #             new_bh_lst.remove(bh)
            #             new_bh = bh
            #             new_bh.log_strati = new_bh.log_strati[:-1]
            #             new_bh_lst.append(new_bh)
            # print([i.log_strati for i in new_bh_lst])
            #### Units ####
            for bh in new_bh_lst:
                if bh.log_strati is not None:
                    for i, s in enumerate(bh.log_strati): # loop over borehole (i: index, s: tuple (strati, altitude of contact))
                        if s[0] is not None: # if there is unit info
                            s1=s[0] # unit of interest (first element is strati object)
                            # get pile
                            if s1.mummy_unit is not None:
                                Pile=s1.mummy_unit.SubPile # Link with pile
                            elif s1 in self.Pile_master.list_units:
                                Pile=self.Pile_master
                            if Pile.nature == "surfaces":
                                if (i == 0) and (s1.order == 1): # first unit encountered is also first unit in pile
                                    pass
                                else:
                                    non_unit=True # flag to know if above unit is None or not
                                    if i == 0: # first unit in the log
                                        s_above_order=1 # second unit in pile (first is ignored)
                                        non_unit=False # flag
                                    elif i <= len(bh.log_strati): # others units encountered except first one
                                        s_above=bh.log_strati[i-1] # unit just above unit of interest in bh
                                        # check above is not None
                                        if s_above[0] is not None:
                                            s_above_order=s_above[0].order
                                            non_unit=False # flag
                                    if non_unit == False:
                                        if s1.contact == "comf":
                                            # conformable contact: constrain intermediate surfaces only
                                            for il in range(s_above_order, s1.order-1):
                                                s2=Pile.list_units[il]
                                                if s2.surface.contact == "erode":
                                                    add_contact(s2, bh.x, bh.y, s[1], "ineq_inf")
                                                elif s2.surface.contact == "onlap":
                                                    add_contact(s2, bh.x, bh.y, s[1], "ineq_sup")
                                        elif s1.contact != "comf":
                                            erod_lst=[] # list of erode units
                                            erod_lst_2=[] # list of erode surfaces (no unit, don't have volume)
                                            for il in range(s_above_order, s1.order-1): # check if overlaying layers are erode
                                                s2=Pile.list_units[il]
                                                if s2.surface.contact == "erode" :
                                                    if s2.contact == "onlap":
                                                        erod_lst.append(s2)
                                                    elif s2.contact == "erode":
                                                        erod_lst_2.append(s2)
                                            if erod_lst: # if at least one erode layer exists above --> add equality point to erode
                                                s2=min(erod_lst) # select higher one
                                                if i == 0: # first unit at topography must be ineq_inf --> all erosions must go above
                                                    add_contact(s2, bh.x, bh.y, s[1], "ineq_inf")
                                                else:
                                                    add_contact(s2, bh.x, bh.y, s[1], "equality")
                                                add_contact(s1, bh.x, bh.y, s[1], "ineq_inf")
                                                # supplementary info, layer above erosion layer must go below and other erosion layer must go above
                                                for il in range(s_above_order, s1.order-1):
                                                    s_erod=Pile.list_units[il]
                                                    if s_erod.order < s2.order and i > 0: # non eroded layers --> not deposited a checker
                                                        add_contact(s_erod, bh.x, bh.y, s[1], "ineq_sup")
                                                    if s_erod.surface.contact == "erode":
                                                        add_contact(s_erod, bh.x, bh.y, s[1], "ineq_inf")
                                            elif erod_lst_2 and len(erod_lst) == 0: # only erode surfaces
                                                # ambiguous situation --> store all alternative
                                                # interpretations as stochastic hard data (sto_hd)
                                                l = []
                                                # case no erosion
                                                case = []
                                                if i == 0:
                                                    case.append((s1, "ineq_inf")) # unit at the topography must go above topo
                                                else:
                                                    case.append((s1, "equality")) # else equality
                                                for il in range(s_above_order, s1.order-1): # add inequality sup to other above layers
                                                    s_12=Pile.list_units[il] # unit above s1
                                                    if s_12.surface.contact == "onlap" and i > 0:
                                                        case.append((s_12, "ineq_sup"))
                                                    elif s_12.contact == "erode":
                                                        case.append((s_12, "ineq_inf"))
                                                l.append(case)
                                                for er in erod_lst_2[::-1] : # cases of erosion (one for each er. surface)
                                                    case = []
                                                    case.append((er, "equality"))
                                                    case.append((s1, "ineq_inf"))
                                                    for il in range(s_above_order, s1.order-1):
                                                        s_erod=Pile.list_units[il]
                                                        if s_erod.order < er.order:
                                                            if s_erod.surface.contact == "onlap":
                                                                case.append((s_erod, "ineq_sup"))
                                                            elif s_erod.surface.contact == "erode":
                                                                case.append((s_erod, "ineq_inf"))
                                                        elif s_erod.order > er.order :
                                                            if s_erod.surface.contact == "erode":
                                                                case.append((s_erod, "ineq_inf")) # erode layers below er goes above
                                                    l.append(case)
                                                p = np.ones(len(l))*(1/len(l)) # user input ?
                                                self.sto_hd.append(((bh.x, bh.y, s[1]), l, p))
                                            elif erod_lst and erod_lst_2: # both (TO DO)
                                                pass
                                            else: # no erode layer --> exact data on surface of interest (s1) and ineq sup to others above
                                                if i == 0:
                                                    add_contact(s1, bh.x, bh.y, s[1], "ineq_inf") # unit at the topography must go above topo
                                                else:
                                                    add_contact(s1, bh.x, bh.y, s[1], "equality") # else equality
                                                for il in range(s_above_order, s1.order-1): # add inequality sup to other above layers
                                                    s_12=Pile.list_units[il] # unit above s1
                                                    if s_12.surface.contact == "onlap" and i > 0:
                                                        add_contact(s_12, bh.x, bh.y, s[1], "ineq_sup")
                                    else: # unit above is a gap TO DO to update with new approach
                                        if i == 1: # if second unit in log and above is None
                                            add_contact(s1, bh.x, bh.y, s[1], "ineq_inf") # unit must go above
                                        else:
                                            s_gap=s_above # store gap information
                                            s_above=bh.log_strati[i-2] # take unit above gap
                                            s_above_order=s_above[0].order
                                            erod_lst=[]
                                            for il in range(s_above_order, s1.order-1): # check if overlaying layers are erode
                                                s2=Pile.list_units[il]
                                                if s2.surface.contact == "erode":
                                                    erod_lst.append(s2)
                                            if erod_lst: # if at least one erode layer exists above
                                                s2=min(erod_lst) # select higher one
                                                add_contact(s2, bh.x, bh.y, s[1], z2=s_gap[1], type="double_ineq") # erode surface must go between gap
                                                add_contact(s1, bh.x, bh.y, s[1], "ineq_inf")
                                                # supplementary info, layer above erosion layer must go below and other erosion layer must go above
                                                for il in range(s_above_order, s1.order-1):
                                                    s_erod=Pile.list_units[il]
                                                    if s_erod.order < s2.order and i > 0: # non eroded layers --> not deposited a checker
                                                        add_contact(s_erod, bh.x, bh.y, s[1], "ineq_sup")
                                                    elif s_erod.order > (s2.order): # layers below erosion horizon --> must go above
                                                        if s_erod.surface.contact == "erode":
                                                            add_contact(s_erod, bh.x, bh.y, s[1], "ineq_inf")
                                            else: # no erode layer
                                                add_contact(s1, bh.x, bh.y, s[1], z2=s_gap[1], type="double_ineq") # surface must go between gap
                                                # add upper bound to other above units in pile
                                                for il in range(s_above_order, s1.order-1): # add inequality sup to other above layers
                                                    s_12=Pile.list_units[il] # unit above s1
                                                    if s_12.surface.contact == "onlap" and i > 0:
                                                        add_contact(s_12, bh.x, bh.y, s_gap[1], "ineq_sup")
                                    if (i == len(bh.log_strati)-1) & (s1.order < Pile.list_units[-1].order): # if last unit is not last in the pile --> below layers must go below
                                        for il in range(s1.order, Pile.list_units[-1].order):
                                            s_12=Pile.list_units[il]
                                            add_contact(s_12, bh.x, bh.y, bh.z-bh.depth, "ineq_sup")
                            elif Pile.nature == "3d_categorical":
                                ## get top and bottom
                                top = s[1]
                                if i != len(bh.log_strati) - 1:
                                    bot = bh.log_strati[i+1][1]
                                else:
                                    bot = bh.z - bh.depth
                                if step is None:
                                    step = self.get_sz() # interval spacing to sample data
                                # sample the unit itself as categorical hard data
                                for iz in np.arange(top, bot, -step):
                                    s1.x.append(bh.x)
                                    s1.y.append(bh.y)
                                    s1.z.append(iz)
                        else: # if there is a gap ignore and pass
                            pass
            # compute distribution for each unit if necessary
            self.compute_distribution()
            self.bhs_processed=1
            if self.verbose:
                print("Processing ended successfully")
        elif self.bhs_processed == 1:
            if self.verbose:
                print("Boreholes already processed")
    def compute_surf(self, nreal=1, fl_top=True, rm_res_files=True):
        """
        Performs the computation of the surfaces: master pile first, then
        the sub-piles recursively; results may be written to disk.

        Parameters
        ----------
        nreal: int
            number of realizations
        fl_top: bool
            assign first layer to top of the domain (True by default)
        rm_res_files : bool
            flag to remove previous existing resulting files in working directory
        """
        start=time.time()
        np.random.seed(self.seed) # set seed
        if nreal == 0:
            if self.verbose:
                print("Warning: nreal is set to 0.")
            return
        if self.bhs_processed == 0:
            print("Boreholes not processed, fully unconditional simulations will be tempted")
            #assert self.bhs_processed == 1, "Boreholes not processed"
        # create work directory if it doesn't exist
        if self.ws not in os.listdir():
            os.mkdir(self.ws)
        # remove preexisting results files in ws
        if rm_res_files:
            for file in os.listdir(self.ws):
                if file.split(".")[-1] in ("unt", "fac", "pro"):
                    fpath=os.path.join(self.ws, file)
                    os.remove(fpath)
        self.Geol.units_domains=np.zeros([nreal, self.get_nz(), self.get_ny(), self.get_nx()], dtype=np.int8) # initialize tmp result array
        if self.check_units_ID() is None: # check if units have correct IDs
            return None
        self.get_pile_master().compute_surf(self, nreal, fl_top, vb=self.verbose) # compute surfs of the first pile
        ## stochastic hard data
        #hierarchies
        def fun(pile): # compute surf hierarchically (recursion over sub-piles)
            i=0
            for unit in pile.list_units:
                if unit.f_method == "SubPile":
                    if unit.SubPile.nature == "surfaces":
                        # delimit the sub-pile with the parent unit's top/bot surfaces
                        tops=self.Geol.surfaces_by_piles[pile.name][:, i]
                        bots=self.Geol.surfaces_bot_by_piles[pile.name][:, i]
                        unit.SubPile.compute_surf(self, nreal, fl_top=True, subpile=True, tops=tops, bots=bots, vb=self.verbose)
                        fun(unit.SubPile)
                    elif unit.SubPile.nature == "3d_categorical":
                        # sub-units simulated as categorical facies inside the unit
                        unit.compute_facies(self, nreal=1, mode="units", verbose=self.verbose)
                        fun(unit.SubPile)
                i += 1
        fun(self.get_pile_master()) # run function
        if self.fill_flag:
            self.fill_top_unit()
        end=time.time()
        if self.verbose:
            print("\n### {}: Total time elapsed for computing surfaces ###".format(end - start))
        # write results
        if self.write_results:
            # one pickle file per realization in the working directory
            units_domains=self.Geol.units_domains
            for ireal in range(nreal):
                ud=units_domains[ireal]
                fname=self.name+"_{}.ud".format(ireal)
                fpath=os.path.join(self.ws, fname)
                with open(fpath, "wb") as f:
                    pickle.dump(ud, f)
            # delete units domains in Geol object to free memory space
            del(self.Geol.units_domains)
        self.surfaces_computed=1 # flag
    def define_domains(self, surfaces, fl_top=True):
        """
        Performs the computation of the units domains when surfaces are
        provided externally (instead of being simulated).

        Parameters
        ----------
        surfaces : dict
            dictionary with pile names as keys and surface arrays as
            values; the first axis of each array is the realization index
        fl_top: bool
            assign first layer to top of the domain (True by default)
        """
        #np.random.seed(self.seed) # set seed
        nreal = surfaces[self.get_pile_master().name].shape[0] # get number of realizations
        self.Geol.units_domains=np.zeros([nreal, self.get_nz(), self.get_ny(), self.get_nx()], dtype=np.int8) # initialize tmp result array
        if self.check_units_ID() is None: # check if units have correct IDs
            return None
        self.get_pile_master().define_domains(self, surfaces[self.get_pile_master().name], vb=self.verbose, fl_top=fl_top)
        #hierarchies
        def fun(pile): # define domains hierarchically (recursion over sub-piles)
            i=0
            for unit in pile.list_units:
                if unit.f_method == "SubPile":
                    # delimit the sub-pile with the parent pile's surfaces i and i+1
                    tops=surfaces[pile.name][:, i]
                    bots=surfaces[pile.name][:, i+1]
                    unit.SubPile.define_domains(self, surfaces[unit.SubPile.name], fl_top=True, subpile=True, tops=tops, bots=bots, vb=self.verbose)
                    fun(unit.SubPile)
                i += 1
        fun(self.get_pile_master()) # run function
        self.surfaces_computed=1 # flag
def fill_ID(self, arr, ID=0):
"""
Fill ID values in an 3D array given surroundings values using nearest neighbors
Parameters
----------
arr: ndarray of size (nz, ny, nx)
simulation grid size
ID: ID
ID to replace
Returns
-------
arr: ndarray of size (nz, ny, nx)
simulation grid size
"""
nx = self.get_nx()
ny = self.get_ny()
nz = self.get_nz()
from sklearn.neighbors import NearestNeighbors
X = np.ones([nz, ny, nx])* self.xgc
Y = np.ones([nz, ny, nx])
Y[:] = np.ones([nx, ny]).T * self.ygc.reshape(-1, 1)
Z = np.ones([nz, ny, nx])
Z[:, :] =( np.ones([nz, nx]) * self.zgc.reshape(-1, 1)).reshape(nz, 1, nx)
xu3D = np.array([X.flatten(), Y.flatten(), Z.flatten()]).T
mask = (arr != ID) * (arr != 0) # mask of data
X_fit = xu3D[mask.flatten()]
y_fit = arr.flatten()[mask.flatten()]
mask = (arr == ID) # mask where to fill
if mask.any():
X_pred = xu3D[mask.flatten()]
# fit
nn = NearestNeighbors(n_neighbors=1).fit(X_fit)
#pred
res = nn.kneighbors(X_pred, return_distance=False, n_neighbors=1)
# assign
y_pred = y_fit[res]
arr_test = arr.copy()
arr_test[mask] = y_pred[:, 0] # reassign values
return arr_test
def fill_top_unit(self, method = "nearest_neighbors"): # to remove
"""
Function to fill each cells simulated top unit given their nearest neighbour.
Parameters
----------
method: str
method to fill top unit. Default is "nearest_neighbors"
"""
nx = self.get_nx()
ny = self.get_ny()
nz = self.get_nz()
# get top unit ID
top_unit = self.get_all_units(1)[0]
ID = top_unit.ID
if method == "nearest_neighbors":
from sklearn.neighbors import NearestNeighbors
X = np.ones([nz, ny, nx])* self.xgc
Y = np.ones([nz, ny, nx])
Y[:] = np.ones([nx, ny]).T * self.ygc.reshape(-1, 1)
Z = np.ones([nz, ny, nx])
Z[:, :] =( np.ones([nz, nx]) * self.zgc.reshape(-1, 1)).reshape(nz, 1, nx)
xu3D = np.array([X.flatten(), Y.flatten(), Z.flatten()]).T
for iu in range(self.nreal_units):
arr = self.get_units_domains_realizations(iu)
mask = (arr != ID) * (arr != 0)
X_fit = xu3D[mask.flatten()]
y_fit = arr.flatten()[mask.flatten()]
mask = (arr == ID)
if mask.any():
X_pred = xu3D[mask.flatten()]
# fit
nn = NearestNeighbors(n_neighbors=1).fit(X_fit)
#pred
res = nn.kneighbors(X_pred, return_distance=False, n_neighbors=1)
# assign
y_pred = y_fit[res]
arr_test = arr.copy()
arr_test[mask] = y_pred[:, 0] # reassign values
self.Geol.units_domains[iu] = arr_test
else:
if self.vb:
print("Invalid method")
def compute_facies(self, nreal=1, verbose_methods=0):
"""
Performs the computation of the facies
Parameters
----------
nreal: int
number of realizations
verbose_methods: int
verbose for the facies methods, 0 by default
"""
if nreal==0:
if self.verbose:
print("Warning: nreal set to 0")
return
#asserts
all_ids=[]
for fa in self.get_all_facies():
if fa.ID not in all_ids:
all_ids.append(fa.ID)
else:
raise ValueError ("{} facies index has been defined on multiple units")
#start
start_tot=time.time()
np.random.seed (self.seed) # set seed
#grid
xg=self.xg
yg=self.yg
zg=self.zg
#initialize array and set number of realization
self.nreal_fa=nreal
self.Geol.facies_domains=np.zeros([self.nreal_units, nreal, self.get_nz(), self.get_ny(), self.get_nx()], dtype=np.int8)
for strat in self.get_all_units(): # loop over strati
if strat.contact == "onlap":
if self.verbose:
print("\n### Unit {}: facies simulation with {} method ####".format(strat.name, strat.f_method))
start=time.time()
strat.compute_facies(self, nreal, verbose=verbose_methods)
end=time.time()
if self.verbose:
print("Time elapsed {} s".format(np.round((end - start), decimals=2)))
end=time.time()
if self.verbose:
print("\n### {}: Total time elapsed for computing facies ###".format(np.round((end - start_tot), decimals=2)))
if self.write_results:
# write results
fa_domains=self.Geol.facies_domains
for iu in range(self.nreal_units):
for ifa in range(self.nreal_fa):
fd=fa_domains[iu, ifa]
fname=self.name+"_{}_{}.fd".format(iu, ifa)
fpath=os.path.join(self.ws, fname)
with open(fpath, "wb") as f:
pickle.dump(fd, f)
del(self.Geol.facies_domains) # delete facies domain to free some memory
self.facies_computed=1
def compute_domain(self, s1, s2):
"""
Return a bool 2D array that define the domain where the units
exist (between two surfaces, s1 and s2)
Parameters
----------
s1, s2: 2D ndarrays
two surfaces over the simulation domain size: (ny, nx)
Returns
-------
domain
3D ndarray of bools
"""
zg=self.get_zg()
xg=self.get_xg()
yg=self.get_yg()
sz=self.get_sz()
nx=self.get_nx()
ny=self.get_ny()
nz=self.get_nz()
top=self.top
bot=self.bot
z0=zg[0]
z1=zg[-1]
s1[s1 < z0]=z0
s1[s1 > z1]=z1
s2[s2 < z0]=z0
s2[s2 > z1]=z1
idx_s1=(np.round((s1-z0)/sz)).astype(int)
idx_s2=(np.round((s2-z0)/sz)).astype(int)
#domain
a=np.zeros([nz, ny, nx], dtype=bool)
for iy in range(ny):
for ix in range(nx):
a[idx_s2[iy, ix]: idx_s1[iy, ix], iy, ix]=1
return a
def compute_prop(self, nreal=1):
#TO DO --> add an option to resimulate only certain properties (e.g. *args)
"""
Performs the computation of the properties added to the ArchTable
Parameters
----------
nreal: int
number of realizations
"""
assert len(self.list_props) > 0, "No property have been added to Arch_table object"
assert nreal > -1, "You cannot make a negative number of realizations, nice try tho"
if nreal==0:
if self.verbose:
print("Warning: nreal is set to 0")
return
np.random.seed (self.seed) # set seed
self.nreal_prop=nreal
xg=self.xgc
yg=self.ygc
zg=self.zgc
nx=self.get_nx()
ny=self.get_ny()
nz=self.get_nz()
x0=self.get_ox()
y0=self.get_oy()
z0=self.get_oz()
sx=self.get_sx()
sy=self.get_sy()
sz=self.get_sz()
nreal_units=self.nreal_units
nreal_fa=self.nreal_fa
self.Geol.prop_values={} #remove previous prop simulations
for prop in self.list_props:
counter=0
if self.verbose:
print ("### {} {} property models will be modeled ###".format(nreal_units*nreal_fa*nreal, prop.name))
prop_values=np.zeros([nreal_units, nreal_fa, nreal, nz, ny, nx], dtype=np.float32) # property values
#HD
x=prop.x
v=prop.v
#loop
for iu in range(nreal_units): # loop over units models
for ifa in range(nreal_fa): # loop over lithological models
K_fa=np.zeros([nreal, nz, ny, nx], dtype=np.float32)
for strat in self.get_all_units():
if strat.f_method != "Subpile": #discard units filled by subunits
# mask_strat=(self.get_units_domains_realizations(iu) == strat.ID)
mask_strat=(self.Geol.units_domains[iu] == strat.ID) # mask unit
for ite,fa in enumerate(strat.list_facies):
#create mask facies for prop simulation
# facies_domain=self.get_facies(iu, ifa, all_data=False)
facies_domain=self.Geol.facies_domains[iu, ifa].copy() # gather a realization of facies domain
facies_domain[facies_domain != fa.ID]=0 # set 0 to other facies
facies_domain[~mask_strat]=0 # set 0 outside of the strati to simulate same facies but in other strat independantely
mask_facies=facies_domain.copy()
mask_facies[mask_facies != 0]=1 # set to 1 for mask
#simulate
if (fa in prop.facies) and (fa.ID in facies_domain): # simulation of the property (check if facies is inside strat unit)
i=prop.facies.index(fa) # retrieve index
m=prop.means[i] #mean value
covmodel=prop.covmodels[i] # covariance model used
method=prop.int[i] #method of interpolation (sgs or fft)
if method == "fft":
sims=grf.grf3D(covmodel, [nx, ny, nz], [sx, sy, sz], [x0, y0, z0],
nreal=nreal, mean=m, x=x, v=v, printInfo=False)
elif method == "homogenous":
sims=np.ones([nreal, nz, ny, nx])*m
if x is not None:
if self.verbose:
print("homogenous method chosen ! Warning: Some HD can be not respected")
elif method == "homogenous_uniform":
dat = np.random.uniform(m[0], m[1], nreal)
sims=np.ones([nreal, nz, ny, nx]) * dat[:,np.newaxis,np.newaxis,np.newaxis]
if x is not None:
if self.verbose:
print("homogenous method chosen ! Warning: Some HD can be not respected")
elif method == "sgs":
sims = gci.simulate3D(covmodel, [nx, ny, nz], [sx, sy, sz], [x0, y0, z0],
nreal=nreal, mean=m, mask=mask_facies, x=x, v=v, verbose=0, nthreads=self.ncpu, seed=self.seed + iu*10000 + ite*100 + ifa)["image"].val
sims=np.nan_to_num(sims) #remove nan
elif method == "mps":
#TO DO
pass
else: # not define --> homogenous and default mean value
m=prop.def_mean
sims=np.ones([nreal, nz, ny, nx])*m
else:
m=prop.def_mean
sims=np.ones([nreal, nz, ny, nx])*m
for ir in range(nreal):
sim=sims[ir]
sim[facies_domain != fa.ID]=0
K_fa[ir] += sim
K_fa[:, ~self.mask]=np.nan #default value is 0 --> warning ?
# ma=((self.get_facies(iu, ifa, all_data=False) == 0) * self.mask)
ma=((self.Geol.facies_domains[iu, ifa] == 0) * self.mask)
K_fa[:, ma]=prop.def_mean
if prop.vmin is not None:
K_fa[K_fa < prop.vmin]=prop.vmin
if prop.vmax is not None:
K_fa[K_fa > prop.vmax]=prop.vmax
if self.write_results:
for ireal in range(nreal):
fname=self.name+"{}_{}_{}_{}.pro".format(prop.name, iu, ifa, ireal)
fpath=os.path.join(self.ws, fname)
with open(fpath, "wb") as f:
pickle.dump(K_fa[ireal], f)
else:
prop_values[iu, ifa]=K_fa
counter += nreal
if self.verbose:
print("### {} {} models done".format(counter, prop.name))
if ~self.write_results:
self.Geol.prop_values[prop.name]=prop_values
self.prop_computed=1
def physicsforward(self, method, positions, stratIndex=None, faciesIndex=None, propIndex=None, idx=0, cpuID=0):
import ArchPy.forward as fd
"""Alias to the function ArchPy.forward.physicsforward.
method: string. method to Forward
position: position of the forward"""
return fd.physicsforward(self, method, positions, stratIndex, faciesIndex, propIndex, idx, cpuID)
def unit_mask(self, unit_name, iu=0, all_real=False):
"""
Return the mask of the given unit for a realization iu
Parameters
----------
unit_name: string
unit name defined when creating the unit object
for more details: :class:`Unit`
iu: int
unit realization index (0, 1, ..., Nu)
all_real: bool
flag to know if a mask of all realizations
must be returned
Returns
-------
3D (or 4D) ndarray
mask array
"""
if all_real: #all realizations
unit=self.get_unit(name=unit_name, type="name")
l=[]
def fun(unit): # Hierarchy unit
if unit.f_method == "SubPile":
for su in unit.SubPile.list_units:
l.append(su.ID)
if su.f_method == "SubPile":
fun(su)
else:
l.append(unit.ID)
fun(unit)
arr=np.zeros([self.nreal_units, self.get_nz(), self.get_ny(), self.get_nx()]) # mask with 0 everywhere
for idx in l:
arr[self.get_units_domains_realizations() == idx]=1 # set 1 where unit or sub-units are present
else:
unit=self.get_unit(name=unit_name, type="name")
l=[]
def fun(unit):
if unit.f_method == "SubPile":
for su in unit.SubPile.list_units:
l.append(su.ID)
if su.f_method == "SubPile":
fun(su)
else:
l.append(unit.ID)
fun(unit)
arr=np.zeros([self.get_nz(), self.get_ny(), self.get_nx()]) #mask with 0 everywhere
for idx in l:
arr[self.get_units_domains_realizations(iu) == idx]=1 #set 1 where unit or sub-units are present
return arr
    ### other functions ###
    def orientation_map(self, unit,
                        azi_top="gradient", dip_top="gradient",
                        azi_bot="gradient", dip_bot="gradient",
                        iu=0, smooth=2):
        """
        Compute orientation maps for a given unit (azimuth and dip)

        Parameters
        ----------
        unit: :class:`Unit` object
            unit object from which we want to have the orientation map
        azi_top: string or float
            method to use to infer azimuth of the top of the unit
            ("gradient" to derive it from the top surface, or a constant value)
        azi_bot: string or float
            method to use to infer azi of the bottom of the unit
            The angles are then interpolated between top and bottom (linear interpolation)
        dip_top: string or float
            same as azimuth but for dip
        dip_bot: string or float
            same as azimuth but for dip
        iu: int
            unit realization index (0, 1, ..., Nu)
        smooth: int
            half-size windows for rolling mean applied to the surfaces

        Returns
        -------
        (azi, dip)
            two 3D ndarrays of shape (nz, ny, nx) with azimuth and dip angles
            in degrees (0 outside the unit mask)
        """
        # NOTE(review): this inner function appears unused (azi_dip_JS is the
        # one called below); kept for reference.
        # It reads sx, sy, nx, ny from the enclosing scope; those locals are
        # assigned further down in the method, before any call would happen.
        def azi_dip(s):
            dy=-np.diff(s, axis=0)[:, 1: ]/sy
            dx=-np.diff(s, axis=1)[1:,: ]/sx
            e2=(0, 1) # principal direction (North)
            ang=np.ones([dy.shape[0]*dy.shape[1]], dtype=np.float32)
            i=-1
            for ix, iy in zip(dx.flatten(), dy.flatten()):
                i+= 1
                v=(ix, iy)
                if np.sqrt(v[0]**2+v[1]**2) == 0: #divide by 0
                    val=0
                else:
                    val=(np.rad2deg(np.arccos(np.dot(e2, v)/np.sqrt(v[0]**2+v[1]**2))))
                    if v[0]<0:
                        val *= -1
                ang[i]=val
            dip=np.rad2deg(np.arctan(-dy/np.cos(np.deg2rad(ang).reshape(ny-1, nx-1))))
            return ang.reshape(ny-1, nx-1), dip
        # azimuth/dip derived from the surface gradient (vectorized)
        def azi_dip_JS(s):
            fy, fx = np.gradient(s, sy, sx)
            fx = -fx
            fy = -fy
            azi = 180/np.pi *(np.pi/2 - np.arctan2(fy, fx))
            dip = - 180/np.pi * (np.pi/2 - np.arcsin(1/np.sqrt(1+fx*fx+fy*fy)))
            return azi, dip
        # grid geometry
        xg=self.get_xgc()
        yg=self.get_ygc()
        zg=self.get_zgc()
        nx=self.get_nx()
        ny=self.get_ny()
        nz=self.get_nz()
        sx=self.get_sx()
        sy=self.get_sy()
        sz=self.get_sz()
        # retrieve the pile the unit belongs to (master pile or parent sub-pile)
        if unit.get_h_level() == 1:
            pile=self.get_pile_master()
        else:
            pile=unit.mummy_unit.SubPile
        # smoothing of the top and bottom surfaces of the unit
        top =running_mean_2D(self.Geol.surfaces_by_piles[pile.name][iu, unit.order-1], N=smooth)
        bot =running_mean_2D(self.Geol.surfaces_bot_by_piles[pile.name][iu, unit.order-1], N=smooth)
        # compute angles for top and bottom
        # top: each of azi/dip is either derived from the gradient or constant
        if azi_top == "gradient" and dip_top == "gradient":
            azi_top, dip_top = azi_dip_JS(top)
        elif azi_top == "gradient" and dip_top != "gradient":
            azi_top = azi_dip_JS(top)[0]
            dip_top = np.ones([ny, nx])*dip_top
        elif azi_top != "gradient" and dip_top == "gradient":
            azi_top = np.ones([ny, nx])*azi_top
            dip_top = azi_dip_JS(top)[1]
        else:
            azi_top=np.ones([ny, nx])*azi_top
            dip_top=np.ones([ny, nx])*dip_top
        # bottom: same logic as top
        if azi_bot == "gradient" and dip_bot == "gradient":
            azi_bot, dip_bot = azi_dip_JS(bot)
        elif azi_bot == "gradient" and dip_bot != "gradient":
            azi_bot = azi_dip_JS(bot)[0]
            dip_bot = np.ones([ny, nx])*dip_bot
        elif azi_bot != "gradient" and dip_bot == "gradient":
            azi_bot = np.ones([ny, nx])*azi_bot
            dip_bot = azi_dip_JS(bot)[1]
        else:
            azi_bot=np.ones([ny, nx])*azi_bot
            dip_bot=np.ones([ny, nx])*dip_bot
        # interpolate azimuth vertically between bottom and top of the unit
        azi=np.zeros([nz, ny, nx], dtype=np.float32)
        mask=self.unit_mask(unit_name=unit.name, iu=iu)
        for ix in range(nx):
            for iy in range(ny): # TO FINISH
                t=np.where(mask[:, iy, ix]) # retrieve idx to indicate where layer exist vertically at certain location x
                if len(t[0])>0: # if layer exists at position ix iy
                    i2=np.min(t)
                    i1=np.max(t)
                    vbot=azi_bot[iy, ix]
                    vtop=azi_top[iy, ix]
                    # interpolate along the shortest angular path (handle the
                    # 360 degrees wrap-around between bottom and top azimuths)
                    if (np.abs(np.min((vbot, vtop)) - np.max((vbot, vtop)))) < (np.abs(360 + np.min((vbot, vtop)) - np.max((vbot, vtop)))):
                        azi[:, iy, ix]=np.interp(zg, [zg[i2], zg[i1]], [vbot, vtop])
                    else:
                        if vbot < vtop:
                            azi[:, iy, ix]=np.interp(zg, [zg[i2], zg[i1]], [vbot+360, vtop])
                        else:
                            azi[:, iy, ix]=np.interp(zg, [zg[i2], zg[i1]], [vbot, vtop+360])
        azi[mask!=1]=0
        # interpolate dip vertically between bottom and top of the unit
        dip=np.zeros([nz, ny, nx], dtype=np.float32)
        for ix in range(nx):
            for iy in range(ny):
                t=np.where(mask[:, iy, ix]) # retrieve idx to indicate where layer exist vertically at certain location x
                if len(t[0])>0: # if layer exists at position ix iy
                    i2=np.min(t)
                    i1=np.max(t)
                    dip[:, iy, ix]=np.interp(zg, [zg[i2], zg[i1]], [dip_bot[iy, ix], dip_top[iy, ix]])
        dip[mask!=1]=0
        return azi, dip
def extract_log_facies_bh(self, facies, bhx, bhy, bhz, depth):
"""
Extract the log facies in the ArchPy format at the specific location
Parameters
----------
facies: 3D ndarray
facies realization
bhx, bhy, bhz: float
borehole location
depth: float
depth of investigation
Returns
-------
log_facies: list of :class:`Facies` object
a log facies (list of facies object with elevations)
"""
bh_cell=self.coord2cell(bhx, bhy, bhz)
bh_bot_cell=self.coord2cell(bhx, bhy, bhz-depth)
if bh_bot_cell is not None:
bh_bot_cell=bh_bot_cell[0]
else:
bh_bot_cell=0
id_facies=facies[bh_bot_cell: bh_cell[0+1], bh_cell[1], bh_cell[2]][:: -1]
z_facies=self.zgc[bh_bot_cell: bh_cell[0]+1][:: -1]
i=0
log_facies=[]
fa_prev=0
for fa in id_facies:
if fa != fa_prev and fa != 0:
log_facies.append((self.get_facies_obj(ID =fa, type="ID"), np.round(z_facies[i], 2)))
fa_prev=fa
i += 1
return log_facies
def get_entropy(self, typ = "units", h_level = None, recompute=False):
"""
Compute the Shannon entropy for units or facies and return it.
Parameters
----------
typ: string
type of models to use to compute the entropy,
valid values are "units" and "facies"
h_level: int
hiearchical level to use to compute the entropy,
only used if typ is "units"
recompute: bool
if True, recompute the entropy
Returns
-------
3D ndarray of size (nz, ny, nx)
"""
if typ == "units":
string = "units_entropy"
elif typ== "facies":
string = "units_facies"
if hasattr(self.Geol, string) and recompute == False:
if recompute == "false":
if typ == "units":
return self.Geol.units_entropy
elif typ == "facies":
return self.Geol.facies_entropy
elif recompute or not hasattr(self.Geol, string):
nz = self.get_nz()
ny = self.get_ny()
nx = self.get_nx()
arr = np.zeros([nz, ny, nx])
if typ =="units":
units = self.get_units_domains_realizations(h_level = h_level)
list_units_ids = np.unique(units[units != 0])
data = units[:, self.mask]
SE = np.zeros([data.shape[1]])
#b=len(self.get_all_units())
b = len(list_units_ids)
nreal=units.shape[0]
for idx in list_units_ids:
unit = self.get_unit(ID=idx, type="ID")
Pi = (data == unit.ID).sum(0)/nreal
pi_mask = (Pi != 0)
SE[pi_mask] += Pi[pi_mask] * (np.log(Pi[pi_mask])/np.log(b))
arr[self.mask] =- SE
arr[~self.mask]=np.nan
self.Geol.units_entropy = arr
return arr
elif typ == "facies":
facies_domains = self.get_facies().reshape(-1, self.nz, self.ny, self.nx)
data = units[:, self.mask]
SE = np.zeros([data.shape[1]]) # shannon entropy
b = len(self.get_all_facies())
nreal=facies_domains.shape[0]
for facies in self.get_all_facies():
Pi = (data == facies.ID).sum(0)/nreal
pi_mask = (Pi != 0)
SE[pi_mask] += Pi[pi_mask] * (np.log(Pi[pi_mask])/np.log(b))
arr[self.mask] =- SE
arr[~self.mask]=np.nan
self.Geol.facies_entropy = arr
else:
print("Choose between units or facies")
def realizations_aggregation(self, method="basic",
depth=100, ignore_units=None,
units_to_fill=[],
n_iter = 50):
"""
Method to aggregate multiple ArchPy realizations into one for units and facies (TO DO)
Parameters
----------
method: str
method to use to aggregate the realizations
valid method are:
- basic, return a model with the most probable units/facies in each cells
- probas_prop, return a model constructed sequentially by ensuring that proportions are respected (at best...)
- mean_surfs, return a model created by meaning the surface elevation if units were simulated with categorical method, basic method is used for these units
depth: float
probas_prop parameter, maximum depth of investigation
to compute probas and proportions.
Should be around the median depth of the boreholes
ignore_units: list
probas_prop parameter, units name to ignore.
These units will not be aggregated in the final model.
units_to_fill: list
mean_surfs parameter, units name to fill with NN at the end.
Should not be used except to fill the top unit.
Returns
-------
3D ndarray of size (nz, ny, nx)
"""
if ignore_units is None:
ignore_units = []
ign_un = []
for un in ignore_units:
ign_un.append(un)
nz = self.get_nz()
ny = self.get_ny()
nx = self.get_nx()
if method == "basic":
units = self.get_units_domains_realizations()
most_prob = np.zeros((nz, ny, nx), dtype=np.int8)
for iz in range(self.get_nz()):
for iy in range(self.get_ny()):
for ix in range(self.get_nx()):
if self.mask[iz, iy, ix]:
occ = np.bincount(units[:, iz, iy, ix])
idx = np.where(occ==max(occ))
most_prob[iz, iy, ix] = idx[0][0]
return most_prob
elif method == "probas_prop":
## tirer les proportions de chaque units
d_prop_units = self.get_proportions(depth_min = 0, depth_max = depth, ignore_units = ign_un)
# create mask
inter = int(depth/self.get_sz())
mask_depth = np.zeros([nz, ny, nx], dtype=bool)
for iy in range(ny):
for ix in range(nx):
a = np.where(self.mask[:, iy, ix])
if len(a[0]) > 0:
v = int(max(a[0]) - min(a[0]))
if v > inter:
mask_depth[max(a[0])-inter:max(a[0]), iy, ix] = True
else:
mask_depth[min(a[0]):max(a[0]), iy, ix] = True
# compute probas
d_sorted = {k: v for k, v in sorted(d_prop_units.items(), key=lambda item: item[1])}
units=self.get_units_domains_realizations(all_data=True)
tot_cells = mask_depth.sum()
best_model = np.zeros([nz, ny, nx], dtype=np.int8)
for k,v in d_sorted.items():
# print(k)
# compute prop of unit
#d = self.get_proportions(depth_min = 0, depth_max = depth)
#v = d[k]
# compute proba unit
arr=np.zeros([self.nz, self.ny, self.nx])
# compute probabilities
arr += (units == self.get_unit(k).ID).sum(0)
arr /= units.shape[0]
# loop probas
for iv in np.arange(1, 0, -0.01):
prop = (arr[mask_depth] >= iv).sum() / mask_depth.sum()
# print(prop, v)
if prop > v:
break
# print(np.round(iv, 3))
mask_sim = (best_model==0) & (arr >= iv)
best_model[mask_sim] = mask_sim[mask_sim].astype(int) * self.get_unit(k).ID
#tot_cells -= mask_sim[mask_depth].sum() # remove attributed cells from total
# units[:, mask_sim] = 0 # remove simulated units
# ign_un.append(k)
mask = (best_model == 0) & (self.mask)
best_model[mask] = -99 # non filled units
## fill last cells with nearest neighbors
res = self.fill_ID(best_model, ID=-99)
return res
elif method == "mean_surfs":
best_model = np.zeros([nz, ny, nx], dtype=np.int8)
def fun(pile, bot=None, mask_unit=None):
if pile.nature == "surfaces":
mean_surfs = []
# units, get means surfaces
for un_p in pile.list_units:
mean_surfs.append(np.nanmean(self.get_surfaces_unit(un_p), 0))
for i in range(len(mean_surfs)):
un = pile.list_units[i]
s = mean_surfs[i]
if i == len(mean_surfs)-1:
s2 = bot
else:
s2 = mean_surfs[i+1]
a = self.compute_domain(s, s2)
best_model[a] = un.ID * self.mask[a]
elif pile.nature == "3d_categorical":
ids = [u.ID for u in pile.list_units]
h_lev = pile.list_units[0].get_h_level()
units = self.get_units_domains_realizations(h_level=h_lev)
units = units[:, mask_unit]
l = []
# get most probable units by cells
for i in range(units.shape[1]):
a = np.bincount(units[:, i])
idxs= np.where(a == max(a))[0]
if len(idxs)> 1:
idx = np.random.choice(idxs)
elif len(idxs) == 1:
idx = idxs[0]
else:
idx = 0
pass
l.append(idx)
l = np.array(l)
best_model[mask_unit] = l # assign
# nearest neighbor ?
for i in range(len(pile.list_units)):
un_p = pile.list_units[i]
if un_p.SubPile is not None:
if pile.nature == "surface":
bot = mean_surfs[i+1] # get bottom of un_p
else:
bot = None
mask = (best_model == un_p.ID)
fun(un_p.SubPile, bot, mask)
fun(self.get_pile_master(), self.bot, self.mask)
ids = [self.get_unit(u).ID for u in units_to_fill]
for iv in ids + [0]:
best_model[(best_model==iv) & (self.mask)] = -99
best_model = self.fill_ID(best_model, -99)
return best_model
elif method == "MDS_errors": # a discuter avec Philippe
from sklearn.manifold import MDS
# matrix of distances between simulations
M = np.zeros([self.nreal_units, self.nreal_units])
for ireal in range(len(self.Geol.units_domains)):
for oreal in range(ireal):
s1 = self.Geol.units_domains[ireal]
s2 = self.Geol.units_domains[oreal]
M[ireal, oreal] = np.sum(s1 != s2)
M[oreal, ireal] = M[ireal, oreal]
# mds = MDS(random_state=None)
# M_transform = mds.fit_transform(M)
# from sklearn.cluster import k_means
# centr, code, jsp = k_means(M_transform, 1)
# # plt.scatter(centr[:, 0], centr[:, 1], c="r", s=100, marker="x")
# # plt.scatter(M_transform[:, 0], M_transform[:, 1], c=code)
# dist_sim = np.sqrt((M_transform[:, 0] - centr[0][0])**2 + (M_transform[:, 1] - centr[0][1])**2)
# idx = np.where(dist_sim==min(dist_sim))[0][0]
# loop to find the most representative simulation
l = []
for i in range(n_iter):
mds = MDS(random_state=None)
M_transform = mds.fit_transform(M)
from sklearn.cluster import k_means
centr, code, jsp = k_means(M_transform, 1)
dist_sim = np.sqrt((M_transform[:, 0] - centr[0][0])**2 + (M_transform[:, 1] - centr[0][1])**2)
idx = np.where(dist_sim==min(dist_sim))[0][0]
l.append(idx)
l = np.array(l)
res = np.bincount(l)
idx = np.where(np.bincount(l) == max(np.bincount(l)))[0][0]
best_model = self.Geol.units_domains[idx]
return best_model
else:
print("help")
### plotting ###
def plot_bhs(self, log="strati", plotter=None, v_ex=1, plot_top=False, plot_bot=False):
"""
Plot the boreholes of the Arch_table project.
Parameters
----------
log: string
which log to plot --> strati or facies
plotter: pyvista plotter
v_ex: float
vertical exaggeration
plot_top: bool
if the top of the simulation domain must be plotted
plot_bot: bool
if the bot of the simulation domain must be plotted
"""
z0=self.get_oz()
def lines_from_points(points):
"""Given an array of points, make a line set"""
poly=pv.PolyData()
poly.points=points
cells=np.full((len(points)-1, 3), 2, dtype=np.int_)
cells[:, 1]=np.arange(0, len(points)-1, dtype=np.int_)
cells[:, 2]=np.arange(1, len(points), dtype=np.int_)
poly.lines=cells
return poly
if plotter is None:
p=pv.Plotter()
else:
p=plotter
if log == "strati":
for bh in self.list_bhs:
if bh.log_strati is not None:
for i in range(len(bh.log_strati)):
l=[]
st=bh.log_strati[i][0]
l.append(bh.log_strati[i][1])
if i < len(bh.log_strati)-1:
l.append(bh.log_strati[i+1][1])
if i == len(bh.log_strati)-1:
l.append(bh.z-bh.depth)
pts=np.array([np.ones([len(l)])*bh.x, np.ones([len(l)])*bh.y, l]).T
line=lines_from_points(pts)
line.points[:, -1]=(line.points[:, -1] - z0)*v_ex+z0
if st is not None:
color=st.c
opacity=1
else:
color="white"
opacity=0
p.add_mesh(line, color=color, interpolate_before_map=True, render_lines_as_tubes=True, line_width=15, opacity=opacity)
elif log == "facies":
for bh in self.list_bhs:
if bh.log_facies is not None:
for i in range(len(bh.log_facies)):
l=[]
st=bh.log_facies[i][0]
l.append(bh.log_facies[i][1])
if i < len(bh.log_facies)-1:
l.append(bh.log_facies[i+1][1])
if i == len(bh.log_facies)-1:
l.append(bh.z-bh.depth)
pts=np.array([np.ones([len(l)])*bh.x, np.ones([len(l)])*bh.y, l]).T
line=lines_from_points(pts)
line.points[:, -1]=(line.points[:, -1] - z0)*v_ex+z0
if st is not None:
color=st.c
opacity=1
else:
color="white"
opacity=0
p.add_mesh(line, color=color, interpolate_before_map=True, render_lines_as_tubes=True, line_width=15, opacity=opacity)
if plot_top:
X, Y=np.meshgrid(self.get_xgc(), self.get_ygc())
grid=pv.StructuredGrid(X, Y, (self.top-z0)*v_ex+z0)
p.add_mesh(grid, opacity=0.8, color="white")
if plot_bot:
X, Y=np.meshgrid(self.get_xgc(), self.get_ygc())
grid=pv.StructuredGrid(X, Y, (self.bot-z0)*v_ex+z0)
p.add_mesh(grid, opacity=0.8, color="red")
if plotter is None:
p.add_bounding_box()
p.show_axes()
p.show()
    def plot_geol_map(self, plotter=None, v_ex=1, up=0):
        """
        Plot the geological map (self.geol_map unit IDs) draped on the top
        surface of the model, colored with each unit's color.

        Parameters
        ----------
        plotter: pyvista plotter
            if None, a new plotter is created and shown directly
        v_ex: float
            vertical exaggeration applied to the top surface
        up: float
            vertical offset added to the surface (to draw it above the model)
        """
        nx = self.get_nx()
        ny = self.get_ny()
        # plot geol
        X,Y = np.meshgrid(self.xgc, self.ygc)
        grid = pv.StructuredGrid(X, Y, v_ex*(self.top - self.oz)+self.oz + up)
        geol_map = self.geol_map
        # build an (ny, nx, 3) RGB array from the unit colors; cells with an
        # unknown unit ID stay NaN
        arr = np.ones([ny, nx, 3])*np.nan
        for iy in range(geol_map.shape[0]):
            for ix in range(geol_map.shape[1]):
                unit = self.get_unit(ID=geol_map[iy, ix], type="ID", vb=0)
                if unit is not None:
                    arr[iy, ix] = matplotlib.colors.to_rgb(unit.c)
        if plotter is None:
            p = pv.Plotter()
            p.add_mesh(grid,"red", scalars=arr.reshape((nx*ny, 3),order="F"), opacity=.5, rgb=True)
            p.add_bounding_box()
            p.show()
        else:
            plotter.add_mesh(grid,"red",scalars=arr.reshape((nx*ny, 3),order="F"), opacity=.5, rgb=True)
    def get_units_domains_realizations(self, iu=None, all_data=True, fill="ID", h_level="all"):
        """
        Return a numpy array of 1 or all units realization(s).

        Parameters
        ----------
        iu: int
            simulation to return
        all_data: bool
            return all the units simulations,
            in that case, iu is ignored
        fill: string
            ID or color are possible, to return
            realizations with unit ID or RGBA color
            (for plotting purpose e.g. with plt.imshow)
        h_level: string or int
            hierarchical level to plot.
            A value of 1 indicates that only unit of the
            master pile will be plotted. "all" to plot all possible units

        Returns
        -------
        ndarray
            (nreal, nz, ny, nx) if all_data, else (nz, ny, nx);
            an extra trailing axis of size 4 (RGBA) if fill == "color"
        """
        # sanity check: results are either stored as ".ud" files on disk
        # (write_results) or kept in memory in self.Geol.units_domains
        if self.write_results:
            if "ud" not in [i.split(".")[-1] for i in os.listdir(self.ws)]:
                raise ValueError("Units have not been computed yet")
        else:
            if self.Geol.units_domains is None:
                raise ValueError("Units have not been computed yet")
        if isinstance(iu, int):
            # an explicit realization index overrides all_data
            all_data=False
        nreal=self.nreal_units
        nx=self.get_nx()
        ny=self.get_ny()
        nz=self.get_nz()
        if all_data:
            ud=np.zeros([nreal, nz, ny, nx], dtype=np.int8)
            if self.write_results:
                # load every realization from its pickled ".ud" file
                for ireal in range(nreal):
                    fname=self.name+"_{}.ud".format(ireal)
                    fpath=os.path.join(self.ws, fname)
                    with open(fpath, "rb") as f:
                        ud[ireal]=pickle.load(f)
            else:
                ud=self.Geol.units_domains.copy()
            if fill == "ID":
                units_domains=ud
                if h_level == "all":
                    pass
                elif isinstance(h_level, int) and h_level > 0:
                    # relabel sub-units to their ancestor at the requested
                    # hierarchical level
                    lst_ID=np.unique(units_domains)
                    for idx in lst_ID:
                        if idx != 0:
                            s=self.get_unit(ID=idx, type="ID")
                            h_lev=s.get_h_level() # hierarchical level of unit
                            if h_lev > h_level: # compare unit level with level to plot
                                for i in range(h_lev - h_level):
                                    s=s.mummy_unit
                                    if s is None:
                                        raise ValueError("Error: parent unit return is None, hierarchy relations are inconsistent with Pile and simulations")
                                units_domains[units_domains == idx]=s.ID # change ID values to Mummy ID
            elif fill == "color":
                # RGBA array for plotting
                units_domains=np.zeros([nreal, nz, ny, nx, 4], dtype=np.float32)
                for unit in self.get_all_units():
                    if unit.f_method != "Subpile":
                        mask=(ud == unit.ID)
                        units_domains[mask,: ]=matplotlib.colors.to_rgba(unit.c)
        else:
            # single realization iu: same logic as above on a 3D array
            if self.write_results:
                fname=self.name+"_{}.ud".format(iu)
                fpath=os.path.join(self.ws, fname)
                with open(fpath, "rb") as f:
                    ud=pickle.load(f)
            else:
                ud=self.Geol.units_domains.copy()[iu]
            if fill == "ID":
                units_domains=ud
                if h_level == "all":
                    pass
                elif isinstance(h_level, int) and h_level > 0:
                    lst_ID=np.unique(units_domains)
                    for idx in lst_ID:
                        if idx != 0:
                            s=self.get_unit(ID=idx, type="ID")
                            h_lev=s.get_h_level() # hierarchical level of unit
                            if h_lev > h_level: # compare unit level with level to plot
                                for i in range(h_lev - h_level):
                                    s=s.mummy_unit
                                    if s is None:
                                        raise ValueError("Error: parent unit return is None, hierarchy relations are inconsistent with Pile and simulations")
                                units_domains[units_domains == idx]=s.ID # change ID values to Mummy ID
            elif fill == "color":
                nz, ny, nx=ud.shape
                units_domains=np.zeros([nz, ny, nx, 4])
                for unit in self.get_all_units():
                    if unit.f_method != "Subpile":
                        mask=(ud == unit.ID)
                        units_domains[mask,: ]=matplotlib.colors.to_rgba(unit.c)
        return units_domains
    def plot_units(self, iu=0, v_ex=1, plotter=None, h_level="all", slicex=None, slicey=None, slicez=None,
                   excludedVal=None,
                   scalar_bar_kwargs=None, show_scalar_bar=True, **kwargs):
        """
        plot units domain for a specific realization iu

        Parameters
        ----------
        iu: int
            index of the unit realization to plot
        v_ex: float
            vertical exageration
        plotter: pyvista.Plotter
            pyvista.Plotter object to plot on
        h_level: string or int
            hierarchical level to plot.
            A value of 1 indicates that only unit of the
            master pile will be plotted. "all" to plot all possible units
        slicex: float or sequence of floats
            fraction of x axis where to slice
        slicey: float or sequence of floats
            fraction of y axis where to slice
        slicez: float or sequence of floats
            fraction of z axis where to slice
        excludedVal: int or sequence of ints
            unit IDs to exclude from the plot
        scalar_bar_kwargs: dict
            kwargs to pass to the scalar bar
        show_scalar_bar: bool
            show scalar bar or not
        kwargs:dict
            kwargs to pass to geone.imgplot.drawImage3D_slice or geone.imgplot.drawImage3D_surface
        """
        #ensure hierarchy_relations have been set
        self.hierarchy_relations(vb=0)
        colors=[]
        d={}
        # grid geometry
        nx=self.get_nx()
        ny=self.get_ny()
        nz=self.get_nz()
        sx=self.get_sx()
        sy=self.get_sy()
        sz=self.get_sz()
        x0=self.get_ox()
        y0=self.get_oy()
        z0=self.get_oz()
        stratis_domain=self.get_units_domains_realizations(iu=iu, fill="ID", h_level=h_level).astype(np.float32)
        lst_ID=np.unique(stratis_domain)
        new_lst_ID = []
        if excludedVal is None:
            excludedVal=[]
        if isinstance(excludedVal, int):
            excludedVal=[excludedVal]
        ## collect, for each plotted unit ID, its color and its scalar-bar
        ## label (keyed at new_id - 0.5 so the annotation sits mid-category)
        new_id=1
        for i in lst_ID:
            if i != 0:
                if i not in excludedVal:
                    s=self.get_unit(ID=i, type="ID")
                    # stratis_domain[stratis_domain == i]=new_id
                    colors.append(s.c)
                    d[new_id - 0.5]=s.name
                    new_id += 1
                    new_lst_ID.append(i)
        #plot
        stratis_domain[stratis_domain==0]=np.nan # remove where no formations are present
        if plotter is None:
            p=pv.Plotter()
        else:
            p=plotter
        # wrap the array in a geone image (sz scaled for vertical exaggeration)
        im=geone.img.Img(nx, ny, nz, sx, sy, sz*v_ex, x0, y0, z0, nv=1, val=stratis_domain, varname="Units")
        # convert the slice fractions (clamped to [0, 1]) into coordinates
        if slicex is not None:
            slicex=np.array(slicex)
            slicex[slicex<0]=0
            slicex[slicex>1]=1
            cx=im.ox + slicex * im.nx * im.sx # center along x
        else:
            cx=None
        if slicey is not None:
            slicey=np.array(slicey)
            slicey[slicey<0]=0
            slicey[slicey>1]=1
            cy=im.oy + slicey * im.ny * im.sy # center along y
        else:
            cy=None
        if slicez is not None:
            slicez=np.array(slicez)
            slicez[slicez<0]=0
            slicez[slicez>1]=1
            cz=im.oz + slicez * im.nz * im.sz # center along z
        else:
            cz=None
        # sliced view if any slice was requested, full surface view otherwise
        if slicex is not None or slicey is not None or slicez is not None:
            imgplt3.drawImage3D_slice(im, plotter=p, slice_normal_x=cx, slice_normal_y=cy, slice_normal_z=cz,
                                      categ=True, categVal=new_lst_ID, categCol=colors, scalar_bar_annotations=d,
                                      scalar_bar_kwargs=scalar_bar_kwargs, show_scalar_bar = show_scalar_bar, **kwargs)
        else:
            imgplt3.drawImage3D_surface(im, plotter=p, categ=True, categVal=new_lst_ID, categCol=colors, scalar_bar_annotations=d,
                                        scalar_bar_kwargs=scalar_bar_kwargs, show_scalar_bar = show_scalar_bar, **kwargs)
        if plotter is None:
            p.add_bounding_box()
            p.show_axes()
            p.show()
def plot_proba(self, obj, v_ex=1, plotter=None, filtering_interval=[0.01, 1.00],
slicex=None, slicey=None, slicez=None,
scalar_bar_kwargs=None, excludedVal=None, **kwargs):
"""
Plot the probability of occurence of a specific unit or facies
(can be passed by a name or the object directly)
Parameters
----------
obj: :class:`Unit` or :class:`Facies` or str
unit or facies object or string name of the unit/facies
to plot the probability of occurence
v_ex: float
vertical exageration, default is 1
plotter: pyvista.Plotter
pyvista.Plotter object to plot on
slicex: float or sequence of floats
fraction of x axis where to slice
slicey: float or sequence of floats
fraction of y axis where to slice
slicez: float or sequence of floats
fraction of z axis where to slice
scalar_bar_kwargs: dict
kwargs to pass to the scalar bar
excludedVal: float or sequence of floats
values to exclude from the plot
kwargs:dict
kwargs to pass to geone.imgplot.drawImage3D_slice or geone.imgplot.drawImage3D_surface
"""
nx=self.get_nx()
ny=self.get_ny()
nz=self.get_nz()
sx=self.get_sx()
sy=self.get_sy()
sz=self.get_sz()
x0=self.get_ox()
y0=self.get_oy()
z0=self.get_oz()
if isinstance(obj, str):
if self.get_unit(obj, vb=0) is not None:
u=self.get_unit(obj)
i=u.ID
hl=u.get_h_level()
typ="unit"
elif self.get_facies_obj(obj, vb=0) is not None:
fa=self.get_facies_obj(obj)
i=fa.ID
typ="facies"
elif isinstance(obj, Unit):
i=obj.ID
hl=obj.get_h_level()
typ="unit"
elif isinstance(obj, Facies):
i=obj.ID
typ="facies"
else:
raise ValueError ("unit/facies must be a string name of a unit or a unit object that is contained inside the Master Pile")
if typ == "unit":
arr=np.zeros([self.nz, self.ny, self.nx])
# compute probabilities
for iu in range(self.nreal_units):
units=self.get_units_domains_realizations(iu=iu, h_level=hl, all_data=False)
arr+=(units == i)
arr/=self.nreal_units
elif typ == "facies":
arr=np.zeros([self.nz, self.ny, self.nx])
# compute probabilities
for iu in range(self.nreal_units):
for ifa in range(self.nreal_fa):
facies=self.get_facies(iu=iu, ifa=ifa, all_data=False)
arr+=(facies == i)
arr/=(self.nreal_units*self.nreal_fa)
im=geone.img.Img(nx, ny, nz, sx, sy, sz*v_ex, x0, y0, z0, nv=1, val=arr, varname="P [-]") #create img object
#create slices
if slicex is not None:
slicex=np.array(slicex)
slicex[slicex<0]=0
slicex[slicex>1]=1
cx=im.ox + slicex * im.nx * im.sx # center along x
else:
cx=None
if slicey is not None:
slicey=np.array(slicey)
slicey[slicey<0]=0
slicey[slicey>1]=1
cy=im.oy + slicey * im.ny * im.sy # center along y
else:
cy=None
if slicez is not None:
slicez=np.array(slicez)
slicez[slicez<0]=0
slicez[slicez>1]=1
cz=im.oz + slicez * im.nz * im.sz # center along z
else:
cz=None
if arr.any(): #if values are found
# filter values
if filtering_interval is not None:
if isinstance(filtering_interval, list):
if len(filtering_interval) == 2:
if filtering_interval[0] < filtering_interval[1]:
arr[arr < filtering_interval[0]] = np.nan
arr[arr > filtering_interval[1]] = np.nan
else:
print("filtering_interval[0] must be smaller than filtering_interval[1]")
else:
print("filtering_interval must be a list of two values")
else:
print("filtering_interval must be a list of two values")
if plotter is None:
p=pv.Plotter()
else:
p=plotter
if slicex is not None or slicey is not None or slicez is not None:
imgplt3.drawImage3D_slice(im, plotter=p, slice_normal_x=cx, slice_normal_y=cy, slice_normal_z=cz, scalar_bar_kwargs=scalar_bar_kwargs, excludedVal=excludedVal, **kwargs)
else:
imgplt3.drawImage3D_surface(im, plotter=p, scalar_bar_kwargs=scalar_bar_kwargs, excludedVal=excludedVal, **kwargs)
if plotter is None:
p.add_bounding_box()
p.show_axes()
p.show()
else:
print("No values found for this unit")
    def plot_facies(self, iu=0, ifa=0, v_ex=1, inside_units=None, excludedVal=None,
                    plotter=None, slicex=None, slicey=None, slicez=None,
                    scalar_bar_kwargs=None, show_scalar_bar=True, **kwargs):
        """
        Plot the facies realizations over the domain with the colors attributed to facies

        Parameters
        ----------
        iu: int
            index of the units realization
        ifa: int
            index of the facies realization
        v_ex: int or float
            vertical exaggeration
        inside_units: array-like of :class:`Unit` objects
            list of units inside which we want to have the plot.
            if None --> all
        excludedVal: int or sequence of ints
            facies ID(s) to exclude from the plot
        plotter: pyvista plotter if wanted
        slicex: float or sequence of floats
            fraction of x axis where to slice
        slicey: float or sequence of floats
            fraction of y axis where to slice
        slicez: float or sequence of floats
            fraction of z axis where to slice
        scalar_bar_kwargs: dict
            kwargs for the scalar bar
        show_scalar_bar: bool
            if True, show the scalar bar
        kwargs: dict
            kwargs to pass to geone.imgplot.drawImage3D_slice or geone.imgplot.drawImage3D_surface
        """
        # float32 so that cells can later be blanked with NaN
        fa_domains=self.get_facies(iu, ifa, all_data=False).astype(np.float32)
        #keep facies in only wanted units
        if inside_units is not None:
            mask_all=np.zeros([self.get_nz(), self.get_ny(), self.get_nx()])
            for u in inside_units:
                if isinstance(u, str):
                    mask=self.unit_mask(u, iu= iu)
                elif isinstance(u, Unit) and u in self.get_all_units():
                    mask=self.unit_mask(u.name, iu= iu)
                else:
                    raise ValueError ("Unit passed in inside_units must be a unit name or a unit object contained in the ArchTable")
                mask_all[mask == 1]=1
            # cells outside the requested units are set to 0 (i.e. "no facies")
            fa_domains[mask_all != 1]=0
        d={}  # scalar-bar annotations: {position on the bar: facies name}
        colors=[]  # facies colors, in order of appearance of their IDs
        nx=self.get_nx()
        ny=self.get_ny()
        nz=self.get_nz()
        sx=self.get_sx()
        sy=self.get_sy()
        sz=self.get_sz()
        x0=self.get_ox()
        y0=self.get_oy()
        z0=self.get_oz()
        lst_ID=np.unique(fa_domains)
        new_lst_ID = []
        if excludedVal is None:
            excludedVal=[]
        if isinstance(excludedVal, int):
            excludedVal=[excludedVal]
        ## create a dictionnary to map the facies ID to the facies name
        new_id=1
        for i in lst_ID:
            if i != 0:
                if i not in excludedVal:
                    fa=self.get_facies_obj(ID=i, type="ID")
                    # fa_domains[fa_domains == fa.ID]=new_id
                    colors.append(fa.c)
                    # NOTE(review): annotation keys use the sequential new_id while the
                    # plotted values keep the original facies IDs (the remapping above
                    # is commented out) -- confirm labels align for sparse ID sets
                    d[new_id + 0.5]=fa.name
                    new_id += 1
                    new_lst_ID.append(i)
        #remove 0 occurence (where no facies are present)
        fa_domains[fa_domains==0]=np.nan
        if plotter is None:
            p=pv.Plotter()
        else:
            p=plotter
        im=geone.img.Img(nx, ny, nz, sx, sy, sz*v_ex, x0, y0, z0, nv=1, val=fa_domains, varname="Lithologies")
        # convert slice fractions (clipped to [0, 1]) into world coordinates
        if slicex is not None:
            slicex=np.array(slicex)
            slicex[slicex<0]=0
            slicex[slicex>1]=1
            cx=im.ox + slicex * im.nx * im.sx # center along x
        else:
            cx=None
        if slicey is not None:
            slicey=np.array(slicey)
            slicey[slicey<0]=0
            slicey[slicey>1]=1
            cy=im.oy + slicey * im.ny * im.sy # center along y
        else:
            cy=None
        if slicez is not None:
            slicez=np.array(slicez)
            slicez[slicez<0]=0
            slicez[slicez>1]=1
            cz=im.oz + slicez * im.nz * im.sz # center along z
        else:
            cz=None
        if slicex is not None or slicey is not None or slicez is not None:
            imgplt3.drawImage3D_slice(im, plotter=p, slice_normal_x=cx, slice_normal_y=cy, slice_normal_z=cz,
                                      categ=True, categVal=new_lst_ID, excludedVal=excludedVal,
                                      categCol=colors, scalar_bar_annotations=d,
                                      scalar_bar_kwargs=scalar_bar_kwargs, show_scalar_bar = show_scalar_bar, **kwargs)
        else:
            imgplt3.drawImage3D_surface(im, plotter=p, categ=True, categVal=new_lst_ID, excludedVal=excludedVal,
                                        categCol=colors, scalar_bar_annotations=d,
                                        scalar_bar_kwargs=scalar_bar_kwargs, show_scalar_bar = show_scalar_bar, **kwargs)
        # finalize the scene only if the plotter was created here
        if plotter is None:
            p.add_bounding_box()
            p.show_axes()
            p.show()
    def plot_prop(self, property, iu=0, ifa=0, ip=0, v_ex=1, inside_units=None, inside_facies=None, filtering_interval=None,
                  plotter=None, slicex=None, slicey=None, slicez=None, cmin=None, cmax=None, scalar_bar_kwargs=None, **kwargs):
        """
        Plot one realization of a property over the domain

        Parameters
        ----------
        property: string
            name of the property to plot
        iu:int
            indice of units realization
        ifa:int
            indice of facies realization
        ip:int
            indice of property realization
        v_ex:int or float
            vertical exaggeration
        inside_units:array-like of :class:`Unit` objects or unit names (string)
            list of units inside which we want to have the plot. By default all
        inside_facies:array-like of :class:`Facies` objects or facies names (string)
            list of facies inside which we want to have the plot. By default all
        filtering_interval: list of two floats
            interval of values to keep (values outside are masked with NaN)
        plotter:pyvista plotter if wanted
        slicex, slicey, slicez: float or sequence of floats
            fraction in x, y or z direction where the slice is done
        cmin, cmax:float
            min and max value of the colorbar
        scalar_bar_kwargs:dict
            kwargs for the scalar bar
        kwargs:dict
            kwargs to pass to geone.imgplot.drawImage3D_slice or geone.imgplot.drawImage3D_surface
        """
        prop=self.getprop(property, iu, ifa, ip, all_data=False)
        facies=self.get_facies(iu, ifa, all_data=False)
        #keep values in only wanted units
        if inside_units is not None:
            mask_all=np.zeros([self.get_nz(), self.get_ny(), self.get_nx()])
            for u in inside_units:
                if isinstance(u, str):
                    mask=self.unit_mask(u, iu=iu)
                elif isinstance(u, Unit) and u in self.get_all_units():
                    mask=self.unit_mask(u.name, iu=iu)
                else:
                    raise ValueError ("Unit passed in inside_units must be a unit name or a unit object contained in the ArchTable")
                mask_all[mask == 1]=1
            prop[mask_all != 1]=np.nan
        #keep values in only wanted facies
        if inside_facies is not None:
            mask_all=np.zeros([self.get_nz(), self.get_ny(), self.get_nx()])
            for fa in inside_facies:
                if isinstance(fa, str):
                    mask=(facies == self.get_facies_obj(fa).ID)
                elif isinstance(fa, Facies) and fa in self.get_all_facies():
                    mask=(facies == fa.ID)
                else:
                    # NOTE(review): this message looks copy-pasted from the units
                    # branch; it should probably mention facies/inside_facies
                    raise ValueError ("Unit passed in inside_units must be a unit name or a unit object contained in the ArchTable")
                mask_all[mask == 1]=1
            prop[mask_all != 1]=np.nan
        # prop.any() is a numpy bool_, so "~" is a valid negation here
        if ~prop.any():
            raise ValueError ("Error: No values found")
        nx=self.get_nx()
        ny=self.get_ny()
        nz=self.get_nz()
        sx=self.get_sx()
        sy=self.get_sy()
        sz=self.get_sz()
        x0=self.get_ox()
        y0=self.get_oy()
        z0=self.get_oz()
        # filter values
        if filtering_interval is not None:
            if isinstance(filtering_interval, list):
                if len(filtering_interval) == 2:
                    if filtering_interval[0] < filtering_interval[1]:
                        prop[prop < filtering_interval[0]] = np.nan
                        prop[prop > filtering_interval[1]] = np.nan
                    else:
                        print("filtering_interval[0] must be smaller than filtering_interval[1]")
                else:
                    print("filtering_interval must be a list of two values")
            else:
                print("filtering_interval must be a list of two values")
        if plotter is None:
            p=pv.Plotter()
        else:
            p=plotter
        im=geone.img.Img(nx, ny, nz, sx, sy, sz*v_ex, x0, y0, z0, nv=1, val=prop, varname=property)
        # convert slice fractions (clipped to [0, 1]) into world coordinates
        if slicex is not None:
            slicex=np.array(slicex)
            slicex[slicex<0]=0
            slicex[slicex>1]=1
            cx=im.ox + slicex * im.nx * im.sx # center along x
        else:
            cx=None
        if slicey is not None:
            slicey=np.array(slicey)
            slicey[slicey<0]=0
            slicey[slicey>1]=1
            cy=im.oy + slicey * im.ny * im.sy # center along y
        else:
            cy=None
        if slicez is not None:
            slicez=np.array(slicez)
            slicez[slicez<0]=0
            slicez[slicez>1]=1
            cz=im.oz + slicez * im.nz * im.sz # center along z
        else:
            cz=None
        if slicex is not None or slicey is not None or slicez is not None:
            imgplt3.drawImage3D_slice(im, plotter=p, slice_normal_x=cx, slice_normal_y=cy, slice_normal_z=cz, cmin=cmin, cmax=cmax,
                                      scalar_bar_kwargs=scalar_bar_kwargs, **kwargs)
        else:
            imgplt3.drawImage3D_surface(im, plotter=p, cmin=cmin, cmax=cmax,
                                        scalar_bar_kwargs=scalar_bar_kwargs, **kwargs)
        # finalize the scene only if the plotter was created here
        if plotter is None:
            p.add_bounding_box()
            p.show_axes()
            p.show()
def plot_mean_prop(self, property, type="arithmetic", v_ex=1, inside_units=None, inside_facies=None, filtering_interval=None,
plotter=None, slicex=None, slicey=None, slicez=None, cmin=None, cmax=None, scalar_bar_kwargs=None, **kwargs):
#TO DO --> to optimize
"""
Function that plots the arithmetic mean of a property at every cells
of the simulation domain given all the property simulations
Parameters
----------
property: string
name of the property to plot
type: string
type of mean to plot:
- "arithmetic" for arithmetic mean
- "std" for standard deviation
- "median" for median value
v_ex: float
vertical exaggeration
inside_units: list of string or :class:`Unit` objects
list of units to consider for the mean
"std" for standard deviation, "median" for median value
inside_facies: list of string or :class:`Facies` objects
list of facies to consider for the mean
filtering_interval: list of two floats
interval of values to keep
plotter: pyvista.Plotter
pyvista plotter to plot on
slicex: float or sequence of floats
fraction of the x axis to plot
slicey: float or sequence of floats
fraction of the y axis to plot
slicez: float or sequence of floats
fraction of the z axis to plot
cmin: float
minimum value for the colorbar
cmax: float
maximum value for the colorbar
scalar_bar_kwargs: dict
dictionary of arguments to pass to the pyvista scalar bar
"""
#load property array and facies array
prop=self.getprop(property) # to modify
prop_shape=prop.shape
facies=self.get_facies()
facies_shape=facies.shape
#keep values in only wanted units
if inside_units is not None:
nreal_units=self.nreal_units # number of units real
mask_all=np.zeros(prop_shape)
for iu in range(nreal_units):
for u in inside_units:
if isinstance(u, str):
mask=self.unit_mask(u, iu=iu)
elif isinstance(u, Unit) and u in self.get_all_units():
mask=self.unit_mask(u.name, iu=iu)
else:
raise ValueError ("Unit passed in inside_units must be a unit name or a unit object contained in the ArchTable")
mask_all[iu,:,:,mask == 1]=1
prop[mask_all != 1]=np.nan
#keep values in only wanted facies
if inside_facies is not None:
mask_all=np.zeros(prop_shape)
for iu in range(nreal_units):
for ifa in range(self.nreal_fa):
for fa in inside_facies:
if isinstance(fa, str):
mask=(facies[iu,ifa] == self.get_facies_obj(fa).ID)
elif isinstance(fa, Facies) and fa in self.get_all_facies():
mask=(facies[iu,ifa] == fa.ID)
else:
raise ValueError ("Unit passed in inside_units must be a unit name or a unit object contained in the ArchTable")
mask_all[iu,ifa,:,mask == 1]=1
prop[mask_all != 1]=np.nan
if ~prop.any():
raise ValueError ("Error: No values found")
if type == "arithmetic":
arr=np.nanmean(prop.reshape(-1,self.get_nz(),self.get_ny(),self.get_nx()),axis=0)
elif type == "std":
arr=np.nanstd(prop.reshape(-1,self.get_nz(),self.get_ny(),self.get_nx()),axis=0)
elif type == "median":
arr=np.nanmedian(prop.reshape(-1,self.get_nz(),self.get_ny(),self.get_nx()),axis=0)
self.plot_arr(arr,property,v_ex=v_ex, plotter=plotter, slicex=slicex, slicey=slicey, slicez=slicez,
cmin=cmin, cmax=cmax, scalar_bar_kwargs=scalar_bar_kwargs, **kwargs)
def plot_arr(self,arr,var_name ="V0",v_ex=1, plotter=None, slicex=None, slicey=None, slicez=None, filtering_interval=None,
cmin=None, cmax=None, scalar_bar_kwargs=None, **kwargs):
"""
This function plot a 3D array with the same size of the simulation domain
Parameters
----------
arr: 3D array
array to plot. Size of the simulation domain (nx, ny, nz)
var_name: str
variable names to plot and to show in the pyvista plot
v_ex: float
vertical exaggeration
plotter: pyvista.Plotter
pyvista plotter to plot on
slicex: float or sequence of floats
fraction of the x axis to plot
slicey: float or sequence of floats
fraction of the y axis to plot
slicez: float or sequence of floats
fraction of the z axis to plot
filtering_interval: list of two floats
interval of values to keep
cmin: float
minimum value for the colorbar
cmax: float
maximum value for the colorbar
scalar_bar_kwargs: dict
dictionary of arguments to pass to the pyvista scalar bar
"""
nx=self.get_nx()
ny=self.get_ny()
nz=self.get_nz()
sx=self.get_sx()
sy=self.get_sy()
sz=self.get_sz()
x0=self.get_ox()
y0=self.get_oy()
z0=self.get_oz()
assert arr.shape == (nz, ny, nx), "Invalid shape for array, must be equal to {}".format(nz, ny, nx)
if plotter is None:
p=pv.Plotter()
else:
p=plotter
im=geone.img.Img(nx, ny, nz, sx, sy, sz*v_ex, x0, y0, z0, nv=1, val=arr, varname=var_name)
if slicex is not None:
slicex=np.array(slicex)
slicex[slicex<0]=0
slicex[slicex>1]=1
cx=im.ox + slicex * im.nx * im.sx # center along x
else:
cx=None
if slicey is not None:
slicey=np.array(slicey)
slicey[slicey<0]=0
slicey[slicey>1]=1
cy=im.oy + slicey * im.ny * im.sy # center along y
else:
cy=None
if slicez is not None:
slicez=np.array(slicez)
slicez[slicez<0]=0
slicez[slicez>1]=1
cz=im.oz + slicez * im.nz * im.sz # center along z
else:
cz=None
# filter values
if filtering_interval is not None:
if isinstance(filtering_interval, list):
if len(filtering_interval) == 2:
if filtering_interval[0] < filtering_interval[1]:
arr[arr < filtering_interval[0]] = np.nan
arr[arr > filtering_interval[1]] = np.nan
else:
print("filtering_interval[0] must be smaller than filtering_interval[1]")
else:
print("filtering_interval must be a list of two values")
else:
print("filtering_interval must be a list of two values")
if slicex is not None or slicey is not None or slicez is not None:
imgplt3.drawImage3D_slice(im, plotter=p, slice_normal_x=cx, slice_normal_y=cy, slice_normal_z=cz, cmin=cmin, cmax=cmax
, scalar_bar_kwargs=scalar_bar_kwargs, **kwargs)
else:
imgplt3.drawImage3D_surface(im, plotter=p, cmin=cmin, cmax=cmax,
scalar_bar_kwargs=scalar_bar_kwargs, **kwargs)
if plotter is None:
p.add_bounding_box()
p.show_axes()
p.show()
#cross sections
def draw_cross_section(self, background="units", iu=0, ifa=0):
extent = [self.get_ox(), self.get_xg()[-1], self.get_oy(), self.get_yg()[-1]]
if background == "units": # add option to pass rasters
back_map = self.compute_geol_map(0, color = True)
plt.imshow(back_map, origin="lower", extent=extent)
else:
pass
p_list = plt.ginput(n=-1, timeout=0)
plt.close()
## draw cross-section position
plt.imshow(back_map, origin="lower", extent=extent)
plt.plot([i[0] for i in p_list], [i[1] for i in p_list], c="red")
plt.show()
return p_list
    def cross_section(self, arr_to_plot, p_list, esp=None):
        """
        Return a cross section along the points pass in p_list

        Parameters
        ----------
        arr_to_plot: 3D or 4D array of dimension nz, ny, nx(, 4)
            array of which we want a cross section.
            This array will be considered being part of the ArchPy simulation domain.
        p_list: sequence of tuple
            list or array of tuple containing x and y coordinates
            (e.g. p_list=[(100, 200), (300, 200)] --> draw a cross section between these two points)
        esp: float
            spacing to use when sampling the array along the cross section
            (defaults to the diagonal of a grid cell)

        Returns
        -------
        2D (or 3D for RGBA input) array
            cross section ready to plot
        float
            total length of the cross-section line
        """
        ox=self.get_ox()
        oy=self.get_oy()
        sx=self.get_sx()
        sy=self.get_sy()
        if esp is None:
            esp=(self.get_sx()**2+self.get_sy()**2)**0.5
        dist_tot=0
        for ip in range(len(p_list)-1): # loop over points
            p1=np.array(p_list[ip])
            p2=np.array(p_list[ip+1])
            d1, d2=p2-p1
            dist=np.sqrt(d1**2 + d2**2)
            # fraction of the segment advanced at each sampling step
            lam=esp/dist
            x_d, y_d=p1 # starting point
            f=arr_to_plot.copy()
            no_color=False
            # a 4D input carries RGBA colors, a 3D input carries scalar values
            if len(f.shape) == 4:
                no_color=False
                x_sec_i=np.zeros([f.shape[0], int(dist/esp)+1, 4])
                if ip == 0:
                    x_sec=np.zeros([f.shape[0], int(dist/esp)+1, 4])
            elif len(f.shape) == 3:
                no_color=True
                x_sec_i=np.zeros([f.shape[0],int(dist/esp)+1])
                if ip == 0:
                    x_sec=np.zeros([f.shape[0],int(dist/esp)+1])
            # NOTE(review): the arrays above have int(dist/esp)+1 columns while the
            # sampling loops below run ceil(dist/esp) times -- the last column may
            # be left at its zero initialization; confirm whether intended
            if no_color:
                i=0
                for o in np.arange(0,dist,esp):
                    x_d += d1*lam
                    y_d += d2*lam
                    ix=int((x_d - ox)/sx)  # grid column of the sample point
                    iy=int((y_d - oy)/sy)  # grid row of the sample point
                    fp=f[:,iy,ix]
                    if ip == 0:
                        x_sec[:,i]=fp
                    else:
                        x_sec_i[:,i]=fp
                    i += 1
            else:
                i=0
                for o in np.arange(0,dist,esp):
                    x_d += d1*lam
                    y_d += d2*lam
                    ix=int((x_d - ox)/sx)
                    iy=int((y_d - oy)/sy)
                    fp=f[:,iy,ix]
                    if ip == 0:
                        x_sec[:,i,: ]=fp
                    else:
                        x_sec_i[:,i,: ]=fp
                    i += 1
            dist_tot += dist
            #append xsections
            if ip > 0:
                x_sec=np.concatenate((x_sec, x_sec_i), axis=1)
        return x_sec, dist_tot
### BUG TO CORRECT
"""
Boreholes appear multiple times on the cross-section, have to think about that
"""
    def plot_cross_section(self, p_list, typ="units", arr=None, iu=0, ifa=0, ip=0,
                           property=None, esp=None, ax=None, colorbar=False,
                           ratio_aspect=2, i=0,
                           dist_max = 100, width=.5,
                           vmax=None, vmin=None):
        """
        Plot a cross section along the points given in
        p_list with a spacing defined (esp)

        Parameters
        ----------
        p_list : list or array of tuple containing
            x and y coordinates
            (e.g. p_list=[(100, 200), (300, 200)]
            --> draw a cross section between these two points)
        typ: string
            units, facies, prop, arr, proba_units, proba_facies,
            entropy_units or entropy_facies
        arr: array
            3D or 4D array to sample when typ is "arr" (assumed set by the
            caller; the "arr" branch below uses it as-is)
        iu: int
            units index realization
        ifa: int
            facies index realization
        ip: int
            property index realization
        property: str
            property name that have been computed
        esp: float
            spacing to use when sampling the array along the cross section
        ax: matplotlib axes on which to plot
        colorbar: bool
            if True, add a horizontal colorbar
        ratio_aspect: float
            ratio between y and x axis to adjust vertical exaggeration
        i: int
            unit or facies ID used by the "proba_units"/"proba_facies" modes
        dist_max: float
            maximum distance from the section line for a borehole to be shown
        width: float
            bar width used when drawing borehole logs
        vmax, vmin: float
            color limits passed to imshow
        """
        def plot_bh(bh, x=None, width=width, typ="units"):
            # draw the stratigraphic log of borehole bh as stacked colored bars
            # at abscissa x (defaults to the borehole's own x coordinate)
            if typ == "units":
                if bh.log_strati is not None:
                    if x is None:
                        ix = bh.x
                    else:
                        ix = x
                    i = -1
                    for i in range(len(bh.log_strati)-1):
                        s = bh.log_strati[i][1]
                        unit = bh.log_strati[i][0]
                        if i < len(bh.log_strati):
                            s2 = bh.log_strati[i+1][1]
                        if unit is not None:
                            plt.bar(ix, s - s2, bottom=s2, color=unit.c, alpha=1, edgecolor = 'black', width=width)
                    # last interval: from the last contact down to the borehole bottom
                    s = bh.log_strati[i+1][1]
                    unit = bh.log_strati[i+1][0]
                    s2 = bh.z - bh.depth
                    if unit is not None:
                        plt.bar(ix, s - s2, bottom=s2, color=unit.c, alpha=1, edgecolor = 'black', width=width)
        # build the 3D (or RGBA 4D) array to sample depending on typ
        if typ == "units":
            arr=self.get_units_domains_realizations(iu=iu, fill="color", all_data=False)
        elif typ == 'proba_units':
            units=self.get_units_domains_realizations()
            nreal=units.shape[0]
            arr=(units == i).sum(0)/nreal
            del(units)
        elif typ == 'proba_facies':
            facies=self.get_facies()
            nreal=facies.shape[0] * facies.shape[1]
            arr=(facies == i).sum(0).sum(0) /nreal
            del(facies)
        elif typ =="facies":
            arr=self.get_facies(iu, ifa, all_data=False)
            #change values to have colors directly
            new_arr=np.zeros([arr.shape[0], arr.shape[1], arr.shape[2], 4])
            list_fa=np.unique(arr)
            for IDfa in list_fa:
                if IDfa != 0:
                    fa=self.get_facies_obj(ID=IDfa, type="ID") # get facies object
                    mask=(arr == IDfa)
                    new_arr[mask,: ]=colors.to_rgba(fa.c)
            arr=new_arr
        elif typ =="prop":
            assert isinstance(property, str), "property should be given in a property name --> string"
            arr=self.getprop(property, iu, ifa, ip, all_data=False)
        elif typ =="entropy_units":
            units=self.get_units_domains_realizations()
            SE=np.zeros(self.mask.shape) # shannon entropy
            b=len(self.get_all_units())  # log base: number of units
            nreal=units.shape[0]
            for unit in self.get_all_units():
                print(unit)
                Pi=(units==unit.ID).sum(0)/nreal
                pi_mask=(self.mask) & (Pi!=0)
                SE[pi_mask] += Pi[pi_mask]*(np.log(Pi[pi_mask])/np.log(b))
            arr=-SE
            arr[~self.mask]=np.nan
            del(units)
        elif typ == "entropy_facies":
            facies_domains=self.get_facies().reshape(-1, self.nz, self.ny, self.nx)
            SE=np.zeros(self.mask.shape) # shannon entropy
            b=len(self.get_all_facies())  # log base: number of facies
            nreal=facies_domains.shape[0]
            for facies in self.get_all_facies():
                Pi=(facies_domains==facies.ID).sum(0)/nreal
                pi_mask=(self.mask) & (Pi!=0)
                SE[pi_mask] += Pi[pi_mask]*(np.log(Pi[pi_mask])/np.log(b))
            arr=-SE
            arr[~self.mask]=np.nan
            del(facies_domains)
        elif typ =="arr":
            pass
        else:
            # NOTE(review): assert on a non-empty string is always truthy and
            # never fires; an explicit raise was probably intended here
            assert 'Typ unknown'
            return
        #extract cross section
        xsec, dist=self.cross_section(arr, p_list, esp=esp)
        if ax is None:
            fig, ax=plt.subplots(figsize=(10, 10))
        extent=[0, dist, self.get_oz(), self.get_zg()[-1]]
        a=ax.imshow(xsec, origin="lower", extent=extent, interpolation="none", vmax=vmax, vmin=vmin)
        if colorbar:
            plt.colorbar(a, ax=ax, orientation='horizontal')
        ax.set_aspect(abs((extent[1]-extent[0])/(extent[3]-extent[2]))/ratio_aspect)
        del(arr)
        #get boreholes
        # NOTE(review): as flagged above in the file, boreholes close to several
        # segments can be drawn multiple times on the section
        dist_tot = 0
        for ip in range(len(p_list)-1):
            p1 = p_list[ip]
            p2 = p_list[ip+1]
            # bounding box around the segment, inflated by 20% or dist_max/2
            xmin = min(p1[0], p2[0])
            xmax = max(p1[0], p2[0])
            Lx = xmax - xmin
            xmin -= max(0.2*Lx, dist_max/2)
            xmax += max(0.2*Lx, dist_max/2)
            ymin = min(p1[1], p2[1])
            ymax = max(p1[1], p2[1])
            Ly = ymax - ymin
            ymin -= max(0.2*Ly, dist_max/2)
            ymax += max(0.2*Ly, dist_max/2)
            # select bh inside p1 and p2
            sel_bhs = [bh for bh in self.list_bhs if bh.x > xmin and bh.x < xmax and bh.y > ymin and bh.y < ymax]
            # compute dist of sel_bhs to line
            # proj bhs
            for bh in sel_bhs:
                a = p2[0] - p1[0]
                b = p2[1] - p1[1]
                # orthogonal projection of the borehole onto the segment's line
                # NOTE(review): y_proj divides by a, so a vertical segment
                # (p1[0] == p2[0]) raises ZeroDivisionError -- confirm/handle
                x_proj = (a**2 * bh.x + b**2 * p2[0] + a * b * (bh.y - p2[1])) / (a**2 + b**2)
                y_proj = (b * x_proj - b * p2[0] + a * p2[1]) / a
                dist_line = ((bh.x - x_proj) ** 2 + (bh.y - y_proj) ** 2)**0.5
                if dist_line < dist_max:
                    dist = ((x_proj - p1[0]) ** 2 + (y_proj - p1[1]) ** 2)**0.5
                    dist_to_plot = dist_tot + dist # distance where to plot the bh
                    # plot bh
                    if typ == "units":
                        plot_bh(bh, dist_to_plot)
            # increment total distance
            dist_points = ((p2[0] - p1[0]) ** 2 + (p2[1] - p1[1]) ** 2)**0.5
            dist_tot += dist_points
def plot_lines(self, list_lines, names=None, ax=None, legend=True):
"""
Plot cross-sections lines given in the
"list_lines" argument on a 2D top view of the domain
Parameters
----------
list_lines: seqence of points
list of points defining the cross-section
lines. Each point is a list of 2 floats [x, y]
names: sequence of string
list of names to give to the cross-section lines
ax: matplotlib axis
axis on which to plot the cross-section
legend: bool
if True, plot a legend
"""
if ax is None:
fig, ax=plt.subplots()
ox=self.ox
oy=self.oy
x1=self.xg[-1]
y1=self.yg[-1]
ax.plot((ox, ox, x1, x1, ox),(oy, y1, y1, oy, oy), c="k") #plot domain
ax.set_aspect("equal")
i=1
for line in list_lines:
if names is None:
label="cross " + str(i)
else:
label=names[i-1]
line=np.array(line)
ax.plot(line[:,0], line[:,1], label=label)
i += 1
if legend:
ax.legend()
# CLASSES PILE + UNIT + SURFACE
class Pile():
    """
    Major object representing the Stratigraphic Pile (SP).

    A Pile holds every :class:`Unit` object of a (sub)pile and encodes the
    stratigraphic relations between those units. One Pile object must be
    defined for each subpile plus one "master" pile.

    Parameters
    ----------
    name: str
        name of the pile
    nature: str
        units type of interpolation, can be "surfaces" or "3d_categorical".
        With "surfaces" (default), 2D interpolations of the unit surfaces are
        performed and used to delimit the unit domains.
        With "3d_categorical", a facies-style method simulates the position of
        the units directly (same methods as for the facies).
    verbose: int
        level of verbosity
    seed: int
        seed for random number generation
    """
    def __init__(self, name, nature="surfaces", verbose=1, seed=1):
        assert isinstance(name, str), "A name should be provided and must be a string"
        self.name = name
        self.nature = nature
        self.verbose = verbose
        self.seed = seed
        self.list_units = []  # units belonging to this pile
def __repr__(self):
return self.name
def add_unit(self, unit):
"""
Add a unit to the pile
Parameters
----------
unit: Unit object or list of Unit objects
unit(s) to add to the pile
"""
try: #iterable
for i in unit:
if (isinstance(i, Unit)) and (i not in self.list_units):
i.verbose = self.verbose
self.list_units.append(i)
if self.verbose:
print("Stratigraphic unit {} added".format(i.name))
elif (isinstance(i, Unit)):
if self.verbose:
print("object is already in the list")
else:
if self.verbose:
print("object isn't a unit object")
except: # unit not in a list
if (isinstance(unit, Unit)) and (unit not in self.list_units):
self.list_units.append(unit)
unit.verbose = self.verbose
if self.verbose:
print("Stratigraphic unit {} added".format(unit.name))
elif (isinstance(unit, Unit)):
if self.verbose:
print("object is already in the list")
else:
if self.verbose:
print("object isn't a unit object")
def remove_unit(self, unit_to_rem):
"""Remove the given unit object from Pile object
Parameters
----------
unit_to_rem: :class:`Unit` object
unit to remove from the pile
"""
if len(self.list_units) > 0:
if unit_to_rem in self.list_units:
self.list_units.remove(unit_to_rem)
if self.verbose:
print("Unit {} removed from Pile {}".format(unit_to_rem.name, self.name))
else:
if self.verbose:
print("no units found in pile {}".format(self.name))
def order_units(self, vb=1):
"""
Order list_liths according the order attributes of each lithologies
Parameters
----------
vb: int
level of verbosity, 0 for no print, 1 for print
"""
if vb:
print("Pile {}: ordering units".format(self.name))
self.list_units.sort(key=lambda x: x.order)
if vb:
print("Stratigraphic units have been sorted according to order")
i=0
flag=0
# check orders of unit
for s in self.list_units:
if i == 0:
s_bef=s
if s_bef.order != 1:
flag=1
if vb:
print("first unit order is not equal to 1")
else:
if s.order == s_bef.order:
flag=1
if vb:
print("units {} and {} have the same order".format(s.name, s_bef.name))
elif s.order != s_bef.order + 1:
flag=1
if vb:
print("Discrepency in the orders for units {} and {}".format(s.name, s_bef.name))
s_bef=s
i += 1
if flag:
if vb:
print("Changing orders for that they range from 1 to n")
# check order ranging from 1 to n
for i in range(len(self.list_units)):
if self.list_units[i].order != i+1:
self.list_units[i].order=i+1
    def compute_surf(self, ArchTable, nreal=1, fl_top=False, subpile=False, tops=None, bots=None, vb=1):
        """
        Compute the elevation of the surfaces units (1st hierarchic order)
        contained in the Pile object.

        Parameters
        ----------
        ArchTable: :class:`Arch_table` object
            ArchTable object containing the architecture of the pile
        nreal: int
            number of realization
        fl_top: bool
            to not interpolate first layer and assign it=top
        subpile: bool
            if the pile is a subpile
        tops, bots: sequence of 2D arrays of size (ny, nx)
            of top/bot for subpile surface
        vb: int
            level of verbosity, 0 for no print, 1 for print
        """
        def add_sto_contact(s, x, y, z, type="equality", z2=None): #function to add a stochastic hd point to a surface
            # warning no check that point is inside domain
            if type == "equality":
                s.surface.sto_x.append(x)
                s.surface.sto_y.append(y)
                s.surface.sto_z.append(z)
            elif type == "ineq_inf":
                s.surface.sto_ineq.append([x, y, 0, z, np.nan])
            elif type == "ineq_sup":
                s.surface.sto_ineq.append([x, y, 0, np.nan, z])
            elif type == "double_ineq":
                s.surface.sto_ineq.append([x, y, 0, z, z2]) # inferior and upper ineq
        if ArchTable.check_piles_name() == 0: # check consistency in unit
            return None
        ArchTable.nreal_units=nreal # store number of realizations
        #simulation grid
        xg=ArchTable.get_xgc()
        nx=xg.shape[0]
        yg=ArchTable.get_ygc()
        ny=yg.shape[0]
        zg=ArchTable.get_zg()
        nz=zg.shape[0] - 1
        # NOTE(review): "~subpile" is a bitwise not on a bool (~True == -2,
        # ~False == -1), both truthy, so this branch ALWAYS runs; "not subpile"
        # was probably intended. Harmless in practice: the subpile case
        # overwrites top/bot per realization below -- confirm before changing.
        if ~subpile:
            top=ArchTable.top
            bot=ArchTable.bot
        mask=ArchTable.mask # simulation mask
        mask2D = ArchTable.mask2d # mask 2D
        nlay=len(self.list_units)
        if vb:
            print("########## PILE {} ##########".format(self.name))
        #make sure to have ordered lithologies
        self.order_units(vb=vb)
        ### initialize surfaces by setting to 0
        surfs=np.zeros([nreal, nlay, ny, nx], dtype=np.float32)
        surfs_bot=np.zeros([nreal, nlay, ny, nx], dtype=np.float32)
        org_surfs=np.zeros([nreal, nlay, ny, nx], dtype=np.float32)
        real_domains=np.zeros([nreal, nlay, nz, ny, nx], dtype=np.int8)
        for ireal in range(nreal): # loop over real
            # erase stochastic data
            for s in self.list_units:
                s.surface.sto_x = []
                s.surface.sto_y = []
                s.surface.sto_z = []
                s.surface.sto_ineq = []
            # compute sto data
            # each sto_hd entry is (coordinates, possible interpretations,
            # probabilities); one interpretation is drawn at random and its
            # contact points are added to the corresponding surfaces
            for g in ArchTable.sto_hd:
                coord = g[0]
                x,y,z = coord
                hd = g[1]
                p = g[2]
                i = np.random.choice(range(len(p)), p=p)
                for s, typ in np.array(hd[i]):
                    add_sto_contact(s, x, y, z, typ)
            #top/bot
            if subpile:
                top=tops[ireal]
                bot=bots[ireal]
            # counter for current simulated surface
            i=-1
            # iterate from the bottom unit to the top unit
            for litho in self.list_units[:: -1]:
                if vb:
                    print("\n#### COMPUTING SURFACE OF UNIT {}".format(litho.name))
                i += 1 # index for simulated surface
                start=time.time()
                if (fl_top) and (i == nlay-1): # first layer assign to top
                    s1=top
                elif (litho.surface.int_method in ["kriging", "linear", "cubic", "nearest"]) and (ireal > 0): # determinist method
                    s1=org_surfs[0, (nlay-1)-i].copy()
                    if vb:
                        print("{}: determinist interpolation method, reuse the first surface".format(litho.name))
                else:
                    # change mean if thickness mode is activated
                    if "thickness" in litho.surface.dic_surf.keys():
                        if litho.surface.dic_surf["thickness"] is not None:
                            if i == 0:
                                litho.surface.dic_surf["mean"] = ArchTable.bot + litho.surface.dic_surf["thickness"]
                            else:
                                litho.surface.dic_surf["mean"] = s1 + litho.surface.dic_surf["thickness"]
                    s1=interp2D(litho.surface, ArchTable.get_xg(), ArchTable.get_yg(), ArchTable.xu2D,
                                seed=ArchTable.seed + litho.ID * 1e3 + ireal, verbose=ArchTable.verbose, ncpu=ArchTable.ncpu, mask2D=mask2D,
                                **litho.surface.dic_surf) # simulation
                    # remove mean
                    if "thickness" in litho.surface.dic_surf.keys():
                        if litho.surface.dic_surf["thickness"] is not None:
                            litho.surface.dic_surf["mean"] = None
                end=time.time()
                if vb:
                    print("{}: time elapsed for computing surface {} s".format(litho.name, (end - start)))
                ## if nan inside the domain (non simulated area) --> set values from surface below
                mask_nan = np.zeros([ny, nx], dtype=bool)
                mask_nan[mask2D] = np.isnan(s1[mask2D])
                if mask_nan.any():
                    if i > 0:
                        s1[mask_nan] = org_surfs[ireal, (nlay-1)-i+1][mask_nan]
                    else:
                        s1[mask_nan] = bot[mask_nan]
                # keep a copy of the raw (pre-clamping) surface
                org_surfs[ireal, (nlay-1)-i]=s1.copy()
                # adapt altitude if above/below top/bot
                s1[s1>top]=top[s1>top]
                s1[s1<bot]=bot[s1<bot]
                #strati consistency and erosion rules
                if i > 0:
                    for o in range(i): # index for checking already simulated surfaces
                        litho2=self.list_units[:: -1][o]
                        s2=surfs[ireal, (nlay-1)-o] #idx from nlay-1 to nlay-i-1
                        if litho != litho2:
                            if (litho.order < litho2.order) & (litho.surface.contact == "onlap"): # if the simulated layer lies above and onlaps
                                s1[s1 < s2]=s2[s1 < s2]
                            elif (litho.order < litho2.order) & (litho.surface.contact == "erode"): # if the simulated layer erodes an older one
                                s2[s2 > s1]=s1[s2 > s1]
                            if o == i-1 and litho.contact == "erode": # if index "o" is underlying layer
                                s1[s1 > s2]=s2[s1 > s2] # prevent erosion layers having volume
                surfs[ireal, (nlay-1)-i]=s1 #add surface
            #compute domains
            start=time.time()
            surfs_ir=surfs[ireal]
            for i in range(surfs_ir.shape[0]):
                if i < surfs_ir.shape[0]-1:
                    s_bot=surfs_ir[i+1] # bottom
                else:
                    s_bot=bot
                s_top=surfs_ir[i] # top
                a=ArchTable.compute_domain(s_top, s_bot)
                # cells of this unit carry the unit's ID inside the mask
                real_domains[ireal, i]=a*mask*self.list_units[i].ID
                surfs_bot[ireal, i]=s_bot
            end=time.time()
            if vb:
                print("\nTime elapsed for getting domains {} s".format((end - start)))
        #only 1 big array
        a=np.sum(real_domains, axis=1)
        ArchTable.Geol.units_domains[a != 0]=a[a != 0]
        ArchTable.Geol.surfaces_by_piles[self.name]=surfs
        ArchTable.Geol.surfaces_bot_by_piles[self.name]=surfs_bot
        ArchTable.Geol.org_surfaces_by_piles[self.name]=org_surfs
        if vb:
            print("##########################\n")
def define_domains(self, ArchTable, surfaces, tops=None, bots=None, subpile=False, vb=0, fl_top=True):
"""
Define units domains from precomputed surfaces.
This method is used when the surfaces are precomputed (e.g. from a previous simulation)
in place to the compute_surfaces method.
Parameters
----------
ArchTable : :class:`Arch_table`
ArchTable object
surfaces : array
array of surfaces (nreal, nlay, ny, nx)
tops : array
array of top elevations (ny, nx)
bots : array
array of bottom elevations (ny, nx)
subpile : bool
if the pile is a subpile, i.e. if True, the top and bottom of the pile are not used
vb : int
verbosity level, 0 is silent, 1 is verbose
fl_top : bool
if True, the top of the pile is used to define the top of the first unit
"""
assert surfaces.shape[1] == len(self.list_units), "the number of surfaces {} provided is not equal to the number of units {}".format(surfaces.shape[1], len(self.list_units))
# nreal
nreal=surfaces.shape[0]
ArchTable.nreal_units=nreal # store number of realizations
#simulation grid
xg=ArchTable.get_xgc()
nx=xg.shape[0]
yg=ArchTable.get_ygc()
ny=yg.shape[0]
zg=ArchTable.get_zg()
nz=zg.shape[0] - 1
if ~subpile:
top=ArchTable.top
bot=ArchTable.bot
mask=ArchTable.mask
nlay=len(self.list_units)
#make sure to have ordered lithologies
self.order_units(vb=vb)
### initialize surfaces by setting to 0
surfs=np.zeros([nreal, nlay, ny, nx])
surfs_bot=np.zeros([nreal, nlay, ny, nx])
org_surfs=surfaces
real_domains=np.zeros([nreal, nlay, nz, ny, nx])
for ireal in range(nreal): # loop over real
#top/bot
if subpile:
top=tops[ireal]
bot=bots[ireal]
# counter for current simulated surface
i=-1
for litho in self.list_units[:: -1]:
i += 1 # index for simulated surface
start=time.time()
if (fl_top) and (i == nlay-1): # first layer assign to top
s1=top
else:
s1=org_surfs[ireal, (nlay-1)-i].copy()
# adapt altitude if above/below top/bot
s1[s1>top]=top[s1>top]
s1[s1<bot]=bot[s1<bot]
# strati consistency and erosion rules
if i > 0:
for o in range(i): # index for checking already simulated surfaces
litho2=self.list_units[:: -1][o]
s2=surfs[ireal, (nlay-1)-o] #idx from nlay-1 to nlay-i-1
if litho != litho2:
if (litho.order < litho2.order) & (litho.surface.contact == "onlap"): # si couche simulée sup et onlap
s1[s1 < s2]=s2[s1 < s2]
elif (litho.order < litho2.order) & (litho.surface.contact == "erode"): # si couche simulée érode une ancienne
s2[s2 > s1]=s1[s2 > s1]
if o == i-1 and litho.contact == "erode": # if index "o" is underlying layer
s1[s1 > s2]=s2[s1 > s2] # prevent erosion layers having volume
surfs[ireal, (nlay-1)-i]=s1 #add surface
#compute domains
surfs_ir=surfs[ireal]
for i in range(surfs_ir.shape[0]):
if i < surfs_ir.shape[0]-1:
s_bot=surfs_ir[i+1] # bottom
else:
s_bot=bot
s_top=surfs_ir[i] # top
a=ArchTable.compute_domain(s_top, s_bot)
real_domains[ireal, i]=a*mask*self.list_units[i].ID
surfs_bot[ireal, i]=s_bot
#only 1 big array
a=np.sum(real_domains, axis=1)
ArchTable.Geol.units_domains[a != 0]=a[a != 0]
del(real_domains)
ArchTable.Geol.surfaces_by_piles[self.name]=surfs
ArchTable.Geol.surfaces_bot_by_piles[self.name]=surfs_bot
ArchTable.Geol.org_surfaces_by_piles[self.name]=org_surfs
if vb:
print("##########################\n")
class Unit():
"""
This class defines Unit objects, which are the building blocks of the Pile class.
Parameters
----------
name : string
name of the unit that will be used as an identifier
order : int
order of the unit in the pile (1 (top) to n (bottom), where n is the total number of units)
color : string
color to use for plotting and representation.
The color can be any color that is accepted by matplotlib
surface : :class:`Surface` object
surface object that defines the surface of the unit
ID : int
ID of the unit, if None, the ID will be set to the order of the unit
dic_facies : dict
dictionary that defines the facies of the unit. The mandatory keys for the dictionary are:
        - f_method : string, valid methods are:
- "homogenous" : homogenous facies
- "SubPile" : facies are simulated with a subpile
- "SIS" : facies are simulated with stochastic indicator simulation
- "MPS" : facies are simulated with Multiple points statistics
- "TPGs": facies are simulated with Truncated pluri-Gaussian method
- f_covmodel : 3D geone covariance model object or list of 3D geone covariance model objects only used with "SIS" method
- TI : geone.image, Training image for "MPS" method
- SubPile : :class:`Pile` object, subpile for "SubPile" method
Facies keyword arguments to pass to the facies methods (these should be pass through dic_facies !):
- "SIS" :
- "neigh" : int, number of neighbors to use for the SIS
- r : float, radius of the neighborhood to use for the SIS
- probability : list of float, proportion of each facies to use for the SIS
- TI : geone img, Training image(s) to use
- mps "classic" parameters (maxscan, thresh, neig (number of neighbours))
- npost : number of path postprocessing, default 1
- radiusMode : radius mode to use (check deesse manual)
                - anisotropyRatioMode : Anisotropy ratio to use in research of neighbours (check deesse manual)
- rx, ry, rz : radius in x, y and z direction
- angle1, angle2, angle3: ellipsoid of research rotation angles
- ax, ay, az : anisotropy ratio for research ellipsoid
- rot_usage : 0, 1 or 2 (check deesse manual)
- rotAziLoc, rotDipLoc, : local rotation, True or False ?
rotPlungeLoc
- rotAzi, rotDip, : global rotation angles: values, min-max, maps, see deesse_input doc
rotPlunge
- xr, yr, zr : ratio for geom transformation
- xloc, yloc, zloc : local or not transformation
- homo_usage : homothety usage
- probaUsage : probability constraint usage, 0 for no proba constraint, 1 for global proportion defined in globalPdf, 2 for local proportion defined in localPdf
- globalPdf : array-like of float of length equal to the number of class, proportion for each class
- localPdf : (nclass, nz, ny, nx) array of floats probability for each class, localPdf[i] is the "map defined on the simulation grid
- localPdfRadius : support radius for local pdf, default is 2
- deactivationDistance : float, distance at which localPdf are deactivated (see Deesse doc)
- constantThreshold : float, threshold value for pdf's comparison
- dataImage : geone img used as data, see deesse/geone documentation
- outputVarFlag : bool or list of bool of size nv, to output or not the variables
- distanceType : string or list of strings, "categorical" or "continuous"
- TPGs:
- flag: dictionary of limits of cuboids domains in Gaussian space, check ... for the right format.
- Ik_cm: indicator covmodels
- G_cm: list of Gaussian covmodels for the two gaussian fields, can be infered from Ik_cm
- various parameters: (du, dv --> precision of integrals for G_cm inference),
(dim: dimension wanted for G_cm inference,
(c_reg: regularisation term for G_cm inference),
(n: number of control points for G_cm inference))
"""
def __init__(self, name, order, color, surface=None, ID=None,
dic_facies={"f_method": "homogenous", "f_covmodel": None, "TI": None, "SubPile": None, "Flag": None, "G_cm": None},
contact="onlap", verbose=1):
assert ID != 0, "ID cannot be 0"
assert name is not None, "A name must be provided"
assert isinstance( surface, Surface), "A surface object must be provided for each unit"
self.name=name
self.order=order
self.contact=contact
self.c=color
if ID is None:
self.ID=order
else:
self.ID=ID
self.SubPile=None
self.verbose=verbose
self.list_facies=[]
self.bb_units = []
self.x = [] # data coordinates for categorical simulations
self.y = [] # data coordinates for categorical simulations
self.z = [] # data coordinates for categorical simulations
self.mummy_unit=None
self.set_dic_facies(dic_facies) #set dic facies to unit
if surface is not None:
self.set_Surface(surface)
    def set_dic_facies(self, dic_facies):
        """
        Set a dictionary facies to Unit object

        Parameters
        ----------
        dic_facies : dict
            dictionary containing the parameters to pass to the facies
            methods. Must contain the key "f_method" (one of "MPS", "SIS",
            "homogenous", "TPGs", "SubPile"); the other expected keys
            depend on the chosen method (see class docstring).
        """
        assert dic_facies["f_method"] in ("MPS", "SIS", "homogenous", "TPGs", "SubPile"), 'Filling method unknown, valids are "MPS", "SIS", "homogenous", "TPGs", "SubPile'
        self.f_method=dic_facies["f_method"]
        # check important input for facies methods
        #MPS
        if self.f_method == "MPS":
            if "TI" not in dic_facies.keys():
                if self.verbose:
                    print("WARNING NO TI PASSED FOR MPS SIMULATION")
            else:
                self.set_f_TI(dic_facies["TI"])
        #Subpile
        elif self.f_method == "SubPile":
            if "SubPile" not in dic_facies.keys():
                if self.verbose:
                    print("No SubPile passed for SubPile filling, consider adding one with set_SubPile() before proceeding")
            else:
                self.set_SubPile(dic_facies["SubPile"])
            # "units_fill_method" optionally selects how the sub-units
            # themselves are simulated (SIS / MPS / TPGs)
            if "units_fill_method" in dic_facies.keys():
                if dic_facies["units_fill_method"] == "SIS":
                    self.set_f_covmodels(dic_facies["f_covmodel"])
                elif dic_facies["units_fill_method"] == "MPS":
                    if "TI" not in dic_facies.keys():
                        if self.verbose:
                            print("WARNING NO TI PASSED FOR MPS SIMULATION")
                    else:
                        self.set_f_TI(dic_facies["TI"])
                elif dic_facies["units_fill_method"] == "TPGs":
                    if "Flag" in dic_facies.keys():
                        self.flag = dic_facies["Flag"]
                    if "G_cm" in dic_facies.keys():
                        self.G_cm = dic_facies["G_cm"]
                    # TO COMPLETE
        #Truncated Plurigaussians CREATE CHECK FUNCTIONS TO DO
        elif self.f_method == "TPGs":
            #assert
            # NOTE(review): unlike the SubPile/TPGs branch above, missing
            # "Flag"/"G_cm" keys raise a KeyError here — confirm intended
            self.flag=dic_facies["Flag"]
            self.G_cm=dic_facies["G_cm"]
        #SIS
        elif self.f_method == "SIS":
            if "f_covmodel" not in dic_facies.keys():
                if self.verbose:
                    print("Unit {}: WARNING NO COVMODELS PASSED FOR SIS SIMULATION".format(self.name))
            else:
                self.set_f_covmodels(dic_facies["f_covmodel"])
        # keep the raw dict; compute_facies later injects default keyword
        # values into this same dict object
        self.dic_facies=dic_facies
## magic fun
def __repr__(self):
s=self.name
return s
def __call__(self):
return print("Unit {}".format(self.name))
def __eq__(self, other):
"""
for comparison
"""
if other is not None:
if (self.name == other.name) & (self.order == other.order):
return True
else:
return False
else:
return False
def __it__(self, other): # inequality comparison
if (self.order < other.order):
return True
else:
return False
def __gt__(self, other): # inequality comparison
if (self.order > other.order):
return True
else:
return False
def __str__(self):
return self.name
#copy fun
    def copy(self):
        """Return a deep copy of this unit, fully independent of the original."""
        return copy.deepcopy(self)
def set_SubPile(self, SubPile):
"""
Change or define a subpile for filling
Parameters
----------
SubPile : :class:`Pile` object
"""
if isinstance(SubPile, Pile):
if self.f_method == "SubPile":
self.SubPile=SubPile
else:
if self.verbose:
print("Really ? Filling method is not SubPile")
else:
if self.verbose:
print("The SubPile object is not an Arch_table object")
def set_f_TI(self, TI):
"""
Change training image for a strati unit
Parameters
----------
TI: geone.image
Training image to use with the MPS
"""
if isinstance(TI, geone.img.Img):
self.f_TI=TI
if self.verbose:
print("Unit {}: TI added".format(self.name))
else:
if self.verbose:
print("TI NOT A GEONE IMAGE")
def set_Surface(self, surface):
"""
Change or add a top surface for Unit object.
This will define how the top of the formation will be modelled.
Parameters
----------
surface: :class:`Surface` object
Surface object to use for the simulation of the top of the formation
"""
if isinstance(surface, Surface):
self.surface=surface
if self.verbose:
print("Unit {}: Surface added for interpolation".format(self.name))
else:
raise ValueError("Surface object must be an object of the class Surface from ArchPy")
def set_f_covmodels(self, f_covmodel):
"""
Remove existing facies covmodels and one or more
depending of what is passed (array like or only one object)
Parameters
----------
f_covmodel: geone.covModel object
that will be used for the interpolation of the facies if method is SIS.
Can be a list or only one object.
"""
self.list_f_covmodel=[]
covmodels_class=(geone.covModel.CovModel3D)
try:
for covmodel in f_covmodel:
if isinstance(covmodel, covmodels_class):
f=1
else:
f=0
break
if f:
self.list_f_covmodel=f_covmodel
else:
if self.verbose:
print("at least one covmodel is not a 3D covmodel geone, nothing has changed")
except: # only one object passed
if isinstance(f_covmodel, covmodels_class):
self.list_f_covmodel.append(f_covmodel)
if self.verbose:
print("Unit {}: covmodel for SIS added".format(self.name))
else:
if self.verbose:
print("Unit {}: covmodel not a geone 3D covmodel".format(self.name))
def get_h_level(self):
"""
Return hierarchical level of the unit
"""
def func(unit, h_lev):
if unit.mummy_unit is not None:
h_lev += 1
h_lev=func(unit.mummy_unit, h_lev)
return h_lev
h_lev=1
h_lev=func(self, h_lev)
return h_lev
def goes_up_until(self, unit_target):
"""
Climb the hierarchical tree of self unit until finding a specific unit and return it
Parameters
----------
unit_target: :class:`Unit` object
Unit object to find in the hierarchical tree
Returns
-------
unit: :class:`Unit` object or None
Unit object found in the hierarchical tree
if not found, return None
"""
unit = self
for i in range(self.get_h_level()):
if unit != unit_target:
unit = unit.mummy_unit
if unit is None:
return None
else:
return unit
return unit
def get_big_mummy_unit(self):
"""
Return lowest unit (main unit) in hierarchical order
"""
unit = self
for i in range(self.get_h_level()):
if unit.mummy_unit is not None:
unit = unit.mummy_unit
return unit
def get_baby_units(self, recompute=False, vb=1):
"""
Method to return all units that are under the current unit in the hierarchy
Parameters
----------
recompute: bool
If True, recompute the list of units if there had been modifications in the hierarchy
vb: int
verbosity level, 0 for no print, 1 for print
Returns
-------
list
list of childs :class:`Unit` objects of the current unit
"""
if self.SubPile is None:
if vb:
print("Unit has no hierarchy")
return []
if recompute:
self.bb_units = []
def fun(unit):
if unit.SubPile is not None:
l=unit.SubPile.list_units
self.bb_units += l
for unit in l:
fun(unit)
fun(self)
return self.bb_units
def add_facies(self, facies):
"""
Add facies object to strati
Parameters
----------
facies: :class:`Facies` object or list of them
Facies object to model inside the unit
"""
if type(facies) == list:
for i in facies:
if (isinstance(i, Facies)) and (i not in self.list_facies): # check Facies object belong to Facies class
self.list_facies.append(i)
if self.verbose:
print("Facies {} added to unit {}".format(i.name, self.name))
else:
if self.verbose:
print("object isn't a Facies object or Facies object has already been added")
else: # facies not in a list
if (isinstance(facies, Facies)) and (facies not in self.list_facies):
self.list_facies.append(facies)
if self.verbose:
print("Facies {} added to unit {}".format(facies.name, self.name))
else:
if self.verbose:
print("object isn't a Facies object or Facies object has already been added")
return
    def rem_facies(self, facies=None, all_facies=True):
        """
        To remove facies from unit, by default all facies are removed

        Parameters
        ----------
        facies: :class:`Facies` object
            Facies object to remove from the unit.
            Only used when all_facies is False; a ValueError is raised
            if the facies is not present in the unit.
        all_facies: bool
            If True (default), ALL facies are removed and the `facies`
            argument is ignored.
        """
        if all_facies:
            self.list_facies=[]
        else:
            self.list_facies.remove(facies)
    def compute_facies(self, ArchTable, nreal=1, mode="facies", verbose=0):
        """
        Compute facies domain for the specific unit

        Dispatches to one of the simulation methods ("homogenous", "SIS",
        "MPS", "TPGs", "nearest") for every unit realization and stores the
        results into ArchTable.Geol.

        Parameters
        ----------
        ArchTable: :class:`Arch_table` object
            ArchTable containing units, surface,
            facies and at least a Pile (see example on the github)
        nreal : int
            number of realization (per unit realizations) to make
        mode: str
            "facies" (simulate facies inside the unit) or "units"
            (simulate the sub-units of the unit's SubPile)
        verbose : int
            verbosity level, 0 for no print, 1 for print
        """
        xg=ArchTable.get_xg()
        yg=ArchTable.get_yg()
        zg=ArchTable.get_zg()
        seed=ArchTable.seed
        np.random.seed(seed)
        ## grid parameters
        nx=len(xg)-1
        ny=len(yg)-1
        nz=len(zg)-1
        dimensions=(nx, ny, nz)
        ox=np.min(xg[0])
        oy=np.min(yg[0])
        oz=np.min(zg[0])
        origin=(ox, oy, oz)
        sx=np.diff(xg)[0]
        sy=np.diff(yg)[0]
        sz=np.diff(zg)[0]
        spacing=(sx, sy, sz)
        nreal_units = ArchTable.nreal_units
        # NOTE(review): int8 storage limits facies/unit IDs to [-128, 127]
        facies_domains=np.zeros([nreal_units, nreal, nz, ny, nx], dtype=np.int8)
        # pick the simulation method; for mode "units" an optional
        # "units_fill_method" key overrides the default "homogenous"
        if mode == "facies":
            method=self.f_method # method of simulation
        elif mode == "units":
            if "units_fill_method" in self.dic_facies.keys():
                method = self.dic_facies["units_fill_method"]
            else:
                method = "homogenous"
        kwargs=self.dic_facies # retrieve keyword arguments
        #default kwargs for SIS
        kwargs_def_SIS={"neig": 10, "r": 1, "probability": None,"SIS_orientation": False,"azimuth": 0,"dip": 0,"plunge": 0}
        kwargs_def_MPS={"varname":"code", "nv":1, "dataImage":None, "distanceType":["categorical"], "outputVarFlag":None,
                    "xr": 1, "yr": 1, "zr": 1, "maxscan": 0.25, "neig": 24, "thresh": 0.05, "xloc": False, "yloc": False, "zloc": False,
                    "homo_usage": 1, "rot_usage": 1, "rotAziLoc": False, "rotAzi": 0, "rotDipLoc": False, "rotDip": 0, "rotPlungeLoc": False, "rotPlunge": 0,
                    "azi_top":"gradient", "azi_bot":"gradient", "dip_top":"gradient", "dip_bot":"gradient",
                    "radiusMode": "large_default", "rx": nx*sx, "ry": ny*sy, "rz": nz*sz, "anisotropyRatioMode": "one", "ax": 1, "ay": 1, "az": 1,
                    "angle1": 0, "angle2": 0, "angle3": 0,
                    "globalPdf": None, "localPdf": None, "probaUsage": 0, "localPdfRadius": 12., "deactivationDistance": 4., "constantThreshold": 1e-3, "npost":1}
        kwargs_def_TPGs={"neig": 20, "nit": 100, "grf_method": "fft"}
        # merge method defaults into the user dict; note this mutates
        # self.dic_facies in place (kwargs aliases it).
        # NOTE(review): "nearest" is absent from `methods`, so it receives no
        # defaults — confirm intended
        methods=["SIS", "MPS", "TPGs", "SubPile"]
        kwargs_def=[kwargs_def_SIS, kwargs_def_MPS, kwargs_def_TPGs]
        for f_method, kw in zip(methods, kwargs_def):
            if method == f_method:
                if len(kwargs) == 0:
                    kwargs=kw
                else:
                    for k, v in kw.items():
                        if k not in kwargs.keys():
                            kwargs[k]=v
        # list objects
        if mode == "facies":
            list_obj = self.list_facies
        elif mode == "units":
            list_obj = self.SubPile.list_units
        #check if only 1 facies --> homogenous
        if len(list_obj) < 2 and method != "homogenous":
            method ="homogenous"
            if self.verbose:
                print("Unit {} has only one facies, facies method sets to homogenous".format(self.name))
        ### Simulations ###
        if method != "SubPile" or mode == "units":
            for iu in range(nreal_units): # loop over existing surfaces
                if ArchTable.verbose:
                    print("### Unit {} - realization {} ###".format(self.name, iu))
                # NOTE(review): mask is only (re)assigned when contact is
                # "onlap"; for other contacts the previous iteration's mask is
                # reused (or a NameError is raised on the first) — confirm
                if self.contact == "onlap":
                    mask=ArchTable.get_units_domains_realizations(iu) == self.ID
                ## HOMOGENOUS ##
                if method.lower() == "homogenous":
                    if len(list_obj) > 1: # more than one
                        if ArchTable.verbose:
                            print("WARNING !! More than one facies has been passed to homogenous unit {}\nFirst in the list is taken".format(self.name))
                    elif len(list_obj) < 1: # no facies added
                        raise ValueError ("No facies passed to homogenous unit {}".format(self.name))
                    ## setup a 3D array of the simulation grid size and assign one facies to the unit###
                    facies=list_obj[0]
                    for ireal in range(nreal):
                        facies_domains[iu, ireal][mask]=facies.ID
                ## SIS ##
                elif method.upper() == "SIS":
                    ## setup SIS ##
                    # gather hard data (positions + categorical values) inside this unit
                    if mode == "facies":
                        hd, facies = ArchTable.hd_fa_in_unit(self, iu=iu)
                    elif mode == "units":
                        hd, facies = ArchTable.hd_un_in_unit(self, iu=iu)
                    cat_values=[i.ID for i in list_obj] # ID values
                    hd=np.array(hd)
                    facies=np.array(facies)
                    ### orientation map ### -> to modify !!!!
                    if (kwargs["SIS_orientation"]) == "follow_surfaces": # if orientations must follow surfaces
                        # Warning: This option changes alpha and beta angles assuming that rz is smaller than rx and ry. Moreover, rx and ry must be similar.
                        azi,dip=ArchTable.orientation_map(self, smooth=5)
                        for cm in self.list_f_covmodel: # iterate through all facies covmodels and change angles
                            cm.alpha=azi
                            cm.beta=dip
                    elif kwargs["SIS_orientation"]: # if set True, change angles with inputs angles
                        # NOTE(review): reads "alpha"/"beta"/"gamma" keys, but the
                        # SIS defaults above define "azimuth"/"dip"/"plunge" —
                        # a KeyError is raised unless the caller supplies them;
                        # confirm intended keys
                        al=kwargs["alpha"]
                        be=kwargs["beta"]
                        ga=kwargs["gamma"]
                        if type(al) is list and type(be) is list and type(ga) is list:
                        #if hasattr(al,"__iter__") and hasattr(be,"__iter__") and hasattr(ga,"__iter__"): #if a list is given
                            assert len(al) == len(be) == len(ga), "Error: number of given values/arrays in alpha, beta and gamma must be the same"
                            for i in range(len(al)): # loop over
                                for cm in self.list_f_covmodel:
                                    cm.alpha=al[i]
                                    cm.beta=be[i]
                                    cm.gamma=ga[i]
                        else: # if only a ndarray or a value are given
                            for cm in self.list_f_covmodel:
                                cm.alpha=al
                                cm.beta=be
                                cm.gamma=ga
                    #Modify sill of covmodel if there is only one
                    if len(self.list_f_covmodel) == 1:
                        if ArchTable.verbose:
                            print("Only one facies covmodels for multiples facies, adapt sill to right proportions")
                        cm=self.list_f_covmodel[0]
                        sill=cm.sill() #get sill
                        self.list_f_covmodel=[] #reset list
                        ifa=0
                        for fa in list_obj: #loop over facies
                            cm_copy=copy.deepcopy(cm) #make a copy
                            # proportion of this facies: taken from the
                            # "probability" kwarg if given, else from hard data
                            if "probability" in kwargs.keys():
                                if kwargs["probability"] is not None:
                                    p = kwargs["probability"][ifa]
                                else:
                                    p=np.sum(facies == fa.ID)/len(facies == fa.ID) #calculate proportion of facies
                            else:
                                p=np.sum(facies == fa.ID)/len(facies == fa.ID) #calculate proportion of facies
                            if p > 0:
                                # rescale every covmodel element weight so the
                                # sill matches the indicator variance p*(1-p)
                                var=p*(1-p) #variance
                                ratio=var/sill #ratio btw covmodel and real variance
                                for e in cm_copy.elem:
                                    e[1]["w"] *= ratio
                            else:
                                for e in cm_copy.elem:
                                    e[1]["w"] *= 1
                            self.list_f_covmodel.append(cm_copy)
                            ifa += 1
                    ## Simulation
                    simus = gci.simulateIndicator3D(cat_values, self.list_f_covmodel, dimensions, spacing, origin,
                                                    nreal=nreal, method="simple_kriging", x=hd, v=facies, mask=mask,
                                                    searchRadiusRelative=kwargs["r"], nneighborMax=kwargs["neig"],
                                                    probability=kwargs["probability"], verbose=verbose, nthreads = ArchTable.ncpu, seed=seed+iu)["image"].val
                    ### rearrange data into a 2D array of the simulation grid size ###
                    for ireal in range(nreal):
                        grid=simus[ireal]
                        grid[grid==0]=np.nan
                        grid[mask==0]=np.nan # extract only the part on the real domain
                        facies_domains[iu, ireal][mask]=grid[mask]
                elif method.upper() == "MPS":
                    ## assertions ##
                    assert isinstance(self.f_TI, geone.img.Img), "TI is not a geone image object"
                    #load parameters
                    TI=self.f_TI
                    #facies IDs
                    IDs=np.unique(TI.val)
                    nclass=len(IDs)
                    classInterval=[]
                    for c in IDs:
                        classInterval.append([c-0.5, c+0.5])
                    # extract hard data
                    # hd rows are (x, y, z, facies ID); seeded with a dummy
                    # column removed just below
                    hd=np.ones([4, 1])
                    for fa in list_obj: #facies by facies in unit
                        fa_x=np.array(fa.x)
                        fa_y=np.array(fa.y)
                        fa_z=np.array(fa.z)
                        hdi=np.zeros([4, fa_x.shape[0]])
                        hdi[0]= fa_x
                        hdi[1]=fa_y
                        hdi[2]=fa_z
                        hdi[3]=np.ones(fa_x.shape)*fa.ID
                        hd=np.concatenate([hd, hdi], axis=1)
                    hd=np.delete(hd, 0, axis=1) # remove 1st point (I didn't find a clever way to do it...)
                    pt=img.PointSet(npt=hd.shape[1], nv=4, val=hd)
                    pt.set_varname("code")
                    # automatic inference of orientations
                    if kwargs["rotAzi"] == "inference" and kwargs["rotDip"] == "inference": # only if these two are set on inference
                        azi, dip=ArchTable.orientation_map(self, azi_top = kwargs["azi_top"], dip_top= kwargs["dip_top"],
                                                           azi_bot=kwargs["azi_bot"], dip_bot=kwargs["dip_bot"], smooth=2) # get azimuth and dip
                        kwargs["rotAzi"] = azi
                        kwargs["rotDip"] = dip
                        kwargs["rotAziLoc"] = True
                        kwargs["rotDipLoc"] = True
                    #DS research
                    snp=dsi.SearchNeighborhoodParameters(
                        radiusMode=kwargs["radiusMode"], rx=kwargs["rx"], ry=kwargs["ry"], rz=kwargs["rz"],
                        anisotropyRatioMode=kwargs["anisotropyRatioMode"], ax=kwargs["ax"], ay=kwargs["ay"], az=kwargs["az"],
                        angle1=kwargs["angle1"], angle2=kwargs["angle2"], angle3=kwargs["angle3"])
                    # one search-parameter object per variable
                    snp_l = []
                    for iv in range(kwargs["nv"]):
                        snp_l.append(snp)
                    #DS softproba
                    sp=dsi.SoftProbability(
                        probabilityConstraintUsage=kwargs["probaUsage"], # probability constraints method (1 for globa, 2 for local)
                        nclass=nclass, # number of classes of values
                        classInterval=classInterval, # list of classes
                        localPdf= kwargs["localPdf"], # local target PDF
                        globalPdf=kwargs["globalPdf"],
                        localPdfSupportRadius=kwargs["localPdfRadius"], # support radius
                        comparingPdfMethod=5, # method for comparing PDF's (see doc: help(geone.deesseinterface.SoftProbability))
                        deactivationDistance=kwargs["deactivationDistance"], # deactivation distance (checking PDF is deactivated for narrow patterns)
                        constantThreshold=kwargs["constantThreshold"]) # acceptation threshold
                    sp_l = []
                    for iv in range(kwargs["nv"]):
                        sp_l.append(sp)
                    #DS input
                    deesse_input=dsi.DeesseInput(
                        nx=nx, ny=ny, nz=nz, # dimension of the simulation grid (number of cells)
                        sx=sx, sy=sy, sz=sz, # cells units in the simulation grid (here are the default values)
                        ox=ox, oy=oy, oz=oz, # origin of the simulation grid (here are the default values)
                        nv=kwargs["nv"], varname=kwargs["varname"], # number of variable(s), name of the variable(s)
                        nTI=1, TI=TI, # number of TI(s), TI (class dsi.Img)
                        dataPointSet=pt, # hard data (optional)
                        dataImage = kwargs["dataImage"],
                        outputVarFlag = kwargs["outputVarFlag"],
                        distanceType = kwargs["distanceType"], # distance type: proportion of mismatching nodes (categorical var., default)
                        softProbability=sp_l,
                        searchNeighborhoodParameters=snp_l,
                        homothetyUsage=kwargs["homo_usage"],
                        homothetyXLocal=kwargs["xloc"],
                        homothetyXRatio=kwargs["xr"],
                        homothetyYLocal=kwargs["yloc"],
                        homothetyYRatio=kwargs["yr"],
                        homothetyZLocal=kwargs["zloc"],
                        homothetyZRatio=kwargs["zr"],
                        rotationUsage=kwargs["rot_usage"], # tolerance or not
                        rotationAzimuthLocal=kwargs["rotAziLoc"], # rotation according to azimuth: global
                        rotationAzimuth=kwargs["rotAzi"],
                        rotationDipLocal=kwargs["rotDipLoc"],
                        rotationDip=kwargs["rotDip"],
                        rotationPlungeLocal=kwargs["rotPlungeLoc"],
                        rotationPlunge=kwargs["rotPlunge"],
                        nneighboringNode=kwargs["neig"], # max. number of neighbors (for the patterns)
                        distanceThreshold=kwargs["thresh"], # acceptation threshold (for distance between patterns)
                        maxScanFraction=kwargs["maxscan"], # max. scanned fraction of the TI (for simulation of each cell)
                        npostProcessingPathMax=kwargs["npost"], # number of post-processing path(s)
                        seed=seed, # seed (initialization of the random number generator)
                        nrealization=nreal, # number of realization(s)
                        mask=mask) # ncpu
                    deesse_output=dsi.deesseRun(deesse_input, nthreads=ArchTable.ncpu, verbose=verbose)
                    simus=deesse_output["sim"]
                    for ireal in range(nreal):
                        sim=simus[ireal]
                        #self.facies_domains[iu, ireal]=sim.val[0] #output in facies_domains
                        facies_domains[iu, ireal][mask]=sim.val[0][mask]
                elif method == "TPGs": #truncated (pluri)gaussian
                    #load params#
                    flag=self.flag
                    G_cm=self.G_cm
                    ## data format ##
                    # data=(x, y, z, g1, g2, v), where x, y, z are the cartesian coordinates, g1 and g2 are the values of
                    # first/second gaussian fields and v is the facies value
                    ## setup and get hard data
                    hd=np.array([])
                    for fa in list_obj:
                        #ndarray
                        nd=len(fa.x)
                        fa_x=np.array(fa.x)
                        fa_y=np.array(fa.y)
                        fa_z=np.array(fa.z)
                        facies=fa.ID*np.ones(nd)# append facies IDs
                        hd=np.concatenate([hd.reshape(-1, 6), np.concatenate([[fa_x], [fa_y], [fa_z], [np.zeros(nd)], [np.zeros(nd)], [facies]], axis=0).T]) # append data for input TPGs
                    if hd.shape[0] == 0:
                        hd=None
                    simus=run_tpgs(nreal, xg, yg, zg, hd, G_cm, flag, nmax=kwargs["neig"], grf_method=kwargs["grf_method"], mask=mask)
                    for ireal in range(nreal):
                        grid=simus[ireal]
                        grid[mask==0]=np.nan # extract only the part on the real domain
                        facies_domains[iu, ireal][mask]=grid[mask]
                elif method == "nearest":
                    if ArchTable.verbose:
                        print("<===Nearest neighbors interpolation===>")
                    from sklearn.neighbors import NearestNeighbors
                    # build the (x, y, z) coordinates of every grid cell
                    X = np.ones([nz, ny, nx])* ArchTable.xgc
                    Y = np.ones([nz, ny, nx])
                    Y[:] = np.ones([nx, ny]).T * ArchTable.ygc.reshape(-1, 1)
                    Z = np.ones([nz, ny, nx])
                    Z[:, :] =( np.ones([nz, nx]) * ArchTable.zgc.reshape(-1, 1)).reshape(nz, 1, nx)
                    xu3D = np.array([X.flatten(), Y.flatten(), Z.flatten()]).T
                    # get hard data
                    if mode == "facies":
                        hd, facies = ArchTable.hd_fa_in_unit(self, iu=iu)
                    elif mode == "units":
                        hd, facies = ArchTable.hd_un_in_unit(self, iu=iu)
                    hd=np.array(hd)
                    facies=np.array(facies)
                    X_fit = hd
                    y_fit = facies
                    X_pred = xu3D[mask.flatten()]
                    # fit
                    nn = NearestNeighbors(n_neighbors=1).fit(X_fit)
                    #pred
                    res = nn.kneighbors(X_pred, return_distance=False, n_neighbors=1)
                    # assign
                    y_pred = y_fit[res]
                    simus = np.zeros([nz, ny, nx])
                    simus[mask] = y_pred[:, 0] # reassign values
                    # nearest is deterministic: copy the same result to all reals
                    for ireal in range(nreal):
                        facies_domains[iu, ireal][mask]=simus[mask]
                # store the per-unit results into the global Geol container
                if mode == "facies":
                    ArchTable.Geol.facies_domains[iu,:, mask]=facies_domains[iu,:, mask] # store results
                elif mode == "units":
                    ArchTable.Geol.units_domains[iu, mask] = facies_domains[iu, 0, mask] # store results
        elif method == "SubPile": # Hierarchical filling
            # nothing to do here: the sub-units are simulated by their own pile
            if ArchTable.verbose:
                print("SubPile filling method, nothing happened")
            pass
class Surface():
    """
    Class Surface, must be linked to a :class:`Unit` object

    Parameters
    ----------
    name : string
        to identify the surface for debugging purpose
    contact : string
        onlap, erode or comf. Onlap indicates that this surface cannot erode
        older surfaces, on the contrary to erode surfaces
    dic_surf : dict
        parameters for surface interpolation:

        - int_method : string
            method of interpolation, possible methods are:
            kriging, MPS, grf, grf_ineq, linear, cubic, nearest
        - covmodel : geone covariance model (see doc Geone for more information)
            required if a multi-gaussian method is used
        - N_transfo : bool
            Normal-score transform. Flag to apply or not a Normal Score on
            the data for the interpolation
        - additional kwargs (passed in dic_surf directly):
            for the search ellipsoid (kriging, grf, ...):

            - r : relative (to covariance model) radius of research (default is 1)
            - neig : number of neighbours
            - krig_type : string, kriging method (ordinary_kriging, simple_kriging)

            for MPS:

            - TI
            - various MPS parameters, see geone documentation
    """

    def __init__(self, name="Surface_1",
                 dic_surf=None,
                 contact="onlap"):
        # NOTE: dic_surf used to be a mutable default dict shared between
        # every Surface instance; the default-key injection below mutated it
        # in place, polluting every later instance. Build a fresh dict per call.
        if dic_surf is None:
            dic_surf = {"int_method": "nearest", "covmodel": None, "N_transfo": False}
        assert contact in ["erode", "onlap", "comf"], "contact must be erode or onlap or comf"
        assert dic_surf["int_method"] is not None, "An interpolation method must be provided"
        assert dic_surf["int_method"] in ["linear", "cubic", "nearest", "kriging", "grf", "grf_ineq", "MPS"], "Unknown interpolation method"

        # default values dic surf (only added when the key is missing)
        kwargs_def_surface={"covmodel": None, "N_transfo": False, "bandwidth_mult": 1, "tau": 0}
        for k, v in kwargs_def_surface.items():
            if k not in dic_surf.keys():
                dic_surf[k]=v

        self.name=name
        self.x=[]     # hard data x coordinates
        self.y=[]     # hard data y coordinates
        self.z=[]     # hard data altitudes
        self.ineq=[]  # inequality data
        self.int_method=dic_surf["int_method"]
        self.N_transfo=dic_surf["N_transfo"]
        self.covmodel=dic_surf["covmodel"]
        self.contact=contact

        # check inputs for surface methods
        if self.int_method in ["kriging", "grf", "grf_ineq"]:
            self.get_surface_covmodel()  # warns if no covmodel was provided
        if self.covmodel is not None and self.N_transfo and self.int_method in ["kriging", "grf", "grf_ineq"]:
            # after a normal-score transform the data have unit variance,
            # so the covmodel sill is expected to be 1
            if self.get_surface_covmodel().sill() != 1:
                print("Unit {}: !! Normal transformation is applied but the variance of the Covariance model is not equal to 1 !!".format(self.name))
        self.dic_surf=dic_surf

    def copy(self):
        """Return a deep copy of this surface."""
        return copy.deepcopy(self)

    def set_covmodel(self, covmodel):
        """
        change or add a covmodel for surface interpolation of self unit.

        Parameters
        ----------
        covmodel : geone.covModel.CovModel2D
            covariance model to be used for surface interpolation
            if the chosen method is grf, grf ineq or kriging
        """
        if isinstance(covmodel, geone.covModel.CovModel2D):
            self.covmodel=covmodel
            # self.dic_surf["covmodel"]=covmodel
            print("Surface {}: covmodel added".format(self.name))
        else:
            print("Surface {}: covmodel not a geone 2D covmodel".format(self.name))

    def get_surface_covmodel(self, vb=1):
        """Return the covariance model used for surface interpolation.

        Returns None (with a warning when vb is 1) if no covmodel was set.
        """
        if self.covmodel is None:
            if vb:
                print ("Warning: Unit '{}' have no Covmodel for surface interpolation".format(self.name))
            return None
        else:
            return self.covmodel

    def set_contact(self, contact):
        """Set the contact type ("erode", "onlap" or "comf")."""
        assert contact in ["erode", "onlap", "comf"], "contact must be erode or onlap or comf"
        self.contact=contact
        self.bhs_processed=0  # flag: borehole data must be processed again

    def set_dic_surf(self, dic_surf):
        """Replace the surface interpolation parameter dictionary."""
        self.dic_surf=dic_surf
class Facies():
    """
    class for facies (2nd level of hierarchy)

    Parameters
    ----------
    ID : int
        facies ID that is used in the results. Cannot be 0
        (0 is reserved for "no facies").
    name : string
        facies name, used as an identifier
    color : string
        color of the facies, used for plotting

    Raises
    ------
    ValueError
        if ID is 0
    """

    def __init__(self, ID, name, color):
        # hard-data coordinates attached to this facies
        self.x=[]
        self.y=[]
        self.z=[]
        if ID != 0:
            self.ID=ID
        else:
            raise ValueError("ID facies cannot be equal to 0")
        self.name=name
        self.c=color

    def __eq__(self, other):
        """Two facies are equal when both name and ID match.

        Bug fix: comparison with None used to raise AttributeError;
        it now returns False.
        """
        if other is None:
            return False
        if (self.name == other.name) & (self.ID == other.ID):
            return True
        else:
            return False

    def __hash__(self):
        # defining __eq__ alone sets __hash__ to None (unhashable instances);
        # hash on the same fields used for equality to restore hashability
        return hash((self.name, self.ID))

    def __str__(self):
        return self.name

    def __repr__(self):
        return self.name
class Prop():
    """
    Class for defining a propriety to simulate (3rd level)

    Parameters
    ----------
    name : string
        Property name
    facies : list of :class:`Facies` objects
        facies in which we want to simulate the property
        (in the others the propriety will be set homogenous with a default value)
    covmodels : :class:`geone.covModel.CovModel3D` object or list of them
        covmodels for the simulation (same size of facies or only 1,
        in which case it is broadcast to every facies)
    means : float or list of floats
        mean of the property in each facies (same size of facies, or a
        single value broadcast to every facies)
    int_method : string or list of strings
        method of interpolation, possible methods are:
        - sgs, default
        - fft
        - homogenous
        - mps
        - homogenous_uniform
    def_mean : float
        default mean to used if none is passed in means array
    vmin, vmax : float
        min resp. max value of the property
    x : ndarray of size (n, 3 --> x, y, z)
        position of hard data
    v : ndarray of size (n, 1 --> value)
        value of hard data

    Raises
    ------
    ValueError
        if covmodels or int_method are not valid
    """

    # interpolation methods accepted for a property
    _VALID_INT_METHODS = ("sgs", "fft", "homogenous", "mps", "homogenous_uniform")

    def __init__(self, name, facies, covmodels, means, int_method="sgs", x=None, v=None, def_mean=1, vmin=None, vmax=None):
        assert isinstance(facies, list), "Facies must be a list of facies, even there is only one"
        assert isinstance(vmin, float) or isinstance(vmin, int) or vmin is None, "Vmin error"
        assert isinstance(vmax, float) or isinstance(vmax, int) or vmax is None, "Vmax error"
        self.name=name
        self.facies=facies
        n_facies=len(facies)
        self.n_facies=n_facies
        self.vmin=vmin
        self.vmax=vmax
        self.x=x
        self.v=v

        # covmodels: a sequence (one per facies) or a single model broadcast to all
        try:
            iter(covmodels)
            self.covmodels=covmodels
        except TypeError:
            if isinstance(covmodels, gcm.CovModel3D):
                self.covmodels=[covmodels]*n_facies
            else:
                # Bug fix: the old message referenced an unbound loop variable
                # (NameError); report the offending object itself
                raise ValueError("{} is not a valid CovModel3D".format(covmodels))

        # means: a sequence (one per facies) or a single value broadcast to all
        try:
            iter(means)
            self.means=means
        except TypeError:
            self.means=[means]*n_facies

        # interpolation methods: one name, or one per facies.
        # NB: strings are iterable, so the single-string case must be handled
        # explicitly (the old code only worked via an exception raised inside
        # its try block, and could raise NameError for non-iterable input)
        if isinstance(int_method, str):
            if int_method not in self._VALID_INT_METHODS:
                raise ValueError("{} is not a valid inteprolation method".format(int_method))
            self.int=[int_method]*n_facies
        else:
            try:
                l_methods=list(int_method)
            except TypeError:
                raise ValueError("{} is not a valid inteprolation method".format(int_method))
            for i_method in l_methods:
                if i_method not in self._VALID_INT_METHODS:
                    raise ValueError("{} is not a valid inteprolation method".format(i_method))
            self.int=int_method

        self.def_mean=def_mean

    def __eq__(self, other):
        """Properties are identified by name only."""
        if (self.name == other.name):
            return True
        else:
            return False

    def __repr__(self):
        return self.name

    def add_hd(self, x, v):
        """
        add hard data to the property

        Parameters
        ----------
        x : ndarray of size (n, 3 --> x, y, z)
            position of hard data
        v : ndarray of size (n, 1 --> value)
            value of hard data
        """
        assert x.shape[1] == 3, "invalid shape for hd position (x), must be (ndata, 3)"
        assert v.shape[0] == x.shape[0], "invalid number of data points between v and x"
        self.x=x
        self.v=v
class borehole():
    """
    Class to create a borehole object.
    Boreholes are used to define the lithology and add conditioning data into ArchPy models
    Parameters
    ----------
    name : string
        name of the borehole, not used
    ID : int
        ID of the borehole
    x, y, z : float
        x, y, z coordinates of the top of the borehole
    depth : float
        depth of the borehole
    log_strati : list of tuples
        log_strati contains the geological information about the units in the borehole
        information is given by intervals of the form (strati, top) where strati is a :class:`Unit` object
        and top is the top altitude of unit interval
    log_facies : list of tuples
        log_facies contains the facies information about the borehole
        information is given by intervals of the form (facies, top) where facies is a :class:`Facies` object
        and top is the top altitude of facies interval
    """
    def __init__(self, name, ID, x, y, z, depth, log_strati, log_facies=None):
        self.name=name  # name of the borehole
        self.ID=ID  # ID of the borehole
        self.x=x  # x coordinate of borehole
        self.y=y  # y coordinate of borehole
        self.z=z  # altitude of the borehole top (ground level)
        self.depth=depth
        self.log_strati=log_strati
        self.log_facies=log_facies
        # Empty logs carry no information: treat them as absent.
        if log_strati is not None:
            self.list_stratis=[s for s, d in self.log_strati]
            if len(self.list_stratis) == 0:
                self.log_strati=None
        if log_facies is not None:
            self.list_facies =[s for s, d in self.log_facies]
            # consistency fix: check the freshly built list, mirroring the
            # stratis branch above (same length, so behavior is unchanged)
            if len(self.list_facies) == 0:
                self.log_facies=None
    def get_list_stratis(self):
        """Return (and cache) the list of unit objects in log_strati."""
        self.list_stratis=[s for s, d in self.log_strati]
        return self.list_stratis
    def get_list_facies(self):
        """Return (and cache) the list of facies objects in log_facies."""
        self.list_facies =[s for s, d in self.log_facies]
        return self.list_facies
    def __eq__(self, other):
        """Boreholes are equal when their ID and (x, y, z) position match."""
        return (self.ID == other.ID) and \
               (self.x == other.x) and (self.y == other.y) and (self.z == other.z)
    def prop_units(self):
        """
        Return a dictionary of the proportion of the units in the borehole
        (unit name --> interval thickness divided by total depth).
        """
        d = {}
        unit_prev = None
        for s in self.log_strati:
            if s[0] is not None:
                if s[0].name not in d:
                    d[s[0].name] = 0
                if unit_prev is not None:
                    # thickness of the previous interval = its top minus this top
                    thk = unit_prev[1] - s[1]
                    d[unit_prev[0].name] += thk
                unit_prev = s
        # last interval extends down to the bottom of the borehole
        thk = unit_prev[1] - (self.z - self.depth)
        d[unit_prev[0].name] += thk
        for k in d:  # normalize thicknesses into proportions
            d[k] /= self.depth
        return d
    def extract(self, z, vb=1):
        """extract the units and facies information at specified altitude z
        Returns (unit, facies) when both logs exist, the unit or facies alone
        when only one log exists, and None when z is outside the borehole.
        """
        ls=False
        lf=False
        unit=None
        facies=None
        # z above the top or below the bottom --> no information
        if z > self.z or z < self.z - self.depth:
            if vb:
                print("borehole have no information at this altitude")
            return None
        if self.log_strati is not None:
            ls=True
            for i in range(len(self.log_strati)):
                if i < len(self.log_strati) - 1:
                    s1 = self.log_strati[i]
                    s2 = self.log_strati[i+1]
                else:
                    # last interval: bottom of the borehole closes it
                    s1 = self.log_strati[i]
                    s2 = (None, self.z - self.depth)
                if s1[1] >= z and s2[1] < z:
                    unit=s1[0]
                    break
        if self.log_facies is not None:
            lf=True
            for i in range(len(self.log_facies)):
                if i < len(self.log_facies) - 1:
                    f1 = self.log_facies[i]
                    f2 = self.log_facies[i+1]
                else:
                    f1 = self.log_facies[i]
                    f2 = (None, self.z - self.depth)
                if f1[1] >= z and f2[1] < z:
                    facies=f1[0]
                    break
        if ls and lf:
            return (unit, facies)
        elif ls:
            return unit
        elif lf:
            return facies
class Geol():
    """
    ArchPy output class which contains the results of the geological simulations.
    All attributes start empty and are filled in by the simulation routines.
    """
    def __init__(self):
        # simulated surfaces (None until a simulation has been run)
        self.surfaces=None
        self.surfaces_bot=None
        self.org_surfaces=None
        # same results grouped by pile
        self.surfaces_by_piles={}
        self.surfaces_bot_by_piles={}
        # fix: the original final line was corrupted by a fused
        # "| PypiClean |" fragment, which is a syntax error
        self.org_surfaces_by_piles={}
/Electrum-CHI-3.3.8.tar.gz/Electrum-CHI-3.3.8/electrum_chi/electrum/plugins/trezor/trezor.py | import traceback
import sys
from typing import NamedTuple, Any
from electrum.util import bfh, bh2u, versiontuple, UserCancelled, UserFacingException
from electrum.bitcoin import TYPE_ADDRESS, TYPE_SCRIPT
from electrum.bip32 import BIP32Node, convert_bip32_path_to_list_of_uint32 as parse_path
from electrum import constants
from electrum.i18n import _
from electrum.plugin import Device
from electrum.transaction import deserialize, Transaction
from electrum.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from electrum.base_wizard import ScriptTypeNotSupported, HWD_SETUP_NEW_WALLET
from electrum.logging import get_logger
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import (is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data,
LibraryFoundButUnusable, OutdatedHwFirmwareException)
_logger = get_logger(__name__)
try:
import trezorlib
import trezorlib.transport
from trezorlib.transport.bridge import BridgeTransport, call_bridge
from .clientbase import TrezorClientBase
from trezorlib.messages import (
RecoveryDeviceType, HDNodeType, HDNodePathType,
InputScriptType, OutputScriptType, MultisigRedeemScriptType,
TxInputType, TxOutputType, TxOutputBinType, TransactionType, SignTx)
RECOVERY_TYPE_SCRAMBLED_WORDS = RecoveryDeviceType.ScrambledWords
RECOVERY_TYPE_MATRIX = RecoveryDeviceType.Matrix
TREZORLIB = True
except Exception as e:
_logger.exception('error importing trezorlib')
TREZORLIB = False
RECOVERY_TYPE_SCRAMBLED_WORDS, RECOVERY_TYPE_MATRIX = range(2)
# Trezor initialization methods
TIM_NEW, TIM_RECOVER = range(2)
TREZOR_PRODUCT_KEY = 'Trezor'
class TrezorKeyStore(Hardware_KeyStore):
    """Keystore backed by a Trezor hardware device."""
    hw_type = 'trezor'
    device = TREZOR_PRODUCT_KEY
    def get_derivation(self):
        # BIP32 derivation path prefix stored on this keystore.
        return self.derivation
    def get_client(self, force_pair=True):
        # The plugin owns device discovery/pairing and client caching.
        return self.plugin.get_client(self, force_pair)
    def decrypt_message(self, sequence, message, password):
        # Not supported by this keystore; fail loudly with a user-facing error.
        raise UserFacingException(_('Encryption and decryption are not implemented by {}').format(self.device))
    def sign_message(self, sequence, message, password):
        # sequence is a (change, index) pair appended to the derivation prefix.
        client = self.get_client()
        address_path = self.get_derivation() + "/%d/%d"%sequence
        msg_sig = client.sign_message(address_path, message)
        return msg_sig.signature
    def sign_transaction(self, tx, password):
        """Collect the inputs' previous transactions and the derivation path
        of our master xpub, then delegate the actual signing to the plugin."""
        if tx.is_complete():
            return
        # previous transactions used as inputs
        prev_tx = {}
        # path of the xpubs that are involved
        xpub_path = {}
        for txin in tx.inputs():
            # NOTE(review): `pubkeys` is unpacked but never used here.
            pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
            tx_hash = txin['prevout_hash']
            if txin.get('prev_tx') is None and not Transaction.is_segwit_input(txin):
                raise UserFacingException(_('Offline signing with {} is not supported for legacy inputs.').format(self.device))
            prev_tx[tx_hash] = txin['prev_tx']
            for x_pubkey in x_pubkeys:
                if not is_xpubkey(x_pubkey):
                    continue
                xpub, s = parse_xpubkey(x_pubkey)
                # only record the path for our own master public key
                if xpub == self.get_master_public_key():
                    xpub_path[xpub] = self.get_derivation()
        self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class TrezorInitSettings(NamedTuple):
    """Settings gathered from the wizard for initializing a Trezor device."""
    word_count: int
    label: str
    pin_enabled: bool
    passphrase_enabled: bool
    recovery_type: Any = None
    no_backup: bool = False
class TrezorPlugin(HW_PluginBase):
    # Derived classes provide:
    #
    # class-static variables: client_class, firmware_URL, handler_class,
    #     libraries_available, libraries_URL, minimum_firmware,
    #     wallet_class, types
    firmware_URL = 'https://wallet.trezor.io'
    libraries_URL = 'https://github.com/trezor/python-trezor'
    minimum_firmware = (1, 5, 2)
    keystore_class = TrezorKeyStore
    # accepted trezorlib version range: [minimum_library, maximum_library)
    minimum_library = (0, 11, 0)
    maximum_library = (0, 12)
    SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
    DEVICE_IDS = (TREZOR_PRODUCT_KEY,)
    MAX_LABEL_LEN = 32
    def __init__(self, parent, config, name):
        super().__init__(parent, config, name)
        self.libraries_available = self.check_libraries_available()
        if not self.libraries_available:
            return
        # Only register for device enumeration when trezorlib is usable.
        self.device_manager().register_enumerate_func(self.enumerate)
    def get_library_version(self):
        """Return the installed trezorlib version, or raise if it imported
        but is unusable (TREZORLIB is False when the module-level import failed)."""
        import trezorlib
        try:
            version = trezorlib.__version__
        except Exception:
            version = 'unknown'
        if TREZORLIB:
            return version
        else:
            raise LibraryFoundButUnusable(library_version=version)
    def enumerate(self):
        # If there is a bridge, prefer that.
        # On Windows, the bridge runs as Admin (and Electrum usually does not),
        # so the bridge has better chances of finding devices. see #5420
        # This also avoids duplicate entries.
        try:
            call_bridge("enumerate")
        except Exception:
            devices = trezorlib.transport.enumerate_devices()
        else:
            devices = BridgeTransport.enumerate()
        # The transport path doubles as the device id and the UI label.
        return [Device(path=d.get_path(),
                       interface_number=-1,
                       id_=d.get_path(),
                       product_key=TREZOR_PRODUCT_KEY,
                       usage_page=0,
                       transport_ui_string=d.get_path())
                for d in devices]
    def create_client(self, device, handler):
        """Open a transport to *device* and wrap it in a TrezorClientBase.
        Returns None when the transport cannot be opened."""
        try:
            self.logger.info(f"connecting to device at {device.path}")
            transport = trezorlib.transport.get_transport(device.path)
        except BaseException as e:
            self.logger.info(f"cannot connect at {device.path} {e}")
            return None
        if not transport:
            self.logger.info(f"cannot connect at {device.path}")
            return
        self.logger.info(f"connected to device at {device.path}")
        # note that this call can still raise!
        return TrezorClientBase(transport, handler, self)
    def get_client(self, keystore, force_pair=True):
        devmgr = self.device_manager()
        handler = keystore.handler
        # hid_lock serializes access to the underlying HID transport.
        with devmgr.hid_lock:
            client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
        # returns the client for a given keystore. can use xpub
        if client:
            client.used()
        return client
def get_coin_name(self):
return "Testnet" if constants.net.TESTNET else "Xaya"
    def initialize_device(self, device_id, wizard, handler):
        # Initialization method: ask the user how to set up the device, then
        # run the chosen method on a background thread (see f below).
        msg = _("Choose how you want to initialize your {}.\n\n"
                "The first two methods are secure as no secret information "
                "is entered into your computer.\n\n"
                "For the last two methods you input secrets on your keyboard "
                "and upload them to your {}, and so you should "
                "only do those on a computer you know to be trustworthy "
                "and free of malware."
        ).format(self.device, self.device)
        choices = [
            # Must be short as QT doesn't word-wrap radio button text
            (TIM_NEW, _("Let the device generate a completely new seed randomly")),
            (TIM_RECOVER, _("Recover from a seed you have previously written down")),
        ]
        def f(method):
            import threading
            settings = self.request_trezor_init_settings(wizard, method, device_id)
            # run device initialization off the UI thread; the wizard loop
            # blocks here until _initialize_device_safe exits it
            t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
            t.setDaemon(True)
            t.start()
            exit_code = wizard.loop.exec_()
            if exit_code != 0:
                # this method (initialize_device) was called with the expectation
                # of leaving the device in an initialized state when finishing.
                # signal that this is not the case:
                raise UserCancelled()
        wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
    def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
        """Thread target: run _initialize_device and always exit the wizard
        loop with 0 on success, 1 on cancellation or error."""
        exit_code = 0
        try:
            self._initialize_device(settings, method, device_id, wizard, handler)
        except UserCancelled:
            exit_code = 1
        except BaseException as e:
            self.logger.exception('')
            handler.show_error(repr(e))
            exit_code = 1
        finally:
            wizard.loop.exit(exit_code)
    def _initialize_device(self, settings: TrezorInitSettings, method, device_id, wizard, handler):
        """Perform the actual device initialization (new seed or recovery)."""
        if method == TIM_RECOVER and settings.recovery_type == RECOVERY_TYPE_SCRAMBLED_WORDS:
            handler.show_error(_(
                "You will be asked to enter 24 words regardless of your "
                "seed's actual length. If you enter a word incorrectly or "
                "misspell it, you cannot change it or go back - you will need "
                "to start again from the beginning.\n\nSo please enter "
                "the words carefully!"),
                blocking=True)
        devmgr = self.device_manager()
        client = devmgr.client_by_id(device_id)
        if not client:
            raise Exception(_("The device was disconnected."))
        if method == TIM_NEW:
            # map mnemonic length to entropy strength in bits
            strength_from_word_count = {12: 128, 18: 192, 24: 256}
            client.reset_device(
                strength=strength_from_word_count[settings.word_count],
                passphrase_protection=settings.passphrase_enabled,
                pin_protection=settings.pin_enabled,
                label=settings.label,
                no_backup=settings.no_backup)
        elif method == TIM_RECOVER:
            client.recover_device(
                recovery_type=settings.recovery_type,
                word_count=settings.word_count,
                passphrase_protection=settings.passphrase_enabled,
                pin_protection=settings.pin_enabled,
                label=settings.label)
            if settings.recovery_type == RECOVERY_TYPE_MATRIX:
                handler.close_matrix_dialog()
        else:
            raise RuntimeError("Unsupported recovery method")
    def _make_node_path(self, xpub, address_n):
        """Convert an xpub plus a derivation suffix into a trezorlib
        HDNodePathType message."""
        bip32node = BIP32Node.from_xkey(xpub)
        node = HDNodeType(
            depth=bip32node.depth,
            fingerprint=int.from_bytes(bip32node.fingerprint, 'big'),
            child_num=int.from_bytes(bip32node.child_number, 'big'),
            chain_code=bip32node.chaincode,
            public_key=bip32node.eckey.get_public_key_bytes(compressed=True),
        )
        return HDNodePathType(node=node, address_n=address_n)
    def setup_device(self, device_info, wizard, purpose):
        """Prepare the device for use with a wallet: check firmware,
        initialize it if needed, and prime the xpub."""
        devmgr = self.device_manager()
        device_id = device_info.device.id_
        client = devmgr.client_by_id(device_id)
        if client is None:
            raise UserFacingException(_('Failed to create a client for this device.') + '\n' +
                                      _('Make sure it is in the correct state.'))
        if not client.is_uptodate():
            msg = (_('Outdated {} firmware for device labelled {}. Please '
                     'download the updated firmware from {}')
                   .format(self.device, client.label(), self.firmware_URL))
            raise OutdatedHwFirmwareException(msg)
        # fixme: we should use: client.handler = wizard
        client.handler = self.create_handler(wizard)
        if not device_info.initialized:
            self.initialize_device(device_id, wizard, client.handler)
        is_creating_wallet = purpose == HWD_SETUP_NEW_WALLET
        client.get_xpub('m', 'standard', creating=is_creating_wallet)
        client.used()
    def get_xpub(self, device_id, derivation, xtype, wizard):
        """Fetch the xpub at *derivation* from the device, rejecting script
        types this plugin does not support."""
        if xtype not in self.SUPPORTED_XTYPES:
            raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
        devmgr = self.device_manager()
        client = devmgr.client_by_id(device_id)
        client.handler = wizard
        xpub = client.get_xpub(derivation, xtype)
        client.used()
        return xpub
def get_trezor_input_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return InputScriptType.SPENDWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return InputScriptType.SPENDP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return InputScriptType.SPENDADDRESS
if electrum_txin_type in ('p2sh', ):
return InputScriptType.SPENDMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def get_trezor_output_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return OutputScriptType.PAYTOWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return OutputScriptType.PAYTOP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return OutputScriptType.PAYTOADDRESS
if electrum_txin_type in ('p2sh', ):
return OutputScriptType.PAYTOMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
    def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
        """Sign *tx* on the device and write the signatures back into it."""
        # convert previous transactions into trezorlib messages keyed by raw txid
        prev_tx = { bfh(txhash): self.electrum_tx_to_txtype(tx, xpub_path) for txhash, tx in prev_tx.items() }
        client = self.get_client(keystore)
        inputs = self.tx_inputs(tx, xpub_path, True)
        outputs = self.tx_outputs(keystore.get_derivation(), tx)
        details = SignTx(lock_time=tx.locktime, version=tx.version)
        # NOTE(review): `_` here shadows the gettext alias imported at module
        # level (only within this method body).
        signatures, _ = client.sign_tx(self.get_coin_name(), inputs, outputs, details=details, prev_txes=prev_tx)
        # hex-encode each signature and append the '01' sighash byte
        signatures = [(bh2u(x) + '01') for x in signatures]
        tx.update_signatures(signatures)
    def show_address(self, wallet, address, keystore=None):
        """Display *address* on the device screen for visual verification."""
        if keystore is None:
            keystore = wallet.get_keystore()
        if not self.show_address_helper(wallet, address, keystore):
            return
        deriv_suffix = wallet.get_address_index(address)
        derivation = keystore.derivation
        address_path = "%s/%d/%d"%(derivation, *deriv_suffix)
        script_type = self.get_trezor_input_script_type(wallet.txin_type)
        # prepare multisig, if available:
        xpubs = wallet.get_master_public_keys()
        if len(xpubs) > 1:
            pubkeys = wallet.get_public_keys(address)
            # sort xpubs using the order of pubkeys
            sorted_pairs = sorted(zip(pubkeys, xpubs))
            multisig = self._make_multisig(
                wallet.m,
                [(xpub, deriv_suffix) for _, xpub in sorted_pairs])
        else:
            multisig = None
        client = self.get_client(keystore)
        client.show_address(address_path, script_type, multisig)
    def tx_inputs(self, tx, xpub_path, for_sig=False):
        """Convert electrum tx inputs into trezorlib TxInputType messages.
        When for_sig is True, also attach script type, multisig info and the
        derivation path of our key; otherwise only the outpoint data is set
        (used for previous transactions)."""
        inputs = []
        for txin in tx.inputs():
            txinputtype = TxInputType()
            if txin['type'] == 'coinbase':
                # coinbase inputs reference the all-zero outpoint
                prev_hash = b"\x00"*32
                prev_index = 0xffffffff # signed int -1
            else:
                if for_sig:
                    x_pubkeys = txin['x_pubkeys']
                    xpubs = [parse_xpubkey(x) for x in x_pubkeys]
                    multisig = self._make_multisig(txin.get('num_sig'), xpubs, txin.get('signatures'))
                    script_type = self.get_trezor_input_script_type(txin['type'])
                    txinputtype = TxInputType(
                        script_type=script_type,
                        multisig=multisig)
                    # find which key is mine
                    for xpub, deriv in xpubs:
                        if xpub in xpub_path:
                            xpub_n = parse_path(xpub_path[xpub])
                            txinputtype.address_n = xpub_n + deriv
                            break
                prev_hash = bfh(txin['prevout_hash'])
                prev_index = txin['prevout_n']
            if 'value' in txin:
                txinputtype.amount = txin['value']
            txinputtype.prev_hash = prev_hash
            txinputtype.prev_index = prev_index
            if txin.get('scriptSig') is not None:
                script_sig = bfh(txin['scriptSig'])
                txinputtype.script_sig = script_sig
            # default sequence enables nLockTime while opting out of RBF
            txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
            inputs.append(txinputtype)
        return inputs
    def _make_multisig(self, m, xpubs, signatures=None):
        """Build a MultisigRedeemScriptType for an m-of-n setup, or None for
        the single-key case. *signatures* entries are hex strings ('' when
        missing); the trailing sighash byte is stripped from each."""
        if len(xpubs) == 1:
            return None
        pubkeys = [self._make_node_path(xpub, deriv) for xpub, deriv in xpubs]
        if signatures is None:
            signatures = [b''] * len(pubkeys)
        elif len(signatures) != len(pubkeys):
            raise RuntimeError('Mismatched number of signatures')
        else:
            signatures = [bfh(x)[:-1] if x else b'' for x in signatures]
        return MultisigRedeemScriptType(
            pubkeys=pubkeys,
            signatures=signatures,
            m=m)
    def tx_outputs(self, derivation, tx):
        """Convert electrum tx outputs into trezorlib TxOutputType messages.
        At most one of our own (change) outputs is described by derivation so
        the device can hide it from the confirmation screen; all other
        outputs are described by plain address/script."""
        def create_output_by_derivation():
            # our own output: let the device derive and verify the address
            script_type = self.get_trezor_output_script_type(info.script_type)
            deriv = parse_path("/%d/%d" % index)
            multisig = self._make_multisig(m, [(xpub, deriv) for xpub in xpubs])
            txoutputtype = TxOutputType(
                multisig=multisig,
                amount=amount,
                address_n=parse_path(derivation + "/%d/%d" % index),
                script_type=script_type)
            return txoutputtype
        def create_output_by_address():
            txoutputtype = TxOutputType()
            txoutputtype.amount = amount
            if _type == TYPE_SCRIPT:
                txoutputtype.script_type = OutputScriptType.PAYTOOPRETURN
                txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(o)
            elif _type == TYPE_ADDRESS:
                txoutputtype.script_type = OutputScriptType.PAYTOADDRESS
                txoutputtype.address = address
            return txoutputtype
        outputs = []
        has_change = False
        any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
        for o in tx.outputs():
            _type, address, amount = o.type, o.address, o.value
            use_create_by_derivation = False
            info = tx.output_info.get(address)
            if info is not None and not has_change:
                index, xpubs, m = info.address_index, info.sorted_xpubs, info.num_sig
                on_change_branch = index[0] == 1
                # prioritise hiding outputs on the 'change' branch from user
                # because no more than one change address allowed
                # note: ^ restriction can be removed once we require fw
                # that has https://github.com/trezor/trezor-mcu/pull/306
                if on_change_branch == any_output_on_change_branch:
                    use_create_by_derivation = True
                    has_change = True
            if use_create_by_derivation:
                txoutputtype = create_output_by_derivation()
            else:
                txoutputtype = create_output_by_address()
            outputs.append(txoutputtype)
        return outputs
    def electrum_tx_to_txtype(self, tx, xpub_path):
        """Convert an electrum Transaction into a trezorlib TransactionType
        (used for previous transactions referenced by the inputs)."""
        t = TransactionType()
        if tx is None:
            # probably for segwit input and we don't need this prev txn
            return t
        d = deserialize(tx.raw)
        t.version = d['version']
        t.lock_time = d['lockTime']
        t.inputs = self.tx_inputs(tx, xpub_path)
        t.bin_outputs = [
            TxOutputBinType(amount=vout['value'], script_pubkey=bfh(vout['scriptPubKey']))
            for vout in d['outputs']
        ]
        return t
/JT_Techfield-0.0.11-py3-none-any.whl/JT/MakeSet.py | import numpy as np
def Line(m, b, num = 50, noise_deviation = 1, span = (-10, 10)):
    """
    Generate noisy samples (x, y) along the straight line y = m * x + b.
    x values are drawn uniformly over *span*, Gaussian noise with standard
    deviation *noise_deviation* is added to y, and both (num, 1) arrays are
    returned sorted in ascending order of x.
    """
    lo, hi = span
    x = np.random.rand(num, 1) * (hi - lo) + lo
    x[:, 0].sort()
    noise = np.random.randn(num, 1) * noise_deviation
    y = m * x + b + noise
    return x, y
def Parabola(a, b, c, num = 50, noise_deviation = 1, span = (-10, 10)):
    """
    Generate noisy samples (x, y) on the parabola y = a*x**2 + b*x + c.
    x values are drawn uniformly over *span* and sorted; Gaussian noise with
    standard deviation *noise_deviation* is added to y. Returns x and y as
    (num, 1) arrays.
    """
    lo, hi = span
    x = np.random.rand(num, 1) * (hi - lo) + lo
    x[:, 0].sort()
    noise = np.random.randn(num, 1) * noise_deviation
    # design matrix [1, x, x^2] times coefficient column [c, b, a]
    design = np.hstack((np.ones((num, 1)), x, x ** 2))
    coeffs = [[c], [b], [a]]
    y = design @ coeffs + noise
    return x, y
def MultiLine(weights, num = 50, noise_deviation = 1, span = (-10, 10)):
    """
    Generate a noisy multi-dimensional linear data set.
    Parameters
    ----------
    weights : array-like of shape (dims + 1, 1)
        column vector of coefficients; the first entry multiplies the bias
        column of ones prepended to x.
    num, noise_deviation, span : as in Line().
    Returns
    -------
    x : ndarray of shape (num, dims + 1) with a leading column of ones
    y : ndarray of shape (num, 1), equal to x @ weights plus Gaussian noise
    """
    # Bug fix: the original guard was `type(weights) != np.array`, which is
    # always True (np.array is a function, not a type) and so always copied.
    # np.asarray accepts lists and leaves ndarrays untouched.
    weights = np.asarray(weights)
    dims = weights.shape[0] - 1
    x = np.random.rand(num, dims) * (span[1] - span[0]) + span[0]
    bias = np.ones([num, 1])
    x = np.hstack((bias, x))
    noise = np.random.randn(num, 1) * noise_deviation
    y = x @ weights + noise
    return x, y
def TriCluster(num = 6000, scale = 1, skew = False):
    """Generate a shuffled 2-D data set of three Gaussian clusters centered at
    (0, -6), (8, 0) and (0, 6), each with one-hot labels.
    When *skew* is True each cluster is transformed by a random 2x2 matrix;
    otherwise the identity is used. Returns x of shape (3*(num//3), 2) and
    one-hot y of shape (3*(num//3), 3). Uses the global NumPy RNG.
    """
    if skew:
        cov1 = np.random.randn(2, 2)
        cov2 = np.random.randn(2, 2)
        cov3 = np.random.randn(2, 2)
    else:
        cov1 = np.eye(2)
        cov2 = np.eye(2)
        cov3 = np.eye(2)
    # one cluster of num//3 points around each center
    x1 = np.random.randn(num // 3, 2) * scale @ cov1 + np.array([[0, -6]])
    x2 = np.random.randn(num // 3, 2) * scale @ cov2 + np.array([[8, 0]])
    x3 = np.random.randn(num // 3, 2) * scale @ cov3 + np.array([[0, 6]])
    # append one-hot label columns (cluster 1 --> [1,0,0], etc.)
    cat_1 = np.hstack([
        x1,
        np.ones([x1.shape[0], 1]),
        np.zeros([x1.shape[0], 1]),
        np.zeros([x1.shape[0], 1]),
    ])
    cat_2 = np.hstack([
        x2,
        np.zeros([x1.shape[0], 1]),
        np.ones([x1.shape[0], 1]),
        np.zeros([x1.shape[0], 1]),
    ])
    cat_3 = np.hstack([
        x3,
        np.zeros([x1.shape[0], 1]),
        np.zeros([x1.shape[0], 1]),
        np.ones([x1.shape[0], 1]),
    ])
    # stack, shuffle rows, then split features from labels again
    data = np.vstack([cat_1, cat_2, cat_3])
    np.random.shuffle(data)
    x = data[:, [0, 1]]
    y = data[:, 2:]
    return x, y
def SpanSpace(x, num = 100):
    """
    Build a regular grid of points covering the range of the input data.
    Works like an n-dimensional np.linspace(): *num* points are placed along
    each feature axis, bounded by that feature's min and max in *x*.
    :param x: input data with one feature per column.
    :param num: number of grid points along each axis.
    :return: array of shape (num**n_features, n_features).
    """
    n_features = x.shape[1]
    grid = np.mgrid[[slice(0, 1, 1 / num)] * n_features]
    unit_points = np.reshape(grid, (n_features, -1)).T
    lo = x.min(axis = 0)
    hi = x.max(axis = 0)
    return unit_points * (hi - lo) + lo
/ApeMan-0.1.1.tar.gz/ApeMan-0.1.1/docs/34/import.rst | Import in 3.4
=============
During the migration from Python 3.3 to Python 3.4 a number of API calls were updated, deprecating most of the ``imp`` library in favour of ``importlib``.
Inconveniently :func:`importlib.spec_from_module` (or :func:`importlib.spec_from_loader`, I can't remember) seems to have been excluded in this version of python.
Instead it was included in the next version, Python 3.5.
.. note :: API Changes
Python now calls :func:`imp.find_loader`, which supercedes :func:`imp.find_module`, before calling :func:`imp.load_module`.
This also saw the introduction of :func:`importlib.find_spec` which introduces the semaphore architecture.
:func:`find_spec`
-----------------
The aim of ``find_spec`` is to return a module specification.
This may be done by calling `machinery.ModuleSpec` directly or by using one of the helper functions.
The helper functions that are provided include ``util.spec_from_file`` and ``util.spec_from_loader``.
The former is very strict about receiving a *file* name, *folder* names are not accepted.
Specifically it will accept a packages' init file e.g. ``.\\PACKAGE\\__init__.py`` or module(s) e.g. ``.\\MODULE.py`` but not package directories e.g. ``.\\PACKAGE`` which excludes namespaced packages.
The :func:`find_spec` function accepts both a module name and possibly a module path.
The module path may be either relative or absolute, while the module path is always absolute.
The :func:`util.resolve_name` function may be used to convert these arguments into a fully qualified module name (FQMN).
.. topic :: Example : Standard :func:`find_spec` behaviour
It is helpful to review how the built in ``find_spec`` responds for different package and module configurations.
In both cases we are loading a package under the path ``E:\\Python\\overlay\\``.
The first setup represented a traditional package, with an `__init__.py` file, the resulting spec included a loader and the location of this file.
The submodule search location listed a single path, though one can supposedly extend this by appending paths to the `__path__` variable within the `__init__.py` file.
::
ModuleSpec(name='overlay',
loader=<_frozen_importlib.SourceFileLoader object at 0x0000000001283BA8>,
origin='E:\\Python\\overlay\\__init__.py',
submodule_search_locations=['E:\\Python\\overlay'])
The second setup represented a namespaced package; it excluded the `__init__.py` file, and the resulting spec had no loader and listed its origin as *namespace*.
The submodule search location is now a ``_namespace`` object, which has list like properties but prevents popping.
::
ModuleSpec(name='overlay',
loader=None,
origin='namespace',
submodule_search_locations=_NamespacePath(['E:\\Python\\overlay']))
Had we been importing a module instead of a package then the `submodule_search_location` attribute would have been empty.
One does not know how the other attributes would've differed.
| PypiClean |
/Authlib-1.2.1.tar.gz/Authlib-1.2.1/authlib/oauth2/rfc6749/grants/client_credentials.py | import logging
from .base import BaseGrant, TokenEndpointMixin
from ..errors import UnauthorizedClientError
log = logging.getLogger(__name__)
class ClientCredentialsGrant(BaseGrant, TokenEndpointMixin):
    """The client can request an access token using only its client
    credentials (or other supported means of authentication) when the
    client is requesting access to the protected resources under its
    control, or those of another resource owner that have been previously
    arranged with the authorization server.
    The client credentials grant type MUST only be used by confidential
    clients::
        +---------+                                  +---------------+
        |         |                                  |               |
        |         |>--(A)- Client Authentication --->| Authorization |
        | Client  |                                  |     Server    |
        |         |<--(B)---- Access Token ---------<|               |
        |         |                                  |               |
        +---------+                                  +---------------+
    https://tools.ietf.org/html/rfc6749#section-4.4
    """
    # grant_type value this endpoint handles in the token request
    GRANT_TYPE = 'client_credentials'
    def validate_token_request(self):
        """The client makes a request to the token endpoint by adding the
        following parameters using the "application/x-www-form-urlencoded"
        format per Appendix B with a character encoding of UTF-8 in the HTTP
        request entity-body:
        grant_type
             REQUIRED. Value MUST be set to "client_credentials".
        scope
             OPTIONAL. The scope of the access request as described by
             Section 3.3.
        The client MUST authenticate with the authorization server as
        described in Section 3.2.1.
        For example, the client makes the following HTTP request using
        transport-layer security (with extra line breaks for display purposes
        only):
        .. code-block:: http
            POST /token HTTP/1.1
            Host: server.example.com
            Authorization: Basic czZCaGRSa3F0MzpnWDFmQmF0M2JW
            Content-Type: application/x-www-form-urlencoded
            grant_type=client_credentials
        The authorization server MUST authenticate the client.
        """
        # ignore validate for grant_type, since it is validated by
        # check_token_endpoint
        client = self.authenticate_token_endpoint_client()
        log.debug('Validate token request of %r', client)
        # the authenticated client must be allowed to use this grant type
        if not client.check_grant_type(self.GRANT_TYPE):
            raise UnauthorizedClientError()
        self.request.client = client
        self.validate_requested_scope()
def create_token_response(self):
"""If the access token request is valid and authorized, the
authorization server issues an access token as described in
Section 5.1. A refresh token SHOULD NOT be included. If the request
failed client authentication or is invalid, the authorization server
returns an error response as described in Section 5.2.
An example successful response:
.. code-block:: http
HTTP/1.1 200 OK
Content-Type: application/json
Cache-Control: no-store
Pragma: no-cache
{
"access_token":"2YotnFZFEjr1zCsicMWpAA",
"token_type":"example",
"expires_in":3600,
"example_parameter":"example_value"
}
:returns: (status_code, body, headers)
"""
token = self.generate_token(scope=self.request.scope, include_refresh_token=False)
log.debug('Issue token %r to %r', token, self.client)
self.save_token(token)
self.execute_hook('process_token', self, token=token)
return 200, token, self.TOKEN_RESPONSE_HEADER | PypiClean |
/KavModule-0.0.1-py3-none-any.whl/science.py | from Read_CSV import atomic_mass, atomic_num, symbol, element
class science:
    """Collection of basic physics and chemistry formula helpers.
    All methods are static: call them on the class directly, e.g.
    ``science.density(10, 2)``. The original methods lacked
    ``@staticmethod``, so calling them on an instance would have bound the
    instance to the first numeric parameter.
    """
    # Standard gravity in m/s^2, shared by weight(), mass() and GPE().
    GRAVITY = 9.8
    @staticmethod
    def density(mass, volume):
        """Return density = mass / volume."""
        return mass / volume
    @staticmethod
    def speed(distance, time):
        """Return speed = distance / time."""
        return distance / time
    @staticmethod
    def momentum(mass, velocity):
        """Return momentum = mass * velocity."""
        return mass * velocity
    @staticmethod
    def acceleration(final, initial, time):
        """Return acceleration = (final velocity - initial velocity) / time."""
        return (final - initial) / time
    @staticmethod
    def force(mass, acceleration):
        """Return force = mass * acceleration."""
        return mass * acceleration
    @staticmethod
    def weight(mass):
        """Return weight = mass * g (standard gravity)."""
        return mass * science.GRAVITY
    @staticmethod
    def mass(weight):
        """Return mass = weight / g (standard gravity)."""
        return weight / science.GRAVITY
    @staticmethod
    def power(work, time):
        """Return power = work / time."""
        return work / time
    @staticmethod
    def work(force, distance):
        """Return work = force * distance."""
        return force * distance
    @staticmethod
    def GPE(mass, height):
        """Return gravitational potential energy = mass * g * height."""
        return mass * science.GRAVITY * height
    @staticmethod
    def ke(mass, velocity):
        """Return kinetic energy = 0.5 * mass * velocity**2."""
        return 0.5 * mass * (velocity ** 2)
    @staticmethod
    def period_info(number):
        """Return a multi-line summary (number, name, symbol, mass) for the
        element with 1-based atomic number *number*, using the CSV-backed
        tables imported from Read_CSV."""
        i = number - 1
        atomic_number = "Atomic Number: " + str(atomic_num[i])
        element_name = "Element: " + str(element[i])
        element_symbol = "Symbol: " + str(symbol[i])
        atom_mass = "Atomic Mass: " + str(atomic_mass[i])
        return atomic_number + "\n" + element_name + "\n" + element_symbol + "\n" + atom_mass
/Abhilash1_optimizers-0.1.tar.gz/Abhilash1_optimizers-0.1/Abhilash1_optimizers/Adagrad.py | import math
import numpy as np
#import pandas as pd
import Abhilash1_optimizers.Activation as Activation
import Abhilash1_optimizers.hyperparameters as hyperparameters
import Abhilash1_optimizers.Moment_Initializer as Moment_Initializer
#Adagrad variation of ADAM with beta_1=0 and beta_2~ 1 with noise
class ADAGRAD():
    """Adagrad-style optimizer (a variation of ADAM with accumulated squared
    gradients). NOTE(review): the class is used as a bag of plain functions
    (no self parameters); instantiating ADAGRAD(...) would raise TypeError
    because __init__ returns a value."""
    def __init__(alpha,b_1,b_2,epsilon,noise_g):
        # Invoked directly as ADAGRAD.__init__(...) below to fetch the
        # hyperparameter tuple; not a real constructor.
        return hyperparameters.hyperparameter.initialise(alpha,b_1,b_2,epsilon,noise_g)
    def init(m_t,v_t,t,theta):
        # Initialize moment estimates, timestep and parameter via the
        # Moment_Initializer helper.
        return Moment_Initializer.Moment_Initializer.initialize(m_t,v_t,t,theta)
    def Adagrad_optimizer(data,len_data,max_itr,alpha,b_1,b_2,epsilon,noise_g,act_func,scale):
        # Run max_itr Adagrad update steps for each entry of data and return
        # the list of final parameter values.
        alpha,b_1,b_2,epsilon,noise_g=ADAGRAD.__init__(alpha,b_1,b_2,epsilon,noise_g)
        m_t,v_t,t,theta_0=ADAGRAD.init(0,0,0,0)
        gradient_averages=[]
        final_weight_vector=[]
        # NOTE(review): the inner loop reuses the name `i`, shadowing the
        # outer loop index.
        for i in range(len_data):
            theta_0=data[i]
            for i in range(max_itr):
                t+=1
                # gradient from the chosen activation function
                # NOTE(review): no else branch — an unknown act_func leaves
                # g_t undefined and raises NameError below. The selu branch
                # references `theta`, which is never defined (the init unpack
                # names it theta_0) — calling with act_func="selu" fails.
                if(act_func=="softPlus"):
                    g_t=Activation.Activation.softplus(theta_0)
                elif (act_func=="relu"):
                    g_t=Activation.Activation.relu(theta_0)
                elif (act_func=="elu"):
                    g_t=Activation.Activation.elu(theta_0,alpha)
                elif (act_func=="selu"):
                    g_t=Activation.Activation.selu(scale,theta_0,theta)
                elif (act_func=="tanh"):
                    g_t=Activation.Activation.tanh(theta_0)
                elif (act_func=="hardSigmoid"):
                    g_t=Activation.Activation.hard_sigmoid(theta_0)
                elif (act_func=="softSign"):
                    g_t=Activation.Activation.softsign(theta_0)
                elif (act_func=="linear"):
                    g_t=Activation.Activation.linear(theta_0)
                elif (act_func=="exponential"):
                    g_t=Activation.Activation.exponential(theta_0)
                # accumulate squared gradients (the Adagrad denominator)
                gradient_averages.append(g_t**2)
                g_sum=sum(gradient_averages)
                print("gradient_sum",g_sum)
                # ADAM-style moment updates (m_hat/v_hat are computed but
                # unused by the Adagrad step below)
                m_t=b_1*m_t + (1-b_1)*g_t
                v_t=b_2*v_t +(1-b_2)*g_t*g_t
                m_hat=m_t/(1-(b_1**t))
                v_hat=v_t/(1-(b_2**t))
                theta_prev=theta_0
                #alpha_t=(alpha*(math.sqrt(1-b_2**t)/(1-b_1**t)))
                # Adagrad step: learning rate scaled by accumulated gradient
                alpha_t=(alpha*(g_t/(noise_g + math.sqrt(g_sum))))
                theta_0=theta_prev-(alpha_t)
                print("Intrermediate gradients")
                print("==========================================")
                print("Previous gradient",theta_prev)
                print("Present gradient",theta_0)
                print("==========================================")
                #if theta_0==theta_prev:
                #    break;
            final_weight_vector.append(theta_0)
        return final_weight_vector
    def initialize(data,max_itr):
        # NOTE(review): alpha, b_1, b_2, epsilon, noise_g, act_func and scale
        # are undefined globals here — calling this raises NameError.
        len_data=len(data)
        optimized_weights=ADAGRAD.Adagrad_optimizer(data,len_data,max_itr,alpha,b_1,b_2,epsilon,noise_g,act_func,scale)
        print("Optimized Weight Vector")
        print("=====================================")
        for i in range(len(optimized_weights)):
            print("=====",optimized_weights[i])
# Manual smoke-test entry point (the real calls are commented out).
if __name__=='__main__':
    print("Verbose")
    #t_0=Adagrad_optimizer()
    #print("gradient coefficient",t_0)
    #solve_grad=poly_func(t_0)
    #print("Gradient Value",solve_grad)
    sample_data=[1,0.5,0.7,0.1]
    #ADAGRAD.initialize(sample_data,10)
/GenIce2-2.1.7.1.tar.gz/GenIce2-2.1.7.1/genice2/lattices/Struct21.py | from genice2.cell import cellvectors
import genice2.lattices
# Plugin metadata consumed by the GenIce2 lattice loader (citation, CLI usage, summary).
desc = {"ref": {"SpaceFullerene": 'Sikiric 2010'},
        "usage": "No options available.",
        "brief": "A space fullerene."
        }
class Lattice(genice2.lattices.Lattice):
    """GenIce2 lattice plugin for the 'Struct21' space fullerene (ref. Sikiric 2010).

    All numeric tables below are generator-emitted structure data parsed
    verbatim by GenIce2; do not reformat the string contents.
    """
    def __init__(self):
        # Hydrogen-bond network: one "i j" edge per line between water indices.
        self.pairs = """
155 80
106 222
250 197
220 159
137 101
106 97
34 25
142 184
93 50
212 234
251 150
49 123
31 18
184 177
177 183
27 32
43 79
155 143
74 64
239 250
100 39
208 217
71 240
182 152
193 116
133 91
89 41
24 114
34 203
163 162
26 164
120 52
3 161
37 206
19 51
204 59
101 33
75 84
16 94
62 170
118 114
221 218
183 6
32 222
104 160
42 245
6 238
21 104
43 48
161 92
169 175
5 88
87 219
37 201
49 73
71 85
29 138
54 38
129 217
17 65
227 128
149 156
63 117
96 144
1 139
221 94
88 20
32 233
198 194
37 90
55 83
89 244
241 74
25 104
73 124
139 105
236 105
83 7
244 165
9 61
94 21
199 135
237 150
149 242
112 13
89 230
55 220
4 44
212 19
243 181
77 214
237 113
2 97
81 210
15 143
28 12
3 61
23 176
178 215
22 245
216 143
16 191
168 117
22 70
42 212
147 251
182 134
147 191
239 57
139 190
168 153
5 66
197 52
202 39
66 116
184 35
125 170
201 207
18 121
4 179
121 108
244 223
69 162
0 63
136 145
216 41
198 82
80 223
98 44
20 223
189 87
56 139
133 34
117 156
205 161
232 25
35 64
43 133
68 219
169 75
70 19
207 148
70 214
218 72
1 37
209 84
47 40
93 234
216 165
48 21
77 138
10 207
118 187
132 151
2 248
107 237
136 30
209 248
27 39
167 51
49 176
1 227
209 98
47 36
87 196
10 62
162 134
42 110
81 241
76 183
65 190
21 205
224 164
31 166
185 250
22 120
126 79
229 17
11 98
132 165
236 247
228 145
28 69
25 88
127 138
122 144
123 145
214 57
55 145
54 107
152 35
153 36
27 171
178 78
155 48
29 96
225 59
133 225
232 244
125 121
16 9
213 109
141 180
208 45
136 8
75 195
218 111
193 227
231 122
16 160
245 186
78 77
153 33
15 45
198 211
189 85
0 38
208 66
45 180
225 126
28 231
95 199
10 190
114 175
164 241
142 210
52 246
188 140
103 233
107 33
220 158
210 224
134 101
97 105
142 182
212 235
251 149
15 230
191 40
192 38
130 222
119 58
23 12
31 67
163 137
102 6
201 65
62 108
31 27
23 102
235 85
84 234
76 82
8 57
58 7
180 148
91 165
206 99
40 211
176 146
121 65
19 50
203 59
141 91
249 236
60 237
43 217
38 113
151 204
125 95
167 235
48 111
160 92
236 233
172 200
220 239
126 92
127 158
157 146
32 90
183 124
54 40
60 63
166 249
235 179
146 96
9 211
171 17
202 68
128 230
134 6
90 17
178 120
249 190
238 74
232 151
60 82
47 194
226 138
174 204
13 158
12 159
86 153
26 238
86 137
221 156
72 9
49 164
8 83
166 108
127 58
142 69
178 115
130 125
181 122
228 159
192 14
0 194
243 102
154 230
120 7
81 36
175 196
50 186
187 51
186 52
51 188
99 116
80 205
5 89
179 110
157 73
86 210
226 159
28 13
172 115
97 229
131 4
131 140
226 30
127 200
185 187
50 53
112 146
79 104
155 132
63 14
177 194
29 115
242 211
78 140
72 149
176 228
106 56
61 113
103 106
147 192
157 163
231 224
58 239
88 91
189 2
53 197
130 193
166 103
205 232
141 217
172 144
196 110
109 195
170 135
215 8
184 238
167 114
93 24
243 46
119 77
174 61
26 69
219 229
112 162
169 240
172 7
103 248
101 74
11 179
45 99
181 13
248 67
68 247
161 204
203 160
36 64
219 67
243 73
83 197
215 70
167 131
191 156
213 24
3 192
234 44
119 185
147 92
18 222
124 137
2 247
111 151
60 152
129 199
216 180
107 242
173 213
175 85
174 218
100 195
72 203
136 115
201 95
118 110
87 195
24 75
1 154
42 109
4 173
76 182
241 124
34 111
169 39
5 135
71 247
224 163
102 112
22 119
188 250
128 66
126 80
209 240
96 158
229 18
0 35
79 20
100 67
170 148
46 12
199 41
206 148
108 56
100 98
202 11
173 246
208 20
44 109
99 154
47 117
168 82
193 56
130 206
207 41
15 223
55 144
171 68
157 122
95 227
181 200
46 30
233 240
214 188
213 186
251 59
221 14
177 81
90 105
198 113
129 128
226 57
249 171
118 173
132 225
46 123
202 196
141 135
62 116
23 26
78 246
174 150
14 150
215 53
231 123
10 154
86 76
152 33
245 187
140 53
189 84
29 228
54 64
246 185
131 93
3 94
129 143
168 242
71 11
200 30
        """
        # Water positions, one "x y z" triplet per line (units set by self.coord).
        self.waters = """
0.3125 0.05868 0.29674
0.30868 0.0625 0.95326
0.25 0.25 0.82733
0.1875 0.05868 0.20326
0.0 0.75 0.71034
0.0 0.375 0.03966
0.0 0.05868 0.39719
0.68368 0.375 0.57799
0.31632 0.375 0.57799
0.0 0.375 0.22309
0.0 0.875 0.96034
0.0 0.875 0.77691
0.31632 0.0625 0.48017
0.68368 0.0625 0.48017
0.5 0.05868 0.23659
0.30868 0.9375 0.04674
0.1875 0.44132 0.20326
0.69132 0.06632 0.88577
0.69132 0.43368 0.88577
0.25 0.25 0.67267
0.18368 0.625 0.07799
0.30868 0.25 0.14719
0.875 0.25 0.63708
0.19132 0.9375 0.45326
0.5 0.625 0.72309
0.0 0.375 0.11293
0.31632 0.875 0.42201
0.69132 0.75 0.85282
0.5 0.125 0.46034
0.0 0.75 0.53966
0.125 0.25 0.52691
0.81632 0.55868 0.86424
0.5 0.75 0.875
0.75 0.75 0.32733
0.81632 0.44132 0.13577
0.375 0.93368 0.32799
0.375 0.56632 0.32799
0.5 0.9375 0.93986
0.1875 0.93368 0.26983
0.6875 0.75 0.81015
0.1875 0.56632 0.26983
0.81632 0.0625 0.01983
0.875 0.25 0.71034
0.5 0.44132 0.10282
0.0 0.55868 0.73659
0.375 0.75 0.02691
0.19132 0.25 0.48659
0.3125 0.44132 0.29674
0.5 0.25 0.125
0.19132 0.5625 0.45326
0.375 0.43368 0.67201
0.375 0.06632 0.67201
0.68368 0.55868 0.63577
0.31632 0.55868 0.63577
0.125 0.75 0.28966
0.5 0.625 0.53966
0.18368 0.375 0.92201
0.31632 0.125 0.57799
0.68368 0.125 0.57799
0.75 0.75 0.17267
0.6875 0.05868 0.29674
0.0 0.125 0.22309
0.0 0.625 0.96034
0.5 0.125 0.27691
0.25 0.75 0.32733
0.81632 0.125 0.92201
0.18368 0.4375 0.01983
0.875 0.43368 0.82799
0.875 0.06632 0.82799
0.5 0.0 0.42267
0.125 0.25 0.63708
0.1875 0.94132 0.79674
0.8125 0.44132 0.20326
0.0 0.4375 0.43986
0.125 0.75 0.36293
0.5 0.55868 0.76341
0.80868 0.25 0.35282
0.0 0.94132 0.60282
0.0 0.75 0.625
0.30868 0.56632 0.11424
0.30868 0.93368 0.11424
0.31632 0.44132 0.36424
0.8125 0.25 0.31015
0.5 0.5 0.57733
0.3125 0.43368 0.76983
0.3125 0.06632 0.76983
0.68368 0.44132 0.36424
0.625 0.25 0.78966
0.0 0.5 0.07733
0.0 0.125 0.03966
0.5 0.94132 0.89719
0.81632 0.625 0.07799
0.25 0.75 0.17267
0.3125 0.55868 0.70326
0.3125 0.25 0.18986
0.625 0.25 0.97309
0.80868 0.75 0.51341
0.375 0.25 0.86293
0.0 0.625 0.77691
0.30868 0.75 0.98659
0.8125 0.55868 0.79674
0.875 0.75 0.36293
0.0 0.0625 0.43986
0.18368 0.55868 0.86424
0.18368 0.44132 0.13577
0.30868 0.06632 0.88577
0.30868 0.43368 0.88577
0.875 0.75 0.28966
0.0 0.5 0.92267
0.8125 0.43368 0.73017
0.8125 0.06632 0.73017
0.69132 0.25 0.14719
0.80868 0.9375 0.45326
0.0 0.05868 0.26341
0.5 0.875 0.72309
0.0 0.5625 0.56015
0.18368 0.5625 0.98017
0.5 0.375 0.27691
0.6875 0.94132 0.70326
0.80868 0.06632 0.61424
0.80868 0.43368 0.61424
0.81632 0.375 0.92201
0.68368 0.4375 0.48017
0.31632 0.4375 0.48017
0.0 0.44132 0.39719
0.69132 0.4375 0.95326
0.375 0.75 0.13708
0.80868 0.0625 0.54674
0.30868 0.25 0.01341
0.5 0.25 0.03966
0.5 0.5625 0.93986
0.1875 0.75 0.68986
0.69132 0.93368 0.11424
0.69132 0.56632 0.11424
0.80868 0.93368 0.38577
0.81632 0.4375 0.01983
0.19132 0.4375 0.54674
0.80868 0.56632 0.38577
0.0 0.9375 0.56015
0.18368 0.125 0.92201
0.19132 0.75 0.64719
0.69132 0.5625 0.04674
0.5 0.125 0.38708
0.5 0.0625 0.06015
0.68368 0.5625 0.51983
0.31632 0.5625 0.51983
0.875 0.75 0.47309
0.375 0.75 0.21034
0.69132 0.75 0.98659
0.6875 0.56632 0.23017
0.6875 0.93368 0.23017
0.81632 0.05868 0.13577
0.625 0.93368 0.32799
0.625 0.56632 0.32799
0.18368 0.9375 0.98017
0.5 0.05868 0.10282
0.5 0.44132 0.23659
0.80868 0.5625 0.45326
0.68368 0.9375 0.51983
0.31632 0.9375 0.51983
0.125 0.56632 0.17201
0.125 0.93368 0.17201
0.68368 0.875 0.42201
0.68368 0.625 0.42201
0.31632 0.625 0.42201
0.81632 0.875 0.07799
0.0 0.625 0.88708
0.3125 0.94132 0.70326
0.6875 0.44132 0.29674
0.5 0.75 0.78966
0.81632 0.5625 0.98017
0.81632 0.94132 0.86424
0.80868 0.4375 0.54674
0.8125 0.75 0.68986
0.8125 0.05868 0.20326
0.5 0.94132 0.76341
0.125 0.75 0.47309
0.19132 0.25 0.35282
0.0 0.55868 0.60282
0.0 0.94132 0.73659
0.625 0.75 0.02691
0.80868 0.25 0.48659
0.68368 0.05868 0.36424
0.0 0.25 0.375
0.31632 0.05868 0.36424
0.68368 0.94132 0.63577
0.625 0.43368 0.67201
0.625 0.06632 0.67201
0.31632 0.94132 0.63577
0.375 0.25 0.78966
0.0 0.0 0.92267
0.3125 0.56632 0.23017
0.3125 0.93368 0.23017
0.30868 0.4375 0.95326
0.1875 0.25 0.31015
0.6875 0.43368 0.76983
0.6875 0.06632 0.76983
0.5 0.625 0.61293
0.0 0.25 0.28966
0.69132 0.25 0.01341
0.875 0.25 0.52691
0.69132 0.0625 0.95326
0.8125 0.94132 0.79674
0.875 0.56632 0.17201
0.875 0.93368 0.17201
0.18368 0.05868 0.13577
0.5 0.75 0.96034
0.81632 0.9375 0.98017
0.30868 0.5625 0.04674
0.1875 0.55868 0.79674
0.5 0.375 0.38708
0.0 0.44132 0.26341
0.125 0.25 0.71034
0.6875 0.55868 0.70326
0.19132 0.06632 0.61424
0.19132 0.43368 0.61424
0.69132 0.9375 0.04674
0.5 0.4375 0.06015
0.6875 0.25 0.18986
0.75 0.25 0.82733
0.5 0.875 0.53966
0.5 0.25 0.21034
0.5 0.55868 0.89719
0.18368 0.875 0.07799
0.5 0.5 0.42267
0.625 0.75 0.13708
0.19132 0.0625 0.54674
0.375 0.25 0.97309
0.19132 0.75 0.51341
0.625 0.25 0.86293
0.18368 0.0625 0.01983
0.5 0.375 0.46034
0.0 0.125 0.11293
0.30868 0.75 0.85282
0.1875 0.43368 0.73017
0.1875 0.06632 0.73017
0.18368 0.94132 0.86424
0.8125 0.93368 0.26983
0.19132 0.93368 0.38577
0.5 0.0 0.57733
0.3125 0.75 0.81015
0.19132 0.56632 0.38577
0.8125 0.56632 0.26983
0.0 0.25 0.46034
0.0 0.0 0.07733
0.75 0.25 0.67267
0.80868 0.75 0.64719
0.125 0.06632 0.82799
0.125 0.43368 0.82799
0.0 0.875 0.88708
0.5 0.875 0.61293
0.625 0.75 0.21034
        """
        # Positions above are fractional (relative) cell coordinates.
        self.coord = "relative"
        # Cage descriptors: "size x y z" per line (cage polyhedron size + centre).
        self.cages = """
14 0.0 0.98471 0.32932
14 -0.23471 -0.25 -0.07932
12 -0.25 -0.25 -0.25
14 0.5 -0.75 0.625
14 0.0 0.48471 -0.32932
12 0.5 1.5 1.0
14 0.0 -0.98471 -0.32932
12 0.5 0.75 0.08194
12 0.25 -0.25 0.75
15 0.5 0.25 0.52569
14 1.0 1.51529 1.32932
15 0.5 1.25 -0.27569
12 0.0 1.0 0.5
14 0.5 0.51529 0.17068
14 0.0 -0.75 -0.125
12 -0.25 0.25 -0.75
15 0.0 -0.25 1.02569
12 0.0 0.25 0.16806
12 1.0 1.5 1.5
12 0.0 -0.25 -0.16806
14 -0.5 0.75 0.375
14 0.5 0.98471 0.17068
14 0.73471 0.75 0.57932
12 0.5 1.0 1.0
15 0.5 0.75 1.27569
12 0.5 0.25 0.33194
12 0.0 0.75 0.41806
14 0.23471 -0.25 -0.07932
14 0.23471 0.25 0.07932
15 0.0 1.25 0.77569
12 0.5 1.25 0.91806
14 0.26529 0.25 0.42068
15 0.0 0.25 -0.02569
12 0.5 -0.25 0.66806
14 0.76529 0.25 1.07932
15 -0.5 -0.25 -0.52569
15 0.0 -1.25 -0.77569
14 0.5 1.01529 0.82932
14 0.26529 -0.25 0.57932
14 0.5 1.48471 0.82932
14 -0.26529 0.25 -0.57932
12 0.25 0.25 0.25
12 0.0 -0.75 -0.41806
14 0.0 0.75 0.125
        """
        # Hydrogen-bond length threshold (Angstrom).
        self.bondlen = 3
        self.cell = """
13.020869213334082 13.020869213334082 70.73623470390184
        """
        self.density = 0.6280734030039621
        # NOTE(review): self.cell is assigned twice; the string literal above is
        # dead and immediately replaced by the cellvectors() matrix below.
        self.cell = cellvectors(a=13.020869213334082,
                                b=13.020869213334082,
                                c=70.73623470390184)
/OTLModel/Classes/Onderdeel/FiguratieMarkering.py | from OTLMOW.OTLModel.BaseClasses.OTLAttribuut import OTLAttribuut
from OTLMOW.OTLModel.Classes.Abstracten.FiguratieMarkeringToegang import FiguratieMarkeringToegang
from OTLMOW.OTLModel.Datatypes.KlFiguratieCode import KlFiguratieCode
from OTLMOW.OTLModel.Datatypes.KlFiguratieSoort import KlFiguratieSoort
from OTLMOW.OTLModel.Datatypes.KlFiguratieType import KlFiguratieType
from OTLMOW.OTLModel.Datatypes.KwantWrdInVierkanteMeter import KwantWrdInVierkanteMeter
from OTLMOW.GeometrieArtefact.PuntGeometrie import PuntGeometrie
# Generated with OTLClassCreator. To modify: extend, do not edit
class FiguratieMarkering(FiguratieMarkeringToegang, PuntGeometrie):
    """A marking applied as a figuration on the road to warn, inform or regulate
    traffic.  (Generated OTL class; original Dutch definition: 'Een markering als
    figuratie op de weg aangebracht om het verkeer te waarschuwen, informeren of
    regelen.')"""

    typeURI = 'https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#FiguratieMarkering'
    """The URI of the object according to https://www.w3.org/2001/XMLSchema#anyURI."""

    def __init__(self):
        FiguratieMarkeringToegang.__init__(self)
        PuntGeometrie.__init__(self)

        # Figuration code, restricted to the KlFiguratieCode keylist.
        self._code = OTLAttribuut(field=KlFiguratieCode,
                                  naam='code',
                                  label='code',
                                  objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#FiguratieMarkering.code',
                                  definition='De code van de figuratie markering.',
                                  owner=self)

        # Marking surface area, as a quantity in square metres.
        self._oppervlakte = OTLAttribuut(field=KwantWrdInVierkanteMeter,
                                         naam='oppervlakte',
                                         label='oppervlakte',
                                         objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#FiguratieMarkering.oppervlakte',
                                         definition='De oppervlakte van de markering zoals beschreven in de algemene omzendbrief.',
                                         owner=self)

        # Kind/description of the figuration, restricted to KlFiguratieSoort.
        self._soortOmschrijving = OTLAttribuut(field=KlFiguratieSoort,
                                               naam='soortOmschrijving',
                                               label='soort omschrijving',
                                               objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#FiguratieMarkering.soortOmschrijving',
                                               definition='De soort en tevens de omschrijving van de figuratie markering.',
                                               owner=self)

        # Type of the figuration, restricted to KlFiguratieType.
        self._type = OTLAttribuut(field=KlFiguratieType,
                                  naam='type',
                                  label='type',
                                  objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#FiguratieMarkering.type',
                                  definition='Het type van figuratie markering.',
                                  owner=self)

    @property
    def code(self):
        """The code of the figuration marking."""
        return self._code.get_waarde()

    @code.setter
    def code(self, value):
        self._code.set_waarde(value, owner=self)

    @property
    def oppervlakte(self):
        """The surface area of the marking as described in the general circular."""
        return self._oppervlakte.get_waarde()

    @oppervlakte.setter
    def oppervlakte(self, value):
        self._oppervlakte.set_waarde(value, owner=self)

    @property
    def soortOmschrijving(self):
        """The kind, and at the same time the description, of the figuration marking."""
        return self._soortOmschrijving.get_waarde()

    @soortOmschrijving.setter
    def soortOmschrijving(self, value):
        self._soortOmschrijving.set_waarde(value, owner=self)

    @property
    def type(self):
        """The type of figuration marking."""
        return self._type.get_waarde()

    @type.setter
    def type(self, value):
        self._type.set_waarde(value, owner=self)
/OctoBot-Commons-1.9.18.tar.gz/OctoBot-Commons-1.9.18/octobot_commons/tentacles_management/class_inspector.py | import inspect
import octobot_commons.logging as logging_util
def default_parent_inspection(element, parent):
    """
    Check whether the expected parent is one of the element's direct base classes
    :param element: the class to check
    :param parent: the expected direct base class
    :return: the check result
    """
    direct_bases = element.__bases__
    return any(base == parent for base in direct_bases)
def default_parents_inspection(element, parent):
    """
    Check whether the expected parent appears anywhere in the element's MRO
    :param element: the class to check
    :param parent: the expected ancestor class
    :return: the check result
    """
    for ancestor in element.mro():
        if ancestor == parent:
            return True
    return False
def evaluator_parent_inspection(element, parent):
    """
    Recursively check if the evaluator class has the specified parent
    :param element: the element to check
    :param parent: the expected parent
    :return: the check result (False when the hook is missing, otherwise the hook's value)
    """
    if not hasattr(element, "get_parent_evaluator_classes"):
        return False
    return element.get_parent_evaluator_classes(parent)
def trading_mode_parent_inspection(element, parent):
    """
    Check if the trading class has the specified parent
    :param element: the element to check
    :param parent: the expected parent
    :return: the check result (False when the hook is missing, otherwise the hook's value)
    """
    if not hasattr(element, "get_parent_trading_mode_classes"):
        return False
    return element.get_parent_trading_mode_classes(parent)
def get_class_from_parent_subclasses(class_string, parent):
    """
    Search the class name among the parent's direct subclasses
    :param class_string: the class name to search
    :param parent: the parent class
    :return: the class if found else None
    """
    matches = (
        candidate
        for candidate in parent.__subclasses__()
        if candidate.__name__ == class_string
    )
    return next(matches, None)
def get_deep_class_from_parent_subclasses(class_string, parent):
    """
    Search for a class name anywhere in the parent's subclass tree
    :param class_string: the class name to search
    :param parent: the root of the subclass tree
    :return: the class if found else None
    """
    direct_match = get_class_from_parent_subclasses(class_string, parent)
    if direct_match is not None:
        return direct_match
    # depth-first descent through each child's own subclass tree
    for child in parent.__subclasses__():
        deep_match = get_deep_class_from_parent_subclasses(class_string, child)
        if deep_match is not None:
            return deep_match
    return None
def get_class_from_string(
    class_string: str,
    parent,
    module,
    parent_inspection=default_parent_inspection,
    error_when_not_found: bool = False,
):
    """
    Search a class from a class string in a specified module for a specified parent
    :param class_string: the class name to search
    :param parent: the class expected parent
    :param module: the class expected module
    :param parent_inspection: predicate deciding whether a member matches the parent
    :param error_when_not_found: if errors should be raised
    :return: the class if found else None
    """
    for member_name, member in inspect.getmembers(module):
        if (
            member_name == class_string
            and hasattr(member, "__bases__")
            and parent_inspection(member, parent)
        ):
            return getattr(module, class_string)
    if error_when_not_found:
        raise ModuleNotFoundError(f"Cant find {class_string} module")
    return None
def is_abstract_using_inspection_and_class_naming(clazz):
    """
    Check if a class is abstract, either formally (abc machinery) or by the
    naming convention of carrying "abstract" in its name
    :param clazz: the class to check
    :return: the check result
    """
    named_abstract = "abstract" in clazz.__name__.lower()
    return inspect.isabstract(clazz) or named_abstract
def get_all_classes_from_parent(parent_class) -> list:
    """
    Get all sub classes from parent including multi level sub-classes
    :param parent_class: the parent class
    :return: every (direct and indirect) subclass, depth-first
    """
    collected = []
    for child in parent_class.__subclasses__():
        collected.append(child)
        if child.__subclasses__():
            # recurse into multi-level subclasses
            collected.extend(get_all_classes_from_parent(child))
    return collected
def get_single_deepest_child_class(clazz) -> object:
    """
    Follow the subclass chain down to its deepest class, assuming a single child
    at each level (logs an error and picks the first child when there are more)
    :param clazz: the class
    :return: the single deepest child class
    """
    subclasses = clazz.__subclasses__()
    if not subclasses:
        return clazz
    if len(subclasses) > 1:
        logging_util.get_logger(__name__).error(
            f"More than one child class of {clazz}, expecting one, "
            f"using {subclasses[0]}"
        )
    return get_single_deepest_child_class(subclasses[0])
/CsuPTMD-1.0.12.tar.gz/CsuPTMD-1.0.12/PTMD/maskrcnn_benchmark/modeling/roi_heads/box_head/roi_box_predictors.py | from PTMD.maskrcnn_benchmark.modeling import registry
from torch import nn
@registry.ROI_BOX_PREDICTOR.register("FastRCNNPredictor")
class FastRCNNPredictor(nn.Module):
    """Box predictor: global average pooling over the ROI feature map followed
    by two linear heads producing class logits and box regression deltas."""

    def __init__(self, config, in_channels):
        super(FastRCNNPredictor, self).__init__()
        assert in_channels is not None

        num_inputs = in_channels

        num_classes = config.MODEL.ROI_BOX_HEAD.NUM_CLASSES
        # Collapse each ROI feature map to a 1x1 spatial grid before the heads.
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.cls_score = nn.Linear(num_inputs, num_classes)
        # Class-agnostic regression predicts only 2 sets of deltas (bg/fg)
        # instead of one per class.
        num_bbox_reg_classes = 2 if config.MODEL.CLS_AGNOSTIC_BBOX_REG else num_classes
        self.bbox_pred = nn.Linear(num_inputs, num_bbox_reg_classes * 4)

        # Small-std normal init for the head weights; biases zeroed.
        nn.init.normal_(self.cls_score.weight, mean=0, std=0.01)
        nn.init.constant_(self.cls_score.bias, 0)
        nn.init.normal_(self.bbox_pred.weight, mean=0, std=0.001)
        nn.init.constant_(self.bbox_pred.bias, 0)

    def forward(self, x):
        """Return (class logits, box deltas) for a batch of ROI features."""
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)  # flatten N x C x 1 x 1 -> N x C
        cls_logit = self.cls_score(x)
        bbox_pred = self.bbox_pred(x)
        return cls_logit, bbox_pred
@registry.ROI_BOX_PREDICTOR.register("FPNPredictor")
class FPNPredictor(nn.Module):
    """Box predictor taking an already-flattened ROI representation and applying
    linear heads for class logits and box regression deltas."""

    def __init__(self, cfg, in_channels):
        super(FPNPredictor, self).__init__()
        num_classes = cfg.MODEL.ROI_BOX_HEAD.NUM_CLASSES
        representation_size = in_channels

        self.cls_score = nn.Linear(representation_size, num_classes)
        # Class-agnostic regression predicts only 2 sets of deltas (bg/fg).
        num_bbox_reg_classes = 2 if cfg.MODEL.CLS_AGNOSTIC_BBOX_REG else num_classes
        self.bbox_pred = nn.Linear(representation_size, num_bbox_reg_classes * 4)

        # Small-std normal init for the head weights; biases zeroed.
        nn.init.normal_(self.cls_score.weight, std=0.01)
        nn.init.normal_(self.bbox_pred.weight, std=0.001)
        for l in [self.cls_score, self.bbox_pred]:
            nn.init.constant_(l.bias, 0)

    def forward(self, x):
        """Return (class scores, box deltas); accepts N x C x 1 x 1 or N x C input."""
        if x.ndimension() == 4:
            # Only degenerate 1x1 spatial maps are accepted here; flatten them.
            assert list(x.shape[2:]) == [1, 1]
            x = x.view(x.size(0), -1)
        scores = self.cls_score(x)
        bbox_deltas = self.bbox_pred(x)
        return scores, bbox_deltas
def make_roi_box_predictor(cfg, in_channels):
    """Instantiate the ROI box predictor class selected by the config."""
    predictor_cls = registry.ROI_BOX_PREDICTOR[cfg.MODEL.ROI_BOX_HEAD.PREDICTOR]
    return predictor_cls(cfg, in_channels)
/Dash-ElectrumX-1.16.0.1.tar.gz/Dash-ElectrumX-1.16.0.1/electrumx/server/mempool.py | import itertools
import time
from abc import ABC, abstractmethod
from asyncio import Lock
from collections import defaultdict
from typing import Sequence, Tuple, TYPE_CHECKING, Type, Dict
import math
import attr
from aiorpcx import TaskGroup, run_in_thread, sleep
from electrumx.lib.hash import hash_to_hex_str, hex_str_to_hash
from electrumx.lib.util import class_logger, chunks
from electrumx.server.db import UTXO
if TYPE_CHECKING:
from electrumx.lib.coins import Coin
@attr.s(slots=True)
class MemPoolTx:
    """An in-mempool transaction: its spends, outputs, fee and size."""
    # (prev_tx_hash, prev_idx) for each input; generation-like inputs excluded.
    prevouts = attr.ib()  # type: Sequence[Tuple[bytes, int]]
    # A pair is a (hashX, value) tuple
    in_pairs = attr.ib()
    out_pairs = attr.ib()
    # Fee in atomic units; computed once all inputs are resolved.
    fee = attr.ib()
    # Transaction virtual size in bytes.
    size = attr.ib()
@attr.s(slots=True)
class MemPoolTxSummary:
    """Per-transaction summary returned to protocol handlers."""
    hash = attr.ib()
    fee = attr.ib()
    has_unconfirmed_inputs = attr.ib()
class DBSyncError(Exception):
    """Raised when the on-disk UTXO DB height differs from the daemon height;
    the mempool refresh loop waits and retries."""
    pass
class MemPoolAPI(ABC):
    '''A concrete instance of this class is passed to the MemPool object
    and used by it to query DB and blockchain state.'''

    @abstractmethod
    async def height(self):
        '''Query bitcoind for its height.'''

    @abstractmethod
    def cached_height(self):
        '''Return the height of bitcoind the last time it was queried,
        for any reason, without actually querying it.
        '''

    @abstractmethod
    def db_height(self):
        '''Return the height flushed to the on-disk DB.'''

    @abstractmethod
    async def mempool_hashes(self):
        '''Query bitcoind for the hashes of all transactions in its
        mempool, returned as a list.'''

    @abstractmethod
    async def raw_transactions(self, hex_hashes):
        '''Query bitcoind for the serialized raw transactions with the given
        hashes.  Missing transactions are returned as None.

        hex_hashes is an iterable of hexadecimal hash strings.'''

    @abstractmethod
    async def lookup_utxos(self, prevouts):
        '''Return a list of (hashX, value) pairs, one for each prevout if unspent,
        otherwise return None if spent or not found.

        prevouts - an iterable of (hash, index) pairs
        '''

    @abstractmethod
    async def on_mempool(self, touched, height):
        '''Called each time the mempool is synchronized.  touched is a set of
        hashXs touched since the previous call.  height is the
        daemon's height at the time the mempool was obtained.'''
class MemPool:
'''Representation of the daemon's mempool.
coin - a coin class from coins.py
api - an object implementing MemPoolAPI
Updated regularly in caught-up state. Goal is to enable efficient
response to the calls in the external interface. To that end we
maintain the following maps:
tx: tx_hash -> MemPoolTx
hashXs: hashX -> set of all hashes of txs touching the hashX
'''
def __init__(self, coin: Type['Coin'], api: MemPoolAPI, refresh_secs=5.0, log_status_secs=60.0):
assert isinstance(api, MemPoolAPI)
self.coin = coin
self.api = api
self.logger = class_logger(__name__, self.__class__.__name__)
self.txs = {}
self.hashXs = defaultdict(set) # None can be a key
self.cached_compact_histogram = []
self.refresh_secs = refresh_secs
self.log_status_secs = log_status_secs
# Prevents mempool refreshes during fee histogram calculation
self.lock = Lock()
async def _logging(self, synchronized_event):
'''Print regular logs of mempool stats.'''
self.logger.info('beginning processing of daemon mempool. '
'This can take some time...')
start = time.monotonic()
await synchronized_event.wait()
elapsed = time.monotonic() - start
self.logger.info(f'synced in {elapsed:.2f}s')
while True:
mempool_size = sum(tx.size for tx in self.txs.values()) / 1_000_000
self.logger.info(f'{len(self.txs):,d} txs {mempool_size:.2f} MB '
f'touching {len(self.hashXs):,d} addresses')
await sleep(self.log_status_secs)
await synchronized_event.wait()
async def _refresh_histogram(self, synchronized_event):
while True:
await synchronized_event.wait()
async with self.lock:
# Threaded as can be expensive
await run_in_thread(self._update_histogram, 100_000)
await sleep(self.coin.MEMPOOL_HISTOGRAM_REFRESH_SECS)
def _update_histogram(self, bin_size):
# Build a histogram by fee rate
histogram = defaultdict(int)
for tx in self.txs.values():
fee_rate = tx.fee / tx.size
# use 0.1 sat/byte resolution
# note: rounding *down* is intentional. This ensures txs
# with a given fee rate will end up counted in the expected
# bucket/interval of the compact histogram.
fee_rate = math.floor(10 * fee_rate) / 10
histogram[fee_rate] += tx.size
compact = self._compress_histogram(histogram, bin_size=bin_size)
self.logger.info(f'compact fee histogram: {compact}')
self.cached_compact_histogram = compact
@classmethod
def _compress_histogram(
cls, histogram: Dict[float, int], *, bin_size: int
) -> Sequence[Tuple[float, int]]:
'''Calculate and return a compact fee histogram as needed for
"mempool.get_fee_histogram" protocol request.
histogram: feerate (sat/byte) -> total size in bytes of txs that pay approx feerate
'''
# Now compact it. For efficiency, get_fees returns a
# compact histogram with variable bin size. The compact
# histogram is an array of (fee_rate, vsize) values.
# vsize_n is the cumulative virtual size of mempool
# transactions with a fee rate in the interval
# [rate_(n-1), rate_n)], and rate_(n-1) > rate_n.
# Intervals are chosen to create tranches containing at
# least 100kb of transactions
assert bin_size > 0
compact = []
cum_size = 0
prev_fee_rate = None
for fee_rate, size in sorted(histogram.items(), reverse=True):
# if there is a big lump of txns at this specific size,
# consider adding the previous item now (if not added already)
if size > 2 * bin_size and prev_fee_rate is not None and cum_size > 0:
compact.append((prev_fee_rate, cum_size))
cum_size = 0
bin_size *= 1.1
# now consider adding this item
cum_size += size
if cum_size > bin_size:
compact.append((fee_rate, cum_size))
cum_size = 0
bin_size *= 1.1
prev_fee_rate = fee_rate
return compact
def _accept_transactions(self, tx_map, utxo_map, touched):
'''Accept transactions in tx_map to the mempool if all their inputs
can be found in the existing mempool or a utxo_map from the
DB.
Returns an (unprocessed tx_map, unspent utxo_map) pair.
'''
hashXs = self.hashXs
txs = self.txs
deferred = {}
unspent = set(utxo_map)
# Try to find all prevouts so we can accept the TX
for hash, tx in tx_map.items():
in_pairs = []
try:
for prevout in tx.prevouts:
utxo = utxo_map.get(prevout)
if not utxo:
prev_hash, prev_index = prevout
# Raises KeyError if prev_hash is not in txs
utxo = txs[prev_hash].out_pairs[prev_index]
in_pairs.append(utxo)
except KeyError:
deferred[hash] = tx
continue
# Spend the prevouts
unspent.difference_update(tx.prevouts)
# Save the in_pairs, compute the fee and accept the TX
tx.in_pairs = tuple(in_pairs)
# Avoid negative fees if dealing with generation-like transactions
# because some in_parts would be missing
tx.fee = max(0, (sum(v for _, v in tx.in_pairs) -
sum(v for _, v in tx.out_pairs)))
txs[hash] = tx
for hashX, _value in itertools.chain(tx.in_pairs, tx.out_pairs):
touched.add(hashX)
hashXs[hashX].add(hash)
return deferred, {prevout: utxo_map[prevout] for prevout in unspent}
async def _refresh_hashes(self, synchronized_event):
'''Refresh our view of the daemon's mempool.'''
# Touched accumulates between calls to on_mempool and each
# call transfers ownership
touched = set()
while True:
height = self.api.cached_height()
hex_hashes = await self.api.mempool_hashes()
if height != await self.api.height():
continue
hashes = {hex_str_to_hash(hh) for hh in hex_hashes}
try:
async with self.lock:
await self._process_mempool(hashes, touched, height)
except DBSyncError:
# The UTXO DB is not at the same height as the
# mempool; wait and try again
self.logger.debug('waiting for DB to sync')
else:
synchronized_event.set()
synchronized_event.clear()
await self.api.on_mempool(touched, height)
touched = set()
await sleep(self.refresh_secs)
async def _process_mempool(self, all_hashes, touched, mempool_height):
# Re-sync with the new set of hashes
txs = self.txs
hashXs = self.hashXs
if mempool_height != self.api.db_height():
raise DBSyncError
# First handle txs that have disappeared
for tx_hash in (set(txs) - all_hashes):
tx = txs.pop(tx_hash)
tx_hashXs = {hashX for hashX, value in tx.in_pairs}
tx_hashXs.update(hashX for hashX, value in tx.out_pairs)
for hashX in tx_hashXs:
hashXs[hashX].remove(tx_hash)
if not hashXs[hashX]:
del hashXs[hashX]
touched |= tx_hashXs
# Process new transactions
new_hashes = list(all_hashes.difference(txs))
if new_hashes:
group = TaskGroup()
for hashes in chunks(new_hashes, 200):
coro = self._fetch_and_accept(hashes, all_hashes, touched)
await group.spawn(coro)
if mempool_height != self.api.db_height():
raise DBSyncError
tx_map = {}
utxo_map = {}
async for task in group:
deferred, unspent = task.result()
tx_map.update(deferred)
utxo_map.update(unspent)
prior_count = 0
# FIXME: this is not particularly efficient
while tx_map and len(tx_map) != prior_count:
prior_count = len(tx_map)
tx_map, utxo_map = self._accept_transactions(tx_map, utxo_map,
touched)
if tx_map:
self.logger.error(f'{len(tx_map)} txs dropped')
return touched
async def _fetch_and_accept(self, hashes, all_hashes, touched):
'''Fetch a list of mempool transactions.'''
hex_hashes_iter = (hash_to_hex_str(hash) for hash in hashes)
raw_txs = await self.api.raw_transactions(hex_hashes_iter)
def deserialize_txs(): # This function is pure
to_hashX = self.coin.hashX_from_script
deserializer = self.coin.DESERIALIZER
txs = {}
for hash, raw_tx in zip(hashes, raw_txs):
# The daemon may have evicted the tx from its
# mempool or it may have gotten in a block
if not raw_tx:
continue
tx, tx_size = deserializer(raw_tx).read_tx_and_vsize()
# Convert the inputs and outputs into (hashX, value) pairs
# Drop generation-like inputs from MemPoolTx.prevouts
txin_pairs = tuple((txin.prev_hash, txin.prev_idx)
for txin in tx.inputs
if not txin.is_generation())
txout_pairs = tuple((to_hashX(txout.pk_script), txout.value)
for txout in tx.outputs)
txs[hash] = MemPoolTx(txin_pairs, None, txout_pairs,
0, tx_size)
return txs
# Thread this potentially slow operation so as not to block
tx_map = await run_in_thread(deserialize_txs)
# Determine all prevouts not in the mempool, and fetch the
# UTXO information from the database. Failed prevout lookups
# return None - concurrent database updates happen - which is
# relied upon by _accept_transactions. Ignore prevouts that are
# generation-like.
prevouts = tuple(prevout for tx in tx_map.values()
for prevout in tx.prevouts
if prevout[0] not in all_hashes)
utxos = await self.api.lookup_utxos(prevouts)
utxo_map = {prevout: utxo for prevout, utxo in zip(prevouts, utxos)}
return self._accept_transactions(tx_map, utxo_map, touched)
#
# External interface
#
async def keep_synchronized(self, synchronized_event):
'''Keep the mempool synchronized with the daemon.'''
async with TaskGroup() as group:
await group.spawn(self._refresh_hashes(synchronized_event))
await group.spawn(self._refresh_histogram(synchronized_event))
await group.spawn(self._logging(synchronized_event))
async def balance_delta(self, hashX):
'''Return the unconfirmed amount in the mempool for hashX.
Can be positive or negative.
'''
value = 0
if hashX in self.hashXs:
for hash in self.hashXs[hashX]:
tx = self.txs[hash]
value -= sum(v for h168, v in tx.in_pairs if h168 == hashX)
value += sum(v for h168, v in tx.out_pairs if h168 == hashX)
return value
async def compact_fee_histogram(self):
'''Return a compact fee histogram of the current mempool.'''
return self.cached_compact_histogram
async def potential_spends(self, hashX):
'''Return a set of (prev_hash, prev_idx) pairs from mempool
transactions that touch hashX.
None, some or all of these may be spends of the hashX, but all
actual spends of it (in the DB or mempool) will be included.
'''
result = set()
for tx_hash in self.hashXs.get(hashX, ()):
tx = self.txs[tx_hash]
result.update(tx.prevouts)
return result
async def transaction_summaries(self, hashX):
'''Return a list of MemPoolTxSummary objects for the hashX.'''
result = []
for tx_hash in self.hashXs.get(hashX, ()):
tx = self.txs[tx_hash]
has_ui = any(hash in self.txs for hash, idx in tx.prevouts)
result.append(MemPoolTxSummary(tx_hash, tx.fee, has_ui))
return result
async def unordered_UTXOs(self, hashX):
    '''Return an unordered list of UTXO named tuples from mempool
    transactions that pay to hashX.

    This does not consider if any other mempool transactions spend
    the outputs.
    '''
    utxos = []
    for tx_hash in self.hashXs.get(hashX, ()):
        tx = self.txs.get(tx_hash)
        if tx is None:
            # hashXs and txs can briefly disagree while the mempool is
            # being updated concurrently (see the notes at the top of the
            # class); previously this raised AttributeError on tx.out_pairs.
            continue
        for pos, (hX, value) in enumerate(tx.out_pairs):
            if hX == hashX:
                # -1 height and 0 tx_num mark the UTXO as unconfirmed
                utxos.append(UTXO(-1, pos, tx_hash, 0, value))
    return utxos
/Hyperion_obf-1.0.0.tar.gz/Hyperion_obf-1.0.0/src/hyperion_obf/__init__.py | from zlib import compress
from random import choice, shuffle, randint
from re import findall
from io import BytesIO
from tokenize import tokenize, untokenize, TokenInfo
from binascii import hexlify
from builtins import *
# Snapshot of the names visible at import time (includes everything pulled in
# by `from builtins import *` above); AddBuiltins() later scans this list to
# decide which builtins the obfuscated output must re-import explicitly.
builtglob = list(globals().keys())

__title__: str = 'Hyperion_obf'
__author__: str = 'BillyTheGoat356 & BlueRed (module created by LeRatGondin)'
__version__: str = '1.0.0'
__licence__: str = 'EPL-2'
__doc__: str = 'https://pypi.org/project/Hyperion_obf/'

# NOTE(review): the raw string below is a bare expression statement, not an
# assignment -- __doc__ was already set above, so this text is evaluated,
# sliced, and immediately discarded at import time.
r"""
|:| # Hyperion_Obf
|:| ===========
|:| PyBayfile is a Python module that implement the script "https://github.com/billythegoat356/Hyperion"
|:|
|:| exemple :
|:| import Hyperion_obf
|:|
|:| # obfuscate a simple code
|:| print(Hyperion_obf.obfuscate("print('Hello, world!')"))
"""[1:-1]
class Hyperion:
    """Python source obfuscator (port of github.com/billythegoat356/Hyperion).

    The constructor runs the whole obfuscation pipeline immediately and
    leaves the obfuscated source in ``self.content``.

    NOTE(review): this file appears to have lost its original leading
    whitespace; the indentation below is reconstructed from the control
    flow.  Multi-line string literals are kept exactly as found, so the
    templates embedded in them may not match the upstream project --
    verify against https://github.com/billythegoat356/Hyperion.
    """

    def __init__(self, content: str, clean=True, obfcontent=True, renlibs=True, renvars=True, addbuiltins=True, randlines=True, shell=True, camouflate=True, safemode=True, ultrasafemode=False) -> None:
        # Ultra-safe mode disables every transformation that re-parses the
        # source line-by-line and can therefore break unusual layouts.
        if ultrasafemode == True:
            randlines, shell, renlibs, renvars = False, False, False, False
        self.content = "exec('')\n\n" + content
        self.camouflate = camouflate
        self.add_imports = []
        self.impcontent2 = []
        self.safemode = safemode
        if addbuiltins:
            self.AddBuiltins()
        self.CreateVars()
        if renlibs:
            valid = self.RenameImports()
            if renvars and valid:
                self.RenameVars()
        self.strings = {}
        if obfcontent:
            self.ObfContent()
        if clean:
            self.CleanCode()
        # Sources containing bare-bracket lines cannot survive the
        # line-oriented passes below.
        if not self._verify_lin(content):
            randlines, shell = False, False
        if randlines:
            self.RandLines()
        if shell:
            self.Shell()
        self.Organise()
        self.AntiSkid()
        if clean:
            self.CleanCode()
        self.Compress()
        if camouflate:
            self.Camouflate()
        else:
            # Compress() leaves self.content as a 3-element list.
            self.content = ';'.join(self.content)

    # Layers

    def AntiSkid(self):
        # Prepend a credit banner inside the camouflage wrapper only.
        if self.camouflate:
            self.content = fr"""
# GG! You just deobfuscated a file obfuscated with Hyperion
# Congratulations!
# https://github.com/billythegoat356/Hyperion
# by billythegoat356 and BlueRed
# Module created by LeRatGondin
{self.content}""".strip()

    def AddBuiltins(self):
        # Explicitly import every builtin that is actually *called* in the
        # source, so later shadowing tricks keep them resolvable.
        imp = "from builtins import " + ','.join(f'{var}' for var in builtglob if not var.startswith(
            '__') and var not in ('None', 'True', 'False') and f'{var}(' in self.content) + '\n'
        if imp == "from builtins import \n":
            imp = ""
        self.content = imp + self.content

    def CreateVars(self):
        # Random aliases for globals/locals/__import__/vars used throughout.
        self.globals = self._randvar()
        self.locals = self._randvar()
        self.vars = self._randvar()
        self.__import__ = self._randvar()
        imports = self._to_import
        # NOTE(review): format() receives 5 args but only {0}-{3} appear in
        # the template; the final (self.unhexlify) argument is unused here.
        impcontent = """
{0}()['{1}']=locals
{1}()['{2}']=__import__
{0}()['{3}']={2}('builtins').vars"""[1:].format(self.globals, self.locals, self.__import__, self.vars, self.unhexlify).splitlines()
        nimpcontent = [
            f"{self._randglob()}()['{imports[imp]}']={imp}" for imp in imports]
        shuffle(nimpcontent)
        impcontent.extend(iter(nimpcontent))
        self.local_import = f"locals()['{self.globals}']=globals"
        self.impcontent = impcontent

    def RenameImports(self):
        # Alias every imported name; returns False (and skips renaming) on
        # star imports, which _gather_imports() refuses to handle.
        _imports = self._gather_imports()
        if _imports == False:
            return False
        imports = []
        for imp in _imports:
            imports.extend(iter(imp))
        self.imports = {}
        for imp in imports:
            self.imports[imp] = self._randvar()
        impcontent = [
            f"{self._randglob()}()['{self.imports[imp]}']={self._randglob()}()[{self._protect(imp)}]" for imp in self.imports]
        shuffle(impcontent)
        # Pull the original import lines out of the body; Organise() later
        # re-executes them through exec().
        self.add_imports = [
            lin for lin in self.content.splitlines() if self._is_valid(lin)]
        self.content = '\n'.join(
            lin for lin in self.content.splitlines() if lin not in self.add_imports)
        self.impcontent2 = iter(impcontent)
        return True

    def RenameVars(self):
        # Token-level pass: rename assigned names / def / class identifiers,
        # then fix up every later use of the renamed symbols.
        f = BytesIO(self.content.encode('utf-8'))
        self.tokens = list(tokenize(f.readline))
        strings = {}
        ntokens = []
        passed = []
        for token in self.tokens:
            string, type = token.string, token.type
            if type == 1:
                if (
                    ((self.tokens[self.tokens.index(token)+1].string == '=' and self._is_not_arg(string)) or
                     self.tokens[self.tokens.index(token)-1].string in ('def', 'class')) and
                    self._check_fstring(string) and
                    self._is_not_library(token=token) and
                    string not in passed and
                    string not in self.imports and
                    (not string.startswith('__') and not string.endswith('__'))
                ):
                    string = self._randvar()
                    strings[token.string] = string
                elif string in strings and self._is_not_library(token=token) and self.tokens[self.tokens.index(token)+1].string != '=':
                    string = strings[string]
                elif string in self.imports and self._is_exact_library(token=token):
                    if ((self.tokens[self.tokens.index(token)+1].string != '=') and
                            self.tokens[self.tokens.index(token)-1].string not in ('def', 'class')):
                        string = self.imports[string]
                    else:
                        passed.append(string)
            ntokens.append(
                TokenInfo(type, string, token.start, token.end, token.line))
        self.content = untokenize(ntokens).decode('utf-8')

    def ObfContent(self):
        # Replace booleans (NAME), ints (NUMBER) and strings (STRING) with
        # opaque lookups collected in self.strings.
        f = BytesIO(self.content.encode('utf-8'))
        self.tokens = list(tokenize(f.readline))
        # input('\n'.join(str(tok) for tok in self.tokens))
        ntokens = []
        for token in self.tokens:
            string, type = token.string, token.type
            if type == 1:
                if string in ('True', 'False'):
                    string = self._obf_bool(string)
            elif type == 2:
                string = self._obf_int(string)
            elif type == 3:
                string = self._obf_str(string)
            ntokens.append(
                TokenInfo(type, string, token.start, token.end, token.line))
        self.ostrings = self.strings
        self.lambdas = []
        self._add_lambdas()
        strings = [f"{self.vars}()[{self._protect(var)}]={value}" for var,
                   value in self.strings.items()]
        shuffle(strings)
        self.strings = strings
        self.content = untokenize(ntokens).decode('utf-8')

    def CleanCode(self):
        self.RemoveComments()
        self.CompressCode()

    def RandLines(self):
        # Interleave harmless fake lines between real ones, skipping spots
        # where a continuation ('elif', trailing comma, ...) follows.
        content = []
        lines = self.content.splitlines()
        for lin, nextlin in zip(lines, range(len(lines))):
            content.append(lin)
            if (
                nextlin == len(lines)-1 or
                self._get_first_statement(lines[nextlin+1]) in ('elif', 'else', 'except', 'finally') or
                lin.strip()[-1] == ','
            ):
                continue
            fakelin = self._fake_lin(self._get_indentations(lines[nextlin+1]))
            content.append(fakelin)
        self.content = '\n'.join(content)

    def Shell(self):
        # Wrap every top-level chunk in exec(...), then each of those in
        # eval(compile(...)).
        chunks = self._get_chunks()
        chunks = [
            f"{self._protect_var(self.exec)}({self._protect(chunk, r=1)})" for chunk in chunks]
        chunks = [
            f"""{self._protect_var(self.eval)}({self._protect_var(self.compile)}({self._protect(chunk, char=2)},filename={self._protect(self._randvar())},mode={self._protect('eval')}))""" for chunk in chunks]
        self.content = '\n'.join(chunks)

    def Organise(self):
        # Assemble the final execution order: bootstrap aliases, imports,
        # the string table, then the (possibly shelled) body.
        gd_vars = [f"{self.globals}()[{self._protect(self.getattr, basic=True, )}]=getattr",
                   f"{self.globals}()[{self._protect(self.dir, basic=True)}]=dir"]
        shuffle(gd_vars)
        exec_var = f"{self.globals}()[{self._protect(self.exec)}]={self._protect_built('exec')}"
        add_imports = [
            f"{self.globals}()[{self._protect(self.exec)}]({self._protect(imp.strip())})" for imp in self.add_imports]
        self.content = self.local_import + '\n' + '\n'.join(gd_vars) + '\n' + '\n'.join(self.impcontent) + '\n' + exec_var + '\n' + '\n'.join(
            add_imports) + '\n' + '\n'.join(self.impcontent2) + '\n' + '\n'.join(self.strings) + '\n' + self.content

    def Compress(self):
        # zlib-compress the whole payload and build the lambda chain that
        # decompresses and exec()s it at runtime.  Leaves self.content as a
        # 3-element list: [import line, lambda bindings, exec expression].
        eval_var = f"globals()['{self._hex('eval')}']"
        str_var = f"globals()['{self._hex('str')}']"
        compile_var = f"globals()['{self._hex('compile')}']"
        arg1, arg2 = self._randvar(), self._randvar()
        lambda1 = f"""(lambda {arg1}:{eval_var}({compile_var}({str_var}("{self._hex(eval_var)}({arg1})"),filename='{self._hex(self._randvar())}',mode='{self._hex('eval')}')))"""
        lambda2 = f"(lambda {arg1}:{arg1}(__import__('{self._hex('zlib')}')))"
        lambda3 = f"(lambda {arg1}:{arg1}['{self._hex('decompress')}'])"
        lambdas = [lambda1, lambda2, lambda3]
        lambda4 = f"""(lambda {arg2},{arg1}:{arg2}({arg1}))"""
        lambda5 = f"""(lambda:{lambda1}('{self._hex("__import__('builtins').exec")}'))"""
        lambdas2 = [lambda4, lambda5]
        shuffle(lambdas)
        shuffle(lambdas2)
        keys = {lamb: self._randvar() for lamb in lambdas}
        keys2 = {lamb: self._randvar() for lamb in lambdas2}
        compressed = self._compress(self.content)
        if self.camouflate:
            # Camouflate() splices the real payload back in place of the
            # "RANDOMVARS" placeholder.
            self.compressed = compressed
            compressed = "RANDOMVARS"
        decompress = f"{keys[lambda3]}({keys[lambda2]}({keys[lambda1]}('{self._hex('vars')}')))"
        exec_content = f"{keys2[lambda5]}()({keys2[lambda4]}({decompress},{compressed}))"
        all_keys = keys
        all_keys.update(keys2)
        self.content = ['from builtins import *', ','.join(
            all_keys.values()) + '=' + ','.join(all_keys.keys()), exec_content]

    def Camouflate(self):
        # Hide the compressed payload inside a plausible-looking fake class
        # full of junk math, try/except noise and sourcery-suppressed code.
        self.gen = gen = []
        content = self.content
        for _ in range(25):
            self._gen_var()
        compressed = self._split_content(self.compressed, n=2500)
        bvars = {self._randvar(): var for var in compressed}
        vars = [f"{self._rand_pass()}{' ' * 250};{gen[0]}.{gen[19]}({gen[21]}='{a}',{gen[22]}={b})" for a,
                b in bvars.items()]
        vars = '\n\n'.join(' ' * 8 + var for var in vars)
        actions = ('!=', 'is', '==', '<', '>', '>=', '<=')
        keys = ('',)
        ext = ('((var1, var2) for var2 in var3)', 'var1 if action else action2',
               '((var2, var1) for var2 in var3 if action)', '(var1 or var2 if var1 and var2 else ... or (var2, var1))')

        def generate(): return [
            '{%s: %s}' % (tuple(
                choice(
                    [repr(self._randvar2()), *gen[11:17]]
                ) for _ in range(2)
            )),
            ('(' + ', '.join(f'var{num + 1}' for num in range(randint(2, 3))) + ')').replace(
                'var1', choice(gen[11:17])
            ).replace(
                'var2', choice(gen[11:17])
            ).replace(
                'var3', choice(gen[11:17])
            ).replace(
                'var4', choice(gen[11:17])
            )
        ]
        gen2 = generate()
        for _ in range(int((20 / 2) - 1)):
            gen2.extend(generate())
        rands = [
            '\n' + (' ' * (4 * 2)) + 'try:\n' + ' ' * 3 + self._rand_gen(ext, keys, gen, gen2, actions) + '\n\n' + (' ' * (4 * 2)) + f'except {self._rand_error()}:\n' + ' ' * 3 + self._rand_gen(
                ext, keys, gen, gen2, actions) + '\n\n' + (' ' * (4 * 2)) + f'except:\n' + ' ' * 3 + f"{gen[24]}({self._rand_int()} {self._rand_op()} {self._rand_int()}) == {self._rand_type()}"
            for _ in range(4)
        ]
        randomvars = '+'.join(
            f"{gen[0]}.{gen[18]}({gen[20]}='{var}')" for var in bvars)
        sourcery = "# sourcery skip: collection-to-bool, remove-redundant-boolean, remove-redundant-except-handler"
        # NOTE(review): the template below is literal string content; its
        # original internal indentation appears lost in this copy.
        self.content = f"""
{content[0]}
from math import prod as {gen[5]}
__code__ = 'print("Hello world!")'
{gen[11]}, {gen[12]}, {gen[13]}, {gen[14]}, {gen[15]}, {gen[17]}, {gen[24]} = exec, str, tuple, map, ord, globals, type
class {gen[0]}:
def __init__(self, {gen[4]}):
self.{gen[3]} = {gen[5]}(({gen[4]}, {self._rand_int()}))
self.{gen[1]}({gen[6]}={self._rand_int()})
def {gen[1]}(self, {gen[6]} = {self._rand_type()}):
{sourcery}
self.{gen[3]} {self._rand_op()}= {self._rand_int()} {self._rand_op()} {gen[6]}
{rands[0]}
def {gen[2]}(self, {gen[7]} = {self._rand_int()}):
{sourcery}
{gen[7]} {self._rand_op()}= {self._rand_int()} {self._rand_op()} {self._rand_int()}
self.{gen[8]} != {self._rand_type()}
{rands[1]}
def {gen[18]}({gen[20]} = {self._rand_type()}):
return {gen[17]}()[{gen[20]}]
def {gen[19]}({gen[21]} = {self._rand_int()} {self._rand_op()} {self._rand_int()}, {gen[22]} = {self._rand_type()}, {gen[23]} = {gen[17]}):
{sourcery}
{gen[23]}()[{gen[21]}] = {gen[22]}
{rands[2]}
def execute(code = str):
return {gen[11]}({gen[12]}({gen[13]}({gen[14]}({gen[15]}, code))))
@property
def {gen[8]}(self):
self.{gen[9]} = '<__main__.{choice(gen)} object at 0x00000{randint(1000, 9999)}BE{randint(10000, 99999)}>'
return (self.{gen[9]}, {gen[0]}.{gen[8]})
if __name__ == '__main__':
try:
{gen[0]}.execute(code = __code__)
{gen[10]} = {gen[0]}({gen[4]} = {self._rand_int()} {self._rand_op()} {self._rand_int()})
{vars}
{self._rand_pass()}{' ' * 250};{content[1]}
{self._rand_pass()}{' ' * 250};{content[2].replace("RANDOMVARS", randomvars)}
except Exception as {gen[16]}:
if {self._rand_bool(False)}:
{gen[0]}.execute(code = {gen[12]}({gen[16]}))
elif {self._rand_bool(False)}:
{self._rand_pass(line = False)}
""".strip()

    # Exceptions

    class StarImport(Exception):
        # NOTE(review): declared but never raised anywhere in this module;
        # star imports simply make _gather_imports() return False.
        def __init__(self):
            super().__init__("Star Import is forbidden, please update your script")

    # All

    def _verify_lin(self, content):
        # Reject sources containing bare bracket-only lines; the
        # line-oriented RandLines/Shell passes cannot handle them.
        return all(lin.strip() not in ['(', '[', '{', '}', ']', ')'] for lin in content.splitlines())

    def _hex(self, var):
        # '\xNN' escape for every character of var.
        return ''.join(f"\\x{hexlify(char.encode('utf-8')).decode('utf-8')}" for char in var)

    def _randvar(self):
        # Random identifier built from visually confusable characters.
        return choice((
            ''.join(choice(('l', 'I')) for _ in range(randint(17, 25))),
            'O' + ''.join(choice(('O', '0', 'o'))
                          for _ in range(randint(17, 25))),
            ''.join(choice(('D', 'O', 'o')) for _ in range(randint(17, 25))),
            'S' + ''.join(choice(('S', '2')) for _ in range(randint(17, 25))),
            ''.join(choice(('M', 'N')) for _ in range(randint(17, 25))),
            ''.join(choice(('m', 'n')) for _ in range(randint(17, 25))),
            ''.join(choice(('X', 'W')) for _ in range(randint(17, 25))),
            ''.join(choice(('x', 'w')) for _ in range(randint(17, 25))),
            ''.join(choice(('J', 'I', 'L')) for _ in range(randint(17, 25))),
            ''.join(choice(('j', 'i', 'l')) for _ in range(randint(17, 25)))
        ))

    def _randvar2(self):
        return ''.join(choice('billythegoat356BlueRed') for _ in range(randint(5, 20)))

    def _randglob(self):
        # One of the three random globals/locals/vars aliases.
        return choice((
            self.globals,
            self.locals,
            self.vars
        ))

    def _protect(self, var, basic=False, r=0, char=1):
        # Encode a literal so it is not readable in the output: either as a
        # reversed, re-sliced string, or via binascii.unhexlify.
        char = "'" if char == 1 else '"'
        if basic:
            return f"{char}{''.join(reversed(var))}{char}[::+-+-(-(+1))]"
        if type(var) == int:
            return self._adv_int(var)
        if r == 0:
            r = randint(1, 2)
        if r == 1:
            return f"{self.unhexlify}({hexlify(var.encode('utf-8'))}).decode({self.utf8})"
        else:
            return f"{char}{''.join(reversed(var))}{char}[::+-+-(-(+{self._protect(1, basic=basic)}))]"

    def _protect_built(self, var, lib='builtins'):
        # getattr(__import__(lib), dir(...)[index(...)]) lookup of a builtin.
        protected = self._protect(lib, r=2, basic=True)
        return f"{self.getattr}({self.__import__}({protected}),{self.dir}({self.__import__}({protected}))[{self.dir}({self.__import__}({protected})).index({self._protect(var, r=2, basic=True)})])"

    # CreateVars
    @property
    def _to_import(self):
        # Allocate the random aliases for the helper builtins and map each
        # protected lookup expression to its alias.
        self.dir = self._randvar()
        self.getattr = self._randvar()
        self.exec = self._randvar()
        self.eval = self._randvar()
        self.compile = self._randvar()
        self.join = self._randvar()
        self.true = self._randvar()
        self.false = self._randvar()
        self.bool = self._randvar()
        self.str = self._randvar()
        self.float = self._randvar()
        self.unhexlify = self._randvar()
        imports = {
            self._protect_built('eval'): self.eval,
            self._protect_built('compile'): self.compile,
            "''.join": self.join,
            self._protect_built('True'): self.true,
            self._protect_built('False'): self.false,
            self._protect_built('bool'): self.bool,
            self._protect_built('str'): self.str,
            self._protect_built('float'): self.float,
            self._protect_built('unhexlify', lib='binascii'): self.unhexlify,
        }
        return imports

    @property
    def utf8(self):
        return self._protect('utf8', basic=True, r=2)

    def _gather_imports(self):
        # Returns a list of imported-name lists, or False on star imports.
        imports = [lin for lin in self.content.splitlines()
                   if self._is_valid(lin)]
        for imp in imports:
            if '*' in imp:
                return False
        return [imp.replace('import ', ',').replace('from ', '').replace(' ', '').split(',')[1:] if 'from' in imp else imp.replace('import ', '').replace(' ', '').split(',') for imp in imports]

    def _is_valid(self, lin: str):
        # Heuristic for a plain one-line import statement.
        return ('import' in lin and '"' not in lin and "'" not in lin and ';' not in lin and '.' not in lin and '#' not in lin)

    def _is_not_arg(self, string):
        if not self.safemode:
            return True
        funcs = self._gather_funcs
        for lin in self.content.splitlines():
            if string in lin:
                for imp in self.imports.keys():
                    if imp in lin and '=' in lin and lin.index(imp) < lin.index('='):
                        return False
        return all(string.lower() not in func for func in funcs)

    def _check_fstring(self, string):
        # True when the name never appears inside an f-string placeholder.
        fstrings = findall(
            r'{[' + self._fstring_legal_chars + r']*}', self.content.lower())
        return all(string.lower() not in fstring for fstring in fstrings)

    def _is_not_library(self, token: str):
        # Walk backwards over attribute chains ('a.b.c') to the root name.
        while True:
            if self.tokens[self.tokens.index(token)-1].string == '.':
                token = self.tokens[self.tokens.index(token)-2]
            else:
                break
        return token.string not in self.imports

    def _is_exact_library(self, token: str):
        # True only when the token itself is the root of the chain.
        ntoken = token
        while True:
            if self.tokens[self.tokens.index(token)-1].string == '.':
                token = self.tokens[self.tokens.index(token)-2]
            else:
                break
        return ntoken == token

    @property
    def _gather_funcs(self):
        # Argument-list part (after '(') of every 'def' line.
        lins = [lin.strip().split('(')[1] for lin in self.content.splitlines()
                if lin.strip().split(' ')[0] == 'def']
        return lins

    @property
    def _fstring_legal_chars(self):
        return """abcdefghijklmnopqrstuvxyzABCDEFGHIJKLMNOPQRSTUV_WXYZ0123456789/*-+. ,/():"'"""

    def _obf_bool(self, string):
        # Only ever called with 'True' or 'False' (see ObfContent).
        if string == 'False':
            obf = f'not({self.bool}({self.str}({self.false})))'
        elif string == 'True':
            obf = f'{self.bool}((~{self.false})or(({self.true})and({self.false})))'
        string = self._randvar()
        while string in self.strings:
            string = self._randvar()
        self.strings[string] = obf
        return string

    def _obf_int(self, string):
        if string.isdigit():
            obf = self._adv_int(int(string))
        elif string.replace('.', '').isdigit():
            obf = f"{self.float}({self._protect(string)})"
        else:
            # complex/underscored literals etc. are left untouched
            return string
        string = self._randvar()
        while string in self.strings:
            string = self._randvar()
        self.strings[string] = obf
        return string

    def _obf_str(self, string):
        obf, do = self._adv_str(string)
        if do:
            # hoist the protected literal into the string table
            string = self._randvar()
            while string in self.strings:
                string = self._randvar()
            self.strings[string] = obf
        else:
            # f-strings stay inline so their placeholders still resolve
            string = obf
        return string

    def _adv_int(self, string):
        # Rewrite an int literal as eval('a+(-b)') or eval('a-(-b)').
        n = choice((1, 2))
        if n == 1:
            rnum = randint(1000000, 9999999999)
            x = rnum - string
            return f"{self.eval}({self._protect(f'{self._underscore_int(rnum)}+(-{self._underscore_int(x)})')})"
        elif n == 2:
            rnum = randint(0, string)
            x = string - rnum
            return f"{self.eval}({self._protect(f'{self._underscore_int(x)}-(-{self._underscore_int(rnum)})')})"

    def _adv_str(self, string):
        # Returns (protected_expr, hoistable); f-strings are not hoistable.
        var = f"""{self.eval}({self._protect(string, r=1)})"""
        if (string.replace('b', '').replace('u', '').replace('r', '').replace('f', '')[0] == '"' and string.split('"')[0].count('f') != 0) or (string.replace('b', '').replace('u', '').replace('r', '').replace('f', '')[0] == "'" and string.split("'")[0].count('f') != 0):
            return var, False
        return var, True

    def _underscore_int(self, string):
        # 12345 -> '1_2_3_4_5' (keeping any sign intact)
        return '_'.join(str(string)).replace('-_', '-').replace('+_', '+')

    def RemoveComments(self):
        self.content = "".join(lin + '\n' for lin in self.content.splitlines()
                               if lin.strip() and not lin.strip().startswith('#'))

    def CompressCode(self):
        # Strip spaces around operators/punctuation until a fixed point.
        content = self.content
        while True:
            for x in ('=', '(', ')', '[', ']', '{', '}', '*', '+', '-', '/', ':', '<', '>', ','):
                content = content.replace(f' {x}', x).replace(f'{x} ', x)
            if content == self.content:
                break
            self.content = content

    def _get_indentations(self, lin):
        # Count leading spaces.
        i = 0
        for x in lin:
            if x == ' ':
                i += 1
            else:
                break
        return i

    def _get_first_statement(self, lin):
        # Leading keyword/identifier of the stripped line.
        s = ''
        for x in lin.strip():
            if x.lower() in 'abcdefghijklmnopqrstuvwxyz':
                s += x
            else:
                break
        return s

    def _add_lambdas(self):
        # Junk lambdas referenced by the fake lines RandLines() inserts.
        for _ in range(10):
            lamb = self._randvar()
            arg = self._randvar()
            self.strings[lamb] = f'lambda {arg}:{self._randglob()}()'
            self.lambdas.append(lamb)

    def _fake_lin(self, indent):
        return f"{' ' * indent}if {choice(list(self.ostrings.keys()))}:\n{' ' * indent * 2 if indent else ' '}{choice(self.lambdas)}({choice(list(self.ostrings.keys()))})"

    def _get_chunks(self):
        # Split the source into top-level statement groups (a new chunk
        # starts at each unindented, non-continuation line).
        chunks = []
        lines = self.content.splitlines()
        chunk = []
        for lin, nextlin in zip(lines, range(len(lines))):
            chunk.append(lin)
            if nextlin+1 == len(lines):
                break
            if (
                self._get_indentations(lines[nextlin+1]) == 0 and
                self._get_first_statement(lines[nextlin+1]) not in ('elif', 'else', 'except', 'finally') and
                lin.strip()[-1] != ','
            ):
                chunks.append('\n'.join(chunk))
                chunk = []
        if chunk:
            chunks.append('\n'.join(chunk))
        return chunks

    def _protect_var(self, var):
        return f"{self._randglob()}()[{self._protect(var)}]"

    def _compress(self, content):
        return compress(content.encode('utf-8'))

    def _gen_var(self):
        # Pick a not-yet-used name from _gen_vars and record it in self.gen.
        var = choice(self._gen_vars)
        while var in self.gen:
            var = choice(self._gen_vars)
        self.gen.append(var)
        return var

    def _rand_type(self):
        return choice(('type', 'None', 'Ellipsis', 'True', 'False', 'str', 'int', 'float', 'bool'))

    def _rand_int(self):
        return randint(-100000, 100000)

    def _rand_op(self):
        return choice(('+', '-', '*', '/'))

    def _rand_pass(self, line=True):
        # A plausible-looking but meaningless method-call expression.
        gen = self.gen
        a1 = f"{gen[0]}({gen[4]} = {self._rand_int()} {self._rand_op()} {self._rand_int()})"
        c1 = f"{gen[2]}({gen[7]} = {self._rand_int()} {self._rand_op()} {gen[10]}.{gen[3]})"
        c2 = f"{gen[1]}({gen[6]} = {gen[10]}.{gen[3]} {self._rand_op()} {self._rand_int()})"
        chosen = choice((
            f"{gen[10]}.{c1}",
            f"{gen[10]}.{c2}",
            f"{a1}.{c1}",
            f"{a1}.{c2}"
        ))
        return self._rand_line(chosen) if line else chosen

    def _rand_line(self, chosen):
        # Half the time, wrap the junk expression in an if/elif skeleton.
        if randint(1, 2) == 1:
            return chosen
        c2 = self._rand_pass(line=False)
        final = f"""
if {self._rand_bool(False)}:
{chosen}
elif {self._rand_bool(True)}:
{c2}
""".strip()
        return final

    def _rand_bool(self, op):
        # Comparison that is always True (op=True) or always False.
        op = '<' if op == True else '>'
        return f"{randint(100000, 499999)} {op} {randint(500000, 9999999)}"

    def _split_content(self, content, n=500):
        # Slice content into chunks of at most n items.
        ncontent = []
        while content:
            if len(content) > n:
                ncontent.append(content[:n])
            else:
                ncontent.append(content)
                break
            content = content[n:]
        return ncontent

    def _rand_gen(self, ext, keys, gen, gen2, actions):
        return ' '.join([
            choice(keys),
            choice(
                ext
            ).replace('action2', ' '.join([gen2[randint(11, 17)], choice(actions), gen[randint(11, 17)]])).replace(
                'var1', gen2[randint(11, 17)]
            ).replace(
                'var2', choice(gen[11:17])
            ).replace(
                'var3', gen2[randint(11, 17)]
            ).replace('action', ' '.join([gen[randint(11, 17)], choice(actions), gen[randint(11, 17)]])).replace(
                'var1', gen2[randint(11, 17)]
            ).replace(
                'var2', gen2[randint(11, 17)]
            ).replace(
                'var3', gen2[randint(11, 17)]
            )
        ]).strip()

    def _rand_error(self):
        return choice((
            'OSError',
            'TypeError',
            'ArithmeticError',
            'AssertionError',
            'AttributeError'
        ))

    @property
    def _gen_vars(self):
        # Pool of innocuous-looking identifiers for the camouflage layer
        # (each also available as a '_lowercase' variant).
        gen = [
            'MemoryAccess', 'StackOverflow', 'System',
            'Divide', 'Product', 'CallFunction',
            'Math', 'Calculate', 'Hypothesis',
            'Frame', 'DetectVar', 'Substract',
            'Theory', 'Statistics', 'Random',
            'Round', 'Absolute', 'Negative',
            'Algorithm', 'Run', 'Builtins',
            'Positive', 'Invert', 'Square',
            'Add', 'Multiply', 'Modulo',
            'Power', 'Floor', 'Ceil',
            'Cube', 'Walk', 'While',
        ]
        _gen = list(gen)
        gen.extend(f'_{g.lower()}' for g in _gen)
        return gen
def obfuscate(file: str = None, script: str = None, skiprenaming: bool = False, skipchunks: bool = False) -> str:
    """
    Obfuscate a file.

    :param file: The name of the file you want to be obfuscated.
    :param script: The script to obfuscate.  Ignored when *file* is given.
    :param skiprenaming: If true all the variables won't be renamed
    :param skipchunks: If true the script will skip the protection of chunks
    :return: the obfuscated source as a string
    """
    if file:
        # Use a context manager so the handle is closed promptly instead of
        # leaking; read as bytes and decode explicitly, as before.
        with open(file, mode='rb') as fh:
            script = fh.read().decode('utf-8')
    renvars, renlibs = (False, False) if skiprenaming else (True, True)
    randlines, shell = (False, False) if skipchunks else (True, True)
    Hype = Hyperion(content=script, renvars=renvars,
                    renlibs=renlibs, randlines=randlines, shell=shell)
    script = Hype.content
    return script
/Camelot-13.04.13-gpl-pyqt.tar.gz/Camelot-13.04.13-gpl-pyqt/camelot/view/action_steps/select_file.py |
from PyQt4 import QtGui, QtCore
from camelot.admin.action import ActionStep
from camelot.view.action_runner import hide_progress_dialog
from camelot.core.exception import CancelRequest
class SelectFile( ActionStep ):
    """Select one or more files to open or to process.

    :param file_name_filter: Filter on the names of the files that can
        be selected, such as 'All files (*)'.
        See :class:`QtGui.QFileDialog` for more documentation.

    .. attribute:: single

        defaults to :const:`True`, set to :const:`False` if selection
        of multiple files is allowed

    .. attribute:: existing

        defaults to :const:`True`, set to :const:`False` if non existing
        files are allowed (to save something)

    The :keyword:`yield` statement of :class:`SelectFile` returns a list
    of selected file names.  This list has only one element when single is
    set to :const:`True`.  Raises a
    :class:`camelot.core.exception.CancelRequest` when no file was selected.

    .. image:: /_static/actionsteps/select_file.png

    This action step stores its last location into the :class:`QtCore.QSettings`
    and uses it as the initial location the next time it is invoked.
    """

    def __init__( self, file_name_filter = '' ):
        self.file_name_filter = file_name_filter
        self.single = True
        self.existing = True

    def render( self, directory = None ):
        """create the file dialog widget. this method is used to unit test
        the action step.

        :param directory: the directory in which to open the dialog, None to
            use the default
        """
        # Bug fix: the `directory` argument was previously discarded
        # (`None or ''` always evaluated to ''), so the dialog never
        # re-opened in the stored last location.
        dialog = QtGui.QFileDialog( filter = self.file_name_filter,
                                    directory = (directory or '') )
        if not self.existing:
            file_mode = QtGui.QFileDialog.AnyFile
        elif self.single:
            file_mode = QtGui.QFileDialog.ExistingFile
        else:
            file_mode = QtGui.QFileDialog.ExistingFiles
        dialog.setFileMode( file_mode )
        return dialog

    def gui_run( self, gui_context ):
        settings = QtCore.QSettings()
        # last used location, stored below after a successful selection
        directory = settings.value( 'datasource' ).toString()
        dialog = self.render( directory )
        with hide_progress_dialog( gui_context ):
            if dialog.exec_() == QtGui.QDialog.Rejected:
                raise CancelRequest()
            file_names = [unicode(fn) for fn in dialog.selectedFiles()]
            if file_names:
                # remember the first selected file for the next invocation
                settings.setValue( 'datasource', QtCore.QVariant( file_names[0] ) )
            return file_names
/Glances-3.4.0.3.tar.gz/Glances-3.4.0.3/glances/cpu_percent.py | from glances.logger import logger
from glances.timer import Timer
from glances.compat import FileNotFoundError, PermissionError
import psutil
class CpuPercent(object):
    """Get and store the CPU percent.

    Stats are cached: repeated calls within `cached_timer_cpu` seconds
    return the previously computed values instead of querying psutil again.
    """

    def __init__(self, cached_timer_cpu=3):
        # cpu_name: model string from /proc/cpuinfo (Linux only)
        # cpu_hz_current / cpu_hz: current and max CPU frequency via psutil
        self.cpu_info = {'cpu_name': None, 'cpu_hz_current': None, 'cpu_hz': None}
        self.cpu_percent = 0
        self.percpu_percent = []
        # Get CPU name
        self.__get_cpu_name()
        # cached_timer_cpu is the minimum time interval between stats updates
        # since last update is passed (will retrieve old cached info instead)
        self.cached_timer_cpu = cached_timer_cpu
        self.timer_cpu = Timer(0)
        self.timer_percpu = Timer(0)
        # psutil.cpu_freq() consumes lots of CPU
        # So refresh the stats every refresh*2 (6 seconds)
        self.cached_timer_cpu_info = cached_timer_cpu * 2
        self.timer_cpu_info = Timer(0)

    def get_key(self):
        """Return the key of the per CPU list."""
        return 'cpu_number'

    def get(self, percpu=False):
        """Update and/or return the CPU using the psutil library.

        If percpu, return the percpu stats"""
        if percpu:
            return self.__get_percpu()
        else:
            return self.__get_cpu()

    def get_info(self):
        """Get additional information about the CPU (name and frequencies)."""
        # Never update more than 1 time per cached_timer_cpu_info
        if self.timer_cpu_info.finished() and hasattr(psutil, 'cpu_freq'):
            # Get the CPU freq current/max
            try:
                cpu_freq = psutil.cpu_freq()
            except Exception as e:
                # psutil.cpu_freq() can fail on some platforms/containers
                logger.debug('Can not grab CPU information ({})'.format(e))
            else:
                if hasattr(cpu_freq, 'current'):
                    self.cpu_info['cpu_hz_current'] = cpu_freq.current
                else:
                    self.cpu_info['cpu_hz_current'] = None
                if hasattr(cpu_freq, 'max'):
                    self.cpu_info['cpu_hz'] = cpu_freq.max
                else:
                    self.cpu_info['cpu_hz'] = None
                # Reset timer for cache
                self.timer_cpu_info.reset(duration=self.cached_timer_cpu_info)
        return self.cpu_info

    def __get_cpu_name(self):
        """Read the CPU model name once from /proc/cpuinfo (Linux only).

        Falls back to the generic 'CPU' label when the file is missing or
        does not have the expected layout.
        """
        # @TODO: Multisystem...
        try:
            # Bug fix: use a context manager so the file handle is closed
            # promptly instead of being leaked until garbage collection.
            with open('/proc/cpuinfo', 'r') as f:
                self.cpu_info['cpu_name'] = f.readlines()[4].split(':')[1].strip()
        except (FileNotFoundError, PermissionError, IndexError, KeyError, AttributeError):
            self.cpu_info['cpu_name'] = 'CPU'
        return self.cpu_info['cpu_name']

    def __get_cpu(self):
        """Update and/or return the CPU using the psutil library."""
        # Never update more than 1 time per cached_timer_cpu
        if self.timer_cpu.finished():
            self.cpu_percent = psutil.cpu_percent(interval=0.0)
            # Reset timer for cache
            self.timer_cpu.reset(duration=self.cached_timer_cpu)
        return self.cpu_percent

    def __get_percpu(self):
        """Update and/or return the per CPU list using the psutil library."""
        # Never update more than 1 time per cached_timer_cpu
        if self.timer_percpu.finished():
            self.percpu_percent = []
            for cpu_number, cputimes in enumerate(psutil.cpu_times_percent(interval=0.0, percpu=True)):
                cpu = {
                    'key': self.get_key(),
                    'cpu_number': cpu_number,
                    'total': round(100 - cputimes.idle, 1),
                    'user': cputimes.user,
                    'system': cputimes.system,
                    'idle': cputimes.idle,
                }
                # The following stats are for API purposes only
                # (availability of each field is platform dependent)
                if hasattr(cputimes, 'nice'):
                    cpu['nice'] = cputimes.nice
                if hasattr(cputimes, 'iowait'):
                    cpu['iowait'] = cputimes.iowait
                if hasattr(cputimes, 'irq'):
                    cpu['irq'] = cputimes.irq
                if hasattr(cputimes, 'softirq'):
                    cpu['softirq'] = cputimes.softirq
                if hasattr(cputimes, 'steal'):
                    cpu['steal'] = cputimes.steal
                if hasattr(cputimes, 'guest'):
                    cpu['guest'] = cputimes.guest
                if hasattr(cputimes, 'guest_nice'):
                    cpu['guest_nice'] = cputimes.guest_nice
                # Append new CPU to the list
                self.percpu_percent.append(cpu)
            # Reset timer for cache
            self.timer_percpu.reset(duration=self.cached_timer_cpu)
        return self.percpu_percent
# CpuPercent instance shared between plugins
# (module-level singleton created at import time, so every plugin benefits
# from the same psutil result cache)
cpu_percent = CpuPercent()
/Gbtestapi0.2-0.1a10.tar.gz/Gbtestapi0.2-0.1a10/src/gailbot/services/converter/converter.py | from typing import Dict, List, Union, Tuple
from .payload import (
load_transcribed_dir_payload,
load_audio_payload,
load_conversation_dir_payload,
PayLoadObject,
)
from gailbot.core.utils.logger import makelogger
from ..organizer.source import SourceObject
from gailbot.workspace.manager import WorkspaceManager
logger = makelogger("converter")
class Converter:
"""
Provides functionality that converts the sourceObject to payload and
keeps track of the converted payloads
"""
loaders = [
load_audio_payload,
load_transcribed_dir_payload,
load_conversation_dir_payload,
]
""" mapping payload name to payloadObject """
def __init__(self, ws_manager: WorkspaceManager) -> None:
self.ws_manager = ws_manager
self.payloads_dict: Dict[str, PayLoadObject] = dict()
def load_source(self, source: SourceObject) -> bool:
"""
Loads a given source object with the correct loader
Args:
source: SourceObject: source to load
Returns:
bool: true if successfully loaded, false if not
"""
for loader in self.loaders:
try:
payloads: List[PayLoadObject] = loader(source, self.ws_manager)
logger.info(payloads)
if isinstance(payloads, list):
self.payloads_dict[source.name] = payloads
return True
except Exception as e:
logger.error(e, exc_info=e)
return False
def __call__(
self, sources: List[SourceObject]
) -> Union[bool, Tuple[List[PayLoadObject], List[str]]]:
"""given a list of the source files, and convert them into a list of
payload objects
Args:
sources (List[SourceObject]): a list of source object
Returns:
Union[bool, List[PayLoadObject]]: a list of payload object that are
successfully converted by the converter
"""
logger.info("converter is called")
logger.info(sources)
self.payloads_dict = dict()
invalid = list()
try:
for source in sources:
logger.info(source)
if not self.load_source(source):
invalid.append(source.name)
logger.info(self.payloads_dict)
converters = sum(list(self.payloads_dict.values()), [])
return converters, invalid
except Exception as e:
logger.error(e, exc_info=e)
return False | PypiClean |
/MaterialDjango-0.2.5.tar.gz/MaterialDjango-0.2.5/materialdjango/static/materialdjango/components/bower_components/paper-spinner/README.md | [](https://travis-ci.org/PolymerElements/paper-spinner)
##<paper-spinner>
Material design: [Progress & activity](https://www.google.com/design/spec/components/progress-activity.html)
Element providing a multiple color material design circular spinner.
<!---
```
<custom-element-demo>
<template>
<script src="../webcomponentsjs/webcomponents-lite.js"></script>
<link rel="import" href="paper-spinner.html">
<link rel="import" href="paper-spinner-lite.html">
<style is="custom-style">
paper-spinner, paper-spinner-lite {
margin: 8px 8px 8px 0;
}
paper-spinner-lite.orange {
--paper-spinner-color: var(--google-yellow-500);
}
paper-spinner-lite.green {
--paper-spinner-color: var(--google-green-500);
}
paper-spinner-lite.thin {
--paper-spinner-stroke-width: 1px;
}
paper-spinner-lite.thick {
--paper-spinner-stroke-width: 6px;
}
#container {
display: flex;
}
</style>
<div id="container">
<next-code-block></next-code-block>
</div>
</template>
</custom-element-demo>
```
-->
```html
<paper-spinner active>...</paper-spinner>
<paper-spinner-lite active class="orange"></paper-spinner-lite>
<paper-spinner-lite active class="green"></paper-spinner-lite>
<paper-spinner-lite active class="thin"></paper-spinner-lite>
<paper-spinner-lite active class="thick"></paper-spinner-lite>
```
| PypiClean |
/Lightbox-2.1.tar.bz2/Lightbox-2.1/lightbox/widgets.py | __all__ = [
'default_config',
'Lightbox',
'LightboxConfig',
'lightbox_js',
'lightbox_css',
]
import pkg_resources
from scriptaculous import prototype_js, scriptaculous_js
from turbogears import url
from turbogears.widgets import CSSLink, JSLink, Widget, WidgetDescription, \
register_static_directory, JSSource
static_dir = pkg_resources.resource_filename("lightbox", "static")
register_static_directory("lightbox", static_dir)
lightbox_js = JSLink("lightbox", "javascript/lightbox.js")
lightbox_css = CSSLink("lightbox", "css/lightbox.css", media="screen")
class LightboxConfig(JSSource):
    """Widget for inserting the Lightbox configuration JavaScript into the page.

    Renders an inline <script> block defining the global ``LightboxOptions``
    object read by lightbox.js.  See ``params_doc`` / the parameter
    description for the available configuration options.
    """

    # Kid template producing the inline configuration script; the image
    # URLs are filled in by update_params() below.
    template = """
    <script type="text/javascript">
        var LightboxOptions = {
            fileLoadingImage: "${loading_img_url}",
            fileBottomNavCloseImage:"${close_img_url}",
            overlayOpacity: ${overlay_opacity},
            animate: "${animate}",
            resizeSpeed: ${resize_speed},
            borderSize: ${border_size},
            labelImage: "${label_image}",
            labelOf: "${label_of}"
        };
    </script>
    """
    # Attributes that may be overridden per-instance via keyword params
    # (consumed by the TurboGears Widget machinery).
    params = ["overlay_opacity", "animate", "resize_speed", "border_size",
        "label_image", "label_of"]

    # Defaults for the Lightbox JavaScript options.
    overlay_opacity = 0.8
    animate = 'true'  # rendered as a JavaScript boolean, hence a string
    resize_speed = 7
    border_size = 10
    label_image = "Image"
    label_of = "of"

    params_doc = {
        'overlay_opacity': 'Controls transparency of shadow overlay '
            '(0.0 - 1.0, default: 0.8)',
        'animate': 'Toggles resizing animations'
            '("true" or "false", default: "true")',
        'resize_speed': 'Controls the speed of the image resizing animations '
            '(1=slowest and 10=fastest, default: 7)',
        'border_size': 'If you adjust the padding in the CSS, you will also '
            'need to specify this setting (default: 10)',
        'label_image': 'Term to use for "Image" part of "Image # of #" label. '
            'Change it for non-english localization.',
        'label_of': 'Term to use for "of" part of "Image # of #" label. '
            'Change it for non-english localization.'
    }

    def __init__(self, **params):
        # JSSource requires a src argument; the real output comes from
        # ``template``, so a placeholder is passed here.
        super(LightboxConfig, self).__init__("dummy", **params)

    def update_params(self, d):
        """Fill in the static-file URLs referenced by the template."""
        super(LightboxConfig, self).update_params(d)
        d["static"] = url("/tg_widgets/lightbox/")
        # setdefault so callers may supply their own spinner/close images.
        d.setdefault('loading_img_url', d["static"] + 'images/loading.gif')
        d.setdefault('close_img_url', d["static"] + 'images/closelabel.gif')
# Shared default configuration instance used by Lightbox when the caller
# does not supply a ``config`` of its own.
default_config = LightboxConfig()
class Lightbox(Widget):
    """Widget that creates a Lightbox photo viewer.

    The widget value is the URL of the full-size image to display; the
    inline thumbnail link is described by 'thumb_url', 'thumb_width' and
    'thumb_height' (see the parameter descriptions for details).
    """
    template = """\
<a xmlns:py="http://purl.org/kid/ns#" href="${value}" rel="${rel}" py:attrs="dict(title=title)"><img src="${thumb_url}" width="${thumb_width}" height="${thumb_height}" border="0"/></a>
"""
    params = ['group', 'rel', 'thumb_url', 'thumb_width', 'thumb_height',
        'title']
    thumb_url = ''
    thumb_width = 80
    thumb_height = 60
    title = None
    group = None
    rel = 'lightbox'
    params_doc = {
        'thumb_url': 'URL of thumbnail image (required)',
        'thumb_width': 'Width of thumbnail image (required)',
        'thumb_height': 'Height of thumbnail image (required)',
        'title': 'Caption to show below image (optional)',
        'group': 'Name of group of related images. You can browse through all '
            'Lightbox images in the same group in one popup (optional).'
    }

    def __init__(self, config=default_config, css=lightbox_css, **params):
        # Accept either a single CSS widget or an iterable of them.
        self.css = [css] if isinstance(css, Widget) else css
        self.javascript = [prototype_js, scriptaculous_js, lightbox_js, config]
        super(Lightbox, self).__init__(**params)

    def update_params(self, d):
        super(Lightbox, self).update_params(d)
        # Grouped images share a rel of the form "lightbox[group]" so the
        # viewer can page between them.
        group = d['group']
        if group:
            d['rel'] = '%s[%s]' % (d['rel'], group)
class LightboxConfigDesc(WidgetDescription):
    """Widget-browser entry describing the LightboxConfig widget."""
    name = "Lightbox configuration"
    for_widget = LightboxConfig()
    template = "<div>Configuration for the Lightbox widget.</div>"
    full_class_name = "lightbox.LightboxConfig"
class LightboxDesc(WidgetDescription):
    """Widget-browser demo rendering three Lightbox thumbnails that share
    one group, so the popup can page between them."""
    name = "Lightbox demo"
    for_widget = Lightbox()
    show_separately = True
    template = """
    <div>
        ${for_widget.display(static + "images/image-1.jpg", thumb_url=static + "images/thumb-1.jpg", thumb_width=100, thumb_height=40, title="Just a sample image", group="mygroup")}
        ${for_widget.display(static + "images/image-3.jpg", thumb_url=static + "images/thumb-3.jpg", thumb_width=100, thumb_height=40, title="Another sample", group="mygroup")}
        ${for_widget.display(static + "images/image-4.jpg", thumb_url=static + "images/thumb-4.jpg", thumb_width=100, thumb_height=40, title="That's it", group="mygroup")}
    </div>"""

    def update_params(self, d):
        """Expose the widget static-file base URL to the demo template."""
        super(LightboxDesc, self).update_params(d)
        d["static"] = url("/tg_widgets/lightbox/")
/HalWeb-0.6.0.tar.gz/HalWeb-0.6.0/src/halicea/baseProject/static_data/jscripts/cleditor/jquery.cleditor.js | /**
@preserve CLEditor WYSIWYG HTML Editor v1.3.0
http://premiumsoftware.net/cleditor
requires jQuery v1.4.2 or later
Copyright 2010, Chris Landowski, Premium Software, LLC
Dual licensed under the MIT or GPL Version 2 licenses.
*/
// ==ClosureCompiler==
// @compilation_level SIMPLE_OPTIMIZATIONS
// @output_file_name jquery.cleditor.min.js
// ==/ClosureCompiler==
(function($) {
//==============
// jQuery Plugin
//==============
// Public plugin namespace shared by all editor instances: the default
// option set, the toolbar button definitions and the images path helper.
$.cleditor = {

  // Define the defaults used for all new cleditor instances
  defaultOptions: {
    width: 500, // width not including margins, borders or padding
    height: 250, // height not including margins, borders or padding
    controls: // controls to add to the toolbar
      "bold italic underline strikethrough subscript superscript | font size " +
      "style | color highlight removeformat | bullets numbering | outdent " +
      "indent | alignleft center alignright justify | undo redo | " +
      "rule image link unlink | cut copy paste pastetext | print source",
    colors: // colors in the color popup
      "FFF FCC FC9 FF9 FFC 9F9 9FF CFF CCF FCF " +
      "CCC F66 F96 FF6 FF3 6F9 3FF 6FF 99F F9F " +
      "BBB F00 F90 FC6 FF0 3F3 6CC 3CF 66C C6C " +
      "999 C00 F60 FC3 FC0 3C0 0CC 36F 63F C3C " +
      "666 900 C60 C93 990 090 399 33F 60C 939 " +
      "333 600 930 963 660 060 366 009 339 636 " +
      "000 300 630 633 330 030 033 006 309 303",
    fonts: // font names in the font popup
      "Arial,Arial Black,Comic Sans MS,Courier New,Narrow,Garamond," +
      "Georgia,Impact,Sans Serif,Serif,Tahoma,Trebuchet MS,Verdana",
    sizes: // sizes in the font size popup
      "1,2,3,4,5,6,7",
    styles: // styles in the style popup
      [["Paragraph", "<p>"], ["Header 1", "<h1>"], ["Header 2", "<h2>"],
      ["Header 3", "<h3>"], ["Header 4","<h4>"], ["Header 5","<h5>"],
      ["Header 6","<h6>"]],
    useCSS: false, // use CSS to style HTML when possible (not supported in ie)
    docType: // Document type contained within the editor
      '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">',
    docCSSFile: // CSS file used to style the document contained within the editor
      "",
    bodyStyle: // style to assign to document body contained within the editor
      "margin:4px; font:10pt Arial,Verdana; cursor:text"
  },

  // Define all usable toolbar buttons - the init string property is
  // expanded during initialization back into the buttons object and
  // separate object properties are created for each button.
  // e.g. buttons.size.title = "Font Size"
  buttons: {
    // name,title,command,popupName (""=use name)
    init:
    "bold,,|" +
    "italic,,|" +
    "underline,,|" +
    "strikethrough,,|" +
    "subscript,,|" +
    "superscript,,|" +
    "font,,fontname,|" +
    "size,Font Size,fontsize,|" +
    "style,,formatblock,|" +
    "color,Font Color,forecolor,|" +
    "highlight,Text Highlight Color,hilitecolor,color|" +
    "removeformat,Remove Formatting,|" +
    "bullets,,insertunorderedlist|" +
    "numbering,,insertorderedlist|" +
    "outdent,,|" +
    "indent,,|" +
    "alignleft,Align Text Left,justifyleft|" +
    "center,,justifycenter|" +
    "alignright,Align Text Right,justifyright|" +
    "justify,,justifyfull|" +
    "undo,,|" +
    "redo,,|" +
    "rule,Insert Horizontal Rule,inserthorizontalrule|" +
    "image,Insert Image,insertimage,url|" +
    "link,Insert Hyperlink,createlink,url|" +
    "unlink,Remove Hyperlink,|" +
    "cut,,|" +
    "copy,,|" +
    "paste,,|" +
    "pastetext,Paste as Text,inserthtml,|" +
    "print,,|" +
    "source,Show Source"
  },

  // imagesPath - returns the path to the images folder
  imagesPath: function() { return imagesPath(); }

};
// cleditor - creates a new editor for each of the matched textareas
// cleditor - jQuery plugin entry point. Attaches an editor to every
// matched textarea (reusing any already-attached instance) and returns
// a jQuery object holding the editor instances.
$.fn.cleditor = function(options) {

  // Accumulate the editors in a fresh jQuery object
  var $editors = $([]);

  this.each(function(i, node) {
    // Only textareas can host an editor
    if (node.tagName != "TEXTAREA")
      return;
    var editor = $.data(node, CLEDITOR) || new cleditor(node, options);
    $editors = $editors.add(editor);
  });

  return $editors;

};
//==================
// Private Variables
//==================
// Module-private constants and shared state for all editor instances.
var

  // Misc constants (string literals factored out for the minifier)
  BACKGROUND_COLOR = "backgroundColor",
  BUTTON = "button",
  BUTTON_NAME = "buttonName",
  CHANGE = "change",
  CLEDITOR = "cleditor",
  CLICK = "click",
  DISABLED = "disabled",
  DIV_TAG = "<div>",
  TRANSPARENT = "transparent",
  UNSELECTABLE = "unselectable",

  // Class name constants
  MAIN_CLASS = "cleditorMain", // main containing div
  TOOLBAR_CLASS = "cleditorToolbar", // toolbar div inside main div
  GROUP_CLASS = "cleditorGroup", // group divs inside the toolbar div
  BUTTON_CLASS = "cleditorButton", // button divs inside group div
  DISABLED_CLASS = "cleditorDisabled",// disabled button divs
  DIVIDER_CLASS = "cleditorDivider", // divider divs inside group div
  POPUP_CLASS = "cleditorPopup", // popup divs inside body
  LIST_CLASS = "cleditorList", // list popup divs inside body
  COLOR_CLASS = "cleditorColor", // color popup div inside body
  PROMPT_CLASS = "cleditorPrompt", // prompt popup divs inside body
  MSG_CLASS = "cleditorMsg", // message popup div inside body

  // Test for ie
  ie = $.browser.msie,
  ie6 = /msie\s6/i.test(navigator.userAgent),

  // Test for iPhone/iTouch/iPad
  iOS = /iphone|ipad|ipod/i.test(navigator.userAgent),

  // Popups are created once as needed and shared by all editor instances
  popups = {},

  // Used to prevent the document click event from being bound more than once
  documentClickAssigned,

  // Local copy of the buttons object
  buttons = $.cleditor.buttons;
//===============
// Initialization
//===============
// Expand the buttons.init string back into the buttons object
// and create seperate object properties for each button.
// e.g. buttons.size.title = "Font Size"
// Expand each "name,title,command,popupName" entry of buttons.init into a
// full button definition object; empty fields fall back to the name
// (title additionally gets capitalized). stripIndex records the button's
// position in the toolbar image strip.
$.each(buttons.init.split("|"), function(idx, button) {
  var items = button.split(","), name = items[0];
  buttons[name] = {
    stripIndex: idx,
    name: name,
    title: items[1] === "" ? name.charAt(0).toUpperCase() + name.substr(1) : items[1],
    command: items[2] === "" ? name : items[2],
    popupName: items[3] === "" ? name : items[3]
  };
});
// The init string has served its purpose; drop it.
delete buttons.init;
//============
// Constructor
//============
// cleditor - creates a new editor for the passed in textarea element
// cleditor - creates a new editor for the passed in textarea element.
// Builds the main container, toolbar, buttons and popups, then calls
// refresh() to create the iframe.
// FIX: declared with `var` so the constructor stays private to this
// closure; the original assignment created an implicit global `cleditor`
// (and would throw a ReferenceError in ES5 strict mode).
var cleditor = function(area, options) {

  var editor = this;

  // Get the defaults and override with options
  editor.options = options = $.extend({}, $.cleditor.defaultOptions, options);

  // Hide the textarea and associate it with this editor
  var $area = editor.$area = $(area)
    .hide()
    .data(CLEDITOR, editor)
    .blur(function() {
      // Update the iframe when the textarea loses focus
      updateFrame(editor, true);
    });

  // Create the main container and append the textarea
  var $main = editor.$main = $(DIV_TAG)
    .addClass(MAIN_CLASS)
    .width(options.width)
    .height(options.height);

  // Create the toolbar
  var $toolbar = editor.$toolbar = $(DIV_TAG)
    .addClass(TOOLBAR_CLASS)
    .appendTo($main);

  // Add the first group to the toolbar
  var $group = $(DIV_TAG)
    .addClass(GROUP_CLASS)
    .appendTo($toolbar);

  // Add the buttons to the toolbar
  $.each(options.controls.split(" "), function(idx, buttonName) {
    if (buttonName === "") return true;

    // Divider
    if (buttonName == "|") {

      // Add a new divider to the group
      var $div = $(DIV_TAG)
        .addClass(DIVIDER_CLASS)
        .appendTo($group);

      // Create a new group
      $group = $(DIV_TAG)
        .addClass(GROUP_CLASS)
        .appendTo($toolbar);
    }

    // Button
    else {

      // Get the button definition
      var button = buttons[buttonName];

      // Add a new button to the group
      var $buttonDiv = $(DIV_TAG)
        .data(BUTTON_NAME, button.name)
        .addClass(BUTTON_CLASS)
        .attr("title", button.title)
        .bind(CLICK, $.proxy(buttonClick, editor))
        .appendTo($group)
        .hover(hoverEnter, hoverLeave);

      // Prepare the button image (24px wide cells in the strip image)
      var map = {};
      if (button.css) map = button.css;
      else if (button.image) map.backgroundImage = imageUrl(button.image);
      if (button.stripIndex) map.backgroundPosition = button.stripIndex * -24;
      $buttonDiv.css(map);

      // Add the unselectable attribute for ie
      if (ie)
        $buttonDiv.attr(UNSELECTABLE, "on");

      // Create the popup
      if (button.popupName)
        createPopup(button.popupName, options, button.popupClass,
          button.popupContent, button.popupHover);

    }

  });

  // Add the main div to the DOM and append the textarea
  $main.insertBefore($area)
    .append($area);

  // Bind the document click event handler (once for all editors)
  if (!documentClickAssigned) {
    $(document).click(function(e) {
      // Dismiss all non-prompt popups
      var $target = $(e.target);
      if (!$target.add($target.parents()).is("." + PROMPT_CLASS))
        hidePopups();
    });
    documentClickAssigned = true;
  }

  // Bind the window resize event when the width or height is auto or %
  if (/auto|%/.test("" + options.width + options.height))
    $(window).resize(function() {refresh(editor);});

  // Create the iframe and resize the controls
  refresh(editor);

};
//===============
// Public Methods
//===============
var fn = cleditor.prototype,

  // Expose the following private functions as methods on the cleditor object.
  // The closure compiler will rename the private functions. However, the
  // exposed method names on the cleditor object will remain fixed.
  // Each entry is [publicName, privateFn, returnsValue]; methods without
  // the third flag return the editor instance so calls can be chained.
  methods = [
    ["clear", clear],
    ["disable", disable],
    ["execCommand", execCommand],
    ["focus", focus],
    ["hidePopups", hidePopups],
    ["sourceMode", sourceMode, true],
    ["refresh", refresh],
    ["select", select],
    ["selectedHTML", selectedHTML, true],
    ["selectedText", selectedText, true],
    ["showMessage", showMessage],
    ["updateFrame", updateFrame],
    ["updateTextArea", updateTextArea]
  ];

$.each(methods, function(idx, method) {
  fn[method[0]] = function() {
    // Every private function takes the editor as its first argument,
    // followed by the caller's arguments.
    var editor = this, args = [editor];
    // using each here would cast booleans into objects!
    for(var x = 0; x < arguments.length; x++) {args.push(arguments[x]);}
    var result = method[1].apply(editor, args);
    if (method[2]) return result;
    return editor;
  };
});
// change - shortcut for .bind("change", handler) or .trigger("change")
// change - shortcut for .bind("change", handler) or .trigger("change")
fn.change = function(handler) {
  var $editor = $(this);
  if (handler)
    return $editor.bind(CHANGE, handler);
  return $editor.trigger(CHANGE);
};
//===============
// Event Handlers
//===============
// buttonClick - click event handler for toolbar buttons
// buttonClick - click event handler for toolbar buttons.
// `this` is the editor instance (bound via $.proxy in the constructor).
// Dispatches on the pressed button: toggles the source view, shows the
// button's popup, or executes its designMode command, then refocuses
// the editor. Returning false stops the click from reaching the
// document handler (which would otherwise dismiss the popup just shown).
function buttonClick(e) {

  var editor = this,
    buttonDiv = e.target,
    buttonName = $.data(buttonDiv, BUTTON_NAME),
    button = buttons[buttonName],
    popupName = button.popupName,
    popup = popups[popupName];

  // Check if disabled
  if (editor.disabled || $(buttonDiv).attr(DISABLED) == DISABLED)
    return;

  // Fire the buttonClick event
  var data = {
    editor: editor,
    button: buttonDiv,
    buttonName: buttonName,
    popup: popup,
    popupName: popupName,
    command: button.command,
    useCSS: editor.options.useCSS
  };

  if (button.buttonClick && button.buttonClick(e, data) === false)
    return false;

  // Toggle source
  if (buttonName == "source") {

    // Show the iframe
    if (sourceMode(editor)) {
      delete editor.range;
      editor.$area.hide();
      editor.$frame.show();
      buttonDiv.title = button.title;
    }

    // Show the textarea
    else {
      editor.$frame.hide();
      editor.$area.show();
      buttonDiv.title = "Show Rich Text";
    }

    // Enable or disable the toolbar buttons
    // IE requires the timeout
    setTimeout(function() {refreshButtons(editor);}, 100);

  }

  // Check for rich text mode
  else if (!sourceMode(editor)) {

    // Handle popups
    if (popupName) {
      var $popup = $(popup);

      // URL
      if (popupName == "url") {

        // Check for selection before showing the link url popup
        if (buttonName == "link" && selectedText(editor) === "") {
          showMessage(editor, "A selection is required when inserting a link.", buttonDiv);
          return false;
        }

        // Wire up the submit button click event handler
        $popup.children(":button")
          .unbind(CLICK)
          .bind(CLICK, function() {

            // Insert the image or link if a url was entered
            var $text = $popup.find(":text"),
              url = $.trim($text.val());
            if (url !== "")
              execCommand(editor, data.command, url, null, data.button);

            // Reset the text, hide the popup and set focus
            $text.val("http://");
            hidePopups();
            focus(editor);

          });
      }

      // Paste as Text
      else if (popupName == "pastetext") {

        // Wire up the submit button click event handler
        $popup.children(":button")
          .unbind(CLICK)
          .bind(CLICK, function() {

            // Insert the unformatted text replacing new lines with break tags
            var $textarea = $popup.find("textarea"),
              text = $textarea.val().replace(/\n/g, "<br />");
            if (text !== "")
              execCommand(editor, data.command, text, null, data.button);

            // Reset the text, hide the popup and set focus
            $textarea.val("");
            hidePopups();
            focus(editor);

          });
      }

      // Show the popup if not already showing for this button
      if (buttonDiv !== $.data(popup, BUTTON)) {
        showPopup(editor, popup, buttonDiv);
        return false; // stop propagation to document click
      }

      // propagate to document click
      return;

    }

    // Print
    else if (buttonName == "print")
      editor.$frame[0].contentWindow.print();

    // All other buttons
    else if (!execCommand(editor, data.command, data.value, data.useCSS, buttonDiv))
      return false;

  }

  // Focus the editor
  focus(editor);

}
// hoverEnter - mouseenter event handler for buttons and popup items
// hoverEnter - mouseenter event handler for buttons and popup items
function hoverEnter(e) {
  // Toolbar buttons (which carry a BUTTON_NAME) highlight white;
  // popup list items highlight pale yellow.
  var $target = $(e.target).closest("div");
  var color = $target.data(BUTTON_NAME) ? "#FFF" : "#FFC";
  $target.css(BACKGROUND_COLOR, color);
}
// hoverLeave - mouseleave event handler for buttons and popup items
// hoverLeave - mouseleave event handler for buttons and popup items
function hoverLeave(e) {
  // Drop the hover highlight again.
  var $target = $(e.target).closest("div");
  $target.css(BACKGROUND_COLOR, "transparent");
}
// popupClick - click event handler for popup items
// popupClick - click event handler for popup items.
// `this` is the editor instance (bound via $.proxy in showPopup).
// Derives the command value from the clicked item (font family, size,
// block style or color), fires the optional popupClick hook, executes
// the command and dismisses the popup.
function popupClick(e) {

  var editor = this,
    popup = e.data.popup,
    target = e.target;

  // Check for message and prompt popups (they handle their own clicks)
  if (popup === popups.msg || $(popup).hasClass(PROMPT_CLASS))
    return;

  // Get the button info
  var buttonDiv = $.data(popup, BUTTON),
    buttonName = $.data(buttonDiv, BUTTON_NAME),
    button = buttons[buttonName],
    command = button.command,
    value,
    useCSS = editor.options.useCSS;

  // Get the command value
  if (buttonName == "font")
    // Opera returns the fontfamily wrapped in quotes
    value = target.style.fontFamily.replace(/"/g, "");
  else if (buttonName == "size") {
    if (target.tagName == "DIV")
      target = target.children[0];
    value = target.innerHTML;
  }
  else if (buttonName == "style")
    value = "<" + target.tagName + ">";
  else if (buttonName == "color")
    value = hex(target.style.backgroundColor);
  else if (buttonName == "highlight") {
    value = hex(target.style.backgroundColor);
    // IE uses the non-standard backcolor command for highlighting
    if (ie) command = 'backcolor';
    else useCSS = true;
  }

  // Fire the popupClick event
  var data = {
    editor: editor,
    button: buttonDiv,
    buttonName: buttonName,
    popup: popup,
    popupName: button.popupName,
    command: command,
    value: value,
    useCSS: useCSS
  };

  if (button.popupClick && button.popupClick(e, data) === false)
    return;

  // Execute the command
  if (data.command && !execCommand(editor, data.command, data.value, data.useCSS, buttonDiv))
    return false;

  // Hide the popup and focus the editor
  hidePopups();
  focus(editor);

}
//==================
// Private Functions
//==================
// checksum - returns a checksum using the Adler-32 method
function checksum(text)
{
var a = 1, b = 0;
for (var index = 0; index < text.length; ++index) {
a = (a + text.charCodeAt(index)) % 65521;
b = (b + a) % 65521;
}
return (b << 16) | a;
}
// clear - clears the contents of the editor
// clear - clears the contents of the editor
function clear(editor) {
  // Empty the textarea, then push the (now empty) content into the iframe.
  var $textarea = editor.$area;
  $textarea.val("");
  updateFrame(editor);
}
// createPopup - creates a popup and adds it to the body
// createPopup - creates a popup div, fills it according to its name
// (color grid, font list, size list, style list, url/paste prompts, or
// custom content), appends it to the body and caches it in `popups` so
// it is shared by all editor instances. Returns the raw popup element.
function createPopup(popupName, options, popupTypeClass, popupContent, popupHover) {

  // Check if popup already exists
  if (popups[popupName])
    return popups[popupName];

  // Create the popup
  var $popup = $(DIV_TAG)
    .hide()
    .addClass(POPUP_CLASS)
    .appendTo("body");

  // Add the content

  // Custom popup
  if (popupContent)
    $popup.html(popupContent);

  // Color
  else if (popupName == "color") {
    var colors = options.colors.split(" ");
    if (colors.length < 10)
      $popup.width("auto");
    $.each(colors, function(idx, color) {
      $(DIV_TAG).appendTo($popup)
        .css(BACKGROUND_COLOR, "#" + color);
    });
    popupTypeClass = COLOR_CLASS;
  }

  // Font
  else if (popupName == "font")
    // Render each font name in its own face so the list previews itself
    $.each(options.fonts.split(","), function(idx, font) {
      $(DIV_TAG).appendTo($popup)
        .css("fontFamily", font)
        .html(font);
    });

  // Size
  else if (popupName == "size")
    $.each(options.sizes.split(","), function(idx, size) {
      $(DIV_TAG).appendTo($popup)
        .html("<font size=" + size + ">" + size + "</font>");
    });

  // Style
  else if (popupName == "style")
    $.each(options.styles, function(idx, style) {
      $(DIV_TAG).appendTo($popup)
        .html(style[1] + style[0] + style[1].replace("<", "</"));
    });

  // URL
  else if (popupName == "url") {
    $popup.html('Enter URL:<br><input type=text value="http://" size=35><br><input type=button value="Submit">');
    popupTypeClass = PROMPT_CLASS;
  }

  // Paste as Text
  else if (popupName == "pastetext") {
    $popup.html('Paste your content here and click submit.<br /><textarea cols=40 rows=3></textarea><br /><input type=button value=Submit>');
    popupTypeClass = PROMPT_CLASS;
  }

  // Add the popup type class name (lists are the default kind)
  if (!popupTypeClass && !popupContent)
    popupTypeClass = LIST_CLASS;
  $popup.addClass(popupTypeClass);

  // Add the unselectable attribute to all items
  if (ie) {
    $popup.attr(UNSELECTABLE, "on")
      .find("div,font,p,h1,h2,h3,h4,h5,h6")
      .attr(UNSELECTABLE, "on");
  }

  // Add the hover effect to all items
  if ($popup.hasClass(LIST_CLASS) || popupHover === true)
    $popup.children().hover(hoverEnter, hoverLeave);

  // Add the popup to the array and return it
  popups[popupName] = $popup[0];
  return $popup[0];

}
// disable - enables or disables the editor
// disable - enables or disables the editor (both the textarea and the
// iframe design surface), then refreshes the toolbar button states.
function disable(editor, disabled) {

  // Update the textarea and save the state
  if (disabled) {
    editor.$area.attr(DISABLED, DISABLED);
    editor.disabled = true;
  }
  else {
    editor.$area.removeAttr(DISABLED);
    delete editor.disabled;
  }

  // Switch the iframe into design mode.
  // ie6 does not support designMode.
  // ie7 & ie8 do not properly support designMode="off".
  try {
    if (ie) editor.doc.body.contentEditable = !disabled;
    else editor.doc.designMode = !disabled ? "on" : "off";
  }
  // Firefox 1.5 throws an exception that can be ignored
  // when toggling designMode from off to on.
  catch (err) {}

  // Enable or disable the toolbar buttons
  refreshButtons(editor);

}
// execCommand - executes a designMode command
// execCommand - executes a designMode command against the editor's
// document, showing a message popup on failure. Returns true on success.
function execCommand(editor, command, value, useCSS, button) {

  // Restore the current ie selection
  restoreRange(editor);

  // Set the styling method (non-IE only; IE has no styleWithCSS)
  if (!ie) {
    if (useCSS === undefined || useCSS === null)
      useCSS = editor.options.useCSS;
    editor.doc.execCommand("styleWithCSS", 0, useCSS.toString());
  }

  // Execute the command and check for error.
  // IE lacks inserthtml, so paste directly into its TextRange instead.
  var success = true, description;
  if (ie && command.toLowerCase() == "inserthtml")
    getRange(editor).pasteHTML(value);
  else {
    try { success = editor.doc.execCommand(command, 0, value || null); }
    catch (err) { description = err.description; success = false; }
    if (!success) {
      if ("cutcopypaste".indexOf(command) > -1)
        showMessage(editor, "For security reasons, your browser does not support the " +
          command + " command. Try using the keyboard shortcut or context menu instead.",
          button);
      else
        showMessage(editor,
          (description ? description : "Error executing the " + command + " command."),
          button);
    }
  }

  // Enable the buttons
  refreshButtons(editor);
  return success;

}
// focus - sets focus to either the textarea or iframe
// focus - sets focus to either the textarea or iframe
function focus(editor) {
  // Deferred so the focus lands after any pending show/hide has completed.
  setTimeout(function() {
    if (!sourceMode(editor)) {
      editor.$frame[0].contentWindow.focus();
    } else {
      editor.$area.focus();
    }
    refreshButtons(editor);
  }, 0);
}
// getRange - gets the current text range object
// getRange - gets the current text range object
// (IE TextRange vs W3C Range)
function getRange(editor) {
  var selection = getSelection(editor);
  return ie ? selection.createRange() : selection.getRangeAt(0);
}
// getSelection - gets the current text range object
// getSelection - gets the current selection object
// (document.selection on IE, window.getSelection() elsewhere)
function getSelection(editor) {
  return ie
    ? editor.doc.selection
    : editor.$frame[0].contentWindow.getSelection();
}
// Returns the hex value for the passed in string.
// hex("rgb(255, 0, 0)"); // #FF0000
// hex("#FF0000"); // #FF0000
// hex("#F00"); // #FF0000
function hex(s) {
var m = /rgba?\((\d+), (\d+), (\d+)/.exec(s),
c = s.split("");
if (m) {
s = ( m[1] << 16 | m[2] << 8 | m[3] ).toString(16);
while (s.length < 6)
s = "0" + s;
}
return "#" + (s.length == 6 ? s : c[1] + c[1] + c[2] + c[2] + c[3] + c[3]);
}
// hidePopups - hides all popups
// hidePopups - hides all popups and detaches their click handlers and
// button associations.
function hidePopups() {
  for (var name in popups) {
    $(popups[name])
      .hide()
      .unbind(CLICK)
      .removeData(BUTTON);
  }
}
// imagesPath - returns the path to the images folder
// imagesPath - returns the path to the images folder
function imagesPath() {
  // The images folder lives next to the plugin stylesheet, so derive the
  // base path from the stylesheet's <link> href.
  var cssFile = "jquery.cleditor.css",
    href = $("link[href$='" + cssFile + "']").attr("href"),
    basePath = href.substr(0, href.length - cssFile.length);
  return basePath + "images/";
}
// imageUrl - Returns the css url string for a filemane
// imageUrl - returns the css url() string for a filename
function imageUrl(filename) {
  var folder = imagesPath();
  return "url(" + folder + filename + ")";
}
// refresh - creates the iframe and resizes the controls
// refresh - (re)creates the iframe, loads the textarea contents into it,
// wires up the IE selection-tracking handlers, and resizes the toolbar,
// iframe and textarea to fit the main div.
function refresh(editor) {

  var $main = editor.$main,
    options = editor.options;

  // Remove the old iframe
  if (editor.$frame)
    editor.$frame.remove();

  // Create a new iframe
  var $frame = editor.$frame = $('<iframe frameborder="0" src="javascript:true;">')
    .hide()
    .appendTo($main);

  // Load the iframe document content
  var contentWindow = $frame[0].contentWindow,
    doc = editor.doc = contentWindow.document,
    $doc = $(doc);

  doc.open();
  doc.write(
    options.docType +
    '<html>' +
    ((options.docCSSFile === '') ? '' : '<head><link rel="stylesheet" type="text/css" href="' + options.docCSSFile + '" /></head>') +
    '<body style="' + options.bodyStyle + '"></body></html>'
  );
  doc.close();

  // Work around for bug in IE which causes the editor to lose
  // focus when clicking below the end of the document.
  if (ie)
    $doc.click(function() {focus(editor);});

  // Load the content
  updateFrame(editor);

  // Bind the ie specific iframe event handlers
  if (ie) {

    // Save the current user selection. This code is needed since IE will
    // reset the selection just after the beforedeactivate event and just
    // before the beforeactivate event.
    $doc.bind("beforedeactivate beforeactivate selectionchange keypress", function(e) {

      // Flag the editor as inactive
      if (e.type == "beforedeactivate")
        editor.inactive = true;

      // Get rid of the bogus selection and flag the editor as active
      else if (e.type == "beforeactivate") {
        if (!editor.inactive && editor.range && editor.range.length > 1)
          editor.range.shift();
        delete editor.inactive;
      }

      // Save the selection when the editor is active
      else if (!editor.inactive) {
        if (!editor.range)
          editor.range = [];
        editor.range.unshift(getRange(editor));

        // We only need the last 2 selections
        while (editor.range.length > 2)
          editor.range.pop();
      }

    });

    // Restore the text range when the iframe gains focus
    $frame.focus(function() {
      restoreRange(editor);
    });

  }

  // Update the textarea when the iframe loses focus
  // (Firefox fires blur on the document, others on the window)
  ($.browser.mozilla ? $doc : $(contentWindow)).blur(function() {
    updateTextArea(editor, true);
  });

  // Enable the toolbar buttons as the user types or clicks
  $doc.click(hidePopups)
    .bind("keyup mouseup", function() {
      refreshButtons(editor);
    });

  // Show the textarea for iPhone/iTouch/iPad or
  // the iframe when design mode is supported.
  if (iOS) editor.$area.show();
  else $frame.show();

  // Wait for the layout to finish - shortcut for $(document).ready()
  $(function() {

    var $toolbar = editor.$toolbar,
      $group = $toolbar.children("div:last"),
      wid = $main.width();

    // Resize the toolbar
    var hgt = $group.offset().top + $group.outerHeight() - $toolbar.offset().top + 1;
    $toolbar.height(hgt);

    // Resize the iframe
    hgt = (/%/.test("" + options.height) ? $main.height() : parseInt(options.height)) - hgt;
    $frame.width(wid).height(hgt);

    // Resize the textarea. IE6 textareas have a 1px top
    // & bottom margin that cannot be removed using css.
    editor.$area.width(wid).height(ie6 ? hgt - 2 : hgt);

    // Switch the iframe into design mode if enabled
    disable(editor, editor.disabled);

    // Enable or disable the toolbar buttons
    refreshButtons(editor);

  });

}
// refreshButtons - enables or disables buttons based on availability
// refreshButtons - enables or disables each toolbar button based on the
// editor state (disabled / source mode / platform) and on
// queryCommandEnabled for its designMode command.
function refreshButtons(editor) {

  // Webkit requires focus before queryCommandEnabled will return anything but false
  if (!iOS && $.browser.webkit && !editor.focused) {
    editor.$frame[0].contentWindow.focus();
    window.focus();
    editor.focused = true;
  }

  // Get the object used for checking queryCommandEnabled
  // (IE exposes it on the TextRange, others on the document)
  var queryObj = editor.doc;
  if (ie) queryObj = getRange(editor);

  // Loop through each button
  var inSourceMode = sourceMode(editor);
  $.each(editor.$toolbar.find("." + BUTTON_CLASS), function(idx, elem) {

    var $elem = $(elem),
      button = $.cleditor.buttons[$.data(elem, BUTTON_NAME)],
      command = button.command,
      enabled = true;

    // Determine the state
    if (editor.disabled)
      enabled = false;
    else if (button.getEnabled) {
      // Custom buttons may supply their own getEnabled hook
      var data = {
        editor: editor,
        button: elem,
        buttonName: button.name,
        popup: popups[button.popupName],
        popupName: button.popupName,
        command: button.command,
        useCSS: editor.options.useCSS
      };
      enabled = button.getEnabled(data);
      if (enabled === undefined)
        enabled = true;
    }
    // In source mode (and on iOS) only the source toggle stays active;
    // IE cannot undo/redo through designMode.
    else if (((inSourceMode || iOS) && button.name != "source") ||
    (ie && (command == "undo" || command == "redo")))
      enabled = false;
    else if (command && command != "print") {
      if (ie && command == "hilitecolor")
        command = "backcolor";
      // IE does not support inserthtml, so it's always enabled
      if (!ie || command != "inserthtml") {
        try {enabled = queryObj.queryCommandEnabled(command);}
        catch (err) {enabled = false;}
      }
    }

    // Enable or disable the button
    if (enabled) {
      $elem.removeClass(DISABLED_CLASS);
      $elem.removeAttr(DISABLED);
    }
    else {
      $elem.addClass(DISABLED_CLASS);
      $elem.attr(DISABLED, DISABLED);
    }

  });

}
// restoreRange - restores the current ie selection
// restoreRange - restores the most recently saved IE selection
// (a no-op on other browsers, which keep their selection across focus changes)
function restoreRange(editor) {
  if (!ie)
    return;
  var saved = editor.range;
  if (saved)
    saved[0].select();
}
// select - selects all the text in either the textarea or iframe
// select - selects all the text in either the textarea or iframe
function select(editor) {
  // Deferred so the selection is applied after any pending focus change.
  setTimeout(function() {
    if (!sourceMode(editor))
      execCommand(editor, "selectall");
    else
      editor.$area.select();
  }, 0);
}
// selectedHTML - returns the current HTML selection or and empty string
// selectedHTML - returns the current HTML selection or an empty string
function selectedHTML(editor) {
  restoreRange(editor);
  var range = getRange(editor);
  // IE TextRange exposes the selected markup directly
  if (ie)
    return range.htmlText;
  // W3C: clone the selected fragment into a detached element and read
  // back its innerHTML (<layer> serves as a neutral container tag)
  var layer = $("<layer>")[0];
  layer.appendChild(range.cloneContents());
  var html = layer.innerHTML;
  layer = null;
  return html;
}
// selectedText - returns the current text selection or and empty string
// selectedText - returns the current text selection or an empty string
function selectedText(editor) {
  restoreRange(editor);
  return ie ? getRange(editor).text : getSelection(editor).toString();
}
// showMessage - alert replacement
// showMessage - displays a message in the shared "msg" popup
// (alert() replacement; the message may contain HTML)
function showMessage(editor, message, button) {
  var msgPopup = createPopup("msg", editor.options, MSG_CLASS);
  msgPopup.innerHTML = message;
  showPopup(editor, msgPopup, button);
}
// showPopup - shows a popup
// showPopup - positions and shows a popup, either just below its toolbar
// button or centered under the toolbar, and wires up its click handler.
function showPopup(editor, popup, button) {

  var offset, left, top, $popup = $(popup);

  // Determine the popup location
  if (button) {
    // Anchor just below the button, nudged 1px left
    var $button = $(button);
    offset = $button.offset();
    left = --offset.left;
    top = offset.top + $button.height();
  }
  else {
    // No button: center the popup beneath the toolbar
    var $toolbar = editor.$toolbar;
    offset = $toolbar.offset();
    left = Math.floor(($toolbar.width() - $popup.width()) / 2) + offset.left;
    top = offset.top + $toolbar.height() - 2;
  }

  // Position and show the popup (hiding any other open popup first)
  hidePopups();
  $popup.css({left: left, top: top})
    .show();

  // Assign the popup button and click event handler
  if (button) {
    $.data(popup, BUTTON, button);
    $popup.bind(CLICK, {popup: popup}, $.proxy(popupClick, editor));
  }

  // Focus the first input element if any
  setTimeout(function() {
    $popup.find(":text,textarea").eq(0).focus().select();
  }, 100);

}
// sourceMode - returns true if the textarea is showing
// (i.e. the editor is in "view source" mode rather than wysiwyg mode)
function sourceMode(editor) {
  return editor.$area.is(":visible");
}
// updateFrame - updates the iframe with the textarea contents
// Optionally runs the user supplied options.updateFrame(code) transformation,
// skipping the (potentially heavy) callback when the textarea is unchanged.
function updateFrame(editor, checkForChange) {
  var code = editor.$area.val(),
    options = editor.options,
    updateFrameCallback = options.updateFrame,
    $body = $(editor.doc.body);
  // Check for textarea change to avoid unnecessary firing
  // of potentially heavy updateFrame callbacks.
  if (updateFrameCallback) {
    var sum = checksum(code);
    if (checkForChange && editor.areaChecksum == sum)
      return;
    editor.areaChecksum = sum;
  }
  // Convert the textarea source code into iframe html
  var html = updateFrameCallback ? updateFrameCallback(code) : code;
  // Prevent script injection attacks by html encoding script tags.
  // Bug fix: the replacement string was "<" (a no-op that replaced "<" with
  // itself), leaving <script> tags active; encode them as "&lt;" instead.
  html = html.replace(/<(?=\/?script)/ig, "&lt;");
  // Update the iframe checksum
  if (options.updateTextArea)
    editor.frameChecksum = checksum(html);
  // Update the iframe and trigger the change event
  if (html != $body.html()) {
    $body.html(html);
    $(editor).triggerHandler(CHANGE);
  }
}
// updateTextArea - updates the textarea with the iframe contents
// Mirror of updateFrame: optionally runs options.updateTextArea(html) and
// skips the callback when the iframe content is unchanged.
function updateTextArea(editor, checkForChange) {
  var html = $(editor.doc.body).html(),
    options = editor.options,
    updateTextAreaCallback = options.updateTextArea,
    $area = editor.$area;
  // Check for iframe change to avoid unnecessary firing
  // of potentially heavy updateTextArea callbacks.
  if (updateTextAreaCallback) {
    var sum = checksum(html);
    if (checkForChange && editor.frameChecksum == sum)
      return;
    editor.frameChecksum = sum;
  }
  // Convert the iframe html into textarea source code
  var code = updateTextAreaCallback ? updateTextAreaCallback(html) : html;
  // Update the textarea checksum so updateFrame can detect no-op round trips
  if (options.updateFrame)
    editor.areaChecksum = checksum(code);
  // Update the textarea and trigger the change event
  if (code != $area.val()) {
    $area.val(code);
    $(editor).triggerHandler(CHANGE);
  }
}
})(jQuery); | PypiClean |
/CNN4IE-0.1.9-py3-none-any.whl/cnn4ie/attention_augmented_cnn/predict.py | import torch
import os
from configparser import ConfigParser
import pickle
from cnn4ie.attention_augmented_cnn.train import Train
from cnn4ie.util.crf_util import get_tags, format_result
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class Predict():
    """Load a trained attention-augmented CNN NER model and its vocabularies,
    then tag raw sentences (with or without a CRF output layer).

    Usage: call load_model_vocab(config_path) once, then predict(sentence).
    """

    def __init__(self):
        # All attributes are populated by load_model_vocab(); predict() is
        # unusable before that call.
        self.model = None
        self.source_vocab = None
        self.target_vocab = None
        self.max_length = 0
        self.tags = list()
        self.tags_map = dict()

    def _predict_ner(self, model, source_vocab, target_vocab, sentence, max_length):
        '''
        predict ner without crf

        :param model: trained model (non-CRF head)
        :param source_vocab: maps characters to input ids
        :param target_vocab: maps output ids back to tag strings via .itos
        :param sentence: raw sentence string; tokenized character by character
        :param max_length: sentences longer than this are truncated
        :return: list of predicted tag strings, one per (truncated) character
        '''
        model.eval()
        tokenized = list(sentence)  # character-level tokenization
        if len(tokenized) > max_length:
            tokenized = tokenized[:max_length]
        indexed = [source_vocab[t] for t in tokenized]  # convert to integer sequence
        src_tensor = torch.LongTensor(indexed)
        src_tensor = src_tensor.unsqueeze(0).to(DEVICE)  # add batch dimension
        with torch.no_grad():
            sentence_output = model(src_tensor)  # [batch_size, src_len, output_dim]
        # Bug fix: the previous code did argmax(2)[:, -1].item() (last position
        # only, yielding a plain int) and then iterated over that int, which
        # raises TypeError.  Take the best tag id for every token instead and
        # map each id back to its tag string.
        pred_ids = sentence_output.argmax(2).squeeze(0).tolist()
        pred_token = [target_vocab.itos[i] for i in pred_ids]
        return pred_token

    def _load_vocab(self, vocab_path):
        '''
        load a pickled vocabulary from disk

        :param vocab_path: path of the pickle file
        :return: the unpickled vocab object
        :raises FileNotFoundError: if vocab_path does not exist
        '''
        if os.path.exists(vocab_path):
            with open(vocab_path, 'rb') as f_words:
                vocab = pickle.load(f_words)
            return vocab
        else:
            raise FileNotFoundError("File not found!")

    def _predict_crf_ner(self, model, source_vocab, sentence, max_length, tags, tags_map):
        '''
        predict ner with crf

        :param model: trained model with a CRF decoding head
        :param source_vocab: maps characters to input ids
        :param sentence: raw sentence string; tokenized character by character
        :param max_length: sentences longer than this are truncated
        :param tags: plain entity tag names (e.g. LOC), see load_model_vocab
        :param tags_map: full label -> id mapping used by the CRF decoder
        :return: list of entity dicts (start/stop/word/type)
        '''
        model.eval()
        tokenized = list(sentence)
        if len(tokenized) > max_length:
            tokenized = tokenized[:max_length]
        indexed = [source_vocab[t] for t in tokenized]
        src_tensor = torch.LongTensor(indexed)
        src_tensor = src_tensor.unsqueeze(0).to(DEVICE)  # add batch dimension
        with torch.no_grad():
            predictions = model(src_tensor)
        # Collect the formatted spans for every entity type.
        entities = []
        for tag in tags:
            ner_tags = get_tags(predictions[0], tag, tags_map)
            entities += format_result(ner_tags, sentence, tag)
        return entities

    def load_model_vocab(self, config_path):
        '''
        load model and vocab from a config.cfg file

        :param config_path: path to a file literally named "config.cfg"
        :raises FileNotFoundError: if config_path does not point at a config.cfg
        '''
        if os.path.exists(config_path) and (os.path.split(config_path)[1].split('.')[0] == 'config') and (os.path.splitext(config_path)[1].split('.')[1] == 'cfg'):
            # The first section of the config file holds every setting we need.
            config = ConfigParser()
            config.read(config_path)
            section = config.sections()[0]

            # Paths of the pickled source/target vocabularies.
            source_vocab_path = config.get(section, "source_vocab_path")
            target_vocab_path = config.get(section, "target_vocab_path")

            self.source_vocab = self._load_vocab(source_vocab_path)
            print("source_vocab size:{}".format(len(self.source_vocab)))
            self.target_vocab = self._load_vocab(target_vocab_path)
            print("target_vocab size:{}".format(len(self.target_vocab)))

            # Derive the plain tag set (e.g. LOC from B_LOC / I_LOC) and keep
            # the full label -> id mapping for CRF decoding.
            tags = set()
            for label, idx in self.target_vocab.stoi.items():
                self.tags_map[label] = idx
                sep = label.find('_')
                if sep != -1:
                    label = label[sep + 1:]
                tags.add(label)
            self.tags = list(tags)

            # Model checkpoint path and hyper-parameters.
            model_path = config.get(section, "model_path")
            self.max_length = config.getint(section, "max_length")
            input_dim = len(self.source_vocab)
            output_dim = len(self.target_vocab)
            emb_dim = config.getint(section, "emb_dim")
            hid_dim = config.getint(section, "hid_dim")
            cnn_layers = config.getint(section, "cnn_layers")
            encoder_layers = config.getint(section, "encoder_layers")
            kernel_size = config.getint(section, "kernel_size")
            dropout = config.getfloat(section, "dropout")
            loss_name = config.get(section, 'loss')
            PAD_IDX = self.source_vocab['<pad>']

            # A 'crf' loss means the model carries a CRF decoding head.
            use_crf = (loss_name == 'crf')

            self.model = Train.load_model(input_dim,
                                          output_dim,
                                          emb_dim,
                                          hid_dim,
                                          cnn_layers,
                                          encoder_layers,
                                          kernel_size,
                                          dropout,
                                          PAD_IDX,
                                          self.max_length,
                                          model_path,
                                          use_crf=use_crf)
        else:
            raise FileNotFoundError('File config.cfg not found : ' + config_path)

    def predict(self, sentence):
        '''
        tag a single sentence

        :param sentence: raw sentence string
        :return: per-token tag list (non-CRF model) or entity dicts (CRF model)
        :raises ValueError: for None or blank input
        '''
        # Bug fix: check for None *before* calling .strip(); the old order
        # ("len(sentence.strip()) == 0 or sentence == None") raised
        # AttributeError for None input, and the old message concatenation
        # would then have raised TypeError as well.
        if sentence is None or len(sentence.strip()) == 0:
            raise ValueError('Invalid parameter:' + repr(sentence))
        if self.model.use_crf:
            predictions = self._predict_crf_ner(self.model, self.source_vocab, sentence, self.max_length, self.tags, self.tags_map)
        else:
            predictions = self._predict_ner(self.model, self.source_vocab, self.target_vocab, sentence, self.max_length)
        return predictions
if __name__ == '__main__':
    # Smoke test: load the config from the current working directory and tag a
    # sample Chinese news sentence.
    config_path = os.path.join(os.getcwd(), 'config.cfg')
    predict = Predict()
    predict.load_model_vocab(config_path)
    result = predict.predict('本报北京2月28日讯记者苏宁报道:八届全国人大常委会第三十次会议今天下午在京闭幕。')
    print('predict result:{}'.format(result))
    # predict result:[{'start': 2, 'stop': 4, 'word': '北京', 'type': 'LOC'}, {'start': 12, 'stop': 14, 'word': '苏宁', 'type': 'LOC'}, {'start': 32, 'stop': 36, 'word': '今天下午', 'type': 'T'}]
/FinDates-0.2.zip/FinDates-0.2/findates/holidays.py | import collections
import datetime
import dateutils
class OrderMapper:
    """Map each registered item to the sequential index at which it was added.

    The counter advances once per item passed to add(), so re-adding an item
    moves it to the newest index.
    """

    def __init__(self, items=None):
        self._map = dict()
        self._counter = 0
        if items:
            self.add(items)

    def add(self, items):
        """Register every item in *items*, assigning consecutive indices."""
        for position, item in enumerate(items, start=self._counter):
            self._map[item] = position
            self._counter = position + 1

    def __getitem__(self, item):
        return self._map[item]

    def __contains__(self, item):
        return item in self._map
# Names and abbreviated names of days of the week
# in English (not in locale, as definition of the calendars
# are written in English
day_names = [ 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday' ]
# Weekday numbers follow datetime.weekday(): Monday == 0 ... Sunday == 6.
MONDAY = 0
TUESDAY = 1
WEDNESDAY = 2
THURSDAY = 3
FRIDAY = 4
SATURDAY = 5
SUNDAY = 6
abbreviated_day_names = [ 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun' ]
# Index 0 is an empty placeholder so January..December map to 1..12.
month_names = ['', 'January', 'February', 'March', 'April', 'May', 'June', 'July', 'August',
               'September', 'October', 'November', 'December']
# Lowercase name -> ordinal lookups used when parsing holiday date descriptions.
month_name_order = OrderMapper(map(str.lower, month_names))
weekday_name_order = OrderMapper(map(str.lower, day_names))
def easter(year):
    """Return the date of Easter Sunday for *year* (Gregorian calendar).

    Uses the anonymous Gregorian computus, adapted from Wikipedia:
    http://en.wikipedia.org/wiki/Computus.  Returns a datetime.datetime at
    midnight, consistent with the rest of this module.
    """
    golden = year % 19                         # position in the 19-year Metonic cycle
    century, year_of_century = divmod(year, 100)
    leap_centuries, century_rem = divmod(century, 4)
    correction = (century + 8) // 25
    lunar_shift = (century - correction + 1) // 3
    epact = (19 * golden + century - leap_centuries - lunar_shift + 15) % 30
    quarters, remainder = divmod(year_of_century, 4)
    weekday_corr = (32 + 2 * century_rem + 2 * quarters - epact - remainder) % 7
    adjustment = (golden + 11 * epact + 22 * weekday_corr) // 451
    month, day_minus_one = divmod(epact + weekday_corr - 7 * adjustment + 114, 31)
    return datetime.datetime(year, month, day_minus_one + 1)
def holy_thursday(year):
    """Return the date of Holy Thursday: three days before Easter Sunday."""
    offset = datetime.timedelta(days=3)
    return easter(year) - offset
def good_friday(year):
    """Return the date of Good Friday: two days before Easter Sunday."""
    offset = datetime.timedelta(days=2)
    return easter(year) - offset
def easter_monday(year):
    """Return the date of Easter Monday: the day after Easter Sunday."""
    offset = datetime.timedelta(days=1)
    return easter(year) + offset
def ascension_thursday(year):
    """Return the date of Ascension Thursday: 39 days after Easter Sunday."""
    offset = datetime.timedelta(days=39)
    return easter(year) + offset
def pentecost(year):
    """Return the date of Pentecost: the Sunday seven weeks after Easter."""
    offset = datetime.timedelta(days=49)
    return easter(year) + offset
def whit_monday(year):
    """Return the date of Whit Monday: 50 days after Easter Sunday."""
    offset = datetime.timedelta(days=50)
    return easter(year) + offset
def trinity_sunday(year):
    """Return the date of Trinity Sunday: eight weeks after Easter Sunday."""
    offset = datetime.timedelta(days=56)
    return easter(year) + offset
def corpus_christi_thursday(year):
    """Return the date of Corpus Christi: the Thursday 60 days after Easter.

    (The original docstring was a copy-paste of Trinity Sunday's; the offset
    of 60 days lands on the Thursday after Trinity Sunday.)
    """
    offset = datetime.timedelta(days=60)
    return easter(year) + offset
def weekday_on_or_before(dt, weekday):
    """Return the latest date with the given weekday that is on or before *dt*.

    Used to determine the date of some holidays, e.g. Victoria Day in Canada
    is celebrated on the Monday on or preceding May 24th.

    Parameters
    ----------
    dt : datetime.date or datetime.datetime
    weekday : weekday number (0 - Monday, 6 - Sunday)

    Returns
    -------
    Same type as *dt*, moved back by 0-6 days.
    """
    days_back = (dt.weekday() - weekday) % 7
    return dt - datetime.timedelta(days=days_back)
def weekday_on_or_after(dt, weekday):
    """Return the earliest date with the given weekday that is on or after *dt*.

    Used to determine the date of some holidays, e.g. Midsummer Eve in
    Finland/Sweden is the Friday on or after June 18th.  (The original
    docstring cited Victoria Day, which actually uses weekday_on_or_before.)

    Parameters
    ----------
    dt : datetime.date or datetime.datetime
    weekday : weekday number (0 - Monday, 6 - Sunday)

    Returns
    -------
    Same type as *dt*, moved forward by 0-6 days.
    """
    days_forward = (weekday - dt.weekday()) % 7
    return dt + datetime.timedelta(days=days_forward)
def midsummer_eve(year):
    """Midsummer Eve (Finland/Sweden): the Friday between June 18-24 inclusive."""
    earliest = datetime.datetime(year, 6, 18)
    return weekday_on_or_after(earliest, FRIDAY)
def victoria_day(year):
    """Victoria Day (Canada): the Monday on or preceding May 24th."""
    latest = datetime.datetime(year, 5, 24)
    return weekday_on_or_before(latest, MONDAY)
# Registry of "idiosyncratic" holidays whose dates must be computed per year
# (mostly Easter-relative).  Keys are the lowercase date descriptions accepted
# by Calendar.add_holiday(); values are year -> datetime functions.
idiosyncratic_holidays = dict({
    'holy thursday': holy_thursday,
    'good friday': good_friday,
    'easter monday': easter_monday,
    'ascension thursday': ascension_thursday,
    'pentecost': pentecost,
    'whit monday': whit_monday,
    'trinity sunday': trinity_sunday,
    'corpus christi thursday': corpus_christi_thursday,
    'midsummer eve': midsummer_eve,
    'victoria day': victoria_day
    })
class Calendar:
    """Holiday calendar.

    Tracks weekend weekdays, fixed-date holidays ("July 4th"), "nth weekday
    of month" holidays ("3rd Monday in January"), and special computed
    holidays ("Good Friday"), together with the 'move' rules applied when a
    fixed-date holiday lands on a weekend.  Per-year results are cached.
    """

    def __init__(self):
        # True = working day; flipped to False by add_holiday("weekend", <day name>).
        self._weekdays = [True] * dateutils.DAYS_IN_WEEK
        # month number -> {day of month: (holiday name, move rule or None)}
        self._dated_holidays = collections.defaultdict(dict)
        # month number -> set of (order, weekday number, name) tuples
        self._numbered_weekday_holidays = collections.defaultdict(set)
        # year -> set of datetimes classified as holidays (built lazily)
        self._holiday_cache = collections.defaultdict(set)
        # year -> {date: (name, move)} for the computed special holidays
        self._idiosyncratic_years_cached = dict()
        # special-holiday key (e.g. 'good friday') -> (name, move rule)
        self._idiosyncratic = dict()

    def add_holiday(self, name, date_description, **kwargs):
        """ Add holiday to the calendar

        Parameters
        ----------
        name: name of the holiday (use 'weekend' for weekend days as it has special
            meaning for holiday moving rules)
        date_description: date description in one of the following forms:
            * A name of the day of the week. Most probably you must specify
              'name' parameter as 'weekend' in this case
            * a specific date in "month day" string (e.g. "July 1st"
            * specific weekday in the month, e.g. "2nd Thursday in June" or
              "last Monday in May"
            * a name of holiday for which add_holiday supports a special calculation,
              e.g. 'Pentecost' or 'Victoria Day'
        Keyword parameters:
            move: can be 'next' or 'closest'. If holiday falls on a weekend (days of week
            specified with 'weekend' in the previous 'add_holiday()' call, then holiday is
            moved to the next available day that was not designated as weekend or holiday.
            'closest' first try to move holiday to both the available previous day and available
            following day and picks the one closest to the actual holiday date. In that case, if you
            have Saturdays and Sundays as weekend days, holidays happening on Saturday will be moved
            to Friday and those happening on Sunday will be moved to Monday.
        """
        if 'move' in kwargs:
            move = kwargs['move']
        else:
            move = None
        # Special computed holidays, e.g. 'good friday'.
        if date_description.lower() in idiosyncratic_holidays.keys():
            self._idiosyncratic[date_description.lower()] = (name, move)
        # go through days of the week: a bare day name marks that weekday as
        # non-working (weekend).
        for day_name_idx in range(len(day_names)):
            day_name = day_names[day_name_idx]
            if date_description.lower()==day_name.lower():
                self._weekdays[day_name_idx] = False
        desc_parts = date_description.split(' ')
        # "<Month> <day>" form, e.g. "July 4th".
        if len(desc_parts)==2:
            month_name = desc_parts[0].lower()
            if month_name in month_name_order:
                daystr= desc_parts[1].lower()
                # Strip the ordinal suffix ("1st", "2nd", "3rd", "4th", ...).
                if daystr.endswith('st') or daystr.endswith('nd') or daystr.endswith('rd') or daystr.endswith('th'):
                    daystr = daystr[:-2]
                day_num = int(daystr)
                month_num = month_name_order[month_name]
                self._dated_holidays[month_num][day_num] = (name, move)
        # "<nth> <Weekday> in <Month>" form, e.g. "2nd Thursday in June".
        if len(desc_parts)==4:
            # nth weekday in month
            month_num = month_name_order[desc_parts[3].lower()]
            weekday_num = weekday_name_order[desc_parts[1].lower()]
            order_str = desc_parts[0]
            if order_str.lower() == 'last':
                order = -1
            else:
                if order_str.endswith('st') or order_str.endswith('nd') or order_str.endswith('rd') or order_str.endswith('th'):
                    order_str = order_str[:-2]
                order = int(order_str)
            self._numbered_weekday_holidays[month_num].add( (order, weekday_num, name) )

    def _is_idiosyncratic(self, dt):
        """ check if date is one of idiosyncratic holidays

        Returns a (found, name, move) triple; the per-year table of computed
        dates is built on first use and cached.
        """
        Y = dt.year
        if not Y in self._idiosyncratic_years_cached:
            # compute idiosyncratic holidays for year Y
            id_table = dict()
            for id in self._idiosyncratic.keys():
                name, move = self._idiosyncratic[id]
                id_table[idiosyncratic_holidays[id](Y)] = (name, move)
            self._idiosyncratic_years_cached[Y] = id_table
        if dt in self._idiosyncratic_years_cached[Y]:
            name, move = self._idiosyncratic_years_cached[Y][dt]
            return True, name, move
        else:
            return False, None, None

    def is_holiday(self, dt):
        """ Check if specific date is holiday

        On first query for a year, every day of that year is classified once
        and cached; a moved holiday marks its move-target day as well (the
        original weekend date also stays marked).
        """
        dt = dateutils.asdatetime(dt)
        year = dt.year
        if not year in self._holiday_cache:
            # create year cache
            self._holiday_cache[year] = set()
            current = datetime.datetime(year, 1, 1)
            end_year = datetime.datetime(year, 12, 31)
            while current <= end_year:
                b_holiday, name, move_day = self._verify_holiday(current)
                if b_holiday:
                    self._holiday_cache[year].add(current)
                    if move_day is not None:
                        self._holiday_cache[year].add(move_day)
                current = current + datetime.timedelta(days = 1)
        return dt in self._holiday_cache[year]

    def _move_holiday(self, dt, move):
        # Walk forward to the next day that is neither a weekend nor a holiday.
        next_day = dt + datetime.timedelta(days = 1)
        while not self._weekdays[next_day.weekday()] or self._verify_holiday(next_day)[0]:
            next_day += datetime.timedelta(days = 1)
        if move == 'closest':
            # Also walk backwards and pick whichever free day is nearer;
            # ties go to the following day.
            prev_day = dt - datetime.timedelta(days = 1)
            while not self._weekdays[prev_day.weekday()] or self._verify_holiday(prev_day)[0]:
                prev_day -= datetime.timedelta(days = 1)
            delta_prev = dt - prev_day
            delta_next = next_day - dt
            if delta_prev < delta_next:
                result = prev_day
            else:
                result = next_day
        else:
            # add next day to the list of moves
            result = next_day
        return result

    def _verify_holiday(self, dt):
        # Returns (is_holiday, name, move_day): move_day is the substitute
        # working day when a dated holiday with a move rule hits a weekend.
        move_day = None
        weekday = dt.weekday()
        is_weekend = not self._weekdays[weekday]
        idiosyncratic, name, move = self._is_idiosyncratic(dt)
        # No move rules for idiosyncratic holidays?
        if idiosyncratic:
            return True, name, None
        if dt.month in self._dated_holidays:
            holiday_day_descriptions = self._dated_holidays[dt.month]
            if dt.day in holiday_day_descriptions:
                name, move = holiday_day_descriptions[dt.day]
                if move is not None and is_weekend:
                    move_day = self._move_holiday(dt, move)
                return True, name, move_day
        if len(self._numbered_weekday_holidays[dt.month]) > 0:
            for tup in self._numbered_weekday_holidays[dt.month]:
                order, weekday, name = tup
                # n-th day of the month holidays are usually not moved
                # as they always happen on a particular day of the week
                if dt.weekday() == weekday:
                    # "nth" rule: day 1-7 is the 1st occurrence, 8-14 the 2nd, ...
                    if order > 0 and (((dt.day-1)//dateutils.DAYS_IN_WEEK+1) == order):
                        return True, name, None
                    if order == -1:
                        # last weekday of the month
                        eom_day = dateutils.eom(dt.year, dt.month)
                        if dt == weekday_on_or_before(eom_day, weekday):
                            return True, name, None
        # if everything else in other categories does not work - check if it is weekend
        if is_weekend:
            return True, 'weekend', None
        return False, '', move_day
def get_calendar(calendar_code):
    """Build and return a Calendar for a known market/country code.

    Supported codes (case-insensitive): 'us'/'united states', 'ca'/'canada',
    'de', 'de.frankfurt'/'de.xetra'/'de.eurex', 'uk'.

    Raises ValueError for an unknown code.
    """
    cl = Calendar()
    calendar_code = calendar_code.lower()
    if calendar_code == 'us' or calendar_code == 'united states':
        cl.add_holiday("weekend", "Saturday")
        cl.add_holiday("weekend", "Sunday")
        cl.add_holiday("New Year's Day", "January 1st", move='closest')
        cl.add_holiday("Martin Luther King's birthday", "3rd Monday in January")
        cl.add_holiday("Presidents' day", "3rd Monday in February")
        cl.add_holiday("Independence Day", "July 4th", move='closest')
        cl.add_holiday("Labor Day", "1st Monday in September")
        cl.add_holiday("Columbus Day", "2nd Monday in October")
        cl.add_holiday("Veterans Day", "November 11th", move='closest')
        cl.add_holiday("Thanksgiving", "4th Thursday in November")
        cl.add_holiday("Christmas", "December 25th", move='closest')
    elif calendar_code == "ca" or calendar_code == "canada":
        cl.add_holiday("weekend", "Saturday")
        cl.add_holiday("weekend", "Sunday")
        cl.add_holiday("New Year's Day", "January 1st", move='next')
        cl.add_holiday("Good Friday", "Good Friday")
        cl.add_holiday("Easter Monday", "Easter Monday")
        cl.add_holiday("Victoria Day", "Victoria Day")
        cl.add_holiday("Canada Day", "July 1st", move='next')
        cl.add_holiday("Civic Holiday", "1st Monday in August")
        cl.add_holiday("Labor Day", "1st Monday in September")
        cl.add_holiday("Thanksgiving", "2nd Monday in October")
        cl.add_holiday("Remembrance Day", "November 11th")
        cl.add_holiday("Christmas", "December 25th", move='next')
        cl.add_holiday("Boxing Day", "December 26th", move='next')
    elif calendar_code == 'de':
        # Nation-wide German holidays.
        cl.add_holiday("weekend", "Saturday")
        cl.add_holiday("weekend", "Sunday")
        cl.add_holiday("New Year's Day", "January 1st", move='next')
        cl.add_holiday("Good Friday", "Good Friday")
        cl.add_holiday("Easter Monday", "Easter Monday")
        cl.add_holiday("Ascension Thursday", "Ascension Thursday")
        cl.add_holiday("Whit Monday", "Whit Monday")
        cl.add_holiday("Corpus Christi Thursday", "Corpus Christi Thursday")
        cl.add_holiday("Labour Day", "May 1st")
        cl.add_holiday("National Day", "October 3rd")
        cl.add_holiday("Christmas Eve", "December 24th")
        cl.add_holiday("Christmas", "December 25th")
        cl.add_holiday("Boxing Day", "December 26th")
        cl.add_holiday("New Year's Eve", "December 31st")
    elif calendar_code == 'de.frankfurt' or calendar_code == 'de.xetra' or calendar_code == 'de.eurex':
        # German exchange calendars (fewer holidays than the national one).
        cl.add_holiday("weekend", "Saturday")
        cl.add_holiday("weekend", "Sunday")
        cl.add_holiday("New Year's Day", "January 1st", move='next')
        cl.add_holiday("Good Friday", "Good Friday")
        cl.add_holiday("Easter Monday", "Easter Monday")
        cl.add_holiday("Labour Day", "May 1st")
        cl.add_holiday("Christmas Eve", "December 24th")
        cl.add_holiday("Christmas", "December 25th")
        cl.add_holiday("Boxing Day", "December 26th")
        cl.add_holiday("New Year's Eve", "December 31st")
    elif calendar_code == 'uk':
        cl.add_holiday("weekend", "Saturday")
        cl.add_holiday("weekend", "Sunday")
        cl.add_holiday("New Year's Day", "January 1st", move='next')
        cl.add_holiday("Good Friday", "Good Friday")
        cl.add_holiday("Easter Monday", "Easter Monday")
        cl.add_holiday("Early May Bank Holiday", "1st Monday in May")
        cl.add_holiday("Spring Bank Holiday", "last Monday in May")
        cl.add_holiday("Summer Bank Holiday", "last Monday in August")
        cl.add_holiday("Christmas", "December 25th", move='next')
        cl.add_holiday("Boxing Day", "December 26th", move='next')
    else:
        raise ValueError('unknown calendar code \'%s\'' % calendar_code)
    return cl
/ChocoPY-0.1.6-py3-none-any.whl/chocopy/get_information.py | from flask import request
def get_message() -> str:
    """Return the user's utterance text from the incoming skill request.

    :return: the message text parsed from the request payload
    """
    return request.json['userRequest']['utterance']
def get_block() -> str:
    """Return the name of the block that triggered this request.

    :return: block name from the request payload
    """
    return request.json['userRequest']['block']['name']
def get_bot_name() -> str:
    """Return the name of the bot handling this request.

    :return: bot name from the request payload
    """
    return request.json['bot']['name']
def get_action_clientExtra() -> str or None:
    """
    The get_action_clientExtra function is to receive clientExtra
    :return: Client Extra
    """
    # NOTE(review): the annotation "str or None" evaluates to plain `str` at
    # runtime; the actual payload type of clientExtra cannot be confirmed from
    # here -- verify against the platform's request schema.
    action_clientExtra = request.json['action']['clientExtra']
    return action_clientExtra
def get_intent_block_name() -> str:
    """Return the intent block name from the request payload.

    :return: intent block name
    """
    return request.json['intent']['name']
def get_timezone() -> str:
    """Return the request's timezone string, e.g. 'Asia/Seoul'.

    :return: timezone identifier from the request payload
    """
    return request.json['userRequest']['timezone']
def get_user_id() -> str:
    """
    This function is receive User ID
    :return: User ID
    """
    # NOTE(review): this reads userRequest['type'], not an id field, although
    # the docstring promises a "User ID" -- confirm against the chat
    # platform's payload (a user id is usually nested deeper, e.g. under
    # userRequest['user']).
    user_id = request.json['userRequest']['type']
    return user_id
def get_params() -> dict:
    """Return the action parameters dict from the request payload.

    Parameters carry the values the user supplied (e.g. via buttons).

    :return: parameters mapping
    """
    return request.json['action']['params']
def get_detailParams() -> dict:
    """Return the detailed action parameters dict from the request payload.

    :return: detail parameters mapping
    """
    return request.json['action']['detailParams']
/Isomyr-0.1.tar.gz/Isomyr-0.1/examples/OpenGround/openground.py | import os
from pygame import (
K_DOWN, K_LEFT, K_RETURN, K_RIGHT, K_SPACE, K_UP, K_l, K_x, K_z)
from isomyr.config import Keys
from isomyr.engine import Engine
from isomyr.objects.portal import Portal
from isomyr.skin import Skin, DirectedAnimatedSkin
from isomyr.util import ImageLoader
from isomyr.thing import PhysicalThing
from isomyr.universe import worldFactory
dirname = os.path.dirname(__file__)
# Set the custom keys for the game
# Maps game actions to pygame key constants; passed to the Engine in run().
custom_keys = Keys(
    left=K_LEFT,
    right=K_RIGHT,
    up=K_UP,
    down=K_DOWN,
    jump=K_SPACE,
    pick_up=K_z,
    drop=K_x,
    examine=K_l,
    using=K_RETURN)
# An image loader that lets us run the tutorial anywhere the isomyr library
# can be imported (i.e., you don't have to be in the same directory as the
# tutorial to run it).
# Image paths passed to image_loader.load() are resolved relative to dirname.
image_loader = ImageLoader(dirname)
# Tile setup.
# Pixel size of one isometric background slice, and the pixel origin of the
# tile grid inside the full background image.
sliceSizeX, sliceSizeY = (348, 176)
originX, originY = (1084, 302)
# Pixel coordinates of each scene tile, laid out in seven columns.  These
# tuples double as the scene keys in the ``map`` dict defined below.
tiles = [
    # column 1
    (originX - 5 * (sliceSizeX / 2), originY - sliceSizeY / 2),
    (originX - 5 * (sliceSizeX / 2), originY + sliceSizeY / 2),
    # column 2
    (originX - 2 * sliceSizeX, originY - sliceSizeY),
    (originX - 2 * sliceSizeX, originY),
    # column 3
    (originX - 3 * (sliceSizeX / 2), originY - sliceSizeY / 2),
    (originX - 3 * (sliceSizeX / 2), originY + sliceSizeY / 2),
    # column 4
    (originX - sliceSizeX, originY - sliceSizeY),
    (originX - sliceSizeX, originY),
    # column 5
    (originX - sliceSizeX / 2, originY - sliceSizeY / 2),
    (originX - sliceSizeX / 2, originY + sliceSizeY / 2),
    # column 6
    (originX, originY - sliceSizeY),
    (originX, originY),
    # column 7
    (originX + sliceSizeX / 2, originY - sliceSizeY / 2),
    (originX + sliceSizeX / 2, originY + sliceSizeY / 2),
]
# Map the isometric tiles to each other.
# For each tile: the Scene object (filled in later by loadScene) and the
# neighbouring tile coordinates in each compass direction (None means the
# edge is a solid boundary instead of a portal).
# NOTE(review): the name shadows the builtin map().
map = {
    tiles[0]: {
        "scene": None,
        "N": None, "E": tiles[2], "S": tiles[3], "W": None},
    tiles[1]: {
        "scene": None,
        "N": None, "E": tiles[3], "S": None, "W": None},
    tiles[2]: {
        "scene": None,
        "N": None, "E": None, "S": tiles[4], "W": tiles[0]},
    tiles[3]: {
        "scene": None,
        "N": tiles[0], "E": tiles[4], "S": tiles[5], "W": tiles[1]},
    tiles[4]: {
        "scene": None,
        "N": tiles[2], "E": tiles[6], "S": tiles[7], "W": tiles[3]},
    tiles[5]: {
        "scene": None,
        "N": tiles[3], "E": tiles[4], "S": None, "W": None},
    tiles[6]: {
        "scene": None,
        "N": None, "E": None, "S": tiles[8], "W": tiles[4]},
    tiles[7]: {
        "scene": None,
        "N": tiles[4], "E": tiles[8], "S": tiles[9], "W": tiles[5]},
    tiles[8]: {
        "scene": None,
        "N": tiles[6], "E": tiles[10], "S": tiles[11], "W": tiles[7]},
    tiles[9]: {
        "scene": None,
        "N": tiles[7], "E": tiles[11], "S": None, "W": None},
    tiles[10]: {
        "scene": None,
        "N": None, "E": None, "S": tiles[12], "W": tiles[8]},
    tiles[11]: {
        "scene": None,
        "N": tiles[8], "E": tiles[12], "S": tiles[13], "W": tiles[9]},
    tiles[12]: {
        "scene": None,
        "N": tiles[10], "E": None, "S": None, "W": tiles[11]},
    tiles[13]: {
        "scene": None,
        "N": tiles[11], "E": None, "S": None, "W": None}}
def loadScene(world, coords):
    """Create one scene for the tile at *coords*.

    The scene is named "<x>x<y>", given a background skin loaded from
    backgrounds/<name>.png, a solid ground slab, and registered under its
    coordinates in the module-level ``map`` dict.
    """
    name = "%sx%s" % coords
    scene = world.addScene(name)
    scene.setSkin(Skin(image_loader.load("backgrounds/%s.png" % name)))
    # A large invisible slab for the player to stand on.
    ground = PhysicalThing(
        "ground", [-1000, -1000, -100], [2000, 2000, 100])
    scene.addObject(ground)
    map[coords]["scene"] = scene
    return scene
def connectScene(data):
    """Wire one scene's four edges.

    Each edge becomes either a Portal into the neighbouring scene or a solid
    PhysicalThing wall when there is no neighbour in that direction.

    :param data: one value of the module-level ``map`` dict, holding the
        Scene and the N/E/S/W neighbour tile coordinates (or None).
    """
    scene = data.get("scene")
    # The previous version repeated the same Portal/wall construction four
    # times; drive it from an edge table instead.  Each entry is
    # (direction key, boundary location, boundary size, arrival location in
    # the destination scene) -- values identical to the original code.
    edges = [
        ("N", [-20, 0, -20], [20, 180, 120], [160, 90, 0]),
        ("E", [0, -20, -20], [180, 20, 120], [90, 160, 0]),
        ("S", [180, 0, -20], [20, 180, 120], [20, 90, 0]),
        ("W", [0, 180, -20], [180, 20, 120], [90, 20, 0]),
    ]
    connections = []
    for direction, location, size, to_location in edges:
        neighbour = data.get(direction)
        if neighbour:
            destination = map[neighbour]["scene"]
            connections.append(Portal(
                name="boundary", location=location, size=size,
                toScene=destination, toLocation=to_location))
        else:
            connections.append(PhysicalThing(
                name="boundary", location=location, size=size))
    # Add all the connections (same N, E, S, W order as before).
    scene.addObjects(connections)
def connectScenes():
    """Connect every loaded scene to its neighbours (see connectScene)."""
    # Only the values are needed; the key coordinates are already carried
    # inside each data dict (the old loop unpacked an unused `coord`).
    for data in map.values():
        connectScene(data)
def setupWorld():
    """
    Create the world, the scenes that can be visited, the objects in the
    scenes, and the player.
    """
    # Create the world.
    world = worldFactory(name="Wilderness World")
    # Create all the scenes.
    for coords in tiles:
        scene = loadScene(world, coords)
        # The tile at the grid origin is where the player starts.
        if coords == (originX, originY):
            startScene = scene
    # Create the player and set his animated skin.
    explorer = startScene.addPlayer(
        name="Tim the Explorer", location=[90, 90, 50], size=[14, 14, 30])
    south_facing = image_loader.load(image_glob="explorer/south/small/*.gif")
    east_facing = image_loader.load(image_glob="explorer/east/small/*.gif")
    # Mirror the images to complete the player animation.
    explorer.setSkin(
        DirectedAnimatedSkin(south_facing, east_facing))
    connectScenes()
    return world
def run():
    """Build the world and start the isomyr engine (blocks until quit)."""
    # Setup the pygame display, the window caption and its icon.
    world = setupWorld()
    # Create an isomyr engine and start it.
    engine = Engine(world=world, offset=[202, 182], keys=custom_keys,
                    titleFile=os.path.join(dirname, "titlebar.png"))
    engine.start()
if __name__ == "__main__":
run() | PypiClean |
/DeepWon-0.5-py3-none-any.whl/utils/utils.py | __env__ = '''Envs]
Python 3.9.7 64-bit(conda 4.11.0)
macOS 12.1
'''
__version__ = '''Version]
version 0.01(beta)
'''
__doc__ = '''\
This module contains various utilities.
'''+ __env__+__version__
# print tabular data
def print_table(col_names, *cols, tab_width=50, just='right'):
    '''Make and print a table that consists of multiple columns.

    The length of 'col_names' should be the same as of 'cols'.

    Params]
        col_names: A list of column names
        cols: Vector like 1D variables, each of which will be a column of the table sequentially
        tab_width (optional): An width of a table. default = 50
        just (optional): Justification option. 'center' and 'right' are acceptible. default = 'right'

    Raises]
        ValueError: if col_names/cols lengths differ, no columns were given,
            or 'just' is neither 'right' nor 'center'.
    '''
    # Validate inputs up front.  Previously an unsupported 'just' value made
    # the row loop crash with UnboundLocalError, and calling with zero columns
    # crashed with IndexError/ZeroDivisionError; both now fail fast.
    if len(col_names) != len(cols):
        raise ValueError('Length of col_names and cols should be same')
    if not cols:
        raise ValueError('At least one column is required')
    if just not in ('right', 'center'):
        raise ValueError("just should be either 'right' or 'center'")

    # Column names
    print("=" * tab_width)
    cols_str = ''.join([str(name).center(int((tab_width // len(col_names)) * 0.95))
                        for name in col_names])
    print(cols_str)
    print('-' * tab_width)

    # Widest cell of each column, used to pad all cells of that column alike.
    max_data_len = []
    for col in cols:
        widest = 0
        for item in col:
            widest = max(widest, len(str(item)))
        max_data_len.append(widest)

    # Print data row by row: each cell is padded to the column's widest cell,
    # then centred inside the column's slot of the table.
    for row in range(len(cols[0])):
        if just == 'right':
            row_str = ''.join([str(data[row]).rjust(max_data_len[i]).center(tab_width // len(cols))
                               for i, data in enumerate(cols)])
        else:  # 'center' (validated above)
            row_str = ''.join([str(data[row]).center(max_data_len[i]).center(tab_width // len(cols))
                               for i, data in enumerate(cols)])
        print(row_str)
    print('=' * tab_width)
# Get model size
# https://stackoverflow.com/questions/43137288/how-to-determine-needed-memory-of-keras-model
def get_model_memory_usage(batch_size, model):
    """Estimate the memory, in gigabytes, a Keras model needs for one batch:
    the activation tensors of every layer plus all weight parameters.

    A rough estimate only -- framework overhead and optimizer state are not
    accounted for.
    """
    import numpy as np
    # Standalone keras vs the tf.keras bundled with TensorFlow.
    try:
        from keras import backend as K
    except:
        from tensorflow.keras import backend as K

    shapes_mem_count = 0
    internal_model_mem_count = 0
    for l in model.layers:
        layer_type = l.__class__.__name__
        # Nested sub-models are accounted for recursively.
        if layer_type == 'Model':
            internal_model_mem_count += get_model_memory_usage(batch_size, l)
        # Number of elements in this layer's output tensor (None = batch dim).
        single_layer_mem = 1
        out_shape = l.output_shape
        if type(out_shape) is list:
            out_shape = out_shape[0]
        for s in out_shape:
            if s is None:
                continue
            single_layer_mem *= s
        shapes_mem_count += single_layer_mem

    trainable_count = np.sum([K.count_params(p) for p in model.trainable_weights])
    non_trainable_count = np.sum([K.count_params(p) for p in model.non_trainable_weights])

    # Bytes per number depend on the backend's float precision.
    number_size = 4.0
    if K.floatx() == 'float16':
        number_size = 2.0
    if K.floatx() == 'float64':
        number_size = 8.0

    total_memory = number_size * (batch_size * shapes_mem_count + trainable_count + non_trainable_count)
    gbytes = np.round(total_memory / (1024.0 ** 3), 3) + internal_model_mem_count
    return gbytes
var firebug = {
version:[1.23,20090309],
el:{},
env:{
"cache":{},
"extConsole":null,
"css":"http://getfirebug.com/releases/lite/1.2/firebug-lite.css",
"debug":true,
"detectFirebug":true,
"dIndex":"console",
"height":295,
"hideDOMFunctions":false,
"init":false,
"isPopup":false,
"liteFilename":"firebug-lite.js",
"minimized":false,
"openInPopup": false,
"override":true,
"ml":false,
"popupWin":null,
"showIconWhenHidden":true,
"targetWindow":undefined,
"popupTop":1,
"popupLeft":1,
"popupWidth":undefined,
"popupHeight":undefined
},
// Install window.console commands that proxy to Firebug Lite's own
// console panel. Called before init(), so cookie/user settings are not
// yet available here.
initConsole:function(){
/*
* initialize the console - user defined values are not available within this method because FBLite is not yet initialized
*/
var command;
try{
// Replace the console when none exists, when the real Firebug isn't
// attached, or when override is forced (skipped on Firefox 3, which
// ships its own Firebug).
if((!window.console || (window.console && !window.console.firebug)) || (firebug.env.override && !(/Firefox\/3/i.test(navigator.userAgent)))){
window.console = { "provider":"Firebug Lite" };
// Bind every supported command name to the Lite console runner.
for(command in firebug.d.console.cmd){
window.console[command] = firebug.lib.util.Curry(firebug.d.console.run,window,command);
};
}
/*window.onerror = function(_message,_file,_line){
firebug.d.console.run('error',firebug.lib.util.String.format('{0} ({1},{2})',_message,firebug.getFileName(_file),_line));
};*/
} catch(e){}
},
// Force-replace window.console, stashing any pre-existing console in
// env.extConsole so restoreConsole() can put it back.
overrideConsole:function(){
with (firebug){
env.override=true;
try{
env.extConsole=window.console;
} catch(e){}
initConsole();
}
},
// Undo overrideConsole(): restore the console saved in env.extConsole,
// if there was one, and clear the override flag.
restoreConsole:function(){
with(firebug){
if(env.extConsole){
env.override=false;
try{
window.console=env.extConsole;
} catch(e){}
env.extConsole=null;
}
}
},
init:function(_css){
var iconTitle = "Click here or press F12, (CTRL|CMD)+SHIFT+L or SHIFT+ENTER to show Firebug Lite. CTRL|CMD click this icon to hide it.";
with(firebug){
if(document.getElementsByTagName('html')[0].attributes.getNamedItem('debug')){
env.debug = document.getElementsByTagName('html')[0].attributes.getNamedItem('debug').nodeValue !== "false";
}
if(env.isPopup) {
env.openInPopup = false;
env.targetWindow = window.opener;
env.popupWidth = window.opener.firebug.env.popupWidth || window.opener.firebug.lib.util.GetViewport().width;
env.popupHeight = window.opener.firebug.env.popupHeight || window.opener.firebug.lib.util.GetViewport().height;
} else {
env.targetWindow = window;
env.popupWidth = env.popupWidth || lib.util.GetViewport().width;
env.popupHeight = env.popupHeight || lib.util.GetViewport().height;
}
settings.readCookie();
if(env.init || (env.detectFirebug && window.console && window.console.firebug)) {
return;
}
document.getElementsByTagName("head")[0].appendChild(
new lib.element("link").attribute.set("rel","stylesheet").attribute.set("type","text/css").attribute.set("href",env.css).element
);
if(env.override){
overrideConsole();
}
/*
* Firebug Icon
*/
el.firebugIcon = new lib.element("div").attribute.set("id","firebugIconDiv").attribute.set("title",iconTitle).attribute.set("alt",iconTitle).event.addListener("mousedown",win.iconClicked).insert(document.body);
/*
* main interface
*/
el.content = {};
el.mainiframe = new lib.element("IFRAME").attribute.set("id","FirebugIFrame").environment.addStyle({ "display":"none", "width":lib.util.GetViewport().width+"px" }).insert(document.body);
el.main = new lib.element("DIV").attribute.set("id","Firebug").environment.addStyle({ "display":"none", "width":lib.util.GetViewport().width+"px" }).insert(document.body);
if(!env.isPopup){
el.resizer = new lib.element("DIV").attribute.addClass("Resizer").event.addListener("mousedown",win.resizer.start).insert(el.main);
}
el.header = new lib.element("DIV").attribute.addClass("Header").insert(el.main);
el.left = {};
el.left.container = new lib.element("DIV").attribute.addClass("Left").insert(el.main);
el.right = {};
el.right.container = new lib.element("DIV").attribute.addClass("Right").insert(el.main);
el.main.child.add(new lib.element("DIV").attribute.addClass('Clear'));
/*
* buttons
*/
el.button = {};
el.button.container = new lib.element("DIV").attribute.addClass("ButtonContainer").insert(el.header);
el.button.logo = new lib.element("A").attribute.set("title","Firebug Lite").attribute.set("target","_blank").attribute.set("href","http://getfirebug.com/lite.html").update(" ").attribute.addClass("Button Logo").insert(el.button.container);
el.button.inspect = new lib.element("A").attribute.addClass("Button").event.addListener("click",env.targetWindow.firebug.d.inspector.toggle).update("Inspect").insert(el.button.container);
el.button.dock = new lib.element("A").attribute.addClass("Button Dock").event.addListener("click", win.dock).insert(el.button.container);
el.button.newWindow = new lib.element("A").attribute.addClass("Button NewWindow").event.addListener("click", win.newWindow).insert(el.button.container);
if(!env.isPopup){
el.button.maximize = new lib.element("A").attribute.addClass("Button Maximize").event.addListener("click",win.maximize).insert(el.button.container);
el.button.minimize = new lib.element("A").attribute.addClass("Button Minimize").event.addListener("click",win.minimize).insert(el.button.container);
el.button.close = new lib.element("A").attribute.addClass("Button Close").event.addListener("click",win.hide).insert(el.button.container);
}
if(lib.env.ie||lib.env.webkit){
el.button.container.environment.addStyle({ "paddingTop":"12px" });
}
/*
* navigation
*/
el.nav = {};
el.nav.container = new lib.element("DIV").attribute.addClass("Nav").insert(el.left.container);
el.nav.console = new lib.element("A").attribute.addClass("Tab Selected").event.addListener("click",lib.util.Curry(d.navigate,window,"console")).update("Console").insert(el.nav.container);
el.nav.html = new lib.element("A").attribute.addClass("Tab").update("HTML").event.addListener("click",lib.util.Curry(d.navigate,window,"html")).insert(el.nav.container);
el.nav.css = new lib.element("A").attribute.addClass("Tab").update("CSS").event.addListener("click",lib.util.Curry(d.navigate,window,"css")).insert(el.nav.container);
if(!env.isPopup){
el.nav.scripts = new lib.element("A").attribute.addClass("Tab").update("Script").event.addListener("click",lib.util.Curry(d.navigate,window,"scripts")).insert(el.nav.container);
}
el.nav.dom = new lib.element("A").attribute.addClass("Tab").update("DOM").event.addListener("click",lib.util.Curry(d.navigate,env.targetWindow,"dom")).insert(el.nav.container);
el.nav.xhr = new lib.element("A").attribute.addClass("Tab").update("XHR").event.addListener("click",lib.util.Curry(d.navigate,window,"xhr")).insert(el.nav.container);
el.nav.optionsdiv = new lib.element("DIV").attribute.addClass("Settings").insert(el.nav.container);
el.nav.options = new lib.element("A").attribute.addClass("Tab").update("Options ∨").event.addListener("click", settings.toggle).insert(el.nav.optionsdiv);
/*
* inspector
*/
el.borderInspector = new lib.element("DIV").attribute.set("id","FirebugBorderInspector").event.addListener("click",listen.inspector).insert(document.body);
el.bgInspector = new lib.element("DIV").attribute.set("id","FirebugBGInspector").insert(document.body);
/*
* console
*/
el.left.console = {};
el.left.console.container = new lib.element("DIV").attribute.addClass("Console").insert(el.left.container);
el.left.console.mlButton = new lib.element("A").attribute.addClass("MLButton").event.addListener("click",d.console.toggleML).insert(el.left.console.container);
el.left.console.monitor = new lib.element("DIV").insert(
new lib.element("DIV").attribute.addClass("Monitor").insert(el.left.console.container)
);
el.left.console.container.child.add(
new lib.element("DIV").attribute.addClass("InputArrow").update(">>>")
);
el.left.console.input = new lib.element("INPUT").attribute.set("type","text").attribute.addClass("Input").event.addListener("keydown",listen.consoleTextbox).insert(
new lib.element("DIV").attribute.addClass("InputContainer").insert(el.left.console.container)
);
el.right.console = {};
el.right.console.container = new lib.element("DIV").attribute.addClass("Console Container").insert(el.right.container);
el.right.console.mlButton = new lib.element("A").attribute.addClass("MLButton CloseML").event.addListener("click",d.console.toggleML).insert(el.right.console.container);
el.right.console.input = new lib.element("TEXTAREA").attribute.addClass("Input").insert(el.right.console.container);
el.right.console.input.event.addListener("keydown",lib.util.Curry(tab,window,el.right.console.input.element));
el.right.console.run = new lib.element("A").attribute.addClass("Button").event.addListener("click",listen.runMultiline).update("Run").insert(el.right.console.container);
el.right.console.clear = new lib.element("A").attribute.addClass("Button").event.addListener("click",lib.util.Curry(d.clean,window,el.right.console.input)).update("Clear").insert(el.right.console.container);
el.button.console = {};
el.button.console.container = new lib.element("DIV").attribute.addClass("ButtonSet").insert(el.button.container);
el.button.console.clear = new lib.element("A").attribute.addClass("Button").event.addListener("click",d.console.clear).update("Clear").insert(el.button.console.container);
/*
* html
*/
el.left.html = {};
el.left.html.container = new lib.element("DIV").attribute.addClass("HTML").insert(el.left.container);
el.right.html = {};
el.right.html.container = new lib.element("DIV").attribute.addClass("HTML Container").insert(el.right.container);
el.right.html.nav = {};
el.right.html.nav.container = new lib.element("DIV").attribute.addClass("Nav").insert(el.right.html.container);
el.right.html.nav.computedStyle = new lib.element("A").attribute.addClass("Tab Selected").event.addListener("click",lib.util.Curry(d.html.navigate,firebug,"computedStyle")).update("Computed Style").insert(el.right.html.nav.container);
el.right.html.nav.dom = new lib.element("A").attribute.addClass("Tab").event.addListener("click",lib.util.Curry(d.html.navigate,firebug,"dom")).update("DOM").insert(el.right.html.nav.container);
el.right.html.content = new lib.element("DIV").attribute.addClass("Content").insert(el.right.html.container);
el.button.html = {};
el.button.html.container = new lib.element("DIV").attribute.addClass("ButtonSet HTML").insert(el.button.container);
/*
* css
*/
el.left.css = {};
el.left.css.container = new lib.element("DIV").attribute.addClass("CSS").insert(el.left.container);
el.right.css = {};
el.right.css.container = new lib.element("DIV").attribute.addClass("CSS Container").insert(el.right.container);
el.right.css.nav = {};
el.right.css.nav.container = new lib.element("DIV").attribute.addClass("Nav").insert(el.right.css.container);
el.right.css.nav.runCSS = new lib.element("A").attribute.addClass("Tab Selected").update("Run CSS").insert(el.right.css.nav.container);
el.right.css.mlButton = new lib.element("A").attribute.addClass("MLButton CloseML").event.addListener("click",d.console.toggleML).insert(el.right.css.container);
el.right.css.input = new lib.element("TEXTAREA").attribute.addClass("Input").insert(el.right.css.container);
el.right.css.input.event.addListener("keydown",lib.util.Curry(firebug.tab,window,el.right.css.input.element));
el.right.css.run = new lib.element("A").attribute.addClass("Button").event.addListener("click",listen.runCSS).update("Run").insert(el.right.css.container);
el.right.css.clear = new lib.element("A").attribute.addClass("Button").event.addListener("click",lib.util.Curry(d.clean,window,el.right.css.input)).update("Clear").insert(el.right.css.container);
el.button.css = {};
el.button.css.container = new lib.element("DIV").attribute.addClass("ButtonSet CSS").insert(el.button.container);
el.button.css.selectbox = new lib.element("SELECT").event.addListener("change",listen.cssSelectbox).insert(el.button.css.container);
/*
* scripts
*/
el.left.scripts = {};
el.left.scripts.container = new lib.element("DIV").attribute.addClass("Scripts").insert(el.left.container);
el.right.scripts = {};
el.right.scripts.container = new lib.element("DIV").attribute.addClass("Scripts Container").insert(el.right.container);
el.button.scripts = {};
el.button.scripts.container = new lib.element("DIV").attribute.addClass("ButtonSet Scripts").insert(el.button.container);
el.button.scripts.selectbox = new lib.element("SELECT").event.addListener("change",listen.scriptsSelectbox).insert(el.button.scripts.container);
el.button.scripts.lineNumbers = new lib.element("A").attribute.addClass("Button").event.addListener("click",d.scripts.toggleLineNumbers).update("Show Line Numbers").insert(el.button.scripts.container);
/*
* dom
*/
el.left.dom = {};
el.left.dom.container = new lib.element("DIV").attribute.addClass("DOM").insert(el.left.container);
el.right.dom = {};
el.right.dom.container = new lib.element("DIV").attribute.addClass("DOM Container").insert(el.right.container);
el.button.dom = {};
el.button.dom.container = new lib.element("DIV").attribute.addClass("ButtonSet DOM").insert(el.button.container);
el.button.dom.label = new lib.element("LABEL").update("Object Path:").insert(el.button.dom.container);
el.button.dom.textbox = new lib.element("INPUT").event.addListener("keydown",listen.domTextbox).update(env.isPopup?"window.opener":"window").insert(el.button.dom.container);
/*
* str
*/
el.left.str = {};
el.left.str.container = new lib.element("DIV").attribute.addClass("STR").insert(el.left.container);
el.right.str = {};
el.right.str.container = new lib.element("DIV").attribute.addClass("STR").insert(el.left.container);
el.button.str = {};
el.button.str.container = new lib.element("DIV").attribute.addClass("ButtonSet XHR").insert(el.button.container);
el.button.str.watch = new lib.element("A").attribute.addClass("Button").event.addListener("click",lib.util.Curry(d.navigate,window,"xhr")).update("Back").insert(el.button.str.container);
/*
* xhr
*/
el.left.xhr = {};
el.left.xhr.container = new lib.element("DIV").attribute.addClass("XHR").insert(el.left.container);
el.right.xhr = {};
el.right.xhr.container = new lib.element("DIV").attribute.addClass("XHR").insert(el.left.container);
el.button.xhr = {};
el.button.xhr.container = new lib.element("DIV").attribute.addClass("ButtonSet XHR").insert(el.button.container);
el.button.xhr.label = new lib.element("LABEL").update("XHR Path:").insert(el.button.xhr.container);
el.button.xhr.textbox = new lib.element("INPUT").event.addListener("keydown",listen.xhrTextbox).insert(el.button.xhr.container);
el.button.xhr.watch = new lib.element("A").attribute.addClass("Button").event.addListener("click",listen.addXhrObject).update("Watch").insert(el.button.xhr.container);
/*
* settings
*/
el.settings = {};
el.settings.container = new lib.element("DIV").child.add(
new lib.element("DIV").attribute.addClass("Header").child.add(
new lib.element().attribute.addClass("Title").update('Firebug Lite Settings')
)
).attribute.addClass("SettingsDiv").insert(el.main);
el.settings.content = new lib.element("DIV").attribute.addClass("Content").insert(el.settings.container);
el.settings.progressDiv = new lib.element("DIV").attribute.addClass("ProgressDiv").insert(el.settings.content);
el.settings.progress = new lib.element("DIV").attribute.addClass("Progress").insert(el.settings.progressDiv);
el.settings.cbxDebug = new lib.element("INPUT").attribute.set("type","checkbox").attribute.addClass("SettingsCBX").insert(el.settings.content);
el.settings.content.child.add(document.createTextNode("Start visible"));
new lib.element("BR").insert(el.settings.content);
el.settings.cbxDetectFirebug = new lib.element("INPUT").attribute.set("type","checkbox").attribute.addClass("SettingsCBX").insert(el.settings.content);
el.settings.content.child.add(document.createTextNode("Hide when Firebug active"));
new lib.element("BR").insert(el.settings.content);
el.settings.cbxHideDOMFunctions = new lib.element("INPUT").attribute.set("type","checkbox").attribute.addClass("SettingsCBX").insert(el.settings.content);
el.settings.content.child.add(document.createTextNode("Hide DOM functions"));
new lib.element("BR").insert(el.settings.content);
el.settings.cbxOverride = new lib.element("INPUT").attribute.set("type","checkbox").attribute.addClass("SettingsCBX").insert(el.settings.content);
el.settings.content.child.add(document.createTextNode("Override window.console"));
new lib.element("BR").insert(el.settings.content);
el.settings.cbxShowIcon = new lib.element("INPUT").attribute.set("type","checkbox").attribute.addClass("SettingsCBX").insert(el.settings.content);
el.settings.content.child.add(document.createTextNode("Show icon when hidden"));
new lib.element("BR").insert(el.settings.content);
el.settings.cbxOpenInPopup = new lib.element("INPUT").attribute.set("type","checkbox").attribute.addClass("SettingsCBX").insert(el.settings.content);
el.settings.content.child.add(document.createTextNode("Open in popup"));
el.settings.buttonDiv = new lib.element("DIV").insert(el.settings.content);
el.settings.buttonLeftDiv = new lib.element("DIV").attribute.addClass("ButtonsLeft").insert(el.settings.buttonDiv);
el.settings.resetButton = new lib.element("INPUT").attribute.set("type","button").update("Reset").event.addListener("click",settings.reset).insert(el.settings.buttonLeftDiv);
el.settings.buttonRightDiv = new lib.element("DIV").attribute.addClass("ButtonsRight").insert(el.settings.buttonDiv);
el.settings.cancelButton = new lib.element("INPUT").attribute.set("type","button").update("Cancel").event.addListener("click",settings.hide).insert(el.settings.buttonRightDiv);
el.settings.buttonRightDiv.child.add(document.createTextNode(" "));
el.settings.saveButton = new lib.element("INPUT").attribute.set("type","button").update("Save").event.addListener("click",settings.saveClicked).insert(el.settings.buttonRightDiv);
lib.util.AddEvent(document,"mousemove",listen.mouse)("mousemove",win.resizer.resize)("mouseup",win.resizer.stop)("keydown",listen.keyboard);
env.init = true;
for(var i=0, len=d.console.cache.length; i<len; i++){
var item = d.console.cache[i];
d.console.cmd[item.command].apply(window,item.arg);
};
if(lib.env.ie6){
window.onscroll = lib.util.Curry(win.setVerticalPosition,window,null);
var buttons = [
el.button.inspect,
el.button.close,
el.button.inspect,
el.button.console.clear,
el.right.console.run,
el.right.console.clear,
el.right.css.run,
el.right.css.clear
];
for(var i=0, len=buttons.length; i<len; i++)
buttons[i].attribute.set("href","#");
win.refreshSize();
}
if(env.showIconWhenHidden) {
if(!env.popupWin) {
el.firebugIcon.environment.addStyle({ "display": env.debug&&'none'||'block' });
}
}
lib.util.AddEvent(window, "unload", win.unload);
if (env.isPopup) {
env.height=lib.util.GetViewport().height;
lib.util.AddEvent(window, "resize", win.fitToPopup);
win.fitToPopup();
} else {
lib.util.AddEvent(window, "resize", win.refreshSize);
}
win.setHeight(env.height);
if(env.openInPopup&&!env.isPopup) {
win.newWindow();
} else {
el.main.environment.addStyle({ "display":env.debug&&'block'||'none' });
el.mainiframe.environment.addStyle({ "display":env.debug&&'block'||'none' });
}
}
},
// Delegate to the HTML panel's element inspector, forwarding arguments.
inspect:function(){
return firebug.d.html.inspect.apply(window,arguments);
},
// Register an XMLHttpRequest object for monitoring and switch the UI to
// the XHR tab if it is not already active.
watchXHR:function(){
with(firebug){
d.xhr.addObject.apply(window,arguments);
if(env.dIndex!="xhr"){
d.navigate("xhr");
}
}
},
settings:{
isVisible:false,
show: function() {
with(firebug){
var posXY=lib.util.Element.getPosition(firebug.el.nav.options.element);
settings.refreshForm();
el.settings.container.environment.addStyle({
"display": "block",
"left": (posXY.offsetLeft-125)+"px"
});
el.settings.progressDiv.environment.addStyle({
"display": "none"
});
firebug.settings.isVisible = true;
}
},
hide: function() {
with(firebug){
firebug.el.settings.container.environment.addStyle({
"display": "none"
});
firebug.settings.isVisible = false;
}
},
toggle: function(){
with(firebug){
settings[!settings.isVisible && 'show' || 'hide']();
}
},
saveClicked: function() {
firebug.el.settings.progressDiv.environment.addStyle({
"display": "block"
});
setTimeout(firebug.settings.formToSettings,0);
},
formToSettings: function() {
var fe=firebug.env,
ofe,
elSet=firebug.el.settings,
exdate;
fe.debug=elSet.cbxDebug.element.checked;
fe.detectFirebug=elSet.cbxDetectFirebug.element.checked;
fe.hideDOMFunctions=elSet.cbxHideDOMFunctions.element.checked;
fe.override=elSet.cbxOverride.element.checked;
fe.showIconWhenHidden=elSet.cbxShowIcon.element.checked;
fe.openInPopup=elSet.cbxOpenInPopup.element.checked;
if(fe.isPopup) {
ofe=window.opener.firebug.env;
ofe.debug=fe.debug;
ofe.detectFirebug=fe.detectFirebug;
ofe.hideDOMFunctions=fe.hideDOMFunctions;
ofe.override=fe.override;
ofe.showIconWhenHidden=fe.showIconWhenHidden;
ofe.openInPopup=fe.openInPopup;
ofe.popupTop=fe.popupTop;
ofe.popupLeft=fe.popupLeft;
ofe.popupWidth=fe.popupWidth;
ofe.popupHeight=fe.popupHeight;
}
with(firebug) {
settings.writeCookie();
settings.hide();
win.refreshDOM();
}
},
reset: function() {
var exdate=new Date();
exdate.setTime(exdate.getTime()-1);
document.cookie='FBLiteSettings=;expires='+exdate.toGMTString();
location.reload(true);
},
readCookie: function() {
var i,cookieArr,valueArr,item,value;
with(firebug.env){
if(targetWindow.document.cookie.length>0) {
cookieArr=targetWindow.document.cookie.split('; ');
for(i=0;i<cookieArr.length;i++) {
if(cookieArr[i].split('=')[0]=='FBLiteSettings') {
valueArr=cookieArr[i].split('=')[1].split(',');
}
}
if(valueArr) {
for(i=0;i<valueArr.length;i++) {
item=valueArr[i].split(':')[0];
value=valueArr[i].split(':')[1];
switch(item) {
case 'debug':
debug=value=="true";
break;
case 'detectFirebug':
detectFirebug=value=="true";
break;
case 'hideDOMFunctions':
hideDOMFunctions=value=="true";
break;
case 'override':
override=value=="true";
break;
case 'showIconWhenHidden':
showIconWhenHidden=value=="true";
break;
case 'openInPopup':
openInPopup=value=="true";
break;
case 'popupTop':
popupTop=parseInt(value,10);
break;
case 'popupLeft':
popupLeft=parseInt(value,10);
break;
case 'popupWidth':
popupWidth=parseInt(value,10);
break;
case 'popupHeight':
popupHeight=parseInt(value,10);
break;
case 'height':
height=parseInt(value,10);
break;
}
}
}
}
}
},
writeCookie: function() {
var values;
with(firebug.env){
values='debug:'+debug+',';
values+='detectFirebug:'+detectFirebug+',';
values+='hideDOMFunctions:'+hideDOMFunctions+',';
values+='override:'+override+',';
values+='showIconWhenHidden:'+showIconWhenHidden+',';
values+='openInPopup:'+openInPopup+',';
if(isPopup) {
if(window.outerWidth===undefined) {
values+='popupTop:'+(window.screenTop-56)+',';
values+='popupLeft:'+(window.screenLeft-8)+',';
values+='popupWidth:'+document.body.clientWidth+',';
values+='popupHeight:'+document.body.clientHeight+',';
} else {
values+='popupTop:'+window.screenY+',';
values+='popupLeft:'+window.screenX+',';
values+='popupWidth:'+window.outerWidth+',';
values+='popupHeight:'+window.outerHeight+',';
}
} else {
values+='popupTop:'+popupTop+',';
values+='popupLeft:'+popupLeft+',';
values+='popupWidth:'+popupWidth+',';
values+='popupHeight:'+popupHeight+',';
}
values+='height:'+(parseInt(targetWindow.firebug.el.main.element.style.height.replace(/px/,''),10)-38);
exdate=new Date();
exdate.setDate(exdate.getDate()+365);
targetWindow.document.cookie='FBLiteSettings='+values+';expires='+exdate.toGMTString();
}
},
refreshForm: function() {
var fe=firebug.env,
elSet=firebug.el.settings;
elSet.cbxDebug.element.checked=fe.debug;
elSet.cbxDetectFirebug.element.checked=fe.detectFirebug;
elSet.cbxHideDOMFunctions.element.checked=fe.hideDOMFunctions;
elSet.cbxOverride.element.checked=fe.override;
elSet.cbxShowIcon.element.checked=fe.showIconWhenHidden;
elSet.cbxOpenInPopup.element.checked=fe.openInPopup;
}
},
win:{
// Hide the main panel and its backing iframe; if configured, show the
// small Firebug icon so the user can reopen the panel later.
hide:function(){
with(firebug){
el.main.environment.addStyle({
"display": "none"
});
el.mainiframe.environment.addStyle({
"display": "none"
});
if(env.showIconWhenHidden) {
el.firebugIcon.environment.addStyle({
"display": "block"
});
}
}
},
// Show the main panel and its backing iframe, hiding the standalone
// Firebug icon if it was being displayed.
show:function(){
with(firebug){
el.main.environment.addStyle({
"display": "block"
});
el.mainiframe.environment.addStyle({
"display": "block"
});
if(env.showIconWhenHidden) {
el.firebugIcon.environment.addStyle({
"display": "none"
});
}
}
},
// Icon click handler: CTRL/CMD-click hides the icon (and disables it for
// this page view); a plain click opens the Firebug Lite panel.
iconClicked:function(_event) {
with(firebug) {
if(_event.ctrlKey==true||_event.metaKey==true) {
el.firebugIcon.environment.addStyle({ "display": "none" });
env.showIconWhenHidden=false;
} else {
win.show();
}
}
},
// Collapse the panel to its 35px header bar and swap the
// minimize/maximize buttons accordingly.
minimize:function(){
with(firebug){
env.minimized=true;
el.main.environment.addStyle({ "height":"35px" });
el.mainiframe.environment.addStyle({ "height":"35px" });
el.button.maximize.environment.addStyle({ "display":"block" });
el.button.minimize.environment.addStyle({ "display":"none" });
win.refreshSize();
}
},
// Restore the panel from its minimized state to the height stored in
// env.height, swapping the minimize/maximize buttons back.
maximize:function(){
with(firebug){
env.minimized=false;
el.button.minimize.environment.addStyle({ "display":"block" });
el.button.maximize.environment.addStyle({ "display":"none" });
win.setHeight(env.height);
}
},
newWindow: function() {
var interval,scripts,script,scriptPath,
fe=firebug.env;
if (!fe.popupWin) {
scripts = document.getElementsByTagName('script');
fe.popupWin = window.open("", "_firebug",
"status=0,menubar=0,resizable=1,top="+fe.popupTop+",left="+fe.popupLeft+",width=" + fe.popupWidth +
",height=" + fe.popupHeight + ",scrollbars=0,addressbar=0,outerWidth="+fe.popupWidth+",outerHeight="+fe.popupHeight+
"toolbar=0,location=0,directories=0,dialog=0");
if(!fe.popupWin) {
alert("Firebug Lite could not open a pop-up window, most likely because of a popup blocker.\nPlease enable popups for this domain");
} else {
firebug.settings.hide();
for (i=0,len=scripts.length; i<len; i++) {
if (scripts[i].src.indexOf(fe.liteFilename) > -1) {
scriptPath = scripts[i].src;
break;
}
}
if (scriptPath) {
script = fe.popupWin.document.createElement('script'), done = false;
script.type = 'text/javascript';
script.src = scriptPath;
script[firebug.lib.env.ie?"onreadystatechange":"onload"] = function(){
if(!done && (!firebug.lib.env.ie || this.readyState == "complete" || this.readyState=="loaded")){
done = true;
if(fe.popupWin.firebug) {
with(fe.popupWin.firebug) {
env.isPopup = true;
env.css = fe.css;
init();
el.button.dock.environment.addStyle({ "display": "block"});
el.button.newWindow.environment.addStyle({ "display": "none"});
}
}
}
};
if (!done && firebug.lib.env.webkit) {
fe.popupWin.document.write('<html><head></head><body></body></html>');
interval = setInterval(function() {
if (fe.popupWin.firebug) {
clearInterval(interval);
done = true;
with(fe.popupWin.firebug) {
env.isPopup = true;
env.css = fe.css;
init();
el.button.dock.environment.addStyle({ "display": "block"});
el.button.newWindow.environment.addStyle({ "display": "none"});
}
}
}, 10);
};
if (!done) {
fe.popupWin.document.getElementsByTagName('head')[0].appendChild(script);
firebug.el.main.environment.addStyle({"display": "none"});
firebug.el.mainiframe.environment.addStyle({"display": "none"});
}
} else {
alert("Unable to detect the following script \"" + fe.liteFilename +
"\" ... if the script has been renamed then please set the value of firebug.env.liteFilename to reflect this change");
fe.popupWin.close();
fe.popupWin=null;
}
}
}
},
dock: function() {
with(opener.firebug) {
env.popupWin = null;
el.main.environment.addStyle({
"display": "block"
});
el.mainiframe.environment.addStyle({
"display": "block"
});
settings.readCookie();
window.close();
};
},
unload: function() {
with(firebug){
if(env.isPopup) {
win.dock();
} else if(env.popupWin) {
env.popupWin.close();
}
}
},
fitToPopup: function() {
with(firebug) {
var viewport = lib.util.GetViewport(window);
win.setHeight((window.innerHeight||viewport.height) - 38);
el.main.environment.addStyle({
"width": (viewport.width) + "px"
});
el.mainiframe.environment.addStyle({
"width": (viewport.width) + "px"
});
}
},
resizer:{
y:[], enabled:false,
start:function(_event){
with(firebug){
if(env.minimized)return;
win.resizer.y=[el.main.element.offsetHeight,_event.clientY];
if(lib.env.ie6){
win.resizer.y[3]=parseInt(el.main.environment.getPosition().top);
}
win.resizer.enabled=true;
}
},
resize:function(_event){
with(firebug){
if(!win.resizer.enabled)return;
win.resizer.y[2]=(win.resizer.y[0]+(win.resizer.y[1]-_event.clientY));
el.main.environment.addStyle({ "height":win.resizer.y[2]+"px" });
el.mainiframe.environment.addStyle({ "height":win.resizer.y[2]+"px" });
if(lib.env.ie6){
el.main.environment.addStyle({ "top":win.resizer.y[3]-(win.resizer.y[1]-_event.clientY)+"px" });
el.mainiframe.environment.addStyle({ "top":win.resizer.y[3]-(win.resizer.y[1]-_event.clientY)+"px" });
}
}
},
stop:function(_event){
with(firebug){
if(win.resizer.enabled){
win.resizer.enabled=false;
win.setHeight(win.resizer.y[2]-35);
}
}
}
},
setHeight:function(_height){
with(firebug){
env.height=_height;
el.left.container.environment.addStyle({ "height":_height+"px" });
el.right.container.environment.addStyle({ "height":_height+"px" });
el.main.environment.addStyle({ "height":_height+38+"px" });
el.mainiframe.environment.addStyle({ "height":_height+38+"px" });
win.refreshSize();
// console
el.left.console.monitor.element.parentNode.style.height=_height-47+"px";
el.left.console.mlButton.environment.addStyle({ "top":_height+19+"px" });
el.right.console.mlButton.environment.addStyle({ "top":_height+19+"px" });
el.right.console.input.environment.addStyle({ "height":_height-29+"px" });
// html
el.left.html.container.environment.addStyle({"height":_height-23+"px"});
el.right.html.content.environment.addStyle({"height":_height-23+"px"});
// css
el.left.css.container.environment.addStyle({"height":_height-33+"px"});
el.right.css.input.environment.addStyle({ "height":_height-55+"px" });
// script
el.left.scripts.container.environment.addStyle({"height":_height-23+"px"});
// dom
el.left.dom.container.environment.addStyle({"height":_height-31+"px"});
// xhr
el.left.xhr.container.environment.addStyle({"height":_height-32+"px"});
// string
el.left.str.container.environment.addStyle({"height":_height-32+"px"});
}
},
refreshDOM:function(){
with(firebug){
d.dom.open(eval(el.button.dom.textbox.environment.getElement().value),el.left.dom.container);
if(d.html.nIndex=="dom"){
firebug.d.html.navigate("dom")
}
}
},
refreshSize:function(){
with(firebug){
if(!env.init)
return;
var dim = lib.util.GetViewport();
el.main.environment.addStyle({ "width":dim.width+"px"});
el.mainiframe.environment.addStyle({ "width":dim.width+"px"});
if(lib.env.ie6)
win.setVerticalPosition(dim);
}
},
setVerticalPosition:function(_dim,_event){
with(firebug){
var dim = _dim||lib.util.GetViewport();
el.main.environment.addStyle({ "top":dim.height-el.main.environment.getSize().offsetHeight+Math.max(document.documentElement.scrollTop,document.body.scrollTop)+"px" });
el.mainiframe.environment.addStyle({ "top":dim.height-el.main.environment.getSize().offsetHeight+Math.max(document.documentElement.scrollTop,document.body.scrollTop)+"px" });
}
}
},
d: {
clean:function(_element){
with(firebug){
_element.update("");
}
},
console:{
addLine:function(){
    // Append a fresh row DIV to the console monitor and return its wrapper.
    with (firebug) {
        return new lib.element("DIV").attribute.addClass("Row").insert(el.left.console.monitor);
    }
},
cache:[],   // objects printed as links; indexed by openObject / inspect handlers
clear:function(){
    // Wipe the console output and the printed-object cache.
    with(firebug){
        d.clean(el.left.console.monitor);
        d.console.cache = [];
    }
},
formatArgs:function(){
    // Syntax-highlight each argument and join them with spaces.
    with(firebug){
        var content = [];
        for(var i=0, len=arguments.length; i<len; i++){
            content.push( d.highlight(arguments[i],false,false,true) );
        }
        return content.join(" ");
    }
},
history:[], historyIndex:0,   // typed-command history for the up/down arrows
openObject:function(_index){
    // Open a cached object (see `cache` above) in the DOM tab.
    with (firebug) {
        d.dom.open(d.console.cache[_index], el.left.dom.container, lib.env.ie);
        d.navigate("dom");
    }
},
print: function(_cmd,_text){
    // Echo the command line, then its highlighted result, then scroll down.
    with (firebug){
        d.console.addLine().attribute.addClass("Arrow").update(">>> "+_cmd);
        d.console.addLine().update(d.highlight(_text,false,false,true));
        d.console.scroll();
    }
},
printException: function(_exception){
    // Render a caught exception; appends file/line info when the host
    // exposes fileName/lineNumber (Gecko-style error objects).
    with(firebug){
        var message = _exception.description||_exception.message||_exception;
        if(_exception.fileName){
            message+=' ('+(_exception.name&&(_exception.name+', ')||'')+getFileName(_exception.fileName)+', '+_exception.lineNumber+')';
        }
        d.console.addLine().attribute.addClass("Error").update("<strong>Error: </strong>"+message,true);
    }
},
eval:function(_cmd){
    // Evaluate a typed console command in the page's global scope and print
    // the result (or the exception). NOTE(review): eval on user input is
    // this console's purpose, but keep it away from untrusted command strings.
    var result;
    with(firebug){
        if(_cmd.length==0)
            return;
        el.left.console.input.environment.getElement().value = "";
        // push returns the new length, so historyIndex points one past the end
        d.console.historyIndex = d.console.history.push(_cmd);
        try {
            if(_cmd==='console.firebug') {
                // easter egg: print the version string
                d.console.addLine().attribute.addClass("Arrow").update(firebug.version);
            } else {
                result = eval.call(window,_cmd);
                d.console.print(_cmd,result);
            }
        } catch(e){
            d.console.addLine().attribute.addClass("Arrow").update(">>> "+_cmd);
            d.console.printException(e);
        }
        d.console.scroll();
    }
},
scroll:function(){
    // Keep the monitor scrolled to the newest line.
    with(firebug){
        el.left.console.monitor.environment.getElement().parentNode.scrollTop = Math.abs(el.left.console.monitor.environment.getSize().offsetHeight-(el.left.console.monitor.element.parentNode.offsetHeight-11));
    }
},
run:function(_command){
    // Execute a console command, or queue it until the UI has initialized.
    // NOTE(review): queues into d.console.cache, which is also used for
    // printed objects above -- confirm the dual use is intended.
    with(firebug){
        if(!env.init){
            d.console.cache.push({ "command":_command, "arg":Array.prototype.slice.call(arguments,1) });
        } else {
            d.console.cmd[_command].apply(window,Array.prototype.slice.call(arguments,1));
        }
    }
},
toggleML:function(){
    // Toggle the multiline-input column and swap which toggle button shows.
    with(firebug){
        var open = !env.ml;
        env.ml = !env.ml;
        d.navigateRightColumn("console",open);
        el[open?"left":"right"].console.mlButton.environment.addStyle({ display:"none" });
        el[!open?"left":"right"].console.mlButton.environment.addStyle({ display:"block" });
        el.left.console.mlButton.attribute[(open?"add":"remove")+"Class"]("CloseML");
    }
},
countMap:{}, timeMap: {},   // state for console.count / console.time
cmd:{
log: function(_value){
    // console.log: print the highlighted arguments with "Log" row styling.
    with(firebug){
        var args = d.console.formatArgs.apply(window,arguments);
        d.console.addLine().attribute.addClass("Log").update(args);
        d.console.scroll();
    }
},
warn: function(_value){
    // console.warn: as log, with "Warn" styling.
    with(firebug){
        var args = d.console.formatArgs.apply(window,arguments);
        d.console.addLine().attribute.addClass("Warn").update(args);
        d.console.scroll();
    }
},
info: function(_value){
    // console.info: as log, with "Info" styling.
    with(firebug){
        var args = d.console.formatArgs.apply(window,arguments);
        d.console.addLine().attribute.addClass("Info").update(args);
        d.console.scroll();
    }
},
debug: function(_value){
    // console.debug: as log, with "Debug" styling.
    with(firebug){
        var args = d.console.formatArgs.apply(window,arguments);
        d.console.addLine().attribute.addClass("Debug").update(args);
        d.console.scroll();
    }
},
error: function(_value){
    // console.error: as log, with "Error" styling.
    with(firebug){
        var args = d.console.formatArgs.apply(window,arguments);
        d.console.addLine().attribute.addClass("Error").update(args);
        d.console.scroll();
    }
},
trace: function(_value){
    // console.trace: walk up to 3 caller frames and dump each one.
    // NOTE(review): `arguments.caller` was removed from JavaScript long ago
    // (the working form was arguments.callee.caller); in modern engines `f`
    // is undefined and this prints nothing -- confirm intended behavior.
    with(firebug){
        var stackAmt = 3, f = arguments.caller, isArray = lib.util.IsArray(f); //function that called trace
        if((!isArray&&f)||(isArray&&f.length>0)){
            d.console.addLine().attribute.addClass("Arrow").update(">>> console.trace(stack)");
            for(var i=0;i<stackAmt;i++){
                var func = f.toString(), args = f.arguments;
                d.dom.open({"function":func, "arguments":args},d.console.addLine());
                f = f.caller;
            }
        }
    }
},
dir:function(_value){
    // console.dir: dump an object's members inline in the console.
    with(firebug){
        d.console.addLine().attribute.addClass("Arrow").update(">>> console.dir("+_value+")");
        d.dom.open(_value,d.console.addLine());
    }
},
dirxml: function(){
    // console.dirxml: no XML tree view here; fall back to log.
    with(firebug){
        d.console.cmd.log.apply(this, arguments);
    }
},
time: function(_name){
    // console.time: remember the start timestamp for _name.
    with(firebug){
        d.console.timeMap[_name] = new Date().getTime();
    }
},
timeEnd: function(_name){
with(firebug){
if(_name in d.console.timeMap){
var delta = new Date().getTime() - d.console.timeMap[_name],
args = d.console.formatArgs.apply(window,[_name+":", delta+"ms"]);
d.console.addLine().attribute.addClass("log").update(args);
delete d.console.timeMap[_name];
}
}
},
count: function(_name){
    // console.count: increment and log the per-name counter.
    with(firebug){
        if(!d.console.countMap[_name])
            d.console.countMap[_name] = 0;
        d.console.countMap[_name]++;
        d.console.cmd.log.apply(window, [_name, d.console.countMap[_name]]);
    }
},
group:function(){
    // Unsupported console APIs below: report rather than fail silently.
    with(firebug){
        d.console.cmd.log.apply(this, ["console.group is not supported"]);
    }
},
groupEnd:function(){
    with(firebug){
        d.console.cmd.log.apply(this, ["console.groupEnd is not supported"]);
    }
},
profile:function(){
    with(firebug){
        d.console.cmd.log.apply(this, ["console.profile is not supported"]);
    }
},
profileEnd:function(){
    with(firebug){
        d.console.cmd.log.apply(this, ["console.profileEnd is not supported"]);
    }
}
}
},
css:{
    index:-1,   // currently selected stylesheet index (-1 = none yet)
    open:function(_index){
        // Render the rules of stylesheet #_index into the CSS tab.
        with (firebug) {
            var item = env.targetWindow.document.styleSheets[_index],
                uri = item.href;
            try {
                // IE exposes .rules / .style.cssText; others .cssRules / .cssText
                var rules = item[lib.env.ie ? "rules" : "cssRules"], str = "";
                for (var i=0; i<rules.length; i++) {
                    // NOTE(review): this `item` shadows the stylesheet `item` above
                    var item = rules[i];
                    var selector = item.selectorText;
                    var cssText = lib.env.ie?item.style.cssText:item.cssText.match(/\{(.*)\}/)[1];
                    str+=d.css.printRule(selector, cssText.split(";"), el.left.css.container);
                }
            } catch(e) {
                // cross-origin stylesheets throw on rule access
                str="<em>Access to restricted URI denied</em>";
            }
            el.left.css.container.update(str);
        }
    },
    printRule:function(_selector,_css,_layer){
        // Build the HTML for one rule. (_layer is accepted but unused.)
        with(firebug){
            var str = "<div class='Selector'>"+_selector+" {</div>";
            for(var i=0,len=_css.length; i<len; i++){
                var item = _css[i];
                str += "<div class='CSSText'>"+item.replace(/(.+\:)(.+)/,"<span class='CSSProperty'>$1</span><span class='CSSValue'>$2;</span>")+"</div>";
            }
            str+="<div class='Selector'>}</div>";
            return str;
        }
    },
    refresh:function(){
        // Repopulate the stylesheet selectbox and open the current selection.
        with(firebug){
            el.button.css.selectbox.update("");
            var collection = env.targetWindow.document.styleSheets;
            for(var i=0,len=collection.length; i<len; i++){
                var uri = getFileName(collection[i].href);
                d.css.index=d.css.index<0?i:d.css.index;
                el.button.css.selectbox.child.add(
                    new lib.element("OPTION").attribute.set("value",i).update(uri)
                )
            };
            d.css.open(d.css.index);
        }
    }
},
dom: {
    open: function(_object,_layer){
        // Clear _layer and render _object's members into a fresh container.
        with (firebug) {
            _layer.clean();
            var container = new lib.element("DIV").attribute.addClass("DOMContent").insert(_layer);
            d.dom.print(_object, container);
        }
    },
    print:function(_object,_parent, _inTree){
        // Render one expandable level of the object inspector into _parent.
        // Clicking an already-open node collapses it instead.
        with (firebug) {
            var obj = _object || window, parentElement = _parent;
            parentElement.update("");
            if(parentElement.opened&&parentElement!=el.left.dom.container){
                // second click: collapse this node
                parentElement.environment.getParent().lib.child.get()[0].lib.child.get()[0].lib.attribute.removeClass("Opened");
                parentElement.opened = false;
                parentElement.environment.addStyle({ "display":"none" });
                return;
            }
            if(_inTree)
                parentElement.environment.getParent().lib.child.get()[0].lib.child.get()[0].lib.attribute.addClass("Opened");
            parentElement.opened = true;
            for (var key in obj) {
                try {
                    if (env.hideDOMFunctions && typeof(obj[key]) == "function") continue;
                    var value = obj[key], property = key, container = new lib.element("DIV").attribute.addClass("DOMRow").insert(parentElement),
                        left = new lib.element("DIV").attribute.addClass("DOMRowLeft").insert(container), right = new lib.element("DIV").attribute.addClass("DOMRowRight").insert(container);
                    container.child.add(
                        new lib.element("DIV").attribute.addClass('Clear')
                    );
                    var link = new lib.element("A").attribute.addClass(
                        typeof value=="object"&&Boolean(value)?"Property Object":"Property"
                    ).update(property).insert(left);
                    right.update(d.highlight(value,false,true));
                    var subContainer = new lib.element("DIV").attribute.addClass("DOMRowSubContainer").insert(container);
                    // only non-null objects get an expandable sub-level
                    if(typeof value!="object"||Boolean(value)==false)
                        continue;
                    link.event.addListener("click",lib.util.Curry(d.dom.print,window,value, subContainer, true));
                }catch(e){
                    // host-object property access can throw; skip the property
                }
            }
            parentElement.environment.addStyle({ "display":"block" });
        }
    }
},
highlight:function(_value,_inObject,_inArray,_link){
with(firebug){
var isArray = false, isHash, isElement = false, vtype=typeof _value, result=[];
if(vtype=="object"){
if(Object.prototype.toString.call(_value) === "[object Date]"){
vtype = "date";
} else if(Object.prototype.toString.call(_value) === "[object String]"){
vtype = "string";
} else if(Object.prototype.toString.call(_value) === "[object Boolean]"){
vtype = "boolean";
} else if(Object.prototype.toString.call(_value) === "[object RegExp]"){
vtype = "regexp";
}
}
try {
isArray = lib.util.IsArray(_value);
isHash = lib.util.IsHash(_value);
isElement = _value!=undefined&&Boolean(_value.nodeName)&&Boolean(_value.nodeType);
// number, string, boolean, null, function
if(_value==null||vtype=="number"||vtype=="string"||vtype=="boolean"||vtype=="function"||vtype=="regexp"||vtype=="date"){
if(_value==null){
result.push("<span class='Null'>null</span>");
}else if (vtype=="regexp") {
result.push("<span class='Maroon'>" + _value + "</span>");
}else if (vtype=="date") {
result.push("<span class='DarkBlue'>'" + _value + "'</span>");
} else if (vtype=="boolean"||vtype=="number") {
result.push("<span class='DarkBlue'>" + _value + "</span>");
} else if(vtype=="function"){
result.push("<span class='"+(_inObject?"Italic Gray":"Green")+"'>function()</span>");
} else {
result.push("<span class='Red'>\""+( !_inObject&&!_inArray?_value : _value.substring(0,35)+(_value.length>35?" ...":"") ).replace(/\n/g,"\\n").replace(/\s/g," ").replace(/>/g,">").replace(/</g,"<")+"\"</span>");
}
}
// element
else if(isElement){
if(_value.nodeType==3)
result.push(d.highlight(_value.nodeValue));
else if(_inObject){
result.push("<span class='Gray Italic'>"+_value.nodeName.toLowerCase()+"</span>");
} else {
result.push("<span class='Blue"+ ( !_link?"'":" ObjectLink' onmouseover='this.className=this.className.replace(\"ObjectLink\",\"ObjectLinkHover\")' onmouseout='this.className=this.className.replace(\"ObjectLinkHover\",\"ObjectLink\")' onclick='firebug.d.html.inspect(firebug.d.console.cache[" +( d.console.cache.push( _value ) -1 )+"])'" ) + "'>");
if(_inArray){
result.push(_value.nodeName.toLowerCase());
if(_value.getAttribute){
if(_value.getAttribute&&_value.getAttribute("id"))
result.push("<span class='DarkBlue'>#"+_value.getAttribute("id")+"</span>");
var elClass = _value.getAttribute(lib.env.ie&&!lib.env.ie8?"className":"class")||"";
result.push(!elClass?"":"<span class='Red'>."+elClass.split(" ")[0]+"</span>");
}
result.push("</span>");
} else {
result.push("<span class='DarkBlue'><<span class='Blue TagName'>"+ _value.nodeName.toLowerCase());
if(_value.attributes){
for(var i=0,len=_value.attributes.length; i<len; i++){
var item = _value.attributes[i];
if(!lib.env.ie||item.nodeValue)
result.push(" <span class='DarkBlue'>"+item.nodeName+"=\"<span class='Red'>"+item.nodeValue+"</span>\"</span>");
}
}
result.push("</span>></span>");
}
}
}
// array, hash
else if(isArray||isHash){
if(isArray){
if(_inObject){
result.push("<span class='Gray Italic'>["+_value.length+"]</span>");
} else {
result.push("<span class='Strong'>[ ");
for(var i=0,len=_value.length; i<len; i++){
if((_inObject||_inArray)&&i>3){
result.push(", <span class='Strong Gray'>"+(len-4)+" More...</span>");
break;
}
result.push( (i > 0 ? ", " : "") + d.highlight(_value[i], false, true, true) );
}
result.push(" ]</span>");
}
} else if(_inObject){
result.push("<span class='Gray Italic'>Object</span>");
} else {
result.push("<span class='Strong Green"+ ( !_link?"'":" ObjectLink' onmouseover='this.className=this.className.replace(\"ObjectLink\",\"ObjectLinkHover\")' onmouseout='this.className=this.className.replace(\"ObjectLinkHover\",\"ObjectLink\")' onclick='firebug.d.console.openObject(" +( d.console.cache.push( _value ) -1 )+")'" ) + ">Object");
var i=0;
for(var key in _value){
var value = _value[key];
if((_inObject||_inArray)&&i>3){
result.push(" <span class='Strong Gray'>More...</span>");
break;
}
result.push(" "+key+"="+d.highlight(value,true));
i++;
}
result.push("</span>");
}
} else {
result.push(["<span class'Gray Italic'>"+_value+"</span>"]);
}
} catch(e){
result.push("..");
}
return result.join("");
}
},
html:{
nIndex:"computedStyle",   // active right-pane sub-view ("computedStyle" | "dom")
current:null,             // [selectedElement, treeLinkWrapper] or null
highlight:function(_element,_clear,_event){
    // Hover handler for tree rows: outline _element in the page, or hide
    // the outline when _clear is set. Never highlights the panel's own UI.
    with(firebug){
        if(_element.firebugElement){
            return;
        }
        if(_clear){
            env.targetWindow.firebug.el.bgInspector.environment.addStyle({ "display":"none" });
            return;
        }
        d.inspector.inspect(_element,true);
    }
},
inspect:function(_element){
    // Reveal _element in the HTML tree: collect its ancestor chain up to
    // <body>, locate the <body> row, then expand the tree level by level
    // and mark the final node as selected.
    var map = [],
        parentLayer,
        t,
        tagName,
        parent = _element;
    while (parent) {
        map.push(parent);
        if (parent == firebug.env.targetWindow.document.body) break;
        parent = parent.parentNode;
    }
    map = map.reverse();
    with(firebug) {
        if (env.dIndex != "html") {
            env.targetWindow.firebug.d.navigate("html");
        }
        env.targetWindow.firebug.d.inspector.toggle(false);
        // find the tree row whose label text is the <body ...> tag
        for (t = 0; t < el.left.html.container.child.get().length; t++) {
            if (el.left.html.container.child.get()[t].childNodes[0].childNodes[1].childNodes[0].childNodes[0]) {
                if (el.left.html.container.child.get()[t].childNodes[0].childNodes[1].childNodes[0].childNodes[0].innerText) {
                    tagName = el.left.html.container.child.get()[t].childNodes[0].childNodes[1].childNodes[0].childNodes[0].innerText;
                } else {
                    tagName = el.left.html.container.child.get()[t].childNodes[0].childNodes[1].childNodes[0].childNodes[0].textContent;
                }
                if (/<body/i.test(tagName)) {
                    parentLayer = el.left.html.container.child.get()[t].childNodes[1].lib;
                    break;
                }
            }
        }
        if (!parentLayer) {
            // fallback: assume the 4th row holds <body>
            parentLayer = el.left.html.container.child.get()[3].childNodes[1].lib;
        }
        // NOTE(review): `len` is assigned without var here -- leaks a global
        for (t = 0, len = map.length; map[t]; t++) {
            if (t == len - 1) {
                var link = parentLayer.environment.getElement().previousSibling.lib;
                link.attribute.addClass("Selected");
                if (d.html.current) {
                    d.html.current[1].attribute.removeClass("Selected");
                }
                d.html.current = [_element, link];
                return;
            }
            parentLayer = d.html.openHtmlTree(map[t], parentLayer, map[t + 1]);
        }
    }
},
navigate:function(_index,_element){
    // Switch the right-pane sub-view tab and re-render the properties pane.
    with(firebug){
        el.right.html.nav[d.html.nIndex].attribute.removeClass("Selected");
        el.right.html.nav[_index].attribute.addClass("Selected");
        d.html.nIndex = _index;
        d.html.openProperties();
    }
},
openHtmlTree:function(_element,_parent,_returnParentElementByElement,_event){
    // Expand (or collapse, on a repeated click) one level of the HTML tree
    // under _parent. When _returnParentElementByElement is given, returns
    // the sub-container created for that child so inspect() can descend.
    with(firebug){
        var element = _element || env.targetWindow.document.documentElement,
            parent = _parent || el.left.html.container,
            returnParentEl = _returnParentElementByElement || null,
            returnParentVal = null,
            len = element.childNodes.length,
            nodeLink;
        if(parent!=el.left.html.container){
            // mark this node as the current selection
            nodeLink = parent.environment.getParent().lib.child.get()[0].lib;
            if (d.html.current) {
                d.html.current[1].attribute.removeClass("Selected");
            }
            nodeLink.attribute.addClass("Selected");
            d.html.current = [_element,nodeLink];
            d.html.openProperties();
        };
        // leaf (or text-only) nodes have nothing to expand
        if(element.childNodes&&(len==0||(len==1&&element.childNodes[0].nodeType==3)))return;
        parent.clean();
        if(parent.opened&&Boolean(_returnParentElementByElement)==false){
            // second click: collapse
            parent.opened = false;
            parent.element.previousSibling.lib.attribute.removeClass("Open");
            parent.element.lib.attribute.removeClass("OpenSubContainer");
            return;
        };
        if (parent != el.left.html.container) {
            parent.element.previousSibling.lib.attribute.addClass("Open");
            parent.element.lib.attribute.addClass("OpenSubContainer");
            parent.opened = true;
        };
        if(element==document.documentElement){
            // NOTE(review): raw "<"/">" in these tag strings look like HTML
            // entities (&lt;/&gt;) lost in transit -- confirm against upstream
            new lib.element("A").attribute.addClass("Block").update("<span class='DarkBlue'><<span class='Blue'>html</span>>").insert(parent);
        };
        for(var i=0; i<=len; i++){
            if(i==len){
                // NOTE(review): inserts the closing tag into `container`,
                // which still refers to the LAST child's row from the loop
                // below -- verify this should not be `parent`
                new lib.element("A").attribute.addClass("Block").update("<span class='DarkBlue'></<span class='Blue'>"+element.nodeName.toLowerCase()+"</span>>").insert(container);
                break;
            }
            var item = element.childNodes[i];
            if (item.nodeType != 3){
                var container = new lib.element().attribute.addClass("Block").insert(parent),
                    link = new lib.element("A").attribute.addClass("Link").insert(container),
                    spacer = new lib.element("SPAN").attribute.addClass("Spacer").update(" ").insert(link),
                    html = new lib.element("SPAN").attribute.addClass("Content").update(d.highlight(item)).insert(link),
                    subContainer = new lib.element("DIV").attribute.addClass("SubContainer").insert(container),
                    view = lib.util.Element.getView(item);
                link.event.addListener("click", lib.util.Curry(d.html.openHtmlTree, window, item, subContainer, false));
                link.event.addListener("mouseover", lib.util.Curry(d.html.highlight, window, item, false));
                link.event.addListener("mouseout", lib.util.Curry(d.html.highlight, window, item, true));
                returnParentVal = returnParentEl == item ? subContainer : returnParentVal;
                if(d.html.current==null&&item==document.body){
                    // auto-select and auto-expand <body> on first render
                    link.attribute.addClass("Selected");
                    d.html.current = [item,link];
                    d.html.openHtmlTree(item,subContainer);
                }
                if(element.nodeName!="HEAD"&&element!=document.documentElement&&(view.visibility=="hidden"||view.display=="none")){
                    container.attribute.addClass("Unvisible");
                };
                if (item.childNodes){
                    var childLen = item.childNodes.length;
                    if (childLen == 1 && item.childNodes[0].nodeType == 3) {
                        // text-only child: render its text and closing tag inline
                        html.child.add(document.createTextNode(item.childNodes[0].nodeValue.substring(0, 50)));
                        html.child.add(document.createTextNode("</"));
                        html.child.add(new lib.element("span").attribute.addClass("Blue").update(item.nodeName.toLowerCase()).environment.getElement());
                        html.child.add(document.createTextNode(">"));
                        continue;
                    }
                    else
                    if (childLen > 0) {
                        link.attribute.addClass("Parent");
                    }
                }
            }
        };
        return returnParentVal;
    }
},
openProperties:function(){
    // Render the right pane for the selected node: either its computed
    // style (fixed property list) or its DOM member dump.
    with(firebug){
        var index = d.html.nIndex;
        var node = d.html.current[0];
        d.clean(el.right.html.content);
        var str = "";
        switch(index){
            case "computedStyle":
                var property = ["opacity","filter","azimuth","background","backgroundAttachment","backgroundColor","backgroundImage","backgroundPosition","backgroundRepeat","border","borderCollapse","borderColor","borderSpacing","borderStyle","borderTop","borderRight","borderBottom","borderLeft","borderTopColor","borderRightColor","borderBottomColor","borderLeftColor","borderTopStyle","borderRightStyle","borderBottomStyle","borderLeftStyle","borderTopWidth","borderRightWidth","borderBottomWidth","borderLeftWidth","borderWidth","bottom","captionSide","clear","clip","color","content","counterIncrement","counterReset","cue","cueAfter","cueBefore","cursor","direction","display","elevation","emptyCells","cssFloat","font","fontFamily","fontSize","fontSizeAdjust","fontStretch","fontStyle","fontVariant","fontWeight","height","left","letterSpacing","lineHeight","listStyle","listStyleImage","listStylePosition","listStyleType","margin","marginTop","marginRight","marginBottom","marginLeft","markerOffset","marks","maxHeight","maxWidth","minHeight","minWidth","orphans","outline","outlineColor","outlineStyle","outlineWidth","overflow","padding","paddingTop","paddingRight","paddingBottom","paddingLeft","page","pageBreakAfter","pageBreakBefore","pageBreakInside","pause","pauseAfter","pauseBefore","pitch","pitchRange","playDuring","position","quotes","richness","right","size","speak","speakHeader","speakNumeral","speakPunctuation","speechRate","stress","tableLayout","textAlign","textDecoration","textIndent","textShadow","textTransform","top","unicodeBidi","verticalAlign","visibility","voiceFamily","volume","whiteSpace","widows","width","wordSpacing","zIndex"].sort();
                // standards: getComputedStyle; IE: currentStyle
                var view = document.defaultView?document.defaultView.getComputedStyle(node,null):node.currentStyle;
                for(var i=0,len=property.length; i<len; i++){
                    var item = property[i];
                    if(!view[item])continue;
                    str+="<div class='CSSItem'><div class='CSSProperty'>"+item+"</div><div class='CSSValue'>"+d.highlight(view[item])+"</div></div>";
                }
                el.right.html.content.update(str);
                break;
            case "dom":
                d.dom.open(node,el.right.html.content,lib.env.ie);
                break;
        }
    }
}
},
inspector:{
enabled:false,
el:null,
inspect:function(_element,_bgInspector){
with(firebug){
var pos = env.targetWindow.firebug.lib.util.Element.getPosition(_element);
env.targetWindow.firebug.el[_bgInspector&&"bgInspector"||"borderInspector"].environment.addStyle({
"width":_element.offsetWidth+"px", "height":_element.offsetHeight+"px",
"top":pos.offsetTop-(_bgInspector?0:2)+"px", "left":pos.offsetLeft-(_bgInspector?0:2)+"px",
"display":"block"
});
9
if(!_bgInspector){
d.inspector.el = _element;
}
};
},
toggle:function(_absoluteValue,_event){
    // Toggle inspect mode. _absoluteValue may force a state; when the call
    // arrives as an event handler (arg has clientX) it is treated as a
    // plain toggle instead of a forced value.
    with (firebug) {
        if(_absoluteValue==d.inspector.enabled)
            return;
        d.inspector.enabled = _absoluteValue!=undefined&&!_absoluteValue.clientX?_absoluteValue:!d.inspector.enabled;
        el.button.inspect.attribute[(d.inspector.enabled ? "add" : "remove") + "Class"]("Enabled");
        if(d.inspector.enabled==false){
            el.borderInspector.environment.addStyle({ "display":"none" });
            d.inspector.el = null;
        } else if(lib.env.dIndex!="html") {
            // NOTE(review): reads lib.env.dIndex, but the active-tab index is
            // kept on env.dIndex elsewhere in this file -- confirm intent
            if (env.popupWin) {
                env.popupWin.firebug.d.navigate("html");
            } else {
                d.navigate("html");
            }
        }
    }
}
},
scripts:{
index:-1,
lineNumbers:false,
open:function(_index){
with(firebug){
d.scripts.index = _index;
el.left.scripts.container.update("");
var script = document.getElementsByTagName("script")[_index],uri = script.src||document.location.href,source;
try {
if(uri!=document.location.href){
source = env.cache[uri]||lib.xhr.get(uri).responseText;
env.cache[uri] = source;
} else {
source = script.innerHTML;
}
source = source.replace(/<|>/g,function(_ch){
return ({"<":"<",">":">"})[_ch];
});
if(!d.scripts.lineNumbers)
el.left.scripts.container.child.add(
new lib.element("DIV").attribute.addClass("CodeContainer").update(source)
);
else {
source = source.split("<br />");
for (var i = 0; i < source.length; i++) {
el.left.scripts.container.child.add(new lib.element("DIV").child.add(new lib.element("DIV").attribute.addClass("LineNumber").update(i + 1), new lib.element("DIV").attribute.addClass("Code").update(" " + source[i]), new lib.element("DIV").attribute.addClass('Clear')));
};
};
} catch(e){
el.left.scripts.container.child.add(
new lib.element("DIV").attribute.addClass("CodeContainer").update("<em>Access to restricted URI denied</em>")
);
}
}
},
toggleLineNumbers:function(){
with(firebug){
d.scripts.lineNumbers = !d.scripts.lineNumbers;
el.button.scripts.lineNumbers.attribute[(d.scripts.lineNumbers ? "add" : "remove") + "Class"]("Enabled");
d.scripts.open( d.scripts.index );
}
},
refresh:function(){
with(firebug){
el.button.scripts.selectbox.clean();
var collection = env.targetWindow.document.getElementsByTagName("script");
for(var i=0,len=collection.length; i<len; i++){
var item = collection[i],
fileName = getFileName(item.src||item.baseURI||"..");
d.scripts.index=d.scripts.index<0?i:d.scripts.index;
el.button.scripts.selectbox.child.add(
new lib.element("OPTION").attribute.set("value",i).update(fileName)
);
}
d.scripts.open( d.scripts.index );
}
}
},
str: {
    open:function(_str){
        // Show a long string in the string tab, preserving line breaks.
        with(firebug){
            d.navigate("str");
            el.left.str.container.update(_str.replace(/\n/g,"<br />"))
        }
    }
},
xhr:{
    objects:[],   // monitored entries: [expressionString, evaluatedXHRObject]
    addObject:function(){
        // Evaluate each argument as an expression in the target window and
        // register the result for monitoring; unparsable entries are skipped.
        with(firebug){
            for(var i=0,len=arguments.length; i<len; i++){
                try {
                    var item = arguments[i],
                        val = env.targetWindow.eval(item);
                    d.xhr.objects.push([item, val]);
                } catch(e){
                    continue;
                }
            }
        }
    },
    open:function(){
        // Build the four-column XHR monitor layout (name / status /
        // readyState / response) and start the refresh poll.
        with(firebug){
            el.left.xhr.container.update("");
            el.left.xhr.name = new lib.element("DIV").attribute.addClass("BlockContent").insert(new lib.element("DIV").attribute.addClass("Block").environment.addStyle({ "width":"20%" }).insert(el.left.xhr.container));
            el.left.xhr.nameTitle = new lib.element("STRONG").update("Object Name:").insert(el.left.xhr.name);
            el.left.xhr.nameContent = new lib.element("DIV").insert(el.left.xhr.name);
            el.left.xhr.status = new lib.element("DIV").attribute.addClass("BlockContent").insert(new lib.element("DIV").attribute.addClass("Block").environment.addStyle({ "width":"10%" }).insert(el.left.xhr.container));
            el.left.xhr.statusTitle = new lib.element("STRONG").update("Status:").insert(el.left.xhr.status);
            el.left.xhr.statusContent = new lib.element("DIV").insert(el.left.xhr.status);
            el.left.xhr.readystate = new lib.element("DIV").attribute.addClass("BlockContent").insert(new lib.element("DIV").environment.addStyle({ "width":"15%" }).attribute.addClass("Block").insert(el.left.xhr.container));
            // NOTE(review): the double assignment below also clobbers
            // nameTitle (set above) -- looks like a copy-paste leftover
            el.left.xhr.readystateTitle =el.left.xhr.nameTitle = new lib.element("STRONG").update("Ready State:").insert(el.left.xhr.readystate);
            el.left.xhr.readystateContent = new lib.element("DIV").insert(el.left.xhr.readystate);
            el.left.xhr.response = new lib.element("DIV").attribute.addClass("BlockContent").insert(new lib.element("DIV").environment.addStyle({ "width":(lib.env.ie?"50":"55")+"%" }).attribute.addClass("Block").insert(el.left.xhr.container));
            el.left.xhr.responseTitle = new lib.element("STRONG").update("Response:").insert(el.left.xhr.response);
            el.left.xhr.responseContent = new lib.element("DIV").insert(el.left.xhr.response);
            setTimeout(d.xhr.refresh,500);
        }
    },
    refresh:function(){
        // Redraw all monitored XHR objects; keeps polling every 500ms while
        // the XHR tab stays active.
        with(firebug){
            el.left.xhr.nameContent.update("");
            el.left.xhr.statusContent.update("");
            el.left.xhr.readystateContent.update("");
            el.left.xhr.responseContent.update("");
            for(var i=0,len=d.xhr.objects.length; i<len; i++){
                var item = d.xhr.objects[i],
                    response = item[1].responseText;
                if(Boolean(item[1])==false)continue;
                el.left.xhr.nameContent.child.add(new lib.element("span").update(item[0]));
                try {
                    el.left.xhr.statusContent.child.add(new lib.element("span").update(item[1].status));
                } catch(e){ el.left.xhr.statusContent.child.add(new lib.element("span").update(" ")); }
                el.left.xhr.readystateContent.child.add(new lib.element("span").update(item[1].readyState));
                el.left.xhr.responseContent.child.add(new lib.element("span").child.add(
                    new lib.element("A").event.addListener("click",lib.util.Curry(d.str.open,window,response)).update(" "+response.substring(0,50))
                ));
            };
            if(env.dIndex=="xhr")
                setTimeout(d.xhr.refresh,500);
        }
    }
},
navigateRightColumn:function(_index,_open){
    // Show/hide the right column; the left column yields 30% when open.
    with(firebug){
        el.left.container.environment.addStyle({ "width":_open?"70%":"100%" });
        el.right.container.environment.addStyle({ "display":_open?"block":"none" });
    }
},
navigate:function(_index){
with(firebug){
var open = _index, close = env.dIndex;
env.dIndex = open;
settings.hide();
el.button[close].container.environment.addStyle({ "display":"none" });
el.left[close].container.environment.addStyle({ "display":"none" });
el.right[close].container.environment.addStyle({ "display":"none" });
el.button[open].container.environment.addStyle({ "display":"inline" });
el.left[open].container.environment.addStyle({ "display":"block" });
el.right[open].container.environment.addStyle({ "display":"block" });
if(el.nav[close])
el.nav[close].attribute.removeClass("Selected");
if(el.nav[open])
el.nav[open].attribute.addClass("Selected");
switch(open){
case "console":
d.navigateRightColumn(_index);
break;
case "html":
d.navigateRightColumn(_index,true);
if(!d.html.current){
var t=Number(new Date);
d.html.openHtmlTree();
}
break;
case "css":
d.navigateRightColumn(_index,true);
d.css.refresh();
break;
case "scripts":
d.navigateRightColumn(_index);
d.scripts.refresh();
break;
case "dom":
d.navigateRightColumn(_index);
if(el.left.dom.container.environment.getElement().innerHTML==""){
var t=Number(new Date);
d.dom.open(eval(el.button.dom.textbox.environment.getElement().value),el.left.dom.container);
}
break;
case "xhr":
d.navigateRightColumn(_index);
d.xhr.open();
break;
}
}
}
},
getFileName:function(_path){
var match = _path&&_path.match(/[\w\-\.\?\=\&]+$/);
return match&&match[0]||_path;
},
cancelEvent:function(_event){
if(_event.stopPropagation)
_event.stopPropagation();
if(_event.preventDefault)
_event.preventDefault();
},
getSelection:function(_el){
    // Return {start, length} of the selection inside textbox _el. On IE
    // (no selectionStart) the offsets are derived from a TextRange anchored
    // to the element, and written back onto _el for later use.
    with(firebug){
        if(lib.env.ie){
            var range = document.selection.createRange(),stored = range.duplicate();
            stored.moveToElementText(_el);
            stored.setEndPoint('EndToEnd', range);
            _el.selectionStart = stored.text.length - range.text.length;
            _el.selectionEnd = _el.selectionStart + range.text.length;
        }
        return {
            start:_el.selectionStart,
            length:_el.selectionEnd-_el.selectionStart
        }
    }
},
tab:function(_el,_event){
    // Keydown helper: insert a literal TAB character at the caret instead
    // of letting Tab move focus out of the textbox.
    with(firebug){
        if(_event.keyCode==9){
            if(_el.setSelectionRange){
                // standards path: splice the TAB into the value and restore the caret
                var position = firebug.getSelection(_el);
                _el.value = _el.value.substring(0,position.start) + String.fromCharCode(9) + _el.value.substring(position.start+position.length,_el.value.length);
                _el.setSelectionRange(position.start+1,position.start+1);
            } else if(document.selection) {
                // IE TextRange path
                var range = document.selection.createRange(), isCollapsed = range.text == '';
                range.text = String.fromCharCode(9);
                range.moveStart('character', -1);
            }
            firebug.cancelEvent(_event);
            // IE drops focus after the cancelled keystroke; restore it
            if(lib.env.ie)
                setTimeout(_el.focus,100);
        };
    }
},
listen: {
addXhrObject:function(){
    // "Add" button on the XHR tab: register the comma-separated expressions
    // typed into the textbox for monitoring.
    with(firebug){
        d.xhr.addObject.apply(env.targetWindow, el.button.xhr.textbox.environment.getElement().value.split(","));
    }
},
consoleTextbox:function(_event){
    // Console input keydown: Enter evaluates (unless multiline mode wants
    // Shift+Enter); up/down arrows walk the command history.
    with(firebug){
        if(_event.keyCode==13&&(env.multilinemode==false||_event.shiftKey==false)){
            d.console.historyIndex = d.console.history.length;
            d.console.eval(el.left.console.input.environment.getElement().value);
            return false;
        }
        switch(_event.keyCode){
            case 40:   // arrow down: newer history entry
                if(d.console.history[d.console.historyIndex+1]){
                    d.console.historyIndex+=1;
                    el.left.console.input.update( d.console.history[d.console.historyIndex] );
                }
                break;
            case 38:   // arrow up: older history entry
                if(d.console.history[d.console.historyIndex-1]){
                    d.console.historyIndex-=1;
                    el.left.console.input.update( d.console.history[d.console.historyIndex] );
                }
                break;
        }
    }
},
cssSelectbox:function(){
    // Stylesheet selectbox change: open the chosen sheet.
    with(firebug){
        d.css.open(el.button.css.selectbox.environment.getElement().selectedIndex);
    }
},
domTextbox:function(_event){
    // DOM tab textbox: Enter evaluates the expression and inspects it.
    with(firebug){
        if(_event.keyCode==13){
            d.dom.open(eval(el.button.dom.textbox.environment.getElement().value),el.left.dom.container);
        }
    }
},
inspector:function(){
    // Click while inspect mode is active: open the element currently under
    // the inspector in the HTML tree (in the popup window when one exists).
    with(firebug){
        if (env.popupWin) {
            env.popupWin.firebug.d.html.inspect(firebug.d.inspector.el);
        } else {
            firebug.d.html.inspect(firebug.d.inspector.el);
        }
    }
},
keyboard:function(_event){
with(firebug){
if(_event.keyCode==27 && d.inspector.enabled){
d.inspector.toggle();
} else if(_event.keyCode === 123 && _event.ctrlKey || _event.metaKey) {
if(env.isPopup){
win.dock();
}else {
win.newWindow();
}
} else if(
(_event.keyCode === 123 && !_event.ctrlKey && !_event.metaKey) ||
(_event.keyCode === 76 && (_event.ctrlKey || _event.metaKey) && _event.shiftKey) ||
(_event.keyCode === 13 && _event.shiftKey)) {
if(env.isPopup){
win.dock();
} else if (el.main.environment.getStyle("display") === 'none') {
win.show();
} else {
win.hide();
}
}
}
},
mouse:function(_event){
    // Mousemove while inspect mode is on: resolve the hovered element
    // (several fallbacks for older engines) and outline it, skipping the
    // document roots and the panel's own chrome.
    with(firebug){
        var target;
        if(document.elementFromPoint) {
            target = document.elementFromPoint(_event.clientX, _event.clientY);
        } else {
            if(lib.env.ie) {
                target = _event.srcElement;
            } else {
                target = _event.explicitOriginalTarget || _event.target;
            }
        }
        if( d.inspector.enabled&&
            target!=document.body&&
            target!=document.firstChild&&
            target!=document.childNodes[1]&&
            target!=el.borderInspector.environment.getElement()&&
            target!=el.main.environment.getElement()&&
            target.offsetParent!=el.main.environment.getElement() ) {
            d.inspector.inspect(target);
        }
    }
},
runMultiline:function(){
    // "Run" button of the multiline console: evaluate the whole textarea.
    with(firebug){
        d.console.eval.call(window,el.right.console.input.environment.getElement().value);
    }
},
runCSS:function(){
with(firebug){
var source = el.right.css.input.environment.getElement().value.replace(/\n|\t/g,"").split("}");
for(var i=0, len=source.length; i<len; i++){
var item = source[i]+"}", rule = !lib.env.ie?item:item.split(/{|}/),
styleSheet = document.styleSheets[0];
console.log(rule);
if(item.match(/.+\{.+\}/)){
if(lib.env.ie)
styleSheet.addRule(rule[0],rule[1]);
else
styleSheet.insertRule( rule, styleSheet.cssRules.length );
}
}
}
},
scriptsSelectbox:function(){
    // Script selectbox change: show the chosen script's source.
    with(firebug){
        d.scripts.open(parseInt(el.button.scripts.selectbox.environment.getElement().value));
    }
},
xhrTextbox:function(_event){
    // XHR textbox: Enter registers the typed expressions for monitoring.
    with(firebug){
        if(_event.keyCode==13){
            d.xhr.addObject.apply(env.targetWindow, el.button.xhr.textbox.environment.getElement().value.split(","));
        }
    }
}
}
};
(function(_scope){
_scope.lib = {};
var pi = _scope.lib; pi.version = [1.1,2008091000];   // [release, build stamp]
// Browser sniffing via userAgent substrings.
pi.env = {
    ie: /MSIE/i.test(navigator.userAgent),
    ie6: /MSIE 6/i.test(navigator.userAgent),
    ie7: /MSIE 7/i.test(navigator.userAgent),
    ie8: /MSIE 8/i.test(navigator.userAgent),
    firefox: /Firefox/i.test(navigator.userAgent),
    opera: /Opera/i.test(navigator.userAgent),
    webkit: /Webkit/i.test(navigator.userAgent),
    camino: /Camino/i.test(navigator.userAgent)
};
// DOM lookup shorthands: pi.get(id), pi.get.byTag(name), pi.get.byClass(...).
pi.get = function(){
    return document.getElementById(arguments[0]);
};
pi.get.byTag = function(){
    return document.getElementsByTagName(arguments[0]);
};
pi.get.byClass = function(){ return document.getElementsByClassName.apply(document,arguments); };
pi.util = {
Array:{
clone:function(_array,_undeep){
var tmp = [];
Array.prototype.push.apply(tmp,_array);
pi.util.Array.forEach(tmp,function(_item,_index,_source){
if(_item instanceof Array&&!_undeep)
_source[_index] = pi.util.Array.clone(_source[_index]);
});
return tmp;
},
count:function(_array,_value){
var count = 0;
pi.util.Array.forEach(_array,function(){
count+=Number(arguments[0]==_value);
});
return count;
},
forEach:function(_array,_function){
if(_array.forEach)
return _array.forEach(_function);
for(var i=0,len=_array.length; i<len; i++)
_function.apply(_array,[_array[i],i,_array]);
},
getLatest:function(_array){
return _array[_array.length-1];
},
indexOf:function(_array,_value){
if(!pi.env.ie){
return _array.indexOf(_value);
};
var index = -1;
for(var i=0, len=_array.length; i<len; i++){
if(_array[i]==_value){
index = i;
break;
}
}
return index;
},
remove:function(_array,_index){
var result = _array.slice(0,_index);
_array = Array.prototype.push.apply(result,_array.slice(_index+1));
return result;
}
},
		Curry:function(_fn,_scope){
			// Bind _fn to _scope with the extra arguments pre-applied;
			// call-time arguments are appended after the curried ones.
			var fn = _fn, scope = _scope||window, args = Array.prototype.slice.call(arguments,2);
			return function(){
				return fn.apply(scope,args.concat( Array.prototype.slice.call(arguments,0) ));
			};
		},
		Extend:function(_superClass,_prototype,_skipClonning){
			// Build a subclass via pi.base, merging _prototype over the
			// superclass prototype and chaining the constructors.
			var object = new pi.base;
			if(_prototype["$Init"]){
				object.init = _prototype["$Init"];
				delete _prototype["$Init"];
			};
			object.body = _superClass==pi.base?_prototype:pi.util.Hash.merge(_prototype,_superClass.prototype);
			object.init=object.init||function(){
				if(_superClass!=pi.base)
					_superClass.apply(this,arguments);
			};
			return object.build(_skipClonning);
		},
		IsArray:function(_object){
			// Heuristic array/array-like check covering NodeList,
			// NamedNodeMap and HTMLCollection on DOM-capable engines, with
			// a duck-typing fallback for old IE collections.
			if(_object===null){
				return false;
			}
			if(window.NodeList&&window.NamedNodeMap&&!pi.env.ie8){
				if(_object instanceof Array||_object instanceof NodeList||_object instanceof NamedNodeMap||(window.HTMLCollection&&_object instanceof HTMLCollection))
					return true;
			};
			if(!_object||_object==window||typeof _object=="function"||typeof _object=="string"||typeof _object.length!="number"){
				return false
			};
			var len = _object.length;
			if(len>0&&_object[0]!=undefined&&_object[len-1]!=undefined){
				return true;
			} else {
				// Sparse/empty collection: accept only if every key looks
				// like a collection API member rather than plain data.
				for(var key in _object){
					if(key!="item"&&key!="length"&&key!="setNamedItemNS"&&key!="setNamedItem"&&key!="getNamedItem"&&key!="removeNamedItem"&&key!="getNamedItemNS"&&key!="removeNamedItemNS"&&key!="tags"){
						return false;
					}
				}
				return true
			};
		},
		IsHash:function(_object){
			// True for plain-object hashes (not DOM nodes, not array-likes).
			return _object && typeof _object=="object"&&(_object==window||_object instanceof Object)&&!_object.nodeName&&!pi.util.IsArray(_object)
		},
		// Functions registered here run once the DOM is ready (see the
		// AddEvent registration at the bottom of this module).
		Init:[],
		AddEvent: function(_element,_eventName,_fn,_useCapture){
			// addEventListener/attachEvent wrapper; returns a curried
			// AddEvent bound to the same element for chained registration.
			_element[pi.env.ie?"attachEvent":"addEventListener"]((pi.env.ie?"on":"")+_eventName,_fn,_useCapture||false);
			return pi.util.Curry(pi.util.AddEvent,this,_element);
		},
		RemoveEvent: function(_element,_eventName,_fn,_useCapture){
			// removeEventListener/detachEvent wrapper, mirroring AddEvent.
			_element[pi.env.ie?"detachEvent":"removeEventListener"]((pi.env.ie?"on":"")+_eventName,_fn,_useCapture||false);
			return pi.util.Curry(pi.util.RemoveEvent,this,_element);
		},
Element:{
			// CSS-class helpers. Old IE (before 8) exposes the class
			// attribute as "className"; these wrappers normalize that.
			addClass:function(_element,_class){
				if( !pi.util.Element.hasClass(_element,_class) )
					pi.util.Element.setClass(_element, pi.util.Element.getClass(_element) + " " + _class );
			},
			getClass:function(_element){
				return _element.getAttribute(pi.env.ie&&!pi.env.ie8?"className":"class")||"";
			},
			hasClass:function(_element,_class){
				return pi.util.Array.indexOf(pi.util.Element.getClass(_element).split(" "),_class)>-1;
			},
			removeClass:function(_element,_class){
				if( pi.util.Element.hasClass(_element,_class) ){
					// (getClass ignores its second argument.)
					var names = pi.util.Element.getClass(_element,_class).split(" ");
					pi.util.Element.setClass(
						_element,
						pi.util.Array.remove(names,pi.util.Array.indexOf(names,_class)).join(" ")
					);
				}
			},
			setClass:function(_element,_value){
				// IE8 honours "class" but legacy code may still read
				// "className", so both attributes are kept in sync there.
				if(pi.env.ie8){
					_element.setAttribute("className", _value );
					_element.setAttribute("class", _value );
				} else {
					_element.setAttribute(pi.env.ie?"className":"class", _value );
				}
			},
			toggleClass:function(){
				if(pi.util.Element.hasClass.apply(this,arguments))
					pi.util.Element.removeClass.apply(this,arguments);
				else
					pi.util.Element.addClass.apply(this,arguments);
			},
			// Opacity helpers: IE expresses opacity via the alpha filter
			// string instead of the CSS "opacity" property.
			getOpacity:function(_styleObject){
				var styleObject = _styleObject;
				if(!pi.env.ie)
					return styleObject["opacity"];
				var alpha = styleObject["filter"].match(/opacity\=(\d+)/i);
				return alpha?alpha[1]/100:1;
			},
			setOpacity:function(_element,_value){
				if(!pi.env.ie)
					return pi.util.Element.addStyle(_element,{ "opacity":_value });
				_value*=100;
				pi.util.Element.addStyle(_element,{ "filter":"alpha(opacity="+_value+")" });
				// NOTE(review): pi.util.Element has no _parent_, so this
				// returns undefined on the IE path — confirm intent.
				return this._parent_;
			},
			getPosition:function(_element){
				// Absolute page position accumulated over offsetParents,
				// merged with computed-style box values.
				var parent = _element,offsetLeft = document.body.offsetLeft, offsetTop = document.body.offsetTop, view = pi.util.Element.getView(_element);
				while(parent&&parent!=document.body&&parent!=document.firstChild){
					offsetLeft +=parseInt(parent.offsetLeft);
					offsetTop += parseInt(parent.offsetTop);
					parent = parent.offsetParent;
				};
				return {
					"bottom":view["bottom"],
					"clientLeft":_element.clientLeft,
					"clientTop":_element.clientTop,
					"left":view["left"],
					"marginTop":view["marginTop"],
					"marginLeft":view["marginLeft"],
					"offsetLeft":offsetLeft,
					"offsetTop":offsetTop,
					"position":view["position"],
					"right":view["right"],
					"top":view["top"],
					"zIndex":view["zIndex"]
				};
			},
			getSize:function(_element){
				// Computed and layout box sizes in one hash.
				var view = pi.util.Element.getView(_element);
				return {
					"height":view["height"],
					"clientHeight":_element.clientHeight,
					"clientWidth":_element.clientWidth,
					"offsetHeight":_element.offsetHeight,
					"offsetWidth":_element.offsetWidth,
					"width":view["width"]
				}
			},
			addStyle:function(_element,_style){
				// Apply a hash of inline styles, normalizing "float" and
				// routing IE opacity through setOpacity.
				for(var key in _style){
					key = key=="float"?pi.env.ie?"styleFloat":"cssFloat":key;
					if (key == "opacity" && pi.env.ie) {
						pi.util.Element.setOpacity(_element,_style[key]);
						continue;
					}
					// Ignore values the engine rejects (e.g. bad IE values).
					try {
						_element.style[key] = _style[key];
					}catch(e){}
				}
			},
			getStyle:function(_element,_property){
				// Inline style value; without _property, the style object.
				_property = _property=="float"?pi.env.ie?"styleFloat":"cssFloat":_property;
				if(_property=="opacity"&&pi.env.ie)
					return pi.util.Element.getOpacity(_element.style);
				return typeof _property=="string"?_element.style[_property]:_element.style;
			},
			getValue:function(_element){
				// Form-field value, selected option value, or innerHTML.
				switch(_element.nodeName.toLowerCase()){
					case "input":
					case "textarea":
						return _element.value;
					case "select":
						return _element.options[_element.selectedIndex].value;
					default:
						return _element.innerHTML;
						break;
				}
			},
getView:function(_element,_property){
var view = document.defaultView?document.defaultView.getComputedStyle(_element,null):_element.currentStyle;
_property = _property=="float"?pi.env.ie?"styleFloat":"cssFloat":_property;
if(_property=="opacity"&&pi.env.ie)
return pi.util.Element.getOpacity(_element,view);
return typeof _property=="string"?view[_property]:view;
}
},
		Hash: {
			clone:function(_hash,_undeep){
				// Copy a hash; nested arrays/hashes are cloned recursively
				// unless _undeep is truthy.
				var tmp = {};
				for(var key in _hash){
					if( !_undeep&&pi.util.IsArray( _hash[key] ) ){
						tmp[key] = pi.util.Array.clone( _hash[key] );
					} else if( !_undeep&&pi.util.IsHash( _hash[key] ) ){
						tmp[ key ] = pi.util.Hash.clone(_hash[key]);
					} else {
						tmp[key] = _hash[key];
					}
				}
				return tmp;
			},
			merge:function(_hash,_source,_undeep){
				// Merge _source into _hash in place: existing _hash scalars
				// win, arrays are concatenated, hashes merged recursively.
				// Returns _hash.
				for(var key in _source){
					var value = _source[key];
					if (!_undeep&&pi.util.IsArray(_source[key])) {
						if(pi.util.IsArray( _hash[key] )){
							// NOTE(review): appends into _source[key] in
							// place; value references that same array.
							Array.prototype.push.apply( _source[key], _hash[key] )
						}
						else
							value = pi.util.Array.clone(_source[key]);
					}
					else if (!_undeep&&pi.util.IsHash(_source[key])) {
						if (pi.util.IsHash(_hash[key])) {
							value = pi.util.Hash.merge(_hash[key], _source[key]);
						} else {
							value = pi.util.Hash.clone( _source[key] );
						}
					} else if( _hash[key] )
						value = _hash[ key ];
					_hash[key] = value;
				};
				return _hash;
			}
		},
		String:{
			format:function(_str){
				// Tiny positional formatter: format("{0}-{1}", a, b).
				var values = Array.prototype.slice.call(arguments,1);
				return _str.replace(/\{(\d)\}/g,function(){
					return values[arguments[1]];
				})
			}
		},
		GetViewport:function(){
			// Viewport size, falling back to body for quirks-mode IE.
			return {
				height:document.documentElement.clientHeight||document.body.clientHeight,
				width:document.documentElement.clientWidth||document.body.clientWidth
			}
		}
};
pi.base = function(){
this.body = {};
this.init = null;
this.build = function(_skipClonning){
var base = this, skipClonning = _skipClonning||false, _private = {},
fn = function(){
var _p = pi.util.Hash.clone(_private);
if(!skipClonning){
for(var key in this){
if(pi.util.IsArray( this[ key ] ) ){
this[key] = pi.util.Array.clone( this[key] );
} else
if( pi.util.IsHash(this[key]) ){
this[key] = pi.util.Hash.clone(
this[ key ],
function(_key,_object){
this[ _key ]._parent_ = this;
}
);
//this[key]._parent_ = this;
}
}
};
base.createAccessors( _p, this );
if(base.init)
return base.init.apply(this,arguments);
return this;
};
this.movePrivateMembers(this.body,_private);
if(this.init){
fn["$Init"] = this.init;
};
fn.prototype = this.body;
return fn;
};
this.createAccessors = function(_p, _branch){
var getter = function(_property){ return this[_property]; },
setter = function(_property,_value){ this[_property] = _value; return _branch._parent_||_branch; };
for (var name in _p) {
var isPrivate = name.substring(0, 1) == "_", title = name.substring(1, 2).toUpperCase() + name.substring(2);
if (isPrivate) {
_branch[(_branch["get" + title]?"_":"")+"get" + title] = pi.util.Curry(getter,_p,name);
_branch[(_branch["set" + title]?"_":"")+"set" + title] = pi.util.Curry(setter,_p,name);
}
else
if (pi.util.IsHash(_p[name])){
_branch[name]._parent_ = _branch;
if(!_branch[name])
_branch[name] = {};
this.createAccessors(_p[name], _branch[name]);
}
};
};
this.movePrivateMembers = function(_object, _branch){
for (var name in _object) {
var isPrivate = name.substring(0, 1) == "_";
if (isPrivate) {
_branch[name] = _object[name];
delete _object[name];
}
else
if (pi.util.IsHash(_object[name])){
_branch[name] = {};
this.movePrivateMembers(_object[name], _branch[name]);
}
};
};
};
pi.element = new pi.base;
pi.element.init = function(_val){
this.environment.setElement(
typeof _val=="string"||!_val?
document.createElement(_val||"DIV"):
_val
);
return this;
};
pi.element.body = {
"addStyle":function(){
return this.environment.addStyle.apply(this.environment,arguments);
},
"clean":function(){
var childs = this.child.get();
while(childs.length){
childs[0].parentNode.removeChild(childs[0]);
}
},
"clone":function(_deep){
return this.environment.getElement().cloneNode(_deep);
},
"insert":function(_element){
_element = _element.environment?_element.environment.getElement():_element;
_element.appendChild(this.environment.getElement());
return this;
},
"insertAfter":function(_referenceElement){
_referenceElement = _referenceElement.environment?_referenceElement.environment.getElement():_referenceElement;
_referenceElement.nextSibling?this.insertBefore(_referenceElement.nextSibling):this.insert(_referenceElement.parentNode);
return this;
},
"insertBefore":function(_referenceElement){
_referenceElement = _referenceElement.environment?_referenceElement.environment.getElement():_referenceElement;
_referenceElement.parentNode.insertBefore(this.environment.getElement(),_referenceElement);
return this;
},
"query":function(_expression,_resultType,namespaceResolver,_result){
return pi.xpath(_expression,_resultType||"ORDERED_NODE_SNAPSHOT_TYPE",this.environment.getElement(),_namespaceResolver,_result);
},
"remove":function(){
if (this.environment.getParent()) {
this.environment.getParent().removeChild(this.environment.getElement());
}
},
"update":function(_value){
this.element[this.element.nodeName.toLowerCase()=="textarea"||this.element.nodeName.toLowerCase()=="input"?"value":"innerHTML"]=_value;
return this;
},
"attribute":{
"getAll":function(){
return this._parent_.environment.getElement().attributes;
},
"clear":function(_name){
this.set(_name,"");
return this._parent_;
},
"get":function(_name){
return this._parent_.environment.getElement().getAttribute(_name);
},
"has":function(_name){
return pi.env.ie?(this.get(_name)!=null):this._parent_.environment.getElement().hasAttribute(_name);
},
"remove":function(_name){
this._parent_.environment.getElement().removeAttribute(_name);
return this._parent_;
},
"set":function(_name,_value){
this._parent_.environment.getElement().setAttribute(_name,_value);
return this._parent_;
},
"addClass":function(_classes){
for(var i=0,len=arguments.length; i<len; i++){
pi.util.Element.addClass(this._parent_.environment.getElement(),arguments[i]);
};
return this._parent_;
},
"clearClass":function(){
this.setClass("");
this._parent_;
},
"getClass":function(){
return pi.util.Element.getClass( this._parent_.environment.getElement() );
},
"hasClass":function(_class){
return pi.util.Element.hasClass( this._parent_.environment.getElement(), _class );
},
"setClass":function(_value){
return pi.util.Element.setClass( this._parent_.environment.getElement(), _value );
},
"removeClass":function(_class){
pi.util.Element.removeClass( this._parent_.environment.getElement(), _class );
return this._parent_;
},
"toggleClass":function(_class){
pi.util.Element.toggleClass( this._parent_.environment.getElement(), _class );
}
},
"child":{
"get":function(){
return this._parent_.environment.getElement().childNodes;
},
"add":function(_elements){
for (var i = 0; i < arguments.length; i++) {
var el = arguments[i];
this._parent_.environment.getElement().appendChild(
el.environment ? el.environment.getElement() : el
);
}
return this._parent_;
},
"addAfter":function(_element,_referenceElement){
this.addBefore(
_element.environment?_element.environment.getElement():_element,
(_referenceElement.environment?_referenceElement.environment.getElement():_referenceElement).nextSibling
);
return this._parent_;
},
"addBefore":function(_element,_referenceElement){
this._parent_.environment.getElement().insertBefore(
_element.environment?_element.environment.getElement():_element,
_referenceElement.environment?_referenceElement.environment.getElement():_referenceElement
);
return this._parent_;
},
"remove":function(_element){
this._parent_.environment.getElement().removeChild(_element.environment?_element.environment.getElement():_element);
}
},
"environment":{
"_element":null,
"setElement":function(_value){
this._parent_.element = _value;
this._parent_.element.lib = this._parent_;
this._parent_.element.firebugElement = true;
this._setElement(_value);
},
"getParent":function(){
return this.getElement().parentNode;
},
"getPosition":function(){
return pi.util.Element.getPosition(this.getElement());
},
"getSize":function(){
return pi.util.Element.getSize( this.getElement() );
},
"addStyle":function(_styleObject){
pi.util.Element.addStyle(this.getElement(),_styleObject);
return this._parent_;
},
"getStyle":function(_property){
return pi.util.Element.getStyle(this.getElement(),_property);
},
"getName":function(){
return this.getElement().nodeName;
},
"getType":function(){
return this.getElement().nodeType;
},
"getValue":function(){
return pi.util.Element.getValue(this.getElement());
},
"getView":function(_property){
return pi.util.Element.getView(this.getElement(),_property);
}
},
"event":{
"addListener":function(_event,_fn,_useCapture){
pi.util.AddEvent(this._parent_.environment.getElement(),_event,_fn,_useCapture);
return this._parent_;
},
"removeListener":function(_event,_fn,_useCapture){
pi.util.RemoveEvent(this._parent_.environment.getElement(),_event,_fn,_useCapture);
return this._parent_;
}
}
};
pi.element = pi.element.build();
pi.xhr = new pi.base;
pi.xhr.init = function(_url){
if(!window.XMLHttpRequest){
var names = ["Msxml2.XMLHTTP.6.0","Msxml2.XMLHTTP.3.0","Msxml2.XMLHTTP","Microsoft.XMLHTTP"];
for (var i = 0; i < names.length; i++) {
try {
this.environment.setApi(new ActiveXObject(names[i]));
break;
} catch (e) { continue; }
}
}
else {
this.environment.setApi(new XMLHttpRequest());
}
this.environment.getApi().onreadystatechange=pi.util.Curry(this.event.readystatechange,this);
this.environment.setUrl(_url);
this.environment.setCallback([]);
return this;
};
pi.xhr.body = {
"addCallback": function(){
return this.environment.addCallback.apply(this.environment,arguments);
},
"addData": function(){
return this.environment.addData.apply(this.environment,arguments);
},
"abort":function(){
this.environment.getApi().abort();
return this;
},
"send":function(){
var url = this.environment.getUrl(), data = this.environment.getData(),dataUrl = "";
if(!this.environment.getCache())
data["forceCache"] = Number(new Date);
for (var key in data)
dataUrl += pi.util.String.format("{0}={1}&",key, data[key]);
if (this.environment.getType()=="GET")
url += (url.search("\\?")==-1?"?":"&")+pi.util.String.format("{0}",dataUrl);
this.api.open(this.environment.getType(),url,this.environment.getAsync());
if(this.environment.getType()=="POST"){
this.api.setRequestHeader("Content-Type","application/x-www-form-urlencoded");
};
this.api.send(this.environment.getType()=="GET"?"":dataUrl);
return this;
}
};
pi.xhr.body.environment = {
"_async":true, "_api":null, "_cache":true, "_callback":null, "_data":{}, "_type":"GET", "_url":"",
"setApi":function(_value){
this._parent_.api = _value;
this._setApi(_value);
},
"addCallback": function(_readyState,_fn){
this.getCallback().push({ "fn":_fn, "readyState":_readyState });
return this._parent_;
},
"addData": function(_key,_value){
this.getData()[_key] = _value;
return this._parent_;
},
"setType": function(_value){
this._setType(_value);
return this._parent_;
}
};
pi.xhr.body.event = {
"readystatechange":function(){
var readyState = this.environment.getApi().readyState, callback=this.environment.getCallback();
for (var i = 0, len=callback.length; i < len; i++) {
if(pi.util.Array.indexOf(callback[i].readyState,readyState)>-1){
callback[i].fn.apply(this);
}
}
}
};
pi.xhr = pi.xhr.build();
/*
* xml.xhr.get
*/
pi.xhr.get = function(_url,_returnPiObject){
var request = new pi.xhr();
request.environment.setAsync(false);
request.environment.setUrl(_url);
request.send();
return _returnPiObject?request:request.environment.getApi();
};
/*
* registering onload event for init functions
*/
pi.util.AddEvent(
pi.env.ie?window:document,
pi.env.ie?"load":"DOMContentLoaded",
function(){
for(var i=0,len=pi.util.Init.length; i<len; i++){
pi.util.Init[ i ]();
}
}
);
})(firebug);
with(firebug){
initConsole();
lib.util.Init.push(firebug.init);
} | PypiClean |
/Apycula-0.9.0a1.tar.gz/Apycula-0.9.0a1/doc/alu.md | # ALU
In Gowin FPGA logic tiles, it is possible to configure a CLS (slice of two LUTs) in ALU mode. In this mode a hard logic carry chain is used in combination with programmable logic to implement fast arithmetic.
ALU mode is available on the 3 CLS in each tile that have a flip-flop, and configured with a single bit. This selects the `F` LUT output to be passed through the ALU. Hard carry logic runs from west to east from LUT0 to LUT5 across tiles.

The ALU hard logic takes the shape of a full adder, where the carry chain is fully hard logic, and the first `XOR` gate is formed by the `LUT4`. But the lower 4 bits are shared with an additional `LUT2` which mostly acts as an input selector between `A` and `B` in front of the carry `AND`. The `C` input is mostly wired to `1` so the main `LUT4` doesn't use the lower bits, but in one case `C=0` and `D=1` to much the same effect.

On the synthesis side, the ALU primitive supports 9 modes, which correspond to a bit pattern stored in the LUT, as well as which ports are used, and which are set to constant values.
```
add(0) 0011000011001100 A:- B:I0 C:1 D:I1 CIN:0
sub(1) 1010000001011010 A:I0 B:- C:1 D:I1 CIN:1
addsub(2) 0110000010011010 A:I0 B:I1 C:1 D:I3 CIN:??
ne(3) 1001000010011111 A:I0 B:I1 C:1 D:- CIN:??
ge(4) 1001000010011010 A:I0 B:I1 C:1 D:- CIN:??
le(5) 1001000010011010 A:I1 B:I0 C:1 D:- CIN:??
cup(6) 1010000010100000 A:I0 B:I1 C:1 D:- CIN:??
cdn(7) 0101000001011111 A:I0 B:I1 C:1 D:- CIN:??
cupcdn(8) 1010000001011010 A:I0 B:I1 C:1 D:I3 CIN:??
mul(9) 0111100010001000 A:I0 B:I1 C:0 D:1 CIN:??
```
These values should be understood as follows: The lowest 4 bits are shared between the `LUT4` and the carry "selector" `LUT2`, so in the case of `ADD` `1100`, selecting `B`. In almost all cases `C:1` which means the output of the `LUT4` is controlled by `AAAA0000AAAA0000` avoiding the lower bits and explaining the zeros in most modes. In the case of `ADD` the `LUT4` function is therefore `00111100`, which is `B XOR D`. In the case of `MUL` `C:0` and `D:1` so indeed only `0000AAAA00000000` is used for the `LUT4`, having the function of `AND`, like the lower `LUT2`. I have confirmed the functionality is identical with the other clusters set to `0000`. The full list of implemented logic functions:
```
FN LUT4 LUT2
ADD(0) B XOR D B
SUB(1) !A XOR D A
ADDSUB(2) A XOR B XOR !D A
NE(3) A XOR B 1
GE/LE(4-5) A XOR B A
CUP(6) A 0
CDN(7) !A 1
CUPCDN(8) A XNOR D A
MUL(9) A AND B A AND B
```
It seems counterintuitive that the `LUT2` has an asymmetric function, but it all works out in the end. See for yourself.
Armed with this knowledge, we can puzzle out what the total function of each `ALU` mode is. Most of them have obvious names with well known truth tables, but `MUL` is a strange one. When we find its total function analytically or experimentally, we find that it computes `A*B+CI`, in other words, an `AND` into a half adder. Our theory is that this could be used in implementing a Wallace tree or other long multiplication algorithm.
As a challenge, I decided to try a full adder that computes `A*B+D+CI`. To implement this the `LUT4` function should be `(A AND B) XOR D` and the `LUT2` function `A AND B`, giving the total data `0111000010001000` which indeed works as expected for `C=1`. This is compatible with the Gowin `MUL` if it were configured with `C=1` and `D=I3` instead of `C=0` and `D=1`.
| PypiClean |
/Blogofile-0.8.3.tar.gz/Blogofile-0.8.3/blogofile/writer.py | __author__ = "Ryan McGuire (ryan@enigmacurry.com)"
import logging
import os
import re
import shutil
import tempfile
from . import util
from . import config
from . import cache
from . import filter as _filter
from . import controller
from . import plugin
from . import template
logger = logging.getLogger("blogofile.writer")
class Writer(object):
    """Render the blogofile source tree into a static site.

    Walks the current directory, runs plugins/filters/controllers,
    materializes template files through the configured template engines and
    copies every other file verbatim into ``output_dir``.
    """

    def __init__(self, output_dir):
        self.config = config
        # Base templates are templates (usually in ./_templates) that are only
        # referenced by other templates.
        self.base_template_dir = util.path_join(".", "_templates")
        self.output_dir = output_dir

    def __load_bf_cache(self):
        # Template cache object, used to transfer state to/from each template:
        self.bf = cache.bf
        self.bf.writer = self
        self.bf.logger = logger

    def write_site(self):
        """Build the whole site, guaranteeing temp-dir cleanup on failure."""
        self.__load_bf_cache()
        self.__setup_temp_dir()
        try:
            self.__setup_output_dir()
            self.__calculate_template_files()
            self.__init_plugins()
            self.__init_filters_controllers()
            self.__run_controllers()
            self.__write_files()
        finally:
            self.__delete_temp_dir()

    def __setup_temp_dir(self):
        """Create a directory for temporary data."""
        self.temp_proc_dir = tempfile.mkdtemp(prefix="blogofile_")
        # Make sure this temp directory is added to each template lookup:
        for engine in self.bf.config.templates.engines.values():
            try:
                engine.add_default_template_path(self.temp_proc_dir)
            except AttributeError:
                # Engine doesn't support extra template paths; skip it.
                pass

    def __delete_temp_dir(self):
        """Cleanup and delete temporary directory."""
        shutil.rmtree(self.temp_proc_dir)

    def __setup_output_dir(self):
        """Setup the staging directory."""
        if os.path.isdir(self.output_dir):
            # I *would* just shutil.rmtree the whole thing and recreate it,
            # but I want the output_dir to retain its same inode on the
            # filesystem to be compatible with some HTTP servers.
            # So this just deletes the *contents* of output_dir
            for f in os.listdir(self.output_dir):
                f = util.path_join(self.output_dir, f)
                # Best effort: try removing as a file, then as a tree.
                try:
                    os.remove(f)
                except OSError:
                    pass
                try:
                    shutil.rmtree(f)
                except OSError:
                    pass
        util.mkdir(self.output_dir)

    def __calculate_template_files(self):
        """Build a regex matching any registered template file ending."""
        endings = ["." + re.escape(ending) + "$"
                   for ending in self.config.templates.engines.keys()]
        p = "(" + "|".join(endings) + ")"
        self.template_file_regex = re.compile(p)

    def __write_files(self):
        """Write all files for the blog to _site.

        Convert all templates to straight HTML. Copy other
        non-template files directly.
        """
        for root, dirs, files in os.walk("."):
            if root.startswith("./"):
                root = root[2:]
            for d in list(dirs):
                # Exclude ignored dirs; iterate a copy since dirs is
                # mutated in place to prune the walk.
                d_path = util.path_join(root, d)
                if util.should_ignore_path(d_path):
                    logger.debug("Ignoring directory: " + d_path)
                    dirs.remove(d)
            try:
                util.mkdir(util.path_join(self.output_dir, root))
            except OSError:
                pass
            for t_fn in files:
                t_fn_path = util.path_join(root, t_fn)
                if util.should_ignore_path(t_fn_path):
                    # Ignore this file.
                    logger.debug("Ignoring file: " + t_fn_path)
                    continue
                elif self.template_file_regex.search(t_fn):
                    logger.info("Processing template: " + t_fn_path)
                    # Process this template file, stripping the template
                    # extension to get the output filename.
                    html_path = self.template_file_regex.sub("", t_fn)
                    template.materialize_template(
                        t_fn_path,
                        util.path_join(root, html_path))
                else:
                    # Copy this non-template file
                    f_path = util.path_join(root, t_fn)
                    logger.debug("Copying file: " + f_path)
                    out_path = util.path_join(self.output_dir, f_path)
                    if self.config.site.overwrite_warning and \
                            os.path.exists(out_path):
                        # Logger.warn is deprecated; use warning().
                        logger.warning("Location is used more than once: {0}"
                                       .format(f_path))
                    if self.config.site.use_hard_links:
                        # Try hardlinking first, and if that fails copy
                        try:
                            os.link(f_path, out_path)
                        except Exception:
                            shutil.copyfile(f_path, out_path)
                    else:
                        shutil.copyfile(f_path, out_path)

    def __init_plugins(self):
        # Run plugin defined init methods
        plugin.init_plugins()

    def __init_filters_controllers(self):
        # Run filter/controller defined init methods
        _filter.init_filters()
        controller.init_controllers(namespace=self.bf.config.controllers)

    def __run_controllers(self):
        """Run all the controllers in the _controllers directory."""
        namespaces = [self.bf.config]
        # Loop variable renamed so it no longer shadows the module-level
        # `plugin` import used by __init_plugins().
        for plugin_ns in list(self.bf.config.plugins.values()):
            if plugin_ns.enabled:
                namespaces.append(plugin_ns)
        controller.run_all(namespaces)
/EtherTDD-0.1.5.tar.gz/EtherTDD-0.1.5/ethertdd/__init__.py | from ethereum import abi, tester
from ethereum.utils import is_string
def set_gas_limit(new_limit):
    """Set the global gas limit used by the pyethereum tester module."""
    tester.gas_limit = new_limit
class EvmContract(object):
    # Most of the code in this class was pulled from the _abi_contract class
    # defined in state.abi_contract in https://github.com/ethereum/pyethereum/blob
    # /a4d642e3fd100cf8db44b8e7932fba9027c23f3e/ethereum/tester.py
    # I just modified it to take precompiled code instead of uncompiled code.
    def __init__(self, compiled_abi, compiled_code, name,
                 constructor_args=None, sender=tester.k0, endowment=0,
                 gas=None, state=None, log_listener=None):
        """Deploy *compiled_code* to a (possibly fresh) pyethereum test state.

        compiled_abi -- JSON ABI (string or already-decoded list)
        compiled_code -- EVM init bytecode produced by the compiler
        name -- contract name, used to encode constructor arguments
        constructor_args -- values passed to the contract constructor
        """
        # None sentinel instead of a mutable [] default argument.
        if constructor_args is None:
            constructor_args = []
        if not state:
            state = tester.state()
        self.state = state
        if is_string(compiled_abi):
            compiled_abi = abi.json_decode(compiled_abi)
        # The ABI lists the constructor without a name/outputs; rewrite it
        # as a regular function entry so ContractTranslator can encode its
        # arguments under the contract name.
        for item in compiled_abi:
            if item['type'] == 'constructor':
                item['type'] = 'function'
                item['name'] = name
                item['outputs'] = []
                break
        self._translator = tester.abi.ContractTranslator(compiled_abi)
        if log_listener:
            self.state.block.log_listeners.append(
                lambda x: log_listener(self._translator.listen(x, noprint=True)))
        if len(constructor_args) > 0:
            # Constructor args are appended to the init code; the 4-byte
            # function selector produced by encode() is stripped.
            compiled_code += self._translator.encode(name, constructor_args)[4:]
        self.address = self.state.evm(compiled_code, sender, endowment, gas)
        assert len(self.state.block.get_code(self.address)), \
            "Contract code empty"

        def kall_factory(f):
            # Build a Python method that ABI-encodes a call to function *f*.
            def kall(*args, **kwargs):
                o = self.state._send(kwargs.get('sender', tester.k0),
                                     self.address,
                                     kwargs.get('value', 0),
                                     self._translator.encode(f, args),
                                     **tester.dict_without(kwargs, 'sender',
                                                           'value', 'output'))
                # Compute output data
                if kwargs.get('output', '') == 'raw':
                    outdata = o['output']
                elif not o['output']:
                    outdata = None
                else:
                    outdata = self._translator.decode(f, o['output'])
                    outdata = outdata[0] if len(outdata) == 1 \
                        else outdata
                # Format output
                if kwargs.get('profiling', ''):
                    # BUG FIX: dict_with was referenced as a bare name and
                    # raised NameError; it lives in the tester module,
                    # alongside dict_without used above.
                    return tester.dict_with(o, output=outdata)
                else:
                    return outdata
            return kall

        for f in self._translator.function_data:
            vars(self)[f] = kall_factory(f)
class FileContractStore(object):
    # Lazily exposes compiled contract artifacts on disk as nested
    # attributes: store.MyContract.abi() reads ./MyContract.abi, and
    # store.MyContract.create(...) deploys an EvmContract from the
    # .bin/.binary and .abi files. Attribute access returns a child store
    # node; calling a leaf node reads (and caches) the file contents.
    def __init__(self, name='', path='.', caching=True, parent=None):
        self._name = name
        self._path = path
        self._parent = parent
        self._stores = {}
        self._contents = None
        self._caching = caching

    def __call__(self, *args, **kwargs):
        # A node named "create" deploys the parent contract; any other node
        # resolves to the contents of the file '<path>.<name>'.
        if self._name == 'create':
            return EvmContract(self._parent.abi(), self._parent.binary(),
                               self._parent._name, args, **kwargs)
        if self._contents is not None:
            return self._contents
        if self._name in ['binary', 'bin']:
            # Either .bin or .binary may exist on disk; the hex text is
            # decoded to raw bytecode.
            # NOTE(review): str.decode('hex') is Python 2 only.
            try:
                with open('%s.bin' % self._path[0:-1], 'r') as f:
                    self._contents = f.read()
            except IOError:
                with open('%s.binary' % self._path[0:-1], 'r') as f:
                    self._contents = f.read()
            self._contents = self._contents.decode('hex')
        else:
            with open('%s.%s' % (self._path[0:-1], self._name), 'r') as f:
                self._contents = f.read()
        return self._contents

    def __getattr__(self, attr):
        # Child store for '<path><name>/<attr>'; reused unless caching is
        # disabled.
        if not self._caching or attr not in self._stores:
            self._stores[attr] = FileContractStore(
                name=attr, path='%s%s/' % (self._path, self._name), parent=self
            )
        return self._stores[attr]

    def __getitem__(self, item):
        # Dictionary-style access as an alias for attribute access.
        return self.__getattr__(item)
/MergePythonSDK.ticketing-2.2.2-py3-none-any.whl/MergePythonSDK/ticketing/model/ticket.py | import re # noqa: F401
import sys # noqa: F401
from typing import (
Optional,
Union,
List,
Dict,
)
from MergePythonSDK.shared.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
OpenApiModel,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from MergePythonSDK.shared.exceptions import ApiAttributeError
from MergePythonSDK.shared.model_utils import import_model_by_name
def lazy_import():
    """Import related model classes on first use to avoid circular imports."""
    from MergePythonSDK.shared.model.remote_data import RemoteData
    from MergePythonSDK.ticketing.model.priority_enum import PriorityEnum
    from MergePythonSDK.ticketing.model.ticket_status_enum import TicketStatusEnum
    globals().update(
        PriorityEnum=PriorityEnum,
        RemoteData=RemoteData,
        TicketStatusEnum=TicketStatusEnum,
    )
class Ticket(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
('ticket_url',): {
'max_length': 2000,
},
}
    @cached_property
    def additional_properties_type():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        # Any JSON-compatible value is accepted as an additional property.
        return (bool, dict, float, int, list, str, none_type,)  # noqa: E501
_nullable = False
    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        lazy_import()
        # Each value is a tuple of accepted types; none_type marks the
        # attribute as nullable/optional.
        defined_types = {
            'id': (str, none_type,),  # noqa: E501
            'remote_id': (str, none_type, none_type,),  # noqa: E501
            'name': (str, none_type, none_type,),  # noqa: E501
            'assignees': ([str, none_type], none_type,),  # noqa: E501
            'due_date': (datetime, none_type, none_type,),  # noqa: E501
            'status': (TicketStatusEnum, str, none_type,),
            'description': (str, none_type, none_type,),  # noqa: E501
            'project': (str, none_type, none_type,),  # noqa: E501
            'ticket_type': (str, none_type, none_type,),  # noqa: E501
            'account': (str, none_type, none_type,),  # noqa: E501
            'contact': (str, none_type, none_type,),  # noqa: E501
            'parent_ticket': (str, none_type, none_type,),  # noqa: E501
            'attachments': ([str, none_type], none_type,),  # noqa: E501
            'tags': ([str], none_type,),  # noqa: E501
            'remote_created_at': (datetime, none_type, none_type,),  # noqa: E501
            'remote_updated_at': (datetime, none_type, none_type,),  # noqa: E501
            'completed_at': (datetime, none_type, none_type,),  # noqa: E501
            'remote_data': ([RemoteData], none_type, none_type,),  # noqa: E501
            'remote_was_deleted': (bool, none_type,),  # noqa: E501
            'ticket_url': (str, none_type, none_type,),  # noqa: E501
            'priority': (PriorityEnum, str, none_type,),
        }
        # Expandable relations: when the API expands one of these fields,
        # the related model instance is also an accepted type.
        expands_types = {"account": "Account", "assignees": "User", "attachments": "Attachment", "contact": "Contact", "parent_ticket": "Ticket", "project": "Project"}

        # update types with expands
        for key, val in expands_types.items():
            if key in defined_types.keys():
                expands_model = import_model_by_name(val, "ticketing")
                if len(defined_types[key]) > 0 and isinstance(defined_types[key][0], list):
                    defined_types[key][0].insert(0, expands_model)
                defined_types[key] = (*defined_types[key], expands_model)

        return defined_types
@cached_property
def discriminator():
return None
attribute_map = {
'id': 'id', # noqa: E501
'remote_id': 'remote_id', # noqa: E501
'name': 'name', # noqa: E501
'assignees': 'assignees', # noqa: E501
'due_date': 'due_date', # noqa: E501
'status': 'status', # noqa: E501
'description': 'description', # noqa: E501
'project': 'project', # noqa: E501
'ticket_type': 'ticket_type', # noqa: E501
'account': 'account', # noqa: E501
'contact': 'contact', # noqa: E501
'parent_ticket': 'parent_ticket', # noqa: E501
'attachments': 'attachments', # noqa: E501
'tags': 'tags', # noqa: E501
'remote_created_at': 'remote_created_at', # noqa: E501
'remote_updated_at': 'remote_updated_at', # noqa: E501
'completed_at': 'completed_at', # noqa: E501
'remote_data': 'remote_data', # noqa: E501
'remote_was_deleted': 'remote_was_deleted', # noqa: E501
'ticket_url': 'ticket_url', # noqa: E501
'priority': 'priority', # noqa: E501
}
read_only_vars = {
'id', # noqa: E501
'remote_data', # noqa: E501
'remote_was_deleted', # noqa: E501
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""Ticket - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
id (str): [optional] # noqa: E501
remote_id (str, none_type): The third-party API ID of the matching object.. [optional] # noqa: E501
name (str, none_type): The ticket's name.. [optional] # noqa: E501
assignees ([str, none_type]): [optional] # noqa: E501
due_date (datetime, none_type): The ticket's due date.. [optional] # noqa: E501
status (bool, dict, float, int, list, str, none_type): The current status of the ticket.. [optional] # noqa: E501
description (str, none_type): The ticket's description.. [optional] # noqa: E501
project (str, none_type): [optional] # noqa: E501
ticket_type (str, none_type): The ticket's type.. [optional] # noqa: E501
account (str, none_type): [optional] # noqa: E501
contact (str, none_type): [optional] # noqa: E501
parent_ticket (str, none_type): [optional] # noqa: E501
attachments ([str, none_type]): [optional] # noqa: E501
tags ([str]): [optional] # noqa: E501
remote_created_at (datetime, none_type): When the third party's ticket was created.. [optional] # noqa: E501
remote_updated_at (datetime, none_type): When the third party's ticket was updated.. [optional] # noqa: E501
completed_at (datetime, none_type): When the ticket was completed.. [optional] # noqa: E501
remote_data ([RemoteData], none_type): [optional] # noqa: E501
remote_was_deleted (bool): [optional] # noqa: E501
ticket_url (str, none_type): The 3rd party url of the Ticket.. [optional] # noqa: E501
priority (bool, dict, float, int, list, str, none_type): The priority or urgency of the Ticket. Possible values include: URGENT, HIGH, NORMAL, LOW - in cases where there is no clear mapping - the original value passed through.. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', True)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.remote_id = kwargs.get("remote_id", None)
self.name = kwargs.get("name", None)
self.assignees = kwargs.get("assignees", None)
self.due_date = kwargs.get("due_date", None)
self.status = kwargs.get("status", None)
self.description = kwargs.get("description", None)
self.project = kwargs.get("project", None)
self.ticket_type = kwargs.get("ticket_type", None)
self.account = kwargs.get("account", None)
self.contact = kwargs.get("contact", None)
self.parent_ticket = kwargs.get("parent_ticket", None)
self.attachments = kwargs.get("attachments", None)
self.tags = kwargs.get("tags", None)
self.remote_created_at = kwargs.get("remote_created_at", None)
self.remote_updated_at = kwargs.get("remote_updated_at", None)
self.completed_at = kwargs.get("completed_at", None)
self.ticket_url = kwargs.get("ticket_url", None)
self.priority = kwargs.get("priority", None)
# Read only properties
self._id = kwargs.get("id", str())
self._remote_data = kwargs.get("remote_data", None)
self._remote_was_deleted = kwargs.get("remote_was_deleted", bool())
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""Ticket - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
id (str): [optional] # noqa: E501
remote_id (str, none_type): The third-party API ID of the matching object.. [optional] # noqa: E501
name (str, none_type): The ticket's name.. [optional] # noqa: E501
assignees ([str, none_type]): [optional] # noqa: E501
due_date (datetime, none_type): The ticket's due date.. [optional] # noqa: E501
status (bool, dict, float, int, list, str, none_type): The current status of the ticket.. [optional] # noqa: E501
description (str, none_type): The ticket's description.. [optional] # noqa: E501
project (str, none_type): [optional] # noqa: E501
ticket_type (str, none_type): The ticket's type.. [optional] # noqa: E501
account (str, none_type): [optional] # noqa: E501
contact (str, none_type): [optional] # noqa: E501
parent_ticket (str, none_type): [optional] # noqa: E501
attachments ([str, none_type]): [optional] # noqa: E501
tags ([str]): [optional] # noqa: E501
remote_created_at (datetime, none_type): When the third party's ticket was created.. [optional] # noqa: E501
remote_updated_at (datetime, none_type): When the third party's ticket was updated.. [optional] # noqa: E501
completed_at (datetime, none_type): When the ticket was completed.. [optional] # noqa: E501
remote_data ([RemoteData], none_type): [optional] # noqa: E501
remote_was_deleted (bool): [optional] # noqa: E501
ticket_url (str, none_type): The 3rd party url of the Ticket.. [optional] # noqa: E501
priority (bool, dict, float, int, list, str, none_type): The priority or urgency of the Ticket. Possible values include: URGENT, HIGH, NORMAL, LOW - in cases where there is no clear mapping - the original value passed through.. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.remote_id: Union[str, none_type] = kwargs.get("remote_id", None)
self.name: Union[str, none_type] = kwargs.get("name", None)
self.assignees: Union[List[str, none_type]] = kwargs.get("assignees", list())
self.due_date: Union[datetime, none_type] = kwargs.get("due_date", None)
self.status: Union[bool, dict, float, int, list, str, none_type] = kwargs.get("status", None)
self.description: Union[str, none_type] = kwargs.get("description", None)
self.project: Union[str, none_type] = kwargs.get("project", None)
self.ticket_type: Union[str, none_type] = kwargs.get("ticket_type", None)
self.account: Union[str, none_type] = kwargs.get("account", None)
self.contact: Union[str, none_type] = kwargs.get("contact", None)
self.parent_ticket: Union[str, none_type] = kwargs.get("parent_ticket", None)
self.attachments: Union[List[str, none_type]] = kwargs.get("attachments", list())
self.tags: Union[List[str]] = kwargs.get("tags", list())
self.remote_created_at: Union[datetime, none_type] = kwargs.get("remote_created_at", None)
self.remote_updated_at: Union[datetime, none_type] = kwargs.get("remote_updated_at", None)
self.completed_at: Union[datetime, none_type] = kwargs.get("completed_at", None)
self.ticket_url: Union[str, none_type] = kwargs.get("ticket_url", None)
self.priority: Union[bool, dict, float, int, list, str, none_type] = kwargs.get("priority", None)
# Read only properties
self._id: Union[str] = kwargs.get("id", str())
self._remote_data: Union[List["RemoteData"]] = kwargs.get("remote_data", None)
self._remote_was_deleted: Union[bool] = kwargs.get("remote_was_deleted", bool())
# Read only property getters
@property
def id(self):
return self._id
@property
def remote_data(self):
return self._remote_data
@property
def remote_was_deleted(self):
return self._remote_was_deleted | PypiClean |
/Cantiz-PyChromecast-3.2.2.tar.gz/Cantiz-PyChromecast-3.2.2/pychromecast/controllers/__init__.py | import logging
from ..error import UnsupportedNamespace, ControllerNotRegistered
class BaseController(object):
    """ ABC for namespace controllers. """

    def __init__(self, namespace, supporting_app_id=None,
                 target_platform=False):
        """
        Initialize the controller.
        namespace: the namespace this controller will act on
        supporting_app_id: app to be launched if app is running with
                           unsupported namespace.
        target_platform: set to True if you target the platform instead of
                         current app.
        """
        self.namespace = namespace
        self.supporting_app_id = supporting_app_id
        self.target_platform = target_platform
        # Both are populated by registered() and cleared by tear_down();
        # None means the controller is not attached to a Cast object yet.
        self._socket_client = None
        self._message_func = None
        self.logger = logging.getLogger(__name__)

    @property
    def is_active(self):
        """ True if the controller is connected to a socket client and the
        Chromecast is running an app that supports this controller. """
        return (self._socket_client is not None and
                self.namespace in self._socket_client.app_namespaces)

    def launch(self, callback_function=None):
        """ If set, launches app related to the controller. """
        self._check_registered()
        self._socket_client.receiver_controller.launch_app(
            self.supporting_app_id, callback_function=callback_function)

    def registered(self, socket_client):
        """ Called when a controller is registered. """
        self._socket_client = socket_client
        # Platform controllers bypass the app channel and talk to the
        # Chromecast platform directly.
        if self.target_platform:
            self._message_func = self._socket_client.send_platform_message
        else:
            self._message_func = self._socket_client.send_app_message

    def channel_connected(self):
        """ Called when a channel has been opened that supports the
        namespace of this controller. """
        pass

    def channel_disconnected(self):
        """ Called when a channel is disconnected. """
        pass

    def send_message(self, data, inc_session_id=False,
                     callback_function=None):
        """
        Send a message on this namespace to the Chromecast.

        Raises ControllerNotRegistered if this controller has not been
        registered with a Cast object, and UnsupportedNamespace if the
        running app does not support this namespace and no supporting
        app id is configured.
        """
        self._check_registered()
        # If the current app does not speak our namespace, try to launch
        # the supporting app instead of failing outright.
        if not self.target_platform and \
                self.namespace not in self._socket_client.app_namespaces:
            if self.supporting_app_id is not None:
                self.launch()
            else:
                raise UnsupportedNamespace(
                    ("Namespace {} is not supported by running"
                     "application.").format(self.namespace))
        return self._message_func(
            self.namespace, data, inc_session_id, callback_function)

    # pylint: disable=unused-argument,no-self-use
    def receive_message(self, message, data):
        """
        Called when a message is received that matches the namespace.
        Returns boolean indicating if message was handled.
        """
        return False

    def tear_down(self):
        """ Called when we are shutting down. """
        self._socket_client = None
        self._message_func = None

    def _check_registered(self):
        """ Helper method to see if we are registered with a Cast object. """
        if self._socket_client is None:
            raise ControllerNotRegistered((
                "Trying to use the controller without it being registered "
                "with a Cast object."))
/Adafruit_seesaw-1.0.tar.gz/Adafruit_seesaw-1.0/Adafruit_Seesaw/seesaw.py |
import logging
from Adafruit_bitfield import Adafruit_bitfield
import time
# -- Register bank base addresses ------------------------------------------
SEESAW_STATUS_BASE = 0x00
SEESAW_GPIO_BASE = 0x01
SEESAW_SERCOM0_BASE = 0x02
SEESAW_TIMER_BASE = 0x08
SEESAW_ADC_BASE = 0x09
SEESAW_DAC_BASE = 0x0A
SEESAW_INTERRUPT_BASE = 0x0B
SEESAW_DAP_BASE = 0x0C
SEESAW_EEPROM_BASE = 0x0D
SEESAW_NEOPIXEL_BASE = 0x0E

# -- GPIO function registers -----------------------------------------------
SEESAW_GPIO_DIRSET_BULK = 0x02
SEESAW_GPIO_DIRCLR_BULK = 0x03
SEESAW_GPIO_BULK = 0x04
SEESAW_GPIO_BULK_SET = 0x05
SEESAW_GPIO_BULK_CLR = 0x06
SEESAW_GPIO_BULK_TOGGLE = 0x07
SEESAW_GPIO_INTENSET = 0x08
SEESAW_GPIO_INTENCLR = 0x09
SEESAW_GPIO_INTFLAG = 0x0A
SEESAW_GPIO_PULLENSET = 0x0B
SEESAW_GPIO_PULLENCLR = 0x0C

# -- Status function registers ---------------------------------------------
SEESAW_STATUS_HW_ID = 0x01
SEESAW_STATUS_VERSION = 0x02
SEESAW_STATUS_OPTIONS = 0x03
SEESAW_STATUS_SWRST = 0x7F

# -- Timer (PWM) function registers ----------------------------------------
SEESAW_TIMER_STATUS = 0x00
SEESAW_TIMER_PWM = 0x01

# -- ADC function registers ------------------------------------------------
SEESAW_ADC_STATUS = 0x00
SEESAW_ADC_INTEN = 0x02
SEESAW_ADC_INTENCLR = 0x03
SEESAW_ADC_WINMODE = 0x04
SEESAW_ADC_WINTHRESH = 0x05
SEESAW_ADC_CHANNEL_OFFSET = 0x07

# -- Sercom function registers ---------------------------------------------
SEESAW_SERCOM_STATUS = 0x00
SEESAW_SERCOM_INTEN = 0x02
SEESAW_SERCOM_INTENCLR = 0x03
SEESAW_SERCOM_BAUD = 0x04
SEESAW_SERCOM_DATA = 0x05

# -- EEPROM function registers ---------------------------------------------
# BUG FIX: this constant was referenced by Seesaw.set_i2c_addr()/
# get_i2c_addr() but never defined anywhere in this module, causing a
# NameError at runtime.  0x3F is the I2C-address slot in the seesaw
# firmware's EEPROM register map (matches the Adafruit seesaw drivers).
SEESAW_EEPROM_I2C_ADDR = 0x3F

# -- NeoPixel function registers -------------------------------------------
SEESAW_NEOPIXEL_STATUS = 0x00
SEESAW_NEOPIXEL_PIN = 0x01
SEESAW_NEOPIXEL_SPEED = 0x02
SEESAW_NEOPIXEL_BUF_LENGTH = 0x03
SEESAW_NEOPIXEL_BUF = 0x04
SEESAW_NEOPIXEL_SHOW = 0x05

# -- Pin assignments on the seesaw breakout --------------------------------
ADC_INPUT_0_PIN = 0x02
ADC_INPUT_1_PIN = 0x03
ADC_INPUT_2_PIN = 0x04
ADC_INPUT_3_PIN = 0x05
PWM_0_PIN = 0x04
PWM_1_PIN = 0x05
PWM_2_PIN = 0x06
PWM_3_PIN = 0x07
class Seesaw(object):
    """Driver for the Adafruit seesaw helper chip over I2C.

    The seesaw exposes GPIO, ADC, PWM, sercom, EEPROM and NeoPixel
    functions behind register banks; every transaction sends a
    (base, register) byte pair first, optionally followed by payload.
    """

    # Pin mode constants (Arduino-style semantics).
    INPUT = 0x00
    OUTPUT = 0x01
    INPUT_PULLUP = 0x02

    def __init__(self, addr=0x49, i2c=None, **kwargs):
        """Create the device handle and initialize the chip.

        :param addr: 7-bit I2C address of the seesaw (default 0x49)
        :param i2c: I2C provider module; defaults to Adafruit_GPIO.I2C
        :param kwargs: forwarded to ``i2c.get_i2c_device``
        """
        # Create I2C device.
        if i2c is None:
            import Adafruit_GPIO.I2C as I2C
            i2c = I2C
        self._bus = i2c.get_i2c_device(addr, **kwargs)._bus
        self.addr = addr
        self._sercom_status = Adafruit_bitfield([('ERROR', 1), ('DATA_RDY', 1)])
        self._sercom_inten = Adafruit_bitfield([('ERROR', 1), ('DATA_RDY', 1)])
        self.begin()

    def begin(self):
        """Software-reset the chip and verify its hardware ID.

        :raises RuntimeError: if the chip does not report ID 0x55
        """
        self.sw_reset()
        time.sleep(.500)
        c = self.read8(SEESAW_STATUS_BASE, SEESAW_STATUS_HW_ID)
        if c != 0x55:
            print(c)
            raise RuntimeError("Seesaw hardware ID returned is not correct! Please check your wiring.")

    def sw_reset(self):
        """Trigger a software reset of the seesaw."""
        self.write8(SEESAW_STATUS_BASE, SEESAW_STATUS_SWRST, 0xFF)

    def get_options(self):
        """Return the 32-bit module-options word (big-endian)."""
        buf = self.read(SEESAW_STATUS_BASE, SEESAW_STATUS_OPTIONS, 4)
        return (buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3]

    def get_version(self):
        """Return the 32-bit firmware version word (big-endian)."""
        buf = self.read(SEESAW_STATUS_BASE, SEESAW_STATUS_VERSION, 4)
        return (buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3]

    def pin_mode(self, pin, mode):
        """Set a single GPIO pin to INPUT, OUTPUT or INPUT_PULLUP."""
        self.pin_mode_bulk(1 << pin, mode)

    def digital_write(self, pin, value):
        """Drive a single GPIO pin high (truthy value) or low."""
        self.digital_write_bulk(1 << pin, value)

    def digital_read(self, pin):
        """Return True if the given GPIO pin currently reads high."""
        return self.digital_read_bulk(1 << pin) != 0

    def digital_read_bulk(self, pins):
        """Read all GPIO pins; return the state ANDed with the *pins* mask."""
        buf = self.read(SEESAW_GPIO_BASE, SEESAW_GPIO_BULK, 4)
        # Mask the top nibble of the high byte.
        # TODO: weird overflow error, fix (carried over from original code).
        ret = ((buf[0] & 0xF) << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3]
        return ret & pins

    def set_GPIO_interrupts(self, pins, enabled):
        """Enable (True) or disable GPIO interrupts for the pins in *pins*."""
        cmd = bytearray([(pins >> 24) & 0xFF, (pins >> 16) & 0xFF, (pins >> 8) & 0xFF, pins & 0xFF])
        if enabled:
            self.write(SEESAW_GPIO_BASE, SEESAW_GPIO_INTENSET, cmd)
        else:
            self.write(SEESAW_GPIO_BASE, SEESAW_GPIO_INTENCLR, cmd)

    def analog_read(self, pin):
        """Return the raw ADC reading (two bytes, big-endian) for an ADC
        pin, or 0 if *pin* is not ADC-capable."""
        channel_map = {
            ADC_INPUT_0_PIN: 0,
            ADC_INPUT_1_PIN: 1,
            ADC_INPUT_2_PIN: 2,
            ADC_INPUT_3_PIN: 3,
        }
        p = channel_map.get(pin)
        if p is None:
            return 0
        buf = self.read(SEESAW_ADC_BASE, SEESAW_ADC_CHANNEL_OFFSET + p, 2)
        ret = buf[0] << 8 | buf[1]
        time.sleep(.001)
        return ret

    def pin_mode_bulk(self, pins, mode):
        """Set the mode of every pin in the *pins* bitmask at once."""
        cmd = bytearray([(pins >> 24) & 0xFF, (pins >> 16) & 0xFF, (pins >> 8) & 0xFF, pins & 0xFF])
        if mode == self.OUTPUT:
            self.write(SEESAW_GPIO_BASE, SEESAW_GPIO_DIRSET_BULK, cmd)
        elif mode == self.INPUT:
            self.write(SEESAW_GPIO_BASE, SEESAW_GPIO_DIRCLR_BULK, cmd)
        elif mode == self.INPUT_PULLUP:
            # Input direction, pull enabled, and output latch high selects
            # the pull-up (rather than pull-down) resistor.
            self.write(SEESAW_GPIO_BASE, SEESAW_GPIO_DIRCLR_BULK, cmd)
            self.write(SEESAW_GPIO_BASE, SEESAW_GPIO_PULLENSET, cmd)
            self.write(SEESAW_GPIO_BASE, SEESAW_GPIO_BULK_SET, cmd)

    def digital_write_bulk(self, pins, value):
        """Set every pin in the *pins* bitmask high (truthy) or low."""
        cmd = bytearray([(pins >> 24) & 0xFF, (pins >> 16) & 0xFF, (pins >> 8) & 0xFF, pins & 0xFF])
        if value:
            self.write(SEESAW_GPIO_BASE, SEESAW_GPIO_BULK_SET, cmd)
        else:
            self.write(SEESAW_GPIO_BASE, SEESAW_GPIO_BULK_CLR, cmd)

    def analog_write(self, pin, value):
        """Set the PWM duty cycle (0-255) on a PWM-capable pin.

        Silently does nothing for non-PWM pins (original behavior kept).
        """
        channel_map = {PWM_0_PIN: 0, PWM_1_PIN: 1, PWM_2_PIN: 2, PWM_3_PIN: 3}
        p = channel_map.get(pin, -1)
        if p > -1:
            cmd = bytearray([p, value])
            self.write(SEESAW_TIMER_BASE, SEESAW_TIMER_PWM, cmd)

    def enable_sercom_data_rdy_interrupt(self, sercom):
        """Enable the DATA_RDY interrupt for the given sercom index."""
        # BUG FIX: original referenced the bare name ``_sercom_inten``
        # (NameError at runtime); the bitfield is an instance attribute.
        self._sercom_inten.DATA_RDY = 1
        self.write8(SEESAW_SERCOM0_BASE + sercom, SEESAW_SERCOM_INTEN,
                    self._sercom_inten.get())

    def disable_sercom_data_rdy_interrupt(self, sercom):
        """Disable the DATA_RDY interrupt for the given sercom index."""
        # BUG FIX: same missing ``self.`` as in the enable method.
        self._sercom_inten.DATA_RDY = 0
        self.write8(SEESAW_SERCOM0_BASE + sercom, SEESAW_SERCOM_INTEN,
                    self._sercom_inten.get())

    def read_sercom_data(self, sercom):
        """Read one data byte from the given sercom."""
        return self.read8(SEESAW_SERCOM0_BASE + sercom, SEESAW_SERCOM_DATA)

    def set_i2c_addr(self, addr):
        """Persist a new I2C address to EEPROM and re-initialize."""
        self.eeprom_write8(SEESAW_EEPROM_I2C_ADDR, addr)
        time.sleep(.250)
        # BUG FIX: begin() takes no arguments; adopt the new address on the
        # instance first (write() selects the device by self.addr), then
        # restart with the new addr.
        self.addr = addr
        self.begin()

    def get_i2c_addr(self):
        """Read back the I2C address stored in EEPROM."""
        return self.read8(SEESAW_EEPROM_BASE, SEESAW_EEPROM_I2C_ADDR)

    def eeprom_write8(self, addr, val):
        """Write a single byte *val* to EEPROM offset *addr*."""
        self.eeprom_write(addr, bytearray([val]))

    def eeprom_write(self, addr, buf):
        """Write the bytes in *buf* starting at EEPROM offset *addr*."""
        self.write(SEESAW_EEPROM_BASE, addr, buf)

    def eeprom_read8(self, addr):
        """Read a single byte from EEPROM offset *addr*."""
        return self.read8(SEESAW_EEPROM_BASE, addr)

    def uart_set_baud(self, baud):
        """Set the sercom UART baud rate (32-bit big-endian value)."""
        cmd = bytearray([(baud >> 24) & 0xFF, (baud >> 16) & 0xFF, (baud >> 8) & 0xFF, baud & 0xFF])
        self.write(SEESAW_SERCOM0_BASE, SEESAW_SERCOM_BAUD, cmd)

    def write8(self, regHigh, regLow, value):
        """Write a single byte *value* to register (regHigh, regLow)."""
        self.write(regHigh, regLow, bytearray([value]))

    def read8(self, regHigh, regLow):
        """Read a single byte from register (regHigh, regLow)."""
        return self.read(regHigh, regLow, 1)[0]

    def read(self, regHigh, regLow, length, delay=.001):
        """Read *length* bytes from register (regHigh, regLow).

        :returns: list of ints, one per byte read
        """
        self.write(regHigh, regLow)
        time.sleep(delay)
        ret = self._bus._device.read(length)
        # BUG FIX/robustness: the underlying device may return a py3 bytes
        # object (iterates as ints) or a py2-style str (iterates as chars);
        # the original unconditionally called ord() which raises TypeError
        # on ints.  Normalize both cases to a list of ints.
        return [x if isinstance(x, int) else ord(x) for x in ret]

    def write(self, regHigh, regLow, buf=None):
        """Write the register-address pair, followed by *buf* if given."""
        c = bytearray([regHigh, regLow])
        if buf is not None:
            c = c + buf
        self._bus._select_device(self.addr)
        self._bus._device.write(c)
MIT License
Copyright (c) 2022 Lukas Madenach
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
/Nuitka_winsvc-1.7.10-cp310-cp310-win_amd64.whl/nuitka/code_generation/OperationCodes.py | from nuitka.nodes.shapes.StandardShapes import tshape_unknown
from .BinaryOperationHelperDefinitions import (
getCodeNameForBinaryOperation,
getNonSpecializedBinaryOperations,
getSpecializedBinaryOperations,
)
from .c_types.CTypeBooleans import CTypeBool
from .c_types.CTypeNuitkaBooleans import CTypeNuitkaBoolEnum
from .c_types.CTypeNuitkaVoids import CTypeNuitkaVoidEnum
from .c_types.CTypePyObjectPointers import CTypePyObjectPtr
from .c_types.CTypeVoids import CTypeVoid
from .CodeHelpers import (
generateChildExpressionsCode,
generateExpressionCode,
withObjectCodeTemporaryAssignment,
)
from .CodeHelperSelection import selectCodeHelper
from .ErrorCodes import (
getErrorExitBoolCode,
getErrorExitCode,
getReleaseCodes,
getTakeReferenceCode,
)
from .ExpressionCTypeSelectionHelpers import decideExpressionCTypes
def generateOperationBinaryCode(to_name, expression, emit, context):
    """Generate C code for a binary operation expression node.

    Thin dispatcher: unpacks the expression node's operator, operands and
    in-place status, then delegates the actual helper selection and code
    emission to _getBinaryOperationCode.
    """
    _getBinaryOperationCode(
        to_name=to_name,
        operator=expression.getOperator(),
        inplace=expression.isInplaceSuspect(),
        needs_check=expression.mayRaiseExceptionOperation(),
        left=expression.subnode_left,
        right=expression.subnode_right,
        source_ref=expression.source_ref,
        emit=emit,
        context=context,
    )
def generateOperationNotCode(to_name, expression, emit, context):
    """Generate C code for the boolean "not" operation.

    Emits a CHECK_IF_TRUE call on the operand (which may set an error
    indicator of -1) and assigns the inverted truth value to *to_name*.
    """
    # TODO: We badly need to support target boolean C type here, or else an object is created from the argument.
    (arg_name,) = generateChildExpressionsCode(
        expression=expression, emit=emit, context=context
    )
    res_name = context.getIntResName()
    emit("%s = CHECK_IF_TRUE(%s);" % (res_name, arg_name))
    # CHECK_IF_TRUE returns -1 on error, 0/1 otherwise.
    getErrorExitBoolCode(
        condition="%s == -1" % res_name,
        release_name=arg_name,
        needs_check=expression.subnode_operand.mayRaiseExceptionBool(BaseException),
        emit=emit,
        context=context,
    )
    # "not": the result is true exactly when the operand was false.
    to_name.getCType().emitAssignmentCodeFromBoolCondition(
        to_name=to_name, condition="%s == 0" % res_name, emit=emit
    )
def generateOperationUnaryCode(to_name, expression, emit, context):
    """Generate C code for a unary operation expression node (+x, -x, ~x,
    repr, not), delegating emission to _getUnaryOperationCode."""
    (arg_name,) = generateChildExpressionsCode(
        expression=expression, emit=emit, context=context
    )
    _getUnaryOperationCode(
        to_name=to_name,
        expression=expression,
        operator=expression.getOperator(),
        arg_name=arg_name,
        needs_check=expression.mayRaiseException(BaseException),
        emit=emit,
        context=context,
    )
def _getBinaryOperationCode(
    to_name, operator, inplace, left, right, needs_check, source_ref, emit, context
):
    """Emit C code computing ``left <operator> right`` into *to_name*.

    Selects the most specific C helper available for the operand C types
    and the desired result type, retrying with progressively more generic
    types (down to "PyObject *") until a helper is found.
    """
    # This is detail rich stuff, encoding the complexity of what helpers are
    # available, and can be used as a fallback.
    # pylint: disable=too-many-branches,too-many-locals,too-many-statements
    (
        _unknown_types,
        needs_argument_swap,
        left_shape,
        right_shape,
        left_c_type,
        right_c_type,
    ) = decideExpressionCTypes(
        left=left,
        right=right,
        # Only commutative number operations may swap operands, and never
        # for in-place forms (the left side is written to).
        may_swap_arguments="never"
        if inplace
        else (
            "number"
            if operator in ("Add", "Mult", "BitOr", "BitAnd", "BitXor")
            else "never"
        ),
    )
    # In-place operator names start with "I" (e.g. "IAdd").
    prefix = "%s_OPERATION_%s" % (
        "INPLACE" if operator[0] == "I" else "BINARY",
        getCodeNameForBinaryOperation(operator),
    )
    specialized_helpers_set = getSpecializedBinaryOperations(operator)
    non_specialized_helpers_set = getNonSpecializedBinaryOperations(operator)
    report_missing = True
    helper_type = target_type = None if operator[0] == "I" else to_name.getCType()
    if helper_type is not None:
        if needs_check and helper_type is not None:
            # If an exception may occur, we do not have the "NVOID" helpers though, we
            # instead can use the CTypeNuitkaBoolEnum that will easily convert to
            # it.
            if helper_type is CTypeNuitkaVoidEnum:
                helper_type = CTypeNuitkaBoolEnum
                report_missing = False
        else:
            # If no exception may occur, we do not have the "VOID" helpers though, we
            # instead can use the CTypeBool that will easily convert to it.
            # If no exception can occur, do not require a helper that can indicate
            # it, but use the one that produces simpler code, this means we can
            # avoid the CTypeNuitkaBoolEnum (NBOOL) helpers except for things that
            # can really raise. Once we have expression for types depending on the
            # value to raise or not, this will get us into trouble, due to using a
            # fallback
            # TODO: For now to achieve old behavior, we are going to change to
            # CBOOL for those that cannot raise later.
            if helper_type is CTypeVoid:
                helper_type = CTypeNuitkaBoolEnum
                # helper_type = CTypeBool
                report_missing = False
    # If a more specific C type was picked that "PyObject *" then we can use that to have the helper.
    helper_type, helper_function = selectCodeHelper(
        prefix=prefix,
        specialized_helpers_set=specialized_helpers_set,
        non_specialized_helpers_set=non_specialized_helpers_set,
        result_type=helper_type,
        left_shape=left_shape,
        right_shape=right_shape,
        left_c_type=left_c_type,
        right_c_type=right_c_type,
        argument_swap=needs_argument_swap,
        report_missing=report_missing,
        source_ref=source_ref,
    )
    # If we failed to find CTypeBool, that should be OK.
    if helper_function is None and target_type is CTypeBool:
        helper_type, helper_function = selectCodeHelper(
            prefix=prefix,
            specialized_helpers_set=specialized_helpers_set,
            non_specialized_helpers_set=non_specialized_helpers_set,
            result_type=CTypeNuitkaBoolEnum,
            left_shape=left_shape,
            right_shape=right_shape,
            left_c_type=left_c_type,
            right_c_type=right_c_type,
            argument_swap=needs_argument_swap,
            report_missing=True,
            source_ref=source_ref,
        )
    if helper_function is None:
        # Give up and warn about it.
        left_c_type = CTypePyObjectPtr
        right_c_type = CTypePyObjectPtr
        helper_type, helper_function = selectCodeHelper(
            prefix=prefix,
            specialized_helpers_set=specialized_helpers_set,
            non_specialized_helpers_set=non_specialized_helpers_set,
            result_type=CTypePyObjectPtr if helper_type is not None else None,
            left_shape=tshape_unknown,
            right_shape=tshape_unknown,
            left_c_type=left_c_type,
            right_c_type=right_c_type,
            argument_swap=False,
            report_missing=True,
            source_ref=source_ref,
        )
    assert helper_function is not None, (left, right)
    left_name = context.allocateTempName(
        "%s_expr_left" % operator.lower(), type_name=left_c_type.c_type
    )
    right_name = context.allocateTempName(
        "%s_expr_right" % operator.lower(), type_name=right_c_type.c_type
    )
    generateExpressionCode(
        to_name=left_name, expression=left, emit=emit, context=context
    )
    generateExpressionCode(
        to_name=right_name, expression=right, emit=emit, context=context
    )
    # We must assume to write to a variable if "inplace" is active, not e.g.
    # a constant reference. That was asserted before calling us.
    if inplace or "INPLACE" in helper_function:
        assert not needs_argument_swap
        res_name = context.getBoolResName()
        # For module variable C type to reference later.
        if left.isExpressionVariableRef() and left.getVariable().isModuleVariable():
            emit("%s = %s;" % (context.getInplaceLeftName(), left_name))
        if (
            not left.isExpressionVariableRef()
            and not left.isExpressionTempVariableRef()
        ):
            if not context.needsCleanup(left_name):
                getTakeReferenceCode(left_name, emit)
        # In-place helpers receive the left operand by address and report
        # success/failure through a boolean result.
        emit("%s = %s(&%s, %s);" % (res_name, helper_function, left_name, right_name))
        getErrorExitBoolCode(
            condition="%s == false" % res_name,
            release_names=(left_name, right_name),
            needs_check=needs_check,
            emit=emit,
            context=context,
        )
        emit("%s = %s;" % (to_name, left_name))
        if (
            not left.isExpressionVariableRef()
            and not left.isExpressionTempVariableRef()
        ):
            context.addCleanupTempName(to_name)
    else:
        if needs_argument_swap:
            arg1_name = right_name
            arg2_name = left_name
        else:
            arg1_name = left_name
            arg2_name = right_name
        # May need to convert return value.
        if helper_type is not target_type:
            value_name = context.allocateTempName(
                to_name.code_name + "_" + helper_type.helper_code.lower(),
                type_name=helper_type.c_type,
                unique=to_name.code_name == "tmp_unused",
            )
        else:
            value_name = to_name
        emit(
            "%s = %s(%s, %s);"
            % (
                value_name,
                helper_function,
                arg1_name,
                arg2_name,
            )
        )
        if value_name.getCType().hasErrorIndicator():
            getErrorExitCode(
                check_name=value_name,
                release_names=(left_name, right_name),
                needs_check=needs_check,
                emit=emit,
                context=context,
            )
        else:
            # Otherwise we picked the wrong kind of helper.
            assert not needs_check, value_name.getCType()
            getReleaseCodes(
                release_names=(left_name, right_name), emit=emit, context=context
            )
        # TODO: Depending on operation, we could not produce a reference, if result *must*
        # be boolean, but then we would have some helpers that do it, and some that do not
        # do it.
        if helper_type is CTypePyObjectPtr:
            context.addCleanupTempName(value_name)
        if value_name is not to_name:
            target_type.emitAssignConversionCode(
                to_name=to_name,
                value_name=value_name,
                # TODO: Right now we don't do conversions here that could fail.
                needs_check=False,
                emit=emit,
                context=context,
            )
unary_operator_codes = {
"UAdd": ("PyNumber_Positive", 1),
"USub": ("PyNumber_Negative", 1),
"Invert": ("PyNumber_Invert", 1),
"Repr": ("PyObject_Repr", 1),
"Not": ("UNARY_NOT", 0),
}
def _getUnaryOperationCode(
    to_name, expression, operator, arg_name, needs_check, emit, context
):
    """Emit C code applying the unary *operator* to *arg_name*.

    The result is assigned (via a temporary) to *to_name*; error-exit code is
    emitted after the helper call, and the temporary is registered for cleanup
    when the helper produces a value that must be released (per the
    unary_operator_codes table).
    """
    impl_helper, ref_count = unary_operator_codes[operator]

    helper = "UNARY_OPERATION"
    prefix_args = (impl_helper,)

    with withObjectCodeTemporaryAssignment(
        to_name, "op_%s_res" % operator.lower(), expression, emit, context
    ) as value_name:
        emit(
            "%s = %s(%s);"
            % (
                value_name,
                helper,
                # Loop variable renamed from "arg_name": the original generator
                # expression shadowed the parameter of the same name.
                ", ".join(str(call_arg) for call_arg in prefix_args + (arg_name,)),
            )
        )

        getErrorExitCode(
            check_name=value_name,
            release_name=arg_name,
            needs_check=needs_check,
            emit=emit,
            context=context,
        )

        if ref_count:
            context.addCleanupTempName(value_name)
/Beads-0.1.4-py3-none-any.whl/beads/modules/gui.py | from meta import *
from modules.logic import process
from modules.errors import ParsingError, ValidationError, UnsupportedLanguageError
from modules import defaults, reader
from typing import IO
from distutils.util import strtobool
import logging
import eel
import json
# Static options for the eel browser window: initial size in pixels and the
# file extensions the embedded web server is allowed to serve.
browser_options: dict = {
    'window_size': (1200, 800),
    'extensions': ['.js', '.html'],
}

# Parsed content of the file given on the command line, served to the GUI via
# load_file(); stays None when the application was started without a file.
# NOTE(review): the annotation `dict or None` evaluates to just `dict`;
# `Optional[dict]` was probably intended -- confirm and fix with typing.
file_to_load: dict or None = None
def start(file: IO, opts: dict) -> None:
    """
    Start the local eel server hosting the Graphical User Interface.

    When FILE is given it is parsed up-front and stashed in the module-level
    ``file_to_load`` so the GUI can fetch it; otherwise the GUI starts empty.

    :param file: File to open on start or None
    :param opts: Dictionary with available gui options
    """
    global file_to_load

    startup_options = {PORT: opts[PORT]}

    if file:
        file_to_load = reader.parse_file(file)

    eel.init(GUI_PATH, allowed_extensions=browser_options['extensions'])

    if opts[NO_WINDOW]:
        startup_options[MODE] = None

    try:
        eel.start(HTML, options=startup_options, size=browser_options['window_size'])
    except Exception:
        # Preferred browser could not be launched; retry with Edge as fallback.
        startup_options[MODE] = 'edge'
        eel.start(HTML, options=startup_options, size=browser_options['window_size'])
""" GUI API
API for the graphical user interface provided with web technologies.
All functions listed below that are annotated with @eel.expose can be called on javascript side
via the exposed 'eel' object.
"""
@eel.expose
def parse(graph: str, language: str, opts: str) -> (str, str):
    """
    Parse the given json string in the provided language and generate code.

    Returns a (code, error) pair: on success ``code`` holds the generated
    source and ``error`` is None; on failure ``code`` is None and ``error``
    holds a human-readable message.

    :param graph: fsm as json
    :param language: Programming language to use for code generation
    :param opts: JSON string of available options
    """
    logging.debug(f'GUI: parse called with JSON: {graph} and language: {language}')
    logging.debug(f'GUI: provided options: {opts}')
    # Exactly one of these stays None, signalling success or failure to the GUI.
    code = None
    error = None
    try:
        fsm: dict = json.loads(graph)
        logging.debug(f'GUI: Parsed provided GRAPH into state machine:\n {fsm}')
        options: dict = json.loads(opts)
        logging.debug(f'GUI: Parsed provided OPTS into options:\n {options}')
        skip_validation = options[SKIP_VALIDATION]
        # The GUI may send the flag as a string ("true"/"false"); normalize it.
        # (isinstance instead of the original `type(...) is str` check.)
        if isinstance(skip_validation, str):
            skip_validation = bool(strtobool(skip_validation))
        if skip_validation:
            logging.debug('GUI: Validation of internal logic will be skipped!')
        code = process(fsm, language, skip_validation)
        logging.debug(f'GUI: Processed state machine to code:\n{code}')
    except ParsingError as pe:
        error = f'Content could not be parsed into a valid format!\n{pe.cause}'
    except ValidationError as ve:
        error = f'Validation of state machine logic failed: {ve.cause}'
    except UnsupportedLanguageError:
        error = f'Provided language: "{language}" is not supported!'
    if error is not None:
        logging.error(f'GUI: {error}')
        logging.warning('GUI: Code generation aborted. Returning error!')
    return code, error
@eel.expose
def load_file() -> str:
    """Serialize the file queued at startup (or None) as JSON for the GUI."""
    payload = file_to_load
    return json.dumps(payload)
@eel.expose
def set_option(key: str, value: str) -> None:
    """Store a default option chosen in the GUI; persist only when it changed."""
    changed = defaults.set_default(key, value)
    if changed:
        defaults.save()
@eel.expose
def unset_option(key: str) -> None:
    """Remove a stored default option from the GUI; persist only when removed."""
    removed = defaults.unset_default(key)
    if removed:
        defaults.save()
@eel.expose
def unset_all_options() -> None:
    """
    Unset all options via GUI.
    """
    # NOTE(review): unlike set_option/unset_option this propagates the result
    # of unset_all() to the JS caller although the signature says -> None --
    # confirm whether the return value is actually used on the GUI side.
    return defaults.unset_all()
@eel.expose
def available_languages() -> str:
    """
    Provide all available languages to the GUI as a JSON string
    of the form ``{"languages": [...]}``.
    """
    # list() replaces the redundant identity comprehension.
    return json.dumps({'languages': list(supported_languages)})
@eel.expose
def available_defaults() -> str:
    """
    Provide all available defaults to the GUI as a JSON string.

    Defaults follow the schema below:
    [key: string]: {
        'typevalue': any,
        'current': any,
        'description': string
    }
    """
    current: dict = defaults.current_defaults
    descriptions: dict = defaults.keys_help
    processed: dict = {}
    for key, value in defaults.keys_values.items():
        # Replace bare type objects with an empty instance so the GUI can
        # infer the expected input widget from the JSON value.
        if value is str:
            value = ''
        elif value is bool:
            value = False
        elif value is int:
            value = 0
        processed[key] = {
            'typevalue': value,
            'current': current.get(key),
            'description': descriptions[key]
        }
    try:
        return json.dumps(processed)
    except TypeError as te:
        logging.warning(f'GUI: json error: {te}')
        # BUG FIX: previously fell through and returned None despite the
        # -> str annotation; return an empty JSON object so the JS side
        # always receives valid JSON.
        return json.dumps({})
@eel.expose
def get_version() -> str:
    """
    Provide version information to the GUI.
    """
    # VERSION comes from the star-imported meta module.
    return VERSION
@eel.expose
def get_info() -> str:
    """
    Provide the tool documentation to the GUI.

    Loads the bundled README.md via the resource reader and returns its
    content serialized as a JSON string.
    """
    resource: dict = reader.load_resource('info', 'README.md')
    return json.dumps(resource[CONTENT])
/CLAMServices-2.2.3.tar.gz/CLAMServices-2.2.3/clamservices/config/alpino.py |
###############################################################
# CLAM: Computational Linguistics Application Mediator
# -- Service Configuration File (Template) --
# by Maarten van Gompel (proycon)
# Centre for Language and Speech Technology / Language Machines
# Radboud University Nijmegen
#
# https://proycon.github.io/clam
#
# Licensed under GPLv3
#
###############################################################
#Consult the CLAM manual for extensive documentation
from clam.common.parameters import *
from clam.common.formats import *
from clam.common.converters import *
from clam.common.viewers import *
from clam.common.data import *
from clam.common.digestauth import pwhash
import clamservices.wrappers
from base64 import b64decode as D
import clam
import sys
import os
# Minimum CLAM version this service configuration targets.
REQUIRE_VERSION = 3.0

# Directory containing the wrapper scripts shipped with clamservices.
WRAPPERDIR = clamservices.wrappers.__path__[0]

# ======== GENERAL INFORMATION ===========
# General information concerning your system.
#The System ID, a short alphanumeric identifier for internal use only
SYSTEM_ID = "alpino"
#System name, the way the system is presented to the world
SYSTEM_NAME = "Alpino"
#An informative description for this system (this should be fairly short, about one paragraph, and may not contain HTML)
SYSTEM_DESCRIPTION = "Alpino is a dependency parser for Dutch, developed in the context of the PIONIER Project Algorithms for Linguistic Processing, developed by Gertjan van Noord at the University of Groningen. You can upload either tokenised or untokenised files (which will be automatically tokenised for you using ucto), the output will consist of a zip file containing XML files, one for each sentence in the input document."
SYSTEM_AUTHOR = "Gertjan van Noord"
# NOTE(review): "Rijksuniversieit" looks like a typo for "Rijksuniversiteit"
# (user-visible string; left unchanged here).
SYSTEM_AFFILIATION = "Rijksuniversieit Groningen"
SYSTEM_URL = "http://www.let.rug.nl/vannoord/alp/Alpino/"
SYSTEM_EMAIL = "lamasoftware@science.ru.nl"
SYSTEM_LICENSE = "GNU Lesser General Public License v2.1"
INTERFACEOPTIONS = "centercover,coverheight100"

# ======== AUTHENTICATION & SECURITY ===========
#Users and passwords
#set security realm, a required component for hashing passwords (will default to SYSTEM_ID if not set)
#REALM = SYSTEM_ID
USERS = None #no user authentication/security (this is not recommended for production environments!)
DEBUG = False
FLATURL = None

# NOTE(review): if ALPINO_HOME is neither set in the environment nor provided
# by the external configuration loaded below, the COMMAND assignment further
# down raises NameError at import time -- confirm deployments always set it.
if 'ALPINO_HOME' in os.environ:
    ALPINO_HOME = os.environ['ALPINO_HOME']

#Load external configuration file
loadconfig(__name__)
# ======== ENABLED FORMATS ===========
class AlpinoXMLCollection(CLAMMetaData):
    """Custom CLAM metadata format: a zip archive of per-sentence Alpino XML files."""
    attributes = {}
    name = "Alpino XML Collection"
    # Served as a zip archive (one XML file per sentence).
    mimetype = 'application/zip'
    scheme = '' #for later perhaps

# Register the custom format so CLAM can use it in the profiles below.
CUSTOM_FORMATS = [ AlpinoXMLCollection ]
# ======== PROFILE DEFINITIONS ===========
#Define your profiles here. This is required for the project paradigm, but can be set to an empty list if you only use the action paradigm.
# Two profiles: one for already-tokenised input (.tok) and one for raw text
# (.txt) that is tokenised first.
PROFILES = [
    Profile(
        InputTemplate('tokinput', PlainTextFormat,"Plaintext tokenised input, one sentence per line",
            StaticParameter(id='encoding',name='Encoding',description='The character encoding of the file', value='utf-8'), #note that encoding is required if you work with PlainTextFormat
            extension='.tok',
            multi=True
        ),
        #------------------------------------------------------------------------------------------------------------------------
        OutputTemplate('alpinooutput',AlpinoXMLCollection,'Alpino XML output (XML files per sentence)',
            extension='.alpinoxml.zip', #set an extension or set a filename:
            removeextension='.tok',
            multi=True,
        ),
        OutputTemplate('foliaoutput',FoLiAXMLFormat,'FoLiA XML Output',
            FoLiAViewer(),
            extension='.folia.xml', #set an extension or set a filename:
            removeextension='.tok',
            multi=True,
        ),
    ),
    Profile(
        InputTemplate('untokinput', PlainTextFormat,"Plaintext document (untokenised)",
            StaticParameter(id='encoding',name='Encoding',description='The character encoding of the file', value='utf-8'), #note that encoding is required if you work with PlainTextFormat
            #MSWordConverter(id='docconv',label='Convert from MS Word Document'),
            extension='.txt',
            multi=True, #set unique=True if the user may only upload a file for this input template once. Set multi=True if you the user may upload multiple of such files
        ),
        #------------------------------------------------------------------------------------------------------------------------
        OutputTemplate('tokoutput', PlainTextFormat,"Plaintext tokenised output, one sentence per line",
            SetMetaField('encoding','utf-8'),
            # NOTE(review): this profile mixes 'removeextensions' (plural, here
            # and below) with 'removeextension' (singular, used everywhere
            # else) -- confirm against the CLAM OutputTemplate API that the
            # plural spelling is not silently ignored as an unknown kwarg.
            removeextensions='.txt',
            extension='.tok',
            multi=True,
        ),
        OutputTemplate('alpinooutput',AlpinoXMLCollection,'Alpino XML output (XML files per sentence)',
            extension='.alpinoxml.zip', #set an extension or set a filename:
            removeextensions='.txt',
            multi=True,
        ),
        OutputTemplate('foliaoutput',FoLiAXMLFormat,'FoLiA XML Output',
            FoLiAViewer(),
            FLATViewer(url=FLATURL, mode='viewer') if FLATURL else None,
            extension='.folia.xml', #set an extension or set a filename:
            removeextension='.txt',
            multi=True,
        ),
    )
]
# ======== COMMAND ===========
#The system command for the project paradigm.
#It is recommended you set this to small wrapper
#script around your actual system. Full shell syntax is supported. Using
#absolute paths is preferred. The current working directory will be
#set to the project directory.
#
#You can make use of the following special variables,
#which will be automatically set by CLAM:
# $INPUTDIRECTORY - The directory where input files are uploaded.
# $OUTPUTDIRECTORY - The directory where the system should output
# its output files.
# $STATUSFILE - Filename of the .status file where the system
# should output status messages.
# $DATAFILE - Filename of the clam.xml file describing the
# system and chosen configuration.
# $USERNAME - The username of the currently logged in user
# (set to "anonymous" if there is none)
# $PARAMETERS - List of chosen parameters, using the specified flags
#
# Wrapper invocation for the project paradigm; the Alpino installation path is
# appended so the wrapper can locate the parser (ALPINO_HOME must have been
# defined via the environment or external configuration by this point).
COMMAND = WRAPPERDIR + "/alpino_wrapper.py $DATAFILE $STATUSFILE $OUTPUTDIRECTORY " + ALPINO_HOME
#COMMAND = None #Set to none if you only use the action paradigm
# ======== PARAMETER DEFINITIONS ===========
#The global parameters (for the project paradigm) are subdivided into several
#groups. In the form of a list of (groupname, parameters) tuples. The parameters
#are a list of instances from common/parameters.py
PARAMETERS = []
#PARAMETERS = [
# ('Alpino Parameters', [
# #BooleanParameter(id='veryfast',name='Very fast',description='Improves the speed of the parser, returns only the first (best) analysis.'),
# #BooleanParameter(id='slow',name='slow',description='Provide all possible parses'),
# #ChoiceParameter(id='casesensitive',name='Case Sensitivity',description='Enable case sensitive behaviour?', choices=['yes','no'],default='no'),
# #StringParameter(id='author',name='Author',description='Sign output metadata with the specified author name',maxlength=255),
# ] )
#]
# ======= ACTIONS =============
#The action paradigm is an independent Remote-Procedure-Call mechanism that
#allows you to tie scripts (command=) or Python functions (function=) to URLs.
#It has no notion of projects or files and must respond in real-time. The syntax
#for commands is equal to those of COMMAND above, any file or project specific
#variables are not available though, so there is no $DATAFILE, $STATUSFILE, $INPUTDIRECTORY, $OUTPUTDIRECTORY or $PROJECT.
# No remote-procedure actions are exposed by this service; the commented-out
# entries below are templates showing the two supported action styles.
ACTIONS = [
    #Action(id='multiply',name='Multiply',parameters=[IntegerParameter(id='x',name='Value'),IntegerParameter(id='y',name='Multiplier'), command=sys.path[0] + "/actions/multiply.sh $PARAMETERS" ])
    #Action(id='multiply',name='Multiply',parameters=[IntegerParameter(id='x',name='Value'),IntegerParameter(id='y',name='Multiplier'), function=lambda x,y: x*y ])
]
# ======== DISPATCHING (ADVANCED! YOU CAN SAFELY SKIP THIS!) ========
#The dispatcher to use (defaults to clamdispatcher.py), you almost never want to change this
#DISPATCHER = 'clamdispatcher.py'
#DISPATCHER_POLLINTERVAL = 30 #interval at which the dispatcher polls for resource consumption (default: 30 secs)
#DISPATCHER_MAXRESMEM = 0 #maximum consumption of resident memory (in megabytes), processes that exceed this will be automatically aborted. (0 = unlimited, default)
#DISPATCHER_MAXTIME = 0 #maximum number of seconds a process may run, it will be aborted if this duration is exceeded. (0=unlimited, default)
#DISPATCHER_PYTHONPATH = [] #list of extra directories to add to the python path prior to launch of dispatcher
#Run background process on a remote host? Then set the following (leave the lambda in):
#REMOTEHOST = lambda: return 'some.remote.host'
#REMOTEUSER = 'username'
#For this to work, the user under which CLAM runs must have (passwordless) ssh access (use ssh keys) to the remote host using the specified username (ssh REMOTEUSER@REMOTEHOST)
#Moreover, both systems must have access to the same filesystem (ROOT) under the same mountpoint. | PypiClean |
/FreePyBX-1.0-RC1.tar.gz/FreePyBX-1.0-RC1/freepybx/public/js/dojox/grid/enhanced/plugins/IndirectSelection.js | define("dojox/grid/enhanced/plugins/IndirectSelection",["dojo/_base/declare","dojo/_base/array","dojo/_base/event","dojo/_base/lang","dojo/_base/html","dojo/_base/window","dojo/_base/connect","dojo/_base/sniff","dojo/query","dojo/keys","dojo/string","../_Plugin","../../EnhancedGrid","../../cells/dijit"],function(_1,_2,_3,_4,_5,_6,_7,_8,_9,_a,_b,_c,_d){
var _e=_4.getObject("dojox.grid.cells");
var _f=_1("dojox.grid.cells.RowSelector",_e._Widget,{inputType:"",map:null,disabledMap:null,isRowSelector:true,_connects:null,_subscribes:null,checkedText:"✓",unCheckedText:"O",constructor:function(){
this.map={};
this.disabledMap={},this.disabledCount=0;
this._connects=[];
this._subscribes=[];
this.inA11YMode=_5.hasClass(_6.body(),"dijit_a11y");
this.baseClass="dojoxGridRowSelector dijitReset dijitInline dijit"+this.inputType;
this.checkedClass=" dijit"+this.inputType+"Checked";
this.disabledClass=" dijit"+this.inputType+"Disabled";
this.checkedDisabledClass=" dijit"+this.inputType+"CheckedDisabled";
this.statusTextClass=" dojoxGridRowSelectorStatusText";
this._connects.push(_7.connect(this.grid,"dokeyup",this,"_dokeyup"));
this._connects.push(_7.connect(this.grid.selection,"onSelected",this,"_onSelected"));
this._connects.push(_7.connect(this.grid.selection,"onDeselected",this,"_onDeselected"));
this._connects.push(_7.connect(this.grid.scroller,"invalidatePageNode",this,"_pageDestroyed"));
this._connects.push(_7.connect(this.grid,"onCellClick",this,"_onClick"));
this._connects.push(_7.connect(this.grid,"updateRow",this,"_onUpdateRow"));
},formatter:function(_10,_11,_12){
var _13=_12;
var _14=_13.baseClass;
var _15=_13.getValue(_11);
var _16=!!_13.disabledMap[_11];
if(_15){
_14+=_13.checkedClass;
if(_16){
_14+=_13.checkedDisabledClass;
}
}else{
if(_16){
_14+=_13.disabledClass;
}
}
return ["<div tabindex = -1 ","id = '"+_13.grid.id+"_rowSelector_"+_11+"' ","name = '"+_13.grid.id+"_rowSelector' class = '"+_14+"' ","role = 'presentation' aria-pressed = '"+_15+"' aria-disabled = '"+_16+"' aria-label = '"+_b.substitute(_13.grid._nls["indirectSelection"+_13.inputType],[_11+1])+"'>","<span class = '"+_13.statusTextClass+"'>"+(_15?_13.checkedText:_13.unCheckedText)+"</span>","</div>"].join("");
},setValue:function(_17,_18){
},getValue:function(_19){
return this.grid.selection.isSelected(_19);
},toggleRow:function(_1a,_1b){
this._nativeSelect(_1a,_1b);
},setDisabled:function(_1c,_1d){
if(_1c<0){
return;
}
this._toggleDisabledStyle(_1c,_1d);
},disabled:function(_1e){
return !!this.disabledMap[_1e];
},_onClick:function(e){
if(e.cell===this){
this._selectRow(e);
}
},_dokeyup:function(e){
if(e.cellIndex==this.index&&e.rowIndex>=0&&e.keyCode==_a.SPACE){
this._selectRow(e);
}
},focus:function(_1f){
var _20=this.map[_1f];
if(_20){
_20.focus();
}
},_focusEndingCell:function(_21,_22){
var _23=this.grid.getCell(_22);
this.grid.focus.setFocusCell(_23,_21);
},_nativeSelect:function(_24,_25){
this.grid.selection[_25?"select":"deselect"](_24);
},_onSelected:function(_26){
this._toggleCheckedStyle(_26,true);
},_onDeselected:function(_27){
this._toggleCheckedStyle(_27,false);
},_onUpdateRow:function(_28){
delete this.map[_28];
},_toggleCheckedStyle:function(_29,_2a){
var _2b=this._getSelector(_29);
if(_2b){
_5.toggleClass(_2b,this.checkedClass,_2a);
if(this.disabledMap[_29]){
_5.toggleClass(_2b,this.checkedDisabledClass,_2a);
}
_2b.setAttribute("aria-pressed",_2a);
if(this.inA11YMode){
_2b.firstChild.innerHTML=(_2a?this.checkedText:this.unCheckedText);
}
}
},_toggleDisabledStyle:function(_2c,_2d){
var _2e=this._getSelector(_2c);
if(_2e){
_5.toggleClass(_2e,this.disabledClass,_2d);
if(this.getValue(_2c)){
_5.toggleClass(_2e,this.checkedDisabledClass,_2d);
}
_2e.setAttribute("aria-disabled",_2d);
}
this.disabledMap[_2c]=_2d;
if(_2c>=0){
this.disabledCount+=_2d?1:-1;
}
},_getSelector:function(_2f){
var _30=this.map[_2f];
if(!_30){
var _31=this.view.rowNodes[_2f];
if(_31){
_30=_9(".dojoxGridRowSelector",_31)[0];
if(_30){
this.map[_2f]=_30;
}
}
}
return _30;
},_pageDestroyed:function(_32){
var _33=this.grid.scroller.rowsPerPage;
var _34=_32*_33,end=_34+_33-1;
for(var i=_34;i<=end;i++){
if(!this.map[i]){
continue;
}
_5.destroy(this.map[i]);
delete this.map[i];
}
},destroy:function(){
for(var i in this.map){
_5.destroy(this.map[i]);
delete this.map[i];
}
for(i in this.disabledMap){
delete this.disabledMap[i];
}
_2.forEach(this._connects,_7.disconnect);
_2.forEach(this._subscribes,_7.unsubscribe);
delete this._connects;
delete this._subscribes;
}});
var _35=_1("dojox.grid.cells.SingleRowSelector",_f,{inputType:"Radio",_selectRow:function(e){
var _36=e.rowIndex;
if(this.disabledMap[_36]){
return;
}
this._focusEndingCell(_36,0);
this._nativeSelect(_36,!this.grid.selection.selected[_36]);
}});
var _37=_1("dojox.grid.cells.MultipleRowSelector",_f,{inputType:"CheckBox",swipeStartRowIndex:-1,swipeMinRowIndex:-1,swipeMaxRowIndex:-1,toSelect:false,lastClickRowIdx:-1,toggleAllTrigerred:false,unCheckedText:"□",constructor:function(){
this._connects.push(_7.connect(_6.doc,"onmouseup",this,"_domouseup"));
this._connects.push(_7.connect(this.grid,"onRowMouseOver",this,"_onRowMouseOver"));
this._connects.push(_7.connect(this.grid.focus,"move",this,"_swipeByKey"));
this._connects.push(_7.connect(this.grid,"onCellMouseDown",this,"_onMouseDown"));
if(this.headerSelector){
this._connects.push(_7.connect(this.grid.views,"render",this,"_addHeaderSelector"));
this._connects.push(_7.connect(this.grid,"_onFetchComplete",this,"_addHeaderSelector"));
this._connects.push(_7.connect(this.grid,"onSelectionChanged",this,"_onSelectionChanged"));
this._connects.push(_7.connect(this.grid,"onKeyDown",this,function(e){
if(e.rowIndex==-1&&e.cellIndex==this.index&&e.keyCode==_a.SPACE){
this._toggletHeader();
}
}));
}
},toggleAllSelection:function(_38){
var _39=this.grid,_3a=_39.selection;
if(_38){
_3a.selectRange(0,_39.rowCount-1);
}else{
_3a.deselectAll();
}
this.toggleAllTrigerred=true;
},_onMouseDown:function(e){
if(e.cell==this){
this._startSelection(e.rowIndex);
_3.stop(e);
}
},_onRowMouseOver:function(e){
this._updateSelection(e,0);
},_domouseup:function(e){
if(_8("ie")){
this.view.content.decorateEvent(e);
}
var _3b=e.cellIndex>=0&&this.inSwipeSelection()&&!this.grid.edit.isEditRow(e.rowIndex);
if(_3b){
this._focusEndingCell(e.rowIndex,e.cellIndex);
}
this._finishSelect();
},_dokeyup:function(e){
this.inherited(arguments);
if(!e.shiftKey){
this._finishSelect();
}
},_startSelection:function(_3c){
this.swipeStartRowIndex=this.swipeMinRowIndex=this.swipeMaxRowIndex=_3c;
this.toSelect=!this.getValue(_3c);
},_updateSelection:function(e,_3d){
if(!this.inSwipeSelection()){
return;
}
var _3e=_3d!==0;
var _3f=e.rowIndex,_40=_3f-this.swipeStartRowIndex+_3d;
if(_40>0&&this.swipeMaxRowIndex<_3f+_3d){
this.swipeMaxRowIndex=_3f+_3d;
}
if(_40<0&&this.swipeMinRowIndex>_3f+_3d){
this.swipeMinRowIndex=_3f+_3d;
}
var min=_40>0?this.swipeStartRowIndex:_3f+_3d;
var max=_40>0?_3f+_3d:this.swipeStartRowIndex;
for(var i=this.swipeMinRowIndex;i<=this.swipeMaxRowIndex;i++){
if(this.disabledMap[i]||i<0){
continue;
}
if(i>=min&&i<=max){
this._nativeSelect(i,this.toSelect);
}else{
if(!_3e){
this._nativeSelect(i,!this.toSelect);
}
}
}
},_swipeByKey:function(_41,_42,e){
if(!e||_41===0||!e.shiftKey||e.cellIndex!=this.index||this.grid.focus.rowIndex<0){
return;
}
var _43=e.rowIndex;
if(this.swipeStartRowIndex<0){
this.swipeStartRowIndex=_43;
if(_41>0){
this.swipeMaxRowIndex=_43+_41;
this.swipeMinRowIndex=_43;
}else{
this.swipeMinRowIndex=_43+_41;
this.swipeMaxRowIndex=_43;
}
this.toSelect=this.getValue(_43);
}
this._updateSelection(e,_41);
},_finishSelect:function(){
this.swipeStartRowIndex=-1;
this.swipeMinRowIndex=-1;
this.swipeMaxRowIndex=-1;
this.toSelect=false;
},inSwipeSelection:function(){
return this.swipeStartRowIndex>=0;
},_nativeSelect:function(_44,_45){
this.grid.selection[_45?"addToSelection":"deselect"](_44);
},_selectRow:function(e){
var _46=e.rowIndex;
if(this.disabledMap[_46]){
return;
}
_3.stop(e);
this._focusEndingCell(_46,0);
var _47=_46-this.lastClickRowIdx;
var _48=!this.grid.selection.selected[_46];
if(this.lastClickRowIdx>=0&&!e.ctrlKey&&!e.altKey&&e.shiftKey){
var min=_47>0?this.lastClickRowIdx:_46;
var max=_47>0?_46:this.lastClickRowIdx;
for(var i=min;i>=0&&i<=max;i++){
this._nativeSelect(i,_48);
}
}else{
this._nativeSelect(_46,_48);
}
this.lastClickRowIdx=_46;
},getValue:function(_49){
if(_49==-1){
var g=this.grid;
return g.rowCount>0&&g.rowCount<=g.selection.getSelectedCount();
}
return this.inherited(arguments);
},_addHeaderSelector:function(){
var _4a=this.view.getHeaderCellNode(this.index);
if(!_4a){
return;
}
_5.empty(_4a);
var g=this.grid;
var _4b=_4a.appendChild(_5.create("div",{"aria-label":g._nls["selectAll"],"tabindex":-1,"id":g.id+"_rowSelector_-1","class":this.baseClass,"role":"presentation","innerHTML":"<span class = '"+this.statusTextClass+"'></span><span style='height: 0; width: 0; overflow: hidden; display: block;'>"+g._nls["selectAll"]+"</span>"}));
this.map[-1]=_4b;
var idx=this._headerSelectorConnectIdx;
if(idx!==undefined){
_7.disconnect(this._connects[idx]);
this._connects.splice(idx,1);
}
this._headerSelectorConnectIdx=this._connects.length;
this._connects.push(_7.connect(_4b,"onclick",this,"_toggletHeader"));
this._onSelectionChanged();
},_toggletHeader:function(){
if(!!this.disabledMap[-1]){
return;
}
this.grid._selectingRange=true;
this.toggleAllSelection(!this.getValue(-1));
this._onSelectionChanged();
this.grid._selectingRange=false;
},_onSelectionChanged:function(){
var g=this.grid;
if(!this.map[-1]||g._selectingRange){
return;
}
g.allItemsSelected=this.getValue(-1);
this._toggleCheckedStyle(-1,g.allItemsSelected);
},_toggleDisabledStyle:function(_4c,_4d){
this.inherited(arguments);
if(this.headerSelector){
var _4e=(this.grid.rowCount==this.disabledCount);
if(_4e!=!!this.disabledMap[-1]){
arguments[0]=-1;
arguments[1]=_4e;
this.inherited(arguments);
}
}
}});
var _4f=_1("dojox.grid.enhanced.plugins.IndirectSelection",_c,{name:"indirectSelection",constructor:function(){
var _50=this.grid.layout;
this.connect(_50,"setStructure",_4.hitch(_50,this.addRowSelectCell,this.option));
},addRowSelectCell:function(_51){
if(!this.grid.indirectSelection||this.grid.selectionMode=="none"){
return;
}
var _52=false,_53=["get","formatter","field","fields"],_54={type:_37,name:"",width:"30px",styles:"text-align: center;"};
if(_51.headerSelector){
_51.name="";
}
if(this.grid.rowSelectCell){
this.grid.rowSelectCell.destroy();
}
_2.forEach(this.structure,function(_55){
var _56=_55.cells;
if(_56&&_56.length>0&&!_52){
var _57=_56[0];
if(_57[0]&&_57[0].isRowSelector){
_52=true;
return;
}
var _58,_59=this.grid.selectionMode=="single"?_35:_37;
_58=_4.mixin(_54,_51,{type:_59,editable:false,notselectable:true,filterable:false,navigatable:true,nosort:true});
_2.forEach(_53,function(_5a){
if(_5a in _58){
delete _58[_5a];
}
});
if(_56.length>1){
_58.rowSpan=_56.length;
}
_2.forEach(this.cells,function(_5b,i){
if(_5b.index>=0){
_5b.index+=1;
}else{
console.warn("Error:IndirectSelection.addRowSelectCell()- cell "+i+" has no index!");
}
});
var _5c=this.addCellDef(0,0,_58);
_5c.index=0;
_57.unshift(_5c);
this.cells.unshift(_5c);
this.grid.rowSelectCell=_5c;
_52=true;
}
},this);
this.cellCount=this.cells.length;
},destroy:function(){
this.grid.rowSelectCell.destroy();
delete this.grid.rowSelectCell;
this.inherited(arguments);
}});
_d.registerPlugin(_4f,{"preInit":true});
return _4f;
}); | PypiClean |
/Carillon-1.0.2.tar.gz/Carillon-1.0.2/PROJECT.rst | ========================================
Carillon - ultra simple keyboard layouts
========================================
**Carillon** is a simple GTK3 application for switching your keyboard layout.
If offers a basic systray icon and menu for switching between a set defined
manually.
I created this because there aren't that many applications like this out there
for light desktop environments such as Openbox et al.
Quick Start
===========
This assumes you have a modern Python installation available (Python 2.7+ or
3.4+). To install, get via pip:
.. code:: console
$ pip install carillon
Now run the command ``carillon``.
Configuration
=============
Carillon looks for a YAML configuration file named ``default.yml`` in a number
of locations and uses the first match:
- current working directory
- XDG config directory e.g. ``~/.config/carillon``
- ``/etc/carillon/conf.d``
- ``/etc/carillon``
- directory of package install
This YAML file defines a map of possible keyboard layouts and the selected
layout. For example:
.. code:: yaml
---
# map of keyboard layouts with unique key name for each
keyboards:
# Irish keyboard with Macintosh variant
en_mac_ie:
name: English Mac (IE)
icon: ie.png
variant: mac
model: pc105
layout: gb
# International US English layout
en_us:
name: English (US)
icon: us.png
model: pc105
layout: us
# Default selected on startup is Irish keyboard
selected: en_mac_ie
| PypiClean |
/COMPAS-1.17.5.tar.gz/COMPAS-1.17.5/src/compas_plotters/artists/ellipseartist.py | from typing import Tuple
from typing import List
from typing import Any
from typing_extensions import Literal
from matplotlib.patches import Ellipse as EllipsePatch
from compas.geometry import Ellipse
from compas.artists import PrimitiveArtist
from .artist import PlotterArtist
Color = Tuple[float, float, float]
class EllipseArtist(PlotterArtist, PrimitiveArtist):
    """Artist for COMPAS ellipses.

    Parameters
    ----------
    ellipse : :class:`~compas.geometry.Ellipse`
        A COMPAS ellipse.
    linewidth : float, optional
        Width of the ellipse boundary.
    linestyle : {'solid', 'dotted', 'dashed', 'dashdot'}, optional
        Style of the ellipse boundary.
    facecolor : tuple[float ,float, float], optional
        Color of the interior of the ellipse.
    edgecolor : tuple[float, float, float], optional
        Color of the boundary of the ellipse.
    fill : bool, optional
        If True, draw the interior of the ellipse.
    alpha : float, optional
        Transparency of the ellipse.
    zorder : int, optional
        Stacking order of the ellipse on the canvas.
    **kwargs : dict, optional
        Additional keyword arguments.
        See :class:`~compas_plotters.artists.PlotterArtist` and :class:`~compas.artists.PrimitiveArtist` for more info.

    Attributes
    ----------
    ellipse : :class:`~compas.geometry.Ellipse`
        The ellipse associated with the artist.

    """

    def __init__(
        self,
        ellipse: Ellipse,
        linewidth: float = 1.0,
        linestyle: Literal["solid", "dotted", "dashed", "dashdot"] = "solid",
        facecolor: Color = (1.0, 1.0, 1.0),
        edgecolor: Color = (0, 0, 0),
        fill: bool = True,
        alpha: float = 1.0,
        zorder: int = 1000,
        **kwargs: Any
    ):
        super().__init__(primitive=ellipse, **kwargs)

        self._mpl_ellipse = None
        self.linewidth = linewidth
        self.linestyle = linestyle
        self.facecolor = facecolor
        self.edgecolor = edgecolor
        self.fill = fill
        self.alpha = alpha
        self.zorder = zorder

    @property
    def ellipse(self):
        return self.primitive

    @ellipse.setter
    def ellipse(self, ellipse):
        self.primitive = ellipse

    @property
    def data(self) -> List[List[float]]:
        # Extreme points of the ellipse on its two principal axes; each slice
        # of the center produces an independent list, so the in-place offsets
        # below do not interfere with each other.
        points = [
            self.ellipse.center[:2],
            self.ellipse.center[:2],
            self.ellipse.center[:2],
            self.ellipse.center[:2],
        ]
        points[0][0] -= self.ellipse.major
        points[1][0] += self.ellipse.major
        points[2][1] -= self.ellipse.minor
        points[3][1] += self.ellipse.minor
        return points

    def draw(self) -> None:
        """Draw the ellipse on the plotter canvas.

        Returns
        -------
        None

        """
        ellipse = EllipsePatch(
            self.ellipse.center[:2],
            width=2 * self.ellipse.major,
            height=2 * self.ellipse.minor,
            facecolor=self.facecolor,
            edgecolor=self.edgecolor,
            # BUG FIX: linewidth and linestyle were accepted by the constructor
            # and stored, but never applied to the matplotlib patch.
            linewidth=self.linewidth,
            linestyle=self.linestyle,
            fill=self.fill,
            alpha=self.alpha,
            zorder=self.zorder,
        )
        self._mpl_ellipse = self.plotter.axes.add_artist(ellipse)

    def redraw(self) -> None:
        """Update the ellipse using the current geometry and visualization settings.

        Returns
        -------
        None

        """
        self._mpl_ellipse.center = self.ellipse.center[:2]
        self._mpl_ellipse.set_width(2 * self.ellipse.major)
        self._mpl_ellipse.set_height(2 * self.ellipse.minor)
        self._mpl_ellipse.set_edgecolor(self.edgecolor)
        self._mpl_ellipse.set_facecolor(self.facecolor)
        # Keep line styling in sync with the current settings as well.
        self._mpl_ellipse.set_linewidth(self.linewidth)
        self._mpl_ellipse.set_linestyle(self.linestyle)
/Biomatters-Azimuth-2-0.1.tar.gz/Biomatters-Azimuth-2-0.1/azimuth/predict.py | import numpy as np
import sklearn
from sklearn.metrics import roc_curve, auc
import sklearn.metrics
import sklearn.cross_validation
import copy
import util
import time
import metrics as ranking_metrics
import azimuth.models.regression
import azimuth.models.ensembles
import azimuth.models.DNN
import azimuth.models.baselines
import multiprocessing
def fill_in_truth_and_predictions(truth, predictions, fold, y_all, y_pred, learn_options, test):
    """Append this fold's ground truth and predictions to the running per-fold arrays.

    truth[fold] accumulates the rank-transformed targets ('ranks'), the binary
    targets ('thrs') and, when configured, the raw targets ('raw') for the rows
    selected by *test*; predictions[fold] accumulates y_pred.  Both containers
    are mutated in place and also returned.
    """
    truth[fold]['ranks'] = np.hstack((truth[fold]['ranks'],
                                      y_all[learn_options['rank-transformed target name']].values[test].flatten()))
    truth[fold]['thrs'] = np.hstack((truth[fold]['thrs'],
                                     y_all[learn_options['binary target name']].values[test].flatten()))
    # BUG FIX: the guard previously tested for the key 'raw_target_name'
    # (underscores) while the lookup below reads 'raw target name' (spaces,
    # consistent with the other option keys), so the branch could never fire
    # safely; test the key that is actually read.
    if 'raw target name' in learn_options:
        truth[fold]['raw'] = np.hstack((truth[fold]['raw'],
                                        y_all[learn_options['raw target name']].values[test].flatten()))
    predictions[fold] = np.hstack((predictions[fold], y_pred.flatten()))
    return truth, predictions
def construct_filename(learn_options, TEST):
    """Build a descriptive results-file name from the learning options.

    The name encodes the data version, method, feature order, target name,
    (for linreg) penalty, CV scheme and training metric.  Uses `in` instead of
    the deprecated Python-2-only dict.has_key and parenthesized print so the
    function also runs on Python 3.
    """
    if "V" in learn_options:
        filename = "V%s" % learn_options["V"]
    else:
        filename = "offV1"

    if TEST:
        # NOTE(review): this *replaces* the version prefix rather than
        # prepending to it; behavior kept as-is -- confirm it is intended.
        filename = "TEST."

    filename += learn_options["method"]
    filename += ".order%d" % learn_options["order"]
    filename += learn_options["target_name"]

    # Only linear regression encodes its regularization settings in the name
    # (GPy and other methods add nothing here).
    if learn_options["method"] == "linreg":
        filename += "." + learn_options["penalty"]
        filename += "." + learn_options["cv"]
    else:
        filename += "." + learn_options["cv"]

    metric = learn_options["training_metric"]
    if metric == "NDCG":
        filename += ".NDGC_%d" % learn_options["NDGC_k"]
    elif metric == "AUC":
        filename += ".AUC"
    elif metric == "spearmanr":
        filename += ".spearman"

    print("filename = %s" % filename)
    return filename
def print_summary(global_metric, results, learn_options, feature_sets, flags):
    """Print a human-readable summary of a cross-validation run.

    Parameters
    ----------
    global_metric : float
        Metric computed over all folds pooled together.
    results : sequence
        ``results[0]`` is the per-fold metric list; ``results[4]`` is the
        total feature count.
    learn_options : dict
        Run configuration (must contain 'metric', 'order', 'target_name').
    feature_sets : dict
        Feature-set name -> DataFrame; only the names are printed.
    flags : dict
        Option names to echo from ``learn_options``.
    """
    print("\nSummary:")
    print(learn_options)
    print("\t\tglobal %s=%.2f" % (learn_options['metric'], global_metric))
    print("\t\tmedian %s across folds=%.2f" % (learn_options['metric'], np.median(results[0])))
    print("\t\torder=%d" % learn_options["order"])
    # BUG FIX: the original built the kerntype string but never printed it
    # (the `print` keyword was missing).
    if 'kerntype' in learn_options:
        print("\t\tkern type = %s" % learn_options['kerntype'])
    if 'degree' in learn_options:
        print("\t\tdegree=%d" % learn_options['degree'])
    print("\t\ttarget_name=%s" % learn_options["target_name"])
    for k in flags:
        print('\t\t' + k + '=' + str(learn_options[k]))
    print("\t\tfeature set:")
    # renamed loop variable: `set` shadowed the builtin
    for set_name in feature_sets:
        print("\t\t\t%s" % set_name)
    print("\t\ttotal # features=%d" % results[4])
def extract_fpr_tpr_for_fold(aucs, fold, i, predictions, truth, y_binary, test, y_pred):
    """Compute the ROC AUC on one fold's test split and append it to ``aucs``.

    ``fold``, ``i``, ``predictions`` and ``truth`` are accepted for signature
    parity with the other extract_* helpers and are not used here.
    """
    # AUC is only meaningful for binary targets.
    assert len(np.unique(y_binary)) <= 2, "if using AUC need binary targets"
    false_pos_rate, true_pos_rate, _ = roc_curve(y_binary[test], y_pred)
    aucs.append(auc(false_pos_rate, true_pos_rate))
def extract_NDCG_for_fold(metrics, fold, i, predictions, truth, y_ground_truth, test, y_pred, learn_options):
    """Append the NDCG@k (tie-aware) for one fold's test split to ``metrics``."""
    score = ranking_metrics.ndcg_at_k_ties(
        y_ground_truth[test].flatten(),
        y_pred.flatten(),
        learn_options["NDGC_k"],
    )
    metrics.append(score)
def extract_spearman_for_fold(metrics, fold, i, predictions, truth, y_ground_truth, test, y_pred, learn_options):
    """Append the Spearman correlation for one fold's test split to ``metrics``."""
    rho = util.spearmanr_nonan(y_ground_truth[test].flatten(), y_pred.flatten())[0]
    assert not np.isnan(rho), "found nan spearman"
    metrics.append(rho)
def get_train_test(test_gene, y_all, train_genes=None):
    """Split row indices of ``y_all`` into train/test by held-out gene.

    Parameters
    ----------
    test_gene : str
        Gene held out for testing.  The special value 'dummy' makes the test
        set identical to the train set (used when no real CV is wanted).
    y_all : pandas.DataFrame
        Indexed by a MultiIndex with a 'Target gene' level.
    train_genes : iterable of str, optional
        If given, restrict training rows to these genes (minus ``test_gene``).
        This matters because train_genes + test_genes may not cover all genes
        (e.g. loading V3 data but using only V2).

    Returns
    -------
    (train, test) : integer index arrays into ``y_all``.
    """
    # Hoisted: the gene labels are needed several times below.
    genes = y_all.index.get_level_values('Target gene').values
    not_test = (genes != test_gene)
    if train_genes is not None:
        in_train_genes = np.zeros(not_test.shape, dtype=bool)
        for t_gene in train_genes:
            in_train_genes = np.logical_or(in_train_genes, genes == t_gene)
        train = np.logical_and(not_test, in_train_genes)
    else:
        train = not_test
    if test_gene == 'dummy':
        # Test on the training rows themselves; see docstring.
        test = train
    else:
        test = (genes == test_gene)
    # Convert boolean masks to integer indices (`mask == True` was redundant).
    test = np.where(test)[0]
    train = np.where(train)[0]
    return train, test
def cross_validate(y_all, feature_sets, learn_options=None, TEST=False, train_genes=None, CV=True):
    '''
    feature_sets is a dictionary of "set name" to pandas.DataFrame
    one set might be single-nucleotide, position-independent features of order X, for e.g.
    Method: "GPy" or "linreg"
    Metric: NDCG (learning to rank metric, Normalized Discounted Cumulative Gain); AUC
    Output: cv_score_median, gene_rocs
    When CV=False, it trains on everything (and tests on everything, just to fit the code)

    Returns (metrics, gene_pred, fold_labels, m, dimsum, filename, feature_names),
    where ``m`` maps fold index -> fitted model object and ``gene_pred`` is the
    single-element list [(truth, predictions)].
    '''
    print "range of y_all is [%f, %f]" % (np.min(y_all[learn_options['target_name']].values), np.max(y_all[learn_options['target_name']].values))
    allowed_methods = ["GPy", "linreg", "AdaBoostRegressor", "AdaBoostClassifier",
                       "DecisionTreeRegressor", "RandomForestRegressor",
                       "ARDRegression", "GPy_fs", "mean", "random", "DNN",
                       "lasso_ensemble", "doench", "logregL1", "sgrna_from_doench", 'SVC', 'xu_et_al']
    assert learn_options["method"] in allowed_methods,"invalid method: %s" % learn_options["method"]
    # Precedence parses as (method=="linreg" and penalty=="L2") or weighted is None,
    # which matches the message's intent.
    assert learn_options["method"] == "linreg" and learn_options['penalty'] == 'L2' or learn_options["weighted"] is None, "weighted only works with linreg L2 right now"
    # construct filename from options
    filename = construct_filename(learn_options, TEST)
    print "Cross-validating genes..."
    t2 = time.time()
    # Target column as an (n, 1) float64 array.
    y = np.array(y_all[learn_options["target_name"]].values[:,None],dtype=np.float64)
    # concatenate feature sets in to one nparray, and get dimension of each
    inputs, dim, dimsum, feature_names = util.concatenate_feature_sets(feature_sets)
    #import pickle; pickle.dump([y, inputs, feature_names, learn_options], open("saved_models/inputs.p", "wb" )); import ipdb; ipdb.set_trace()
    if not CV:
        assert learn_options['cv'] == 'gene', 'Must use gene-CV when CV is False (I need to use all of the genes and stratified complicates that)'
    # set-up for cross-validation
    ## for outer loop, the one Doench et al use genes for
    if learn_options["cv"] == "stratified":
        # NOTE(review): presence check uses key 'extra_pairs' but the value read is
        # 'extra pairs' (with a space) -- confirm which key is actually populated.
        assert not learn_options.has_key("extra_pairs") or learn_options['extra pairs'], "can't use extra pairs with stratified CV, need to figure out how to properly account for genes affected by two drugs"
        label_encoder = sklearn.preprocessing.LabelEncoder()
        label_encoder.fit(y_all['Target gene'].values)
        gene_classes = label_encoder.transform(y_all['Target gene'].values)
        # Fold count: explicit 'n_folds', else one fold per test gene, else one per gene.
        if 'n_folds' in learn_options.keys():
            n_folds = learn_options['n_folds']
        elif learn_options['train_genes'] is not None and learn_options["test_genes"] is not None:
            n_folds = len(learn_options["test_genes"])
        else:
            n_folds = len(learn_options['all_genes'])
        cv = sklearn.cross_validation.StratifiedKFold(gene_classes, n_folds=n_folds, shuffle=True)
        fold_labels = ["fold%d" % i for i in range(1,n_folds+1)]
        if learn_options['num_genes_remove_train'] is not None:
            # NOTE(review): NotImplementedException is not defined anywhere in this
            # module -- this line would raise NameError; NotImplementedError was
            # presumably intended.
            raise NotImplementedException()
    elif learn_options["cv"]=="gene":
        # Leave-one-gene-out CV: each fold holds out one gene.
        cv = []
        if not CV:
            train_test_tmp = get_train_test('dummy', y_all) # get train, test split using a dummy gene
            #train_tmp, test_tmp = train_test_tmp
            # not a typo, using training set to test on as well, just for this case. Test set is not used
            # for internal cross-val, etc. anyway.
            #train_test_tmp = (train_tmp, train_tmp)
            cv.append(train_test_tmp)
            fold_labels = ["dummy_for_no_cv"]#learn_options['all_genes']
        elif learn_options['train_genes'] is not None and learn_options["test_genes"] is not None:
            assert learn_options['train_genes'] is not None and learn_options['test_genes'] is not None, "use both or neither"
            for i, gene in enumerate(learn_options['test_genes']):
                cv.append(get_train_test(gene, y_all, learn_options['train_genes']))
            fold_labels = learn_options["test_genes"]
            # if train and test genes are seperate, there should be only one fold
            # NOTE(review): train_test_disjoint is computed but never used below.
            train_test_disjoint = set.isdisjoint(set(learn_options["train_genes"].tolist()), set(learn_options["test_genes"].tolist()))
        else:
            for i, gene in enumerate(learn_options['all_genes']):
                train_test_tmp = get_train_test(gene, y_all)
                cv.append(train_test_tmp)
            fold_labels = learn_options['all_genes']
            # Optionally thin the training set by dropping whole genes (ablation study).
            if learn_options['num_genes_remove_train'] is not None:
                for i, (train,test) in enumerate(cv):
                    unique_genes =  np.random.permutation(np.unique(np.unique(y_all['Target gene'][train])))
                    genes_to_keep = unique_genes[0:len(unique_genes) - learn_options['num_genes_remove_train']]
                    guides_to_keep = []
                    filtered_train = []
                    for j, gene in enumerate(y_all['Target gene']):
                        if j in train and gene in genes_to_keep:
                            filtered_train.append(j)
                    cv_i_orig = copy.deepcopy(cv[i])
                    cv[i] = (filtered_train, test)
                    # Sanity check: removing zero genes must leave the fold unchanged.
                    if learn_options['num_genes_remove_train']==0:
                        assert np.all(cv_i_orig[0]==cv[i][0])
                        assert np.all(cv_i_orig[1]==cv[i][1])
                    print "# train/train after/before is %s, %s" % (len(cv[i][0]), len(cv_i_orig[0]))
                    print "# test/test after/before is %s, %s" % (len(cv[i][1]), len(cv_i_orig[1]))
    else:
        raise Exception("invalid cv options given: %s" % learn_options["cv"])
    cv = [c for c in cv] #make list from generator, so can subset for TEST case
    if TEST:
        # Smoke-test mode: run only the first fold.
        ind_to_use = [0]#[0,1]
        cv = [cv[i] for i in ind_to_use]
        fold_labels = [fold_labels[i] for i in ind_to_use]
    # Per-fold accumulators for ground truth and predictions.
    truth = dict([(t, dict([(m, np.array([])) for m in ['raw', 'ranks', 'thrs']])) for t in fold_labels])
    predictions = dict([(t, np.array([])) for t in fold_labels])
    m = {}
    metrics = []
    #do the cross-validation
    num_proc = learn_options["num_proc"]
    if num_proc > 1:
        # Parallel path: one worker per fold (capped at the number of folds).
        num_proc = np.min([num_proc,len(cv)])
        print "using multiprocessing with %d procs--one for each fold" % num_proc
        jobs = []
        pool = multiprocessing.Pool(processes=num_proc)
        for i,fold in enumerate(cv):
            train,test = fold
            print "working on fold %d of %d, with %d train and %d test" % (i, len(cv), len(train), len(test))
            # Dispatch the fold to the model implementation named by learn_options["method"].
            if learn_options["method"]=="GPy":
                # NOTE(review): azimuth.models.GP is not imported at the top of this
                # module (only regression/ensembles/DNN/baselines are) -- confirm it
                # is importable before using the "GPy" method here.
                job = pool.apply_async(azimuth.models.GP.gp_on_fold, args=(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options))
            elif learn_options["method"]=="linreg":
                job = pool.apply_async(azimuth.models.regression.linreg_on_fold, args=(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options))
            elif learn_options["method"]=="logregL1":
                job = pool.apply_async(azimuth.models.regression.logreg_on_fold, args=(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options))
            elif learn_options["method"]=="AdaBoostRegressor":
                job = pool.apply_async(azimuth.models.ensembles.adaboost_on_fold, args=(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options, False))
            elif learn_options["method"]=="AdaBoostClassifier":
                job = pool.apply_async(azimuth.models.ensembles.adaboost_on_fold, args=(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options, True))
            elif learn_options["method"]=="DecisionTreeRegressor":
                job = pool.apply_async(azimuth.models.ensembles.decisiontree_on_fold, args=(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options))
            elif learn_options["method"]=="RandomForestRegressor":
                job = pool.apply_async(azimuth.models.ensembles.randomforest_on_fold, args=(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options))
            elif learn_options["method"]=="ARDRegression":
                job = pool.apply_async(azimuth.models.regression.ARDRegression_on_fold, args=(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options))
            elif learn_options["method"] == "random":
                job = pool.apply_async(azimuth.models.baselines.random_on_fold, args=(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options))
            elif learn_options["method"] == "mean":
                job = pool.apply_async(azimuth.models.baselines.mean_on_fold, args=(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options))
            elif learn_options["method"] == "SVC":
                job = pool.apply_async(azimuth.models.baselines.SVC_on_fold, args=(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options))
            elif learn_options["method"] == "DNN":
                job = pool.apply_async(azimuth.models.DNN.DNN_on_fold, args=(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options))
            elif learn_options["method"] == "lasso_ensemble":
                job = pool.apply_async(azimuth.models.ensembles.LASSOs_ensemble_on_fold, args=(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options))
            elif learn_options["method"] == "doench":
                job = pool.apply_async(azimuth.models.baselines.doench_on_fold, args=(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options))
            elif learn_options["method"] == "sgrna_from_doench":
                job = pool.apply_async(azimuth.models.baselines.sgrna_from_doench_on_fold, args=(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options))
            elif learn_options["method"] == "xu_et_al":
                job = pool.apply_async(azimuth.models.baselines.xu_et_al_on_fold, args=(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options))
            else:
                raise Exception("did not find method=%s" % learn_options["method"])
            jobs.append(job)
        pool.close()
        pool.join()
        # Collect results in fold order and compute the per-fold metric.
        for i,fold in enumerate(cv):#i in range(0,len(jobs)):
            y_pred, m[i] = jobs[i].get()
            train,test = fold
            if learn_options["training_metric"]=="AUC":
                extract_fpr_tpr_for_fold(metrics, fold_labels[i], i, predictions, truth, y_all[learn_options["ground_truth_label"]].values, test, y_pred)
            elif learn_options["training_metric"]=="NDCG":
                extract_NDCG_for_fold(metrics, fold_labels[i], i, predictions, truth, y_all[learn_options["ground_truth_label"]].values, test, y_pred, learn_options)
            elif learn_options["training_metric"] == 'spearmanr':
                extract_spearman_for_fold(metrics, fold_labels[i], i, predictions, truth, y_all[learn_options["ground_truth_label"]].values, test, y_pred, learn_options)
            else:
                raise Exception("invalid 'training_metric' in learn_options: %s" % learn_options["training_metric"])
            truth, predictions = fill_in_truth_and_predictions(truth, predictions, fold_labels[i], y_all, y_pred, learn_options, test)
        pool.terminate()
    else:
        # non parallel version
        for i,fold in enumerate(cv):
            train,test = fold
            if learn_options["method"]=="GPy":
                # NOTE(review): this call looks garbled -- gp_on_fold is referenced
                # bare (not defined in this module) and feature_sets is attached to
                # the wrong argument; presumably it should read
                # azimuth.models.GP.gp_on_fold(feature_sets, train, test, ...).
                y_pred, m[i] = gp_on_fold(azimuth.models.GP.feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options)
            elif learn_options["method"]=="linreg":
                y_pred, m[i] = azimuth.models.regression.linreg_on_fold(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options)
            elif learn_options["method"]=="logregL1":
                y_pred, m[i] = azimuth.models.regression.logreg_on_fold(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options)
            elif learn_options["method"]=="AdaBoostRegressor":
                y_pred, m[i] = azimuth.models.ensembles.adaboost_on_fold(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options, classification=False)
            elif learn_options["method"]=="AdaBoostClassifier":
                y_pred, m[i] = azimuth.models.ensembles.adaboost_on_fold(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options, classification=True)
            elif learn_options["method"]=="DecisionTreeRegressor":
                y_pred, m[i] = azimuth.models.ensembles.decisiontree_on_fold(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options)
            elif learn_options["method"]=="RandomForestRegressor":
                y_pred, m[i] = azimuth.models.ensembles.randomforest_on_fold(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options)
            elif learn_options["method"]=="ARDRegression":
                y_pred, m[i] = azimuth.models.regression.ARDRegression_on_fold(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options)
            elif learn_options["method"]=="GPy_fs":
                y_pred, m[i] = azimuth.models.GP.gp_with_fs_on_fold(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options)
            elif learn_options["method"] == "random":
                y_pred, m[i] = azimuth.models.baselines.random_on_fold(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options)
            elif learn_options["method"] == "mean":
                y_pred, m[i] = azimuth.models.baselines.mean_on_fold(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options)
            elif learn_options["method"] == "SVC":
                y_pred, m[i] = azimuth.models.baselines.SVC_on_fold(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options)
            elif learn_options["method"] == "DNN":
                y_pred, m[i] = azimuth.models.DNN.DNN_on_fold(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options)
            elif learn_options["method"] == "lasso_ensemble":
                y_pred, m[i] = azimuth.models.ensembles.LASSOs_ensemble_on_fold(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options)
            elif learn_options["method"] == "doench":
                y_pred, m[i] = azimuth.models.baselines.doench_on_fold(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options)
            elif learn_options["method"] == "sgrna_from_doench":
                y_pred, m[i] = azimuth.models.baselines.sgrna_from_doench_on_fold(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options)
            elif learn_options["method"] == "xu_et_al":
                y_pred, m[i] = azimuth.models.baselines.xu_et_al_on_fold(feature_sets, train, test, y, y_all, inputs, dim, dimsum, learn_options)
            else:
                raise Exception("invalid method found: %s" % learn_options["method"])
            if learn_options["training_metric"]=="AUC":
                # fills in truth and predictions
                extract_fpr_tpr_for_fold(metrics, fold_labels[i], i, predictions, truth, y_all[learn_options['ground_truth_label']].values, test, y_pred)
            elif learn_options["training_metric"]=="NDCG":
                extract_NDCG_for_fold(metrics, fold_labels[i], i, predictions, truth, y_all[learn_options["ground_truth_label"]].values, test, y_pred, learn_options)
            elif learn_options["training_metric"] == 'spearmanr':
                extract_spearman_for_fold(metrics, fold_labels[i], i, predictions, truth, y_all[learn_options["ground_truth_label"]].values, test, y_pred, learn_options)
            truth, predictions = fill_in_truth_and_predictions(truth, predictions, fold_labels[i], y_all, y_pred, learn_options, test)
            print "\t\tRMSE: ", np.sqrt(((y_pred - y[test])**2).mean())
            print "\t\tSpearman correlation: ", util.spearmanr_nonan(y[test], y_pred)[0]
            print "\t\tfinished fold/gene %i of %i" % (i+1, len(fold_labels))
    cv_median_metric =[np.median(metrics)]
    gene_pred = [(truth, predictions)]
    print "\t\tmedian %s across gene folds: %.3f" % (learn_options["training_metric"], cv_median_metric[-1])
    t3 = time.time()
    print "\t\tElapsed time for cv is %.2f seconds" % (t3-t2)
    return metrics, gene_pred, fold_labels, m, dimsum, filename, feature_names
/Firefly_III_API_Client-2.0.5.0-py3-none-any.whl/firefly_iii_client/paths/v1_transactions_id/put.py | from dataclasses import dataclass
import typing_extensions
import urllib3
from urllib3._collections import HTTPHeaderDict
from firefly_iii_client import api_client, exceptions
from datetime import date, datetime # noqa: F401
import decimal # noqa: F401
import functools # noqa: F401
import io # noqa: F401
import re # noqa: F401
import typing # noqa: F401
import typing_extensions # noqa: F401
import uuid # noqa: F401
import frozendict # noqa: F401
from firefly_iii_client import schemas # noqa: F401
from firefly_iii_client.model.transaction_single import TransactionSingle
from firefly_iii_client.model.validation_error import ValidationError
from firefly_iii_client.model.unauthenticated import Unauthenticated
from firefly_iii_client.model.bad_request import BadRequest
from firefly_iii_client.model.transaction_update import TransactionUpdate
from firefly_iii_client.model.internal_exception import InternalException
from firefly_iii_client.model.not_found import NotFound
from . import path
# Header params
# Schema for the optional X-Trace-Id header (a UUID used for request tracing).
XTraceIdSchema = schemas.UUIDSchema
RequestRequiredHeaderParams = typing_extensions.TypedDict(
    'RequestRequiredHeaderParams',
    {
    }
)
RequestOptionalHeaderParams = typing_extensions.TypedDict(
    'RequestOptionalHeaderParams',
    {
        'X-Trace-Id': typing.Union[XTraceIdSchema, str, uuid.UUID, ],
    },
    total=False
)
class RequestHeaderParams(RequestRequiredHeaderParams, RequestOptionalHeaderParams):
    # Combined view of the required (none) and optional header parameters.
    pass
request_header_x_trace_id = api_client.HeaderParameter(
    name="X-Trace-Id",
    style=api_client.ParameterStyle.SIMPLE,
    schema=XTraceIdSchema,
)
# Path params
# Schema for the required {id} path segment (the transaction id, as a string).
IdSchema = schemas.StrSchema
RequestRequiredPathParams = typing_extensions.TypedDict(
    'RequestRequiredPathParams',
    {
        'id': typing.Union[IdSchema, str, ],
    }
)
RequestOptionalPathParams = typing_extensions.TypedDict(
    'RequestOptionalPathParams',
    {
    },
    total=False
)
class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams):
    # Combined view of the required ('id') and optional (none) path parameters.
    pass
request_path_id = api_client.PathParameter(
    name="id",
    style=api_client.ParameterStyle.SIMPLE,
    schema=IdSchema,
    required=True,
)
# body param
# Both supported request content types carry a TransactionUpdate payload.
SchemaForRequestBodyApplicationJson = TransactionUpdate
SchemaForRequestBodyApplicationXWwwFormUrlencoded = TransactionUpdate
request_body_transaction_update = api_client.RequestBody(
    content={
        'application/json': api_client.MediaType(
            schema=SchemaForRequestBodyApplicationJson),
        'application/x-www-form-urlencoded': api_client.MediaType(
            schema=SchemaForRequestBodyApplicationXWwwFormUrlencoded),
    },
    required=True,
)
# Security scheme name(s) applied to this endpoint.
_auth = [
    'firefly_iii_auth',
]
# 200: the updated transaction, returned as a TransactionSingle document.
SchemaFor200ResponseBodyApplicationVndApijson = TransactionSingle
@dataclass
class ApiResponseFor200(api_client.ApiResponse):
    response: urllib3.HTTPResponse
    body: typing.Union[
        SchemaFor200ResponseBodyApplicationVndApijson,
    ]
    headers: schemas.Unset = schemas.unset
_response_for_200 = api_client.OpenApiResponse(
    response_cls=ApiResponseFor200,
    content={
        'application/vnd.api+json': api_client.MediaType(
            schema=SchemaFor200ResponseBodyApplicationVndApijson),
    },
)
# 400: request rejected as malformed (BadRequest body).
SchemaFor400ResponseBodyApplicationJson = BadRequest
@dataclass
class ApiResponseFor400(api_client.ApiResponse):
    response: urllib3.HTTPResponse
    body: typing.Union[
        SchemaFor400ResponseBodyApplicationJson,
    ]
    headers: schemas.Unset = schemas.unset
_response_for_400 = api_client.OpenApiResponse(
    response_cls=ApiResponseFor400,
    content={
        'application/json': api_client.MediaType(
            schema=SchemaFor400ResponseBodyApplicationJson),
    },
)
# 401: caller is not authenticated (Unauthenticated body).
SchemaFor401ResponseBodyApplicationJson = Unauthenticated
@dataclass
class ApiResponseFor401(api_client.ApiResponse):
    response: urllib3.HTTPResponse
    body: typing.Union[
        SchemaFor401ResponseBodyApplicationJson,
    ]
    headers: schemas.Unset = schemas.unset
_response_for_401 = api_client.OpenApiResponse(
    response_cls=ApiResponseFor401,
    content={
        'application/json': api_client.MediaType(
            schema=SchemaFor401ResponseBodyApplicationJson),
    },
)
# 404: no transaction with the given id (NotFound body).
SchemaFor404ResponseBodyApplicationJson = NotFound
@dataclass
class ApiResponseFor404(api_client.ApiResponse):
    response: urllib3.HTTPResponse
    body: typing.Union[
        SchemaFor404ResponseBodyApplicationJson,
    ]
    headers: schemas.Unset = schemas.unset
_response_for_404 = api_client.OpenApiResponse(
    response_cls=ApiResponseFor404,
    content={
        'application/json': api_client.MediaType(
            schema=SchemaFor404ResponseBodyApplicationJson),
    },
)
# 422: payload failed validation (ValidationError body).
SchemaFor422ResponseBodyApplicationJson = ValidationError
@dataclass
class ApiResponseFor422(api_client.ApiResponse):
    response: urllib3.HTTPResponse
    body: typing.Union[
        SchemaFor422ResponseBodyApplicationJson,
    ]
    headers: schemas.Unset = schemas.unset
_response_for_422 = api_client.OpenApiResponse(
    response_cls=ApiResponseFor422,
    content={
        'application/json': api_client.MediaType(
            schema=SchemaFor422ResponseBodyApplicationJson),
    },
)
# 500: server-side failure (InternalException body).
SchemaFor500ResponseBodyApplicationJson = InternalException
@dataclass
class ApiResponseFor500(api_client.ApiResponse):
    response: urllib3.HTTPResponse
    body: typing.Union[
        SchemaFor500ResponseBodyApplicationJson,
    ]
    headers: schemas.Unset = schemas.unset
_response_for_500 = api_client.OpenApiResponse(
    response_cls=ApiResponseFor500,
    content={
        'application/json': api_client.MediaType(
            schema=SchemaFor500ResponseBodyApplicationJson),
    },
)
# Lookup table used after the HTTP call to pick the matching deserializer.
_status_code_to_response = {
    '200': _response_for_200,
    '400': _response_for_400,
    '401': _response_for_401,
    '404': _response_for_404,
    '422': _response_for_422,
    '500': _response_for_500,
}
# Content types advertised in the request's Accept header.
_all_accept_content_types = (
    'application/vnd.api+json',
    'application/json',
)
class BaseApi(api_client.Api):
    # The @typing.overload stubs below only describe the legal call-signature
    # combinations (content type vs. body schema, and skip_deserialization vs.
    # return type); the final un-decorated definition is the implementation.
    @typing.overload
    def _update_transaction_oapg(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,],
        content_type: typing_extensions.Literal["application/json"] = ...,
        header_params: RequestHeaderParams = frozendict.frozendict(),
        path_params: RequestPathParams = frozendict.frozendict(),
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: typing_extensions.Literal[False] = ...,
    ) -> typing.Union[
        ApiResponseFor200,
    ]: ...
    @typing.overload
    def _update_transaction_oapg(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationXWwwFormUrlencoded,],
        content_type: typing_extensions.Literal["application/x-www-form-urlencoded"],
        header_params: RequestHeaderParams = frozendict.frozendict(),
        path_params: RequestPathParams = frozendict.frozendict(),
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: typing_extensions.Literal[False] = ...,
    ) -> typing.Union[
        ApiResponseFor200,
    ]: ...
    @typing.overload
    def _update_transaction_oapg(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,SchemaForRequestBodyApplicationXWwwFormUrlencoded,],
        content_type: str = ...,
        header_params: RequestHeaderParams = frozendict.frozendict(),
        path_params: RequestPathParams = frozendict.frozendict(),
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: typing_extensions.Literal[False] = ...,
    ) -> typing.Union[
        ApiResponseFor200,
    ]: ...
    @typing.overload
    def _update_transaction_oapg(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,SchemaForRequestBodyApplicationXWwwFormUrlencoded,],
        skip_deserialization: typing_extensions.Literal[True],
        content_type: str = ...,
        header_params: RequestHeaderParams = frozendict.frozendict(),
        path_params: RequestPathParams = frozendict.frozendict(),
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
    ) -> api_client.ApiResponseWithoutDeserialization: ...
    @typing.overload
    def _update_transaction_oapg(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,SchemaForRequestBodyApplicationXWwwFormUrlencoded,],
        content_type: str = ...,
        header_params: RequestHeaderParams = frozendict.frozendict(),
        path_params: RequestPathParams = frozendict.frozendict(),
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: bool = ...,
    ) -> typing.Union[
        ApiResponseFor200,
        api_client.ApiResponseWithoutDeserialization,
    ]: ...
    def _update_transaction_oapg(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,SchemaForRequestBodyApplicationXWwwFormUrlencoded,],
        content_type: str = 'application/json',
        header_params: RequestHeaderParams = frozendict.frozendict(),
        path_params: RequestPathParams = frozendict.frozendict(),
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: bool = False,
    ):
        """
        Update existing transaction. For more information, see https://docs.firefly-iii.org/firefly-iii/api/specials
        :param skip_deserialization: If true then api_response.response will be set but
            api_response.body and api_response.headers will not be deserialized into schema
            class instances
        """
        # Validate the TypedDict inputs against their declared keys/types.
        self._verify_typed_dict_inputs_oapg(RequestHeaderParams, header_params)
        self._verify_typed_dict_inputs_oapg(RequestPathParams, path_params)
        used_path = path.value
        # Substitute serialized path parameters into the {id} placeholder.
        _path_params = {}
        for parameter in (
            request_path_id,
        ):
            parameter_data = path_params.get(parameter.name, schemas.unset)
            if parameter_data is schemas.unset:
                continue
            serialized_data = parameter.serialize(parameter_data)
            _path_params.update(serialized_data)
        for k, v in _path_params.items():
            used_path = used_path.replace('{%s}' % k, v)
        # Collect serialized header parameters (only X-Trace-Id here).
        _headers = HTTPHeaderDict()
        for parameter in (
            request_header_x_trace_id,
        ):
            parameter_data = header_params.get(parameter.name, schemas.unset)
            if parameter_data is schemas.unset:
                continue
            serialized_data = parameter.serialize(parameter_data)
            _headers.extend(serialized_data)
        # TODO add cookie handling
        if accept_content_types:
            for accept_content_type in accept_content_types:
                _headers.add('Accept', accept_content_type)
        # The body is required for this endpoint.
        if body is schemas.unset:
            raise exceptions.ApiValueError(
                'The required body parameter has an invalid value of: unset. Set a valid value instead')
        # Serialize the body per the chosen content type: form data goes into
        # `fields`, everything else into the raw request `body`.
        _fields = None
        _body = None
        serialized_data = request_body_transaction_update.serialize(body, content_type)
        _headers.add('Content-Type', content_type)
        if 'fields' in serialized_data:
            _fields = serialized_data['fields']
        elif 'body' in serialized_data:
            _body = serialized_data['body']
        response = self.api_client.call_api(
            resource_path=used_path,
            method='put'.upper(),
            headers=_headers,
            fields=_fields,
            body=_body,
            auth_settings=_auth,
            stream=stream,
            timeout=timeout,
        )
        # Deserialize by status code unless the caller opted out; non-2xx
        # responses are raised as ApiException with the parsed payload attached.
        if skip_deserialization:
            api_response = api_client.ApiResponseWithoutDeserialization(response=response)
        else:
            response_for_status = _status_code_to_response.get(str(response.status))
            if response_for_status:
                api_response = response_for_status.deserialize(response, self.api_client.configuration)
            else:
                api_response = api_client.ApiResponseWithoutDeserialization(response=response)
        if not 200 <= response.status <= 299:
            raise exceptions.ApiException(
                status=response.status,
                reason=response.reason,
                api_response=api_response
            )
        return api_response
class UpdateTransaction(BaseApi):
    # this class is used by api classes that refer to endpoints with operationId fn names
    # The overloads mirror BaseApi._update_transaction_oapg; the final
    # definition simply forwards all arguments to it.
    @typing.overload
    def update_transaction(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,],
        content_type: typing_extensions.Literal["application/json"] = ...,
        header_params: RequestHeaderParams = frozendict.frozendict(),
        path_params: RequestPathParams = frozendict.frozendict(),
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: typing_extensions.Literal[False] = ...,
    ) -> typing.Union[
        ApiResponseFor200,
    ]: ...
    @typing.overload
    def update_transaction(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationXWwwFormUrlencoded,],
        content_type: typing_extensions.Literal["application/x-www-form-urlencoded"],
        header_params: RequestHeaderParams = frozendict.frozendict(),
        path_params: RequestPathParams = frozendict.frozendict(),
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: typing_extensions.Literal[False] = ...,
    ) -> typing.Union[
        ApiResponseFor200,
    ]: ...
    @typing.overload
    def update_transaction(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,SchemaForRequestBodyApplicationXWwwFormUrlencoded,],
        content_type: str = ...,
        header_params: RequestHeaderParams = frozendict.frozendict(),
        path_params: RequestPathParams = frozendict.frozendict(),
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: typing_extensions.Literal[False] = ...,
    ) -> typing.Union[
        ApiResponseFor200,
    ]: ...
    @typing.overload
    def update_transaction(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,SchemaForRequestBodyApplicationXWwwFormUrlencoded,],
        skip_deserialization: typing_extensions.Literal[True],
        content_type: str = ...,
        header_params: RequestHeaderParams = frozendict.frozendict(),
        path_params: RequestPathParams = frozendict.frozendict(),
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
    ) -> api_client.ApiResponseWithoutDeserialization: ...
    @typing.overload
    def update_transaction(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,SchemaForRequestBodyApplicationXWwwFormUrlencoded,],
        content_type: str = ...,
        header_params: RequestHeaderParams = frozendict.frozendict(),
        path_params: RequestPathParams = frozendict.frozendict(),
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: bool = ...,
    ) -> typing.Union[
        ApiResponseFor200,
        api_client.ApiResponseWithoutDeserialization,
    ]: ...
    def update_transaction(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,SchemaForRequestBodyApplicationXWwwFormUrlencoded,],
        content_type: str = 'application/json',
        header_params: RequestHeaderParams = frozendict.frozendict(),
        path_params: RequestPathParams = frozendict.frozendict(),
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: bool = False,
    ):
        # Delegate to the shared implementation in BaseApi.
        return self._update_transaction_oapg(
            body=body,
            header_params=header_params,
            path_params=path_params,
            content_type=content_type,
            accept_content_types=accept_content_types,
            stream=stream,
            timeout=timeout,
            skip_deserialization=skip_deserialization
        )
class ApiForput(BaseApi):
    # this class is used by api classes that refer to endpoints by path and http method names
    """HTTP-verb alias: ``put`` forwards to the same generated handler as
    ``update_transaction`` (``_update_transaction_oapg``)."""

    # Overload: JSON body with the (default) application/json content type.
    @typing.overload
    def put(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,],
        content_type: typing_extensions.Literal["application/json"] = ...,
        header_params: RequestHeaderParams = frozendict.frozendict(),
        path_params: RequestPathParams = frozendict.frozendict(),
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: typing_extensions.Literal[False] = ...,
    ) -> typing.Union[
        ApiResponseFor200,
    ]: ...
    # Overload: form-urlencoded body (content_type must be passed explicitly).
    @typing.overload
    def put(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationXWwwFormUrlencoded,],
        content_type: typing_extensions.Literal["application/x-www-form-urlencoded"],
        header_params: RequestHeaderParams = frozendict.frozendict(),
        path_params: RequestPathParams = frozendict.frozendict(),
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: typing_extensions.Literal[False] = ...,
    ) -> typing.Union[
        ApiResponseFor200,
    ]: ...
    # Overload: either body type with an arbitrary content-type string,
    # deserialization enabled -> typed 200 response.
    @typing.overload
    def put(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,SchemaForRequestBodyApplicationXWwwFormUrlencoded,],
        content_type: str = ...,
        header_params: RequestHeaderParams = frozendict.frozendict(),
        path_params: RequestPathParams = frozendict.frozendict(),
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: typing_extensions.Literal[False] = ...,
    ) -> typing.Union[
        ApiResponseFor200,
    ]: ...
    # Overload: skip_deserialization=True -> raw, undeserialized response.
    @typing.overload
    def put(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,SchemaForRequestBodyApplicationXWwwFormUrlencoded,],
        skip_deserialization: typing_extensions.Literal[True],
        content_type: str = ...,
        header_params: RequestHeaderParams = frozendict.frozendict(),
        path_params: RequestPathParams = frozendict.frozendict(),
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
    ) -> api_client.ApiResponseWithoutDeserialization: ...
    # Catch-all overload: non-literal skip_deserialization -> union result.
    @typing.overload
    def put(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,SchemaForRequestBodyApplicationXWwwFormUrlencoded,],
        content_type: str = ...,
        header_params: RequestHeaderParams = frozendict.frozendict(),
        path_params: RequestPathParams = frozendict.frozendict(),
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: bool = ...,
    ) -> typing.Union[
        ApiResponseFor200,
        api_client.ApiResponseWithoutDeserialization,
    ]: ...
    # Runtime implementation: forwards unchanged to the generated handler.
    def put(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,SchemaForRequestBodyApplicationXWwwFormUrlencoded,],
        content_type: str = 'application/json',
        header_params: RequestHeaderParams = frozendict.frozendict(),
        path_params: RequestPathParams = frozendict.frozendict(),
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: bool = False,
    ):
        return self._update_transaction_oapg(
            body=body,
            header_params=header_params,
            path_params=path_params,
            content_type=content_type,
            accept_content_types=accept_content_types,
            stream=stream,
            timeout=timeout,
            skip_deserialization=skip_deserialization
        )
/GenIce2-2.1.7.1.tar.gz/GenIce2-2.1.7.1/genice2/lattices/XVI.py |
import genice2.lattices
from genice2.cell import cellvectors
from genice2.valueparser import parse_cages
# Module metadata consumed by GenIce's lattice plugin loader:
# literature references for the structure, a usage string, and a
# one-line description of the lattice.
desc = {
    "ref": {
        "ice 16": "Falenty 2014",
        "C15": "Sikiric 2010",
        "sII": "Jeffrey 1984",
        "CS2": "Kosyakov 1999",
        "MTN": "IZA Database",
    },
    "usage": "No options available.",
    "brief": "Ultralow-density Ice XVI.",
}
class Lattice(genice2.lattices.Lattice):
    """Ice XVI (sII / C15 / MTN framework) unit cell.

    Provides absolute water coordinates, cage positions/types, and the
    cubic cell vectors (a = b = c = 6.20577) for GenIce.
    """

    def __init__(self):
        self.density = 0.81  # default density (g/cm^3)
        self.bondlen = 1.1  # hydrogen-bond distance threshold
        # NOTE: the original file first assigned a cell-size string to
        # self.cell here and then unconditionally overwrote it with
        # cellvectors() at the end of __init__; the dead string assignment
        # has been removed — only the cellvectors() value below is used.
        # Water oxygen positions in absolute coordinates (x y z per line).
        self.waters = """
        3.5250014754 5.4040465737 3.5250014754
        4.2322110246 2.3011615737 1.1293260246
        1.9735589754 5.7836535246 0.7497190737
        5.7836535246 3.9046084263 2.6807685246
        5.7836535246 5.7836535246 0.8017234263
        2.3011615737 4.2322110246 1.1293260246
        1.9735589754 3.8526040737 2.6807685246
        5.0764439754 2.6807685246 0.7497190737
        5.43004875 2.32716375 5.43004875
        1.9735589754 3.9046084263 5.0764439754
        2.6807685246 0.7497190737 5.0764439754
        1.3526096292 1.3526096292 4.4554946292
        3.9046084263 2.6807685246 5.7836535246
        3.8526040737 2.6807685246 1.9735589754
        0.8017234263 5.0764439754 5.0764439754
        1.7502753708 4.8531603708 4.8531603708
        2.6807685246 2.6807685246 0.8017234263
        1.3526096292 0.1988328708 3.3017178708
        3.8526040737 5.0764439754 5.7836535246
        1.1293260246 5.4560509263 3.5250014754
        1.1293260246 0.4221164754 2.3531659263
        2.9040521292 2.9040521292 1.7502753708
        5.7836535246 0.7497190737 1.9735589754
        5.43004875 5.43004875 2.32716375
        0.8017234263 2.6807685246 2.6807685246
        1.1293260246 5.4040465737 1.1293260246
        2.6807685246 5.7836535246 3.9046084263
        0.7497190737 1.9735589754 5.7836535246
        4.2322110246 0.4221164754 5.4560509263
        5.0764439754 5.0764439754 0.8017234263
        5.4560509263 0.4221164754 4.2322110246
        6.0069371292 6.0069371292 1.7502753708
        0.4221164754 4.2322110246 5.4560509263
        4.4554946292 4.4554946292 4.4554946292
        5.4040465737 1.1293260246 1.1293260246
        3.3017178708 3.3017178708 4.4554946292
        4.2322110246 5.4040465737 4.2322110246
        0.4221164754 5.4560509263 4.2322110246
        3.5250014754 2.3531659263 4.2322110246
        1.9735589754 1.9735589754 0.8017234263
        1.9735589754 0.8017234263 1.9735589754
        0.7497190737 2.6807685246 5.0764439754
        5.4040465737 0.4221164754 0.4221164754
        0.4221164754 0.4221164754 5.4040465737
        5.4560509263 1.1293260246 3.5250014754
        1.7502753708 1.7502753708 1.7502753708
        2.3011615737 3.5250014754 0.4221164754
        0.77572125 3.87860625 0.77572125
        5.4040465737 4.2322110246 4.2322110246
        6.0069371292 2.9040521292 4.8531603708
        4.2322110246 4.2322110246 5.4040465737
        4.2322110246 2.3531659263 3.5250014754
        2.9040521292 4.8531603708 6.0069371292
        0.4221164754 1.1293260246 2.3531659263
        1.9735589754 2.6807685246 3.8526040737
        2.6807685246 3.9046084263 5.7836535246
        3.3017178708 1.3526096292 0.1988328708
        3.9046084263 5.7836535246 2.6807685246
        2.6807685246 5.0764439754 0.7497190737
        4.4554946292 1.3526096292 1.3526096292
        3.5250014754 2.3011615737 0.4221164754
        1.1293260246 4.2322110246 2.3011615737
        4.2322110246 1.1293260246 2.3011615737
        0.4221164754 2.3531659263 1.1293260246
        5.7836535246 1.9735589754 0.7497190737
        0.4221164754 3.5250014754 2.3011615737
        6.0069371292 1.7502753708 6.0069371292
        3.3017178708 0.1988328708 1.3526096292
        5.0764439754 3.9046084263 1.9735589754
        2.3011615737 0.4221164754 3.5250014754
        0.8017234263 1.9735589754 1.9735589754
        5.4040465737 3.5250014754 3.5250014754
        5.7836535246 0.8017234263 5.7836535246
        2.32716375 2.32716375 2.32716375
        1.7502753708 6.0069371292 6.0069371292
        1.1293260246 2.3011615737 4.2322110246
        5.0764439754 5.7836535246 3.8526040737
        2.32716375 5.43004875 5.43004875
        5.0764439754 3.8526040737 5.7836535246
        3.87860625 0.77572125 0.77572125
        3.87860625 3.87860625 3.87860625
        1.1293260246 3.5250014754 5.4560509263
        3.5250014754 4.2322110246 2.3531659263
        4.8531603708 6.0069371292 2.9040521292
        0.7497190737 5.7836535246 1.9735589754
        1.1293260246 1.1293260246 5.4040465737
        2.3531659263 0.4221164754 1.1293260246
        5.4560509263 4.2322110246 0.4221164754
        4.8531603708 2.9040521292 6.0069371292
        4.4554946292 3.3017178708 3.3017178708
        5.7836535246 3.8526040737 5.0764439754
        0.4221164754 2.3011615737 3.5250014754
        1.3526096292 4.4554946292 1.3526096292
        4.2322110246 5.4560509263 0.4221164754
        5.0764439754 0.7497190737 2.6807685246
        6.0069371292 4.8531603708 2.9040521292
        3.9046084263 1.9735589754 5.0764439754
        2.6807685246 1.9735589754 3.8526040737
        1.9735589754 0.7497190737 5.7836535246
        2.9040521292 1.7502753708 2.9040521292
        0.8017234263 5.7836535246 5.7836535246
        0.1988328708 4.4554946292 0.1988328708
        0.4221164754 5.4040465737 0.4221164754
        2.9040521292 6.0069371292 4.8531603708
        0.1988328708 0.1988328708 4.4554946292
        5.7836535246 2.6807685246 3.9046084263
        2.6807685246 0.8017234263 2.6807685246
        5.7836535246 5.0764439754 3.8526040737
        1.7502753708 2.9040521292 2.9040521292
        3.8526040737 5.7836535246 5.0764439754
        2.3011615737 1.1293260246 4.2322110246
        3.3017178708 4.4554946292 3.3017178708
        2.3531659263 3.5250014754 4.2322110246
        2.3531659263 1.1293260246 0.4221164754
        5.0764439754 0.8017234263 5.0764439754
        4.4554946292 0.1988328708 0.1988328708
        1.3526096292 3.3017178708 0.1988328708
        2.3531659263 4.2322110246 3.5250014754
        3.5250014754 1.1293260246 5.4560509263
        0.1988328708 1.3526096292 3.3017178708
        4.8531603708 4.8531603708 1.7502753708
        3.5250014754 0.4221164754 2.3011615737
        2.6807685246 3.8526040737 1.9735589754
        3.5250014754 5.4560509263 1.1293260246
        3.9046084263 5.0764439754 1.9735589754
        3.5250014754 3.5250014754 5.4040465737
        5.4560509263 3.5250014754 1.1293260246
        0.1988328708 3.3017178708 1.3526096292
        5.0764439754 1.9735589754 3.9046084263
        3.8526040737 1.9735589754 2.6807685246
        0.77572125 0.77572125 3.87860625
        1.1293260246 2.3531659263 0.4221164754
        1.9735589754 5.0764439754 3.9046084263
        0.7497190737 5.0764439754 2.6807685246
        4.8531603708 1.7502753708 4.8531603708
        4.2322110246 3.5250014754 2.3531659263
        """
        self.coord = "absolute"
        # Cage centers in fractional coordinates; first column is the cage
        # size (12-hedra and 16-hedra of the sII framework).
        self.cagepos, self.cagetype = parse_cages(
            """
        12 0.5000 0.2500 0.2500
        12 0.5000 0.5000 0.5000
        12 0.2500 0.5000 0.2500
        12 0.0000 0.2500 0.7500
        12 0.2500 0.0000 0.7500
        12 0.0000 0.5000 0.0000
        12 0.2500 0.2500 0.5000
        12 0.5000 0.0000 0.0000
        12 0.7500 0.5000 0.7500
        12 0.0000 0.0000 0.5000
        12 0.0000 0.7500 0.2500
        12 0.2500 0.7500 0.0000
        12 0.7500 0.2500 0.0000
        12 0.7500 0.0000 0.2500
        12 0.5000 0.7500 0.7500
        12 0.7500 0.7500 0.5000
        16 0.6250 0.1250 0.6250
        16 0.1250 0.6250 0.6250
        16 0.8750 0.3750 0.3750
        16 0.3750 0.8750 0.3750
        16 0.3750 0.3750 0.8750
        16 0.6250 0.6250 0.1250
        16 0.8750 0.8750 0.8750
        16 0.1250 0.1250 0.1250
        """
        )
        # Cubic cell vectors; this is the authoritative cell definition.
        self.cell = cellvectors(a=6.20577, b=6.20577, c=6.20577)
/Gbtestapi0.4-0.1a10.tar.gz/Gbtestapi0.4-0.1a10/src/gailbot/api.py | from typing import List, Dict, Union, Tuple, Callable
from gailbot.services import ServiceController, SettingDict
from gailbot.workspace import WorkspaceManager
from .plugins.suite import PluginSuite
from gailbot.core.utils.logger import makelogger
logger = makelogger("gb_api")
class GailBot:
    """
    Public API facade for GailBot.

    Delegates workspace management to a WorkspaceManager and everything
    else (sources, setting profiles, engines, transcription, plugin
    suites) to a ServiceController.
    """

    def __init__(self, ws_root: str):
        """Initialize a GailBot object that provides a suite of functions
        to interact with GailBot.

        Args:
            ws_root (str): the path to workspace root
        """
        self.ws_manager: WorkspaceManager = WorkspaceManager(ws_root)
        self.init_workspace()
        logger.info("workspace manager initialized")
        self.gb: ServiceController = ServiceController(
            self.ws_manager, load_exist_setting=True
        )
        logger.info("gailbot service controller initialized")

    def init_workspace(self) -> bool:
        """
        Resets the workspace: clears the old workspace and initializes a new one.

        Returns:
            bool: True on success, False if initialization failed.
        """
        try:
            self.ws_manager.clear_gb_temp_dir()
            self.ws_manager.init_workspace()
            return True
        except Exception as e:
            logger.error(f"failed to reset workspace due to the error {e}", exc_info=e)
            return False

    def clear_workspace(self) -> bool:
        """
        Clears current workspace.

        Returns:
            bool: True on success, False if clearing failed.
        """
        try:
            self.ws_manager.clear_gb_temp_dir()
            return True
        except Exception as e:
            logger.error(f"failed to reset workspace due to the error {e}", exc_info=e)
            return False

    def transcribe(self, sources: List[str] = None) -> Tuple[List[str], List[str]]:
        """Given a list of the source names, transcribe the sources.

        Args:
            sources (List[str], optional): a list of source names, which
                can be either source paths or file names without the file
                extension; if None, all configured sources are transcribed.

        Returns:
            Tuple[List[str], List[str]]:
                the first list contains files that are not valid input,
                the second list contains files that failed to be processed.
        """
        return self.gb.transcribe(sources)

    def clear_source_memory(self) -> bool:
        """
        Clears source memory.

        Returns:
            bool: True if successfully cleared, False if not.
        """
        return self.gb.clear_source_memory()

    def add_sources(self, src_output_pairs: List[Tuple[str, str]]) -> bool:
        """
        Adds a given list of sources.

        Args:
            src_output_pairs (List[Tuple[str, str]]): each tuple holds the
                source path and the output path of a source to add.

        Returns:
            bool: True if every given source was successfully added, False if not.
        """
        return self.gb.add_sources(src_output_pairs)

    def add_source(self, source_path: str, output_dir: str) -> bool:
        """
        Adds a given source.

        Args:
            source_path (str): source path of the given source.
            output_dir (str): path to the output directory of the given source.

        Returns:
            Union[str, bool]: the name if successfully added, False if not.
        """
        return self.gb.add_source(source_path, output_dir)

    def remove_source(self, source_name: str) -> bool:
        """
        Removes the given source.

        Args:
            source_name (str): name of the existing source to remove.

        Returns:
            bool: True if the source was successfully removed, False if not.
        """
        return self.gb.remove_source(source_name)

    def remove_sources(self, source_names: List[str]) -> bool:
        """
        Removes the given list of sources.

        Args:
            source_names (List[str]): names of the existing sources to remove.

        Returns:
            bool: True if all sources were successfully removed, False if not.
        """
        return self.gb.remove_sources(source_names)

    def is_source(self, name: str) -> bool:
        """
        Determines if a given name corresponds to an existing source.

        Args:
            name (str): name of the source to look for.

        Returns:
            bool: True if the given name corresponds to an existing source,
                False if not.
        """
        return self.gb.is_source(name)

    def get_source_outdir(self, name: str) -> str:
        """
        Accesses source output directory with a given name.

        Args:
            name (str): source name to access.

        Returns:
            str: the output path of the source.
        """
        return self.gb.get_source_out_dir(name)

    def create_new_setting(
        self, name: str, setting: Dict[str, str], overwrite: bool = True
    ) -> bool:
        """
        Creates a new setting profile.

        Args:
            name (str): name to assign to the newly created setting profile.
            setting (Dict[str, str]): dictionary representation of the setting.
            overwrite (bool): whether an existing profile may be overwritten.

        Returns:
            bool: True if the profile was created, False if not.
        """
        return self.gb.create_new_setting(name, setting, overwrite)

    def get_src_setting_name(self, source_name: str) -> Union[bool, str]:
        """Given a source name, return the setting name applied to the source.

        Args:
            source_name (str): the name that identifies the source.

        Returns:
            Union[bool, str]: if the source is found, the setting name
                applied to the source, else False.
        """
        return self.gb.get_src_setting_name(source_name)

    def save_setting(self, setting_name: str) -> bool:
        """
        Saves the given setting.

        Args:
            setting_name (str): name of the setting to save.

        Returns:
            bool: True if the setting was successfully saved, False if not.
        """
        return self.gb.save_setting(setting_name)

    def get_source_setting_dict(self, source_name) -> Union[bool, SettingDict]:
        """
        Given a source, returns its setting content as a dictionary.

        Args:
            source_name (str): the name of the source.

        Returns:
            Union[bool, SettingDict]: dictionary with the source setting
                content, or False if the source is not found.
        """
        return self.gb.get_source_setting_dict(source_name)

    def get_all_source_names(self) -> List[str]:
        """
        Returns a list of all source names.

        Returns:
            List[str]: list of source names.
        """
        return self.gb.get_all_source_names()

    def get_setting_dict(self, setting_name: str) -> Union[bool, SettingDict]:
        """
        Given a setting name, returns the setting content in a dictionary.

        Args:
            setting_name (str): name that identifies a setting.

        Returns:
            Union[bool, SettingDict]: the setting content stored in a
                dictionary if found, else False.
        """
        return self.gb.get_setting_dict(setting_name)

    def get_all_settings_data(self) -> Dict[str, SettingDict]:
        """
        Returns the content of every stored setting.

        Returns:
            Dict[str, SettingDict]: mapping from setting name to its content.
        """
        return self.gb.get_all_settings_data()

    def get_all_profile_names(self) -> List[str]:
        """Get the names of available settings.

        Returns:
            List[str]: a list of available setting names.
        """
        return self.gb.get_all_settings_names()

    def rename_setting(self, old_name: str, new_name: str) -> bool:
        """
        Renames a given setting to a given new name.

        Args:
            old_name (str): original name of the setting to rename.
            new_name (str): name to rename the setting to.

        Returns:
            bool: True if the setting was successfully renamed, False if not.
        """
        return self.gb.rename_setting(old_name, new_name)

    def update_setting(self, setting_name: str, new_setting: Dict[str, str]) -> bool:
        """
        Updates a given setting to a newly given structure.

        Args:
            setting_name (str): name of the setting to update.
            new_setting (Dict[str, str]): dictionary representation of
                the new structure of the setting.

        Returns:
            bool: True if the setting was successfully updated, False if not.
        """
        return self.gb.update_setting(setting_name, new_setting)

    def get_plugin_setting(self, setting_name: str) -> Dict[str, str]:
        """
        Accesses the plugin setting of a given setting.

        Args:
            setting_name (str): name of the setting to get the plugin setting of.

        Returns:
            Dict[str, str]: dictionary representation of the plugin setting.
        """
        return self.gb.get_plugin_setting(setting_name)

    def remove_setting(self, setting_name: str) -> bool:
        """
        Removes the given setting.

        Args:
            setting_name (str): name of the setting to remove.

        Returns:
            bool: True if the setting was successfully removed, False if not.
        """
        return self.gb.remove_setting(setting_name)

    def remove_multiple_settings(self, setting_names: List[str]) -> bool:
        """
        Removes the given list of settings.

        Args:
            setting_names (List[str]): names of the settings to remove.

        Returns:
            bool: True if all settings were successfully removed, False if not.
        """
        return self.gb.remove_multiple_settings(setting_names)

    def is_setting(self, name: str) -> bool:
        """
        Determines if a given setting name corresponds to an existing setting.

        Args:
            name (str): name of the setting to search for.

        Returns:
            bool: True if the given setting exists, False if not.
        """
        return self.gb.is_setting(name)

    def apply_setting_to_source(
        self, source: str, setting: str, overwrite: bool = True
    ) -> bool:
        """
        Applies a given setting to a given source.

        Args:
            source (str): name of the source to which to apply the setting.
            setting (str): name of the setting to apply to the source.
            overwrite (bool): whether an existing setting may be overwritten.
                Defaults to True.

        Returns:
            bool: True if the setting was successfully applied, False if not.
        """
        return self.gb.apply_setting_to_source(source, setting, overwrite)

    def apply_setting_to_sources(
        self, sources: List[str], setting: str, overwrite: bool = True
    ) -> bool:
        """
        Applies a given setting to a given list of sources.

        Args:
            sources (List[str]): names of the sources to which to apply the setting.
            setting (str): name of the setting to apply to the sources.
            overwrite (bool): whether an existing setting may be overwritten.
                Defaults to True.

        Returns:
            bool: True if the setting was successfully applied, False if not.
        """
        return self.gb.apply_setting_to_sources(sources, setting, overwrite)

    def is_setting_in_use(self, setting_name: str) -> bool:
        """Check if a setting is being used by any source.

        Args:
            setting_name (str): the name of the setting.

        Returns:
            bool: True if the setting is being used, False otherwise.
        """
        return self.gb.is_setting_in_use(setting_name)

    def get_default_profile_setting_name(self) -> str:
        """Get the name of the current default setting.

        Returns:
            str: the name of the current default setting.
        """
        return self.gb.get_default_profile_setting_name()

    def get_default_engine_setting_name(self) -> str:
        """Get the default engine setting name.

        Returns:
            str: the name of the default engine setting.
        """
        return self.gb.get_default_engine_setting_name()

    def set_default_setting(self, setting_name) -> bool:
        """Set the default setting to setting_name.

        Args:
            setting_name (str): the name of the default setting.

        Returns:
            bool: True if the default setting is set correctly.
        """
        return self.gb.set_default_setting(setting_name)

    def register_plugin_suite(self, plugin_source: str) -> Union[List[str], str]:
        """
        Registers a GailBot plugin suite.

        Args:
            plugin_source (str): name of the plugin suite to register.

        Returns:
            Union[List[str], str]: a list of plugin names if the plugin is
                registered, or a string holding the error message if the
                plugin suite could not be registered.
        """
        return self.gb.register_plugin_suite(plugin_source)

    def get_plugin_suite(self, suite_name) -> PluginSuite:
        """
        Gets the plugin suite with a given name.

        Args:
            suite_name (str): name of the given plugin suite.

        Returns:
            PluginSuite: the suite with the given name.
        """
        return self.gb.get_plugin_suite(suite_name)

    def is_plugin_suite(self, suite_name: str) -> bool:
        """
        Determines if a given plugin suite is an existing plugin suite.

        Args:
            suite_name (str): name of the plugin suite to check.

        Returns:
            bool: True if the given plugin suite exists, False if not.
        """
        return self.gb.is_plugin_suite(suite_name)

    def delete_plugin_suite(self, suite_name: str) -> bool:
        """
        Removes the given plugin suite.

        Args:
            suite_name (str): name of the plugin suite to delete.

        Returns:
            bool: True if the plugin suite was successfully removed, False if not.
        """
        return self.gb.delete_plugin_suite(suite_name)

    def delete_plugin_suites(self, suite_names: List[str]) -> bool:
        """
        Removes the given list of plugin suites.

        Args:
            suite_names (List[str]): names of the plugin suites to delete.

        Returns:
            bool: True if all plugin suites were successfully removed, False if not.
        """
        return self.gb.delete_plugin_suites(suite_names)

    def add_progress_display(self, source: str, displayer: Callable) -> bool:
        """
        Adds a function displayer to track the progress of a source.

        Args:
            source (str): the name of the source.
            displayer (Callable): a function that takes a string argument
                encoding the progress of the source.

        Returns:
            bool: True if the displayer is added, False otherwise.
        """
        return self.gb.add_progress_display(source, displayer)

    def get_all_plugin_suites(self) -> List[str]:
        """Get names of available plugin suites.

        Returns:
            List[str]: a list of available plugin suite names.
        """
        return self.gb.get_all_plugin_suites()

    def get_plugin_suite_metadata(self, suite_name: str) -> Dict[str, str]:
        """Get the metadata of a plugin suite identified by suite name.

        Args:
            suite_name (str): the name of the suite.

        Returns:
            MetaData: an object that stores the suite's metadata.
        """
        return self.gb.get_plugin_suite_metadata(suite_name)

    def get_plugin_suite_dependency_graph(
        self, suite_name: str
    ) -> Dict[str, List[str]]:
        """Get the dependency map of the plugin suite identified by suite_name.

        Args:
            suite_name (str): the name of the suite.

        Returns:
            Dict[str, List[str]]: the dependency graph of the suite.
        """
        return self.gb.get_plugin_suite_dependency_graph(suite_name)

    def get_plugin_suite_documentation_path(self, suite_name: str) -> str:
        """Get the path to the documentation of the plugin suite identified
        by suite_name.

        Args:
            suite_name (str): the name of the suite.

        Returns:
            str: the path to the documentation file.
        """
        return self.gb.get_plugin_suite_documentation_path(suite_name)

    def is_suite_in_use(self, suite_name: str) -> bool:
        """Given a suite_name, check if this suite is used in any of the settings.

        Args:
            suite_name (str): the name of the plugin suite.

        Returns:
            bool: True if the suite is used in any of the settings,
                False otherwise.
        """
        return self.gb.is_suite_in_use(suite_name)

    def is_official_suite(self, suite_name: str) -> bool:
        """Given a suite_name, check if the suite identified by the
        suite_name is official.

        Args:
            suite_name (str): the name of the suite.

        Returns:
            bool: True if the suite is official, False otherwise.
        """
        return self.gb.is_official_suite(suite_name)

    def reset_workspace(self) -> bool:
        """Reset the GailBot workspace."""
        return self.ws_manager.reset_workspace()

    def get_suite_source_path(self, suite_name: str) -> str:
        """
        Given the name of the suite, return the path to the source
        code of the suite.
        """
        return self.gb.get_suite_path(suite_name)

    def get_engine_setting_names(self) -> List[str]:
        """Get a list of available engine setting names.

        Returns:
            List[str]: the list of engine setting names.
        """
        return self.gb.get_engine_setting_names()

    def add_new_engine(self, name, setting, overwrite=False) -> bool:
        """Add a new engine setting.

        Args:
            name (str): the name of the engine setting.
            setting (Dict[str, str]): the setting data stored in a dictionary.
            overwrite (bool, optional): if True, overwrite the existing
                engine setting with the same name. Defaults to False.

        Returns:
            bool: True if the engine setting is successfully created.
        """
        return self.gb.add_new_engine(name, setting, overwrite)

    def remove_engine_setting(self, name) -> bool:
        """Remove the engine setting identified by name.

        Args:
            name (str): the name of the engine setting to be removed.

        Returns:
            bool: True if the engine setting is successfully removed.
        """
        return self.gb.remove_engine_setting(name)

    def update_engine_setting(self, name, setting_data: Dict[str, str]) -> bool:
        """Update the engine setting identified by name.

        Args:
            name (str): the name of the engine setting to be updated.
            setting_data (Dict[str, str]): the content of the new setting.

        Returns:
            bool: True if the engine setting is successfully updated.
        """
        return self.gb.update_engine_setting(name, setting_data)

    def get_engine_setting_data(self, name: str) -> Union[bool, Dict[str, str]]:
        """Get the engine setting data.

        Args:
            name (str): the name of the engine setting.

        Returns:
            Union[bool, Dict[str, str]]: the engine setting data as a
                dictionary if the name is available, else False.
        """
        return self.gb.get_engine_setting_data(name)

    def is_engine_setting_in_use(self, name: str) -> bool:
        """Check if the engine setting identified by name is in use.

        Args:
            name (str): the name of the engine setting.

        Returns:
            bool: True if the engine setting is in use, False otherwise.
        """
        return self.gb.is_engine_setting_in_use(name)

    def is_engine_setting(self, name: str):
        """Check if the given name identifies an engine setting.

        Args:
            name (str): the name of the engine setting.
        """
        return self.gb.is_engine_setting(name)

    def get_profile_src_path(self, name: str):
        """Get the path to the profile setting source.

        Args:
            name (str): the name of the profile.
        """
        return self.gb.get_profile_src_path(name)

    def get_engine_src_path(self, name: str):
        """Get the path to the engine setting source.

        Args:
            name (str): the name of the engine.
        """
        # BUG FIX: the original returned self.gb.get_plugin_suite(suite_name),
        # where suite_name is undefined (NameError at runtime); the intended
        # delegation is to get_engine_src_path with the given name.
        return self.gb.get_engine_src_path(name)
/Bookkeeper-0.0.2.tar.gz/Bookkeeper-0.0.2/bookkeeper/persist.py | import sqlite3
from bookkeeper.util import get_path
DB_FILE = '~/.bookkeeper.db'
class DB(object):
    """ Manages the sqlite connection. """

    # Lazily-created singleton handle (see get_instance()).
    _instance = None

    @classmethod
    def get_instance(cls):
        """ Singleton instance. """
        # NOTE(review): the instance is cached on DB itself but built with
        # cls(), so the first subclass to call this would install a subclass
        # instance for everyone — confirm subclassing is not intended.
        if DB._instance is None:
            DB._instance = cls()
        return DB._instance

    @classmethod
    def set_verbose(cls, verbose):
        """ Toggle verbosity. """
        cls.get_instance().verbose = verbose

    def __init__(self, verbose=False):
        """ Create sqlite connection. """
        # Only the database path is resolved here; a fresh connection is
        # opened per statement in exc()/qry().
        self.path = get_path(DB_FILE)
        self.verbose = verbose

    def exc(self, command, *args):
        """ Wrapper for sqlite exec. """
        # Best-effort write: errors are printed and rolled back, not raised.
        # NOTE(review): sqlite3's "with connection" commits/rolls back but
        # does NOT close the connection, so one connection is leaked per call.
        with sqlite3.connect(self.path) as connection:
            if self.verbose:
                print(command, args)
            try:
                cursor = connection.cursor()
                cursor.execute(command, *args)
            except sqlite3.Error as e:
                print(e)
                connection.rollback()
            else:
                connection.commit()

    def qry(self, query, *args):
        """ Wrapper for sqlite exec. """
        # Returns a live cursor; the connection is left open (the context
        # manager only commits), which is what keeps the cursor usable by
        # the caller — do not "fix" this by closing the connection here.
        with sqlite3.connect(self.path) as connection:
            if self.verbose:
                print(query, args)
            return connection.execute(query, *args)

    def list_app_items(self, app):
        """ Return all items listed in the app. """
        # INDEXED BY forces use of the ix_app index (fails if it is missing).
        query = """
            SELECT app, object, type
            FROM inner
            INDEXED BY ix_app
            WHERE app = ?
        """
        return self.qry(query, (app, ))

    def count_target_path(self, path="/"):
        """ Check if path exists on the db.

            Given the example:
                test    /opt/dotfiles/test/     /home/user/test/
                vim     /opt/dotfiles/vim/      /home/user/
                other   /opt/share/other/       /home/user/

            /home/user/ will retrun 3
            /home/user/test/ will return 1
        """
        # GLOB with a trailing '*' counts every target_path under `path`.
        query = """
            SELECT COUNT(1)
            FROM apps
            WHERE target_path GLOB ?
        """
        path += '*'
        return self.qry(query, (path, ))

    def get_app_for_folder(self, target_folder):
        """ Return app. """
        query = """
            SELECT app
            FROM apps
            WHERE target_path = ?
        """
        return self.qry(query, (target_folder, ))

    def fetch_app(self, app=None):
        """ Return app row.

            If app is None, return all apps.
        """
        query = """
            SELECT app, source_path, target_path
            FROM apps
        """
        if app is not None:
            query += """ WHERE app = ? """
            return self.qry(query, (app, ))
        return self.qry(query)

    def add_item(self, app, item, item_type):
        """ Simple wrapper for inserting item. """
        # OR IGNORE: silently skips rows that violate a uniqueness constraint.
        self.exc(
            """ INSERT OR IGNORE INTO inner (app, object, type)
                VALUES (?, ?, ?) """,
            (app, item, item_type, )
        )

    def add_app(self, app, source, target):
        """ Simple wrapper for inserting app. """
        self.exc(
            """ INSERT OR IGNORE INTO apps (app, source_path, target_path)
                VALUES (?, ?, ?) """,
            (app, source, target, )
        )
def install():
    """ Create basic db.

    Creates the ``apps`` and ``inner`` tables plus the ``ix_app`` index.
    Safe to call repeatedly: all DDL is guarded with IF NOT EXISTS.
    """
    db = DB.get_instance()
    db.exc("""CREATE TABLE IF NOT EXISTS apps
        (
            app TEXT NOT NULL UNIQUE,
            source_path TEXT NOT NULL UNIQUE,
            target_path TEXT NOT NULL UNIQUE
        )
        """)
    # NOTE(review): every column of ``inner`` is individually UNIQUE, which
    # limits each app/object/type value to one row in the whole table; a
    # composite UNIQUE(app, object) was probably intended -- confirm before
    # changing the schema of existing databases.
    db.exc("""CREATE TABLE IF NOT EXISTS inner
        (
            app TEXT NOT NULL UNIQUE,
            object TEXT NOT NULL UNIQUE,
            type TEXT NOT NULL UNIQUE
        )
        """)
    # fix: the index creation previously lacked IF NOT EXISTS, so a second
    # install() run raised (and printed) "index ix_app already exists"
    db.exc("""CREATE INDEX IF NOT EXISTS ix_app ON inner (app) """)
/Kallithea-0.7.0.tar.gz/Kallithea-0.7.0/kallithea/model/validators.py | import logging
import os
import re
from collections import defaultdict
import formencode
import ipaddr
import sqlalchemy
from formencode.validators import CIDR, Bool, Email, FancyValidator, Int, IPAddress, NotEmpty, Number, OneOf, Regex, Set, String, StringBoolean, UnicodeString
from sqlalchemy import func
from tg.i18n import ugettext as _
import kallithea
from kallithea.lib import auth
from kallithea.lib.compat import OrderedSet
from kallithea.lib.exceptions import InvalidCloneUriException, LdapImportError
from kallithea.lib.utils import is_valid_repo_uri
from kallithea.lib.utils2 import asbool, aslist, repo_name_slug
from kallithea.model import db
# silence warnings and pylint
UnicodeString, OneOf, Int, Number, Regex, Email, Bool, StringBoolean, Set, \
NotEmpty, IPAddress, CIDR, String, FancyValidator
log = logging.getLogger(__name__)
def UniqueListFromString():
    """Build a validator that splits a comma separated string into a
    de-duplicated list, keeping first-seen order."""
    class _UniqueListFromString(formencode.FancyValidator):
        """
        Split value on ',' and make unique while preserving order
        """
        messages = {
            'empty': _('Value cannot be an empty list'),
            'missing_value': _('Value cannot be an empty list'),
        }
        def _convert_to_python(self, value, state):
            items = aslist(value, ',')
            # dict preserves insertion order, so this drops later duplicates
            # while keeping the original ordering of first occurrences
            return list(dict.fromkeys(items))
        def empty_value(self, value):
            # an empty submission normalises to an empty list
            return []
    return _UniqueListFromString
def ValidUsername(edit=False, old_data=None):
    """Build a validator for usernames.

    :param edit: True when validating an edit of an existing user, in which
        case the user's current name is allowed unchanged.
    :param old_data: dict with the existing user's data (needs 'user_id'
        when ``edit`` is True).
    """
    old_data = old_data or {}
    class _validator(formencode.validators.FancyValidator):
        messages = {
            'username_exists': _('Username "%(username)s" already exists'),
            'system_invalid_username':
                _('Username "%(username)s" cannot be used'),
            'invalid_username':
                _('Username may only contain alphanumeric characters '
                  'underscores, periods or dashes and must begin with an '
                  'alphanumeric character or underscore')
        }
        def _validate_python(self, value, state):
            # reserved names that would clash with application internals
            if value in ['default', 'new_user']:
                msg = self.message('system_invalid_username', state, username=value)
                raise formencode.Invalid(msg, value, state)
            # check if user is unique
            old_un = None
            if edit:
                old_un = db.User.get(old_data.get('user_id')).username
            if old_un != value or not edit:
                if db.User.get_by_username(value, case_insensitive=True):
                    msg = self.message('username_exists', state, username=value)
                    raise formencode.Invalid(msg, value, state)
            # syntax check: first character alphanumeric or underscore,
            # the rest may also contain dashes and periods
            if re.match(r'^[a-zA-Z0-9\_]{1}[a-zA-Z0-9\-\_\.]*$', value) is None:
                msg = self.message('invalid_username', state)
                raise formencode.Invalid(msg, value, state)
    return _validator
def ValidRegex(msg=None):
    """Return a Regex validator whose "invalid" message is *msg*
    (falling back to a generic default)."""
    class _validator(formencode.validators.Regex):
        messages = {'invalid': msg or _('The input is not valid')}
    return _validator
def ValidRepoUser():
    """Build a validator requiring the value to be the username of an
    existing, active user."""
    class _validator(formencode.validators.FancyValidator):
        messages = {
            'invalid_username': _('Username %(username)s is not valid')
        }
        def _validate_python(self, value, state):
            try:
                # .one() raises when zero or multiple rows match
                db.User.query().filter(db.User.active == True) \
                    .filter(db.User.username == value).one()
            except sqlalchemy.exc.InvalidRequestError: # NoResultFound/MultipleResultsFound
                msg = self.message('invalid_username', state, username=value)
                raise formencode.Invalid(msg, value, state,
                    error_dict=dict(username=msg)
                )
    return _validator
def ValidUserGroup(edit=False, old_data=None):
    """Build a validator for user group names.

    :param edit: True when editing an existing group; its current name is
        then allowed unchanged.
    :param old_data: dict with the existing group's data (needs
        'users_group_id' when ``edit`` is True).
    """
    old_data = old_data or {}
    class _validator(formencode.validators.FancyValidator):
        messages = {
            'invalid_group': _('Invalid user group name'),
            'group_exist': _('User group "%(usergroup)s" already exists'),
            'invalid_usergroup_name':
                _('user group name may only contain alphanumeric '
                  'characters underscores, periods or dashes and must begin '
                  'with alphanumeric character')
        }
        def _validate_python(self, value, state):
            # 'default' is reserved
            if value in ['default']:
                msg = self.message('invalid_group', state)
                raise formencode.Invalid(msg, value, state,
                    error_dict=dict(users_group_name=msg)
                )
            # check if group is unique
            old_ugname = None
            if edit:
                old_id = old_data.get('users_group_id')
                old_ugname = db.UserGroup.get(old_id).users_group_name
            if old_ugname != value or not edit:
                is_existing_group = db.UserGroup.get_by_group_name(value,
                    case_insensitive=True)
                if is_existing_group:
                    msg = self.message('group_exist', state, usergroup=value)
                    raise formencode.Invalid(msg, value, state,
                        error_dict=dict(users_group_name=msg)
                    )
            # syntax check: must begin with an alphanumeric character
            if re.match(r'^[a-zA-Z0-9]{1}[a-zA-Z0-9\-\_\.]+$', value) is None:
                msg = self.message('invalid_usergroup_name', state)
                raise formencode.Invalid(msg, value, state,
                    error_dict=dict(users_group_name=msg)
                )
    return _validator
def ValidRepoGroup(edit=False, old_data=None):
    """Build a validator for the repository group form.

    Checks that the group is not made its own parent and that neither a
    group nor a repository with the same (slugified) name already exists.
    """
    old_data = old_data or {}
    class _validator(formencode.validators.FancyValidator):
        messages = {
            'parent_group_id': _('Cannot assign this group as parent'),
            'group_exists': _('Group "%(group_name)s" already exists'),
            'repo_exists':
                _('Repository with name "%(group_name)s" already exists')
        }
        def _validate_python(self, value, state):
            # TODO WRITE VALIDATIONS
            group_name = value.get('group_name')
            parent_group_id = value.get('parent_group_id')
            # slugify repo group just in case :)
            slug = repo_name_slug(group_name)
            # check for parent of self
            if edit and parent_group_id and old_data['group_id'] == parent_group_id:
                msg = self.message('parent_group_id', state)
                raise formencode.Invalid(msg, value, state,
                    error_dict=dict(parent_group_id=msg)
                )
            old_gname = None
            if edit:
                old_gname = db.RepoGroup.get(old_data.get('group_id')).group_name
            if old_gname != group_name or not edit:
                # check group: same slug under the same parent is a clash
                gr = db.RepoGroup.query() \
                    .filter(func.lower(db.RepoGroup.group_name) == func.lower(slug)) \
                    .filter(db.RepoGroup.parent_group_id == parent_group_id) \
                    .scalar()
                if gr is not None:
                    msg = self.message('group_exists', state, group_name=slug)
                    raise formencode.Invalid(msg, value, state,
                        error_dict=dict(group_name=msg)
                    )
                # check for same repo
                repo = db.Repository.query() \
                    .filter(func.lower(db.Repository.repo_name) == func.lower(slug)) \
                    .scalar()
                if repo is not None:
                    msg = self.message('repo_exists', state, group_name=slug)
                    raise formencode.Invalid(msg, value, state,
                        error_dict=dict(group_name=msg)
                    )
    return _validator
def ValidPassword():
    """Build a validator rejecting passwords containing non-ASCII
    characters."""
    class _validator(formencode.validators.FancyValidator):
        messages = {
            'invalid_password':
                _('Invalid characters (non-ascii) in password')
        }
        def _validate_python(self, value, state):
            candidate = value or ''
            try:
                candidate.encode('ascii')
            except UnicodeError:
                # non-ASCII characters cannot be encoded -> reject
                raise formencode.Invalid(
                    self.message('invalid_password', state), value, state)
    return _validator
def ValidOldPassword(username):
    """Build a validator checking the submitted value against *username*'s
    current credentials via the configured auth modules."""
    class _validator(formencode.validators.FancyValidator):
        messages = {
            'invalid_password': _('Invalid old password')
        }
        def _validate_python(self, value, state):
            # imported lazily; avoids a cycle at module import time
            from kallithea.lib import auth_modules
            # authenticate returns None when the credentials do not match
            if auth_modules.authenticate(username, value, '') is None:
                msg = self.message('invalid_password', state)
                raise formencode.Invalid(msg, value, state,
                    error_dict=dict(current_password=msg)
                )
    return _validator
def ValidPasswordsMatch(password_field, password_confirmation_field):
    """Build a form-level validator ensuring two password fields hold
    the same value."""
    class _validator(formencode.validators.FancyValidator):
        messages = {
            'password_mismatch': _('Passwords do not match'),
        }
        def _validate_python(self, value, state):
            password = value.get(password_field)
            confirmation = value[password_confirmation_field]
            if password != confirmation:
                msg = self.message('password_mismatch', state)
                # attach the error to both fields so each input is flagged
                raise formencode.Invalid(msg, value, state,
                    error_dict={password_field: msg, password_confirmation_field: msg}
                )
    return _validator
def ValidAuth():
    """Build the login form validator: authenticates username/password
    through the configured auth modules and reports a generic failure
    message (distinguishing only in the log between a disabled account
    and a wrong password)."""
    class _validator(formencode.validators.FancyValidator):
        messages = {
            'invalid_auth': _('Invalid username or password'),
        }
        def _validate_python(self, value, state):
            from kallithea.lib import auth_modules
            password = value['password']
            username = value['username']
            # authenticate returns unused dict but has called
            # plugin._authenticate which has create_or_update'ed the username user in db
            if auth_modules.authenticate(username, password) is None:
                user = db.User.get_by_username_or_email(username)
                if user and not user.active:
                    log.warning('user %s is disabled', username)
                    msg = self.message('invalid_auth', state)
                    raise formencode.Invalid(msg, value, state,
                        error_dict=dict(username=' ', password=msg)
                    )
                else:
                    log.warning('user %s failed to authenticate', username)
                    msg = self.message('invalid_auth', state)
                    raise formencode.Invalid(msg, value, state,
                        error_dict=dict(username=' ', password=msg)
                    )
    return _validator
def ValidRepoName(edit=False, old_data=None):
    """Build a validator for the repository create/edit form.

    ``_convert_to_python`` slugifies the submitted name and derives the
    full name including the parent group path; ``_validate_python``
    rejects reserved names and clashes with existing repositories or
    repository groups.
    """
    old_data = old_data or {}
    class _validator(formencode.validators.FancyValidator):
        messages = {
            'invalid_repo_name':
                _('Repository name %(repo)s is not allowed'),
            'repository_exists':
                _('Repository named %(repo)s already exists'),
            'repository_in_group_exists': _('Repository "%(repo)s" already '
                                            'exists in group "%(group)s"'),
            'same_group_exists': _('Repository group with name "%(repo)s" '
                                   'already exists')
        }
        def _convert_to_python(self, value, state):
            """Normalise the name and add derived keys to the value dict:
            repo_name, repo_name_full, group_path, group_name."""
            repo_name = repo_name_slug(value.get('repo_name', ''))
            repo_group = value.get('repo_group')
            if repo_group:
                gr = db.RepoGroup.get(repo_group)
                group_path = gr.full_path
                group_name = gr.group_name
                # value needs to be aware of group name in order to check
                # db key This is an actual just the name to store in the
                # database
                repo_name_full = group_path + kallithea.URL_SEP + repo_name
            else:
                group_name = group_path = ''
                repo_name_full = repo_name
            value['repo_name'] = repo_name
            value['repo_name_full'] = repo_name_full
            value['group_path'] = group_path
            value['group_name'] = group_name
            return value
        def _validate_python(self, value, state):
            repo_name = value.get('repo_name')
            repo_name_full = value.get('repo_name_full')
            group_path = value.get('group_path')
            group_name = value.get('group_name')
            # reserved names (admin prefix) and empty names are never allowed
            if repo_name in [kallithea.ADMIN_PREFIX, '']:
                msg = self.message('invalid_repo_name', state, repo=repo_name)
                raise formencode.Invalid(msg, value, state,
                    error_dict=dict(repo_name=msg)
                )
            # only re-check uniqueness when creating or actually renaming
            rename = old_data.get('repo_name') != repo_name_full
            create = not edit
            if rename or create:
                repo = db.Repository.get_by_repo_name(repo_name_full, case_insensitive=True)
                repo_group = db.RepoGroup.get_by_group_name(repo_name_full, case_insensitive=True)
                if group_path != '':
                    if repo is not None:
                        msg = self.message('repository_in_group_exists', state,
                                           repo=repo.repo_name, group=group_name)
                        raise formencode.Invalid(msg, value, state,
                            error_dict=dict(repo_name=msg)
                        )
                    elif repo_group is not None:
                        msg = self.message('same_group_exists', state,
                                           repo=repo_name)
                        raise formencode.Invalid(msg, value, state,
                            error_dict=dict(repo_name=msg)
                        )
                elif repo is not None:
                    msg = self.message('repository_exists', state,
                                       repo=repo.repo_name)
                    raise formencode.Invalid(msg, value, state,
                        error_dict=dict(repo_name=msg)
                    )
            return value
    return _validator
def ValidForkName(*args, **kwargs):
    """Forks follow the same naming rules as repositories."""
    return ValidRepoName(*args, **kwargs)
def SlugifyName():
    """Build a validator that converts the incoming value to its slug
    form; it never rejects a value."""
    class _validator(formencode.validators.FancyValidator):
        def _convert_to_python(self, value, state):
            # normalisation only; delegates to the shared slug helper
            return repo_name_slug(value)
        def _validate_python(self, value, state):
            # conversion cannot fail, so there is nothing to validate
            pass
    return _validator
def ValidCloneUri():
    """Build a validator for the optional clone URL of a repository."""
    from kallithea.lib.utils import make_ui
    class _validator(formencode.validators.FancyValidator):
        messages = {
            'clone_uri': _('Invalid repository URL'),
            'invalid_clone_uri': _('Invalid repository URL. It must be a '
                                   'valid http, https, or ssh URL'),
        }
        def _validate_python(self, value, state):
            repo_type = value.get('repo_type')
            url = value.get('clone_uri')
            # only re-validate when the URL differs from the hidden field,
            # i.e. when it was actually changed in the form
            if url and url != value.get('clone_uri_hidden'):
                try:
                    is_valid_repo_uri(repo_type, url, make_ui())
                except InvalidCloneUriException as e:
                    log.warning('validation of clone URL %r failed: %s', url, e)
                    msg = self.message('clone_uri', state)
                    raise formencode.Invalid(msg, value, state,
                        error_dict=dict(clone_uri=msg)
                    )
    return _validator
def ValidForkType(old_data=None):
    """Build a validator ensuring a fork keeps the repository type of
    its parent."""
    old_data = old_data or {}
    class _validator(formencode.validators.FancyValidator):
        messages = {
            'invalid_fork_type': _('Fork has to be the same type as parent')
        }
        def _validate_python(self, value, state):
            parent_type = old_data['repo_type']
            if parent_type != value:
                msg = self.message('invalid_fork_type', state)
                raise formencode.Invalid(msg, value, state,
                    error_dict=dict(repo_type=msg)
                )
    return _validator
def CanWriteGroup(old_data=None):
    """Build a validator for the repository group selector of the repo
    form: ensures the requesting user may create repositories in the
    chosen group (or in the root location when no group is selected)."""
    class _validator(formencode.validators.FancyValidator):
        messages = {
            'permission_denied': _("You don't have permissions "
                                   "to create repository in this group"),
            'permission_denied_root': _("no permission to create repository "
                                        "in root location")
        }
        def _convert_to_python(self, value, state):
            # root location
            if value == -1:
                return None
            return value
        def _validate_python(self, value, state):
            gr = db.RepoGroup.get(value)
            gr_name = gr.group_name if gr is not None else None # None means ROOT location
            # create repositories with write permission on group is set to true
            group_admin = auth.HasRepoGroupPermissionLevel('admin')(gr_name,
                'can write into group validator')
            group_write = auth.HasRepoGroupPermissionLevel('write')(gr_name,
                'can write into group validator')
            forbidden = not (group_admin or group_write)
            can_create_repos = auth.HasPermissionAny('hg.admin', 'hg.create.repository')
            gid = (old_data['repo_group'].get('group_id')
                   if (old_data and 'repo_group' in old_data) else None)
            value_changed = gid != value
            new = not old_data
            # do check if we changed the value, there's a case that someone got
            # revoked write permissions to a repository, he still created, we
            # don't need to check permission if he didn't change the value of
            # groups in form box
            if value_changed or new:
                # parent group need to be existing
                if gr and forbidden:
                    msg = self.message('permission_denied', state)
                    raise formencode.Invalid(msg, value, state,
                        error_dict=dict(repo_type=msg)
                    )
                ## check if we can write to root location !
                elif gr is None and not can_create_repos():
                    msg = self.message('permission_denied_root', state)
                    raise formencode.Invalid(msg, value, state,
                        error_dict=dict(repo_type=msg)
                    )
    return _validator
def CanCreateGroup(can_create_in_root=False):
    """Build a validator for the parent selector of the repo group form:
    requires admin permission on the chosen parent group, or
    *can_create_in_root* for the root location."""
    class _validator(formencode.validators.FancyValidator):
        messages = {
            'permission_denied': _("You don't have permissions "
                                   "to create a group in this location")
        }
        def to_python(self, value, state):
            # root location
            if value == -1:
                return None
            return value
        def _validate_python(self, value, state):
            gr = db.RepoGroup.get(value)
            gr_name = gr.group_name if gr is not None else None # None means ROOT location
            if can_create_in_root and gr is None:
                # we can create in root, we're fine no validations required
                return
            forbidden_in_root = gr is None and not can_create_in_root
            forbidden = not auth.HasRepoGroupPermissionLevel('admin')(gr_name, 'can create group validator')
            if forbidden_in_root or forbidden:
                msg = self.message('permission_denied', state)
                raise formencode.Invalid(msg, value, state,
                    error_dict=dict(parent_group_id=msg)
                )
    return _validator
def ValidPerms(type_='repo'):
    """Build a validator that normalises the permission widgets of the
    repo / repo group / user group permission forms.

    Collects ``perm_new_member_*`` inputs into ``value['perms_new']`` and
    the existing ``u_perm_*`` / ``g_perm_*`` inputs into
    ``value['perms_updates']``, and verifies that every new member names
    an existing active user or user group.
    """
    # the "no access" permission for this entity type; used to force the
    # default user to none when a repo is made private
    if type_ == 'repo_group':
        EMPTY_PERM = 'group.none'
    elif type_ == 'repo':
        EMPTY_PERM = 'repository.none'
    elif type_ == 'user_group':
        EMPTY_PERM = 'usergroup.none'
    class _validator(formencode.validators.FancyValidator):
        messages = {
            'perm_new_member_name':
                _('This username or user group name is not valid')
        }
        def to_python(self, value, state):
            perms_update = OrderedSet()
            perms_new = OrderedSet()
            # build a list of permission to update and new permission to create
            # CLEAN OUT ORG VALUE FROM NEW MEMBERS, and group them using
            new_perms_group = defaultdict(dict)
            for k, v in value.copy().items():
                if k.startswith('perm_new_member'):
                    del value[k]
                    _type, part = k.split('perm_new_member_')
                    args = part.split('_')
                    if len(args) == 1:
                        new_perms_group[args[0]]['perm'] = v
                    elif len(args) == 2:
                        _key, pos = args
                        new_perms_group[pos][_key] = v
            # fill new permissions in order of how they were added
            for k in sorted(new_perms_group, key=lambda k: int(k)):
                perm_dict = new_perms_group[k]
                new_member = perm_dict.get('name')
                new_perm = perm_dict.get('perm')
                new_type = perm_dict.get('type')
                if new_member and new_perm and new_type:
                    perms_new.add((new_member, new_perm, new_type))
            for k, v in value.items():
                if k.startswith('u_perm_') or k.startswith('g_perm_'):
                    member_name = k[7:]
                    t = {'u': 'user',
                         'g': 'users_group'
                         }[k[0]]
                    if member_name == db.User.DEFAULT_USER_NAME:
                        if asbool(value.get('repo_private')):
                            # set none for default when updating to
                            # private repo protects against form manipulation
                            v = EMPTY_PERM
                    perms_update.add((member_name, v, t))
            value['perms_updates'] = list(perms_update)
            value['perms_new'] = list(perms_new)
            # verify every new member exists and is active
            for k, v, t in perms_new:
                try:
                    if t == 'user':
                        _user_db = db.User.query() \
                            .filter(db.User.active == True) \
                            .filter(db.User.username == k).one()
                    if t == 'users_group':
                        _user_db = db.UserGroup.query() \
                            .filter(db.UserGroup.users_group_active == True) \
                            .filter(db.UserGroup.users_group_name == k).one()
                except Exception as e:
                    # fix: include the actual cause in the log instead of
                    # silently dropping the caught exception
                    log.warning('Error validating %s permission %s: %s', t, k, e)
                    # fix: 'perm_new_member_type' was not a defined message
                    # key (only 'perm_new_member_name' is), so this path
                    # raised KeyError instead of a proper validation error
                    msg = self.message('perm_new_member_name', state)
                    raise formencode.Invalid(msg, value, state,
                        error_dict=dict(perm_new_member_name=msg)
                    )
            return value
    return _validator
def ValidSettings():
    """Build a validator stripping parameters that non-admin users may
    not edit."""
    class _validator(formencode.validators.FancyValidator):
        def _convert_to_python(self, value, state):
            # settings form for users that are not admin
            # can't edit certain parameters, it's extra backup if they mangle
            # with forms
            for param in ('user', 'repo_type',
                          'repo_enable_downloads', 'repo_enable_statistics'):
                # pop() with a default removes the key only when present
                value.pop(param, None)
            return value
        def _validate_python(self, value, state):
            pass
    return _validator
def ValidPath():
    """Build a validator accepting only paths that name an existing
    directory."""
    class _validator(formencode.validators.FancyValidator):
        messages = {
            'invalid_path': _('This is not a valid path')
        }
        def _validate_python(self, value, state):
            if os.path.isdir(value):
                return
            msg = self.message('invalid_path', state)
            raise formencode.Invalid(msg, value, state,
                error_dict=dict(paths_root_path=msg)
            )
    return _validator
def UniqSystemEmail(old_data=None):
    """Build a validator ensuring the (lower-cased) email address is not
    already taken; when editing, the user's current address is allowed
    unchanged."""
    old_data = old_data or {}
    class _validator(formencode.validators.FancyValidator):
        messages = {
            'email_taken': _('This email address is already in use')
        }
        def _convert_to_python(self, value, state):
            # emails are compared case-insensitively
            return value.lower()
        def _validate_python(self, value, state):
            previous = (old_data.get('email') or '').lower()
            if previous == value:
                # unchanged address on edit -- nothing to check
                return
            if db.User.get_by_email(value) is not None:
                msg = self.message('email_taken', state)
                raise formencode.Invalid(msg, value, state,
                    error_dict=dict(email=msg)
                )
    return _validator
def ValidSystemEmail():
    """Build a validator requiring the email to belong to an existing
    user."""
    class _validator(formencode.validators.FancyValidator):
        messages = {
            'non_existing_email': _('Email address "%(email)s" not found')
        }
        def _convert_to_python(self, value, state):
            # emails are compared lower-cased
            return value.lower()
        def _validate_python(self, value, state):
            if db.User.get_by_email(value) is None:
                msg = self.message('non_existing_email', state, email=value)
                raise formencode.Invalid(msg, value, state,
                    error_dict=dict(email=msg)
                )
    return _validator
def LdapLibValidator():
    """Build a validator that fails when the python-ldap library is not
    installed; the value itself is never inspected."""
    class _validator(formencode.validators.FancyValidator):
        messages = {}
        def _validate_python(self, value, state):
            try:
                # only probe for availability of the library
                import ldap  # noqa: F401
            except ImportError:
                raise LdapImportError()
    return _validator
def AttrLoginValidator():
    """Build a UnicodeString validator for the LDAP login (CN) attribute;
    an empty submission reuses the "invalid_cn" message."""
    class _validator(formencode.validators.UnicodeString):
        _cn_msg = _('The LDAP Login attribute of the CN must be specified - '
                    'this is the name of the attribute that is equivalent '
                    'to "username"')
        messages = {'invalid_cn': _cn_msg, 'empty': _cn_msg}
    return _validator
def ValidIp():
    """Build a validator for IP allow-list entries: accepts an IPv4/IPv6
    address or network and normalises a bare address by appending a
    full-length mask (/32 or /128)."""
    class _validator(CIDR):
        messages = dict(
            badFormat=_('Please enter a valid IPv4 or IPv6 address'),
            illegalBits=_('The network size (bits) must be within the range'
                          ' of 0-32 (not %(bits)r)')
        )
        def to_python(self, value, state):
            v = super(_validator, self).to_python(value, state)
            v = v.strip()
            net = ipaddr.IPNetwork(address=v)
            if isinstance(net, ipaddr.IPv4Network):
                # if IPv4 doesn't end with a mask, add /32
                if '/' not in value:
                    v += '/32'
            if isinstance(net, ipaddr.IPv6Network):
                # if IPv6 doesn't end with a mask, add /128
                if '/' not in value:
                    v += '/128'
            return v
        def _validate_python(self, value, state):
            try:
                addr = value.strip()
                # this raises a ValueError if address is not IPv4 or IPv6
                ipaddr.IPNetwork(address=addr)
            except ValueError:
                raise formencode.Invalid(self.message('badFormat', state),
                    value, state)
    return _validator
def FieldKey():
    """Build a validator restricting extra-field keys to letters, digits,
    underscore and dash."""
    class _validator(formencode.validators.FancyValidator):
        messages = dict(
            badFormat=_('Key name can only consist of letters, '
                        'underscore, dash or numbers')
        )
        def _validate_python(self, value, state):
            # re.match anchors at the start; '$' anchors the end
            if re.match(r'[a-zA-Z0-9_-]+$', value):
                return
            raise formencode.Invalid(self.message('badFormat', state),
                                     value, state)
    return _validator
def BasePath():
    """Build a validator rejecting values containing a directory
    component."""
    class _validator(formencode.validators.FancyValidator):
        messages = dict(
            badPath=_('Filename cannot be inside a directory')
        )
        def _convert_to_python(self, value, state):
            return value
        def _validate_python(self, value, state):
            # a bare filename equals its own basename; anything else
            # carries a path component
            if os.path.basename(value) != value:
                raise formencode.Invalid(self.message('badPath', state),
                                         value, state)
    return _validator
def ValidAuthPlugins():
    """Build a validator for the list of enabled authentication plugins.

    Filters out empty entries, then verifies that every module can be
    loaded and that no two plugins export the same name.
    """
    class _validator(formencode.validators.FancyValidator):
        messages = dict(
            import_duplicate=_('Plugins %(loaded)s and %(next_to_load)s both export the same name')
        )
        def _convert_to_python(self, value, state):
            # filter empty values
            return [s for s in value if s not in [None, '']]
        def _validate_python(self, value, state):
            from kallithea.lib import auth_modules
            unique_names = {}
            try:
                # fix: dropped the redundant ``module_list = value`` alias;
                # also removed a stray "| PypiClean |" artifact that
                # corrupted the final line of this function
                for module in value:
                    plugin = auth_modules.loadplugin(module)
                    plugin_name = plugin.name
                    if plugin_name in unique_names:
                        msg = self.message('import_duplicate', state,
                                           loaded=unique_names[plugin_name],
                                           next_to_load=plugin_name)
                        raise formencode.Invalid(msg, value, state)
                    unique_names[plugin_name] = plugin
            except (ImportError, AttributeError, TypeError) as e:
                # a plugin that cannot be loaded is reported verbatim
                raise formencode.Invalid(str(e), value, state)
    return _validator
/Authlib-1.2.1.tar.gz/Authlib-1.2.1/authlib/oauth1/client.py | from authlib.common.urls import (
url_decode,
add_params_to_uri,
urlparse,
)
from authlib.common.encoding import json_loads
from .rfc5849 import (
SIGNATURE_HMAC_SHA1,
SIGNATURE_TYPE_HEADER,
ClientAuth,
)
class OAuth1Client(object):
    """Base client for the OAuth 1 three-legged flow.

    HTTP transport is delegated to the supplied ``session``; request
    signing is handled by ``auth_class`` (an authlib ``ClientAuth``).
    Typical usage: ``fetch_request_token`` -> redirect the user to
    ``create_authorization_url`` -> ``fetch_access_token``.
    """
    auth_class = ClientAuth
    def __init__(self, session, client_id, client_secret=None,
                 token=None, token_secret=None,
                 redirect_uri=None, rsa_key=None, verifier=None,
                 signature_method=SIGNATURE_HMAC_SHA1,
                 signature_type=SIGNATURE_TYPE_HEADER,
                 force_include_body=False, realm=None, **kwargs):
        if not client_id:
            raise ValueError('Missing "client_id"')
        self.session = session
        # all credential state lives on the auth helper, not on the client
        self.auth = self.auth_class(
            client_id, client_secret=client_secret,
            token=token, token_secret=token_secret,
            redirect_uri=redirect_uri,
            signature_method=signature_method,
            signature_type=signature_type,
            rsa_key=rsa_key,
            verifier=verifier,
            realm=realm,
            force_include_body=force_include_body
        )
        self._kwargs = kwargs
    @property
    def redirect_uri(self):
        return self.auth.redirect_uri
    @redirect_uri.setter
    def redirect_uri(self, uri):
        self.auth.redirect_uri = uri
    @property
    def token(self):
        return dict(
            oauth_token=self.auth.token,
            oauth_token_secret=self.auth.token_secret,
            oauth_verifier=self.auth.verifier
        )
    @token.setter
    def token(self, token):
        """This token setter is designed for an easy integration for
        OAuthClient. Make sure both OAuth1Session and OAuth2Session
        have token setters.
        """
        if token is None:
            # explicit reset of all credential state
            self.auth.token = None
            self.auth.token_secret = None
            self.auth.verifier = None
        elif 'oauth_token' in token:
            self.auth.token = token['oauth_token']
            if 'oauth_token_secret' in token:
                self.auth.token_secret = token['oauth_token_secret']
            if 'oauth_verifier' in token:
                self.auth.verifier = token['oauth_verifier']
        else:
            message = 'oauth_token is missing: {!r}'.format(token)
            self.handle_error('missing_token', message)
    def create_authorization_url(self, url, request_token=None, **kwargs):
        """Create an authorization URL by appending request_token and optional
        kwargs to url.
        This is the second step in the OAuth 1 workflow. The user should be
        redirected to this authorization URL, grant access to you, and then
        be redirected back to you. The redirection back can either be specified
        during client registration or by supplying a callback URI per request.
        :param url: The authorization endpoint URL.
        :param request_token: The previously obtained request token.
        :param kwargs: Optional parameters to append to the URL.
        :returns: The authorization URL with new parameters embedded.
        """
        kwargs['oauth_token'] = request_token or self.auth.token
        if self.auth.redirect_uri:
            kwargs['oauth_callback'] = self.auth.redirect_uri
        return add_params_to_uri(url, kwargs.items())
    def fetch_request_token(self, url, **kwargs):
        """Method for fetching an access token from the token endpoint.
        This is the first step in the OAuth 1 workflow. A request token is
        obtained by making a signed post request to url. The token is then
        parsed from the application/x-www-form-urlencoded response and ready
        to be used to construct an authorization url.
        :param url: Request Token endpoint.
        :param kwargs: Extra parameters to include for fetching token.
        :return: A Request Token dict.
        """
        return self._fetch_token(url, **kwargs)
    def fetch_access_token(self, url, verifier=None, **kwargs):
        """Method for fetching an access token from the token endpoint.
        This is the final step in the OAuth 1 workflow. An access token is
        obtained using all previously obtained credentials, including the
        verifier from the authorization step.
        :param url: Access Token endpoint.
        :param verifier: A verifier string to prove authorization was granted.
        :param kwargs: Extra parameters to include for fetching access token.
        :return: A token dict.
        """
        if verifier:
            self.auth.verifier = verifier
        if not self.auth.verifier:
            self.handle_error('missing_verifier', 'Missing "verifier" value')
        return self._fetch_token(url, **kwargs)
    def parse_authorization_response(self, url):
        """Extract parameters from the post authorization redirect
        response URL.
        :param url: The full URL that resulted from the user being redirected
                    back from the OAuth provider to you, the client.
        :returns: A dict of parameters extracted from the URL.
        """
        token = dict(url_decode(urlparse.urlparse(url).query))
        self.token = token
        return token
    def _fetch_token(self, url, **kwargs):
        # sign the POST with the current credentials, adopt the returned
        # token, and clear the (single-use) verifier afterwards
        resp = self.session.post(url, auth=self.auth, **kwargs)
        token = self.parse_response_token(resp.status_code, resp.text)
        self.token = token
        self.auth.verifier = None
        return token
    def parse_response_token(self, status_code, text):
        """Parse a token response body (JSON or urlencoded) into a dict,
        delegating error statuses to :meth:`handle_error`."""
        if status_code >= 400:
            message = (
                "Token request failed with code {}, "
                "response was '{}'."
            ).format(status_code, text)
            self.handle_error('fetch_token_denied', message)
        try:
            text = text.strip()
            if text.startswith('{'):
                token = json_loads(text)
            else:
                token = dict(url_decode(text))
        except (TypeError, ValueError) as e:
            error = (
                "Unable to decode token from token response. "
                "This is commonly caused by an unsuccessful request where"
                " a non urlencoded error message is returned. "
                "The decoding error was {}"
            ).format(e)
            raise ValueError(error)
        return token
    @staticmethod
    def handle_error(error_type, error_description):
        # fix: removed a stray "| PypiClean |" artifact from the end of
        # this line, which made the module unparsable
        raise ValueError('{}: {}'.format(error_type, error_description))
/Django-Yummy-0.2.6.tar.gz/Django-Yummy-0.2.6/yummy/migrations/0012_auto__del_consumer.py | import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: drop the obsolete ``Consumer`` table."""
        # Deleting model 'Consumer'
        db.delete_table('yummy_consumer')
def backwards(self, orm):
# Adding model 'Consumer'
db.create_table('yummy_consumer', (
('title', self.gf('django.db.models.fields.CharField')(max_length=64)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('slug', self.gf('django.db.models.fields.SlugField')(max_length=64, unique=True)),
))
db.send_create_signal('yummy', ['Consumer'])
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'taggit.tag': {
'Meta': {'ordering': "['namespace', 'name']", 'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'namespace': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_tagged_items'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_items'", 'to': "orm['taggit.Tag']"})
},
'yummy.category': {
'Meta': {'object_name': 'Category'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['yummy.Category']", 'null': 'True', 'blank': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'photo': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['yummy.Photo']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '64'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'yummy.cookbook': {
'Meta': {'unique_together': "(('owner', 'slug'),)", 'object_name': 'CookBook'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'recipes': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['yummy.Recipe']", 'symmetrical': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '128'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'yummy.cookingtype': {
'Meta': {'object_name': 'CookingType'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '64'})
},
'yummy.cuisine': {
'Meta': {'object_name': 'Cuisine'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '64'})
},
'yummy.ingredient': {
'Meta': {'object_name': 'Ingredient'},
'default_unit': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'genitive': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['yummy.IngredientGroup']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'ndb_no': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '64'})
},
'yummy.ingredientgroup': {
'Meta': {'object_name': 'IngredientGroup'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128'})
},
'yummy.ingredientinrecipe': {
'Meta': {'unique_together': "(('recipe', 'order'),)", 'object_name': 'IngredientInRecipe'},
'amount': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '2', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['yummy.IngredientInRecipeGroup']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ingredient': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['yummy.Ingredient']"}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1', 'db_index': 'True'}),
'recipe': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['yummy.Recipe']"}),
'unit': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'})
},
'yummy.ingredientinrecipegroup': {
'Meta': {'unique_together': "(('recipe', 'order'),)", 'object_name': 'IngredientInRecipeGroup'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'}),
'recipe': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['yummy.Recipe']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'})
},
'yummy.photo': {
'Meta': {'object_name': 'Photo'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'height': ('django.db.models.fields.PositiveIntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '255'}),
'is_redaction': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'width': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'yummy.recipe': {
'Meta': {'object_name': 'Recipe'},
'caloric_value': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['yummy.Category']"}),
'cooking_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['yummy.CookingType']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
'cuisines': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['yummy.Cuisine']", 'symmetrical': 'False'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'difficulty': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '3', 'db_index': 'True'}),
'hint': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_approved': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'preparation': ('django.db.models.fields.TextField', [], {}),
'preparation_time': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'price': ('django.db.models.fields.SmallIntegerField', [], {'default': '3', 'db_index': 'True'}),
'servings': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '64'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'updated': ('django.db.models.fields.DateTimeField', [], {})
},
'yummy.recipephoto': {
'Meta': {'unique_together': "(('recipe', 'photo'), ('recipe', 'order'))", 'object_name': 'RecipePhoto'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1', 'db_index': 'True'}),
'photo': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['yummy.Photo']"}),
'recipe': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['yummy.Recipe']"})
},
'yummy.reciperecommendation': {
'Meta': {'object_name': 'RecipeRecommendation'},
'day_from': ('django.db.models.fields.DateField', [], {}),
'day_to': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'recipe': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['yummy.Recipe']"})
},
'yummy.unitconversion': {
'Meta': {'unique_together': "(('from_unit', 'to_unit'),)", 'object_name': 'UnitConversion'},
'from_unit': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ratio': ('django.db.models.fields.DecimalField', [], {'max_digits': '10', 'decimal_places': '5'}),
'to_unit': ('django.db.models.fields.PositiveSmallIntegerField', [], {})
},
'yummy.weekmenu': {
'Meta': {'unique_together': "(('day', 'even_week'),)", 'object_name': 'WeekMenu'},
'day': ('django.db.models.fields.IntegerField', [], {}),
'dessert': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'menu_dessert'", 'null': 'True', 'to': "orm['yummy.Recipe']"}),
'even_week': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'meal': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'menu_meal'", 'null': 'True', 'to': "orm['yummy.Recipe']"}),
'soup': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'menu_soup'", 'null': 'True', 'to': "orm['yummy.Recipe']"})
}
}
complete_apps = ['yummy'] | PypiClean |
/Gnotty-0.2.7.tar.gz/Gnotty-0.2.7/gnotty/server.py |
from __future__ import with_statement
from gevent import monkey, spawn, sleep
monkey.patch_all()
from Cookie import SimpleCookie
from cgi import FieldStorage
from logging import getLogger, StreamHandler
from mimetypes import guess_type
import os
import sys
from tempfile import gettempdir
from traceback import format_exc
from daemon import daemonize
from socketio import socketio_manage
from socketio.server import SocketIOServer
from socketio.namespace import BaseNamespace
from gnotty.client import WebSocketIRCClient
from gnotty.conf import settings
# Minimal map of the HTTP status codes this server emits to their reason
# phrases, used by IRCApplication.__call__ when building the WSGI status line.
HTTP_STATUS_TEXT = {
    200: "OK",
    301: "MOVED PERMANENTLY",
    401: "UNAUTHORIZED",
    404: "NOT FOUND",
    500: "INTERNAL SERVER ERROR",
}
class IRCNamespace(BaseNamespace):
    """
    gevent-socketio namespace that's bridged with an IRC client.
    """

    def on_start(self, host, port, channel, nickname, password):
        """
        A WebSocket session has started - create a greenlet to host
        the IRC client, and start it.
        """
        self.client = WebSocketIRCClient(host, port, channel, nickname,
                                         password, self)
        self.spawn(self.client.start)

    def on_message(self, message):
        """
        Message received from a WebSocket - send it to the IRC channel.
        """
        if hasattr(self, "client"):
            self.client.emit_message(message)

    def disconnect(self, *args, **kwargs):
        """
        WebSocket was disconnected - leave the IRC channel.

        The ``client`` attribute only exists once an ``on_start`` event
        has been handled, so guard against a socket that connected but
        never started an IRC session (previously this raised
        ``AttributeError``), mirroring the guard in ``on_message``.
        """
        if hasattr(self, "client"):
            quit_message = "%s %s" % (settings.GNOTTY_VERSION_STRING,
                                      settings.GNOTTY_PROJECT_URL)
            self.client.connection.quit(quit_message)
        super(IRCNamespace, self).disconnect(*args, **kwargs)
class IRCApplication(object):
    """WSGI application dispatching socket.io, webhook, static and Django
    requests, and hosting the optional IRC bot for the whole process.

    NOTE(review): this module targets Python 2 (``unicode``/``basestring``
    below) - confirm before running under Python 3.
    """
    def __init__(self, django=False):
        """
        Loads and starts the IRC bot for the entire application.
        """
        self.django = django
        self.bot = None
        if settings.BOT_CLASS:
            # BOT_CLASS is a dotted path, e.g. "gnotty.bots.BaseBot";
            # import the module and resolve the class dynamically.
            module_name, class_name = settings.BOT_CLASS.rsplit(".", 1)
            __import__(module_name)
            bot_class = getattr(sys.modules[module_name], class_name)
            self.bot = bot_class(settings.IRC_HOST, settings.IRC_PORT,
                                 settings.IRC_CHANNEL, settings.BOT_NICKNAME,
                                 settings.BOT_PASSWORD)
            # Run the bot and its reconnect watcher in their own greenlets.
            spawn(self.bot.start)
            spawn(self.bot_watcher)
        self.logger = getLogger("irc.webhooks")
        self.logger.setLevel(settings.LOG_LEVEL)
        self.logger.addHandler(StreamHandler())
    def bot_watcher(self):
        """
        Thread (greenlet) that will try and reconnect the bot if
        it's not connected.

        Uses exponential backoff: the interval doubles after each failed
        reconnect attempt and resets to 5 seconds after a success.
        """
        default_interval = 5
        interval = default_interval
        while True:
            if not self.bot.connection.connected:
                if self.bot.reconnect():
                    interval = default_interval
                else:
                    interval *= 2
            sleep(interval)
    def respond_webhook(self, environ):
        """
        Passes the request onto a bot with a webhook if the webhook
        path is requested.

        Returns an int HTTP status (404 when no bot/handler exists, 500 on
        any handler failure) or the handler's response (200 when falsy).
        """
        request = FieldStorage(fp=environ["wsgi.input"], environ=environ)
        url = environ["PATH_INFO"]
        params = dict([(k, request[k].value) for k in request])
        try:
            if self.bot is None:
                raise NotImplementedError
            response = self.bot.handle_webhook_event(environ, url, params)
        except NotImplementedError:
            return 404
        except:
            # Deliberate catch-all: a webhook handler must never crash the
            # server; the traceback is logged at debug level instead.
            self.logger.debug(format_exc())
            return 500
        return response or 200
    def respond_static(self, environ):
        """
        Serves a static file when Django isn't being used.

        ``normpath`` collapses any ".." segments in the absolute request
        path before it is joined onto the package directory.
        """
        path = os.path.normpath(environ["PATH_INFO"])
        if path == "/":
            content = self.index()
            content_type = "text/html"
        else:
            path = os.path.join(os.path.dirname(__file__), path.lstrip("/"))
            try:
                with open(path, "r") as f:
                    content = f.read()
            except IOError:
                return 404
            content_type = guess_type(path)[0]
        return (200, [("Content-Type", content_type)], content)
    def index(self):
        """
        Loads the chat interface template when Django isn't being
        used, manually dealing with the Django template bits.
        """
        root_dir = os.path.dirname(__file__)
        template_dir = os.path.join(root_dir, "templates", "gnotty")
        with open(os.path.join(template_dir, "base.html"), "r") as f:
            base = f.read()
        with open(os.path.join(template_dir, "chat.html"), "r") as f:
            base = base.replace("{% block content %}", f.read())
        # Strip/emulate the Django template tags the templates use, since
        # there is no Django template engine in standalone mode.
        replace = {
            "{% block content %}": "",
            "{% block extrahead %}": "",
            "{% endblock %}": "",
            "{% load gnotty_tags %}": "",
            "{% extends \"gnotty/base.html\" %}": "",
            "{% url gnotty_chat %}": "/",
            "{% gnotty_nav %}": "",
            "{% templatetag openvariable %}": "{{",
            "{% templatetag closevariable %}": "}}",
        }
        for k, v in replace.items():
            base = base.replace(k, v)
        # Substitute each setting into "{{ NAME }}" placeholders.
        for k, v in settings.items():
            base = base.replace("{{ %s }}" % k, unicode(v or ""))
        return base
    def respond_django(self, environ):
        """
        Tries to redirect to a Django app if someone accesses an
        invalid URL when Django is being used.
        """
        environ["port"] = ""
        # Local development assumption: Django's runserver is on port 8000.
        if environ["SERVER_NAME"] in ("127.0.0.1", "localhost"):
            environ["port"] = ":8000"
        location = ("%(wsgi.url_scheme)s://" +
                    "%(SERVER_NAME)s%(port)s%(PATH_INFO)s") % environ
        return (301, [("Location", location)], None)
    def respond_unauthorized(self, environ):
        """
        Just return unauthorized HTTP status if the
        ``unauthorized`` method returns ``True`` inside
        ``__call__``.
        """
        return 401
    def authorized(self, environ):
        """
        If we're running Django and ``GNOTTY_LOGIN_REQUIRED`` is set
        to ``True``, pull the session cookie from the environment and
        validate that the user is authenticated.
        """
        if self.django and settings.LOGIN_REQUIRED:
            try:
                # Imported lazily so the module works without Django.
                from django.conf import settings as django_settings
                from django.contrib.auth import SESSION_KEY
                from django.contrib.auth.models import User
                from django.contrib.sessions.models import Session
                from django.core.exceptions import ObjectDoesNotExist
                cookie = SimpleCookie(environ["HTTP_COOKIE"])
                cookie_name = django_settings.SESSION_COOKIE_NAME
                session_key = cookie[cookie_name].value
                session = Session.objects.get(session_key=session_key)
                user_id = session.get_decoded().get(SESSION_KEY)
                # Looked up purely to validate the user exists; the raised
                # ObjectDoesNotExist is what signals failure.
                user = User.objects.get(id=user_id)
            except (ImportError, KeyError, ObjectDoesNotExist):
                return False
        return True
    def __call__(self, environ, start_response):
        """
        WSGI application handler.

        Routes socket.io traffic, then picks a ``respond_*`` handler which
        may return an int status, a string body, or a full
        ``(status, headers, content)`` tuple.
        """
        authorized = self.authorized(environ)
        path = environ["PATH_INFO"]
        if path.startswith("/socket.io/") and authorized:
            # socketio_manage runs the whole socket.io session itself.
            socketio_manage(environ, {"": IRCNamespace})
            return
        if not authorized:
            dispatch = self.respond_unauthorized
        elif path.startswith("/webhook/"):
            dispatch = self.respond_webhook
        elif self.django:
            dispatch = self.respond_django
        else:
            dispatch = self.respond_static
        response = dispatch(environ)
        # Normalize shorthand handler results into (status, headers, content).
        if isinstance(response, int):
            response = (response, [], None)
        elif isinstance(response, basestring):
            response = (200, [], response)
        status, headers, content = response
        status_text = HTTP_STATUS_TEXT.get(status, "")
        headers.append(("Server", settings.GNOTTY_VERSION_STRING))
        start_response("%s %s" % (status, status_text), headers)
        if content is None:
            if status == 200:
                content = ""
            else:
                # Minimal error page for non-200 responses without a body.
                content = "<h1>%s</h1>" % status_text.title()
        return [content]
def serve_forever(django=False):
    """
    Starts the gevent-socketio server.

    Blocks forever serving on ``HTTP_HOST:HTTP_PORT``; ``django`` switches
    IRCApplication into Django-integrated mode.
    """
    logger = getLogger("irc.dispatch")
    logger.setLevel(settings.LOG_LEVEL)
    logger.addHandler(StreamHandler())
    app = IRCApplication(django)
    server = SocketIOServer((settings.HTTP_HOST, settings.HTTP_PORT), app)
    # When no BOT_CLASS is configured app.bot is None, so "NoneType" is
    # printed as the bot name (this is harmless).
    print "%s [Bot: %s] listening on %s:%s" % (
        settings.GNOTTY_VERSION_STRING,
        app.bot.__class__.__name__,
        settings.HTTP_HOST,
        settings.HTTP_PORT,
    )
    server.serve_forever()
def kill(pid_file):
    """Terminate the daemon whose pid is recorded in ``pid_file``.

    Returns ``True`` when a process was signalled (SIGKILL) and the pid
    file removed; ``False`` when the pid file is missing/unreadable or the
    signal could not be delivered.
    """
    try:
        with open(pid_file) as handle:
            pid = int(handle.read())
        os.kill(pid, 9)
        os.remove(pid_file)
    except (IOError, OSError):
        return False
    return True
def run():
    """
    CLI entry point. Parses args and starts the gevent-socketio server.
    """
    settings.parse_args()
    # The pid file name encodes host and port so multiple instances can
    # run side by side; it lives in PID_FILE or the system temp dir.
    pid_name = "gnotty-%s-%s.pid" % (settings.HTTP_HOST, settings.HTTP_PORT)
    pid_file = settings.PID_FILE or os.path.join(gettempdir(), pid_name)
    if settings.KILL:
        # --kill: only stop a previously started daemon, never start one.
        if kill(pid_file):
            print "Daemon killed"
        else:
            print "Could not kill any daemons"
        return
    elif kill(pid_file):
        # Replace any daemon already bound to this host/port.
        print "Running daemon killed"
    if settings.DAEMON:
        daemonize(pid_file)
    serve_forever()
# Allow the module to be executed directly as a script.
if __name__ == "__main__":
    run()
class CacheBullet:
    """Marker operation that splits a :class:`Composition` into a
    pre-cache and a post-cache stage; it passes its inputs through
    unchanged and is never executed by the pipeline itself."""

    def __call__(self, **kwargs):
        """Identity: return the keyword arguments as a dict."""
        return kwargs
class Composition:
    """An ordered pipeline of operations applied to keyword arguments.

    Every operation receives the current keyword arguments and returns the
    transformed keyword arguments for the next one.  A ``CacheBullet``
    placed in the pipeline marks the boundary between the pre-cache and
    post-cache stages, and individual operations can be switched off by
    index via :meth:`deactivate_op`.
    """

    def __init__(self):
        self.ops = []           # ordered operations
        self.deactivated = []   # indices of operations to skip
        self._index_bullet = 0  # position of the CacheBullet, if any

    def add(self, *funcs):
        """Append one or more operations; returns ``self`` for chaining."""
        self.ops.extend(funcs)
        return self

    def deactivate_op(self, index):
        """Disable the operation(s) at ``index`` (an int or a list of ints)."""
        if isinstance(index, list):
            self.deactivated += index
        else:
            self.deactivated.append(index)

    def __call__(self, **kwargs):
        """Run every active, non-bullet operation in order."""
        for position, op in enumerate(self.ops):
            if position in self.deactivated or isinstance(op, CacheBullet):
                continue
            kwargs = op(**kwargs)
        return kwargs

    def __lshift__(self, other):
        """``composition << op`` is shorthand for ``composition.add(op)``."""
        return self.add(other)

    def precache_call(self, **kwargs):
        """Apply only the active operations located before the bullet.

        When no bullet is present the arguments pass through untouched.
        """
        if not self.has_bullet_cache:
            return kwargs
        for position, op in enumerate(self.ops):
            if position in self.deactivated:
                continue
            if position >= self._index_bullet:
                break
            kwargs = op(**kwargs)
        return kwargs

    def postcache_call(self, **kwargs):
        """Apply the active operations located after the bullet.

        When no bullet is present this is identical to calling the
        composition directly.
        """
        if not self.has_bullet_cache:
            return self(**kwargs)
        for position, op in enumerate(self.ops):
            if position > self._index_bullet and position not in self.deactivated:
                kwargs = op(**kwargs)
        return kwargs

    @property
    def has_bullet_cache(self):
        """Whether a ``CacheBullet`` is present.

        As a side effect, ``_index_bullet`` is set to the bullet's index
        (or reset to 0 when there is none).
        """
        for position, op in enumerate(self.ops):
            if isinstance(op, CacheBullet):
                self._index_bullet = position
                return True
        self._index_bullet = 0
        return False

    def __str__(self):
        lines = []
        for position, op in enumerate(self.ops):
            status = 'Active' if position not in self.deactivated else 'Inactive'
            lines.append('%i_' % position + str(op) + ' STATUS: ' + status + ' \n')
        return ''.join(lines)

    def __repr__(self):
        return str(self)

    def __len__(self):
        return len(self.ops)
/Flask-Philo-3.7.1.tar.gz/Flask-Philo-3.7.1/flask_philo/cloud/aws/sqs.py | from .utils import run_action
from prettytable import PrettyTable
import argparse
import boto3
import boto3.ec2
import json
import uuid
# Module-level reference to the Flask app, assigned by ``run()`` so the
# helper functions (e.g. ``list_queues``) can log through its logger.
app = None
def send_message(queue_url, message_body, region=None):
    """Publish ``message_body`` to the SQS queue at ``queue_url``; returns
    the boto3 ``send_message`` response dict."""
    return boto3.client('sqs', region_name=region).send_message(
        QueueUrl=queue_url, MessageBody=message_body)
def send_message_batch(queue_url, entries, region=None):
    """Send a batch of ``entries`` (``{'Id': ..., 'MessageBody': ...}``
    dicts) to the SQS queue at ``queue_url``."""
    return boto3.client('sqs', region_name=region).send_message_batch(
        QueueUrl=queue_url, Entries=entries)
def list_queues():
    """Return a dict mapping region name to the ``list_queues`` response
    for every region whose queues could be listed; regions that error out
    are logged via ``app.logger`` and omitted."""
    region_queues = {}
    ec2 = boto3.client('ec2', 'us-west-2')
    for region_info in ec2.describe_regions()['Regions']:
        region = region_info['RegionName']
        try:
            queues = boto3.client('sqs', region_name=region).list_queues()
        except Exception as e:
            app.logger.info(e)
        else:
            if queues:
                region_queues[region] = queues
    return region_queues
def create_queue(queue_name, region=None, attributes={}):
    """Create an SQS queue named ``queue_name`` with optional queue
    ``attributes``.

    The mutable default for ``attributes`` is safe here: it is only
    forwarded to boto3, never mutated.
    """
    return boto3.client('sqs', region_name=region).create_queue(
        QueueName=queue_name, Attributes=attributes)
def receive_message(queue_url, max_number_of_messages=1, region=None):
    """Fetch up to ``max_number_of_messages`` messages from the SQS queue
    at ``queue_url``."""
    return boto3.client('sqs', region_name=region).receive_message(
        QueueUrl=queue_url,
        MaxNumberOfMessages=max_number_of_messages,
    )
def delete_message(queue_url, receipt_handle, region=None):
    """Delete the message identified by ``receipt_handle`` from the SQS
    queue at ``queue_url``."""
    return boto3.client('sqs', region_name=region).delete_message(
        QueueUrl=queue_url,
        ReceiptHandle=receipt_handle,
    )
def purge_queue(queue_url, region=None):
    """Remove all messages from the SQS queue at ``queue_url``."""
    return boto3.client('sqs', region_name=region).purge_queue(
        QueueUrl=queue_url,
    )
def delete_queue(queue_url, region=None):
    """Delete the SQS queue at ``queue_url``."""
    return boto3.client('sqs', region_name=region).delete_queue(
        QueueUrl=queue_url,
    )
def run(dapp, cmd):
    """Entry point for the SQS management commands.

    ``dapp`` is the Flask app (stored module-wide so helpers such as
    ``list_queues`` can log through it); ``cmd`` selects which of the
    nested ``*_cmd`` closures below to execute via ``run_action``. Each
    closure parses its own CLI flags with argparse.
    """
    global app
    app = dapp
    def list_queues_cmd():
        # Render every queue in every region as an ASCII table.
        t = PrettyTable([
            'Queue Url', 'Region'
        ])
        for region, data in list_queues().items():
            if 'QueueUrls' in data:
                for q in data['QueueUrls']:
                    t.add_row([q, region])
        print(t)
    def send_message_batch_cmd():
        parser = argparse.ArgumentParser()
        parser.add_argument(
            '--region', required=True, help='AWS Region')
        parser.add_argument('--queue_url', required=True)
        parser.add_argument('--entries', required=True)
        args, extra_params = parser.parse_known_args()
        # --entries is a JSON list of message bodies; each batch entry
        # needs a unique Id, generated here per message.
        entries = [
            {'Id': str(uuid.uuid4()), 'MessageBody': str(entry)}
            for entry in json.loads(args.entries)]
        print(
            send_message_batch(
                args.queue_url, entries, region=args.region))
    def send_message_cmd():
        parser = argparse.ArgumentParser()
        parser.add_argument(
            '--region', required=True, help='AWS Region')
        parser.add_argument('--queue_url', required=True)
        parser.add_argument('--message_body', required=True)
        args, extra_params = parser.parse_known_args()
        print(
            send_message(
                args.queue_url, args.message_body, region=args.region))
    def create_queue_cmd():
        parser = argparse.ArgumentParser()
        parser.add_argument(
            '--region', required=True, help='AWS Region')
        parser.add_argument('--queue_name', required=True)
        # Optional queue attributes, supplied as a JSON object string.
        parser.add_argument('--json_attributes', required=False, default='{}')
        args, extra_params = parser.parse_known_args()
        print(
            create_queue(
                args.queue_name, region=args.region,
                attributes=json.loads(args.json_attributes)))
    def receive_message_cmd():
        parser = argparse.ArgumentParser()
        parser.add_argument(
            '--region', required=True, help='AWS Region')
        parser.add_argument('--queue_url', required=True)
        parser.add_argument(
            '--max_number_of_messages', required=False, default=1, type=int)
        args, extra_params = parser.parse_known_args()
        print(
            receive_message(
                args.queue_url,
                max_number_of_messages=args.max_number_of_messages,
                region=args.region))
    def delete_message_cmd():
        parser = argparse.ArgumentParser()
        parser.add_argument(
            '--region', required=True, help='AWS Region')
        parser.add_argument('--queue_url', required=True)
        parser.add_argument('--receipt_handle', required=True)
        args, extra_params = parser.parse_known_args()
        print(
            delete_message(
                args.queue_url, args.receipt_handle, region=args.region)
        )
    def purge_queue_cmd():
        parser = argparse.ArgumentParser()
        parser.add_argument(
            '--region', required=True, help='AWS Region')
        parser.add_argument('--queue_url', required=True)
        args, extra_params = parser.parse_known_args()
        print(purge_queue(args.queue_url, region=args.region))
    def delete_queue_cmd():
        parser = argparse.ArgumentParser()
        parser.add_argument(
            '--region', required=True, help='AWS Region')
        parser.add_argument('--queue_url', required=True)
        args, extra_params = parser.parse_known_args()
        print(delete_queue(args.queue_url, region=args.region))
    # Dispatch table mapping the command name to its handler closure.
    actions = {
        'create_queue': create_queue_cmd,
        'list_queues': list_queues_cmd,
        'send_message': send_message_cmd,
        'send_message_batch': send_message_batch_cmd,
        'receive_message': receive_message_cmd,
        'delete_message': delete_message_cmd,
        'purge_queue': purge_queue_cmd,
        'delete_queue': delete_queue_cmd
    }
    run_action(actions, cmd)
/JRPC-utils-0.2.1.tar.gz/JRPC-utils-0.2.1/jrpcutils.py | import json
import inspect
JSONRPC_VERSION = "2.0"
# Pre-defined error codes from the JSON-RPC 2.0 specification. All of them
# are NEGATIVE integers in the reserved -32768..-32000 range; the original
# values for "Invalid params" and "Internal error" were missing the minus
# sign (32602/32603 instead of -32602/-32603).
PARSE_ERROR_CODE = -32700
INVALID_REQUEST_CODE = -32600
METHOD_NOT_FOUND_CODE = -32601
INVALID_PARAMS_CODE = -32602
INTERNAL_ERROR_CODE = -32603
# -32000 to -32099 is reserved for implementation-defined server errors.
SERVER_ERROR_CODE = -32000
class JsonRpcException(Exception):
    """
    Common base class for all json rpc exceptions.

    Subclasses define ``code`` and ``message`` as class attributes; an
    explicit message passed to the constructor shadows the class default.
    """

    def __init__(self, message=None):
        if message is None:
            return
        self.message = message
class ParseError(JsonRpcException):
    """
    Parse error: invalid JSON was received by the server, i.e. an error
    occurred on the server while parsing the JSON text (JSON-RPC 2.0).
    """
    code = PARSE_ERROR_CODE
    message = 'ParseError'
class InvalidRequest(JsonRpcException):
    """
    Invalid Request: the JSON sent is not a valid Request object
    (JSON-RPC 2.0).
    """
    code = INVALID_REQUEST_CODE
    message = 'Invalid Request'
class MethodNotFound(JsonRpcException):
    """
    Method not found: the method does not exist / is not available
    (JSON-RPC 2.0).
    """
    code = METHOD_NOT_FOUND_CODE
    message = 'Method not found'
class InvalidParams(JsonRpcException):
    """
    Invalid method parameter(s) (JSON-RPC 2.0).
    """
    # NOTE(review): INVALID_PARAMS_CODE is declared as positive 32602 at the
    # top of this module, but the JSON-RPC 2.0 spec defines -32602 — verify.
    code = INVALID_PARAMS_CODE
    message = 'Invalid params'
class InternalError(JsonRpcException):
    """
    Internal JSON-RPC error.
    """
    # NOTE(review): INTERNAL_ERROR_CODE is declared as positive 32603 at the
    # top of this module, but the JSON-RPC 2.0 spec defines -32603 — verify.
    code = INTERNAL_ERROR_CODE
    message = 'JSON-RPC Internal Error'
class ServerError(JsonRpcException):
    """
    Base class for implementation-defined server errors; JSON-RPC 2.0
    reserves codes -32000 to -32099 for them.

    For example::

        class MyFirstServerError(ServerError):
            code = -32050

        class MySecondServerError(ServerError):
            code = -32051
    """
    code = SERVER_ERROR_CODE
    message = 'Server error'
class JsonRpcRequest(object):
    """
    JSON RPC Request: parses and validates a raw JSON-RPC 2.0 request
    string into ``version``, ``method``, ``params`` and ``id`` attributes.

    NOTE(review): this class targets Python 2 (it references the
    ``unicode`` builtin below) - confirm before running under Python 3.
    """
    version = None
    method = None
    params = None
    id = None
    def __init__(self, raw_request):
        """
        This method forms a JsonRpcRequest object (self) from json formatted string.

        Raises ``ParseError`` for invalid JSON and ``InvalidRequest`` for
        any structural/validation failure (missing or non-integer id,
        wrong version, missing/empty/non-string method, bad params type).
        """
        try:
            data = json.loads(raw_request)
        except ValueError:
            raise ParseError()
        if not isinstance(data, dict):
            raise InvalidRequest("Invalid request format.")
        # Notifications (requests without an id) are not supported: the id
        # is mandatory and must be an integer here.
        request_id = data.get("id")
        if request_id is None:
            raise InvalidRequest("The id must be specified.")
        if not isinstance(request_id, int):
            raise InvalidRequest("The id must be integer.")
        self.id = request_id
        version = data.get("jsonrpc")
        if version != JSONRPC_VERSION:
            raise InvalidRequest("The version must be specified.")
        self.version = JSONRPC_VERSION
        method = data.get("method")
        if method is None:
            raise InvalidRequest("The method must be specified.")
        if not isinstance(method, (str, unicode,)):
            raise InvalidRequest("The method must be string.")
        if method == "":
            raise InvalidRequest("The method must not be an empty string.")
        self.method = method
        # params is optional; when present it must be positional (list) or
        # named (dict), per the JSON-RPC 2.0 structure.
        params = data.get("params")
        if params is not None and not isinstance(params, (dict, list,)):
            raise InvalidRequest("Invalid params format.")
        self.params = params
    def get_json_representation(self, without_serialization=False):
        """Build the request dict; serialized to a JSON string unless
        ``without_serialization`` is true."""
        request = {
            "jsonrpc": self.version,
            "method": self.method,
            "params": self.params,
            "id": self.id,
        }
        if without_serialization:
            return request
        return json.dumps(request)
    def __unicode__(self):
        # Python 2 text representation; mirrors __str__.
        return self.get_json_representation()
    def __str__(self):
        return self.get_json_representation()
class JsonRpcResponse(object):
    """
    Json Rpc Response: either a success (``result``) or an error
    (``error_code``/``error_message``) response for a given request id.
    """
    id = None
    error_code = None
    error_message = None
    result = None

    def __init__(self, result=None, request_id=None, error_code=None, error_message=None):
        # A response must carry either a result or an error code.
        if result is None and error_code is None:
            raise Exception("You must specify result or error_code.")
        self.id = request_id
        self.error_code = error_code
        self.error_message = error_message
        self.result = result

    def get_json_representation(self, without_serialization=False):
        """Build the response dict; serialized to a JSON string unless
        ``without_serialization`` is true."""
        if self.error_code is None:
            payload = {
                "jsonrpc": JSONRPC_VERSION,
                "id": self.id,
                "result": self.result,
            }
        else:
            payload = {
                "jsonrpc": JSONRPC_VERSION,
                "id": self.id,
                "error": {
                    "code": self.error_code,
                    "message": self.error_message
                },
            }
        if without_serialization:
            return payload
        return json.dumps(payload)

    def __unicode__(self):
        # Python 2 text representation; mirrors __str__.
        return self.get_json_representation()

    def __str__(self):
        return self.get_json_representation()
def get_id(raw_request):
    """
    Try to extract the integer ``id`` member from a raw JSON-RPC request.

    Returns the id when ``raw_request`` is valid JSON carrying an integer
    ``id``; returns ``None`` for malformed JSON, non-object payloads, or a
    missing/non-integer id. (The original version crashed with
    ``AttributeError`` on valid JSON that was not an object, e.g. ``"[1]"``.)
    """
    try:
        data = json.loads(raw_request)
    except ValueError:
        return None
    # A request is a JSON object; any other payload cannot carry an id.
    if not isinstance(data, dict):
        return None
    request_id = data.get("id")
    if not isinstance(request_id, int):
        return None
    return request_id
# Global registry mapping a JSON-RPC method name to the function (via the
# ``procedure`` decorator) or JsonRpcProcedure subclass (via the metaclass)
# that handles it; consulted by ``resolver``.
route_paths = dict()
def resolver(method):
    """Look up the handler registered in ``route_paths`` for ``method``.

    :param method: The JSON-RPC method name.
    :returns: The registered function or procedure class.
    :raises MethodNotFound: when nothing is registered under that name.
    """
    try:
        return route_paths[method]
    except KeyError:
        raise MethodNotFound()
def procedure(path):
def wrap_func(f):
route_paths[path] = f
return f
return wrap_func
class Param(object):
    """Declarative description of a single JSON-RPC procedure parameter."""
    # Default validator chain; subclasses override this with their own list.
    validators = []

    def __init__(self, required=True, validators=None):
        self.required = required
        # Build a fresh per-instance list instead of extending the class
        # attribute in place: the original extend() mutated the shared
        # class-level ``validators`` list, so extra validators leaked into
        # every other instance (and, for subclasses, into the subclass
        # itself). Also avoids the mutable-default-argument pitfall.
        self.validators = list(self.validators) + list(validators or [])

    def to_python(self, value):
        """
        Override this function to add an extra transformation for the
        param's value before it is assigned to the corresponding
        Procedure class's field.
        """
        return value

    def validate(self, name, value):
        """Run every validator against *value*; validators raise on failure."""
        for validator in self.validators:
            validator(name, value)
class AutoDiscoverProcedure(type):
    """
    This metaclass adds a routing rule (method -> class) every time Procedure's subclass creates.

    Every non-abstract subclass must declare an inner ``Meta`` class with a
    ``method`` attribute; the new class is registered in ``route_paths``
    under that method name. Classes whose ``Meta`` sets ``abstract`` are
    never routed.
    """
    def __new__(mcs, name, bases, attrs):
        # Renamed the third parameter from 'dict' so the builtin is not
        # shadowed inside this method.
        new_class = type.__new__(mcs, name, bases, attrs)
        if 'Meta' not in attrs:
            raise Exception('You must specify inner class Meta for class {}'.format(name))
        meta = attrs['Meta']
        # Only meta's own attributes count; inherited ones are ignored.
        if not meta.__dict__.get('abstract'):
            if 'method' not in meta.__dict__:
                raise Exception('You must specify method\'s name for class {} in inner class Meta'.format(name))
            route_paths[meta.__dict__['method']] = new_class
        return new_class
class JsonRpcProcedure(object):
    """Base class for JSON-RPC procedures declared with Param fields."""
    # Python 2 metaclass hook (this module predates the py3 syntax).
    __metaclass__ = AutoDiscoverProcedure

    def handle(self, params):
        """Bind *params* onto the instance, then execute the procedure."""
        self.build_params(params)
        return self.proceed()

    def build_params(self, params):
        """Convert, validate and assign every declared Param from *params*."""
        for name, declared in self.__class__.__dict__.items():
            if not isinstance(declared, Param):
                continue
            if name in params:
                value = declared.to_python(params[name])
                declared.validate(name, value)
                setattr(self, name, value)
            elif declared.required:
                raise InvalidParams('There is not a required param {} in the request'.format(name))
            else:
                # Optional and absent: bind None so the attribute exists.
                setattr(self, name, None)

    def proceed(self):
        """Subclasses override this with the actual procedure body."""
        return 'not implemented'

    class Meta:
        abstract = True
def dispatch(raw_request, resolver_func):
    """Handle one raw JSON-RPC request string and return a JsonRpcResponse.

    The method name is resolved through *resolver_func*; plain functions
    are called with the request params, while JsonRpcProcedure subclasses
    are instantiated and run via handle(). Any JsonRpcException becomes an
    error response; other exceptions (including the Exception raised below
    for an unsupported handler type) propagate to the caller.
    """
    try:
        request = JsonRpcRequest(raw_request)
        method = resolver_func(request.method)
        if inspect.isfunction(method):
            result = method(request.params)
        elif inspect.isclass(method) and issubclass(method, JsonRpcProcedure):
            result = method().handle(request.params)
        else:
            raise Exception('Incorrect handler for method {}'.format(request.method))
        response = JsonRpcResponse(request_id=request.id, result=result)
    except JsonRpcException as e:
        # Re-parse the raw request for the id: the failure may have happened
        # before JsonRpcRequest was constructed.
        # NOTE(review): e.message is a Python 2 idiom; on Python 3 this relies
        # on JsonRpcException defining a 'message' attribute — verify.
        response = JsonRpcResponse(request_id=get_id(raw_request), error_code=e.code, error_message=e.message)
    return response
def only_integer(name, value):
    """Validator: reject any value that is not a plain integer."""
    # bool is a subclass of int, so exclude it explicitly: a JSON
    # true/false must not satisfy an integer-typed parameter.
    if isinstance(value, bool) or not isinstance(value, int):
        raise InvalidParams("Param {} must be an integer".format(name))
def only_string(name, value):
    """Validator: reject any value that is not a text string."""
    try:
        text_types = (str, unicode)  # Python 2: accept both str and unicode
    except NameError:
        # Python 3: 'unicode' no longer exists; the original tuple raised
        # NameError here instead of validating.
        text_types = (str,)
    if not isinstance(value, text_types):
        raise InvalidParams("Param {} must be a string".format(name))
def only_list(name, value):
    """Validator: reject any value that is not a list."""
    if isinstance(value, list):
        return
    raise InvalidParams("Param {} must be a list".format(name))
class IntParam(Param):
    # Parameter whose value must be an integer (see only_integer).
    validators = [only_integer, ]
class StringParam(Param):
    # Parameter whose value must be a text string (see only_string).
    validators = [only_string, ]
class ListParam(Param):
    # Parameter whose value must be a list (see only_list).
    validators = [only_list, ]
/IdracRedfishSupport-0.0.8.tar.gz/IdracRedfishSupport-0.0.8/SupportAssistCollectionNetworkShareREDFISH.py |
import argparse
import getpass
import json
import logging
import re
import requests
import sys
import time
import warnings
from datetime import datetime
from pprint import pprint
warnings.filterwarnings("ignore")
# --- Command-line interface --------------------------------------------------
parser = argparse.ArgumentParser(description="Python script using Redfish API with OEM extension to either export support assist (SA) collection to a network share or get/accept/register End User license agreement (EULA). NOTE: the SA file copied to your network share will be in ZIP format using your server service tag in the name. Example of SA report file name \"TSR20200122131132_M538C3S.zip\"")
# iDRAC connection options.
parser.add_argument('-ip',help='iDRAC IP address', required=False)
parser.add_argument('-u', help='iDRAC username', required=False)
parser.add_argument('-p', help='iDRAC password. If you do not pass in argument -p, script will prompt to enter user password which will not be echoed to the screen.', required=False)
parser.add_argument('-x', help='Pass in X-Auth session token for executing Redfish calls. All Redfish calls will use X-Auth token instead of username/password', required=False)
parser.add_argument('--ssl', help='SSL cert verification for all Redfish calls, pass in value \"true\" or \"false\". By default, this argument is not required and script ignores validating SSL cert for all Redfish calls.', required=False)
# Actions; exactly one is expected per invocation (enforced in __main__).
parser.add_argument('--script-examples', help='Get executing script examples', action="store_true", dest="script_examples", required=False)
parser.add_argument('--accept', help='Accept support assist end user license agreement (EULA)', action="store_true", required=False)
parser.add_argument('--get', help='Get support assist end user license agreement (EULA)', action="store_true", required=False)
parser.add_argument('--register', help='Register Support Assist for iDRAC. NOTE: You must also pass in city, company name, country, first name, first email, last name, phone number, street, state and zip arguments to register. NOTE: ISM must be installed and running on the operating system before you register SA.', action="store_true", required=False)
parser.add_argument('--export-network', help='Export Support Assist collection to network share. NOTE: Make sure you also use arguments ipaddress, sharetype, sharename and dataselectorarrayin for export to network share. If using CIFS, you need to also use username and password arguments.', dest="export_network", action="store_true", required=False)
parser.add_argument('--export-last', help='Export Support Assist last collection stored on iDRAC to network share. NOTE: Make sure you also use arguments --shareip, --sharetype and --sharename.', dest="export_last", action="store_true", required=False)
# SupportAssist registration contact details (used with --register).
parser.add_argument('--city', help='Pass in city name to register Support Assist', required=False)
parser.add_argument('--companyname', help='Pass in company name to register Support Assist', required=False)
parser.add_argument('--country', help='Pass in country to register Support Assist', required=False)
parser.add_argument('--first-email', help='Pass in primary (first) email address to register Support Assist', dest="first_email", required=False)
parser.add_argument('--firstname', help='Pass in firstname to register Support Assist', required=False)
parser.add_argument('--lastname', help='Pass in lastname to register Support Assist', required=False)
parser.add_argument('--phonenumber', help='Pass in phone number to register Support Assist', required=False)
parser.add_argument('--second-firstname', help='Pass in firstname of the secondary contact to register Support Assist', dest="second_firstname", required=False)
parser.add_argument('--second-lastname', help='Pass in lastname of the secondary contact to register Support Assist', dest="second_lastname", required=False)
parser.add_argument('--second-phonenumber', help='Pass in phone number of the secondary contact to register Support Assist', dest="second_phonenumber", required=False)
parser.add_argument('--second-email', help='Pass in email address of the secondary contact to register Support Assist', dest="second_email", required=False)
parser.add_argument('--street', help='Pass in street name to register Support Assist', required=False)
parser.add_argument('--state', help='Pass in state to register Support Assist', required=False)
parser.add_argument('--zip', help='Pass in zipcode to register Support Assist', required=False)
# Network share and collection options (used with --export-network / --export-last).
parser.add_argument('--shareip', help='Pass in the IP address of the network share', required=False)
parser.add_argument('--sharetype', help='Pass in the share type of the network share. Supported values are NFS, CIFS, HTTP, HTTPS, FTP, TFTP', required=False)
parser.add_argument('--sharename', help='Pass in the network share share name', required=False)
parser.add_argument('--username', help='Pass in network share username if auth is configured (this is required for CIFS, optional for HTTP and HTTPS)', required=False)
parser.add_argument('--password', help='Pass in network share username password if auth is configured (this is required for CIFS, optional for HTTP and HTTPS)', required=False)
parser.add_argument('--data', help='Pass in a value for the type of data you want to collect. Supported values are: pass in 0 for \"DebugLogs\", pass in 1 for "HWData\", pass in 2 for \"OSAppData\", pass in 3 for \"TTYLogs\", pass in 4 for \"TelemetryReports\". Note: If you do not pass in this argument, default settings will collect HWData. Note: You can pass in one value or multiple values to collect. If you pass in multiple values, use comma separator for the values (Example: 0,3)', required=False)
parser.add_argument('--filter', help='Filter personal identification information (PII) for Support Assist collection. Supported values are: 0 for \"No\" and 1 for \"Yes\". NOTE: If you don\'t pass in this argument, no filtering is performed for the collection.', required=False)
# Parsed arguments as a plain dict; all code below uses args["name"] lookups.
args = vars(parser.parse_args())
# Plain-message INFO logging to stdout.
logging.basicConfig(format='%(message)s', stream=sys.stdout, level=logging.INFO)
def script_examples():
    """Print example command lines for this script, then exit."""
    print("""\n- SupportAssistCollectionNetworkShareREDFISH.py -ip 192.168.0.120 -u root -p calvin --get, this example will get SA EULA current status.
    \n- SupportAssistCollectionNetworkShareREDFISH.py -ip 192.168.0.120 -u root -p calvin --accept, this example will accept SA EULA.
    \n- SupportAssistCollectionNetworkShareREDFISH.py -ip 192.168.0.120 -u root -p calvin --register --city Austin --state Texas --zip 78665 --companyname Dell --country US --firstname test --lastname tester --phonenumber "512-123-4567" --first-email \"tester1@yahoo.com\" --second-email \"tester2@gmail.com\" --street \"1234 One Dell Way\", this example shows registering SupportAssist.
    \n- SupportAssistCollectionNetworkShareREDFISH.py -ip 192.168.0.120 -u root -p calvin --export-network --shareip 192.168.0.130 --sharetype HTTP --sharename http_share --data 3, this example wil export SA collection for storage TTYlogs only to HTTP share.
    \n- SupportAssistCollectionNetworkShareREDFISH.py -ip 192.168.0.120 -u root -p calvin --export-last --shareip 192.168.0.130 --sharetype HTTP --sharename http_share, this example will export last cached SupportAssist collection to network share.""")
    sys.exit(0)
def check_supported_idrac_version():
    """Verify the iDRAC firmware exposes the SupportAssistCollection action.

    Exits the script when authentication fails or when DellLCService does
    not advertise any SupportAssistCollection action (older firmware).
    Relies on the globals idrac_ip, verify_cert and the parsed args.
    """
    if args["x"]:
        response = requests.get('https://%s/redfish/v1/Dell/Managers/iDRAC.Embedded.1/DellLCService' % idrac_ip, verify=verify_cert, headers={'X-Auth-Token': args["x"]})
    else:
        response = requests.get('https://%s/redfish/v1/Dell/Managers/iDRAC.Embedded.1/DellLCService' % idrac_ip, verify=verify_cert,auth=(idrac_username, idrac_password))
    # Use the public Response.reason attribute instead of poking __dict__.
    if response.reason == "Unauthorized":
        logging.error("\n- FAIL, unauthorized to execute Redfish command. Check to make sure you are passing in correct iDRAC username/password and the IDRAC user has the correct privileges")
        sys.exit(0)
    data = response.json()
    # The Actions object lists the OEM methods this firmware supports;
    # look for any SupportAssistCollection action among its keys.
    supported = any("SupportAssistCollection" in action for action in data['Actions'])
    if not supported:
        logging.warning("\n- WARNING, iDRAC version installed does not support this feature using Redfish API")
        sys.exit(0)
def support_assist_accept_EULA():
    """POST the SupportAssistAcceptEULA action to accept the SA EULA.

    Exits the script when the iDRAC rejects the request. Relies on the
    globals idrac_ip, verify_cert and the parsed args.
    """
    url = 'https://%s/redfish/v1/Dell/Managers/iDRAC.Embedded.1/DellLCService/Actions/DellLCService.SupportAssistAcceptEULA' % (idrac_ip)
    method = "SupportAssistAcceptEULA"
    # The action takes no parameters; an empty JSON body is still required.
    payload = {}
    if args["x"]:
        headers = {'content-type': 'application/json', 'X-Auth-Token': args["x"]}
        response = requests.post(url, data=json.dumps(payload), headers=headers, verify=verify_cert)
    else:
        headers = {'content-type': 'application/json'}
        response = requests.post(url, data=json.dumps(payload), headers=headers, verify=verify_cert,auth=(idrac_username,idrac_password))
    if response.status_code == 202 or response.status_code == 200:
        # Success is reported to the user once, below, at INFO level.
        logging.debug("- PASS, POST command passed to accept EULA")
    else:
        data = response.json()
        logging.error("\n- FAIL, status code %s returned, detailed error information:\n %s" % (response.status_code, data))
        sys.exit(0)
    logging.info("\n- PASS, %s method passed and End User License Agreement (EULA) has been accepted" % method)
def support_assist_get_EULA_status():
    """Query the SupportAssist EULA status and print each reported field."""
    logging.info("\n- Current Support Assist End User License Agreement Information -\n")
    url = 'https://%s/redfish/v1/Dell/Managers/iDRAC.Embedded.1/DellLCService/Actions/DellLCService.SupportAssistGetEULAStatus' % (idrac_ip)
    # The action takes no parameters; an empty JSON body is still required.
    payload = {}
    if args["x"]:
        headers = {'content-type': 'application/json', 'X-Auth-Token': args["x"]}
        response = requests.post(url, data=json.dumps(payload), headers=headers, verify=verify_cert)
    else:
        headers = {'content-type': 'application/json'}
        response = requests.post(url, data=json.dumps(payload), headers=headers, verify=verify_cert,auth=(idrac_username,idrac_password))
    for key, value in response.json().items():
        # @Message.ExtendedInfo entries are Redfish boilerplate; skip them.
        if "ExtendedInfo" not in key:
            print("%s: %s" % (key, value))
def support_assist_register():
    """Register SupportAssist for this iDRAC.

    Enables the OS to iDRAC pass-through interface (a prerequisite for
    registration), POSTs the SupportAssistRegister action with the contact
    details supplied on the command line, then polls
    SupportAssistGetEULAStatus to confirm the registration succeeded.
    Exits the script on any failure.
    """
    # Step 1: registration requires the OS-BMC pass-through to be enabled.
    url = 'https://%s/redfish/v1/Managers/iDRAC.Embedded.1/Attributes' % idrac_ip
    payload = {"Attributes":{"OS-BMC.1.AdminState":"Enabled"}}
    if args["x"]:
        headers = {'content-type': 'application/json', 'X-Auth-Token': args["x"]}
        response = requests.patch(url, data=json.dumps(payload), headers=headers, verify=verify_cert)
    else:
        headers = {'content-type': 'application/json'}
        response = requests.patch(url, data=json.dumps(payload), headers=headers, verify=verify_cert,auth=(idrac_username,idrac_password))
    if response.status_code != 200:
        # Fix: the original message formatted args["s"], an argument this
        # script never defines, which raised KeyError on this error path.
        logging.error("\n- FAIL, PATCH command failed to enable OS to iDRAC pass-through, status code is: %s\n" % response.status_code)
        logging.error("Extended Info Message: {0}".format(response.json()))
        sys.exit(0)
    # Step 2: POST the registration action with the contact details.
    url = 'https://%s/redfish/v1/Dell/Managers/iDRAC.Embedded.1/DellLCService/Actions/DellLCService.SupportAssistRegister' % (idrac_ip)
    payload = {"City": args["city"], "CompanyName": args["companyname"], "Country":args["country"], "PrimaryFirstName":args["firstname"],"PrimaryLastName":args["lastname"], "PrimaryPhoneNumber":args["phonenumber"], "State":args["state"], "Street1": args["street"],"Zip":args["zip"]}
    # Optional contact fields are only sent when supplied.
    if args["first_email"]:
        payload["PrimaryEmail"] = args["first_email"]
    if args["second_email"]:
        payload["SecondaryEmail"] = args["second_email"]
    if args["second_firstname"]:
        payload["SecondaryFirstName"] = args["second_firstname"]
    if args["second_lastname"]:
        payload["SecondaryLastName"] = args["second_lastname"]
    if args["second_phonenumber"]:
        payload["SecondaryPhoneNumber"] = args["second_phonenumber"]
    if args["x"]:
        headers = {'content-type': 'application/json', 'X-Auth-Token': args["x"]}
        response = requests.post(url, data=json.dumps(payload), headers=headers, verify=verify_cert)
    else:
        headers = {'content-type': 'application/json'}
        response = requests.post(url, data=json.dumps(payload), headers=headers, verify=verify_cert,auth=(idrac_username,idrac_password))
    if response.status_code == 200 or response.status_code == 202:
        logging.info("\n- PASS, SupportAssistRegister action passed, status code %s returned" % response.status_code)
    else:
        logging.error("\n- FAIL, SupportAssistRegister action failed, status code %s returned. Detailed error results:\n" % response.status_code)
        data = response.__dict__
        print(data["_content"])
        sys.exit(0)
    # Step 3: confirm the registration took effect.
    url = 'https://%s/redfish/v1/Dell/Managers/iDRAC.Embedded.1/DellLCService/Actions/DellLCService.SupportAssistGetEULAStatus' % (idrac_ip)
    payload = {}
    logging.info("- INFO, validating if Support Assist is registered for iDRAC")
    time.sleep(15)  # give the iDRAC a moment to process the registration
    if args["x"]:
        headers = {'content-type': 'application/json', 'X-Auth-Token': args["x"]}
        response = requests.post(url, data=json.dumps(payload), headers=headers, verify=verify_cert)
    else:
        headers = {'content-type': 'application/json'}
        response = requests.post(url, data=json.dumps(payload), headers=headers, verify=verify_cert,auth=(idrac_username,idrac_password))
    data = response.json()
    if data["IsRegistered"] == "Registered":
        logging.info("\n- PASS, Support Assist verified as registered")
    else:
        logging.error("\n- FAIL, Support Assist not registered, current status is: %s" % data["IsRegistered"])
        sys.exit(0)
def export_support_assist_colection_network_share():
    """Start a SupportAssist collection/export job to a network share.

    (The misspelling "colection" is kept in the function name because the
    __main__ block calls it by this name.) On success sets the global
    ``job_id`` parsed from the accepted task's Location header; on failure
    exits the script.
    """
    global job_id
    if args["export_network"]:
        url = 'https://%s/redfish/v1/Dell/Managers/iDRAC.Embedded.1/DellLCService/Actions/DellLCService.SupportAssistCollection' % (idrac_ip)
        method = "SupportAssistCollection"
    elif args["export_last"]:
        url = 'https://%s/redfish/v1/Dell/Managers/iDRAC.Embedded.1/DellLCService/Actions/DellLCService.SupportAssistExportLastCollection' % (idrac_ip)
        method = "SupportAssistExportLastCollection"
    payload = {}
    if args["filter"]:
        if args["filter"] == "0":
            payload["Filter"] = "No"
        elif args["filter"] == "1":
            payload["Filter"] = "Yes"
    if args["shareip"]:
        payload["IPAddress"] = args["shareip"]
    if args["sharetype"]:
        payload["ShareType"] = args["sharetype"]
    if args["sharename"]:
        payload["ShareName"] = args["sharename"]
    if args["username"]:
        payload["UserName"] = args["username"]
    if args["password"]:
        payload["Password"] = args["password"]
    if args["data"]:
        # Map numeric selector codes to their Redfish names. A single value
        # and a comma-separated list are handled uniformly (the original
        # duplicated this mapping in two branches); unknown codes are
        # silently ignored and duplicates collapse, matching the original.
        codes = set(args["data"].split(","))
        selector_table = [("0", "DebugLogs"), ("1", "HWData"), ("2", "OSAppData"), ("3", "TTYLogs"), ("4", "TelemetryReports")]
        payload["DataSelectorArrayIn"] = [name for code, name in selector_table if code in codes]
    if args["x"]:
        headers = {'content-type': 'application/json', 'X-Auth-Token': args["x"]}
        response = requests.post(url, data=json.dumps(payload), headers=headers, verify=verify_cert)
    else:
        headers = {'content-type': 'application/json'}
        response = requests.post(url, data=json.dumps(payload), headers=headers, verify=verify_cert,auth=(idrac_username,idrac_password))
    data = response.json()
    if response.status_code != 202:
        logging.error("\n- FAIL, status code %s returned, POST command failure results:\n %s" % (response.status_code, data))
        sys.exit(0)
    try:
        job_id = response.headers['Location'].split("/")[-1]
    except KeyError:
        # Narrowed from a bare except: only a missing Location header can
        # fail here.
        logging.error("- FAIL, unable to find job ID in headers POST response, headers output is:\n%s" % response.headers)
        sys.exit(0)
    logging.info("- PASS, job ID %s successfully created for %s method\n" % (job_id, method))
def loop_job_status():
    """Poll the SA job until it completes, fails or times out (1 hour).

    Relies on the globals job_id, idrac_ip, verify_cert and the parsed
    args. Prints final job details and the server service tag on success;
    exits the script on any failure or timeout.
    """
    start_time = datetime.now()
    count_number = 0  # last PercentComplete printed, to avoid duplicate lines
    while True:
        if args["x"]:
            response = requests.get('https://%s/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/Jobs/%s' % (idrac_ip, job_id), verify=verify_cert, headers={'X-Auth-Token': args["x"]})
        else:
            response = requests.get('https://%s/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/Jobs/%s' % (idrac_ip, job_id), verify=verify_cert,auth=(idrac_username, idrac_password))
        current_time = (datetime.now()-start_time)
        if response.status_code != 200:
            logging.error("\n- FAIL, Command failed to check job status, return code %s" % response.status_code)
            logging.error("Extended Info Message: {0}".format(response.json()))
            sys.exit(0)
        data = response.json()
        # Lexicographic compare of the elapsed-time prefix; works because
        # str(timedelta) renders as H:MM:SS (zero-padded) below 10 hours.
        if str(current_time)[0:7] >= "1:00:00":
            logging.error("\n- FAIL: Timeout of 1 hour has been hit, script stopped\n")
            sys.exit(0)
        elif data['JobState'] == "CompletedWithErrors":
            logging.info("\n- INFO, SA collection completed with errors, please check iDRAC Lifecycle Logs for more details")
            sys.exit(0)
        elif "fail" in data['Message'].lower() or "error" in data['Message'].lower():
            logging.error("- FAIL: job ID %s failed, failed message is: %s" % (job_id, data['Message']))
            sys.exit(0)
        elif data['JobState'] == "Completed":
            # Completed: report PASS/FAIL based on the exact success message,
            # dump all job fields, then look up the service tag so the user
            # can find the exported ZIP on the share.
            if data['Message'] == "The SupportAssist Collection and Transmission Operation is completed successfully.":
                logging.info("\n--- PASS, Final Detailed Job Status Results ---\n")
            else:
                logging.error("\n--- FAIL, Final Detailed Job Status Results ---\n")
            for i in data.items():
                pprint(i)
            if args["x"]:
                response = requests.get('https://%s/redfish/v1/Systems/System.Embedded.1' % idrac_ip, verify=verify_cert, headers={'X-Auth-Token': args["x"]})
            else:
                response = requests.get('https://%s/redfish/v1/Systems/System.Embedded.1' % idrac_ip, verify=verify_cert,auth=(idrac_username, idrac_password))
            data = response.json()
            service_tag = data['Oem']['Dell']['DellSystem']['NodeID']
            logging.info("\n- SA exported log file located on your network share should be in ZIP format with server service tag \"%s\" in the file name" % service_tag)
            break
        else:
            # Still running: print progress only when PercentComplete advances.
            count_number_now = data['PercentComplete']
            if count_number_now > count_number:
                logging.info("- INFO, %s, percent complete: %s" % (data['Message'].strip("."), data['PercentComplete']))
                count_number = count_number_now
            else:
                continue
if __name__ == "__main__":
    if args["script_examples"]:
        script_examples()
    # NOTE(review): 'and' binds tighter than 'or', so this condition is
    # really (ip and ssl) or u or p or x; passing only -u therefore skips
    # the IP check. Kept as-is to preserve the existing CLI behavior.
    if args["ip"] and args["ssl"] or args["u"] or args["p"] or args["x"]:
        idrac_ip = args["ip"]
        idrac_username = args["u"]
        if args["p"]:
            idrac_password = args["p"]
        if not args["p"] and not args["x"] and args["u"]:
            # Prompt so the password is never echoed or left in shell history.
            idrac_password = getpass.getpass("\n- Argument -p not detected, pass in iDRAC user %s password: " % args["u"])
        if args["ssl"]:
            if args["ssl"].lower() == "true":
                verify_cert = True
            elif args["ssl"].lower() == "false":
                verify_cert = False
            else:
                verify_cert = False
        else:
            verify_cert = False
        check_supported_idrac_version()
    else:
        logging.error("\n- FAIL, invalid argument values or not all required parameters passed in. See help text or argument --script-examples for more details.")
        sys.exit(0)
    if args["export_network"] or args["export_last"]:
        export_support_assist_colection_network_share()
        loop_job_status()
    elif args["accept"]:
        support_assist_accept_EULA()
    elif args["get"]:
        support_assist_get_EULA_status()
    # Fix: the parser stores --first-email under dest "first_email"; the
    # original checked args["email"], which raised KeyError on this branch.
    elif args["register"] and args["city"] and args["companyname"] and args["country"] and args["first_email"] and args["firstname"] and args["lastname"] and args["phonenumber"] and args["state"] and args["street"] and args["zip"]:
        support_assist_register()
    else:
        logging.error("\n- FAIL, invalid argument values or not all required parameters passed in. See help text or argument --script-examples for more details.")
/Faker-19.3.1.tar.gz/Faker-19.3.1/faker/providers/job/fr_FR/__init__.py | from .. import Provider as BaseProvider
# Source: ONISEP
# https://www.data.gouv.fr/fr/datasets/r/462f1a98-6f37-4414-9e08-016b78cc4d08
class Provider(BaseProvider):
jobs = [
"BIM manager",
"accessoiriste",
"accompagnant éducatif et social éducative et sociale",
"accompagnateur de tourisme équestre",
"accompagnateur de voyages",
"accompagnateur en moyenne montagne",
"acheteur",
"acheteur d'espaces publicitaires",
"actuaire",
"adjoint administratif administrative",
"administrateur de base de données",
"administrateur de biens",
"administrateur de logiciels de laboratoire",
"administrateur de mission humanitaire",
"administrateur de spectacle",
"administrateur judiciaire",
"administrateur réseaux",
"administrateur territorial",
"affûteur",
"agenceur de cuisines et salles de bains",
"agent arboricole",
"agent artistique",
"agent d'escale",
"agent d'exploitation de l'eau",
"agent de constatation des douanes",
"agent de développement des énergies renouvelables",
"agent de développement local",
"agent de développement touristique",
"agent de propreté et d'hygiène",
"agent de propreté urbaine",
"agent de sécurité",
"agent de sûreté ferroviaire",
"agent de transit",
"agent général générale d'assurances",
"agent hydrothermal hydrothermale",
"agent immobilier immobilière",
"agriculteur",
"agronome",
"aide-chimiste",
"aide-soignant",
"ajusteur-monteur",
"ambulancier",
"analyste de crédit",
"analyste financier",
"anatomiste",
"anesthésiste-réanimateur",
"animalier de laboratoire",
"animateur 2D et 3D",
"animateur d'activités physiques et sportives pour tous",
"animateur de bassin versant",
"animateur de radio et de télévision",
"animateur du patrimoine",
"animateur nature nature",
"animateur socioculturel socioculturelle",
"antiquaire",
"apiculteur",
"aquaculteur",
"architecte",
"architecte d'intérieur",
"architecte des systèmes d'information",
"architecte naval",
"architecte produit industriel",
"architecte réseaux",
"architecte web",
"archiviste",
"archéologue",
"art-thérapeute",
"artiste de cirque",
"ascensoriste",
"assistant commercial commerciale",
"assistant de gestion en PME",
"assistant de service social",
"assistant dentaire",
"assistant en architecture",
"assistant en ressources humaines",
"assistant en études de prix",
"assistant maternel maternelle",
"assistant réalisateur réalisatrice",
"astrophysicien",
"attaché commercial commerciale",
"attaché d'administration",
"attaché de presse",
"attaché de recherche clinique (ARC)",
"attaché territorial territoriale",
"audioprothésiste",
"auditeur externe",
"auditeur interne",
"auditeur qualité",
"auteur-compositeur interprète interprète",
"auxiliaire de puériculture",
"auxiliaire spécialisé vétérinaire",
"avocat",
"aérodynamicien",
"bactériologiste",
"barman",
"batelier",
"bibliothécaire",
"bijoutier-joaillier",
"bio-informaticien",
"biologiste en environnement",
"biologiste médical",
"biostatisticien",
"botaniste",
"bottier",
"boucher",
"boulanger",
"brancardier",
"brodeur",
"bronzier",
"cadreur",
"caissier",
"canalisateur",
"carreleur",
"carrossier",
"cartographe",
"chanteur",
"charcutier-traiteur",
"chargé de recherche en recrutement",
"chargé d'affaires en génie climatique",
"chargé d'affaires en génie mécanique",
"chargé d'études en marketing",
"chargé d'études en valorisation agricole des déchets",
"chargé d'études média",
"chargé d'études naturalistes",
"chargé d'études ressources humaines",
"chargé d'études économiques",
"chargé de clientèle banque",
"chargé de communication interne",
"chargé de pharmacovigilance",
"chargé de production",
"chargé de projet événementiel",
"chargé de recherche en acoustique musicale",
"chargé de recherche et développement déchets",
"chargé de référencement web",
"chargé de valorisation de la recherche",
"chargé de veille législative et réglementaire",
"chargé des méthodes outils et qualité en informatique",
"chargé des relations publiques",
"chargé hygiène sécurité environnement (HSE)",
"charpentier bois",
"charpentier métallique",
"chaudronnier",
"chauffeur de taxi",
"chef de projet packaging",
"chef comptable",
"chef d'exploitation d'usine d'incinération",
"chef d'exploitation des remontées mécaniques",
"chef de chantier",
"chef de chantier en installations électriques",
"chef de cultures légumières",
"chef de fabrication des industries graphiques",
"chef de mission humanitaire",
"chef de produit marketing",
"chef de produit technique en informatique",
"chef de produit touristique",
"chef de projet biodiversité",
"chef de projet communication digitale",
"chef de projet démantèlement nucléaire",
"chef de projet informatique",
"chef de projet multimédia",
"chef de projet sites et sols pollués",
"chef de projet web/mobile",
"chef de projet éolien",
"chef de publicité",
"chef de rayon",
"chef de station de traitement des eaux",
"chef des ventes",
"chef monteur monteuse",
"chercheur en biologie",
"chercheur en biologie du sport",
"chercheur en chimie",
"chercheur en physique",
"chirurgien",
"chirurgien-dentiste",
"chocolatier-confiseur",
"clerc d'huissier",
"climatologue",
"coffreur-boiseur",
"cogniticien",
"coiffeur",
"collaborateur de notaire de notaire",
"collecteur de fonds",
"coloriste",
"commercial export",
"commercial à bord des trains",
"commerçant en alimentation",
"commissaire de police",
"commissaire-priseur",
"community manager",
"comptable",
"comédien",
"concepteur de jeux vidéo",
"concepteur de niveaux de jeu web",
"concepteur designer packaging",
"concepteur multimédia",
"concepteur-rédacteur",
"conducteur d'engins de travaux publics",
"conducteur d'engins forestiers de récolte en entreprises de travaux forestiers",
"conducteur de bus ou d'autocar",
"conducteur de ligne de production alimentaire",
"conducteur de machine onduleuse",
"conducteur de machines agricoles",
"conducteur de machines à imprimer",
"conducteur de métro",
"conducteur de train",
"conducteur de travaux",
"conducteur de travaux agricoles",
"conducteur opérateur opératrice de scierie",
"conducteur routier routière",
"conducteur de machines à papier",
"conseiller agricole",
"conseiller d'élevage",
"conseiller en assurances",
"conseiller en environnement",
"conseiller en fusions-acquisitions",
"conseiller en génétique",
"conseiller en insertion sociale et professionnelle",
"conseiller en séjour",
"conseiller en voyages",
"conseiller en économie sociale et familiale",
"conseiller espace info-énergie",
"conseiller principal d'éducation principale d'éducation",
"conseiller pénitentiaire d'insertion et de probation",
"conseiller sportif sportive en salle de remise en forme",
"conservateur du patrimoine",
"conservateur territorial de bibliothèques",
"consignataire de navire",
"constructeur de routes",
"consultant",
"consultant SaaS",
"consultant en conduite de changement",
"consultant en informatique décisionnelle",
"consultant en management de l'innovation",
"consultant en solutions intégrées",
"consultant en systèmes d'information",
"consultant en validation",
"consultant green IT",
"consultant informatique",
"contremaître",
"contrôleur aérien aérienne",
"contrôleur de gestion",
"contrôleur de performance",
"contrôleur des douanes et droits indirects",
"contrôleur technique automobile",
"convoyeur de fonds",
"coordonnateur d'études cliniques",
"cordiste",
"cordonnier",
"correcteur",
"costumier",
"courtier",
"couvreur",
"credit manager",
"critique d'art",
"cryptologue",
"cuisinier",
"céramiste",
"danseur",
"data manager",
"designer d'interaction",
"designer graphique",
"designer industriel industrielle",
"designer sonore",
"dessinateur de BD",
"dessinateur en construction mécanique",
"dessinateur-projeteur",
"diagnostiqueur immobilier",
"directeur artistique",
"directeur d'accueil collectif de mineurs (ACM)",
"directeur d'agence bancaire",
"directeur d'hôpital",
"directeur d'hôtel",
"directeur d'office de tourisme",
"directeur de création",
"directeur de golf",
"directeur de la photographie",
"directeur de magasin à grande surface",
"directeur de restaurant",
"directeur des services pénitentiaires",
"diététicien",
"documentaliste",
"domoticien",
"déclarant en douane",
"décolleteur",
"décorateur",
"démographe",
"déménageur",
"dépanneur en électroménager",
"développeur d'applications mobiles",
"développeur informatique",
"développeur rural rurale humanitaire",
"développeur économique",
"employé d'élevage",
"employé de chai",
"employé de pressing",
"employé de restaurant",
"encadreur",
"enquêteur privé privée",
"enseignant d'art",
"enseignant de la conduite automobile et de la sécurité routière",
"enseignant humanitaire",
"enseignant spécialisé spécialisée",
"enseignant-chercheur",
"entraîneur de chevaux",
"entraîneur sportif sportive",
"ergonome",
"ergothérapeute",
"esthéticien-cosméticien",
"ethnologue",
"expert bilan carbone",
"expert automobile",
"expert en assurances",
"expert en sécurité informatique",
"expert immobilier immobilier",
"expert-comptable",
"facteur",
"facteur d'instruments",
"façadier",
"façonnier des industries graphiques",
"femme de chambre",
"ferronnier d'art",
"fiscaliste",
"fleuriste",
"formateur d'adultes",
"formateur en informatique",
"formateur technique en agroéquipement",
"formulateur",
"garde (chasse, pêche, littoral, rivière, parcs nationaux)",
"garde à cheval",
"gardien de la paix",
"gardien de police municipale",
"garçon de café",
"gendarme",
"gestionnaire actif/passif",
"gestionnaire de contrats d'assurance",
"gestionnaire de contrats informatiques",
"gestionnaire de données cliniques",
"gestionnaire de parc micro-informatique",
"gestionnaire de patrimoine",
"glaciologue",
"gouvernant",
"greffier",
"grutier",
"guichetier",
"guide de haute montagne",
"guide-conférencier",
"généalogiste",
"généticien",
"géochimiste",
"géographe",
"géologue",
"géologue minier",
"géologue modélisateur",
"géomaticien",
"géomètre-topographe",
"géophysicien",
"géotechnicien",
"géothermicien",
"gérant de portefeuille",
"gérant de restauration collective",
"halieute",
"histologiste",
"horloger",
"horticulteur",
"hot liner",
"huissier de justice",
"hydraulicien",
"hydrogéologue",
"hydrologue",
"hôte d'accueil",
"hôtesse de l'air",
"iconographe",
"illustrateur",
"infirmier",
"infirmier humanitaire",
"informaticien industriel industrielle",
"ingénieur RD en énergies renouvelables",
"ingénieur analogicien analogicienne",
"ingénieur analyste de l'air",
"ingénieur aromaticien aromaticienne",
"ingénieur biomédical biomédicale",
"ingénieur brevets",
"ingénieur calcul",
"ingénieur chimiste",
"ingénieur chimiste en développement analytique",
"ingénieur cloud computing",
"ingénieur combustion et brûleurs",
"ingénieur concepteur conceptrice en mécanique",
"ingénieur d'affaires en génie électrique",
"ingénieur d'application",
"ingénieur d'études en sûreté nucléaire",
"ingénieur de la police technique et scientifique",
"ingénieur de maintenance industrielle",
"ingénieur de recherche (papiers cartons)",
"ingénieur de recherche clinique et épidémiologique",
"ingénieur du BTP",
"ingénieur du son",
"ingénieur efficacité énergétique du bâtiment",
"ingénieur en acoustique",
"ingénieur en automatismes",
"ingénieur en aéronautique",
"ingénieur en caractérisation des matériaux",
"ingénieur en chef territorial",
"ingénieur en construction automobile",
"ingénieur en construction navale",
"ingénieur en fonderie",
"ingénieur en génie climatique",
"ingénieur en imagerie médicale",
"ingénieur en mécanique",
"ingénieur en métrologie",
"ingénieur en production et expérimentations végétales",
"ingénieur en électronique numérique",
"ingénieur en énergie solaire",
"ingénieur environnement",
"ingénieur environnement et risques industriels",
"ingénieur essais",
"ingénieur fluides, énergies, réseaux, environnement",
"ingénieur forage",
"ingénieur forestier forestière",
"ingénieur frigoriste",
"ingénieur gaz",
"ingénieur hydrogéomorphologue",
"ingénieur hydroécologue",
"ingénieur intégration satellite",
"ingénieur logiciel",
"ingénieur logistique",
"ingénieur maintenance aéronautique",
"ingénieur mathématicien",
"ingénieur matériaux",
"ingénieur métallurgiste",
"ingénieur méthodes mécaniques",
"ingénieur nucléaire",
"ingénieur opticien opticienne",
"ingénieur papetier papetière",
"ingénieur plasturgiste",
"ingénieur process aval",
"ingénieur procédés en chimie",
"ingénieur production dans les biotechnologies",
"ingénieur production en aéronautique",
"ingénieur production en mécanique",
"ingénieur pétrolier pétrolière",
"ingénieur qualité moteur",
"ingénieur radioprotection",
"ingénieur recherche et développement (R&D) en agroéquipement",
"ingénieur recherche et développement en agroalimentaire",
"ingénieur réservoir",
"ingénieur structures",
"ingénieur support",
"ingénieur système",
"ingénieur systèmes embarqués",
"ingénieur technico-commercial technico-commerciale",
"ingénieur technico-commercial technico-commerciale en chimie",
"ingénieur technico-commercial technico-commerciale en informatique",
"ingénieur technico-commercial technico-commerciale en électronique",
"ingénieur textile",
"ingénieur traitement de l'image",
"ingénieur télécoms et réseaux",
"ingénieur écoconcepteur écoconceptrice",
"ingénieur électricien",
"ingénieur électronicien électronicienne",
"ingénieur électronicien électronicienne des systèmes de la sécurité aérienne (IESSA)",
"ingénieur études et développement en logiciels de simulation",
"inspecteur de banque",
"inspecteur des douanes, des finances publiques ou du travail",
"inspecteur du permis de conduire et de la sécurité routière",
"installateur en télécoms",
"inséminateur",
"intégrateur web",
"journaliste",
"journaliste reporter d'images",
"juge d'instruction",
"juge des contentieux de la protection",
"juge des enfants",
"juriste d'entreprise",
"juriste droit de l'environnement",
"juriste en droit social",
"juriste en propriété intellectuelle",
"lad-jockey, lad-driver",
"libraire",
"linguiste",
"machiniste-constructeur ou plateau",
"magasinier cariste",
"magistrat",
"manager de risques",
"mandataire judiciaire",
"manipulateur en électroradiologie médicale",
"maquettiste",
"maquilleur artistique",
"maraîcher",
"marchandiseur",
"maroquinier",
"maréchal-ferrant",
"masseur",
"matelot de la Marine Nationale",
"matelot de la marine marchande",
"matelot à la pêche",
"maçon",
"maître d'hôtel",
"maître-chien",
"menuisier",
"metteur en scène",
"microbiologiste",
"microtechnicien",
"militaire du rang de l'armée de terre",
"militaire technicien de l'air (MTA)",
"miroitier",
"mixeur son",
"modiste",
"modéliste",
"moniteur d'activités équestres",
"moniteur de ski",
"moniteur-éducateur",
"monteur en installations thermiques et climatiques",
"monteur en réseaux de distribution électrique",
"monteur-câbleur",
"mouleur-noyauteur",
"musicien",
"mécanicien bateaux",
"mécanicien d'entretien d'avion",
"mécanicien et technicien",
"mécanicien marine navigant",
"mécanicien-outilleur",
"mécanicien-réparateur en matériel agricole",
"mécatronicien",
"médecin de secours en montagne",
"médecin généraliste",
"médecin humanitaire",
"médecin spécialiste",
"médiateur familial familiale",
"médiateur scientifique",
"météorologiste",
"neurobiologiste",
"nivoculteur",
"notaire",
"océanologue",
"oenologue",
"officier de gendarmerie",
"officier de l'armée de l'air",
"officier de l'armée de terre",
"officier de la Marine nationale",
"officier de la marine marchande",
"officier de police",
"officier marinier marinière",
"opticien-lunetier",
"optronicien",
"opérateur de fabrication de produits alimentaires",
"opérateur de raffinerie",
"opérateur en traitement des matériaux",
"opérateur prépresse",
"opérateur sur machine à commande numérique",
"orfèvre",
"orthodontiste",
"orthophoniste",
"orthoprothésiste",
"orthoptiste",
"ostéopathe",
"ouvrier agricole",
"ouvrier forestier forestière",
"ouvrier paysagiste",
"ouvrier plasturgiste",
"palefrenier",
"paléontologue",
"parfumeur",
"patron pêcheur",
"paysagiste",
"peintre en bâtiment",
"pharmacien",
"pharmacien dans l'industrie",
"photographe",
"physicien médical médicale",
"pilote d'hélicoptère",
"pilote de ligne",
"pilote de ligne automatisée (chimie - agroalimentaire - industrie pharmaceutique)",
"pisteur secouriste",
"plombier",
"plâtrier",
"podo-orthésiste",
"poissonnier",
"professeur d'éducation physique et sportive (EPS)",
"professeur dans l'enseignement agricole",
"professeur de collège et de lycée",
"professeur de lycée professionnel",
"professeur de mathématiques ou de physique-chimie",
"professeur de musique et de danse",
"professeur des écoles des écoles",
"professeur en activité physique adaptée",
"professeur-documentaliste",
"programmiste",
"projectionniste",
"prothésiste dentaire",
"prototypiste en matériaux souples",
"préparateur en pharmacie",
"psychanalyste",
"psychologue",
"psychologue de l'Éducation nationale spécialité éducation, développement et apprentissages",
"psychologue de l'éducation nationale spécialité éducation, développement et conseil en"
+ " orientation scolaire et professionnelle",
"psychomotricien",
"puériculteur",
"pâtissier",
"pédiatre",
"pédicure-podologue",
"pédologue",
"relieur-doreur",
"reporter-photographe",
"responsable achats en chimie",
"responsable approvisionnement",
"responsable assurance qualité",
"responsable biométrie",
"responsable d'élevage agricole",
"responsable de fabrication en chimie",
"responsable de formation",
"responsable de la collecte des déchets ménagers",
"responsable de la promotion des ventes",
"responsable de laboratoire de contrôle en biologie",
"responsable de laboratoire de contrôle en chimie",
"responsable de laboratoire de recherche",
"responsable de plate-forme biotechnologique",
"responsable de production alimentaire",
"responsable de projets culturels",
"responsable de rémunération",
"responsable de réseau d'assainissement",
"responsable de réseau eau potable",
"responsable de scierie",
"responsable de site de traitement des déchets",
"responsable des ouvrages hydroélectriques",
"responsable des produits structurés actions",
"responsable des ressources humaines",
"responsable du back office",
"responsable du recrutement",
"responsable du service après-vente",
"responsable du soutien logistique intégré",
"responsable e-CRM",
"responsable qualité en agroalimentaire",
"restaurateur d'oeuvres d'art",
"roboticien",
"rudologue",
"réceptionniste",
"rédacteur en chef",
"rédacteur médical médicale",
"rédacteur on line",
"rédacteur territorial territoriale",
"réflexologue",
"régisseur de spectacles",
"régisseur général générale cinéma",
"régisseur lumière",
"régleur",
"sage-femme",
"salesman",
"sapeur-pompier",
"scripte",
"sculpteur sur bois",
"scénariste",
"secrétaire",
"secrétaire administratif",
"secrétaire d'édition",
"secrétaire de rédaction",
"secrétaire des affaires étrangères",
"secrétaire juridique",
"secrétaire médical",
"sellier",
"serrurier dépanneur dépanneuse",
"serrurier-métallier",
"sociologue",
"soigneur d'animaux",
"solier-moquettiste",
"sommelier",
"soudeur",
"souffleur de verre",
"sous-officier de l'armée de l'air",
"sous-officier de l'armée de terre",
"souscripteur",
"sportif de haut niveau",
"spécialiste de l'accessibilité numérique",
"spécialiste des affaires réglementaires en chimie",
"staffeur-ornemaniste",
"statisticien",
"statisticien en analyse sensorielle",
"statisticien en géomarketing",
"statisticien industriel industrielle",
"styliste",
"substitut du procureur",
"surveillant de centre pénitentiaire",
"syndic de copropriété",
"sérigraphe",
"tailleur de pierre",
"tailleur-couturier",
"tapissier d'ameublement",
"technicien automobile",
"technicien biologiste",
"technicien chimiste",
"technicien céramiste",
"technicien d'analyses biomédicales",
"technicien d'essais",
"technicien d'exploitation de l'eau",
"technicien d'exploitation du réseau gaz",
"technicien d'intervention clientèle gaz",
"technicien de contrôle",
"technicien de fabrication de mobilier et de menuiserie",
"technicien de forge",
"technicien de l'intervention sociale et familiale",
"technicien de la circulation ferroviaire SNCF",
"technicien de maintenance en génie climatique",
"technicien de maintenance en informatique",
"technicien de maintenance industrielle",
"technicien de police technique et scientifique",
"technicien des industries du verre",
"technicien démonstrateur démonstratrice en matériel agricole",
"technicien en automatismes",
"technicien en engins de travaux publics",
"technicien en lignes haute tension",
"technicien en métrologie",
"technicien en optique de précision",
"technicien en traitement des déchets",
"technicien en traitement des matériaux",
"technicien forestier forestière",
"technicien logistique",
"technicien packaging",
"technicien paysagiste",
"technicien plasturgiste",
"technicien prototypiste en agroéquipement",
"technicien pétrolier pétrolière",
"technicien qualité",
"technicien radioprotection",
"technicien réalisateur réalisatrice radio",
"technicien thermicien thermicienne",
"technicien télécoms et réseaux",
"technicien électronicien électronicienne",
"technicien électrotechnicien électrotechnicienne",
"technico-commercial en agroalimentaire",
"teinturier blanchisseur",
"testeur",
"toiletteur d'animaux",
"tonnelier",
"trader",
"traducteur technique",
"traducteur-interprète",
"traffic manager",
"télévendeur",
"urbaniste",
"veilleur stratégique",
"vendeur conseil caviste",
"vendeur en animalerie",
"vendeur en magasin",
"vendeur en micro-informatique et multimédia",
"vendeur-conseil en matériel agricole",
"vendeur-magasinier en fournitures automobiles",
"verrier au chalumeau",
"visiteur médical médicale",
"viticulteur",
"vitrailliste",
"volcanologue",
"vétérinaire",
"web-ergonome",
"webdesigner",
"webmestre",
"yield manager",
"zoologiste",
"ébéniste",
"éclairagiste",
"économe de flux",
"économiste de la construction",
"économètre statisticien",
"écrivain",
"éditeur",
"éducateur canin canine",
"éducateur de jeunes enfants",
"éducateur de la protection judiciaire de la jeunesse (PJJ)",
"éducateur sportif sportive",
"éducateur sportif sportive des activités aquatiques et de la natation",
"éducateur spécialisé spécialisée",
"éducateur technique spécialisé technique spécialisée",
"élagueur",
"électricien installateur installatrice",
"électromécanicien",
"électromécanicien en remontées mécaniques",
"électronicien automobile",
"énergéticien",
"étalagiste",
"étanchéiste",
] | PypiClean |
/Asymptotic%20Complexity%20Judge-0.0.1.tar.gz/Asymptotic Complexity Judge-0.0.1/ACJudge_Pkg/Client.py | import sys, Ice, os.path, time ,argparse, datetime
from xtermcolor import colorize
from prettytable import PrettyTable
from random import randrange
from dotenv import load_dotenv
from ACJudge_Pkg import OJMod
# NOTE(review): rebinding the module-level __name__ is unusual and defeats any
# `if __name__ == "__main__"` guard in this module — confirm it is intentional.
__name__ = 'Client'
# Load ASYMPTOTIC_COMPLEXITY_* settings from the package .env file, if present.
load_dotenv()
def list_problems(problems):
    """Print a titled table of the available problems (ID and Title columns)."""
    table = PrettyTable(['ID', 'Title'])
    table.title = 'Problems'
    for problem in problems:
        table.add_row([problem.id, problem.title])
    print(table)
def fetch_id(judge, _id):
    """Fetch the description of the problem with id *_id* and print it."""
    description = judge.problemDescription(_id)
    print(description)
def get_supported_ext():
    """Return the lowercase names of every supported language, by enum index."""
    count = len(OJMod.SupportedLanguages._enumerators)
    extensions = []
    for index in range(count):
        extensions.append(str(OJMod.SupportedLanguages.valueOf(index)).lower())
    return extensions
def fetch_current(judge):
    """Fetch the currently active problem statement and print it."""
    statement = judge.fetch()
    print(statement)
def supported_lang_help():
    """Build the error message listing all supported source-file languages."""
    supported = ", ".join(get_supported_ext())
    return "[Invalid Language] Currently we only support " + supported
def parse_extension(filename):
    """Map *filename*'s extension to its OJMod.SupportedLanguages value.

    Returns None when the extension is not a supported language.
    """
    extension = os.path.splitext(filename)[1][1:].lower()
    try:
        position = get_supported_ext().index(extension)
    except ValueError:
        return None
    return OJMod.SupportedLanguages.valueOf(position)
def setup_env():
    """Prompt for the judge server address and port, persist them to the
    package-local .env file, and export them into the process environment."""
    address = input("Please enter the ip-address: ")
    port = input("Please enter the port: ")
    env_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), '.env')
    contents = (
        f'export ASYMPTOTIC_COMPLEXITY_ADDRESS={address}\n'
        f'export ASYMPTOTIC_COMPLEXITY_PORT={port}'
    )
    with open(env_path, 'w') as env_file:
        env_file.write(contents)
    os.environ['ASYMPTOTIC_COMPLEXITY_ADDRESS'] = address
    os.environ['ASYMPTOTIC_COMPLEXITY_PORT'] = port
def _extract_source(f):
    """Read and return the full text of the source file at path *f*.

    Raises OSError (e.g. FileNotFoundError) if the file cannot be opened.
    """
    # NOTE(review): no explicit encoding is given, so the platform default is
    # used — confirm UTF-8 source files are expected on all platforms.
    with open(f, 'r') as file:
        # The original pre-initialized `source = None` and returned it after the
        # with-block; since open() either succeeds or raises, return directly.
        return file.read()
def _process_testcase(test_case, res, test):
    """Translate a judge result code into a colorized status string.

    Parameters:
        test_case: 1-based index of the test case (currently unused; kept
            for interface compatibility with callers).
        res: OJMod.Results value returned by the judge for this case.
        test: True for a test run (detailed per-case feedback), False for a
            submission (pass/fail only).

    Returns the colorized status text to display for this test case.
    """
    if not test:
        # Submissions only report pass/fail per case.
        return colorize("Passed", 0x06BA84) if res == OJMod.Results.A else colorize("Failed", 0xBA2A06)
    # Test runs report the specific failure reason.
    if res == OJMod.Results.A:
        return colorize("Passed", 0x06BA84)
    if res == OJMod.Results.WA:
        return colorize("Wrong Answer", 0xBA2A06)
    if res == OJMod.Results.TLE:
        issue = "Time Limit Exceeded"
    elif res == OJMod.Results.MLE:
        issue = "Memory Limit Exceeded"
    elif res == OJMod.Results.CE:
        issue = "Compilation Error"  # fixed typo: was "Compliation Error"
    elif res == OJMod.Results.RE:
        issue = "Runtime Error"
    else:
        issue = "ISSUE"  # fallback for unrecognized result codes
    return colorize(issue, 0xDBC51C)
def _pending_testcase(test_case, cycle_tick):
    """Render one frame of the spinner shown while a test case is 'running'.

    *cycle_tick* selects the spinner character; the line is redrawn in place
    via the carriage return.
    """
    spinner_frames = '\\|/-'
    frame = spinner_frames[cycle_tick % 4]
    text = frame + " Running..."
    print(f'[Test: {test_case}]: {colorize(text, 0x0596B9)}', end="\r")
def _attempt(judge, args):
    """Run a test or submission attempt for the source file given in *args*.

    Reads the source file, sends it to the judge (optionally targeting a
    specific problem id), then animates per-test-case progress and prints the
    status of each result. In submission mode an overall Accepted/Not
    Accepted verdict is printed, derived from the last result in the list.

    Raises RuntimeError for unsupported file extensions or empty/unreadable
    source files.
    """
    # --test takes precedence over --submit when both paths are given.
    f = args.test if args.test else args.submit
    ext = parse_extension(f)
    if not ext:
        raise RuntimeError(supported_lang_help())
    print()
    print(f'Creating New Submission.....')
    print(f'Submission({colorize(datetime.datetime.now(), 0x0596B9)})------------------------------------------------\n')
    source = _extract_source(f)
    if not source:
        raise RuntimeError("Invalid source file")
    results = None
    # Pick the judge call: test vs. submit, with or without a problem id.
    if not args.id:
        results = judge.test(source, ext) \
            if args.test \
            else judge.submit(source, ext)
    else:
        results = judge.testWithID(int(args.id), source, ext) \
            if args.test \
            else judge.submitWithID(int(args.id), source, ext)
    for test_case, res in enumerate(results):
        test_case += 1  # display test cases 1-based
        # Cosmetic spinner: the judge call above has already returned all
        # results; the randomized duration only simulates per-case progress.
        for i in range(13 + randrange(5)):
            _pending_testcase(test_case, i)
            time.sleep(randrange(5)/10)
        sys.stdout.write("\033[K")  # ANSI escape: clear the spinner line
        status = _process_testcase(test_case, res, args.test != None)
        print(f'[Test: {test_case}]: {status}', end="\r"),
        print()
        time.sleep(0.5)
    if not args.test:
        # Submission mode: the final entry in `results` is the overall verdict.
        print()
        final_result = colorize("Accepted", 0x06BA84) \
            if (results[-1] == OJMod.Results.A) \
            else colorize("Not Accepted", 0xBA2A06)
        print(f'Submission({colorize(datetime.datetime.now(), 0x0596B9)}): {final_result}')
        print(f'\n----------------------Submission End--------------------------')
def _fetch(judge, args):
    """Print either the current problem or the problem with the given id."""
    print(colorize("Fetching....", 0x0596B9))
    print("\n")
    if args.id:
        fetch_id(judge, int(args.id))
    else:
        fetch_current(judge)
def _list(judge):
    """Print the table of all problems available on the judge."""
    print(colorize("Fetching....", 0x0596B9))
    print("\n")
    problems = judge.listProblems()
    list_problems(problems)
def run(judge, args):
    """Dispatch the parsed command-line arguments to the matching action."""
    if args.fetch:
        _fetch(judge, args)
        return
    if args.list:
        _list(judge)
        return
    if args.test or args.submit:
        _attempt(judge, args)
def parser():
    """Build and return the ACJudge command-line argument parser."""
    arg_parser = argparse.ArgumentParser(
        description='ACJudge is terminal judge for the Asymptotic Complexity Group.')
    # (short flag, long flag, add_argument keyword options)
    flag_specs = [
        (('-t', '--test'), dict(help="Path to file for which you'd like to test")),
        (('-s', '--submit'), dict(help="Path to file for which you'd like to submit")),
        (('-f', '--fetch'), dict(action='store_true', help="Retrieve the current problem statement")),
        (('-l', '--list'), dict(action='store_true', help="List all the available problems")),
        (('-i', '--id'), dict(help="Focus on a specific problem id (this can be added to the --fetch, --test and --submit commands)")),
    ]
    for names, options in flag_specs:
        arg_parser.add_argument(*names, **options)
    return arg_parser
def parse():
    """Parse the process's command-line arguments with the ACJudge parser."""
    cli = parser()
    return cli.parse_args()
def main():
    """Entry point: connect to the judge server over ZeroC Ice and run the CLI.

    Prompts for the server address/port on first run (cached in a .env file
    and the process environment by setup_env), builds a checked proxy to the
    remote Judge object, then dispatches the parsed command-line action.

    Raises RuntimeError if the Ice proxy cannot be cast to the judge type.
    """
    # First run: no cached server coordinates yet, ask the user for them.
    if "ASYMPTOTIC_COMPLEXITY_ADDRESS" not in os.environ or "ASYMPTOTIC_COMPLEXITY_PORT" not in os.environ:
        setup_env()
    with Ice.initialize(sys.argv) as communicator:
        base = communicator.stringToProxy(f'Judge:tcp -h {os.environ.get("ASYMPTOTIC_COMPLEXITY_ADDRESS")} -p {os.environ.get("ASYMPTOTIC_COMPLEXITY_PORT")}')
        judge = OJMod.ComsPrx.checkedCast(base)
        if not judge:
            raise RuntimeError("Invalid proxy")
        args = parse()
        if args.fetch or args.test or args.submit or args.list:
            run(judge, args)
        else:
            # No action flags given: show usage help instead.
            parser().print_help()
/GxSphinx-1.0.0.tar.gz/GxSphinx-1.0.0/sphinx/ext/autosummary/templates/autosummary/class.rst | {{ fullname | escape | underline}}
.. currentmodule:: {{ module }}
.. autoclass:: {{ objname }}
{% block methods %}
.. automethod:: __init__
{% if methods %}
.. rubric:: {{ _('Methods') }}
.. autosummary::
{% for item in methods %}
~{{ name }}.{{ item }}
{%- endfor %}
{% endif %}
{% endblock %}
{% block attributes %}
{% if attributes %}
.. rubric:: {{ _('Attributes') }}
.. autosummary::
{% for item in attributes %}
~{{ name }}.{{ item }}
{%- endfor %}
{% endif %}
{% endblock %}
| PypiClean |
/MutatorMath-3.0.1.zip/MutatorMath-3.0.1/Docs/old_designSpaceFileFormat.md | MutatorMath DesignSpace Format (old)
====================================

Please refer to the [designSpaceDocument](https://github.com/LettError/designSpaceDocument) repository for an up to data specification of the file.
The UFO support in MutatorMath introduces a useful storage format in XML for MutatorMath designspaces. This document describes the format.
## Document structure
The document must contain a single **designspace** top level element. The current format version is `3`. The designspace element must contain one **sources** element and one **instances** element.
* The **sources** element contains one or more **source** elements.
* The **instances** element contains one or more **instance** elements.
The designspace format makes a difference between "masters" and "sources". Source specifically indicates the UFO file. Master indicates a particular use of a source in a MutatorMath calculation. In general: the sources bring data to the calculation, instances take data out.
The font.info contains different kinds of data. Some are strings (names, urls, copyrights), some are numerical but not geometrical (versions, PANOSE). The designspace offers some controls
Both instance and source elements contain paths to files. These paths are expected to be relative to the path of the .designspace document. This allows the same .designspace to be deployed to multiple locations, but still reference the proper source files. It also allows sources to be stored in their own directories, and instances to be created into their own directories.
## An example of a DesignSpace description
```xml
<?xml version="1.0" ?>
<designspace format="3">
<sources>
<source filename="../sources/Light/font.ufo" name="master_1">
<lib copy="1"/>
<groups copy="1"/>
<info copy="1"/>
<location>
<dimension name="weight" xvalue="0.000000"/>
</location>
</source>
<source filename="../sources/Bold/font.ufo" name="master_2">
<location>
<dimension name="weight" xvalue="1.000000"/>
</location>
</source>
</sources>
<instances>
<instance familyname="MyFamily" filename="../instance/Medium.ufo" stylename="Medium">
<location>
<dimension name="weight" xvalue="0.500000"/>
</location>
<info/>
<kerning/>
</instance>
</instances>
</designspace>
```
## The Elements
```xml
<?xml version="1.0" ?>
<designspace format="3">
<!-- optional: list of axis elements -->
<axes>
<axis
<!-- required: 4 letter axis tag see OpenType axis tags -->
tag="aaaa"
<!-- optional: human readable name -->
name="nice name for axis"
<!-- required: minimum value for axis -->
minimum="72"
<!-- required: maximum value for axis -->
maximum="1000"
<!-- optional: default value for axis -->
default="96"
/>
<!-- optional child element: avar table values, "map"
<map input="<number>" output="<number>" />
</axis>
</axes>
<!-- required: one sources element -->
<sources>
<!-- required: one or more source elements -->
<source
<!-- required: path to UFO source -->
filename=""
<!-- optional: unique identifier for this source -->
[name=""]
>
<!-- required location element -->
<location/>
<!-- optional: flags for which data this master should provide or mute -->
[<lib copy="1"/>]
[<groups copy="1"/>]
[<info [copy="1"][mute="1"]/>]
[<kerning mute="1"/>]
<!-- optional: flag to mute a specific source glyph -->
[<glyph name="" mute="1"/>]
</source>
</sources>
<!-- required: one instances element -->
<instances>
        <!-- required: one or more instance elements -->
<instance
<!-- required: path to UFO instance -->
filename=""
<!-- required: familyname and stylename -->
familyname=""
stylename=""
<!-- optional: some more names -->
[postscriptfontname=""]
[stylemapfamilyname=""]
[stylemapstylename=""]
>
<!-- required location element -->
<location/>
<!-- if present, calculate the font.info for this instance -->
[<info>
<!-- if location is present, calculate the font.info at this location -->
[<location/>]
</info>]
<!-- if present, calculate the font.kerning for this instance -->
[<kerning>
<!-- if location is present, calculate the kerning at this location -->
[<location/>]
</kerning>]
<!-- optional: special definitions for specific glyphs.
It is expected that an instance will always generate all glyphs.
The special definitions in the **glyphs** element are expected
to complement the basic glyphset.
-->
[<glyphs>
<!-- required: one or more glyph elements -->
<glyph
<!-- required: the AGL glyphname -->
name=""
<!-- optional: unicode value for this glyph -->
[unicode=""]
>
<!-- optional: alternative location for this glyph. -->
[<location/>]
<!-- optional: a note for this glyph -->
[<note>
nice glyph!
</note>]
<!-- optional: a list of alternative sources for this glyph.
If present these masters supersede any masters defined by the instance.
This expects these masters to form a complete designspace.
-->
[<masters>
<!-- required: one or more master elements -->
<master
<!-- required: source identifier for this master -->
source=""
<!-- optional: alternative glyph for this master -->
[glyphname=""]
>
<!-- required alternative location for this master -->
<location/>
</master>
</masters>]
</glyph>
</glyphs>]
</instance>
</instances>
</designspace>
```
## The axis element
## The source element
The **source** element stores all the data needed to locate a UFO file and indicates how to use the different kinds of data in a MutatorMath calculation. The source element can contain a number of child elements. The **location** element is required, it positions the source in the designspace. The **lib**, **groups**, **info**, **kerning** elements are optional. Some types of data can be muted: this means that specific data will not be used to calculate an instance.
#### Attributes of the source element
* **filename**
* Required, string.
* Path to a UFO, **relative to the path of the designspace document.**
* **name**
* Required, string.
* A unique identifier for this source, can be used to refer to this source, for instance in the **master** element.
#### Child elements
* ```<location/>```
* Required.
* ```<lib copy="1"/>```:
* If the **lib** element is present and its copy attribute is set to "1", this source will be the provider of font.lib data.
* Only one source can be the lib data provider.
* Optional. If the lib element is not present this source will not be the provider of font.lib data.
* ```<groups copy="1"/>```
* If the **groups** element is present and its copy attribute is set to "1", this source will be the provider of font.groups data.
* Only one source can be the groups data provider.
* Optional. If the groups element is not present this source will not be the provider of font.groups data.
* ```<info [copy="1"] [mute="1"]/>```
* If the **info** element is present and the `copy` attribute is set to "1", this source will be the provider of the non numerical attributes of the font.info.
* Only one source can be the info data provider.
* The optional `mute` attribute when set to "1", indicates the numerical attributes of font.info should excluded from the calculation.
* If the info element is not present this source will not be the provider of font.info data, but the numerical font.info data will be entered into the calculation.
* ```<kerning mute="1"/>```
* Optional. If present, this kerning from this source is to be excluded from calculations.
* ```<glyph name="" mute="1"/>```
* Optional. If present, this glyph from this source is to be excluded from calculations.
##### Example
```xml
<source filename="../sources/Bold/font.ufo" name="master_2">
<!-- insert this master at weight=1, width=0 -->
<location>
<dimension name="weight" xvalue="1.000000"/>
<dimension name="width" xvalue="0.000000"/>
</location>
<lib copy="1"/>
<groups copy="1"/>
<info copy="1"/>
<kerning mute="1"/>
<glyph name="AE.alt" mute="1"/>
</source>
```
## The instance element
The `instance` element stores all the data needed to perform a MutatorMath calculation with the previously defined sources and create a new UFO. The instance element can contain a number of child elements. The `location` element is required, it defines a point in the designspace. The `lib`, `groups`, `info` elements are optional. Wrapped in the **glyphs** element, an instance can contain zero or more **glyph** elements. A glyph element can be used to define exceptions in the designspace geometry: for instance, set a different location for one specific glyph, a different set of sources.
It is expected the instance generates all glyphs that are available in the sources. An instance may have special definitions for some glyphs, these complement the basic glyphset.
The familyname and stylename are necessary to make UFOs. Some additional names can be added.
#### Attributes of the instance element
* **filename**
* String, required.
* Path to a UFO, **relative to the path of the designspace document.**
* If this path does not exist, it should be created when the instance is processed.
* **familyname**
* String, required.
* FamilyName field for the new instance. Corresponds with font.info.familyName.
* **stylename**
* String, required.
    * StyleName field for the new instance. Corresponds with font.info.styleName.
* **postscriptfontname**
* String, optional
* PostScript FontName, corresponds with font.info.postScriptFontName
* **stylemapfamilyname**
* String, optional
* Stylemap familyname, corresponds with font.info.styleMapFamilyName
* **stylemapstylename**
* String, optional
* Stylemap stylename, corresponds with font.info.styleMapStyleName
#### Child elements
* ```<location/>```
* Required.
* ```<info/>```
* Optional.
    * Add this element if the instance needs to calculate the font.info data. If the info element contains a location element this supersedes the instance location.
* ```<glyphs>...</glyphs>```
* Optional. The glyphs element can contain one or more **glyph** elements.
* ```<kerning/>```
* Optional.
* Add this element if the instance needs to calculate the font.kerning data. If the kerning element contains a location element this supercedes the instance location.
* A kerning element may have one child **location** element. If present this location should be used in calculating the kerning.
#### Example
```xml
<instance familyname="MyFamily" filename="../Medium.ufo" stylename="Medium">
<location>
<dimension name="weight" xvalue="0.500000"/>
</location>
<glyphs>
<glyph name="N">
<location>
<dimension name="width" xvalue="0.700000"/>
</location>
<masters>
<master glyphname="N.alt" source="master_1">
<location>
<dimension name="weight" xvalue="0.490000"/>
</location>
</master>
<master glyphname="N.alt" source="master_2">
<location>
<dimension name="weight" xvalue="0.490000"/>
</location>
</master>
</masters>
</glyph>
</glyphs>
</instance>
```
## The location element
The location element describes a point in the designspace. Locations are used to position a source as a master, and to indicate where the instances are to be calculated. Location elements are used in several places in a designspace. A location element has no attributes, but needs to contain at least one **dimension** child elements.
```xml
<location>
<dimension name="" xvalue="" [yvalue=""]/>
[...]
</location>
```
#### Attributes of the dimension element
* **name**
* Required, string. Name of the dimension. For instance "width" or "weight".
* **xvalue**
* Required, value. A string representation of distance in this dimension.
* **yvalue**
* Optional value if this dimension is to be anisotropic.
#### Examples
```xml
<!-- location with a single dimension -->
<location>
<dimension name="weight" xvalue="0.500000"/>
</location>
<!-- location with a single anisotropic dimension -->
<location>
<dimension name="weight" xvalue="0.500000" yvalue="0.48728"/>
</location>
<!-- location with two dimensions -->
<location>
<dimension name="weight" xvalue="0.500000"/>
<dimension name="width" xvalue="0.500000"/>
</location>
<!-- location with seven dimensions just so you know you can't get away with some botched up old multiple master interface. -->
<location>
<dimension name="weight" xvalue="0.500000"/>
<dimension name="width" xvalue="0.500000"/>
<dimension name="optical" xvalue="72"/>
<dimension name="serif" xvalue="-100"/>
<dimension name="slant" xvalue="4"/>
<dimension name="wobble" xvalue="1000"/>
    <dimension name="splatter" xvalue="1000" yvalue="400"/>
</location>
```
## The glyph element
The optional **glyph** element can be used in a instance element to store information about masters and locations that are different from the ones defined for the instance.
```xml
<glyphs>
<!-- required: one or more glyph elements -->
<glyph
<!-- required: the AGL glyphname -->
name=""
<!-- optional: unicode value for this glyph -->
[unicode=""]
>
<!-- optional: alternative location for this glyph. -->
[<location/>]
<!-- optional: a note for this glyph -->
[<note>
nice glyph!
</note>]
[<masters>
...a number of master elements
</masters>]
</glyph>
</glyphs>
```
#### Attributes of the glyph element
* **name**
* Required, string. The glyph name.
* **unicode**
* Optional, hex. The unicode value of the glyph, expressed as a string in the format **"0xFF"**. If no unicode value is given, use the unicode for this glyph used in the sources.
#### Child elements of the glyph element
* ```<location/>```
* Optional, location element. If the location element is present it will be the location for this glyph. If it is not present, the location defined for the instance will be used.
* ```<note>...</note>```
* Optional, string. Corresponds with the **defcon** glyph.note attribute.
* ```<masters>...</masters>```
* a list of master elements.
## The master element
Used in the masters element to specify which glyphs from which sources have to be used for this glyph.
```xml
<masters>
<master
[source="sourcename"]
[glyphname='a.alt']
>
[<location/>]
</master>
...
</masters>
```
#### Attributes of the master element
* **source**
* Optional, string.
* Name of the source, must match with the **name** attribute of exactly one **source** element.
* **glyphname**
* Optional, string
* Alternative glyphname if data is to come from a different glyph.
#### Child elements of the master element
* **location**
* Optional.
* If a location element is present, use this alternative location for this master. If no location is present, use the location defined for the source.
## Notes on this document
Initial version of this specification. The package is rather new and changes are to be expected.
| PypiClean |
/FPL_wildcard_team_selector-1.1.1.tar.gz/FPL_wildcard_team_selector-1.1.1/FPL_wildcard_team_selector/FPL_data_visualization/team_selection_visualization.py | import turtle
class visualize_team_selection:
    """Render a selected FPL squad on a soccer pitch using turtle graphics.

    The four player lists are expected to hold display labels (strings);
    draw_player() strips the first four characters of every label before
    writing it, so labels presumably carry a fixed-width 4-character
    prefix -- TODO confirm against the caller that builds these lists.
    """
    def __init__(self, list_of_goalies, list_of_defenders, list_of_midfielders, list_of_strikers, cash_left):
        # Squad layout assumed by draw_all_players(): 2 goalkeepers,
        # 5 defenders, 5 midfielders, 3 strikers.  Shorter lists raise
        # IndexError when drawn.
        self.list_of_goalies = list_of_goalies
        self.list_of_defenders = list_of_defenders
        self.list_of_midfielders = list_of_midfielders
        self.list_of_strikers = list_of_strikers
        # Remaining budget, printed in the bottom-right corner of the pitch.
        self.cash_left = cash_left
    def draw_pitch(self):
        '''
        Uses turtle graphics to visualize a soccer field.

        Draws the outer boundary, both penalty boxes (arc plus goal area),
        the halfway line and the centre circle on a green background.
        '''
        GREEN="#149118"
        screen = turtle.Screen()
        # tracer(0) disables animation so the whole pitch appears at once.
        screen.tracer(0)
        screen.bgcolor(GREEN)
        myBrush = turtle.Turtle()
        myBrush.width(1)
        myBrush.hideturtle()
        myBrush.speed(0)
        myBrush.color("#FFFFFF")
        #Outer lines
        myBrush.penup()
        myBrush.goto(-320,280)
        myBrush.pendown()
        myBrush.goto(320,280)
        myBrush.goto(320,-220)
        myBrush.goto(-320,-220)
        myBrush.goto(-320,280)
        #Penalty Box - Top: draw the full circle first, then paint a
        #green-filled rectangle over its upper part so only the "D" arc
        #remains visible outside the box.
        myBrush.penup()
        myBrush.goto(0,190)
        myBrush.pendown()
        myBrush.circle(40)
        myBrush.penup()
        myBrush.goto(-100,280)
        myBrush.pendown()
        myBrush.fillcolor(GREEN)
        myBrush.begin_fill()
        myBrush.goto(100,280)
        myBrush.goto(100,215)
        myBrush.goto(-100,215)
        myBrush.goto(-100,280)
        myBrush.end_fill()
        #Penalty Box - Bottom (same circle-then-overpaint technique)
        myBrush.penup()
        myBrush.goto(0,-210)
        myBrush.pendown()
        myBrush.circle(40)
        myBrush.penup()
        myBrush.goto(-100,-220)
        myBrush.pendown()
        myBrush.fillcolor(GREEN)
        myBrush.begin_fill()
        myBrush.goto(100,-220)
        myBrush.goto(100,-155)
        myBrush.goto(-100,-155)
        myBrush.goto(-100,-220)
        myBrush.end_fill()
        # Goal Box - Bottom
        myBrush.penup()
        myBrush.goto(40,-220)
        myBrush.pendown()
        myBrush.goto(40,-195)
        myBrush.goto(-40,-195)
        myBrush.goto(-40,-220)
        # Goal Box - Top
        myBrush.penup()
        myBrush.goto(40,280)
        myBrush.pendown()
        myBrush.goto(40,255)
        myBrush.goto(-40,255)
        myBrush.goto(-40,280)
        #Halfway Line
        myBrush.penup()
        myBrush.goto(-320,30)
        myBrush.pendown()
        myBrush.goto(320,30)
        #Central Circle
        myBrush.penup()
        myBrush.goto(0,-10)
        myBrush.pendown()
        myBrush.circle(40)
        #turtle.update()
    def draw_player(self, color, x, y, label):
        """Draw one filled player marker at (x, y) and write its label below.

        Args:
            color (str): fill colour of the marker circle.
            x (int): x coordinate of the marker.
            y (int): y coordinate of the marker.
            label (str): text to display under the marker.
        """
        screen = turtle.Screen()
        screen.tracer(0)
        myPen = turtle.Turtle()
        myPen.hideturtle()
        myPen.penup()
        myPen.goto(x,y)
        myPen.fillcolor(color)
        myPen.begin_fill()
        myPen.circle(10)
        myPen.end_fill()
        myPen.penup()
        # Rough horizontal centering: ~5 pixels per character of the label.
        x_offset = (len(label)/2) * 5
        myPen.goto(x-x_offset,y-20)
        # NOTE(review): the first 4 characters of the label are dropped
        # here; player labels apparently carry a 4-char prefix.  This also
        # strips "Cash" from the cash-remaining label written by
        # draw_all_players() -- looks unintended, confirm with the caller.
        myPen.write(label[4:])
    def draw_all_players(self):
        """Place the full 2-5-5-3 squad plus the cash label on the pitch."""
        #Draw 2 Goalkeepers
        self.draw_player("blue", -80, -190, self.list_of_goalies[0])
        self.draw_player("blue", 80, -190, self.list_of_goalies[1])
        #Draw 5 defenders
        self.draw_player("yellow", 300, -110, self.list_of_defenders[0])
        self.draw_player("yellow", 150, -110, self.list_of_defenders[1])
        self.draw_player("yellow", 0, -110, self.list_of_defenders[2])
        self.draw_player("yellow", -150, -110, self.list_of_defenders[3])
        self.draw_player("yellow", -300, -110, self.list_of_defenders[4])
        #Draw 5 Midfielders
        self.draw_player("yellow", 300, 20, self.list_of_midfielders[0])
        self.draw_player("yellow", 150, 20, self.list_of_midfielders[1])
        self.draw_player("yellow", 0, 20, self.list_of_midfielders[2])
        self.draw_player("yellow", -150, 20, self.list_of_midfielders[3])
        self.draw_player("yellow", -300, 20, self.list_of_midfielders[4])
        #Draw 3 Strikers
        self.draw_player("yellow", -170, 150, self.list_of_strikers[0])
        self.draw_player("yellow", 0, 150, self.list_of_strikers[1])
        self.draw_player("yellow", 170, 150, self.list_of_strikers[2])
        #Add Cash Remaining (rendered via draw_player, so label[4:] applies)
        cash_left_label = "Cash Remaining: " + str(self.cash_left)
        self.draw_player("white", 300, -270, cash_left_label)
    def run_visualization(self):
        """Draw the pitch and squad, then block in turtle's main loop."""
        self.draw_pitch()
        self.draw_all_players()
        turtle.mainloop()
/ATpy-0.9.7.tar.gz/ATpy-0.9.7/atpy/vo_conesearch.py | from __future__ import print_function, division
from distutils import version
import warnings
import tempfile
vo_minimum_version = version.LooseVersion('0.3')
# The STScI ``vo`` package is an optional dependency: record whether it is
# importable so callers can fail with a clear message later.
try:
    import vo.conesearch as vcone
    vo_installed = True
except Exception:
    # ``except Exception`` rather than a bare ``except``: a bare clause would
    # also swallow SystemExit/KeyboardInterrupt.  Exception (not just
    # ImportError) is kept because ``vo`` may raise other errors at import
    # time on incompatible versions.
    vo_installed = False
def _check_vo_installed():
    """Raise a descriptive error if the STScI ``vo`` package is missing."""
    if vo_installed:
        return
    raise Exception("Cannot query the VO - vo "
                    + vo_minimum_version.vstring + " or later required")
def read(self, catalog=None, ra=None, dec=None, radius=None, verb=1,
         pedantic=False, **kwargs):
    '''
    Query a VO catalog using the STScI vo module

    This docstring has been adapted from the STScI vo conesearch module:

    *catalog* [ None | string | VOSCatalog | list ]

        May be one of the following, in order from easiest to use to most
        control:

        - None: A database of conesearch catalogs is downloaded from
          STScI. The first catalog in the database to successfully return
          a result is used.

        - catalog name: A name in the database of conesearch catalogs at
          STScI is used. For a list of acceptable names, see
          vo_conesearch.list_catalogs().

        - url: The prefix of a url to a IVOA Cone Search Service. Must end
          in either ? or &.

        - A VOSCatalog instance: A specific catalog manually downloaded
          and selected from the database using the APIs in the
          STScI vo.vos_catalog module.

        - Any of the above 3 options combined in a list, in which case
          they are tried in order.

    *pedantic* [ bool ]

        When pedantic is True, raise an error when the returned VOTable
        file violates the spec, otherwise issue a warning.

    *ra* [ float ]

        A right-ascension in the ICRS coordinate system for the position
        of the center of the cone to search, given in decimal degrees.

    *dec* [ float ]

        A declination in the ICRS coordinate system for the position of
        the center of the cone to search, given in decimal degrees.

    *radius* [ float]

        The radius of the cone to search, given in decimal degrees.

    *verb* [ int ]

        Verbosity, 1, 2, or 3, indicating how many columns are to be
        returned in the resulting table. Support for this parameter by a
        Cone Search service implementation is optional. If the service
        supports the parameter, then when the value is 1, the response
        should include the bare minimum of columns that the provider
        considers useful in describing the returned objects. When the
        value is 3, the service should return all of the columns that are
        available for describing the objects. A value of 2 is intended for
        requesting a medium number of columns between the minimum and
        maximum (inclusive) that are considered by the provider to most
        typically useful to the user. When the verb parameter is not
        provided, the server should respond as if verb = 2. If the verb
        parameter is not supported by the service, the service should
        ignore the parameter and should always return the same columns for
        every request.

    Additional keyword arguments may be provided to pass along to the
    server. These arguments are specific to the particular catalog being
    queried.
    '''
    _check_vo_installed()
    self.reset()

    # Perform the cone search
    VOTable = vcone.conesearch(catalog_db=catalog, pedantic=pedantic,
        ra=ra, dec=dec, sr=radius, verb=verb, **kwargs)

    # Round-trip the result through a temporary XML file so the regular
    # ATpy VO reader can parse it.  The try/finally guarantees the
    # temporary file is closed (and therefore deleted) even if
    # serialization or parsing raises; the original leaked it on error.
    output = tempfile.NamedTemporaryFile()
    try:
        # Write table to temporary file
        VOTable._votable.to_xml(output)
        output.flush()

        # Read it in using ATpy VO reader.
        # NOTE(review): assumes self.read here dispatches to the generic
        # table reader (type='vo'), not back to this function -- confirm
        # against the atpy reader registry.
        self.read(output.name, type='vo', verbose=False)

        # Check if table is empty
        if len(self) == 0:
            warnings.warn("Query returned no results, so the table will be empty")
    finally:
        # Remove temporary file
        output.close()
def list_catalogs():
    """Print the names of all working cone-search catalogs known to STScI."""
    _check_vo_installed()
    working = (name for name in vcone.list_catalogs() if "BROKEN" not in name)
    for name in working:
        print("%30s" % name)
/Docassemble-Pattern-3.6.7.tar.gz/Docassemble-Pattern-3.6.7/docassemble_pattern/text/nl/__init__.py |
####################################################################################################
# Dutch linguistical tools using fast regular expressions.
from __future__ import unicode_literals
from __future__ import division
from builtins import str, bytes, dict, int
from builtins import map, zip, filter
from builtins import object, range
import os
import sys
import re
# Directory containing this module's bundled language-data files.
try:
    MODULE = os.path.dirname(os.path.realpath(__file__))
except NameError:
    # __file__ is undefined in some embedded/frozen environments; fall back
    # to the working directory.  (Was a bare ``except``, which would also
    # swallow SystemExit/KeyboardInterrupt.)
    MODULE = ""
# Import parser base classes.
from docassemble_pattern.text import (
Lexicon, Model, Morphology, Context, Parser as _Parser, ngrams, pprint, commandline,
PUNCTUATION
)
# Import parser universal tagset.
from docassemble_pattern.text import (
penntreebank2universal,
PTB, PENN, UNIVERSAL,
NOUN, VERB, ADJ, ADV, PRON, DET, PREP, ADP, NUM, CONJ, INTJ, PRT, PUNC, X
)
# Import parse tree base classes.
from docassemble_pattern.text.tree import (
Tree, Text, Sentence, Slice, Chunk, PNPChunk, Chink, Word, table,
SLASH, WORD, POS, CHUNK, PNP, REL, ANCHOR, LEMMA, AND, OR
)
# Import sentiment analysis base classes.
from docassemble_pattern.text import (
Sentiment as _Sentiment,
NOUN, VERB, ADJECTIVE, ADVERB,
MOOD, IRONY
)
# Import spelling base class.
from docassemble_pattern.text import (
Spelling
)
# Import verb tenses.
from docassemble_pattern.text import (
INFINITIVE, PRESENT, PAST, FUTURE,
FIRST, SECOND, THIRD,
SINGULAR, PLURAL, SG, PL,
PROGRESSIVE,
PARTICIPLE
)
# Import inflection functions.
from docassemble_pattern.text.nl.inflect import (
pluralize, singularize, NOUN, VERB, ADJECTIVE,
verbs, conjugate, lemma, lexeme, tenses,
predicative, attributive
)
# Import all submodules.
from docassemble_pattern.text.nl import inflect
sys.path.pop(0)
#--- DUTCH PARSER ----------------------------------------------------------------------------------
# The Dutch parser (accuracy 92%) is based on Jeroen Geertzen's language model:
# Brill-NL, http://cosmion.net/jeroen/software/brill_pos/
# The lexicon uses the WOTAN tagset:
# http://lands.let.ru.nl/literature/hvh.1999.2.ps
WOTAN = "wotan"

# WOTAN tag prefix -> ordered (substring, Penn Treebank tag) candidates.
# For each prefix, the first substring found in the full tag wins; the
# empty string acts as a catch-all where present.
wotan = {
    "Adj(": (("vergr", "JJR"), ("overtr", "JJS"), ("", "JJ")),
    "Adv(": (("deel", "RP"), ("", "RB")),
    "Art(": (("", "DT"),),
    "Conj(": (("", "CC"),),
    "Int": (("", "UH"),),
    "Misc": (("symb", "SYM"), ("vreemd", "FW")),
    "N(": (("eigen,ev", "NNP"), ("eigen,mv", "NNPS"), ("ev", "NN"), ("mv", "NNS")),
    "Num(": (("", "CD"),),
    "Prep(": (("inf", "TO"), ("", "IN")),
    "Pron(": (("bez", "PRP$"), ("", "PRP")),
    "Punc(": (("komma", ","), ("open", "("), ("sluit", ")"), ("schuin", "CC"), ("", ".")),
    "V(": (("hulp", "MD"), ("ott,3", "VBZ"), ("ott", "VBP"), ("ovt", "VBD"),
           ("verl", "VBN"), ("teg", "VBG"), ("", "VB"))
}


def wotan2penntreebank(token, tag):
    """Map a WOTAN tag to its Penn Treebank II equivalent.
        For example: bokkenrijders/N(soort,mv,neut) => bokkenrijders/NNS

        Unknown tags are passed through unchanged.
    """
    for prefix, candidates in wotan.items():
        if not tag.startswith(prefix):
            continue
        for fragment, penn in candidates:
            if fragment in tag:
                return (token, penn)
    return (token, tag)
def wotan2universal(token, tag):
    """Map a WOTAN tag to the universal tagset.
        For example: bokkenrijders/N(soort,mv,neut) => bokkenrijders/NOUN
    """
    # Adverbs map straight to ADV; everything else goes through the
    # Penn Treebank mapping first.
    if tag.startswith("Adv"):
        return (token, ADV)
    token, penn = wotan2penntreebank(token, tag)
    return penntreebank2universal(token, penn)
# Common Dutch abbreviations that end in a period; the tokenizer must not
# treat these trailing periods as sentence boundaries.
ABBREVIATIONS = set((
    "a.d.h.v.", "afb.", "a.u.b.", "bv.", "b.v.", "bijv.", "blz.", "ca.", "cfr.", "dhr.", "dr.",
    "d.m.v.", "d.w.z.", "e.a.", "e.d.", "e.g.", "enz.", "etc.", "e.v.", "evt.", "fig.", "i.e.",
    "i.h.b.", "ir.", "i.p.v.", "i.s.m.", "m.a.w.", "max.", "m.b.t.", "m.b.v.", "mevr.", "min.",
    "n.a.v.", "nl.", "n.o.t.k.", "n.t.b.", "n.v.t.", "o.a.", "ong.", "pag.", "ref.", "t.a.v.",
    "tel.", "zgn."
))
def find_lemmata(tokens):
    """Annotate tokens in-place with lowercase lemmata for plural nouns and
        conjugated verbs, where each token is a [word, part-of-speech] list.
        Returns the same token list.
    """
    def lemma_of(word, pos):
        # Adjectives ending in -e revert to their predicative base form.
        if pos.startswith("JJ") and word.endswith("e"):
            return predicative(word)
        # Plural nouns are singularized.
        if pos == "NNS":
            return singularize(word)
        # Verbs and modals are reduced to the infinitive when possible.
        if pos.startswith(("VB", "MD")):
            return conjugate(word, INFINITIVE) or word
        return word

    for token in tokens:
        token.append(lemma_of(token[0], token[1]).lower())
    return tokens
class Parser(_Parser):
    """Dutch parser: WOTAN-tagged lexicon with Penn/universal tag mapping."""

    def find_tokens(self, tokens, **kwargs):
        # 's in Dutch preceded by a vowel indicates a plural ("auto's"):
        # don't replace it during tokenization.
        kwargs.setdefault("abbreviations", ABBREVIATIONS)
        kwargs.setdefault("replace", {"'n": " 'n"})
        sentences = _Parser.find_tokens(self, tokens, **kwargs)
        # Re-attach "'s ochtends/morgens/middags/avonds" that the
        # tokenizer split apart.
        return [re.sub(r"' s (ochtends|morgens|middags|avonds)", "'s \\1", s)
                for s in sentences]

    def find_lemmata(self, tokens, **kwargs):
        return find_lemmata(tokens)

    def find_tags(self, tokens, **kwargs):
        # Select the tag-mapping function for the requested tagset;
        # Penn Treebank is the default.
        tagset = kwargs.get("tagset")
        if tagset in (PENN, None):
            kwargs.setdefault("map", lambda token, tag: wotan2penntreebank(token, tag))
        if tagset == UNIVERSAL:
            kwargs.setdefault("map", lambda token, tag: wotan2universal(token, tag))
        if tagset is WOTAN:
            kwargs.setdefault("map", lambda token, tag: (token, tag))
        return _Parser.find_tags(self, tokens, **kwargs)
class Sentiment(_Sentiment):

    def load(self, path=None):
        _Sentiment.load(self, path)
        # For the default lexicon only: mirror each predicative adjective
        # ("verschrikkelijk") onto its attributive form
        # ("verschrikkelijke") for ~1% extra coverage.
        if path:
            return
        for word, pos in list(dict.items(self)):
            if "JJ" not in pos:
                continue
            polarity, subjectivity, intensity = pos["JJ"]
            self.annotate(attributive(word), "JJ", polarity, subjectivity, intensity)
# Module-level singletons configured with the Dutch language data files
# shipped next to this module.
parser = Parser(
       lexicon = os.path.join(MODULE, "nl-lexicon.txt"),
     frequency = os.path.join(MODULE, "nl-frequency.txt"),
    morphology = os.path.join(MODULE, "nl-morphology.txt"),
       context = os.path.join(MODULE, "nl-context.txt"),
       # Fallback WOTAN tags for unknown words / proper nouns / numbers.
       default = ("N(soort,ev,neut)", "N(eigen,ev)", "Num()"),
      language = "nl"
)

lexicon = parser.lexicon # Expose lexicon.

sentiment = Sentiment(
        path = os.path.join(MODULE, "nl-sentiment.xml"),
      synset = "cornetto_id",
   # Dutch negation words flip polarity of the following sentiment word.
   negations = ("geen", "gene", "ni", "niet", "nooit"),
   modifiers = ("JJ", "RB",),
    modifier = lambda w: w.endswith(("ig", "isch", "lijk")),
   tokenizer = parser.find_tokens,
    language = "nl"
)

spelling = Spelling(
        path = os.path.join(MODULE, "nl-spelling.txt")
)
# Thin module-level convenience wrappers around the singletons above.

def tokenize(s, *args, **kwargs):
    """ Returns a list of sentences, where punctuation marks have been split from words.
    """
    return parser.find_tokens(s, *args, **kwargs)

def parse(s, *args, **kwargs):
    """ Returns a tagged Unicode string.
    """
    return parser.parse(s, *args, **kwargs)

def parsetree(s, *args, **kwargs):
    """ Returns a parsed Text from the given string.
    """
    return Text(parse(s, *args, **kwargs))

# NOTE(review): mutable default argument; harmless as long as Text() does
# not mutate the token list -- confirmed nowhere here, so left unchanged.
def tree(s, token=[WORD, POS, CHUNK, PNP, REL, LEMMA]):
    """ Returns a parsed Text from the given parsed string.
    """
    return Text(s, token)

def tag(s, tokenize=True, encoding="utf-8", **kwargs):
    """ Returns a list of (token, tag)-tuples from the given string.
    """
    tags = []
    for sentence in parse(s, tokenize, True, False, False, False, encoding, **kwargs).split():
        for token in sentence:
            tags.append((token[0], token[1]))
    return tags

def keywords(s, top=10, **kwargs):
    """ Returns a sorted list of keywords in the given string.
        Keyword arguments override the defaults (frequency table, top-n,
        noun POS filter, ignored words).
    """
    return parser.find_keywords(s, **dict({
        "frequency": parser.frequency,
              "top": top,
              "pos": ("NN",),
           "ignore": ("rt", "mensen")}, **kwargs))

def suggest(w):
    """ Returns a list of (word, confidence)-tuples of spelling corrections.
    """
    return spelling.suggest(w)

def polarity(s, **kwargs):
    """ Returns the sentence polarity (positive/negative) between -1.0 and 1.0.
    """
    return sentiment(s, **kwargs)[0]

def subjectivity(s, **kwargs):
    """ Returns the sentence subjectivity (objective/subjective) between 0.0 and 1.0.
    """
    return sentiment(s, **kwargs)[1]

def positive(s, threshold=0.1, **kwargs):
    """ Returns True if the given sentence has a positive sentiment (polarity >= threshold).
    """
    return polarity(s, **kwargs) >= threshold

split = tree # Backwards compatibility.

#---------------------------------------------------------------------------------------------------
# python -m pattern.nl xml -s "De kat wil wel vis eten maar geen poot nat maken." -OTCL

if __name__ == "__main__":
    commandline(parse)
/Flask-Sillywalk-2.1.zip/Flask-Sillywalk-2.1/README.rst | flask-sillywalk
===============
A Flask extension that implements Swagger support (http://swagger.wordnik.com/)
What's Swagger?
---------------
Swagger is a spec to help you document your APIs. It's flexible and
produces beautiful API documentation that can then be used to build
API-explorer-type sites, much like the one at
http://developer.wordnik.com/docs -- To read more about the Swagger
spec, head over to https://github.com/wordnik/swagger-core/wiki or
http://swagger.wordnik.com
Git Repository and issue tracker: https://github.com/hobbeswalsh/flask-sillywalk
Documentation: http://flask-sillywalk.readthedocs.org/en/latest/
.. |travisci| image:: https://travis-ci.org/hobbeswalsh/flask-sillywalk.png
.. _travisci: https://travis-ci.org/hobbeswalsh/flask-sillywalk
|travisci|_
Why do I want it?
-----------------
* You want your API to be easy to read.
* You want other people to be able to use your API easily.
* You'd like to build a really cool API explorer.
* It's Friday night and your friend just ditched on milkshakes.
How do I get it?
----------------
From your favorite shell::
$ pip install flask-sillywalk
How do I use it?
----------------
I'm glad you asked. In order to use this code, you need to first
instantiate a SwaggerApiRegistry, which will keep track of all your API
endpoints and documentation.
Usage::
from flask import Flask
from flask.ext.sillywalk import SwaggerApiRegistry, ApiParameter, ApiErrorResponse
app = Flask("my_api")
registry = SwaggerApiRegistry(
app,
baseurl="http://localhost:5000/api/v1",
api_version="1.0",
api_descriptions={"cheese": "Operations with cheese."})
register = registry.register
registerModel = registry.registerModel
Then, instead of using the "@app.route" decorator that you're used to
using with Flask, you use the "register" decorator you defined above (or
"registerModel" if you're registering a class that describes a possible
API return value).
Now that we've got an API registry, we can register some functions. The
@register decorator, when just given a path (like @app.route), will
register a GET method with no possible parameters. In order to document a
method with parameters, we can feed the @register function some
parameters.
Usage::
@register("/api/v1/cheese/random")
def get_random_cheese():
"""Fetch a random Cheese from the database.
Throws OutOfCheeseException if this is not a cheese shop."""
return htmlify(db.cheeses.random())
@register("/api/v1/cheese/<cheeseName>",
parameters=[
ApiParameter(
name="cheeseName",
description="The name of the cheese to fetch",
required=True,
dataType="str",
paramType="path",
allowMultiple=False)
],
responseMessages=[
ApiErrorResponse(400, "Sorry, we're fresh out of that cheese.")
])
def get_cheese(cheeseName):
"""Gets a single cheese from the database."""
return htmlify(db.cheeses.fetch(name=cheeseName))
Now, if you navigate to http://localhost:5000/api/v1/resources.json you
should see the automatic API documentation. See documentation for all the
cheese endpoints at http://localhost:5000/api/v1/cheese.json
What's left to do?
------------------
Well, lots, actually. This release:
* Doesn't support XML (but do we really want to?)
* Doesn't support the full swagger spec (e.g. "type" in data models
* Lots more. Let me know!
| PypiClean |
/integer_partition/__init__.py | from CombinatorialProbability.combinatorics import CombinatorialSequence
from CombinatorialProbability.combinatorics import CombinatorialStructure
class IntegerPartition(CombinatorialSequence, CombinatorialStructure):
    """Generator for integer partitions.

    This class is meant as a generator for integer partitions.  It inherits from
    CombinatorialSequence and CombinatorialStructure.  Several algorithms for uniform
    generation with a fixed size are implemented.

    This class is not meant to be interpreted as an integer partition object itself.  An
    intended future feature is to specify restrictions on part sizes, for example, all odd
    parts, or all even parts, or parts which are perfect powers, or parts in some specified
    set U.  This type of property would be specified at the generator level, whereas the
    actual partition generated would be an output of a member function.
    """

    def __init__(self, **kwargs):
        """Initializes the generator for integer partitions.

        Keyword Args:
            part_sizes: optional restriction on allowed part sizes
                (stored but not yet acted upon -- future feature).
        """
        # Initialize the sub-classes
        CombinatorialSequence.__init__(self, self)
        CombinatorialStructure.__init__(self, self)

        # Initialize the primary properties:
        # p_of_n_array -- cached values of p(n); p_n_k_table -- cached
        # p(n, k) recursion table; target -- parameters set by fit().
        self.p_of_n_array = None
        self.p_n_k_table = None
        self.target = {}
        self.part_sizes = None

        # Future feature: if there is restriction on part sizes, it would be specified on the generator object.
        if 'part_sizes' in kwargs:
            self.part_sizes = kwargs['part_sizes']

    # The imports below are *class-body* imports: each imported function is
    # bound as a class attribute and therefore becomes a method of
    # IntegerPartition.  The implementations live in sibling modules.
    # NOTE(review): the first import uses an absolute package path while the
    # rest are relative -- presumably intentional, but worth unifying.

    # These are the functions related to the recursive properties.
    from CombinatorialProbability.integer_partition._make_table import make_p_n_k_table, p_n_k, make_p_of_n_array, p_of_n

    # Mimicking the sklearn library, this function "tunes" or "precomputes" as needed for a specified weight
    # or more general target, e.g., integer partitions of size n with parts of size at most k.
    from ._fit import fit

    # A counting function, utilized by CombinatorialSequence.
    from ._partition_function import partition_function

    # The method which generates samples according to a prescribed method.
    from ._sampling import sampling, table_method_sampling, pdc_recursive_method_sampling
    # Note: table_method_sampling is a member function because it dynamically allocates a larger table on demand.

    # Utilized by CombinatorialStructure to generate partitions one at a time.
    from ._iterator_methods import next_object

    # Utility function to quickly map a partition into a more familiar multiset of positive integers.
    from ._transforms import partition_map_to_tuple
/Flask_AdminLTE3-1.0.9-py3-none-any.whl/flask_adminlte3/static/plugins/codemirror/mode/dtd/dtd.js | (function(mod) {
if (typeof exports == "object" && typeof module == "object") // CommonJS
mod(require("../../lib/codemirror"));
else if (typeof define == "function" && define.amd) // AMD
define(["../../lib/codemirror"], mod);
else // Plain browser env
mod(CodeMirror);
})(function(CodeMirror) {
"use strict";
CodeMirror.defineMode("dtd", function(config) {
var indentUnit = config.indentUnit, type;
function ret(style, tp) {type = tp; return style;}
function tokenBase(stream, state) {
var ch = stream.next();
if (ch == "<" && stream.eat("!") ) {
if (stream.eatWhile(/[\-]/)) {
state.tokenize = tokenSGMLComment;
return tokenSGMLComment(stream, state);
} else if (stream.eatWhile(/[\w]/)) return ret("keyword", "doindent");
} else if (ch == "<" && stream.eat("?")) { //xml declaration
state.tokenize = inBlock("meta", "?>");
return ret("meta", ch);
} else if (ch == "#" && stream.eatWhile(/[\w]/)) return ret("atom", "tag");
else if (ch == "|") return ret("keyword", "separator");
else if (ch.match(/[\(\)\[\]\-\.,\+\?>]/)) return ret(null, ch);//if(ch === ">") return ret(null, "endtag"); else
else if (ch.match(/[\[\]]/)) return ret("rule", ch);
else if (ch == "\"" || ch == "'") {
state.tokenize = tokenString(ch);
return state.tokenize(stream, state);
} else if (stream.eatWhile(/[a-zA-Z\?\+\d]/)) {
var sc = stream.current();
if( sc.substr(sc.length-1,sc.length).match(/\?|\+/) !== null )stream.backUp(1);
return ret("tag", "tag");
} else if (ch == "%" || ch == "*" ) return ret("number", "number");
else {
stream.eatWhile(/[\w\\\-_%.{,]/);
return ret(null, null);
}
}
function tokenSGMLComment(stream, state) {
var dashes = 0, ch;
while ((ch = stream.next()) != null) {
if (dashes >= 2 && ch == ">") {
state.tokenize = tokenBase;
break;
}
dashes = (ch == "-") ? dashes + 1 : 0;
}
return ret("comment", "comment");
}
function tokenString(quote) {
return function(stream, state) {
var escaped = false, ch;
while ((ch = stream.next()) != null) {
if (ch == quote && !escaped) {
state.tokenize = tokenBase;
break;
}
escaped = !escaped && ch == "\\";
}
return ret("string", "tag");
};
}
function inBlock(style, terminator) {
return function(stream, state) {
while (!stream.eol()) {
if (stream.match(terminator)) {
state.tokenize = tokenBase;
break;
}
stream.next();
}
return style;
};
}
return {
startState: function(base) {
return {tokenize: tokenBase,
baseIndent: base || 0,
stack: []};
},
token: function(stream, state) {
if (stream.eatSpace()) return null;
var style = state.tokenize(stream, state);
var context = state.stack[state.stack.length-1];
if (stream.current() == "[" || type === "doindent" || type == "[") state.stack.push("rule");
else if (type === "endtag") state.stack[state.stack.length-1] = "endtag";
else if (stream.current() == "]" || type == "]" || (type == ">" && context == "rule")) state.stack.pop();
else if (type == "[") state.stack.push("[");
return style;
},
indent: function(state, textAfter) {
var n = state.stack.length;
if( textAfter.charAt(0) === ']' )n--;
else if(textAfter.substr(textAfter.length-1, textAfter.length) === ">"){
if(textAfter.substr(0,1) === "<") {}
else if( type == "doindent" && textAfter.length > 1 ) {}
else if( type == "doindent")n--;
else if( type == ">" && textAfter.length > 1) {}
else if( type == "tag" && textAfter !== ">") {}
else if( type == "tag" && state.stack[state.stack.length-1] == "rule")n--;
else if( type == "tag")n++;
else if( textAfter === ">" && state.stack[state.stack.length-1] == "rule" && type === ">")n--;
else if( textAfter === ">" && state.stack[state.stack.length-1] == "rule") {}
else if( textAfter.substr(0,1) !== "<" && textAfter.substr(0,1) === ">" )n=n-1;
else if( textAfter === ">") {}
else n=n-1;
//over rule them all
if(type == null || type == "]")n--;
}
return state.baseIndent + n * indentUnit;
},
electricChars: "]>"
};
});
CodeMirror.defineMIME("application/xml-dtd", "dtd");
}); | PypiClean |
/Essential_Probability_Functions-0.1.tar.gz/Essential_Probability_Functions-0.1/Essential_Probability_Functions/Binomialdistribution.py | import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Binomial(Distribution):
    """ Binomial distribution class for calculating and
    visualizing a Binomial distribution.

    A binomial distribution is defined by two variables: the probability p
    of a positive outcome and the number of trials n, from which
        mean = p * n
        standard deviation = sqrt(n * p * (1 - p))

    Attributes:
        mean (float) representing the mean value of the distribution
        stdev (float) representing the standard deviation of the distribution
        data (list) sample of 0/1 outcomes read from a data file
        p (float) representing the probability of an event occurring
        n (int) the total number of trials
    """

    def __init__(self, prob=.5, size=20):
        self.p = prob
        self.n = size
        # Derive mean and stdev from p and n and hand them to the base
        # class.  (The original re-assigned self.p/self.n a second time
        # after this call, which was redundant.)
        Distribution.__init__(self, self.calculate_mean(), self.calculate_stdev())

    def calculate_mean(self):
        """Function to calculate the mean from p and n

        Returns:
            float: mean of the binomial distribution (p * n)
        """
        self.mean = self.p * self.n
        return self.mean

    def calculate_stdev(self):
        """Function to calculate the standard deviation from p and n.

        Returns:
            float: standard deviation, sqrt(n * p * (1 - p))
        """
        self.stdev = math.sqrt(self.p * self.n * (1.0 - self.p))
        return self.stdev

    def replace_stats_with_data(self):
        """Function to calculate p and n from the data set.

        Reads the 0/1 outcomes from 'numbers_binomial.txt', then updates
        n (number of trials), p (fraction of positive outcomes), mean and
        stdev in place.

        Returns:
            float: the p value
            float: the n value

        Raises:
            ValueError: if the data file contains no outcomes.
        """
        # NOTE(review): the original stored read_data_file()'s return value
        # into self.data, but in the standard Generaldistribution template
        # read_data_file() populates self.data as a side effect and returns
        # None, which made self.data None here.  Handle both conventions.
        loaded = self.read_data_file('numbers_binomial.txt')
        if loaded is not None:
            self.data = loaded

        self.n = len(self.data)
        if self.n == 0:
            raise ValueError("data file contained no outcomes")

        # p is the fraction of positive (== 1) trials.
        self.p = sum(1 for outcome in self.data if outcome == 1) / self.n

        self.mean = self.calculate_mean()
        self.stdev = self.calculate_stdev()

        return self.p, self.n

    def plot_bar(self):
        """Function to output a histogram of the instance variable data using
        matplotlib pyplot library (x-axis: outcome value, y-axis: count).
        """
        plt.hist(self.data)
        plt.title('Binomial Histogram')
        plt.xlabel('Value')
        plt.ylabel('Count')

    def pdf(self, k):
        """Probability density function calculator for the binomial distribution.

        Args:
            k (int): number of positive outcomes to evaluate

        Returns:
            float: probability of exactly k positive outcomes in n trials
        """
        # n-choose-k, then weight by the probability of each arrangement.
        combinations = (math.factorial(self.n)
                        / (math.factorial(k) * math.factorial(self.n - k)))
        return combinations * math.pow(self.p, k) * math.pow(1 - self.p, self.n - k)

    def plot_bar_pdf(self):
        """Function to plot the pdf of the binomial distribution for
        k = 0 .. n.

        Returns:
            list: x values for the pdf plot
            list: y values for the pdf plot
        """
        x = list(range(self.n + 1))
        y = [self.pdf(k) for k in x]

        plt.bar(x, y)
        plt.title('Distribution of Outcomes')
        plt.ylabel('Probability')
        plt.xlabel('Outcome')
        plt.show()

        return x, y

    def __add__(self, other):
        """Function to add together two Binomial distributions with equal p

        Args:
            other (Binomial): Binomial instance

        Returns:
            Binomial: distribution with the same p and n = self.n + other.n

        Raises:
            AssertionError: if the two p values are not equal
        """
        try:
            assert self.p == other.p, 'p values are not equal'
        except AssertionError as error:
            raise

        result = Binomial(prob=self.p, size=self.n + other.n)
        # Concatenate the two samples without mutating either operand.
        # (The original overwrote other.data with self.data and then stored
        # the None returned by list.extend() into result.data.)
        result.data = list(self.data) + list(other.data)
        return result

    def __repr__(self):
        """Function to output the characteristics of the Binomial instance

        Returns:
            string: characteristics of the distribution
        """
        return 'mean {}, standard deviation {}, p {}, n {}'.format(self.mean, self.stdev, self.p, self.n)
/Firmant-0.2.3a1.tar.gz/Firmant-0.2.3a1/firmant/writers/__init__.py | from firmant import chunks
from firmant import decorators
from firmant import utils
from firmant.utils import workarounds
class Writer(chunks.AbstractChunk):
    '''The base class of all writers.

    This class defines an abstract base class that all writers are required to
    adhere to.  To use this class in the creation of a writer, create a
    subclass with all necessary methods and properties overwritten.

    .. seealso::

       Module :mod:`abc`
          This module is part of the Python standard library in 2.6+.

    .. doctest::
       :hide:

       >>> import sys
       >>> from firmant import routing
       >>> logger = get_logger()

    To create a new type of writer, inherit from :class:`Writer`:

    .. doctest::

       >>> class SampleWriter(Writer):
       ...     extension = 'txt'
       ...     def key(self, obj):
       ...         return {'obj': str(obj)}
       ...     def obj_list(self, environment, objects):
       ...         return objects.get('objs', [])
       ...     def render(self, environment, path, obj):
       ...         print 'Save object "%s" to "%s"' % (obj, path)

    The new writer meets the criteria for two different abstract base classes:

    .. doctest::

       >>> import firmant.chunks
       >>> issubclass(SampleWriter, firmant.chunks.AbstractChunk)
       True
       >>> issubclass(SampleWriter, Writer)
       True

    .. warning::

       When creating a writer, do not store state in the writer itself.  While
       it appears that a writer is a single object, it will actually share
       state across three or more chunks during typical usage.

       If it is necessary to store state, place it in environment keyed to the
       writer class:

       .. doctest::
          :hide:

          >>> environment = {}

       .. doctest::

          >>> environment[SampleWriter] = 'stored state goes here'

       This is because of the need to split actions between url
       forward-declaration and rendering.

    The remainder of this section is devoted to describing the implementation
    details of :class:`Writer`'s template methods.

    Chunks are passed environment and object dictionaries.  While it is not
    technically a chunk, the :class:`Writer` interface follows the same
    pattern.  When called with an environment and set of objects, a writer
    will return two more chunks (in addition to the environment and object
    dictionaries).

    .. doctest::

       >>> environment = {'log': logger
       ...               ,'urlmapper': urlmapper
       ...               }
       >>> environment['urlmapper'].add(
       ...     routing.SinglePathComponent('obj', str)
       ... )
       >>> objects = {'objs': ['obj1', 'obj2', 'obj3']}
       >>> sw = SampleWriter(environment, objects)
       >>> sw.scheduling_order
       10
       >>> pprint(sw(environment, objects)) #doctest: +ELLIPSIS
       ({'log': <logging.Logger instance at 0x...>,
         'urlmapper': <firmant.routing.URLMapper object at 0x...>},
        {'objs': ['obj1', 'obj2', 'obj3']},
        [<firmant.writers.SampleWriter object at 0x...>,
         <firmant.writers.SampleWriter object at 0x...>])

    .. note::

       The chunks returned do not share any state with the :class:`Writer`
       that created them.  The fact that the class name is the same is an
       implementation detail that may change in the future.

    The first chunk is the chunk that will build the urls, while the second is
    used for rendering.  Neither chunk returns more chunks.

    .. doctest::

       >>> environment, objects, (urls, render) = sw(environment, objects)
       >>> urls.scheduling_order
       500
       >>> render.scheduling_order
       900
       >>> pprint(urls(environment, objects)) #doctest: +ELLIPSIS
       ({'log': <logging.Logger instance at 0x...>,
         'urlmapper': <firmant.routing.URLMapper object at 0x...>,
         'urls': {'firmant.writers.SampleWriter': ['http://testurl/obj1/',
                                                   'http://testurl/obj2/',
                                                   'http://testurl/obj3/']}},
        {'objs': ['obj1', 'obj2', 'obj3']},
        [])
       >>> pprint(render(environment, objects)) #doctest: +ELLIPSIS
       Save object "obj1" to "outputdir/obj1/index.txt"
       Save object "obj2" to "outputdir/obj2/index.txt"
       Save object "obj3" to "outputdir/obj3/index.txt"
       ({'log': <logging.Logger instance at 0x...>,
         'urlmapper': <firmant.routing.URLMapper object at 0x...>},
        {'objs': ['obj1', 'obj2', 'obj3']},
        [])

    ..

       These are for completeness of tests but don't contribute to docs.

    ..

       Make sure we don't accept bad actions.  This would break
       :attr:`scheduling_order` and :meth:`__call__`

       .. doctest::
          :hide:

          >>> SampleWriter({}, {}, action='something else')
          Traceback (most recent call last):
          ValueError: `action` is invalid

    '''

    def __init__(self, environment, objects, action=None):
        # `environment` and `objects` are accepted to satisfy the chunk
        # calling convention but are not stored on the instance; writers
        # must remain stateless (see the class docstring warning).
        # pylint: disable-msg=W0613
        super(Writer, self).__init__()
        if action not in (None, 'urls', 'renderer'):
            raise ValueError('`action` is invalid')
        # Select which phase this instance performs when called.  The
        # default phase spawns the 'urls' and 'renderer' chunks.
        self.__action__ = self.__default__
        if action == 'urls':
            self.__action__ = self.__urls__
        elif action == 'renderer':
            self.__action__ = self.__renderer__

    def __call__(self, environment, objects):
        '''Dispatch to the phase method selected in :meth:`__init__`.'''
        return self.__action__(environment, objects)

    @property
    def scheduling_order(self):
        '''The following scheduling orders apply to writers:

        10
           At timestep 10, the writer will create the chunks for forward
           declaration and rendering.

        500
           At timestep 500, the writer will forward-declare urls.

        900
           At timestep 900, the writer will render the objects.
        '''
        # Map each bound phase method to its timestep and look up the one
        # this instance was configured with.
        return {self.__default__: 10
               ,self.__urls__: 500
               ,self.__renderer__: 900}[self.__action__]

    def __default__(self, environment, objects):
        '''Return the url-declaration and rendering chunks.

        Each returned chunk is a fresh instance, so no state is shared with
        this writer.
        '''
        return (environment, objects,
                [self.__class__(environment, objects, 'urls'),
                 self.__class__(environment, objects, 'renderer')])

    @decorators.in_environment('log')
    @decorators.in_environment('urlmapper')
    def __urls__(self, environment, objects):
        '''Forward-declare the url of every object this writer will render.

        The sorted url list is stored in a copy of the environment under
        ``environment['urls'][self.writername]``.
        '''
        urlmapper = environment['urlmapper']
        # Shallow copy so the caller's environment mapping itself is not
        # mutated in place.
        newenv = environment.copy()
        if 'urls' not in newenv:
            newenv['urls'] = {}
        newenv['urls'][self.writername] = ret = []
        for obj in self.obj_list(environment, objects):
            url = urlmapper.url(self.extension, **self.key(obj))
            environment['log'].info(_("%s declared '%s'") %
                    (utils.class_name(self.__class__), url))
            ret.append(url)
        ret.sort()
        return (newenv, objects, [])

    @decorators.in_environment('urlmapper')
    def __renderer__(self, environment, objects):
        '''Render every object returned by :meth:`obj_list` to disk.'''
        if 'urlmapper' not in environment:
            # NOTE(review): the in_environment decorator above appears to
            # guarantee this key already, making this check redundant --
            # confirm before removing.
            error = _('`urlmapper` expected in `environment`')
            raise ValueError(error)
        urlmapper = environment['urlmapper']
        for obj in self.obj_list(environment, objects):
            path = urlmapper.path(self.extension, **self.key(obj))
            # NOTE(review): 'log' is read here without an in_environment
            # guard, unlike __urls__ -- a missing logger would raise KeyError.
            environment['log'].info(_("%s rendered '%s'") %
                    (utils.class_name(self.__class__), path))
            self.render(environment, path, obj)
        return (environment, objects, [])

    @workarounds.abstractproperty
    def extension(self):
        '''The extension that will be used when finding the path/url of an
        object.

        This will be passed to a :class:`firmant.routing.URLMapper` instance.
        '''

    @workarounds.abstractmethod
    def key(self, obj):
        '''Map the object to a dictionary of attributes.

        The attributes will be passed to the :meth:`path` and :meth:`url`
        methods of :class:`firmant.routing.URLMapper`.
        '''

    @workarounds.abstractmethod
    def obj_list(self, environment, objects):
        '''The objects that should be passed to :meth:`render`

        It will be passed the `environment` and `objects` dictionaries that
        were passed to the chunk.
        '''

    @workarounds.abstractmethod
    def render(self, environment, path, obj):
        '''Write the object to the path on filesystem.

        `path` will be a path under the output directory.  `obj` is one of
        the objects returned by obj_list.
        '''

    @property
    def writername(self):
        '''The string displayed when interacting with the user.

        This should be the name of the class they must specify in the
        configuration.  The default value should not be changed.
        '''
        return utils.class_name(self.__class__)
def _setup(self):
    '''Setup the test cases.

    Presumably a doctest fixture hook: `self` looks like a test object whose
    `globs` mapping supplies the global namespace used by the doctests in
    this module -- confirm against the test harness.
    '''
    # Local import keeps firmant.routing out of this module's import-time
    # dependencies.
    from firmant.routing import URLMapper
    # 'outputdir' and 'http://testurl/' match the paths/urls shown in the
    # Writer doctests above.
    self.globs['urlmapper'] = URLMapper('outputdir', 'http://testurl/')
/EQcorrscan-0.4.4.tar.gz/EQcorrscan-0.4.4/eqcorrscan/doc/tutorials/matched-filter.rst | Matched-filter detection
========================
This tutorial will cover using both the match-filter objects, and using the
internal functions within match-filter. The match-filter objects are designed
to simplify meta-data handling allowing for shorter code with fewer mistakes and
therefore more consistent results.
Match-filter objects
--------------------
The match-filter module contains five objects:
- :doc:`Tribe </submodules/core.match_filter.tribe>`
- :doc:`Template </submodules/core.match_filter.template>`
- :doc:`Party </submodules/core.match_filter.party>`
- :doc:`Family </submodules/core.match_filter.family>`
- :doc:`Detection </submodules/core.match_filter.detection>`
The :doc:`Tribe </submodules/core.match_filter.tribe>` object is a container for
multiple :doc:`Template </submodules/core.match_filter.template>` objects.
:doc:`Templates </submodules/core.match_filter.template>` contain the waveforms
of the template alongside the metadata used to generate the template. Both
:doc:`Templates </submodules/core.match_filter.template>` and
:doc:`Tribes </submodules/core.match_filter.tribe>` can be written to disk as
tar archives containing the waveform data in miniseed format, event catalogues
associated with the :doc:`Templates </submodules/core.match_filter.template>`
(if provided) in quakeml format and meta-data in a csv file. These archives
can be read back in or transferred between machines.
The :doc:`Detection </submodules/core.match_filter.detection>`,
:doc:`Family </submodules/core.match_filter.family>` and
:doc:`Party </submodules/core.match_filter.party>` objects are hierarchical,
a single :doc:`Detection </submodules/core.match_filter.detection>` object
describes a single event detection, and contains information regarding how
the detection was made, what time it was made at alongside other useful
information, it does not store the
:doc:`Template </submodules/core.match_filter.template>` object used for the
detection, but does store a reference to the name of the
:doc:`Template </submodules/core.match_filter.template>`.
:doc:`Family </submodules/core.match_filter.family>` objects are containers
for multiple :doc:`Detections </submodules/core.match_filter.detection>` made
using a single :doc:`Template </submodules/core.match_filter.template>`
(name chosen to match the literature). These objects do contain the
:doc:`Template </submodules/core.match_filter.template>` used for the detections,
and as such can be used to re-create the list of detections if necessary.
:doc:`Party </submodules/core.match_filter.party>` objects are containers for
multiple :doc:`Family </submodules/core.match_filter.family>` objects. All
objects in the detection hierarchy have read and write methods - we recommend
writing to tar archives (default) for Party and Family objects, as this will
store all metadata used in detection, which should allow for straightforward
reproduction of results.
Template creation
-----------------
:doc:`Templates </submodules/core.match_filter.template>` have a construct
method which accesses the functions in
:doc:`template_gen </submodules/core.template_gen>`. Template.construct
only has access to methods that work on individual events, and not catalogs; for
that use the Tribe.construct method. For example, we can use the *from_sac*
method to make a Template from a series of SAC files associated with a single
event:
.. code-block:: python
>>> import glob
>>> from eqcorrscan.core.match_filter import Template
>>> import os
>>> from eqcorrscan import tests
>>> # Get the path for the test-data so we can test this
>>> TEST_PATH = os.path.dirname(tests.__file__)
>>> sac_files = glob.glob(TEST_PATH + '/test_data/SAC/2014p611252/*')
>>> # sac_files is now a list of all the SAC files for event id:2014p611252
>>> template = Template().construct(
... method='from_sac', name='test', lowcut=2.0, highcut=8.0,
... samp_rate=20.0, filt_order=4, prepick=0.1, swin='all',
... length=2.0, sac_files=sac_files)
Tribe creation
--------------
As alluded to above, Template.construct only works for individual events, to
make a lot of templates we have to use the Tribe.construct method. The syntax
is similar, but we don't specify names - templates are named according
to their start-time, but you can rename them later if you wish:
.. code-block:: python
>>> from eqcorrscan.core.match_filter import Tribe
>>> from obspy.clients.fdsn import Client
>>> client = Client('NCEDC')
>>> catalog = client.get_events(eventid='72572665', includearrivals=True)
>>> # To speed the example we have a catalog of one event, but you can have
>>> # more, we are also only using the first five picks, again to speed the
>>> # example.
>>> catalog[0].picks = catalog[0].picks[0:5]
>>> tribe = Tribe().construct(
... method='from_client', catalog=catalog, client_id='NCEDC', lowcut=2.0,
... highcut=8.0, samp_rate=20.0, filt_order=4, length=6.0, prepick=0.1,
... swin='all', process_len=3600, all_horiz=True)
Matched-filter detection using a Tribe
--------------------------------------
Both Tribe and Template objects have *detect* methods. These methods call
the main match_filter function. They can be given an un-processed stream and
will complete the appropriate processing using the same processing values stored
in the Template objects. Because Tribe objects can contain Templates with a
range of processing values, this work is completed in groups for groups of
Templates with the same processing values. The Tribe object also has a
client_detect method which will download the appropriate data. Both *detect*
and *client_detect* methods return Party objects.
For example, we can use the Tribe we created above to detect through a day of
data by running the following:
.. code-block:: python
>>> from obspy import UTCDateTime
>>> party, stream = tribe.client_detect(
... client=client, starttime=UTCDateTime(2016, 1, 2),
... endtime=UTCDateTime(2016, 1, 3), threshold=8, threshold_type='MAD',
... trig_int=6, plotvar=False, return_stream=True)
Generating a Party from a Detection csv
---------------------------------------
If you are moving from detections written out as a csv file from an older
version of EQcorrscan, but want to use Party objects now, then this section is
for you!
First, you need to generate a Tribe from the templates you used to make the
detections. Instructions for this are in the
:doc:`Template creation tutorial </tutorials/template-creation>`
section.
Once you have a Tribe, you can generate a Party using the following:
.. code-block:: python
>>> detections = read_detections(detection_file) # doctest:+SKIP
>>> party = Party() # doctest:+SKIP
>>> for template in tribe: # doctest:+SKIP
... template_detections = [d for d in detections
... if d.template_name == template.name]
... family = Family(template=template, detections=template_detections)
... party += family
Lag-calc using a Party
----------------------
Because parties contain Detection and Template information they can be used to
generate re-picked catalogues using lag-calc:
.. code-block:: python
>>> stream = stream.merge().sort(['station'])
>>> repicked_catalog = party.lag_calc(stream, pre_processed=False,
... shift_len=0.2, min_cc=0.4) # doctest:+ELLIPSIS
By using the above examples you can go from a standard catalog available from
data centers, to a matched-filter detected and cross-correlation repicked
catalog in a handful of lines.
Simple example - match-filter.match-filter
------------------------------------------
This example does not work out of the box, you will have to have your own templates
and data, and set things up for this. However, in principle matched-filtering
can be as simple as:
.. code-block:: python
from eqcorrscan.core.match_filter import match_filter
from eqcorrscan.utils import pre_processing
from obspy import read
# Read in and process the daylong data
st = read('continuous_data')
# Use the same filtering and sampling parameters as your template!
st = pre_processing.dayproc(
st, lowcut=2, highcut=10, filt_order=4, samp_rate=50,
starttime=st[0].stats.starttime.date)
# Read in the templates
templates = []
template_names = ['template_1', 'template_2']
for template_file in template_names:
templates.append(read(template_file))
detections = match_filter(
template_names=template_names, template_list=templates, st=st,
threshold=8, threshold_type='MAD', trig_int=6, plotvar=False, cores=4)
This will create a list of detections, which are of class detection. You can
write out the detections to a csv (colon separated) using the detection.write
method, set `append=True` to write all the detections to one file. Beware though,
if this is set and the file already exists, it will just add on to the old file.
.. code-block:: python
for detection in detections:
detection.write('my_first_detections.csv', append=True)
Data gaps and how to handle them
--------------------------------
Data containing gaps can prove problematic for normalized cross-correlation. Because
the correlations are normalized by the standard deviation of the data, if the standard
deviation is low, floating-point rounding errors can occur. EQcorrscan tries to
avoid this in two ways:
1. In the 'eqcorrscan.utils.correlate` (fftw) functions, correlations are not computed
when the variance of the data window is less than 1e-10, or when there are fewer than
`template_len - 1` non-flat data values (e.g. at-least one sample that is not
in a gap), or when the mean of the data multiplied by the standard deviation
of the data is less than 1e-10.
2. The :doc:`pre_processing </submodules/utils.pre_processing>` functions fill gaps prior to processing,
process the data, then edit the data within the gaps to be zeros. During processing
aliased signal will appear in the gaps, so it is important to remove those
artifacts to ensure that gaps contain zeros (which will be correctly identified
by the :doc:`correlate </submodules/utils.correlate>` functions).
As a caveat of point 1: if your data have very low variance, but real data, your data
will be artificially gained by :doc:`pre_processing </submodules/utils.pre_processing>`
to ensure stable correlations.
If you provide data with filled gaps (e.g. you used `st = st.merge(fill_value=0)`) to
either:
* The `detect` method of :doc:`Tribe </submodules/core.match_filter.tribe>`,
* The `detect` method of :doc:`Template </submodules/core.match_filter.template>`,
* :doc:`shortproc </submodules/autogen/eqcorrscan.utils.pre_processing.shortproc>`,
* :doc:`dayproc </submodules/autogen/eqcorrscan.utils.pre_processing.dayproc>`,
Then you will end up with the *wrong* result from the correlation or match_filter
functions. You should provide data with gaps maintained, but merged
(e.g. run `st = st.merge()` before passing the data to those functions).
If you have data that you know contains gaps that have been padded you must remove
the pads and reinstate the gaps.
Memory limitations and what to do about it
------------------------------------------
You may (if you are running large numbers of templates, long data durations, or using
a machine with small memory) run in to errors to do with memory consumption. The
most obvious symptom of this is your computer freezing because it has allocated
all of its RAM, or declaring that it cannot allocate memory. Because EQcorrscan
computes correlations in parallel for multiple templates for the same data period,
it will generate a large number of correlation vectors. At start-up, EQcorrscan
will try to assign the memory it needs (although it then requires a little more
later to do the summation across channels), so you might find that it fills your
memory very early - this is just to increase efficiency and ensure that the memory
is available when needed.
To get around memory limitations you can:
* Reduce the number of templates you run in parallel at once - for example you can
make groups of a number of templates and run that group in parallel, before running
the next group in parallel. This is not much less efficient, unless you have
a machine with more CPU cores than your group-size.
* Reduce the length of data you are correlating at any one time. The default is
to use day-long files, but there is nothing stopping you using shorter waveform
durations.
* Reduce the number of channels in templates to only those that you need. Note,
EQcorrscan will generate vectors of zeros for templates that are missing a
channel that is present in other templates, again for processing efficiency,
if not memory efficiency.
* Reduce your sampling rate. Obviously this needs to be at-least twice as large
as your upper frequency filter, but much above this is wasted data.
The three threshold parameters
------------------------------
EQcorrscan detects both positively and negatively correlated waveforms.
The match-filter routine has three key threshold parameters:
* **threshold_type** can either be MAD, abs or av_chan_corr. MAD stands for Median Absolute
Deviation and is the most commonly used detection statistic in matched-filter studies.
abs is the absolute cross-channel correlation sum, note that if you have different
numbers of channels in your templates then this threshold metric probably isn't for you.
av_chan_corr sets a threshold in the cross-channel correlation sum based on av_chan_corr x number of channels.
* **threshold** is the value used for the above metric.
* **trig_int** is the minimum interval in seconds for a detection using the same template.
If there are multiple detections within this window for a single template then EQcorrscan
will only give the best one (that exceeds the threshold the most).
Advanced example - match-filter-match-filter
--------------------------------------------
In this section we will outline using the templates generated in the first tutorial
to scan for similar earthquakes within a day of data. This small example does not truly exploit the parallel
operations within this package however, so you would be encouraged to think
about where parallel operations occur (*hint, the code can run one template
per CPU*), and why there are --instance and --splits flags in the other
scripts in the github repository (*hint, if you have heaps of memory
and CPUs you can do some brute force day parallelisation!*).
The main processing flow is outlined in the figure below, note the main speedups
in this process are achieved by running multiple templates at once, however this
increases memory usage. If memory is a problem there are flags (mem_issue) in the
match_filter.py source that can be turned on - the codes will then write temporary
files, which is slower, but can allow for more data crunching at once, your trade-off,
your call.
.. image:: processing_flow.png
:width: 600px
:align: center
:alt: processing_flow.png
.. literalinclude:: ../../tutorials/match_filter.py
SLURM example
-------------
When the authors of EQcorrscan work on large projects, we use grid computers with
the SLURM (Simple Linux Utility for Resource Management) job scheduler installed.
To facilitate ease of setup, what follows is an example of how we run this.
.. code-block:: bash
#!/bin/bash
#SBATCH -J MatchTest
#SBATCH -A ##########
#SBATCH --time=12:00:00
#SBATCH --mem=7G
#SBATCH --nodes=1
#SBATCH --output=matchout_%a.txt
#SBATCH --error=matcherr_%a.txt
#SBATCH --cpus-per-task=16
#SBATCH --array=0-49
# Load the required modules here.
module load OpenCV/2.4.9-intel-2015a
module load ObsPy/0.10.3rc1-intel-2015a-Python-2.7.9
module load joblib/0.8.4-intel-2015a-Python-2.7.9
# Run your python script using srun
srun python2.7 LFEsearch.py --splits 50 --instance $SLURM_ARRAY_TASK_ID
Where we use a script (LFEsearch.py) that accepts splits and instance flags,
this section of the script is as follows:
.. code-block:: python
Split=False
instance=False
if len(sys.argv) == 2:
flag=str(sys.argv[1])
if flag == '--debug':
Test=True
Prep=False
elif flag == '--debug-prep':
Test=False
Prep=True
else:
raise ValueError("I don't recognise the argument, I only know --debug and --debug-prep")
elif len(sys.argv) == 5:
# Arguments to allow the code to be run in multiple instances
Split=True
Test=False
Prep=False
args=sys.argv[1:len(sys.argv)]
for i in xrange(len(args)):
if args[i] == '--instance':
instance=int(args[i+1])
print 'I will run this for instance '+str(instance)
elif args[i] == '--splits':
splits=int(args[i+1])
print 'I will divide the days into '+str(splits)+' chunks'
elif not len(sys.argv) == 1:
raise ValueError("I only take one argument, no arguments, or two flags with arguments")
else:
Test=False
Prep=False
Split=False
The full script is not included in EQcorrscan, but is available on request.
| PypiClean |
/HTMLTestRunner-rv-1.1.2.tar.gz/HTMLTestRunner-rv-1.1.2/HTMLTestRunner/static/script.js | output_list = Array();
/* level - 0:Summary; 1:Failed; 2:All */
/* Show or hide test-case rows according to the requested summary level.
   level - 0:Summary (hide all cases); 1:Failed only; 2:Passed only;
   any other value: show all cases.  Rows whose id does not start with
   'ft' (failed test) or 'pt' (passed test) are left untouched. */
function showCase(level) {
    var rows = document.getElementsByTagName("tr");
    for (var i = 0; i < rows.length; i++) {
        var row = rows[i];
        var prefix = row.id.substr(0, 2);
        if (prefix != 'ft' && prefix != 'pt') {
            continue;  // not a test-case row
        }
        var visible;
        if (level == 0) {
            visible = false;
        } else if (level == 1) {
            visible = (prefix == 'ft');
        } else if (level == 2) {
            visible = (prefix == 'pt');
        } else {
            visible = true;
        }
        row.className = visible ? '' : 'hiddenRow';
    }
}
/* Toggle visibility of all test-case rows belonging to one test class.
   cid is the class row id (e.g. 'c1'); count is its number of cases.
   If every existing case row is currently visible, hide them all;
   otherwise reveal them all. */
function showClassDetail(cid, count) {
    var id_list = Array(count);
    var toHide = 1;
    for (var i = 0; i < count; i++) {
        // Case row ids are 'f' + t<class>.<n> for failures,
        // 'p' + t<class>.<n> for passes; try the failure id first.
        var tid0 = 't' + cid.substr(1) + '.' + (i + 1);
        var tid = 'f' + tid0;
        var tr = document.getElementById(tid);
        if (!tr) {
            tid = 'p' + tid0;
            tr = document.getElementById(tid);
        }
        id_list[i] = tid;
        if (tr && tr.className) {
            // At least one row is hidden, so this click should reveal all.
            toHide = 0;
        }
    }
    for (var i = 0; i < count; i++) {
        var tid = id_list[i];
        var tr = document.getElementById(tid);
        if (!tr) {
            // Neither a pass nor a fail row exists for this index; the
            // previous code dereferenced null here and threw.
            continue;
        }
        if (toHide) {
            var div = document.getElementById('div_' + tid);
            if (div) {
                div.style.display = 'none';
            }
            tr.className = 'hiddenRow';
        } else {
            tr.className = '';
        }
    }
}
/* Toggle the detail <div> for a single test case between shown and hidden.
   div_id is the element id of the detail container. */
function showTestDetail(div_id) {
    var details_div = document.getElementById(div_id);
    // The previous code also wrote the new state back into a local
    // variable that was never read again; only the style matters.
    if (details_div.style.display != 'block') {
        details_div.style.display = 'block';
    } else {
        details_div.style.display = 'none';
    }
}
/CustomIntents-1.0.0.tar.gz/CustomIntents-1.0.0/LICENSE.md | GNU AFFERO GENERAL PUBLIC LICENSE
Version 3, 19 November 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU Affero General Public License is a free, copyleft license for
software and other kinds of works, specifically designed to ensure
cooperation with the community in the case of network server software.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
our General Public Licenses are intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
Developers that use our General Public Licenses protect your rights
with two steps: (1) assert copyright on the software, and (2) offer
you this License which gives you legal permission to copy, distribute
and/or modify the software.
A secondary benefit of defending all users' freedom is that
improvements made in alternate versions of the program, if they
receive widespread use, become available for other developers to
incorporate. Many developers of free software are heartened and
encouraged by the resulting cooperation. However, in the case of
software used on network servers, this result may fail to come about.
The GNU General Public License permits making a modified version and
letting the public access it on a server without ever releasing its
source code to the public.
The GNU Affero General Public License is designed specifically to
ensure that, in such cases, the modified source code becomes available
to the community. It requires the operator of a network server to
provide the source code of the modified version running there to the
users of that server. Therefore, public use of a modified version, on
a publicly accessible server, gives the public access to the source
code of the modified version.
An older license, called the Affero General Public License and
published by Affero, was designed to accomplish similar goals. This is
a different license, not a version of the Affero GPL, but Affero has
released a new version of the Affero GPL which permits relicensing under
this license.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU Affero General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Remote Network Interaction; Use with the GNU General Public License.
Notwithstanding any other provision of this License, if you modify the
Program, your modified version must prominently offer all users
interacting with it remotely through a computer network (if your version
supports such interaction) an opportunity to receive the Corresponding
Source of your version by providing access to the Corresponding Source
from a network server at no charge, through some standard or customary
means of facilitating copying of software. This Corresponding Source
shall include the Corresponding Source for any work covered by version 3
of the GNU General Public License that is incorporated pursuant to the
following paragraph.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the work with which it is combined will remain governed by version
3 of the GNU General Public License.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU Affero General Public License from time to time. Such new versions
will be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU Affero General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU Affero General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU Affero General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If your software can interact with users remotely through a computer
network, you should also make sure that it provides a way for users to
get its source. For example, if your program is a web application, its
interface could display a "Source" link that leads users to an archive
of the code. There are many ways you could offer source, and different
solutions will be better for different programs; see section 13 for the
specific requirements.
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU AGPL, see
<https://www.gnu.org/licenses/>.
| PypiClean |
/NewTestPackage-0.0.2.tar.gz/NewTestPackage-0.0.2/README.md | # NewTestPackage
[](https://pypi.python.org/pypi/NewTestPackage)
[](https://anaconda.org/conda-forge/NewTestPackage)
**to just test geo**
- Free software: MIT license
- Documentation: https://Maherhassanali.github.io/NewTestPackage
## Features
- TODO
## Credits
This package was created with [Cookiecutter](https://github.com/cookiecutter/cookiecutter) and the [giswqs/pypackage](https://github.com/giswqs/pypackage) project template.
| PypiClean |
/Nano-Assault-1.0.tar.gz/Nano-Assault-1.0/nanoassault/run_level.py |
# (C) 2017, Eike Jesinghaus
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from .bullets import BulletPlayer
from .collectibles import Energy
from .constants import SCALEFAC, FPS, TILEX, TILEY
import pygame
pygame.init()
def quit_():
    """Terminate the game process with exit status 0.

    Raises SystemExit directly instead of calling the ``exit()`` helper:
    ``exit`` is injected by the ``site`` module and is not guaranteed to
    exist (e.g. under ``python -S`` or in frozen builds), while raising
    SystemExit(0) has exactly the same effect.
    """
    raise SystemExit(0)
def gameover():
    """Handle the player's defeat.

    Currently there is no game-over screen; losing simply ends the
    program by delegating to quit_().
    """
    quit_()
def run_level(screen, level, player):
    """Run the main loop for a single level (room).

    Blits the level background, then loops forever: handle input, move
    sprites, resolve collisions, redraw.  Returns the adjacent level
    (``level.up/left/down/right[0]``) when the player walks out through
    a room edge; calls gameover() when ``player.update()`` reports death.
    """
    # Scaled UI heart sprite for the HP bar.
    ui_heart = pygame.image.load(os.path.join(os.path.dirname(__file__), "img", "ui_heart.png"))
    ui_heart = pygame.transform.scale(ui_heart, (ui_heart.get_width()*SCALEFAC, ui_heart.get_height()*SCALEFAC))
    sound_pickup = pygame.mixer.Sound(os.path.join(os.path.dirname(__file__), "sound", "pickup.wav"))
    sound_open = pygame.mixer.Sound(os.path.join(os.path.dirname(__file__), "sound", "open.wav"))
    sound_hit = pygame.mixer.Sound(os.path.join(os.path.dirname(__file__), "sound", "hit0.wav"))
    clock = pygame.time.Clock()
    bg = level.generate_bg()
    # Sprite groups; RenderClear supports clear()/draw() background-restore redraws.
    collectibles = pygame.sprite.RenderClear()
    doors = pygame.sprite.RenderClear()
    players = pygame.sprite.RenderClear()
    bullets = pygame.sprite.RenderClear()
    enemies = pygame.sprite.RenderClear()
    walls = pygame.sprite.RenderClear()
    players.add(player)
    walls.add(*level.generate_walls())
    walls.draw(bg)  # walls are static, so bake them into the background once
    enemies.add(*level.generate_enemies())
    doors.add(*level.generate_doors())
    screen.blit(bg, (0, 0))
    doors.draw(screen)
    while True:
        # ---- input: WASD moves, arrow keys shoot, Escape quits ----
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                quit_()
            elif event.type == pygame.KEYDOWN:
                if event.key == pygame.K_ESCAPE:
                    quit_()
                elif event.key == pygame.K_w:
                    player.motion[1] = -1
                elif event.key == pygame.K_s:
                    player.motion[1] = 1
                elif event.key == pygame.K_a:
                    player.motion[0] = -1
                elif event.key == pygame.K_d:
                    player.motion[0] = 1
                elif event.key == pygame.K_UP and not player.bullet_cooldown:
                    bullets.add(BulletPlayer(player.rect.center, (0, -1)))
                    player.bullet_cooldown = 30
                elif event.key == pygame.K_DOWN and not player.bullet_cooldown:
                    bullets.add(BulletPlayer(player.rect.center, (0, 1)))
                    player.bullet_cooldown = 30
                elif event.key == pygame.K_LEFT and not player.bullet_cooldown:
                    bullets.add(BulletPlayer(player.rect.center, (-1, 0)))
                    player.bullet_cooldown = 30
                elif event.key == pygame.K_RIGHT and not player.bullet_cooldown:
                    bullets.add(BulletPlayer(player.rect.center, (1, 0)))
                    player.bullet_cooldown = 30
            elif event.type == pygame.KEYUP:
                # On release, fall back to the opposite direction key if it is still held.
                if event.key == pygame.K_w:
                    player.motion[1] = 0
                    if pygame.key.get_pressed()[pygame.K_s]:
                        player.motion[1] = 1
                elif event.key == pygame.K_s:
                    player.motion[1] = 0
                    if pygame.key.get_pressed()[pygame.K_w]:
                        player.motion[1] = -1
                elif event.key == pygame.K_a:
                    player.motion[0] = 0
                    if pygame.key.get_pressed()[pygame.K_d]:
                        player.motion[0] = 1
                elif event.key == pygame.K_d:
                    player.motion[0] = 0
                    if pygame.key.get_pressed()[pygame.K_a]:
                        player.motion[0] = -1
        # ---- update & collisions ----
        # Erase last frame's sprites by restoring the background under them.
        doors.clear(screen, bg)
        collectibles.clear(screen, bg)
        enemies.clear(screen, bg)
        players.clear(screen, bg)
        bullets.clear(screen, bg)
        if not player.update():  # update() returns falsy when the player has died
            gameover()
        for w in pygame.sprite.spritecollide(player, walls, False):
            player.coll_obstacle(w)
        for d in pygame.sprite.spritecollide(player, doors, False):
            player.coll_obstacle(d)
            # Touching a door in a cleared level while holding a key unlocks it.
            if level.cleared and player.keys > 0:
                sound_open.play()
                player.keys -= 1
                d.kill()
                level.unlocked = True
        for c in pygame.sprite.spritecollide(player, collectibles, True):
            c.on_collect(player)
            sound_pickup.play()
        # Leaving through a room edge: wrap the player to the opposite edge
        # and hand control to the adjacent level.
        if player.rect.top < 16*SCALEFAC:
            player.rect.bottom = screen.get_height()-24*SCALEFAC
            return level.up[0]
        elif player.rect.left < 16*SCALEFAC:
            player.rect.right = screen.get_width()-16*SCALEFAC
            return level.left[0]
        elif player.rect.bottom > screen.get_height()-24*SCALEFAC:
            player.rect.top = 16*SCALEFAC
            return level.down[0]
        elif player.rect.right > screen.get_width()-16*SCALEFAC:
            player.rect.left = 16*SCALEFAC
            return level.right[0]
        enemies.update(player, bullets)
        # Room counts as cleared once every enemy and drop is gone.
        if len(enemies) == 0 and len(collectibles) == 0:
            if not level.cleared:
                if len(level.enemies):
                    sound_open.play()
                level.clear(collectibles, doors)
        for e in pygame.sprite.spritecollide(player, enemies, False):
            sound_hit.play()
            e.contact(player)
        c = pygame.sprite.groupcollide(enemies, walls, False, False)
        for i in c:
            i.coll_obstacle(c[i])
        c = pygame.sprite.groupcollide(enemies, doors, False, False)
        for i in c:
            i.coll_obstacle(c[i])
        bullets.update()
        c = pygame.sprite.groupcollide(bullets, enemies, False, False)
        for i in c:
            if i.byplayer:  # only player bullets damage enemies
                sound_hit.play()
                i.hit(c[i][0])
                i.kill()
        for b in pygame.sprite.spritecollide(player, bullets, False):
            if not b.byplayer:  # only enemy bullets damage the player
                b.hit(player)
                b.kill()
        # ---- draw ----
        doors.draw(screen)
        collectibles.draw(screen)
        enemies.draw(screen)
        players.draw(screen)
        bullets.draw(screen)
        # Status bar background, then one heart per hit point.
        pygame.draw.rect(screen, (10, 10, 10), (0, TILEY*16*SCALEFAC, TILEX*16*SCALEFAC, 8*SCALEFAC), 0)
        for i in range(0, player.hp+1):  # NOTE(review): draws hp+1 hearts — confirm hp is 0-based
            screen.blit(ui_heart, (i*SCALEFAC*8, TILEY*16*SCALEFAC))
        pygame.display.update()
        clock.tick_busy_loop(FPS)
/Genetic_Algorithm_VEDA-0.0.3.tar.gz/Genetic_Algorithm_VEDA-0.0.3/__init__.py | from multiprocessing.sharedctypes import copy
import os
from quopri import decodestring
from random import *
import random
import datetime
import bisect
class Stack:
    """Minimal LIFO stack backed by a Python list.

    Kept with its original (non-PEP8) method names because other code in
    this module calls isEmpty()/size().
    """

    def __init__(self):
        self.items = []

    def isEmpty(self):
        """Return True when the stack holds no items."""
        # Truthiness test instead of comparing against a fresh empty list.
        return not self.items

    def push(self, item):
        """Place *item* on top of the stack."""
        self.items.append(item)

    def pop(self):
        """Remove and return the top item (IndexError when empty)."""
        return self.items.pop()

    def peek(self):
        """Return the top item without removing it (IndexError when empty)."""
        # Negative indexing replaces items[len(items)-1].
        return self.items[-1]

    def size(self):
        """Return the number of stored items."""
        return len(self.items)
class Genoma:
    """One candidate circuit genome for a Cartesian-GP-style evolution.

    The genotype is a list of ``"in1-in2"`` strings; each entry describes a
    two-input NAND gate whose inputs index either a primary circuit input
    (0..nInputs-1) or the output of an earlier gate (nInputs + gene index).
    The last gene is the circuit output.  Fitness is the fraction of the
    2**nInputs truth-table rows on which the circuit matches the parity-style
    target implemented by gpinand().
    """
    def __init__(self, numberOfGenes = 30, nInputs = 4, rateMutation = 0.1):
        self.genotipo = []            # list of "a-b" gate descriptors
        self.copyGenotipo = []        # working copy with dead genes marked "xx-xx"
        self.numberOfGenes = numberOfGenes
        self.faultChance = 1          # percent chance a gate misbehaves in the *WithFaults* methods
        self.nInputs = nInputs
        self.nOutputs = 2**nInputs    # number of truth-table rows
        self.fitness = 0.0
        self.noiseFitness = 0.0       # fitness plus uniform noise, see calculateNoiseFitness()
        self.rateMutation = rateMutation
        self.deadGenesRate = 0.6      # fraction of dead genes mutated by mutateWithParam()
        self.activeGenesRate = 0.4    # fraction of active genes mutated by mutateWithParam()
        self.flagDeadActive = 0       # when 1, fitness methods first recompute dead genes
        self.indexDeadGenes = []      # gene indices not reaching the output
        self.indexActiveGenes = []    # gene indices reaching the output
        self.Stochasticity = 0        # amplitude of the noise added to the fitness
        self.ToEvaluate = []          # per-gene flags: True when the gene feeds the output
    def setGenotipo(self, a):
        """Replace the genotype with a copy of list *a*."""
        self.genotipo = a.copy()
    def fill_Initial_Genome(self):
        """Pad the genotype with empty placeholders up to numberOfGenes."""
        for i in range (0,self.numberOfGenes):
            self.genotipo.append("")
    def generate_parent(self):
        """Randomize every gene; each input may reference any earlier signal."""
        self.fill_Initial_Genome() # fill the genome with the rigth length
        for i in range(0,self.numberOfGenes):
            # randint is inclusive on both ends, so gene i may use any primary
            # input or the output of any gene that precedes it.
            in1 = randint(0,i+self.nInputs-1)
            in2 = randint(0,i+self.nInputs-1)
            sin1 = str(in1)
            sin2 = str(in2)
            gene = sin1 +"-"+ sin2
            self.genotipo.pop(i)
            self.genotipo.insert(i, gene)
    def identify_deadGenes(self):
        """Fill self.ToEvaluate with True for every gene that reaches the output.

        Sweeps backwards from the last gene (always evaluated) and flags the
        genes it references, transitively.
        NOTE(review): ToEvaluate is appended to but never reset, so repeated
        calls keep growing the list; only indices < numberOfGenes are read,
        but confirm callers do not rely on its length.
        """
        for i in range (0,self.numberOfGenes-1):
            self.ToEvaluate.append(False)
        self.ToEvaluate.append(True)   # the last gene is the circuit output
        p = self.numberOfGenes-1
        while p>=0:
            if self.ToEvaluate[p]:
                inputs = self.genotipo[p].split("-")
                input1 = int(inputs[0])
                input2 = int(inputs[1])
                # Subtracting nInputs maps a signal index to a gene index;
                # a negative result means the signal is a primary input.
                x = input1 - self.nInputs
                y = input2 - self.nInputs
                if(x >= 0):
                    self.ToEvaluate[x] = True
                if(y >= 0):
                    self.ToEvaluate[y] = True
            p-=1
    def withdraw_deadGenes(self):
        """Mark genes that feed nothing as "xx-xx" and collect active indices.

        Repeatedly scans the genome, killing any gene (except the last) whose
        output no other gene references, until a full pass removes nothing
        (detected by two consecutive equal dead-gene counts on the stack).
        NOTE(review): indexActiveGenes is appended without being reset, so a
        second call on the same instance duplicates entries — confirm callers
        only invoke this once per genome.
        """
        self.copyGenotipo = self.genotipo.copy()
        count = 1   # NOTE(review): unused
        s = Stack()
        while(True):
            self.indexDeadGenes = []
            for j in range(0,self.numberOfGenes-1):
                # Search every gene for a reference to gene j's output.
                for i in range (0,self.numberOfGenes):
                    nin = self.copyGenotipo[i].split("-")
                    nin1 = nin[0]   # first input of gene i
                    nin2 = nin[1]   # second input of gene i
                    elementSearch = str(j+self.nInputs)
                    if(elementSearch == nin1 or elementSearch == nin2):
                        break
                    elif(i == self.numberOfGenes-1):
                        # No gene references j: mark it dead.
                        self.copyGenotipo[j] = "xx-xx"
                        self.indexDeadGenes.append(j)
            s.push(len(self.indexDeadGenes))
            if s.size() == 2:
                felement = s.pop()
                selement = s.pop()
                if(selement == felement):
                    break   # fixed point: no new dead genes found
                s.push(felement)
        for i in range(0,self.numberOfGenes):
            if(self.copyGenotipo[i] != "xx-xx"):
                self.indexActiveGenes.append(i)
    def gpinand(self, l):
        """Target function: 1 when the number of set bits in *l* is odd (parity)."""
        count = 0
        n = len(l)
        for i in range(0,n):
            if(l[i] == 1):
                count+=1
        if(count%2 == 0):
            return 0
        else:
            return 1
    def gpinandOld(self, in1,in2,in3):
        """Legacy 3-input version of gpinand(): odd-parity of three bits."""
        count = 0
        if(in1 == 1):
            count+=1
        if(in2 == 1):
            count+=1
        if(in3 == 1):
            count+=1
        if(count%2 == 0):
            return 0
        else:
            return 1
    def NAND(self,a,b):
        """Two-input NAND gate."""
        if(not(a and b)):
            return 1
        else:
            return 0
    def badNAND(self,a,b):
        """Faulty NAND used for fault injection: returns the inverted output."""
        if(not(a and b)):
            return 0
        else:
            return 1
    def getCartesianProduct(self,l):
        """Return the Cartesian product of the iterables in *l* as a list of lists."""
        CartesianProduct = [[]]
        for iterable in l:
            CartesianProduct = [a+[b] for a in CartesianProduct for b in iterable]
        return CartesianProduct
    def calculateFitnessWithFaultsOld(self):
        """Legacy fault-injected fitness for exactly four inputs.

        NOTE(review): calls gpinandOld() with four inputs although that method
        accepts only three — this raises TypeError if executed; the non-Old
        methods below supersede it.
        """
        if(self.flagDeadActive == 1):
            self.withdraw_deadGenes()
        fitnessCounter = 0
        for i0 in range(0,2):
            for i1 in range(0,2):
                for i2 in range(0,2):
                    for i3 in range(0,2):
                        valuesTable = {'0':i0, '1':i1, '2':i2, '3':i3}
                        i = 4
                        for element in self.genotipo:
                            elements = element.split("-")
                            in1 = elements[0]
                            in2 = elements[1]
                            # faultChance% chance this gate misbehaves.
                            x = randint(0, 100)
                            if(x<=self.faultChance):
                                out = self.badNAND(valuesTable[in1], valuesTable[in2])
                            else:
                                out = self.NAND(valuesTable[in1], valuesTable[in2])
                            si = str(i)
                            valuesTable[si] = out
                            i+=1
                        if(valuesTable[si] == self.gpinandOld(valuesTable['0'],valuesTable['1'],valuesTable['2'],valuesTable['3'])):
                            fitnessCounter += 1
        self.fitness = float(fitnessCounter/self.nOutputs)
    def calculateFitnessOld(self):
        """Legacy fitness over three inputs only.

        NOTE(review): divides by self.nOutputs (= 2**nInputs) while iterating
        only 8 rows, so the normalization is correct only when nInputs == 3.
        """
        if(self.flagDeadActive == 1):
            self.withdraw_deadGenes()
        fitnessCounter = 0
        for i0 in range(0,2):
            for i1 in range(0,2):
                for i2 in range(0,2):
                    valuesTable = {'0':i0, '1':i1, '2':i2}
                    i = 3
                    for element in self.genotipo:
                        elements = element.split("-")
                        in1 = elements[0]
                        in2 = elements[1]
                        x = randint(0, 100)   # NOTE(review): unused here
                        out = self.NAND(valuesTable[in1], valuesTable[in2])
                        si = str(i)
                        valuesTable[si] = out
                        i+=1
                    if(valuesTable[si] == self.gpinandOld(valuesTable['0'],valuesTable['1'],valuesTable['2'])):
                        fitnessCounter += 1
        self.fitness = float(fitnessCounter/self.nOutputs)
    def calculateFitness(self):
        """Evaluate every truth-table row and set self.fitness to the match ratio."""
        if(self.flagDeadActive == 1):
            self.withdraw_deadGenes()
        fitnessCounter = 0
        l = []
        dic = dict()
        for i in range(0,self.nInputs):
            subL = [0,1]
            l.append(subL)
        TrueTable = self.getCartesianProduct(l)
        for i in range(0,self.nOutputs):
            # Load the i-th input row into the signal table.
            ithTrueTable = TrueTable[i]
            for input in range(0,self.nInputs):
                sinput = str(input)
                dic[sinput] = ithTrueTable[input]
            indexOut = self.nInputs
            # Evaluate every gate in topological (genome) order.
            for element in self.genotipo:
                elements = element.split("-")
                in1 = elements[0]
                in2 = elements[1]
                out = self.NAND(dic[in1], dic[in2])
                sindexOut = str(indexOut)
                dic[sindexOut] = out
                indexOut+=1
            # Compare the last gate's output against the parity target.
            lGPINAND = []
            for m in range(0,self.nInputs):
                sm = str(m)
                value = dic[sm]
                lGPINAND.append(value)
            if(dic[sindexOut] == self.gpinand(lGPINAND)):
                fitnessCounter += 1
        self.fitness = float(fitnessCounter/self.nOutputs)
    def calculateFitnessNew(self):
        """Like calculateFitness(), but skips genes flagged dead in ToEvaluate.

        Requires identify_deadGenes() to have been called first.
        """
        fitnessCounter = 0
        l = []
        dic = dict()
        for i in range(0,self.nInputs):
            subL = [0,1]
            l.append(subL)
        TrueTable = self.getCartesianProduct(l)
        for i in range(0,self.nOutputs):
            ithTrueTable = TrueTable[i]
            for input in range(0,self.nInputs):
                sinput = str(input)
                dic[sinput] = ithTrueTable[input]
            indexOut = self.nInputs
            position = 0
            for element in self.genotipo:
                if(self.ToEvaluate[position]):
                    elements = element.split("-")
                    in1 = elements[0]
                    in2 = elements[1]
                    out = self.NAND(dic[in1], dic[in2])
                    sindexOut = str(indexOut)
                    dic[sindexOut] = out
                indexOut+=1
                position+=1
            lGPINAND = []
            for m in range(0,self.nInputs):
                sm = str(m)
                value = dic[sm]
                lGPINAND.append(value)
            if(dic[sindexOut] == self.gpinand(lGPINAND)):
                fitnessCounter += 1
        self.fitness = float(fitnessCounter/self.nOutputs)
    def calculateFitnessWithFaults(self):
        """Fitness with fault injection: each gate has faultChance% chance of inverting."""
        if(self.flagDeadActive == 1):
            self.withdraw_deadGenes()
        fitnessCounter = 0
        l = []
        dic = dict()
        for i in range(0,self.nInputs):
            subL = [0,1]
            l.append(subL)
        TrueTable = self.getCartesianProduct(l)
        for i in range(0,self.nOutputs):
            ithTrueTable = TrueTable[i]
            for input in range(0,self.nInputs):
                sinput = str(input)
                dic[sinput] = ithTrueTable[input]
            indexOut = self.nInputs
            for element in self.genotipo:
                elements = element.split("-")
                in1 = elements[0]
                in2 = elements[1]
                x = randint(0, 100)
                if(x<=self.faultChance):
                    out = self.badNAND(dic[in1], dic[in2])
                else:
                    out = self.NAND(dic[in1], dic[in2])
                sindexOut = str(indexOut)
                dic[sindexOut] = out
                indexOut+=1
            lGPINAND = []
            for m in range(0,self.nInputs):
                sm = str(m)
                value = dic[sm]
                lGPINAND.append(value)
            if(dic[sindexOut] == self.gpinand(lGPINAND)):
                fitnessCounter += 1
        self.fitness = float(fitnessCounter/self.nOutputs)
    def calculateFitnessWithFaultsNew(self):
        """Fault-injected fitness that skips dead genes (see calculateFitnessNew)."""
        fitnessCounter = 0
        l = []
        dic = dict()
        for i in range(0,self.nInputs):
            subL = [0,1]
            l.append(subL)
        TrueTable = self.getCartesianProduct(l)
        for i in range(0,self.nOutputs):
            ithTrueTable = TrueTable[i]
            for input in range(0,self.nInputs):
                sinput = str(input)
                dic[sinput] = ithTrueTable[input]
            indexOut = self.nInputs
            position = 0
            for element in self.genotipo:
                if(self.ToEvaluate[position]):
                    elements = element.split("-")
                    in1 = elements[0]
                    in2 = elements[1]
                    x = randint(0, 100)
                    if(x<=self.faultChance):
                        out = self.badNAND(dic[in1], dic[in2])
                    else:
                        out = self.NAND(dic[in1], dic[in2])
                    sindexOut = str(indexOut)
                    dic[sindexOut] = out
                    indexOut+=1
                position+=1
            lGPINAND = []
            for m in range(0,self.nInputs):
                sm = str(m)
                value = dic[sm]
                lGPINAND.append(value)
            if(dic[sindexOut] == self.gpinand(lGPINAND)):
                fitnessCounter += 1
        self.fitness = float(fitnessCounter/self.nOutputs)
    def calculateNoiseFitness(self):
        """Set noiseFitness = fitness + uniform noise in [-Stochasticity, Stochasticity]."""
        noise = random.uniform(-self.Stochasticity,self.Stochasticity)
        noise = round(noise,4)
        self.noiseFitness = self.fitness + noise
    def copyGene(self, destiny):
        """Copy this genome's state into *destiny*.

        NOTE(review): ToEvaluate is not copied — confirm that is intentional
        for callers that later use calculateFitnessNew() on the copy.
        """
        destiny.genotipo = self.genotipo.copy()
        destiny.copyGenotipo = self.copyGenotipo.copy()
        destiny.fitness = self.fitness
        destiny.noiseFitness = self.noiseFitness
        destiny.flagDeadActive = self.flagDeadActive
        destiny.indexDeadGenes = self.indexDeadGenes.copy()
        destiny.indexActiveGenes = self.indexActiveGenes.copy()
        destiny.nOutputs = self.nOutputs
    def mutateWithParam(self):
        """Return a child mutated at different rates for dead and active genes.

        NOTE(review): references self.index1..self.index4, which are never
        defined in this class — executing those branches raises AttributeError.
        NOTE(review): the dead-gene branch zero-pads inputs to two characters
        and slices [0:2]/[2:4], a fixed-width layout that does not match the
        "a-b" format produced by generate_parent() — confirm before use.
        """
        childGenes = Genoma() # a copy of the parent that will be mutated
        self.copyGene(childGenes)
        deadMutationIndex = int(len(self.indexDeadGenes) * self.deadGenesRate) # portion of dead genes to mutate
        i=0
        while(i < deadMutationIndex):
            indexMut = random.sample(self.indexDeadGenes, 1)
            index = int(indexMut[0])
            # Pick two distinct candidate input signals valid at this position.
            newGene,alternate = random.sample(list(range(0,index+self.nInputs)),2)
            if newGene < 10:
                newGene = "0"+str(newGene)
            newGene = str(newGene)
            if alternate < 10:
                alternate = "0"+str(alternate)
            alternate = str(alternate)
            inputPosition = randint(0,1) # which input of the gene to mutate (first or second)
            if inputPosition == 0: # mutate the first input
                if index == self.index1 or index == self.index2 or index == self.index3 or index == self.index4:
                    return self # mutating an input index: return the parent unchanged
                if newGene == childGenes.genotipo[index][0:2]:
                    childGenes.genotipo[index] = alternate + str(childGenes.genotipo[index][2:4])
                else:
                    childGenes.genotipo[index] = newGene + str(childGenes.genotipo[index][2:4])
            else: # mutate the second input
                if newGene == childGenes.genotipo[index][2:4]:
                    childGenes.genotipo[index] = str(childGenes.genotipo[index][0:2]) + alternate
                else:
                    childGenes.genotipo[index] = str(childGenes.genotipo[index][0:2]) + newGene
            i+=1
        activeMutationIndex = max(2,int(len(self.indexActiveGenes) * self.activeGenesRate) ) # portion of active genes to mutate (at least 2)
        j = 0
        while(j < activeMutationIndex):
            indexMut = random.sample(self.indexActiveGenes, 1)
            index = int(indexMut[0])
            newGene,alternate = random.sample(list(range(0,index+self.nInputs)),2)
            newGene = str(newGene)
            alternate = str(alternate)
            inputPosition = randint(0,1) # which input of the gene to mutate
            if inputPosition == 0:
                if index == self.index1 or index == self.index2 or index == self.index3 or index == self.index4:
                    return self
                if newGene == childGenes.genotipo[index][0:2]:
                    childGenes.genotipo[index] = alternate + str(childGenes.genotipo[index][2:4])
                else:
                    childGenes.genotipo[index] = newGene + str(childGenes.genotipo[index][2:4])
            else:
                if newGene == childGenes.genotipo[index][2:4]:
                    childGenes.genotipo[index] = str(childGenes.genotipo[index][0:2]) + alternate
                else:
                    childGenes.genotipo[index] = str(childGenes.genotipo[index][0:2]) + newGene
            j +=1
        return childGenes
    def mutate(self):
        """Return a mutated copy: rateMutation * numberOfGenes random point mutations.

        Each mutation rewires one input of one gene to a different valid
        signal.  NOTE(review): randint starts at 1, so gene 0 is never
        mutated — confirm whether that is intentional.
        """
        childGenes = Genoma() # a copy of the parent that will be mutated
        self.copyGene(childGenes)
        numberOfMutations = max(self.numberOfGenes*self.rateMutation,1)
        for i in range(0,int(numberOfMutations)):
            indexMut = randint(1,self.numberOfGenes-1)
            # Two distinct candidates guarantee the gene actually changes.
            newGene,alternate = random.sample(list(range(0,indexMut+self.nInputs)),2)
            newGene = str(newGene)
            alternate = str(alternate)
            whichInput = randint(0,1)
            inputs = childGenes.genotipo[indexMut].split("-")
            input1 = inputs[0]
            input2 = inputs[1]
            if(whichInput == 0):
                if newGene == input1:
                    childGenes.genotipo[indexMut] = alternate + "-" + input2
                else:
                    childGenes.genotipo[indexMut] = newGene + "-" + input2
            else:
                if newGene == input2:
                    childGenes.genotipo[indexMut] = input1 + "-" + alternate
                else:
                    childGenes.genotipo[indexMut] = input1 + "-" + newGene
        return childGenes
class GeneticAlgorithm():
    """(1 + y) evolutionary strategy driving Genoma candidates.

    Each generation mutates the best parent ``y`` times, re-evaluates
    fitness, and keeps the best genome (ties broken by fewer active genes).
    """
    def __init__(self, step = 1/16, alpha = 0, y = 10, maxGeneration = 400000):
        self.step = step
        # NOTE(review): *alpha* is accepted but never stored or used.
        self.y = y                     # children generated per generation
        self.startTime = datetime.datetime.now()
        self.data_atual = datetime.datetime.today()
        self.totalGeneration = 0
        self.countGeneration = 0
        self.maxGeneration = maxGeneration
        self.histogram= []
    def display(self, guess, fitness, noiseFitness, totalGeneration):
        """Print the genome, its fitness values, generation and elapsed time.

        NOTE(review): the format string has five placeholders but six
        arguments are passed (self.totalGeneration twice); the extra argument
        is silently ignored by str.format.  The *totalGeneration* parameter
        is shadowed by self.totalGeneration.
        """
        sguess = ' '.join(guess)
        timeDiff = datetime.datetime.now() - self.startTime
        print("{0}\t {1}\t {2}\t Geração: {3} Tempo: {4}\n ".format(sguess, fitness, round(noiseFitness, 4), self.totalGeneration, str(timeDiff), self.totalGeneration))
    def saveNetlists(self, generation, fitness,noiseFitness ,countGeneration):
        """Append the improved genome plus metadata to "Netlists improved.txt"."""
        fImprovedNetlist = open("Netlists improved.txt", 'a', encoding='utf-8') # file collecting improved genomes
        data_atual = datetime.datetime.today()
        sbestParent = ' '.join(generation) # serialize the genome as a single line
        appendFile = sbestParent+" at "+str(data_atual)+ " " + str(fitness) + " " + str(noiseFitness) + " Geração: "+str(countGeneration) + "\n" # record line
        fImprovedNetlist.write(appendFile)
        fImprovedNetlist.close()
    def makeHistgram(self, childFitness): # build the histogram list
        """Insert a fitness value into the sorted histogram list.

        NOTE(review): values are stored as strings, so the list is sorted
        lexicographically, not numerically — confirm downstream consumers.
        """
        bisect.insort(self.histogram,str(childFitness)) # bisect keeps the list sorted on insert
    def saveHistogram(self):
        """Append the histogram (comma-separated) to "histgramArray.txt"."""
        fHistogram = open("histgramArray.txt", 'a', encoding='utf-8')
        sHistogramList = ','.join(self.histogram)
        appendFile = sHistogramList
        fHistogram.write(appendFile)
        fHistogram.close()
    def getBestGenomeWithSize(self, listChild):
        """Return the genome with the best noiseFitness; ties prefer fewer active genes."""
        bestChild = listChild[0]
        for child in listChild:
            if (child.noiseFitness > bestChild.noiseFitness):
                bestChild = child
            elif((child.noiseFitness == bestChild.noiseFitness) and (len(child.indexActiveGenes) <= len(bestChild.indexActiveGenes))):
                bestChild = child
        return bestChild
    def getBestGenome(self, listChild):
        """Return the genome with the highest (noise-free) fitness."""
        bestChild = listChild[0]
        for child in listChild:
            if(child.fitness > bestChild.fitness):
                bestChild = child
        return bestChild
    def evolution(self):
        """Run the main evolutionary loop with fault-injected fitness.

        Fitness is averaged over nd=20 noisy evaluations per genome.  Stops
        at maxGeneration, or after 10000 consecutive generations at
        fitness >= 1.
        """
        bestParent = Genoma()
        bestParent.generate_parent() # generate the first parent
        # bestParent.calculateFitness()
        bestParent.calculateFitnessWithFaults()
        bestParent.calculateNoiseFitness()
        self.display(bestParent.genotipo, bestParent.fitness,bestParent.noiseFitness, self.totalGeneration)
        listGenomes = []
        ffc = 0   # counts consecutive generations at full fitness
        reference = Genoma()
        bestParent.copyGene(reference)
        nd = 20   # noisy evaluations averaged per fitness estimate
        while True:
            self.totalGeneration = self.totalGeneration + 1
            listGenomes.clear()
            cf = 0
            for i in range(0,nd):
                bestParent.calculateFitnessWithFaults()
                cf = cf + bestParent.fitness
            bestParent.fitness = cf/nd
            bestParent.calculateNoiseFitness()
            listGenomes.append(bestParent)
            for i in range(0, self.y):
                child = Genoma()
                bestParent.mutate().copyGene(child)
                #child.calculateFitness()
                #child.calculateFitnessWithFaults()
                cf = 0
                for i in range(0,nd):
                    child.calculateFitnessWithFaults()
                    cf = cf + child.fitness
                child.fitness = cf/nd
                child.calculateNoiseFitness()
                listGenomes.append(child)
            self.getBestGenomeWithSize(listGenomes).copyGene(bestParent)
            if(self.totalGeneration % 1000 == 0):
                self.display(bestParent.genotipo, bestParent.fitness,bestParent.noiseFitness,self.totalGeneration)
            # For debugging: Show the mutations
            # ind = 0
            # for i,j in zip(bestParent.genotipo,reference.genotipo):
            #     if i != j:
            #         print(ind+4,':',j,'->',i)
            #     ind += 1
            # bestParent.copyGene(reference)
            # print('\n')
            if(self.totalGeneration>=self.maxGeneration):
                break
            if (bestParent.fitness >= 1): # end after 10000 generations at full fitness
                ffc += 1
                if (ffc == 10000):
                    self.display(bestParent.genotipo,bestParent.fitness,bestParent.noiseFitness,self.totalGeneration)
                    break
        bestParent.withdraw_deadGenes()
        print('Active part of the genotype of the last genome', bestParent.indexActiveGenes)
        timeDiff = datetime.datetime.now() - self.startTime
        print("The end in: ",str(timeDiff))
    def evolutionPerformanceDiff(self):
        """Benchmark: run the evolution twice on the same starting genome.

        First with the full fitness evaluation, then with the dead-gene-
        skipping path (identify_deadGenes + calculateFitnessNew), and print
        the wall-clock difference and ratio.
        """
        bestParent = Genoma()
        bestParent.generate_parent() # generate the first parent
        # reuse the same starting genotype for the improved run below
        bestParent_improved = Genoma()
        bestParent_improved.setGenotipo(bestParent.genotipo)
        bestParent.calculateFitness()
        bestParent.calculateNoiseFitness()
        self.display(bestParent.genotipo, bestParent.fitness,bestParent.noiseFitness, self.totalGeneration)
        listGenomes = []
        ffc = 0
        reference = Genoma()
        bestParent.copyGene(reference)
        nd = 1
        while True:
            self.totalGeneration = self.totalGeneration + 1
            listGenomes.clear()
            cf = 0
            for i in range(0,nd):
                bestParent.calculateFitness()
                cf = cf + bestParent.fitness
            bestParent.fitness = cf/nd
            bestParent.calculateNoiseFitness()
            listGenomes.append(bestParent)
            for i in range(0, self.y):
                child = Genoma()
                bestParent.mutate().copyGene(child)
                cf = 0
                for i in range(0,nd):
                    child.calculateFitness()
                    cf = cf + child.fitness
                child.fitness = cf/nd
                child.calculateNoiseFitness()
                listGenomes.append(child)
            self.getBestGenomeWithSize(listGenomes).copyGene(bestParent)
            if(self.totalGeneration % 1000 == 0):
                self.display(bestParent.genotipo, bestParent.fitness,bestParent.noiseFitness,self.totalGeneration)
            if(self.totalGeneration>=self.maxGeneration):
                break
            if (bestParent.fitness >= 1):
                ffc += 1
                if (ffc == 10000):
                    self.display(bestParent.genotipo,bestParent.fitness,bestParent.noiseFitness,self.totalGeneration)
                    break
        bestParent.withdraw_deadGenes()
        print('Active part of the genotype of the last genome', bestParent.indexActiveGenes)
        timeDiff1 = datetime.datetime.now() - self.startTime
        print("The end in: ",str(timeDiff1))
        # THE NEW WAY ############################################################
        self.totalGeneration = 0
        self.startTime = datetime.datetime.now()
        self.data_atual = datetime.datetime.today()
        bestParent_improved.identify_deadGenes()
        bestParent_improved.calculateFitnessNew()
        bestParent_improved.calculateNoiseFitness()
        self.display(bestParent_improved.genotipo, bestParent_improved.fitness,bestParent_improved.noiseFitness, self.totalGeneration)
        listGenomes = []
        ffc = 0
        reference = Genoma()
        bestParent_improved.copyGene(reference)
        nd = 1
        while True:
            self.totalGeneration = self.totalGeneration + 1
            listGenomes.clear()
            cf = 0
            for i in range(0,nd):
                bestParent_improved.identify_deadGenes()
                bestParent_improved.calculateFitnessNew()
                cf = cf + bestParent_improved.fitness
            bestParent_improved.fitness = cf/nd
            bestParent_improved.calculateNoiseFitness()
            listGenomes.append(bestParent_improved)
            for i in range(0, self.y):
                child = Genoma()
                bestParent_improved.mutate().copyGene(child)
                cf = 0
                for i in range(0,nd):
                    child.identify_deadGenes()
                    child.calculateFitnessNew()
                    cf = cf + child.fitness
                child.fitness = cf/nd
                child.calculateNoiseFitness()
                listGenomes.append(child)
            self.getBestGenomeWithSize(listGenomes).copyGene(bestParent_improved)
            if(self.totalGeneration % 1000 == 0):
                self.display(bestParent_improved.genotipo, bestParent_improved.fitness,bestParent_improved.noiseFitness,self.totalGeneration)
            if(self.totalGeneration>=self.maxGeneration):
                break
            if (bestParent_improved.fitness >= 1):
                ffc += 1
                if (ffc == 10000):
                    self.display(bestParent_improved.genotipo,bestParent_improved.fitness,bestParent_improved.noiseFitness,self.totalGeneration)
                    break
        timeDiff2 = datetime.datetime.now() - self.startTime
        print("The end in: ",str(timeDiff2))
        print(timeDiff1 - timeDiff2)
        print(timeDiff2/timeDiff1)
    def back_foward_propagation_Proof(self,attempts):
        """Cross-check dead-gene detection over *attempts* random genomes.

        Verifies that identify_deadGenes (backward sweep) and
        withdraw_deadGenes (forward scan) agree, and that calculateFitness
        and calculateFitnessNew produce the same fitness.  Prints accuracy
        for both checks.
        """
        backPropagationCounter=0
        flagBackPropagation = True
        flagFowardPropagation = True   # NOTE(review): assigned but never read
        fowardPropagationCounter=0
        x=0
        while(x<attempts):
            x+=1
            bestParent = Genoma()
            bestParent.generate_parent()
            bestParent.identify_deadGenes()
            bestParent.withdraw_deadGenes()
            #print("ToEvaluate: ", bestParent.ToEvaluate)
            #print("indexActiveGenes: ", bestParent.indexActiveGenes)
            # The two methods must agree on exactly which genes are active.
            for i in range(0,bestParent.numberOfGenes):
                if(bestParent.ToEvaluate[i]):
                    if(not(i in bestParent.indexActiveGenes)):
                        flagBackPropagation = False
                        break
                else:
                    if(i in bestParent.indexActiveGenes):
                        flagBackPropagation = False
                        break
            if(flagBackPropagation):
                backPropagationCounter+=1
            else:
                flagBackPropagation = True
            bestParent.calculateFitness()
            fitnessOld = bestParent.fitness
            bestParent.calculateFitnessNew()
            fitnessNew = bestParent.fitness
            if(fitnessOld == fitnessNew):
                fowardPropagationCounter+=1
        print("Backpropagation Robustness: ")
        print("Counter: ", backPropagationCounter)
        print("Attempts: ", attempts)
        print("Accuracy: ", backPropagationCounter/attempts)
        print(" ")
        print("Fowardpropagation Robustness: ")
        print("Counter: ", fowardPropagationCounter)
        print("Attempts: ", attempts)
        print("Accuracy: ", fowardPropagationCounter/attempts)
        print(" ")
    def deep_foward_propagation_Proof(self):
        """Check fitness equivalence of old/new evaluation during a real run.

        NOTE(review): the success branch prints results but does not break —
        the loop only exits via the maxGeneration check; confirm whether the
        original source was truncated here.
        """
        fowardPropagationCounter = 0
        attempts = 0
        flagFowardPropagation = True
        listGenomes = []
        bestParent = Genoma()
        bestParent.generate_parent() # generate the first parent
        bestParent.calculateFitness() # baseline fitness via the full evaluation
        fitnessOld = bestParent.fitness
        bestParent.identify_deadGenes()
        bestParent.calculateFitnessNew()
        fitnessNew = bestParent.fitness
        if(fitnessOld != fitnessNew):
            attempts+=1
            flagFowardPropagation = False
        attempts+=1
        if(flagFowardPropagation):
            fowardPropagationCounter+=1
        while True:
            self.totalGeneration = self.totalGeneration + 1
            listGenomes.clear()
            listGenomes.append(bestParent)
            for i in range(0, self.y):
                child = Genoma()
                bestParent.mutate().copyGene(child)
                child.calculateFitness()
                fitnessOld = child.fitness
                child.identify_deadGenes()
                child.calculateFitnessNew()
                fitnessNew = child.fitness
                listGenomes.append(child)
                attempts+=1
                if(fitnessOld == fitnessNew):
                    fowardPropagationCounter+=1
            self.getBestGenome(listGenomes).copyGene(bestParent)
            if(self.totalGeneration>=self.maxGeneration):
                break
            if (bestParent.fitness >= 1):
                print("We did it! The fitness arrived in: ", bestParent.fitness)
                print("Fitness: ")
                print("Fowardpropagation Robustness: ")
                print("Counter: ", fowardPropagationCounter)
                print("Attempts: ", attempts)
                print("Accuracy: ", fowardPropagationCounter/attempts)
/GraphQL_core_next-1.1.1-py3-none-any.whl/graphql/validation/specified_rules.py | from ..pyutils import FrozenList
from .rules import RuleType
# Spec Section: "Executable Definitions"
from .rules.executable_definitions import ExecutableDefinitionsRule
# Spec Section: "Operation Name Uniqueness"
from .rules.unique_operation_names import UniqueOperationNamesRule
# Spec Section: "Lone Anonymous Operation"
from .rules.lone_anonymous_operation import LoneAnonymousOperationRule
# Spec Section: "Subscriptions with Single Root Field"
from .rules.single_field_subscriptions import SingleFieldSubscriptionsRule
# Spec Section: "Fragment Spread Type Existence"
from .rules.known_type_names import KnownTypeNamesRule
# Spec Section: "Fragments on Composite Types"
from .rules.fragments_on_composite_types import FragmentsOnCompositeTypesRule
# Spec Section: "Variables are Input Types"
from .rules.variables_are_input_types import VariablesAreInputTypesRule
# Spec Section: "Leaf Field Selections"
from .rules.scalar_leafs import ScalarLeafsRule
# Spec Section: "Field Selections on Objects, Interfaces, and Unions Types"
from .rules.fields_on_correct_type import FieldsOnCorrectTypeRule
# Spec Section: "Fragment Name Uniqueness"
from .rules.unique_fragment_names import UniqueFragmentNamesRule
# Spec Section: "Fragment spread target defined"
from .rules.known_fragment_names import KnownFragmentNamesRule
# Spec Section: "Fragments must be used"
from .rules.no_unused_fragments import NoUnusedFragmentsRule
# Spec Section: "Fragment spread is possible"
from .rules.possible_fragment_spreads import PossibleFragmentSpreadsRule
# Spec Section: "Fragments must not form cycles"
from .rules.no_fragment_cycles import NoFragmentCyclesRule
# Spec Section: "Variable Uniqueness"
from .rules.unique_variable_names import UniqueVariableNamesRule
# Spec Section: "All Variable Used Defined"
from .rules.no_undefined_variables import NoUndefinedVariablesRule
# Spec Section: "All Variables Used"
from .rules.no_unused_variables import NoUnusedVariablesRule
# Spec Section: "Directives Are Defined"
from .rules.known_directives import KnownDirectivesRule
# Spec Section: "Directives Are Unique Per Location"
from .rules.unique_directives_per_location import UniqueDirectivesPerLocationRule
# Spec Section: "Argument Names"
from .rules.known_argument_names import KnownArgumentNamesRule
# Spec Section: "Argument Uniqueness"
from .rules.unique_argument_names import UniqueArgumentNamesRule
# Spec Section: "Value Type Correctness"
from .rules.values_of_correct_type import ValuesOfCorrectTypeRule
# Spec Section: "Argument Optionality"
from .rules.provided_required_arguments import ProvidedRequiredArgumentsRule
# Spec Section: "All Variable Usages Are Allowed"
from .rules.variables_in_allowed_position import VariablesInAllowedPositionRule
# Spec Section: "Field Selection Merging"
from .rules.overlapping_fields_can_be_merged import OverlappingFieldsCanBeMergedRule
# Spec Section: "Input Object Field Uniqueness"
from .rules.unique_input_field_names import UniqueInputFieldNamesRule
# Schema definition language:
from .rules.lone_schema_definition import LoneSchemaDefinitionRule
from .rules.unique_operation_types import UniqueOperationTypesRule
from .rules.unique_type_names import UniqueTypeNamesRule
from .rules.unique_enum_value_names import UniqueEnumValueNamesRule
from .rules.unique_field_definition_names import UniqueFieldDefinitionNamesRule
from .rules.unique_directive_names import UniqueDirectiveNamesRule
from .rules.possible_type_extensions import PossibleTypeExtensionsRule
from .rules.known_argument_names import KnownArgumentNamesOnDirectivesRule
from .rules.provided_required_arguments import ProvidedRequiredArgumentsOnDirectivesRule
# Public API of this module: the two canonical rule lists used by
# ``validate()`` for executable documents and for SDL documents.
__all__ = ["specified_rules", "specified_sdl_rules"]
# This list includes all validation rules defined by the GraphQL spec.
#
# The order of the rules in this list has been adjusted to lead to the
# most clear output when encountering multiple validation errors.
# NOTE(review): ``FrozenList``, ``RuleType``, and the first few rule
# classes are imported earlier in the file (above this chunk) — confirm
# against the full module.
specified_rules: FrozenList[RuleType] = FrozenList(
    [
        ExecutableDefinitionsRule,
        UniqueOperationNamesRule,
        LoneAnonymousOperationRule,
        SingleFieldSubscriptionsRule,
        KnownTypeNamesRule,
        FragmentsOnCompositeTypesRule,
        VariablesAreInputTypesRule,
        ScalarLeafsRule,
        FieldsOnCorrectTypeRule,
        UniqueFragmentNamesRule,
        KnownFragmentNamesRule,
        NoUnusedFragmentsRule,
        PossibleFragmentSpreadsRule,
        NoFragmentCyclesRule,
        UniqueVariableNamesRule,
        NoUndefinedVariablesRule,
        NoUnusedVariablesRule,
        KnownDirectivesRule,
        UniqueDirectivesPerLocationRule,
        KnownArgumentNamesRule,
        UniqueArgumentNamesRule,
        ValuesOfCorrectTypeRule,
        ProvidedRequiredArgumentsRule,
        VariablesInAllowedPositionRule,
        OverlappingFieldsCanBeMergedRule,
        UniqueInputFieldNamesRule,
    ]
)
# Attach the docstring to the list *instance* at runtime so that
# ``help(specified_rules)`` and doc tools describe it; a plain string
# literal after the assignment would not become a docstring.
specified_rules.__doc__ = """\
This list includes all validation rules defined by the GraphQL spec.
The order of the rules in this list has been adjusted to lead to the
most clear output when encountering multiple validation errors.
"""
# Rules applied when validating SDL (schema definition language)
# documents only; this list is separate from ``specified_rules``,
# though a few rules (e.g. KnownTypeNamesRule, KnownDirectivesRule,
# UniqueArgumentNamesRule, UniqueInputFieldNamesRule) appear in both.
specified_sdl_rules: FrozenList[RuleType] = FrozenList(
    [
        LoneSchemaDefinitionRule,
        UniqueOperationTypesRule,
        UniqueTypeNamesRule,
        UniqueEnumValueNamesRule,
        UniqueFieldDefinitionNamesRule,
        UniqueDirectiveNamesRule,
        KnownTypeNamesRule,
        KnownDirectivesRule,
        UniqueDirectivesPerLocationRule,
        PossibleTypeExtensionsRule,
        KnownArgumentNamesOnDirectivesRule,
        UniqueArgumentNamesRule,
        UniqueInputFieldNamesRule,
        ProvidedRequiredArgumentsOnDirectivesRule,
    ]
)
# Attach the docstring to the list instance, mirroring the pattern used
# for ``specified_rules`` above.
# FIX(review): the original line ended with a stray ``| PypiClean``
# (web-scraping residue), which would raise NameError at import time;
# the residue has been removed.
specified_sdl_rules.__doc__ = """This list includes all rules for validating SDL."""