# -*- coding: utf-8 -*-
"""
Created on Thu Mar 26 17:32:58 2020
@author: dukel
"""
#%% 00. Initialize
import matplotlib.pyplot as plt
ls_x = [1,2,3,4]
ls_y = [1,4,9,16]
ls_z = [2,3,4,5]
#%% I. Basic Plotting
plt.plot(ls_x, # x
ls_y, # y
color='green',
linestyle='dashed',
label='dashed') #to add to legend
plt.plot([2,3,4,5], # x
[2,3,4,5], # y
color='#2B5B84',
linestyle='dashdot',
label='dashed-dot') #to add to legend
# options to show
plt.title('TESTING123!@#') #title
plt.xlabel('lol whats up') #x axis label
plt.ylabel('Important Figures') #y axis label
plt.legend()
plt.show()
#%% II. Subplots
## FIRST PANEL
plt.subplot(2, #rows
1, #columns
1) #1st panel <--- THIS IS KEY
plt.plot(ls_x,
ls_y,
color='green',
linestyle='dashdot')
## SECOND PANEL
plt.subplot(2, #rows
1, #columns
2) #2nd panel <--- THIS IS KEY
plt.plot(ls_z,
ls_z,
color='#2B5B84',
linestyle='dashed')
## plt.show()
plt.show()
#%% III. Setting axis limits
## FIRST PANEL
panel_1 = plt.subplot(2,1,1)
plt.plot(ls_x, ls_y, color='green', linestyle='dashdot')
panel_1.set_xlim([0,6]) # set boundaries, aka limits, to x-axis
panel_1.set_xlim([0,20]) # calling it again overrides the previous limits; [0,20] wins
## SECOND PANEL
panel_2 = plt.subplot(2,1,2)
plt.plot(ls_z, ls_z, color='#2B5B84', linestyle='dashed')
panel_2.set_xlim([0,6])
plt.show()
|
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example creates new activity groups.
To determine which activity groups exist, run get_all_activity_groups.py.
The LoadFromStorage method pulls credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
Tags: ActivityGroupService.createActivityGroups
"""
__author__ = ('Vincent Tsao',
'Joseph DiLallo')
import uuid
# Import appropriate modules from the client library.
from googleads import dfp
# Set the ID of the advertiser company this activity group is associated with.
ADVERTISER_COMPANY_ID = 'INSERT_ADVERTISER_COMPANY_ID_HERE'
def main(client, advertiser_company_id):
# Initialize appropriate service.
activity_group_service = client.GetService('ActivityGroupService',
version='v201403')
# Create a short-term activity group.
short_term_activity_group = {
'name': 'Short-term activity group #%s' % uuid.uuid4(),
'companyIds': [advertiser_company_id],
'clicksLookback': '1',
'impressionsLookback': '1'
}
# Create a long-term activity group.
long_term_activity_group = {
'name': 'Long-term activity group #%s' % uuid.uuid4(),
'companyIds': [advertiser_company_id],
'clicksLookback': '30',
'impressionsLookback': '30'
}
# Create the activity groups on the server.
activity_groups = activity_group_service.createActivityGroups([
short_term_activity_group, long_term_activity_group])
# Display results.
for activity_group in activity_groups:
print ('Activity group with ID \'%s\' and name \'%s\' was created.'
% (activity_group['id'], activity_group['name']))
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client, ADVERTISER_COMPANY_ID)
|
#!/usr/bin/env python
"""
setup.py
========
This is a setup.py template that is as generic as possible. The goal is to retrieve almost
all of the information from the main module file, rather than relying on values
explicitly entered here.
## Usage
This setup.py script needs to be modified in the following ways:
- `MAIN_FILE` needs to be pointed at the main metadata file; this can be done
easily by modifying the second arg.
- `setup` kwargs need to be modified:
- `classifiers` needs to be modified to suit your project.
- `keywords` needs to be modified to suit your project.
- If you have files that need to be included (such as `LICENSE`), you need to
create a MANIFEST.in file and `include FILENAME` them, as shown in the example below.
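A minimal MANIFEST.in for that case (the filenames here are illustrative) could contain:
::
include LICENSE
include README.rst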
Other than that, all the metadata should live in your main file, just like
the example below.
## Metadata Example
The following should be placed in your project module's __init__.py file:
::
__author__ = "Ivan Busquets"
__author_email__ = "ivanbusquets@gmail.com"
__copyright__ = "Copyright 2011, Ivan Busquets"
__credits__ = ["Ivan Busquets", "Sean Wallitsch", ]
__license__ = "MIT"
__version__ = "1.2"
__maintainer__ = "Sean Wallitsch"
__maintainer_email__ = "sean@grenadehop.com"
__module_name__ = "animatedSnap3D"
__short_desc__ = "An extension to Nuke's 'snap' options for animated verts"
__status__ = "Development"
__url__ = 'http://github.com/ThoriumGroup/animatedSnap3D'
Note: At this time `credits` is unused.
"""
# ==============================================================================
# IMPORTS
# ==============================================================================
from setuptools import setup, find_packages
import codecs
import os
import re
# ==============================================================================
# GLOBALS
# ==============================================================================
HERE = os.path.abspath(os.path.dirname(__file__))
MAIN_FILE = os.path.join(HERE, 'thorium', '__init__.py')
# Get the long description from the relevant file
with codecs.open('README.rst', encoding='utf-8') as readme_file:
LONG_DESCRIPTION = readme_file.read()
# ==============================================================================
# PRIVATE FUNCTIONS
# ==============================================================================
def _find_metadata(filepath):
"""Reads all the metadata from a source file by opening manually.
Why open and read it and not import?
https://groups.google.com/d/topic/pypa-dev/0PkjVpcxTzQ/discussion
Args:
filepath : (str)
Filepath to the file containing the metadata.
Returns:
{str: str}
Dictionary with metadata keys and values.
Raises:
RuntimeError
Cannot proceed if version or module_name not found
"""
# Open in Latin-1 so that we avoid encoding errors.
# Use codecs.open for Python 2 compatibility
with codecs.open(filepath, 'r', 'latin1') as meta_file:
metadata_file = meta_file.read()
metadata = {}
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
metadata_file, re.M)
author_match = re.search(r"^__author__ = ['\"]([^'\"]*)['\"]",
metadata_file, re.M)
author_email_match = re.search(r"^__author_email__ = ['\"]([^'\"]*)['\"]",
metadata_file, re.M)
copyright_match = re.search(r"^__copyright__ = ['\"]([^'\"]*)['\"]",
metadata_file, re.M)
credits_match = re.search(r"^__credits__ = ['\"]([^'\"]*)['\"]",
metadata_file, re.M)
license_match = re.search(r"^__license__ = ['\"]([^'\"]*)['\"]",
metadata_file, re.M)
maint_match = re.search(r"^__maintainer__ = ['\"]([^'\"]*)['\"]",
metadata_file, re.M)
maint_email_match = re.search(r"^__maintainer_email__ = ['\"]([^'\"]*)['\"]",
metadata_file, re.M)
module_name_match = re.search(r"^__module_name__ = ['\"]([^'\"]*)['\"]",
metadata_file, re.M)
short_desc_match = re.search(r"^__short_desc__ = ['\"]([^'\"]*)['\"]",
metadata_file, re.M)
status_match = re.search(r"^__status__ = ['\"]([^'\"]*)['\"]",
metadata_file, re.M)
url_match = re.search(r"^__url__ = ['\"]([^'\"]*)['\"]",
metadata_file, re.M)
if not version_match or not module_name_match:
raise RuntimeError("Unable to find version or module_name string.")
if author_match:
metadata['author'] = author_match.group(1)
if author_email_match:
metadata['author_email'] = author_email_match.group(1)
if copyright_match:
metadata['copyright'] = copyright_match.group(1)
if credits_match:
metadata['credits'] = credits_match.group(1)
if license_match:
metadata['license'] = license_match.group(1)
if maint_match:
metadata['maintainer'] = maint_match.group(1)
if maint_email_match:
metadata['maintainer_email'] = maint_email_match.group(1)
if module_name_match:
metadata['module_name'] = module_name_match.group(1)
if short_desc_match:
metadata['short_desc'] = short_desc_match.group(1)
if status_match:
metadata['status'] = status_match.group(1)
if version_match:
metadata['version'] = version_match.group(1)
if url_match:
metadata['url'] = url_match.group(1)
return metadata
# ==============================================================================
# MAIN
# ==============================================================================
metadata = _find_metadata(MAIN_FILE)
setup(
name=metadata['module_name'],
version=metadata['version'],
description=metadata.get('short_desc', ''),
long_description=LONG_DESCRIPTION,
# The project URL.
url=metadata.get('url', ''),
# Author & Maintainer details
author=metadata.get('author', ''),
author_email=metadata.get('author_email', ''),
maintainer=metadata.get('maintainer', ''),
maintainer_email=metadata.get('maintainer_email', ''),
# Choose your license
license=metadata.get('license', ''),
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Developers',
'Topic :: Multimedia :: Graphics',
'Topic :: Multimedia :: Video',
'Topic :: Software Development :: Libraries :: Python Modules',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: Implementation :: CPython',
# OS
'Operating System :: OS Independent',
# Language
'Natural Language :: English',
],
# What does your project relate to?
keywords='film tv color vfx nuke',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages.
packages=find_packages(exclude=['tests']),
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
package_data={},
include_package_data=True,
# Targeted OS
platforms='any',
)
|
# -*- coding: UTF-8 -*-
"""
``trinity_statistics``
-----------------------------------------------------------------
:Authors: Menachem Sklarz
:Affiliation: Bioinformatics core facility
:Organization: National Institute of Biotechnology in the Negev, Ben Gurion University.
A class that defines a module for running ``abundance_estimates_to_matrix.pl`` on genes or isoforms counts tables produced by ``align_and_estimate_abundance.pl``
See the script documentation `here <https://github.com/trinityrnaseq/trinityrnaseq/wiki/Trinity-Transcript-Quantification#building-expression-matrices>`_.
This conversion makes sense at the project level - combining all sample matrices into a single, normalized, comparison table. However, for completeness, we included a sample scope option for running the script in each sample separately.
.. Note:: ``scope`` is not defined for this module. It only makes sense to run ``abundance_estimates_to_matrix`` when comparing many samples against a single assembly
Requires
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* Either ``genes.results`` or ``isoforms.results`` files in the following slots:
* ``sample_data[<sample>]["genes.results"]``
* ``sample_data[<sample>]["isoforms.results"]``
Output:
~~~~~~~~~~~~~
* Creates the following files in the following slots:
* ``<project>.counts.matrix`` in ``self.sample_data["project_data"]["counts.matrix"]``
* ``<project>.not_cross_norm.fpkm.tmp`` in ``self.sample_data["project_data"]["not_cross_norm.fpkm.tmp"]``
* ``<project>.not_cross_norm.fpkm.tmp.TMM_info.txt`` in ``self.sample_data["project_data"]["not_cross_norm.fpkm.tmp.TMM_info.txt"]``
* ``<project>.TMM.fpkm.matrix`` in ``self.sample_data["project_data"]["TMM.fpkm.matrix"]``
Parameters that can be set
~~~~~~~~~~~~~~~~~~~~~~~~~~
.. csv-table::
:header: "Parameter", "Values", "Comments"
"use_genes", "", "Use 'genes.results' matrix. If not passed, use 'isoforms.results'"
"redirects: --gene_trans_map", "path or 'none'", "If path, use path as gene_trans_map for all samples. If 'none', does not produce gene level estimates. **In order to use an internal gene_trans_map, do not pass this parameter!**"
Lines for parameter file
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
::
trin_map_stats:
module: trinity_statistics
base: trin_map1
script_path: /path/to/abundance_estimates_to_matrix.pl
use_genes:
redirects:
--est_method: RSEM
References
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Grabherr, M.G., Haas, B.J., Yassour, M., Levin, J.Z., Thompson, D.A., Amit, I., Adiconis, X., Fan, L., Raychowdhury, R., Zeng, Q. and Chen, Z., 2011. **Trinity: reconstructing a full-length transcriptome without a genome from RNA-Seq data**. *Nature biotechnology*, 29(7), p.644.
"""
import os
import sys
import re
from neatseq_flow.PLC_step import Step,AssertionExcept
__author__ = "Menachem Sklarz"
__version__ = "1.6.0"
class Step_trinity_statistics(Step):
def step_specific_init(self):
self.shell = "bash" # Can be overridden by inheriting instances
self.file_tag = "trin_stats"
if "use_genes" not in self.params:
self.write_warning("'use_genes' not passed. Using 'isoforms.results' matrix")
def step_sample_initiation(self):
""" A place to do initiation stages following setting of sample_data
Here you should do testing for dependency output. These will NOT exist at initiation of this instance. They are set only following sample_data updating
"""
# In new version, --gene_trans_map is compulsory! Adding
# If not passed:
# If one exists, use it.
# Otherwise, specify "none"
# If passed:
# If with value, use the value and set project "gene_trans_map" to value
# Otherwise, use existing
if "--gene_trans_map" not in self.params["redir_params"]:
if "gene_trans_map" in self.sample_data["project_data"]:
self.params["redir_params"]["--gene_trans_map"] = self.sample_data["project_data"]["gene_trans_map"]
self.use_gene_trans_map = True
else:
self.params["redir_params"]["--gene_trans_map"] = "none"
self.use_gene_trans_map = False
else: # --gene_trans_map is defined in redir_params
if self.params["redir_params"]["--gene_trans_map"] is None:
raise AssertionExcept("You passed --gene_trans_map with no value. Please specify path or 'none'")
elif self.params["redir_params"]["--gene_trans_map"] == "none":
self.use_gene_trans_map = False
else:
self.sample_data["project_data"]["gene_trans_map"] = self.params["redir_params"]["--gene_trans_map"]
self.use_gene_trans_map = True
def create_spec_wrapping_up_script(self):
""" Add stuff to check and agglomerate the output data
"""
def build_scripts(self):
# Name of specific script:
self.spec_script_name = self.set_spec_script_name()
self.script = ""
# This line should be left before every new script. It takes care of local-execution issues.
# Use the dir it returns as the base_dir for this step.
use_dir = self.local_start(self.base_dir)
prefix = self.sample_data["Title"]
self.script += self.get_script_const()
self.script += "--out_prefix %s \\\n\t" % os.sep.join([use_dir, prefix])
# type2use is 'genes.results' or 'isoforms.results'. This is used to then select the correct slot from "mapping"
type2use = "genes.results" if "use_genes" in list(self.params.keys()) else "isoforms.results"
for sample in self.sample_data["samples"]:
try:
self.script += "%s \\\n\t" % self.sample_data[sample][type2use]
except KeyError:
raise AssertionExcept("file type %s does not exist for sample." % type2use, sample)
self.script = self.script.rstrip("\\\n\t")
self.script += "\n\n"
if not "version" in self.params or self.params["version"].lower() == "new":
# Storing all output files even though probably not very useful downstream...
self.sample_data["project_data"]["isoform.raw_counts"] = os.sep.join([self.base_dir, "%s.isoform.counts.matrix" % prefix])
self.sample_data["project_data"]["isoform.norm_counts"] = os.sep.join([self.base_dir, "%s.isoform.TPM.not_cross_norm" % prefix])
self.stamp_file(self.sample_data["project_data"]["isoform.raw_counts"] )
self.stamp_file(self.sample_data["project_data"]["isoform.norm_counts"])
if self.use_gene_trans_map: # True when --gene_trans_map is not "none"
self.sample_data["project_data"]["gene.raw_counts"] = os.sep.join([self.base_dir, "%s.gene.counts.matrix" % prefix])
self.sample_data["project_data"]["gene.norm_counts"] = os.sep.join([self.base_dir, "%s.gene.TPM.not_cross_norm" % prefix])
self.stamp_file(self.sample_data["project_data"]["gene.raw_counts"] )
self.stamp_file(self.sample_data["project_data"]["gene.norm_counts"])
else:
self.write_warning("Not storing output files for old version of trinity. "
"If required, load the appropriate files with a 'manage_types' module")
# Move all files from temporary local dir to permanent base_dir
# Sees to copying local files to final destination (and other stuff)
self.local_finish(use_dir,self.base_dir)
self.create_low_level_script()
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os;
import sys;
import datetime;
import string;
import shutil;
EPSILON = 1e-10;
Product_volume_dict = {};
START_TIME='20:55:00';
END_TIME='02:45:00';
if len(sys.argv) != 3:
print 'Usage: ./cmd src_md_dir dest_dir';
quit();
src_md_path = sys.argv[1];
dest_dir = sys.argv[2];
from_date_dir = datetime.date.today().strftime('%Y%m%d');
to_date_dir = from_date_dir;
date_dir = from_date_dir;
while date_dir <= to_date_dir:
if not os.path.exists(src_md_path+'/'+date_dir):
date_dir = (datetime.datetime.strptime(date_dir,'%Y%m%d')+datetime.timedelta(1)).strftime('%Y%m%d');
continue;
Product_volume_dict = {};
dest_night_dir = dest_dir+'/'+date_dir;
if not os.path.exists(dest_night_dir):
os.mkdir(dest_night_dir);
night_md_path = src_md_path+'/'+date_dir;
night_md_list = os.listdir(night_md_path);
for night_md_file in night_md_list:
if night_md_file.find('_n.csv') == -1:
continue;
night_md_file_path = night_md_path+'/'+night_md_file;
dest_night_md_path = dest_night_dir+'/'+night_md_file;
shutil.copyfile(night_md_file_path, dest_night_md_path); # copy the gzipped night file into place
os.system('gzip -d '+dest_night_md_path);
dest_night_md_path = dest_night_md_path[0:len(dest_night_md_path)-3];
digit_index=0;
for c in night_md_file:
if c>='0' and c<='9':
break;
digit_index = digit_index+1;
if digit_index == len(night_md_file):
os.remove(dest_night_md_path);
continue;
product_name = night_md_file[0:digit_index];
#print product_name;
#continue;
dest_fp = open(dest_night_md_path, 'r');
md_lines = dest_fp.readlines();
dest_fp.close();
#print dest_night_md_path,':',len(md_lines);
if len(md_lines) == 0:
os.remove(dest_night_md_path);
continue;
begin_line = md_lines[0];
begin_line_list = begin_line.split(',');
close_line = md_lines[-1];
close_line_list = close_line.split(',');
if begin_line_list[1]>START_TIME or close_line_list[1]<END_TIME:
os.remove(dest_night_md_path);
continue;
close_volume = string.atof(close_line_list[3]);
volumes_list = Product_volume_dict.get(product_name);
if volumes_list is None:
volumes_list = [];
volumes_list.append(close_volume);
Product_volume_dict[product_name] = volumes_list;
os.rename(dest_night_md_path,dest_night_dir+'/'+product_name+'.csv');
elif len(volumes_list)==1:
if close_volume>volumes_list[0]:
volumes_list.append(volumes_list[0]);
volumes_list[0] = close_volume;
os.rename(dest_night_dir+'/'+product_name+'.csv',dest_night_dir+'/'+product_name+'2.csv');
os.rename(dest_night_md_path,dest_night_dir+'/'+product_name+'.csv');
else:
volumes_list.append(close_volume);
os.rename(dest_night_md_path,dest_night_dir+'/'+product_name+'2.csv');
else:
if close_volume>volumes_list[0]:
volumes_list[1] = volumes_list[0];
volumes_list[0] = close_volume;
os.rename(dest_night_dir+'/'+product_name+'.csv',dest_night_dir+'/'+product_name+'2.csv');
os.rename(dest_night_md_path,dest_night_dir+'/'+product_name+'.csv');
elif close_volume>volumes_list[1]:
volumes_list[1] = close_volume;
os.rename(dest_night_md_path,dest_night_dir+'/'+product_name+'2.csv');
else:
os.remove(dest_night_md_path);
print 'Finish processing',date_dir;
date_dir = (datetime.datetime.strptime(date_dir,'%Y%m%d')+datetime.timedelta(1)).strftime('%Y%m%d');
|
# coding: utf-8
"""Wrapper for C API of LightGBM."""
import ctypes
import json
import os
import warnings
from collections import OrderedDict
from copy import deepcopy
from functools import wraps
from logging import Logger
from tempfile import NamedTemporaryFile
from typing import Any, Dict, List, Set, Union
import numpy as np
import scipy.sparse
from .compat import PANDAS_INSTALLED, concat, dt_DataTable, is_dtype_sparse, pd_DataFrame, pd_Series
from .libpath import find_lib_path
class _DummyLogger:
def info(self, msg):
print(msg)
def warning(self, msg):
warnings.warn(msg, stacklevel=3)
_LOGGER = _DummyLogger()
def register_logger(logger):
"""Register custom logger.
Parameters
----------
logger : logging.Logger
Custom logger.
"""
if not isinstance(logger, Logger):
raise TypeError("Logger should inherit logging.Logger class")
global _LOGGER
_LOGGER = logger
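# A minimal usage sketch (assuming this module is exposed through the top-level
# ``lightgbm`` package, as in a normal installation):
#
#   import logging
#   import lightgbm as lgb
#
#   logging.basicConfig(level=logging.INFO)
#   lgb.register_logger(logging.getLogger("my_app"))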
def _normalize_native_string(func):
"""Join log messages from native library which come by chunks."""
msg_normalized = []
@wraps(func)
def wrapper(msg):
nonlocal msg_normalized
if msg.strip() == '':
msg = ''.join(msg_normalized)
msg_normalized = []
return func(msg)
else:
msg_normalized.append(msg)
return wrapper
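# Illustrative behaviour of the wrapper above: the native library can deliver one
# logical message in several chunks, terminated by a whitespace-only chunk, e.g.
#   _log_native("[LightGBM] ")               # buffered
#   _log_native("Finished loading model")    # buffered
#   _log_native("\n")                        # flush: the joined message is logged once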
def _log_info(msg):
_LOGGER.info(msg)
def _log_warning(msg):
_LOGGER.warning(msg)
@_normalize_native_string
def _log_native(msg):
_LOGGER.info(msg)
def _log_callback(msg):
"""Redirect logs from native library into Python."""
_log_native(str(msg.decode('utf-8')))
def _load_lib():
"""Load LightGBM library."""
lib_path = find_lib_path()
if len(lib_path) == 0:
return None
lib = ctypes.cdll.LoadLibrary(lib_path[0])
lib.LGBM_GetLastError.restype = ctypes.c_char_p
callback = ctypes.CFUNCTYPE(None, ctypes.c_char_p)
lib.callback = callback(_log_callback)
if lib.LGBM_RegisterLogCallback(lib.callback) != 0:
raise LightGBMError(lib.LGBM_GetLastError().decode('utf-8'))
return lib
_LIB = _load_lib()
NUMERIC_TYPES = (int, float, bool)
def _safe_call(ret):
"""Check the return value from C API call.
Parameters
----------
ret : int
The return value from C API calls.
"""
if ret != 0:
raise LightGBMError(_LIB.LGBM_GetLastError().decode('utf-8'))
def is_numeric(obj):
"""Check whether object is a number or not, include numpy number, etc."""
try:
float(obj)
return True
except (TypeError, ValueError):
# TypeError: obj is not a string or a number
# ValueError: invalid literal
return False
def is_numpy_1d_array(data):
"""Check whether data is a numpy 1-D array."""
return isinstance(data, np.ndarray) and len(data.shape) == 1
def is_numpy_column_array(data):
"""Check whether data is a column numpy array."""
if not isinstance(data, np.ndarray):
return False
shape = data.shape
return len(shape) == 2 and shape[1] == 1
def cast_numpy_1d_array_to_dtype(array, dtype):
"""Cast numpy 1d array to given dtype."""
if array.dtype == dtype:
return array
return array.astype(dtype=dtype, copy=False)
def is_1d_list(data):
"""Check whether data is a 1-D list."""
return isinstance(data, list) and (not data or is_numeric(data[0]))
def list_to_1d_numpy(data, dtype=np.float32, name='list'):
"""Convert data to numpy 1-D array."""
if is_numpy_1d_array(data):
return cast_numpy_1d_array_to_dtype(data, dtype)
elif is_numpy_column_array(data):
_log_warning('Converting column-vector to 1d array')
array = data.ravel()
return cast_numpy_1d_array_to_dtype(array, dtype)
elif is_1d_list(data):
return np.array(data, dtype=dtype, copy=False)
elif isinstance(data, pd_Series):
if _get_bad_pandas_dtypes([data.dtypes]):
raise ValueError('Series.dtypes must be int, float or bool')
return np.array(data, dtype=dtype, copy=False) # SparseArray should be supported as well
else:
raise TypeError(f"Wrong type({type(data).__name__}) for {name}.\n"
"It should be list, numpy 1-D array or pandas Series")
def cfloat32_array_to_numpy(cptr, length):
"""Convert a ctypes float pointer array to a numpy array."""
if isinstance(cptr, ctypes.POINTER(ctypes.c_float)):
return np.ctypeslib.as_array(cptr, shape=(length,)).copy()
else:
raise RuntimeError('Expected float pointer')
def cfloat64_array_to_numpy(cptr, length):
"""Convert a ctypes double pointer array to a numpy array."""
if isinstance(cptr, ctypes.POINTER(ctypes.c_double)):
return np.ctypeslib.as_array(cptr, shape=(length,)).copy()
else:
raise RuntimeError('Expected double pointer')
def cint32_array_to_numpy(cptr, length):
"""Convert a ctypes int pointer array to a numpy array."""
if isinstance(cptr, ctypes.POINTER(ctypes.c_int32)):
return np.ctypeslib.as_array(cptr, shape=(length,)).copy()
else:
raise RuntimeError('Expected int32 pointer')
def cint64_array_to_numpy(cptr, length):
"""Convert a ctypes int pointer array to a numpy array."""
if isinstance(cptr, ctypes.POINTER(ctypes.c_int64)):
return np.ctypeslib.as_array(cptr, shape=(length,)).copy()
else:
raise RuntimeError('Expected int64 pointer')
def c_str(string):
"""Convert a Python string to C string."""
return ctypes.c_char_p(string.encode('utf-8'))
def c_array(ctype, values):
"""Convert a Python array to C array."""
return (ctype * len(values))(*values)
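# Illustrative: c_str('metric=auc') yields a ctypes.c_char_p suitable for the C API,
# and c_array(ctypes.c_double, [1.0, 2.0]) builds a C double[2] from a Python list.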
def json_default_with_numpy(obj):
"""Convert numpy classes to JSON serializable objects."""
if isinstance(obj, (np.integer, np.floating, np.bool_)):
return obj.item()
elif isinstance(obj, np.ndarray):
return obj.tolist()
else:
return obj
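# Illustrative: json.dumps({'num_leaves': np.int64(31)}, default=json_default_with_numpy)
# serializes the numpy integer as a plain 31 instead of raising a TypeError.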
def param_dict_to_str(data):
"""Convert Python dictionary to string, which is passed to C API."""
if data is None or not data:
return ""
pairs = []
for key, val in data.items():
if isinstance(val, (list, tuple, set)) or is_numpy_1d_array(val):
def to_string(x):
if isinstance(x, list):
return f"[{','.join(map(str, x))}]"
else:
return str(x)
pairs.append(f"{key}={','.join(map(to_string, val))}")
elif isinstance(val, (str, NUMERIC_TYPES)) or is_numeric(val):
pairs.append(f"{key}={val}")
elif val is not None:
raise TypeError(f'Unknown type of parameter:{key}, got:{type(val).__name__}')
return ' '.join(pairs)
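# Illustrative conversion performed by param_dict_to_str (parameter names are arbitrary):
#   param_dict_to_str({'num_leaves': 31, 'metric': ['auc', 'binary_logloss']})
#   -> 'num_leaves=31 metric=auc,binary_logloss'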
class _TempFile:
def __enter__(self):
with NamedTemporaryFile(prefix="lightgbm_tmp_", delete=True) as f:
self.name = f.name
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if os.path.isfile(self.name):
os.remove(self.name)
def readlines(self):
with open(self.name, "r+") as f:
ret = f.readlines()
return ret
def writelines(self, lines):
with open(self.name, "w+") as f:
f.writelines(lines)
class LightGBMError(Exception):
"""Error thrown by LightGBM."""
pass
# DeprecationWarning is not shown by default, so let's create our own with higher level
class LGBMDeprecationWarning(UserWarning):
"""Custom deprecation warning."""
pass
class _ConfigAliases:
aliases = {"bin_construct_sample_cnt": {"bin_construct_sample_cnt",
"subsample_for_bin"},
"boosting": {"boosting",
"boosting_type",
"boost"},
"categorical_feature": {"categorical_feature",
"cat_feature",
"categorical_column",
"cat_column"},
"data_random_seed": {"data_random_seed",
"data_seed"},
"early_stopping_round": {"early_stopping_round",
"early_stopping_rounds",
"early_stopping",
"n_iter_no_change"},
"enable_bundle": {"enable_bundle",
"is_enable_bundle",
"bundle"},
"eval_at": {"eval_at",
"ndcg_eval_at",
"ndcg_at",
"map_eval_at",
"map_at"},
"group_column": {"group_column",
"group",
"group_id",
"query_column",
"query",
"query_id"},
"header": {"header",
"has_header"},
"ignore_column": {"ignore_column",
"ignore_feature",
"blacklist"},
"is_enable_sparse": {"is_enable_sparse",
"is_sparse",
"enable_sparse",
"sparse"},
"label_column": {"label_column",
"label"},
"local_listen_port": {"local_listen_port",
"local_port",
"port"},
"machines": {"machines",
"workers",
"nodes"},
"metric": {"metric",
"metrics",
"metric_types"},
"num_class": {"num_class",
"num_classes"},
"num_iterations": {"num_iterations",
"num_iteration",
"n_iter",
"num_tree",
"num_trees",
"num_round",
"num_rounds",
"num_boost_round",
"n_estimators"},
"num_machines": {"num_machines",
"num_machine"},
"num_threads": {"num_threads",
"num_thread",
"nthread",
"nthreads",
"n_jobs"},
"objective": {"objective",
"objective_type",
"app",
"application"},
"pre_partition": {"pre_partition",
"is_pre_partition"},
"tree_learner": {"tree_learner",
"tree",
"tree_type",
"tree_learner_type"},
"two_round": {"two_round",
"two_round_loading",
"use_two_round_loading"},
"verbosity": {"verbosity",
"verbose"},
"weight_column": {"weight_column",
"weight"}}
@classmethod
def get(cls, *args):
ret = set()
for i in args:
ret |= cls.aliases.get(i, {i})
return ret
def _choose_param_value(main_param_name: str, params: Dict[str, Any], default_value: Any) -> Dict[str, Any]:
"""Get a single parameter value, accounting for aliases.
Parameters
----------
main_param_name : str
Name of the main parameter to get a value for. One of the keys of ``_ConfigAliases``.
params : dict
Dictionary of LightGBM parameters.
default_value : Any
Default value to use for the parameter, if none is found in ``params``.
Returns
-------
params : dict
A ``params`` dict with exactly one value for ``main_param_name``, and all aliases ``main_param_name`` removed.
If both ``main_param_name`` and one or more aliases for it are found, the value of ``main_param_name`` will be preferred.
"""
# avoid side effects on passed-in parameters
params = deepcopy(params)
# find a value, and remove other aliases with .pop()
# prefer the value of 'main_param_name' if it exists, otherwise search the aliases
found_value = None
if main_param_name in params.keys():
found_value = params[main_param_name]
for param in _ConfigAliases.get(main_param_name):
val = params.pop(param, None)
if found_value is None and val is not None:
found_value = val
if found_value is not None:
params[main_param_name] = found_value
else:
params[main_param_name] = default_value
return params
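# Illustrative behaviour of _choose_param_value, using alias names from _ConfigAliases:
#   _choose_param_value("num_iterations", {"n_estimators": 100, "metric": "auc"}, 50)
#   -> {"metric": "auc", "num_iterations": 100}   # alias popped, its value kept
#   _choose_param_value("num_iterations", {}, 50)
#   -> {"num_iterations": 50}                     # default used when nothing is found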
MAX_INT32 = (1 << 31) - 1
"""Macro definition of data type in C API of LightGBM"""
C_API_DTYPE_FLOAT32 = 0
C_API_DTYPE_FLOAT64 = 1
C_API_DTYPE_INT32 = 2
C_API_DTYPE_INT64 = 3
"""Matrix is row major in Python"""
C_API_IS_ROW_MAJOR = 1
"""Macro definition of prediction type in C API of LightGBM"""
C_API_PREDICT_NORMAL = 0
C_API_PREDICT_RAW_SCORE = 1
C_API_PREDICT_LEAF_INDEX = 2
C_API_PREDICT_CONTRIB = 3
"""Macro definition of sparse matrix type"""
C_API_MATRIX_TYPE_CSR = 0
C_API_MATRIX_TYPE_CSC = 1
"""Macro definition of feature importance type"""
C_API_FEATURE_IMPORTANCE_SPLIT = 0
C_API_FEATURE_IMPORTANCE_GAIN = 1
"""Data type of data field"""
FIELD_TYPE_MAPPER = {"label": C_API_DTYPE_FLOAT32,
"weight": C_API_DTYPE_FLOAT32,
"init_score": C_API_DTYPE_FLOAT64,
"group": C_API_DTYPE_INT32}
"""String name to int feature importance type mapper"""
FEATURE_IMPORTANCE_TYPE_MAPPER = {"split": C_API_FEATURE_IMPORTANCE_SPLIT,
"gain": C_API_FEATURE_IMPORTANCE_GAIN}
def convert_from_sliced_object(data):
"""Fix the memory of multi-dimensional sliced object."""
if isinstance(data, np.ndarray) and isinstance(data.base, np.ndarray):
if not data.flags.c_contiguous:
_log_warning("Usage of np.ndarray subset (sliced data) is not recommended "
"due to it will double the peak memory cost in LightGBM.")
return np.copy(data)
return data
def c_float_array(data):
"""Get pointer of float numpy array / list."""
if is_1d_list(data):
data = np.array(data, copy=False)
if is_numpy_1d_array(data):
data = convert_from_sliced_object(data)
assert data.flags.c_contiguous
if data.dtype == np.float32:
ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_float))
type_data = C_API_DTYPE_FLOAT32
elif data.dtype == np.float64:
ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_double))
type_data = C_API_DTYPE_FLOAT64
else:
raise TypeError(f"Expected np.float32 or np.float64, met type({data.dtype})")
else:
raise TypeError(f"Unknown type({type(data).__name__})")
return (ptr_data, type_data, data)  # also return `data` so the temporary copy is not freed
def c_int_array(data):
"""Get pointer of int numpy array / list."""
if is_1d_list(data):
data = np.array(data, copy=False)
if is_numpy_1d_array(data):
data = convert_from_sliced_object(data)
assert data.flags.c_contiguous
if data.dtype == np.int32:
ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_int32))
type_data = C_API_DTYPE_INT32
elif data.dtype == np.int64:
ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_int64))
type_data = C_API_DTYPE_INT64
else:
raise TypeError(f"Expected np.int32 or np.int64, met type({data.dtype})")
else:
raise TypeError(f"Unknown type({type(data).__name__})")
return (ptr_data, type_data, data)  # also return `data` so the temporary copy is not freed
def _get_bad_pandas_dtypes(dtypes):
pandas_dtype_mapper = {'int8': 'int', 'int16': 'int', 'int32': 'int',
'int64': 'int', 'uint8': 'int', 'uint16': 'int',
'uint32': 'int', 'uint64': 'int', 'bool': 'int',
'float16': 'float', 'float32': 'float', 'float64': 'float'}
bad_indices = [i for i, dtype in enumerate(dtypes) if (dtype.name not in pandas_dtype_mapper
and (not is_dtype_sparse(dtype)
or dtype.subtype.name not in pandas_dtype_mapper))]
return bad_indices
def _data_from_pandas(data, feature_name, categorical_feature, pandas_categorical):
if isinstance(data, pd_DataFrame):
if len(data.shape) != 2 or data.shape[0] < 1:
raise ValueError('Input data must be 2 dimensional and non empty.')
if feature_name == 'auto' or feature_name is None:
data = data.rename(columns=str)
cat_cols = list(data.select_dtypes(include=['category']).columns)
cat_cols_not_ordered = [col for col in cat_cols if not data[col].cat.ordered]
if pandas_categorical is None: # train dataset
pandas_categorical = [list(data[col].cat.categories) for col in cat_cols]
else:
if len(cat_cols) != len(pandas_categorical):
raise ValueError('train and valid dataset categorical_feature do not match.')
for col, category in zip(cat_cols, pandas_categorical):
if list(data[col].cat.categories) != list(category):
data[col] = data[col].cat.set_categories(category)
if len(cat_cols): # cat_cols is list
data = data.copy() # not alter origin DataFrame
data[cat_cols] = data[cat_cols].apply(lambda x: x.cat.codes).replace({-1: np.nan})
if categorical_feature is not None:
if feature_name is None:
feature_name = list(data.columns)
if categorical_feature == 'auto': # use cat cols from DataFrame
categorical_feature = cat_cols_not_ordered
else: # use cat cols specified by user
categorical_feature = list(categorical_feature)
if feature_name == 'auto':
feature_name = list(data.columns)
bad_indices = _get_bad_pandas_dtypes(data.dtypes)
if bad_indices:
bad_index_cols_str = ', '.join(data.columns[bad_indices])
raise ValueError("DataFrame.dtypes for data must be int, float or bool.\n"
"Did not expect the data types in the following fields: "
f"{bad_index_cols_str}")
data = data.values
if data.dtype != np.float32 and data.dtype != np.float64:
data = data.astype(np.float32)
else:
if feature_name == 'auto':
feature_name = None
if categorical_feature == 'auto':
categorical_feature = None
return data, feature_name, categorical_feature, pandas_categorical
def _label_from_pandas(label):
if isinstance(label, pd_DataFrame):
if len(label.columns) > 1:
raise ValueError('DataFrame for label cannot have multiple columns')
if _get_bad_pandas_dtypes(label.dtypes):
raise ValueError('DataFrame.dtypes for label must be int, float or bool')
label = np.ravel(label.values.astype(np.float32, copy=False))
return label
def _dump_pandas_categorical(pandas_categorical, file_name=None):
categorical_json = json.dumps(pandas_categorical, default=json_default_with_numpy)
pandas_str = f'\npandas_categorical:{categorical_json}\n'
if file_name is not None:
with open(file_name, 'a') as f:
f.write(pandas_str)
return pandas_str
def _load_pandas_categorical(file_name=None, model_str=None):
pandas_key = 'pandas_categorical:'
offset = -len(pandas_key)
if file_name is not None:
max_offset = -os.path.getsize(file_name)
with open(file_name, 'rb') as f:
while True:
if offset < max_offset:
offset = max_offset
f.seek(offset, os.SEEK_END)
lines = f.readlines()
if len(lines) >= 2:
break
offset *= 2
last_line = lines[-1].decode('utf-8').strip()
if not last_line.startswith(pandas_key):
last_line = lines[-2].decode('utf-8').strip()
elif model_str is not None:
idx = model_str.rfind('\n', 0, offset)
last_line = model_str[idx:].strip()
if last_line.startswith(pandas_key):
return json.loads(last_line[len(pandas_key):])
else:
return None
class _InnerPredictor:
"""_InnerPredictor of LightGBM.
Not exposed to user.
Used only for prediction, usually used for continued training.
.. note::
Can be converted from Booster, but cannot be converted to Booster.
"""
def __init__(self, model_file=None, booster_handle=None, pred_parameter=None):
"""Initialize the _InnerPredictor.
Parameters
----------
model_file : string or None, optional (default=None)
Path to the model file.
booster_handle : object or None, optional (default=None)
Handle of Booster.
pred_parameter : dict or None, optional (default=None)
Other parameters for the prediction.
"""
self.handle = ctypes.c_void_p()
self.__is_manage_handle = True
if model_file is not None:
"""Prediction task"""
out_num_iterations = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterCreateFromModelfile(
c_str(model_file),
ctypes.byref(out_num_iterations),
ctypes.byref(self.handle)))
out_num_class = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetNumClasses(
self.handle,
ctypes.byref(out_num_class)))
self.num_class = out_num_class.value
self.num_total_iteration = out_num_iterations.value
self.pandas_categorical = _load_pandas_categorical(file_name=model_file)
elif booster_handle is not None:
self.__is_manage_handle = False
self.handle = booster_handle
out_num_class = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetNumClasses(
self.handle,
ctypes.byref(out_num_class)))
self.num_class = out_num_class.value
self.num_total_iteration = self.current_iteration()
self.pandas_categorical = None
else:
raise TypeError('Need model_file or booster_handle to create a predictor')
pred_parameter = {} if pred_parameter is None else pred_parameter
self.pred_parameter = param_dict_to_str(pred_parameter)
def __del__(self):
try:
if self.__is_manage_handle:
_safe_call(_LIB.LGBM_BoosterFree(self.handle))
except AttributeError:
pass
def __getstate__(self):
this = self.__dict__.copy()
this.pop('handle', None)
return this
def predict(self, data, start_iteration=0, num_iteration=-1,
raw_score=False, pred_leaf=False, pred_contrib=False, data_has_header=False,
is_reshape=True):
"""Predict logic.
Parameters
----------
data : string, numpy array, pandas DataFrame, H2O DataTable's Frame or scipy.sparse
Data source for prediction.
When data type is string, it represents the path of txt file.
start_iteration : int, optional (default=0)
Start index of the iteration to predict.
num_iteration : int, optional (default=-1)
Iteration used for prediction.
raw_score : bool, optional (default=False)
Whether to predict raw scores.
pred_leaf : bool, optional (default=False)
Whether to predict leaf index.
pred_contrib : bool, optional (default=False)
Whether to predict feature contributions.
data_has_header : bool, optional (default=False)
Whether data has header.
Used only for txt data.
is_reshape : bool, optional (default=True)
Whether to reshape to (nrow, ncol).
Returns
-------
result : numpy array, scipy.sparse or list of scipy.sparse
Prediction result.
Can be sparse or a list of sparse objects (each element represents predictions for one class) for feature contributions (when ``pred_contrib=True``).
"""
if isinstance(data, Dataset):
raise TypeError("Cannot use Dataset instance for prediction, please use raw data instead")
data = _data_from_pandas(data, None, None, self.pandas_categorical)[0]
predict_type = C_API_PREDICT_NORMAL
if raw_score:
predict_type = C_API_PREDICT_RAW_SCORE
if pred_leaf:
predict_type = C_API_PREDICT_LEAF_INDEX
if pred_contrib:
predict_type = C_API_PREDICT_CONTRIB
int_data_has_header = 1 if data_has_header else 0
if isinstance(data, str):
with _TempFile() as f:
_safe_call(_LIB.LGBM_BoosterPredictForFile(
self.handle,
c_str(data),
ctypes.c_int(int_data_has_header),
ctypes.c_int(predict_type),
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
c_str(self.pred_parameter),
c_str(f.name)))
lines = f.readlines()
nrow = len(lines)
preds = [float(token) for line in lines for token in line.split('\t')]
preds = np.array(preds, dtype=np.float64, copy=False)
elif isinstance(data, scipy.sparse.csr_matrix):
preds, nrow = self.__pred_for_csr(data, start_iteration, num_iteration, predict_type)
elif isinstance(data, scipy.sparse.csc_matrix):
preds, nrow = self.__pred_for_csc(data, start_iteration, num_iteration, predict_type)
elif isinstance(data, np.ndarray):
preds, nrow = self.__pred_for_np2d(data, start_iteration, num_iteration, predict_type)
elif isinstance(data, list):
try:
data = np.array(data)
except BaseException:
raise ValueError('Cannot convert data list to numpy array.')
preds, nrow = self.__pred_for_np2d(data, start_iteration, num_iteration, predict_type)
elif isinstance(data, dt_DataTable):
preds, nrow = self.__pred_for_np2d(data.to_numpy(), start_iteration, num_iteration, predict_type)
else:
try:
_log_warning('Converting data to scipy sparse matrix.')
csr = scipy.sparse.csr_matrix(data)
except BaseException:
raise TypeError(f'Cannot predict data for type {type(data).__name__}')
preds, nrow = self.__pred_for_csr(csr, start_iteration, num_iteration, predict_type)
if pred_leaf:
preds = preds.astype(np.int32)
is_sparse = scipy.sparse.issparse(preds) or isinstance(preds, list)
if is_reshape and not is_sparse and preds.size != nrow:
if preds.size % nrow == 0:
preds = preds.reshape(nrow, -1)
else:
raise ValueError(f'Length of predict result ({preds.size}) cannot be divided by nrow ({nrow})')
return preds
def __get_num_preds(self, start_iteration, num_iteration, nrow, predict_type):
"""Get size of prediction result."""
if nrow > MAX_INT32:
raise LightGBMError('LightGBM cannot perform prediction for data '
f'with number of rows greater than MAX_INT32 ({MAX_INT32}).\n'
'You can split your data into chunks '
'and then concatenate predictions for them')
n_preds = ctypes.c_int64(0)
_safe_call(_LIB.LGBM_BoosterCalcNumPredict(
self.handle,
ctypes.c_int(nrow),
ctypes.c_int(predict_type),
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
ctypes.byref(n_preds)))
return n_preds.value
def __pred_for_np2d(self, mat, start_iteration, num_iteration, predict_type):
"""Predict for a 2-D numpy matrix."""
if len(mat.shape) != 2:
raise ValueError('Input numpy.ndarray or list must be 2 dimensional')
def inner_predict(mat, start_iteration, num_iteration, predict_type, preds=None):
if mat.dtype == np.float32 or mat.dtype == np.float64:
data = np.array(mat.reshape(mat.size), dtype=mat.dtype, copy=False)
else: # change non-float data to float data, need to copy
data = np.array(mat.reshape(mat.size), dtype=np.float32)
ptr_data, type_ptr_data, _ = c_float_array(data)
n_preds = self.__get_num_preds(start_iteration, num_iteration, mat.shape[0], predict_type)
if preds is None:
preds = np.zeros(n_preds, dtype=np.float64)
elif len(preds.shape) != 1 or len(preds) != n_preds:
raise ValueError("Wrong length of pre-allocated predict array")
out_num_preds = ctypes.c_int64(0)
_safe_call(_LIB.LGBM_BoosterPredictForMat(
self.handle,
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int32(mat.shape[0]),
ctypes.c_int32(mat.shape[1]),
ctypes.c_int(C_API_IS_ROW_MAJOR),
ctypes.c_int(predict_type),
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
c_str(self.pred_parameter),
ctypes.byref(out_num_preds),
preds.ctypes.data_as(ctypes.POINTER(ctypes.c_double))))
if n_preds != out_num_preds.value:
raise ValueError("Wrong length for predict results")
return preds, mat.shape[0]
nrow = mat.shape[0]
if nrow > MAX_INT32:
sections = np.arange(start=MAX_INT32, stop=nrow, step=MAX_INT32)
# __get_num_preds() cannot work with nrow > MAX_INT32, so calculate overall number of predictions piecemeal
n_preds = [self.__get_num_preds(start_iteration, num_iteration, i, predict_type) for i in np.diff([0] + list(sections) + [nrow])]
n_preds_sections = np.array([0] + n_preds, dtype=np.intp).cumsum()
preds = np.zeros(sum(n_preds), dtype=np.float64)
for chunk, (start_idx_pred, end_idx_pred) in zip(np.array_split(mat, sections),
zip(n_preds_sections, n_preds_sections[1:])):
# avoid memory consumption by arrays concatenation operations
inner_predict(chunk, start_iteration, num_iteration, predict_type, preds[start_idx_pred:end_idx_pred])
return preds, nrow
else:
return inner_predict(mat, start_iteration, num_iteration, predict_type)
def __create_sparse_native(self, cs, out_shape, out_ptr_indptr, out_ptr_indices, out_ptr_data,
indptr_type, data_type, is_csr=True):
# create numpy array from output arrays
data_indices_len = out_shape[0]
indptr_len = out_shape[1]
if indptr_type == C_API_DTYPE_INT32:
out_indptr = cint32_array_to_numpy(out_ptr_indptr, indptr_len)
elif indptr_type == C_API_DTYPE_INT64:
out_indptr = cint64_array_to_numpy(out_ptr_indptr, indptr_len)
else:
raise TypeError("Expected int32 or int64 type for indptr")
if data_type == C_API_DTYPE_FLOAT32:
out_data = cfloat32_array_to_numpy(out_ptr_data, data_indices_len)
elif data_type == C_API_DTYPE_FLOAT64:
out_data = cfloat64_array_to_numpy(out_ptr_data, data_indices_len)
else:
raise TypeError("Expected float32 or float64 type for data")
out_indices = cint32_array_to_numpy(out_ptr_indices, data_indices_len)
# break up indptr based on number of rows (note more than one matrix in multiclass case)
per_class_indptr_shape = cs.indptr.shape[0]
# for CSC there is extra column added
if not is_csr:
per_class_indptr_shape += 1
out_indptr_arrays = np.split(out_indptr, out_indptr.shape[0] / per_class_indptr_shape)
# reformat output into a csr or csc matrix or list of csr or csc matrices
cs_output_matrices = []
offset = 0
for cs_indptr in out_indptr_arrays:
matrix_indptr_len = cs_indptr[cs_indptr.shape[0] - 1]
cs_indices = out_indices[offset + cs_indptr[0]:offset + matrix_indptr_len]
cs_data = out_data[offset + cs_indptr[0]:offset + matrix_indptr_len]
offset += matrix_indptr_len
# same shape as input csr or csc matrix except extra column for expected value
cs_shape = [cs.shape[0], cs.shape[1] + 1]
# note: make sure we copy data as it will be deallocated next
if is_csr:
cs_output_matrices.append(scipy.sparse.csr_matrix((cs_data, cs_indices, cs_indptr), cs_shape))
else:
cs_output_matrices.append(scipy.sparse.csc_matrix((cs_data, cs_indices, cs_indptr), cs_shape))
# free the temporary native indptr, indices, and data
_safe_call(_LIB.LGBM_BoosterFreePredictSparse(out_ptr_indptr, out_ptr_indices, out_ptr_data,
ctypes.c_int(indptr_type), ctypes.c_int(data_type)))
if len(cs_output_matrices) == 1:
return cs_output_matrices[0]
return cs_output_matrices
def __pred_for_csr(self, csr, start_iteration, num_iteration, predict_type):
"""Predict for a CSR data."""
def inner_predict(csr, start_iteration, num_iteration, predict_type, preds=None):
nrow = len(csr.indptr) - 1
n_preds = self.__get_num_preds(start_iteration, num_iteration, nrow, predict_type)
if preds is None:
preds = np.zeros(n_preds, dtype=np.float64)
elif len(preds.shape) != 1 or len(preds) != n_preds:
raise ValueError("Wrong length of pre-allocated predict array")
out_num_preds = ctypes.c_int64(0)
ptr_indptr, type_ptr_indptr, __ = c_int_array(csr.indptr)
ptr_data, type_ptr_data, _ = c_float_array(csr.data)
assert csr.shape[1] <= MAX_INT32
csr_indices = csr.indices.astype(np.int32, copy=False)
_safe_call(_LIB.LGBM_BoosterPredictForCSR(
self.handle,
ptr_indptr,
ctypes.c_int(type_ptr_indptr),
csr_indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int64(len(csr.indptr)),
ctypes.c_int64(len(csr.data)),
ctypes.c_int64(csr.shape[1]),
ctypes.c_int(predict_type),
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
c_str(self.pred_parameter),
ctypes.byref(out_num_preds),
preds.ctypes.data_as(ctypes.POINTER(ctypes.c_double))))
if n_preds != out_num_preds.value:
raise ValueError("Wrong length for predict results")
return preds, nrow
def inner_predict_sparse(csr, start_iteration, num_iteration, predict_type):
ptr_indptr, type_ptr_indptr, __ = c_int_array(csr.indptr)
ptr_data, type_ptr_data, _ = c_float_array(csr.data)
csr_indices = csr.indices.astype(np.int32, copy=False)
matrix_type = C_API_MATRIX_TYPE_CSR
if type_ptr_indptr == C_API_DTYPE_INT32:
out_ptr_indptr = ctypes.POINTER(ctypes.c_int32)()
else:
out_ptr_indptr = ctypes.POINTER(ctypes.c_int64)()
out_ptr_indices = ctypes.POINTER(ctypes.c_int32)()
if type_ptr_data == C_API_DTYPE_FLOAT32:
out_ptr_data = ctypes.POINTER(ctypes.c_float)()
else:
out_ptr_data = ctypes.POINTER(ctypes.c_double)()
out_shape = np.zeros(2, dtype=np.int64)
_safe_call(_LIB.LGBM_BoosterPredictSparseOutput(
self.handle,
ptr_indptr,
ctypes.c_int(type_ptr_indptr),
csr_indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int64(len(csr.indptr)),
ctypes.c_int64(len(csr.data)),
ctypes.c_int64(csr.shape[1]),
ctypes.c_int(predict_type),
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
c_str(self.pred_parameter),
ctypes.c_int(matrix_type),
out_shape.ctypes.data_as(ctypes.POINTER(ctypes.c_int64)),
ctypes.byref(out_ptr_indptr),
ctypes.byref(out_ptr_indices),
ctypes.byref(out_ptr_data)))
matrices = self.__create_sparse_native(csr, out_shape, out_ptr_indptr, out_ptr_indices, out_ptr_data,
type_ptr_indptr, type_ptr_data, is_csr=True)
nrow = len(csr.indptr) - 1
return matrices, nrow
if predict_type == C_API_PREDICT_CONTRIB:
return inner_predict_sparse(csr, start_iteration, num_iteration, predict_type)
nrow = len(csr.indptr) - 1
if nrow > MAX_INT32:
sections = [0] + list(np.arange(start=MAX_INT32, stop=nrow, step=MAX_INT32)) + [nrow]
# __get_num_preds() cannot work with nrow > MAX_INT32, so calculate overall number of predictions piecemeal
n_preds = [self.__get_num_preds(start_iteration, num_iteration, i, predict_type) for i in np.diff(sections)]
n_preds_sections = np.array([0] + n_preds, dtype=np.intp).cumsum()
preds = np.zeros(sum(n_preds), dtype=np.float64)
for (start_idx, end_idx), (start_idx_pred, end_idx_pred) in zip(zip(sections, sections[1:]),
zip(n_preds_sections, n_preds_sections[1:])):
# avoid memory consumption by arrays concatenation operations
inner_predict(csr[start_idx:end_idx], start_iteration, num_iteration, predict_type, preds[start_idx_pred:end_idx_pred])
return preds, nrow
else:
return inner_predict(csr, start_iteration, num_iteration, predict_type)
def __pred_for_csc(self, csc, start_iteration, num_iteration, predict_type):
"""Predict for a CSC data."""
def inner_predict_sparse(csc, start_iteration, num_iteration, predict_type):
ptr_indptr, type_ptr_indptr, __ = c_int_array(csc.indptr)
ptr_data, type_ptr_data, _ = c_float_array(csc.data)
csc_indices = csc.indices.astype(np.int32, copy=False)
matrix_type = C_API_MATRIX_TYPE_CSC
if type_ptr_indptr == C_API_DTYPE_INT32:
out_ptr_indptr = ctypes.POINTER(ctypes.c_int32)()
else:
out_ptr_indptr = ctypes.POINTER(ctypes.c_int64)()
out_ptr_indices = ctypes.POINTER(ctypes.c_int32)()
if type_ptr_data == C_API_DTYPE_FLOAT32:
out_ptr_data = ctypes.POINTER(ctypes.c_float)()
else:
out_ptr_data = ctypes.POINTER(ctypes.c_double)()
out_shape = np.zeros(2, dtype=np.int64)
_safe_call(_LIB.LGBM_BoosterPredictSparseOutput(
self.handle,
ptr_indptr,
ctypes.c_int(type_ptr_indptr),
csc_indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int64(len(csc.indptr)),
ctypes.c_int64(len(csc.data)),
ctypes.c_int64(csc.shape[0]),
ctypes.c_int(predict_type),
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
c_str(self.pred_parameter),
ctypes.c_int(matrix_type),
out_shape.ctypes.data_as(ctypes.POINTER(ctypes.c_int64)),
ctypes.byref(out_ptr_indptr),
ctypes.byref(out_ptr_indices),
ctypes.byref(out_ptr_data)))
matrices = self.__create_sparse_native(csc, out_shape, out_ptr_indptr, out_ptr_indices, out_ptr_data,
type_ptr_indptr, type_ptr_data, is_csr=False)
nrow = csc.shape[0]
return matrices, nrow
nrow = csc.shape[0]
if nrow > MAX_INT32:
return self.__pred_for_csr(csc.tocsr(), start_iteration, num_iteration, predict_type)
if predict_type == C_API_PREDICT_CONTRIB:
return inner_predict_sparse(csc, start_iteration, num_iteration, predict_type)
n_preds = self.__get_num_preds(start_iteration, num_iteration, nrow, predict_type)
preds = np.zeros(n_preds, dtype=np.float64)
out_num_preds = ctypes.c_int64(0)
ptr_indptr, type_ptr_indptr, __ = c_int_array(csc.indptr)
ptr_data, type_ptr_data, _ = c_float_array(csc.data)
assert csc.shape[0] <= MAX_INT32
csc_indices = csc.indices.astype(np.int32, copy=False)
_safe_call(_LIB.LGBM_BoosterPredictForCSC(
self.handle,
ptr_indptr,
ctypes.c_int(type_ptr_indptr),
csc_indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int64(len(csc.indptr)),
ctypes.c_int64(len(csc.data)),
ctypes.c_int64(csc.shape[0]),
ctypes.c_int(predict_type),
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
c_str(self.pred_parameter),
ctypes.byref(out_num_preds),
preds.ctypes.data_as(ctypes.POINTER(ctypes.c_double))))
if n_preds != out_num_preds.value:
raise ValueError("Wrong length for predict results")
return preds, nrow
def current_iteration(self):
"""Get the index of the current iteration.
Returns
-------
cur_iter : int
The index of the current iteration.
"""
out_cur_iter = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetCurrentIteration(
self.handle,
ctypes.byref(out_cur_iter)))
return out_cur_iter.value
class Dataset:
"""Dataset in LightGBM."""
def __init__(self, data, label=None, reference=None,
weight=None, group=None, init_score=None, silent=False,
feature_name='auto', categorical_feature='auto', params=None,
free_raw_data=True):
"""Initialize Dataset.
Parameters
----------
data : string, numpy array, pandas DataFrame, H2O DataTable's Frame, scipy.sparse or list of numpy arrays
Data source of Dataset.
If string, it represents the path to txt file.
label : list, numpy 1-D array, pandas Series / one-column DataFrame or None, optional (default=None)
Label of the data.
reference : Dataset or None, optional (default=None)
If this is Dataset for validation, training data should be used as reference.
weight : list, numpy 1-D array, pandas Series or None, optional (default=None)
Weight for each instance.
group : list, numpy 1-D array, pandas Series or None, optional (default=None)
Group/query data.
Only used in the learning-to-rank task.
sum(group) = n_samples.
For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
init_score : list, numpy 1-D array, pandas Series or None, optional (default=None)
Init score for Dataset.
silent : bool, optional (default=False)
Whether to print messages during construction.
feature_name : list of strings or 'auto', optional (default="auto")
Feature names.
If 'auto' and data is pandas DataFrame, data columns names are used.
categorical_feature : list of strings or int, or 'auto', optional (default="auto")
Categorical features.
If list of int, interpreted as indices.
If list of strings, interpreted as feature names (need to specify ``feature_name`` as well).
If 'auto' and data is pandas DataFrame, pandas unordered categorical columns are used.
All values in categorical features should be less than int32 max value (2147483647).
Large values could be memory consuming. Consider using consecutive integers starting from zero.
All negative values in categorical features will be treated as missing values.
The output cannot be monotonically constrained with respect to a categorical feature.
params : dict or None, optional (default=None)
Other parameters for Dataset.
free_raw_data : bool, optional (default=True)
If True, raw data is freed after constructing inner Dataset.
"""
self.handle = None
self.data = data
self.label = label
self.reference = reference
self.weight = weight
self.group = group
self.init_score = init_score
self.silent = silent
self.feature_name = feature_name
self.categorical_feature = categorical_feature
self.params = deepcopy(params)
self.free_raw_data = free_raw_data
self.used_indices = None
self.need_slice = True
self._predictor = None
self.pandas_categorical = None
self.params_back_up = None
self.feature_penalty = None
self.monotone_constraints = None
self.version = 0
def __del__(self):
try:
self._free_handle()
except AttributeError:
pass
def get_params(self):
"""Get the used parameters in the Dataset.
Returns
-------
params : dict or None
The used parameters in this Dataset object.
"""
if self.params is not None:
# min_data, nthreads and verbose are intentionally excluded from the returned Dataset parameters
dataset_params = _ConfigAliases.get("bin_construct_sample_cnt",
"categorical_feature",
"data_random_seed",
"enable_bundle",
"feature_pre_filter",
"forcedbins_filename",
"group_column",
"header",
"ignore_column",
"is_enable_sparse",
"label_column",
"linear_tree",
"max_bin",
"max_bin_by_feature",
"min_data_in_bin",
"pre_partition",
"two_round",
"use_missing",
"weight_column",
"zero_as_missing")
return {k: v for k, v in self.params.items() if k in dataset_params}
def _free_handle(self):
if self.handle is not None:
_safe_call(_LIB.LGBM_DatasetFree(self.handle))
self.handle = None
self.need_slice = True
if self.used_indices is not None:
self.data = None
return self
def _set_init_score_by_predictor(self, predictor, data, used_indices=None):
data_has_header = False
if isinstance(data, str):
# check whether the data file has a header
data_has_header = any(self.params.get(alias, False) for alias in _ConfigAliases.get("header"))
num_data = self.num_data()
if predictor is not None:
init_score = predictor.predict(data,
raw_score=True,
data_has_header=data_has_header,
is_reshape=False)
if used_indices is not None:
assert not self.need_slice
if isinstance(data, str):
sub_init_score = np.zeros(num_data * predictor.num_class, dtype=np.float32)
assert num_data == len(used_indices)
for i in range(len(used_indices)):
for j in range(predictor.num_class):
sub_init_score[i * predictor.num_class + j] = init_score[used_indices[i] * predictor.num_class + j]
init_score = sub_init_score
if predictor.num_class > 1:
# need to regroup init_score
new_init_score = np.zeros(init_score.size, dtype=np.float32)
for i in range(num_data):
for j in range(predictor.num_class):
new_init_score[j * num_data + i] = init_score[i * predictor.num_class + j]
init_score = new_init_score
elif self.init_score is not None:
init_score = np.zeros(self.init_score.shape, dtype=np.float32)
else:
return self
self.set_init_score(init_score)
def _lazy_init(self, data, label=None, reference=None,
weight=None, group=None, init_score=None, predictor=None,
silent=False, feature_name='auto',
categorical_feature='auto', params=None):
if data is None:
self.handle = None
return self
if reference is not None:
self.pandas_categorical = reference.pandas_categorical
categorical_feature = reference.categorical_feature
data, feature_name, categorical_feature, self.pandas_categorical = _data_from_pandas(data,
feature_name,
categorical_feature,
self.pandas_categorical)
label = _label_from_pandas(label)
# process for args
params = {} if params is None else params
args_names = (getattr(self.__class__, '_lazy_init')
.__code__
.co_varnames[:getattr(self.__class__, '_lazy_init').__code__.co_argcount])
for key, _ in params.items():
if key in args_names:
_log_warning(f'{key} keyword has been found in `params` and will be ignored.\n'
f'Please use {key} argument of the Dataset constructor to pass this parameter.')
# the user can set verbose via params; it takes precedence over the silent argument
if not any(verbose_alias in params for verbose_alias in _ConfigAliases.get("verbosity")) and silent:
params["verbose"] = -1
# get categorical features
if categorical_feature is not None:
categorical_indices = set()
feature_dict = {}
if feature_name is not None:
feature_dict = {name: i for i, name in enumerate(feature_name)}
for name in categorical_feature:
if isinstance(name, str) and name in feature_dict:
categorical_indices.add(feature_dict[name])
elif isinstance(name, int):
categorical_indices.add(name)
else:
raise TypeError(f"Wrong type({type(name).__name__}) or unknown name({name}) in categorical_feature")
if categorical_indices:
for cat_alias in _ConfigAliases.get("categorical_feature"):
if cat_alias in params:
_log_warning(f'{cat_alias} in param dict is overridden.')
params.pop(cat_alias, None)
params['categorical_column'] = sorted(categorical_indices)
params_str = param_dict_to_str(params)
self.params = params
# process for reference dataset
ref_dataset = None
if isinstance(reference, Dataset):
ref_dataset = reference.construct().handle
elif reference is not None:
raise TypeError('Reference dataset should be None or dataset instance')
# start constructing the data
if isinstance(data, str):
self.handle = ctypes.c_void_p()
_safe_call(_LIB.LGBM_DatasetCreateFromFile(
c_str(data),
c_str(params_str),
ref_dataset,
ctypes.byref(self.handle)))
elif isinstance(data, scipy.sparse.csr_matrix):
self.__init_from_csr(data, params_str, ref_dataset)
elif isinstance(data, scipy.sparse.csc_matrix):
self.__init_from_csc(data, params_str, ref_dataset)
elif isinstance(data, np.ndarray):
self.__init_from_np2d(data, params_str, ref_dataset)
elif isinstance(data, list) and len(data) > 0 and all(isinstance(x, np.ndarray) for x in data):
self.__init_from_list_np2d(data, params_str, ref_dataset)
elif isinstance(data, dt_DataTable):
self.__init_from_np2d(data.to_numpy(), params_str, ref_dataset)
else:
try:
csr = scipy.sparse.csr_matrix(data)
self.__init_from_csr(csr, params_str, ref_dataset)
except BaseException:
raise TypeError(f'Cannot initialize Dataset from {type(data).__name__}')
if label is not None:
self.set_label(label)
if self.get_label() is None:
raise ValueError("Label should not be None")
if weight is not None:
self.set_weight(weight)
if group is not None:
self.set_group(group)
if isinstance(predictor, _InnerPredictor):
if self._predictor is None and init_score is not None:
_log_warning("The init_score will be overridden by the prediction of init_model.")
self._set_init_score_by_predictor(predictor, data)
elif init_score is not None:
self.set_init_score(init_score)
elif predictor is not None:
raise TypeError(f'Wrong predictor type {type(predictor).__name__}')
# set feature names
return self.set_feature_name(feature_name)
def __init_from_np2d(self, mat, params_str, ref_dataset):
"""Initialize data from a 2-D numpy matrix."""
if len(mat.shape) != 2:
raise ValueError('Input numpy.ndarray must be 2 dimensional')
self.handle = ctypes.c_void_p()
if mat.dtype == np.float32 or mat.dtype == np.float64:
data = np.array(mat.reshape(mat.size), dtype=mat.dtype, copy=False)
else: # change non-float data to float data, need to copy
data = np.array(mat.reshape(mat.size), dtype=np.float32)
ptr_data, type_ptr_data, _ = c_float_array(data)
_safe_call(_LIB.LGBM_DatasetCreateFromMat(
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int32(mat.shape[0]),
ctypes.c_int32(mat.shape[1]),
ctypes.c_int(C_API_IS_ROW_MAJOR),
c_str(params_str),
ref_dataset,
ctypes.byref(self.handle)))
return self
def __init_from_list_np2d(self, mats, params_str, ref_dataset):
"""Initialize data from a list of 2-D numpy matrices."""
ncol = mats[0].shape[1]
nrow = np.zeros((len(mats),), np.int32)
if mats[0].dtype == np.float64:
ptr_data = (ctypes.POINTER(ctypes.c_double) * len(mats))()
else:
ptr_data = (ctypes.POINTER(ctypes.c_float) * len(mats))()
holders = []
type_ptr_data = None
for i, mat in enumerate(mats):
if len(mat.shape) != 2:
raise ValueError('Input numpy.ndarray must be 2 dimensional')
if mat.shape[1] != ncol:
raise ValueError('Input arrays must have same number of columns')
nrow[i] = mat.shape[0]
if mat.dtype == np.float32 or mat.dtype == np.float64:
mats[i] = np.array(mat.reshape(mat.size), dtype=mat.dtype, copy=False)
else: # change non-float data to float data, need to copy
mats[i] = np.array(mat.reshape(mat.size), dtype=np.float32)
chunk_ptr_data, chunk_type_ptr_data, holder = c_float_array(mats[i])
if type_ptr_data is not None and chunk_type_ptr_data != type_ptr_data:
raise ValueError('Input chunks must have same type')
ptr_data[i] = chunk_ptr_data
type_ptr_data = chunk_type_ptr_data
holders.append(holder)
self.handle = ctypes.c_void_p()
_safe_call(_LIB.LGBM_DatasetCreateFromMats(
ctypes.c_int32(len(mats)),
ctypes.cast(ptr_data, ctypes.POINTER(ctypes.POINTER(ctypes.c_double))),
ctypes.c_int(type_ptr_data),
nrow.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
ctypes.c_int32(ncol),
ctypes.c_int(C_API_IS_ROW_MAJOR),
c_str(params_str),
ref_dataset,
ctypes.byref(self.handle)))
return self
def __init_from_csr(self, csr, params_str, ref_dataset):
"""Initialize data from a CSR matrix."""
if len(csr.indices) != len(csr.data):
raise ValueError(f'Length mismatch: {len(csr.indices)} vs {len(csr.data)}')
self.handle = ctypes.c_void_p()
ptr_indptr, type_ptr_indptr, __ = c_int_array(csr.indptr)
ptr_data, type_ptr_data, _ = c_float_array(csr.data)
assert csr.shape[1] <= MAX_INT32
csr_indices = csr.indices.astype(np.int32, copy=False)
_safe_call(_LIB.LGBM_DatasetCreateFromCSR(
ptr_indptr,
ctypes.c_int(type_ptr_indptr),
csr_indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int64(len(csr.indptr)),
ctypes.c_int64(len(csr.data)),
ctypes.c_int64(csr.shape[1]),
c_str(params_str),
ref_dataset,
ctypes.byref(self.handle)))
return self
def __init_from_csc(self, csc, params_str, ref_dataset):
"""Initialize data from a CSC matrix."""
if len(csc.indices) != len(csc.data):
raise ValueError(f'Length mismatch: {len(csc.indices)} vs {len(csc.data)}')
self.handle = ctypes.c_void_p()
ptr_indptr, type_ptr_indptr, __ = c_int_array(csc.indptr)
ptr_data, type_ptr_data, _ = c_float_array(csc.data)
assert csc.shape[0] <= MAX_INT32
csc_indices = csc.indices.astype(np.int32, copy=False)
_safe_call(_LIB.LGBM_DatasetCreateFromCSC(
ptr_indptr,
ctypes.c_int(type_ptr_indptr),
csc_indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int64(len(csc.indptr)),
ctypes.c_int64(len(csc.data)),
ctypes.c_int64(csc.shape[0]),
c_str(params_str),
ref_dataset,
ctypes.byref(self.handle)))
return self
def construct(self):
"""Lazy init.
Returns
-------
self : Dataset
Constructed Dataset object.
"""
if self.handle is None:
if self.reference is not None:
reference_params = self.reference.get_params()
if self.get_params() != reference_params:
_log_warning('Overriding the parameters from Reference Dataset.')
self._update_params(reference_params)
if self.used_indices is None:
# create valid
self._lazy_init(self.data, label=self.label, reference=self.reference,
weight=self.weight, group=self.group,
init_score=self.init_score, predictor=self._predictor,
silent=self.silent, feature_name=self.feature_name, params=self.params)
else:
# construct subset
used_indices = list_to_1d_numpy(self.used_indices, np.int32, name='used_indices')
assert used_indices.flags.c_contiguous
if self.reference.group is not None:
group_info = np.array(self.reference.group).astype(np.int32, copy=False)
_, self.group = np.unique(np.repeat(range(len(group_info)), repeats=group_info)[self.used_indices],
return_counts=True)
self.handle = ctypes.c_void_p()
params_str = param_dict_to_str(self.params)
_safe_call(_LIB.LGBM_DatasetGetSubset(
self.reference.construct().handle,
used_indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
ctypes.c_int32(used_indices.shape[0]),
c_str(params_str),
ctypes.byref(self.handle)))
if not self.free_raw_data:
self.get_data()
if self.group is not None:
self.set_group(self.group)
if self.get_label() is None:
raise ValueError("Label should not be None.")
if isinstance(self._predictor, _InnerPredictor) and self._predictor is not self.reference._predictor:
self.get_data()
self._set_init_score_by_predictor(self._predictor, self.data, used_indices)
else:
# create train
self._lazy_init(self.data, label=self.label,
weight=self.weight, group=self.group,
init_score=self.init_score, predictor=self._predictor,
silent=self.silent, feature_name=self.feature_name,
categorical_feature=self.categorical_feature, params=self.params)
if self.free_raw_data:
self.data = None
return self
def create_valid(self, data, label=None, weight=None, group=None,
init_score=None, silent=False, params=None):
"""Create validation data align with current Dataset.
Parameters
----------
data : string, numpy array, pandas DataFrame, H2O DataTable's Frame, scipy.sparse or list of numpy arrays
Data source of Dataset.
If string, it represents the path to txt file.
label : list, numpy 1-D array, pandas Series / one-column DataFrame or None, optional (default=None)
Label of the data.
weight : list, numpy 1-D array, pandas Series or None, optional (default=None)
Weight for each instance.
group : list, numpy 1-D array, pandas Series or None, optional (default=None)
Group/query data.
Only used in the learning-to-rank task.
sum(group) = n_samples.
For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
init_score : list, numpy 1-D array, pandas Series or None, optional (default=None)
Init score for Dataset.
silent : bool, optional (default=False)
Whether to print messages during construction.
params : dict or None, optional (default=None)
Other parameters for validation Dataset.
Returns
-------
valid : Dataset
Validation Dataset with reference to self.
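Examples
--------
A minimal sketch (assumes numpy arrays ``X_train``/``y_train``/``X_valid``/``y_valid``; names are illustrative):
>>> train_data = lgb.Dataset(X_train, label=y_train)
>>> valid_data = train_data.create_valid(X_valid, label=y_valid)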
"""
ret = Dataset(data, label=label, reference=self,
weight=weight, group=group, init_score=init_score,
silent=silent, params=params, free_raw_data=self.free_raw_data)
ret._predictor = self._predictor
ret.pandas_categorical = self.pandas_categorical
return ret
def subset(self, used_indices, params=None):
"""Get subset of current Dataset.
Parameters
----------
used_indices : list of int
Indices used to create the subset.
params : dict or None, optional (default=None)
These parameters will be passed to Dataset constructor.
Returns
-------
subset : Dataset
Subset of the current Dataset.
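Examples
--------
A minimal sketch selecting the first 100 rows (indices and data names are illustrative):
>>> train_data = lgb.Dataset(X, label=y, free_raw_data=False)
>>> first_hundred = train_data.subset(list(range(100)))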
"""
if params is None:
params = self.params
ret = Dataset(None, reference=self, feature_name=self.feature_name,
categorical_feature=self.categorical_feature, params=params,
free_raw_data=self.free_raw_data)
ret._predictor = self._predictor
ret.pandas_categorical = self.pandas_categorical
ret.used_indices = sorted(used_indices)
return ret
def save_binary(self, filename):
"""Save Dataset to a binary file.
.. note::
Please note that `init_score` is not saved in the binary file.
If you need it, please set it again after loading the Dataset.
Parameters
----------
filename : string
Name of the output file.
Returns
-------
self : Dataset
Returns self.
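Examples
--------
A minimal sketch; the file name is illustrative:
>>> train_data = lgb.Dataset(X, label=y)
>>> train_data.save_binary('train.bin')
>>> reloaded = lgb.Dataset('train.bin')  # a binary file can be loaded back via its path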
"""
_safe_call(_LIB.LGBM_DatasetSaveBinary(
self.construct().handle,
c_str(filename)))
return self
def _update_params(self, params):
if not params:
return self
params = deepcopy(params)
def update():
if not self.params:
self.params = params
else:
self.params_back_up = deepcopy(self.params)
self.params.update(params)
if self.handle is None:
update()
elif params is not None:
ret = _LIB.LGBM_DatasetUpdateParamChecking(
c_str(param_dict_to_str(self.params)),
c_str(param_dict_to_str(params)))
if ret != 0:
# could be updated if data is not freed
if self.data is not None:
update()
self._free_handle()
else:
raise LightGBMError(_LIB.LGBM_GetLastError().decode('utf-8'))
return self
def _reverse_update_params(self):
if self.handle is None:
self.params = deepcopy(self.params_back_up)
self.params_back_up = None
return self
def set_field(self, field_name, data):
"""Set property into the Dataset.
Parameters
----------
field_name : string
The field name of the information.
data : list, numpy 1-D array, pandas Series or None
The array of data to be set.
Returns
-------
self : Dataset
Dataset with set property.
"""
if self.handle is None:
raise Exception(f"Cannot set {field_name} before construct dataset")
if data is None:
# set to None
_safe_call(_LIB.LGBM_DatasetSetField(
self.handle,
c_str(field_name),
None,
ctypes.c_int(0),
ctypes.c_int(FIELD_TYPE_MAPPER[field_name])))
return self
dtype = np.float32
if field_name == 'group':
dtype = np.int32
elif field_name == 'init_score':
dtype = np.float64
data = list_to_1d_numpy(data, dtype, name=field_name)
if data.dtype == np.float32 or data.dtype == np.float64:
ptr_data, type_data, _ = c_float_array(data)
elif data.dtype == np.int32:
ptr_data, type_data, _ = c_int_array(data)
else:
raise TypeError(f"Expected np.float32/64 or np.int32, met type({data.dtype})")
if type_data != FIELD_TYPE_MAPPER[field_name]:
raise TypeError("Input type error for set_field")
_safe_call(_LIB.LGBM_DatasetSetField(
self.handle,
c_str(field_name),
ptr_data,
ctypes.c_int(len(data)),
ctypes.c_int(type_data)))
self.version += 1
return self
def get_field(self, field_name):
"""Get property from the Dataset.
Parameters
----------
field_name : string
The field name of the information.
Returns
-------
info : numpy array
A numpy array with information from the Dataset.
"""
if self.handle is None:
raise Exception(f"Cannot get {field_name} before construct Dataset")
tmp_out_len = ctypes.c_int(0)
out_type = ctypes.c_int(0)
ret = ctypes.POINTER(ctypes.c_void_p)()
_safe_call(_LIB.LGBM_DatasetGetField(
self.handle,
c_str(field_name),
ctypes.byref(tmp_out_len),
ctypes.byref(ret),
ctypes.byref(out_type)))
if out_type.value != FIELD_TYPE_MAPPER[field_name]:
raise TypeError("Return type error for get_field")
if tmp_out_len.value == 0:
return None
if out_type.value == C_API_DTYPE_INT32:
return cint32_array_to_numpy(ctypes.cast(ret, ctypes.POINTER(ctypes.c_int32)), tmp_out_len.value)
elif out_type.value == C_API_DTYPE_FLOAT32:
return cfloat32_array_to_numpy(ctypes.cast(ret, ctypes.POINTER(ctypes.c_float)), tmp_out_len.value)
elif out_type.value == C_API_DTYPE_FLOAT64:
return cfloat64_array_to_numpy(ctypes.cast(ret, ctypes.POINTER(ctypes.c_double)), tmp_out_len.value)
else:
raise TypeError("Unknown type")
def set_categorical_feature(self, categorical_feature):
"""Set categorical features.
Parameters
----------
categorical_feature : list of int or strings
Names or indices of categorical features.
Returns
-------
self : Dataset
Dataset with set categorical features.
"""
if self.categorical_feature == categorical_feature:
return self
if self.data is not None:
if self.categorical_feature is None:
self.categorical_feature = categorical_feature
return self._free_handle()
elif categorical_feature == 'auto':
_log_warning('Using categorical_feature in Dataset.')
return self
else:
_log_warning('categorical_feature in Dataset is overridden.\n'
f'New categorical_feature is {sorted(list(categorical_feature))}')
self.categorical_feature = categorical_feature
return self._free_handle()
else:
raise LightGBMError("Cannot set categorical feature after freed raw data, "
"set free_raw_data=False when construct Dataset to avoid this.")
def _set_predictor(self, predictor):
"""Set predictor for continued training.
It is not recommended for users to call this function.
Please use init_model argument in engine.train() or engine.cv() instead.
"""
if predictor is self._predictor and (predictor is None or predictor.current_iteration() == self._predictor.current_iteration()):
return self
if self.handle is None:
self._predictor = predictor
elif self.data is not None:
self._predictor = predictor
self._set_init_score_by_predictor(self._predictor, self.data)
elif self.used_indices is not None and self.reference is not None and self.reference.data is not None:
self._predictor = predictor
self._set_init_score_by_predictor(self._predictor, self.reference.data, self.used_indices)
else:
raise LightGBMError("Cannot set predictor after freed raw data, "
"set free_raw_data=False when construct Dataset to avoid this.")
return self
def set_reference(self, reference):
"""Set reference Dataset.
Parameters
----------
reference : Dataset
Reference that is used as a template to construct the current Dataset.
Returns
-------
self : Dataset
Dataset with set reference.
"""
self.set_categorical_feature(reference.categorical_feature) \
.set_feature_name(reference.feature_name) \
._set_predictor(reference._predictor)
# we're done if self and reference share a common upstream reference
if self.get_ref_chain().intersection(reference.get_ref_chain()):
return self
if self.data is not None:
self.reference = reference
return self._free_handle()
else:
raise LightGBMError("Cannot set reference after freed raw data, "
"set free_raw_data=False when construct Dataset to avoid this.")
def set_feature_name(self, feature_name):
"""Set feature name.
Parameters
----------
feature_name : list of strings
Feature names.
Returns
-------
self : Dataset
Dataset with set feature name.
"""
if feature_name != 'auto':
self.feature_name = feature_name
if self.handle is not None and feature_name is not None and feature_name != 'auto':
if len(feature_name) != self.num_feature():
raise ValueError(f"Length of feature_name({len(feature_name)}) and num_feature({self.num_feature()}) don't match")
c_feature_name = [c_str(name) for name in feature_name]
_safe_call(_LIB.LGBM_DatasetSetFeatureNames(
self.handle,
c_array(ctypes.c_char_p, c_feature_name),
ctypes.c_int(len(feature_name))))
return self
def set_label(self, label):
"""Set label of Dataset.
Parameters
----------
label : list, numpy 1-D array, pandas Series / one-column DataFrame or None
The label information to be set into Dataset.
Returns
-------
self : Dataset
Dataset with set label.
"""
self.label = label
if self.handle is not None:
label = list_to_1d_numpy(_label_from_pandas(label), name='label')
self.set_field('label', label)
self.label = self.get_field('label') # original values can be modified at cpp side
return self
def set_weight(self, weight):
"""Set weight of each instance.
Parameters
----------
weight : list, numpy 1-D array, pandas Series or None
Weight to be set for each data point.
Returns
-------
self : Dataset
Dataset with set weight.
"""
if weight is not None and np.all(weight == 1):
weight = None
self.weight = weight
if self.handle is not None and weight is not None:
weight = list_to_1d_numpy(weight, name='weight')
self.set_field('weight', weight)
self.weight = self.get_field('weight') # original values can be modified at cpp side
return self
def set_init_score(self, init_score):
"""Set init score of Booster to start from.
Parameters
----------
init_score : list, numpy 1-D array, pandas Series or None
Init score for Booster.
Returns
-------
self : Dataset
Dataset with set init score.
"""
self.init_score = init_score
if self.handle is not None and init_score is not None:
init_score = list_to_1d_numpy(init_score, np.float64, name='init_score')
self.set_field('init_score', init_score)
self.init_score = self.get_field('init_score') # original values can be modified at cpp side
return self
def set_group(self, group):
"""Set group size of Dataset (used for ranking).
Parameters
----------
group : list, numpy 1-D array, pandas Series or None
Group/query data.
Only used in the learning-to-rank task.
sum(group) = n_samples.
For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
Returns
-------
self : Dataset
Dataset with set group.
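Examples
--------
A minimal sketch for a 100-document ranking dataset, matching the example above
(data names are illustrative):
>>> train_data = lgb.Dataset(X, label=relevance_labels)
>>> train_data.set_group([10, 20, 40, 10, 10, 10])  # 6 query groups whose sizes sum to 100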
"""
self.group = group
if self.handle is not None and group is not None:
group = list_to_1d_numpy(group, np.int32, name='group')
self.set_field('group', group)
return self
def get_feature_name(self):
"""Get the names of columns (features) in the Dataset.
Returns
-------
feature_names : list
The names of columns (features) in the Dataset.
"""
if self.handle is None:
raise LightGBMError("Cannot get feature_name before construct dataset")
num_feature = self.num_feature()
tmp_out_len = ctypes.c_int(0)
reserved_string_buffer_size = 255
required_string_buffer_size = ctypes.c_size_t(0)
string_buffers = [ctypes.create_string_buffer(reserved_string_buffer_size) for _ in range(num_feature)]
ptr_string_buffers = (ctypes.c_char_p * num_feature)(*map(ctypes.addressof, string_buffers))
_safe_call(_LIB.LGBM_DatasetGetFeatureNames(
self.handle,
ctypes.c_int(num_feature),
ctypes.byref(tmp_out_len),
ctypes.c_size_t(reserved_string_buffer_size),
ctypes.byref(required_string_buffer_size),
ptr_string_buffers))
if num_feature != tmp_out_len.value:
raise ValueError("Length of feature names doesn't equal with num_feature")
actual_string_buffer_size = required_string_buffer_size.value
# if buffer length is not long enough, reallocate buffers
if reserved_string_buffer_size < actual_string_buffer_size:
string_buffers = [ctypes.create_string_buffer(actual_string_buffer_size) for _ in range(num_feature)]
ptr_string_buffers = (ctypes.c_char_p * num_feature)(*map(ctypes.addressof, string_buffers))
_safe_call(_LIB.LGBM_DatasetGetFeatureNames(
self.handle,
ctypes.c_int(num_feature),
ctypes.byref(tmp_out_len),
ctypes.c_size_t(actual_string_buffer_size),
ctypes.byref(required_string_buffer_size),
ptr_string_buffers))
return [string_buffers[i].value.decode('utf-8') for i in range(num_feature)]
def get_label(self):
"""Get the label of the Dataset.
Returns
-------
label : numpy array or None
The label information from the Dataset.
"""
if self.label is None:
self.label = self.get_field('label')
return self.label
def get_weight(self):
"""Get the weight of the Dataset.
Returns
-------
weight : numpy array or None
Weight for each data point from the Dataset.
"""
if self.weight is None:
self.weight = self.get_field('weight')
return self.weight
def get_init_score(self):
"""Get the initial score of the Dataset.
Returns
-------
init_score : numpy array or None
Init score of Booster.
"""
if self.init_score is None:
self.init_score = self.get_field('init_score')
return self.init_score
def get_data(self):
"""Get the raw data of the Dataset.
Returns
-------
data : string, numpy array, pandas DataFrame, H2O DataTable's Frame, scipy.sparse, list of numpy arrays or None
Raw data used in the Dataset construction.
"""
if self.handle is None:
raise Exception("Cannot get data before construct Dataset")
if self.need_slice and self.used_indices is not None and self.reference is not None:
self.data = self.reference.data
if self.data is not None:
if isinstance(self.data, np.ndarray) or scipy.sparse.issparse(self.data):
self.data = self.data[self.used_indices, :]
elif isinstance(self.data, pd_DataFrame):
self.data = self.data.iloc[self.used_indices].copy()
elif isinstance(self.data, dt_DataTable):
self.data = self.data[self.used_indices, :]
else:
_log_warning(f"Cannot subset {type(self.data).__name__} type of raw data.\n"
"Returning original raw data")
self.need_slice = False
if self.data is None:
raise LightGBMError("Cannot call `get_data` after freed raw data, "
"set free_raw_data=False when construct Dataset to avoid this.")
return self.data
def get_group(self):
"""Get the group of the Dataset.
Returns
-------
group : numpy array or None
Group/query data.
Only used in the learning-to-rank task.
sum(group) = n_samples.
For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
"""
if self.group is None:
self.group = self.get_field('group')
if self.group is not None:
# group data from LightGBM is boundary data; need to convert to group sizes
self.group = np.diff(self.group)
return self.group
def num_data(self):
"""Get the number of rows in the Dataset.
Returns
-------
number_of_rows : int
The number of rows in the Dataset.
"""
if self.handle is not None:
ret = ctypes.c_int(0)
_safe_call(_LIB.LGBM_DatasetGetNumData(self.handle,
ctypes.byref(ret)))
return ret.value
else:
raise LightGBMError("Cannot get num_data before construct dataset")
def num_feature(self):
"""Get the number of columns (features) in the Dataset.
Returns
-------
number_of_columns : int
The number of columns (features) in the Dataset.
"""
if self.handle is not None:
ret = ctypes.c_int(0)
_safe_call(_LIB.LGBM_DatasetGetNumFeature(self.handle,
ctypes.byref(ret)))
return ret.value
else:
raise LightGBMError("Cannot get num_feature before construct dataset")
def get_ref_chain(self, ref_limit=100):
"""Get a chain of Dataset objects.
Starts with this Dataset, then goes to its ``reference`` (if it exists),
then to the reference of that reference, etc.,
until ``ref_limit`` is reached or a reference loop is detected.
Parameters
----------
ref_limit : int, optional (default=100)
The limit number of references.
Returns
-------
ref_chain : set of Dataset
Chain of references of the Datasets.
"""
head = self
ref_chain = set()
while len(ref_chain) < ref_limit:
if isinstance(head, Dataset):
ref_chain.add(head)
if (head.reference is not None) and (head.reference not in ref_chain):
head = head.reference
else:
break
else:
break
return ref_chain
def add_features_from(self, other):
"""Add features from other Dataset to the current Dataset.
Both Datasets must be constructed before calling this method.
Parameters
----------
other : Dataset
The Dataset to take features from.
Returns
-------
self : Dataset
Dataset with the new features added.
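Examples
--------
A minimal sketch; both Datasets are constructed first and keep their raw data
(data names are illustrative):
>>> d1 = lgb.Dataset(X1, label=y, free_raw_data=False).construct()
>>> d2 = lgb.Dataset(X2, label=y, free_raw_data=False).construct()
>>> d1 = d1.add_features_from(d2)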
"""
if self.handle is None or other.handle is None:
raise ValueError('Both source and target Datasets must be constructed before adding features')
_safe_call(_LIB.LGBM_DatasetAddFeaturesFrom(self.handle, other.handle))
was_none = self.data is None
old_self_data_type = type(self.data).__name__
if other.data is None:
self.data = None
elif self.data is not None:
if isinstance(self.data, np.ndarray):
if isinstance(other.data, np.ndarray):
self.data = np.hstack((self.data, other.data))
elif scipy.sparse.issparse(other.data):
self.data = np.hstack((self.data, other.data.toarray()))
elif isinstance(other.data, pd_DataFrame):
self.data = np.hstack((self.data, other.data.values))
elif isinstance(other.data, dt_DataTable):
self.data = np.hstack((self.data, other.data.to_numpy()))
else:
self.data = None
elif scipy.sparse.issparse(self.data):
sparse_format = self.data.getformat()
if isinstance(other.data, np.ndarray) or scipy.sparse.issparse(other.data):
self.data = scipy.sparse.hstack((self.data, other.data), format=sparse_format)
elif isinstance(other.data, pd_DataFrame):
self.data = scipy.sparse.hstack((self.data, other.data.values), format=sparse_format)
elif isinstance(other.data, dt_DataTable):
self.data = scipy.sparse.hstack((self.data, other.data.to_numpy()), format=sparse_format)
else:
self.data = None
elif isinstance(self.data, pd_DataFrame):
if not PANDAS_INSTALLED:
raise LightGBMError("Cannot add features to DataFrame type of raw data "
"without pandas installed. "
"Install pandas and restart your session.")
if isinstance(other.data, np.ndarray):
self.data = concat((self.data, pd_DataFrame(other.data)),
axis=1, ignore_index=True)
elif scipy.sparse.issparse(other.data):
self.data = concat((self.data, pd_DataFrame(other.data.toarray())),
axis=1, ignore_index=True)
elif isinstance(other.data, pd_DataFrame):
self.data = concat((self.data, other.data),
axis=1, ignore_index=True)
elif isinstance(other.data, dt_DataTable):
self.data = concat((self.data, pd_DataFrame(other.data.to_numpy())),
axis=1, ignore_index=True)
else:
self.data = None
elif isinstance(self.data, dt_DataTable):
if isinstance(other.data, np.ndarray):
self.data = dt_DataTable(np.hstack((self.data.to_numpy(), other.data)))
elif scipy.sparse.issparse(other.data):
self.data = dt_DataTable(np.hstack((self.data.to_numpy(), other.data.toarray())))
elif isinstance(other.data, pd_DataFrame):
self.data = dt_DataTable(np.hstack((self.data.to_numpy(), other.data.values)))
elif isinstance(other.data, dt_DataTable):
self.data = dt_DataTable(np.hstack((self.data.to_numpy(), other.data.to_numpy())))
else:
self.data = None
else:
self.data = None
if self.data is None:
err_msg = (f"Cannot add features from {type(other.data).__name__} type of raw data to "
f"{old_self_data_type} type of raw data.\n")
err_msg += ("Set free_raw_data=False when construct Dataset to avoid this"
if was_none else "Freeing raw data")
_log_warning(err_msg)
self.feature_name = self.get_feature_name()
_log_warning("Reseting categorical features.\n"
"You can set new categorical features via ``set_categorical_feature`` method")
self.categorical_feature = "auto"
self.pandas_categorical = None
return self
def _dump_text(self, filename):
"""Save Dataset to a text file.
This format cannot be loaded back in by LightGBM, but is useful for debugging purposes.
Parameters
----------
filename : string
Name of the output file.
Returns
-------
self : Dataset
Returns self.
"""
_safe_call(_LIB.LGBM_DatasetDumpText(
self.construct().handle,
c_str(filename)))
return self
class Booster:
"""Booster in LightGBM."""
def __init__(self, params=None, train_set=None, model_file=None, model_str=None, silent=False):
"""Initialize the Booster.
Parameters
----------
params : dict or None, optional (default=None)
Parameters for Booster.
train_set : Dataset or None, optional (default=None)
Training dataset.
model_file : string or None, optional (default=None)
Path to the model file.
model_str : string or None, optional (default=None)
Model will be loaded from this string.
silent : bool, optional (default=False)
Whether to print messages during construction.
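Examples
--------
A minimal sketch of two common ways to obtain a Booster
(parameter values and the file name are illustrative):
>>> train_data = lgb.Dataset(X, label=y)
>>> bst = lgb.Booster(params={'objective': 'regression', 'verbose': -1}, train_set=train_data)
>>> bst_from_file = lgb.Booster(model_file='model.txt')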
"""
self.handle = None
self.network = False
self.__need_reload_eval_info = True
self._train_data_name = "training"
self.__attr = {}
self.__set_objective_to_none = False
self.best_iteration = -1
self.best_score = {}
params = {} if params is None else deepcopy(params)
# the user can set verbose via params; it takes precedence over the silent argument
if not any(verbose_alias in params for verbose_alias in _ConfigAliases.get("verbosity")) and silent:
params["verbose"] = -1
if train_set is not None:
# Training task
if not isinstance(train_set, Dataset):
raise TypeError(f'Training data should be a Dataset instance, got {type(train_set).__name__}')
params = _choose_param_value(
main_param_name="machines",
params=params,
default_value=None
)
# if "machines" is given, assume user wants to do distributed learning, and set up network
if params["machines"] is None:
params.pop("machines", None)
else:
machines = params["machines"]
if isinstance(machines, str):
num_machines_from_machine_list = len(machines.split(','))
elif isinstance(machines, (list, set)):
num_machines_from_machine_list = len(machines)
machines = ','.join(machines)
else:
raise ValueError("Invalid machines in params.")
params = _choose_param_value(
main_param_name="num_machines",
params=params,
default_value=num_machines_from_machine_list
)
params = _choose_param_value(
main_param_name="local_listen_port",
params=params,
default_value=12400
)
self.set_network(
machines=machines,
local_listen_port=params["local_listen_port"],
listen_time_out=params.get("time_out", 120),
num_machines=params["num_machines"]
)
# construct booster object
train_set.construct()
# copy the parameters from train_set
params.update(train_set.get_params())
params_str = param_dict_to_str(params)
self.handle = ctypes.c_void_p()
_safe_call(_LIB.LGBM_BoosterCreate(
train_set.handle,
c_str(params_str),
ctypes.byref(self.handle)))
# save reference to data
self.train_set = train_set
self.valid_sets = []
self.name_valid_sets = []
self.__num_dataset = 1
self.__init_predictor = train_set._predictor
if self.__init_predictor is not None:
_safe_call(_LIB.LGBM_BoosterMerge(
self.handle,
self.__init_predictor.handle))
out_num_class = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetNumClasses(
self.handle,
ctypes.byref(out_num_class)))
self.__num_class = out_num_class.value
# buffer for inner predict
self.__inner_predict_buffer = [None]
self.__is_predicted_cur_iter = [False]
self.__get_eval_info()
self.pandas_categorical = train_set.pandas_categorical
self.train_set_version = train_set.version
elif model_file is not None:
# Prediction task
out_num_iterations = ctypes.c_int(0)
self.handle = ctypes.c_void_p()
_safe_call(_LIB.LGBM_BoosterCreateFromModelfile(
c_str(model_file),
ctypes.byref(out_num_iterations),
ctypes.byref(self.handle)))
out_num_class = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetNumClasses(
self.handle,
ctypes.byref(out_num_class)))
self.__num_class = out_num_class.value
self.pandas_categorical = _load_pandas_categorical(file_name=model_file)
elif model_str is not None:
self.model_from_string(model_str, not silent)
else:
raise TypeError('Need at least one of: training dataset, model file, or model string '
'to create a Booster instance')
self.params = params
def __del__(self):
try:
if self.network:
self.free_network()
except AttributeError:
pass
try:
if self.handle is not None:
_safe_call(_LIB.LGBM_BoosterFree(self.handle))
except AttributeError:
pass
def __copy__(self):
return self.__deepcopy__(None)
def __deepcopy__(self, _):
model_str = self.model_to_string(num_iteration=-1)
booster = Booster(model_str=model_str)
return booster
def __getstate__(self):
this = self.__dict__.copy()
handle = this['handle']
this.pop('train_set', None)
this.pop('valid_sets', None)
if handle is not None:
this["handle"] = self.model_to_string(num_iteration=-1)
return this
def __setstate__(self, state):
model_str = state.get('handle', None)
if model_str is not None:
handle = ctypes.c_void_p()
out_num_iterations = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterLoadModelFromString(
c_str(model_str),
ctypes.byref(out_num_iterations),
ctypes.byref(handle)))
state['handle'] = handle
self.__dict__.update(state)
def free_dataset(self):
"""Free Booster's Datasets.
Returns
-------
self : Booster
Booster without Datasets.
"""
self.__dict__.pop('train_set', None)
self.__dict__.pop('valid_sets', None)
self.__num_dataset = 0
return self
def _free_buffer(self):
self.__inner_predict_buffer = []
self.__is_predicted_cur_iter = []
return self
def set_network(
self,
machines: Union[List[str], Set[str], str],
local_listen_port: int = 12400,
listen_time_out: int = 120,
num_machines: int = 1
) -> "Booster":
"""Set the network configuration.
Parameters
----------
machines : list, set or string
Names of machines.
local_listen_port : int, optional (default=12400)
TCP listen port for local machines.
listen_time_out : int, optional (default=120)
Socket time-out in minutes.
num_machines : int, optional (default=1)
The number of machines for distributed learning application.
Returns
-------
self : Booster
Booster with set network.
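Examples
--------
A minimal sketch for two machines; addresses and ports are illustrative:
>>> bst.set_network('192.168.0.1:12400,192.168.0.2:12400', num_machines=2)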
"""
if isinstance(machines, (list, set)):
machines = ','.join(machines)
_safe_call(_LIB.LGBM_NetworkInit(c_str(machines),
ctypes.c_int(local_listen_port),
ctypes.c_int(listen_time_out),
ctypes.c_int(num_machines)))
self.network = True
return self
def free_network(self):
"""Free Booster's network.
Returns
-------
self : Booster
Booster with freed network.
"""
_safe_call(_LIB.LGBM_NetworkFree())
self.network = False
return self
def trees_to_dataframe(self):
"""Parse the fitted model and return in an easy-to-read pandas DataFrame.
The returned DataFrame has the following columns.
- ``tree_index`` : int64, which tree a node belongs to. 0-based, so a value of ``6``, for example, means "this node is in the 7th tree".
- ``node_depth`` : int64, how far a node is from the root of the tree. The root node has a value of ``1``, its direct children are ``2``, etc.
- ``node_index`` : string, unique identifier for a node.
- ``left_child`` : string, ``node_index`` of the child node to the left of a split. ``None`` for leaf nodes.
- ``right_child`` : string, ``node_index`` of the child node to the right of a split. ``None`` for leaf nodes.
- ``parent_index`` : string, ``node_index`` of this node's parent. ``None`` for the root node.
- ``split_feature`` : string, name of the feature used for splitting. ``None`` for leaf nodes.
- ``split_gain`` : float64, gain from adding this split to the tree. ``NaN`` for leaf nodes.
- ``threshold`` : float64, value of the feature used to decide which side of the split a record will go down. ``NaN`` for leaf nodes.
- ``decision_type`` : string, logical operator describing how to compare a value to ``threshold``.
For example, ``split_feature = "Column_10", threshold = 15, decision_type = "<="`` means that
records where ``Column_10 <= 15`` follow the left side of the split, otherwise they follow the right side of the split. ``None`` for leaf nodes.
- ``missing_direction`` : string, split direction that missing values should go to. ``None`` for leaf nodes.
- ``missing_type`` : string, describes what types of values are treated as missing.
- ``value`` : float64, predicted value for this leaf node, multiplied by the learning rate.
- ``weight`` : float64 or int64, sum of hessian (second-order derivative of objective), summed over observations that fall in this node.
- ``count`` : int64, number of records in the training data that fall into this node.
Returns
-------
result : pandas DataFrame
Returns a pandas DataFrame of the parsed model.
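Examples
--------
A minimal sketch inspecting a few of the columns described above (column selection is illustrative):
>>> tree_df = bst.trees_to_dataframe()
>>> tree_df[['tree_index', 'node_index', 'split_feature', 'threshold', 'value']].head()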
"""
if not PANDAS_INSTALLED:
raise LightGBMError('This method cannot be run without pandas installed. '
'You must install pandas and restart your session to use this method.')
if self.num_trees() == 0:
raise LightGBMError('There are no trees in this Booster and thus nothing to parse')
def _is_split_node(tree):
return 'split_index' in tree.keys()
def create_node_record(tree, node_depth=1, tree_index=None,
feature_names=None, parent_node=None):
def _get_node_index(tree, tree_index):
tree_num = f'{tree_index}-' if tree_index is not None else ''
is_split = _is_split_node(tree)
node_type = 'S' if is_split else 'L'
# a single-node tree won't have `leaf_index`, so return 0
node_num = tree.get('split_index' if is_split else 'leaf_index', 0)
return f"{tree_num}{node_type}{node_num}"
def _get_split_feature(tree, feature_names):
if _is_split_node(tree):
if feature_names is not None:
feature_name = feature_names[tree['split_feature']]
else:
feature_name = tree['split_feature']
else:
feature_name = None
return feature_name
def _is_single_node_tree(tree):
return set(tree.keys()) == {'leaf_value'}
# Create the node record, and populate universal data members
node = OrderedDict()
node['tree_index'] = tree_index
node['node_depth'] = node_depth
node['node_index'] = _get_node_index(tree, tree_index)
node['left_child'] = None
node['right_child'] = None
node['parent_index'] = parent_node
node['split_feature'] = _get_split_feature(tree, feature_names)
node['split_gain'] = None
node['threshold'] = None
node['decision_type'] = None
node['missing_direction'] = None
node['missing_type'] = None
node['value'] = None
node['weight'] = None
node['count'] = None
# Update values to reflect node type (leaf or split)
if _is_split_node(tree):
node['left_child'] = _get_node_index(tree['left_child'], tree_index)
node['right_child'] = _get_node_index(tree['right_child'], tree_index)
node['split_gain'] = tree['split_gain']
node['threshold'] = tree['threshold']
node['decision_type'] = tree['decision_type']
node['missing_direction'] = 'left' if tree['default_left'] else 'right'
node['missing_type'] = tree['missing_type']
node['value'] = tree['internal_value']
node['weight'] = tree['internal_weight']
node['count'] = tree['internal_count']
else:
node['value'] = tree['leaf_value']
if not _is_single_node_tree(tree):
node['weight'] = tree['leaf_weight']
node['count'] = tree['leaf_count']
return node
def tree_dict_to_node_list(tree, node_depth=1, tree_index=None,
feature_names=None, parent_node=None):
node = create_node_record(tree,
node_depth=node_depth,
tree_index=tree_index,
feature_names=feature_names,
parent_node=parent_node)
res = [node]
if _is_split_node(tree):
# traverse the next level of the tree
children = ['left_child', 'right_child']
for child in children:
subtree_list = tree_dict_to_node_list(
tree[child],
node_depth=node_depth + 1,
tree_index=tree_index,
feature_names=feature_names,
parent_node=node['node_index'])
# In tree format, "subtree_list" is a list of node records (dicts),
# and we add node to the list.
res.extend(subtree_list)
return res
model_dict = self.dump_model()
feature_names = model_dict['feature_names']
model_list = []
for tree in model_dict['tree_info']:
model_list.extend(tree_dict_to_node_list(tree['tree_structure'],
tree_index=tree['tree_index'],
feature_names=feature_names))
return pd_DataFrame(model_list, columns=model_list[0].keys())
def set_train_data_name(self, name):
"""Set the name to the training Dataset.
Parameters
----------
name : string
Name for the training Dataset.
Returns
-------
self : Booster
Booster with set training Dataset name.
"""
self._train_data_name = name
return self
def add_valid(self, data, name):
"""Add validation data.
Parameters
----------
data : Dataset
Validation data.
name : string
Name of validation data.
Returns
-------
self : Booster
Booster with set validation data.
"""
if not isinstance(data, Dataset):
raise TypeError(f'Validation data should be a Dataset instance, got {type(data).__name__}')
if data._predictor is not self.__init_predictor:
raise LightGBMError("Add validation data failed, "
"you should use same predictor for these data")
_safe_call(_LIB.LGBM_BoosterAddValidData(
self.handle,
data.construct().handle))
self.valid_sets.append(data)
self.name_valid_sets.append(name)
self.__num_dataset += 1
self.__inner_predict_buffer.append(None)
self.__is_predicted_cur_iter.append(False)
return self
def reset_parameter(self, params):
"""Reset parameters of Booster.
Parameters
----------
params : dict
New parameters for Booster.
Returns
-------
self : Booster
Booster with new parameters.
"""
params_str = param_dict_to_str(params)
if params_str:
_safe_call(_LIB.LGBM_BoosterResetParameter(
self.handle,
c_str(params_str)))
self.params.update(params)
return self
def update(self, train_set=None, fobj=None):
"""Update Booster for one iteration.
Parameters
----------
train_set : Dataset or None, optional (default=None)
Training data.
If None, last training data is used.
fobj : callable or None, optional (default=None)
Customized objective function.
Should accept two parameters: preds, train_data,
and return (grad, hess).
preds : list or numpy 1-D array
The predicted values.
Predicted values are returned before any transformation,
e.g. they are raw margin instead of probability of positive class for binary task.
train_data : Dataset
The training dataset.
grad : list or numpy 1-D array
The value of the first order derivative (gradient) of the loss
with respect to the elements of preds for each sample point.
hess : list or numpy 1-D array
The value of the second order derivative (Hessian) of the loss
with respect to the elements of preds for each sample point.
For multi-class task, preds are grouped by class_id first, then by row_id.
If you want to get the i-th row preds in the j-th class, the access way is score[j * num_data + i],
and you should group grad and hess in the same way.
Returns
-------
is_finished : bool
Whether the update was successfully finished.
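Examples
--------
A minimal sketch of a custom squared-error objective; gradient and Hessian are taken
with respect to the raw scores (``np`` is numpy; names are illustrative):
>>> def l2_objective(preds, train_data):
...     labels = train_data.get_label()
...     grad = preds - labels
...     hess = np.ones_like(preds)
...     return grad, hess
>>> bst.update(fobj=l2_objective)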
"""
# need to reset the training data
if train_set is None and self.train_set_version != self.train_set.version:
train_set = self.train_set
is_the_same_train_set = False
else:
is_the_same_train_set = train_set is self.train_set and self.train_set_version == train_set.version
if train_set is not None and not is_the_same_train_set:
if not isinstance(train_set, Dataset):
raise TypeError(f'Training data should be a Dataset instance, got {type(train_set).__name__}')
if train_set._predictor is not self.__init_predictor:
raise LightGBMError("Replace training data failed, "
"you should use same predictor for these data")
self.train_set = train_set
_safe_call(_LIB.LGBM_BoosterResetTrainingData(
self.handle,
self.train_set.construct().handle))
self.__inner_predict_buffer[0] = None
self.train_set_version = self.train_set.version
is_finished = ctypes.c_int(0)
if fobj is None:
if self.__set_objective_to_none:
raise LightGBMError('Cannot update due to null objective function.')
_safe_call(_LIB.LGBM_BoosterUpdateOneIter(
self.handle,
ctypes.byref(is_finished)))
self.__is_predicted_cur_iter = [False for _ in range(self.__num_dataset)]
return is_finished.value == 1
else:
if not self.__set_objective_to_none:
self.reset_parameter({"objective": "none"}).__set_objective_to_none = True
grad, hess = fobj(self.__inner_predict(0), self.train_set)
return self.__boost(grad, hess)
def __boost(self, grad, hess):
"""Boost Booster for one iteration with customized gradient statistics.
.. note::
Score is returned before any transformation,
e.g. it is raw margin instead of probability of positive class for binary task.
For multi-class task, the score is grouped by class_id first, then by row_id.
If you want to get the i-th row score in the j-th class, the access way is score[j * num_data + i],
and you should group grad and hess in the same way.
Parameters
----------
grad : list or numpy 1-D array
The value of the first order derivative (gradient) of the loss
with respect to the elements of score for each sample point.
hess : list or numpy 1-D array
The value of the second order derivative (Hessian) of the loss
with respect to the elements of score for each sample point.
Returns
-------
is_finished : bool
Whether the boost was successfully finished.
"""
grad = list_to_1d_numpy(grad, name='gradient')
hess = list_to_1d_numpy(hess, name='hessian')
assert grad.flags.c_contiguous
assert hess.flags.c_contiguous
if len(grad) != len(hess):
raise ValueError(f"Lengths of gradient({len(grad)}) and hessian({len(hess)}) don't match")
is_finished = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterUpdateOneIterCustom(
self.handle,
grad.ctypes.data_as(ctypes.POINTER(ctypes.c_float)),
hess.ctypes.data_as(ctypes.POINTER(ctypes.c_float)),
ctypes.byref(is_finished)))
self.__is_predicted_cur_iter = [False for _ in range(self.__num_dataset)]
return is_finished.value == 1
def rollback_one_iter(self):
"""Rollback one iteration.
Returns
-------
self : Booster
Booster with rolled back one iteration.
"""
_safe_call(_LIB.LGBM_BoosterRollbackOneIter(
self.handle))
self.__is_predicted_cur_iter = [False for _ in range(self.__num_dataset)]
return self
def current_iteration(self):
"""Get the index of the current iteration.
Returns
-------
cur_iter : int
The index of the current iteration.
"""
out_cur_iter = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetCurrentIteration(
self.handle,
ctypes.byref(out_cur_iter)))
return out_cur_iter.value
def num_model_per_iteration(self):
"""Get number of models per iteration.
Returns
-------
model_per_iter : int
The number of models per iteration.
"""
model_per_iter = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterNumModelPerIteration(
self.handle,
ctypes.byref(model_per_iter)))
return model_per_iter.value
def num_trees(self):
"""Get number of weak sub-models.
Returns
-------
num_trees : int
The number of weak sub-models.
"""
num_trees = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterNumberOfTotalModel(
self.handle,
ctypes.byref(num_trees)))
return num_trees.value
def upper_bound(self):
"""Get upper bound value of a model.
Returns
-------
upper_bound : double
Upper bound value of the model.
"""
ret = ctypes.c_double(0)
_safe_call(_LIB.LGBM_BoosterGetUpperBoundValue(
self.handle,
ctypes.byref(ret)))
return ret.value
def lower_bound(self):
"""Get lower bound value of a model.
Returns
-------
lower_bound : double
Lower bound value of the model.
"""
ret = ctypes.c_double(0)
_safe_call(_LIB.LGBM_BoosterGetLowerBoundValue(
self.handle,
ctypes.byref(ret)))
return ret.value
def eval(self, data, name, feval=None):
"""Evaluate for data.
Parameters
----------
data : Dataset
Data for the evaluating.
name : string
Name of the data.
feval : callable or None, optional (default=None)
Customized evaluation function.
Should accept two parameters: preds, eval_data,
and return (eval_name, eval_result, is_higher_better) or list of such tuples.
preds : list or numpy 1-D array
The predicted values.
If ``fobj`` is specified, predicted values are returned before any transformation,
e.g. they are raw margin instead of probability of positive class for binary task in this case.
eval_data : Dataset
The evaluation dataset.
eval_name : string
The name of evaluation function (without whitespace).
eval_result : float
The eval result.
is_higher_better : bool
Whether a higher eval result is better, e.g. AUC is ``is_higher_better``.
For multi-class task, preds are grouped by class_id first, then by row_id.
If you want to get the i-th row preds in the j-th class, the access way is preds[j * num_data + i].
Returns
-------
result : list
List with evaluation results.
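Examples
--------
A minimal sketch of a custom metric (``np`` is numpy; names are illustrative):
>>> def mean_abs_error(preds, eval_data):
...     labels = eval_data.get_label()
...     return 'mae', float(np.mean(np.abs(preds - labels))), False
>>> bst.eval(valid_data, 'valid', feval=mean_abs_error)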
"""
if not isinstance(data, Dataset):
raise TypeError("Can only eval for Dataset instance")
data_idx = -1
if data is self.train_set:
data_idx = 0
else:
for i in range(len(self.valid_sets)):
if data is self.valid_sets[i]:
data_idx = i + 1
break
# need to push new valid data
if data_idx == -1:
self.add_valid(data, name)
data_idx = self.__num_dataset - 1
return self.__inner_eval(name, data_idx, feval)
def eval_train(self, feval=None):
"""Evaluate for training data.
Parameters
----------
feval : callable or None, optional (default=None)
Customized evaluation function.
Should accept two parameters: preds, train_data,
and return (eval_name, eval_result, is_higher_better) or list of such tuples.
preds : list or numpy 1-D array
The predicted values.
If ``fobj`` is specified, predicted values are returned before any transformation,
e.g. they are raw margin instead of probability of positive class for binary task in this case.
train_data : Dataset
The training dataset.
eval_name : string
The name of evaluation function (without whitespace).
eval_result : float
The eval result.
is_higher_better : bool
Whether a higher eval result is better, e.g. AUC is ``is_higher_better``.
For multi-class task, preds are grouped by class_id first, then by row_id.
If you want to get the i-th row preds in the j-th class, the access way is preds[j * num_data + i].
Returns
-------
result : list
List with evaluation results.
"""
return self.__inner_eval(self._train_data_name, 0, feval)
def eval_valid(self, feval=None):
"""Evaluate for validation data.
Parameters
----------
feval : callable or None, optional (default=None)
Customized evaluation function.
Should accept two parameters: preds, valid_data,
and return (eval_name, eval_result, is_higher_better) or list of such tuples.
preds : list or numpy 1-D array
The predicted values.
If ``fobj`` is specified, predicted values are returned before any transformation,
e.g. they are raw margin instead of probability of positive class for binary task in this case.
valid_data : Dataset
The validation dataset.
eval_name : string
The name of evaluation function (without whitespace).
eval_result : float
The eval result.
is_higher_better : bool
Whether a higher eval result is better, e.g. AUC is ``is_higher_better``.
For multi-class task, preds are grouped by class_id first, then by row_id.
If you want to get the i-th row preds in the j-th class, the access way is preds[j * num_data + i].
Returns
-------
result : list
List with evaluation results.
"""
return [item for i in range(1, self.__num_dataset)
for item in self.__inner_eval(self.name_valid_sets[i - 1], i, feval)]
def save_model(self, filename, num_iteration=None, start_iteration=0, importance_type='split'):
"""Save Booster to file.
Parameters
----------
filename : string
Filename to save Booster.
num_iteration : int or None, optional (default=None)
Index of the iteration that should be saved.
If None, the best iteration (if it exists) is saved; otherwise, all iterations are saved.
If <= 0, all iterations are saved.
start_iteration : int, optional (default=0)
Start index of the iteration that should be saved.
importance_type : string, optional (default="split")
What type of feature importance should be saved.
If "split", result contains numbers of times the feature is used in a model.
If "gain", result contains total gains of splits which use the feature.
Returns
-------
self : Booster
Returns self.
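Examples
--------
A minimal sketch; the file name is illustrative:
>>> bst.save_model('model.txt', num_iteration=bst.best_iteration)
>>> bst_loaded = lgb.Booster(model_file='model.txt')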
"""
if num_iteration is None:
num_iteration = self.best_iteration
importance_type_int = FEATURE_IMPORTANCE_TYPE_MAPPER[importance_type]
_safe_call(_LIB.LGBM_BoosterSaveModel(
self.handle,
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
ctypes.c_int(importance_type_int),
c_str(filename)))
_dump_pandas_categorical(self.pandas_categorical, filename)
return self
def shuffle_models(self, start_iteration=0, end_iteration=-1):
"""Shuffle models.
Parameters
----------
start_iteration : int, optional (default=0)
The first iteration that will be shuffled.
end_iteration : int, optional (default=-1)
The last iteration that will be shuffled.
If <= 0, the last available iteration is used.
Returns
-------
self : Booster
Booster with shuffled models.
"""
_safe_call(_LIB.LGBM_BoosterShuffleModels(
self.handle,
ctypes.c_int(start_iteration),
ctypes.c_int(end_iteration)))
return self
def model_from_string(self, model_str, verbose=True):
"""Load Booster from a string.
Parameters
----------
model_str : string
Model will be loaded from this string.
verbose : bool, optional (default=True)
Whether to print messages while loading model.
Returns
-------
self : Booster
Loaded Booster object.
"""
if self.handle is not None:
_safe_call(_LIB.LGBM_BoosterFree(self.handle))
self._free_buffer()
self.handle = ctypes.c_void_p()
out_num_iterations = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterLoadModelFromString(
c_str(model_str),
ctypes.byref(out_num_iterations),
ctypes.byref(self.handle)))
out_num_class = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetNumClasses(
self.handle,
ctypes.byref(out_num_class)))
if verbose:
_log_info(f'Finished loading model, total used {int(out_num_iterations.value)} iterations')
self.__num_class = out_num_class.value
self.pandas_categorical = _load_pandas_categorical(model_str=model_str)
return self
def model_to_string(self, num_iteration=None, start_iteration=0, importance_type='split'):
"""Save Booster to string.
Parameters
----------
num_iteration : int or None, optional (default=None)
Index of the iteration that should be saved.
If None, if the best iteration exists, it is saved; otherwise, all iterations are saved.
If <= 0, all iterations are saved.
start_iteration : int, optional (default=0)
Start index of the iteration that should be saved.
importance_type : string, optional (default="split")
What type of feature importance should be saved.
If "split", result contains numbers of times the feature is used in a model.
If "gain", result contains total gains of splits which use the feature.
Returns
-------
str_repr : string
String representation of Booster.
"""
if num_iteration is None:
num_iteration = self.best_iteration
importance_type_int = FEATURE_IMPORTANCE_TYPE_MAPPER[importance_type]
buffer_len = 1 << 20
tmp_out_len = ctypes.c_int64(0)
string_buffer = ctypes.create_string_buffer(buffer_len)
ptr_string_buffer = ctypes.c_char_p(*[ctypes.addressof(string_buffer)])
_safe_call(_LIB.LGBM_BoosterSaveModelToString(
self.handle,
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
ctypes.c_int(importance_type_int),
ctypes.c_int64(buffer_len),
ctypes.byref(tmp_out_len),
ptr_string_buffer))
actual_len = tmp_out_len.value
# if buffer length is not long enough, re-allocate a buffer
if actual_len > buffer_len:
string_buffer = ctypes.create_string_buffer(actual_len)
ptr_string_buffer = ctypes.c_char_p(*[ctypes.addressof(string_buffer)])
_safe_call(_LIB.LGBM_BoosterSaveModelToString(
self.handle,
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
ctypes.c_int(importance_type_int),
ctypes.c_int64(actual_len),
ctypes.byref(tmp_out_len),
ptr_string_buffer))
ret = string_buffer.value.decode('utf-8')
ret += _dump_pandas_categorical(self.pandas_categorical)
return ret
def dump_model(self, num_iteration=None, start_iteration=0, importance_type='split'):
"""Dump Booster to JSON format.
Parameters
----------
num_iteration : int or None, optional (default=None)
Index of the iteration that should be dumped.
If None, if the best iteration exists, it is dumped; otherwise, all iterations are dumped.
If <= 0, all iterations are dumped.
start_iteration : int, optional (default=0)
Start index of the iteration that should be dumped.
importance_type : string, optional (default="split")
What type of feature importance should be dumped.
If "split", result contains numbers of times the feature is used in a model.
If "gain", result contains total gains of splits which use the feature.
Returns
-------
json_repr : dict
JSON format of Booster.
"""
if num_iteration is None:
num_iteration = self.best_iteration
importance_type_int = FEATURE_IMPORTANCE_TYPE_MAPPER[importance_type]
buffer_len = 1 << 20
tmp_out_len = ctypes.c_int64(0)
string_buffer = ctypes.create_string_buffer(buffer_len)
ptr_string_buffer = ctypes.c_char_p(*[ctypes.addressof(string_buffer)])
_safe_call(_LIB.LGBM_BoosterDumpModel(
self.handle,
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
ctypes.c_int(importance_type_int),
ctypes.c_int64(buffer_len),
ctypes.byref(tmp_out_len),
ptr_string_buffer))
actual_len = tmp_out_len.value
# if buffer length is not long enough, reallocate a buffer
if actual_len > buffer_len:
string_buffer = ctypes.create_string_buffer(actual_len)
ptr_string_buffer = ctypes.c_char_p(*[ctypes.addressof(string_buffer)])
_safe_call(_LIB.LGBM_BoosterDumpModel(
self.handle,
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
ctypes.c_int(importance_type_int),
ctypes.c_int64(actual_len),
ctypes.byref(tmp_out_len),
ptr_string_buffer))
ret = json.loads(string_buffer.value.decode('utf-8'))
ret['pandas_categorical'] = json.loads(json.dumps(self.pandas_categorical,
default=json_default_with_numpy))
return ret
def predict(self, data, start_iteration=0, num_iteration=None,
raw_score=False, pred_leaf=False, pred_contrib=False,
data_has_header=False, is_reshape=True, **kwargs):
"""Make a prediction.
Parameters
----------
data : string, numpy array, pandas DataFrame, H2O DataTable's Frame or scipy.sparse
Data source for prediction.
If string, it represents the path to txt file.
start_iteration : int, optional (default=0)
Start index of the iteration to predict.
If <= 0, starts from the first iteration.
num_iteration : int or None, optional (default=None)
Total number of iterations used in the prediction.
If None, if the best iteration exists and start_iteration <= 0, the best iteration is used;
otherwise, all iterations from ``start_iteration`` are used (no limits).
If <= 0, all iterations from ``start_iteration`` are used (no limits).
raw_score : bool, optional (default=False)
Whether to predict raw scores.
pred_leaf : bool, optional (default=False)
Whether to predict leaf index.
pred_contrib : bool, optional (default=False)
Whether to predict feature contributions.
.. note::
If you want to get more explanations for your model's predictions using SHAP values,
like SHAP interaction values,
you can install the shap package (https://github.com/slundberg/shap).
Note that unlike the shap package, with ``pred_contrib`` we return a matrix with an extra
column, where the last column is the expected value.
data_has_header : bool, optional (default=False)
Whether the data has header.
Used only if data is string.
is_reshape : bool, optional (default=True)
If True, result is reshaped to [nrow, ncol].
**kwargs
Other parameters for the prediction.
Returns
-------
result : numpy array, scipy.sparse or list of scipy.sparse
Prediction result.
Can be sparse or a list of sparse objects (each element represents predictions for one class) for feature contributions (when ``pred_contrib=True``).
"""
predictor = self._to_predictor(deepcopy(kwargs))
if num_iteration is None:
if start_iteration <= 0:
num_iteration = self.best_iteration
else:
num_iteration = -1
return predictor.predict(data, start_iteration, num_iteration,
raw_score, pred_leaf, pred_contrib,
data_has_header, is_reshape)
def refit(self, data, label, decay_rate=0.9, **kwargs):
"""Refit the existing Booster by new data.
Parameters
----------
data : string, numpy array, pandas DataFrame, H2O DataTable's Frame or scipy.sparse
Data source for refit.
If string, it represents the path to txt file.
label : list, numpy 1-D array or pandas Series / one-column DataFrame
Label for refit.
decay_rate : float, optional (default=0.9)
Decay rate of refit,
will use ``leaf_output = decay_rate * old_leaf_output + (1.0 - decay_rate) * new_leaf_output`` to refit trees.
**kwargs
Other parameters for refit.
These parameters will be passed to ``predict`` method.
Returns
-------
result : Booster
Refitted Booster.
"""
if self.__set_objective_to_none:
raise LightGBMError('Cannot refit due to null objective function.')
predictor = self._to_predictor(deepcopy(kwargs))
leaf_preds = predictor.predict(data, -1, pred_leaf=True)
nrow, ncol = leaf_preds.shape
out_is_linear = ctypes.c_bool(False)
_safe_call(_LIB.LGBM_BoosterGetLinear(
self.handle,
ctypes.byref(out_is_linear)))
new_params = deepcopy(self.params)
new_params["linear_tree"] = out_is_linear.value
train_set = Dataset(data, label, silent=True, params=new_params)
new_params['refit_decay_rate'] = decay_rate
new_booster = Booster(new_params, train_set)
# Copy models
_safe_call(_LIB.LGBM_BoosterMerge(
new_booster.handle,
predictor.handle))
leaf_preds = leaf_preds.reshape(-1)
ptr_data, _, _ = c_int_array(leaf_preds)
_safe_call(_LIB.LGBM_BoosterRefit(
new_booster.handle,
ptr_data,
ctypes.c_int32(nrow),
ctypes.c_int32(ncol)))
new_booster.network = self.network
new_booster.__attr = self.__attr.copy()
return new_booster
def get_leaf_output(self, tree_id, leaf_id):
"""Get the output of a leaf.
Parameters
----------
tree_id : int
The index of the tree.
leaf_id : int
The index of the leaf in the tree.
Returns
-------
result : float
The output of the leaf.
"""
ret = ctypes.c_double(0)
_safe_call(_LIB.LGBM_BoosterGetLeafValue(
self.handle,
ctypes.c_int(tree_id),
ctypes.c_int(leaf_id),
ctypes.byref(ret)))
return ret.value
def _to_predictor(self, pred_parameter=None):
"""Convert to predictor."""
predictor = _InnerPredictor(booster_handle=self.handle, pred_parameter=pred_parameter)
predictor.pandas_categorical = self.pandas_categorical
return predictor
def num_feature(self):
"""Get number of features.
Returns
-------
num_feature : int
The number of features.
"""
out_num_feature = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetNumFeature(
self.handle,
ctypes.byref(out_num_feature)))
return out_num_feature.value
def feature_name(self):
"""Get names of features.
Returns
-------
result : list
List with names of features.
"""
num_feature = self.num_feature()
# Get name of features
tmp_out_len = ctypes.c_int(0)
reserved_string_buffer_size = 255
required_string_buffer_size = ctypes.c_size_t(0)
string_buffers = [ctypes.create_string_buffer(reserved_string_buffer_size) for _ in range(num_feature)]
ptr_string_buffers = (ctypes.c_char_p * num_feature)(*map(ctypes.addressof, string_buffers))
_safe_call(_LIB.LGBM_BoosterGetFeatureNames(
self.handle,
ctypes.c_int(num_feature),
ctypes.byref(tmp_out_len),
ctypes.c_size_t(reserved_string_buffer_size),
ctypes.byref(required_string_buffer_size),
ptr_string_buffers))
if num_feature != tmp_out_len.value:
raise ValueError("Length of feature names doesn't equal with num_feature")
actual_string_buffer_size = required_string_buffer_size.value
# if buffer length is not long enough, reallocate buffers
if reserved_string_buffer_size < actual_string_buffer_size:
string_buffers = [ctypes.create_string_buffer(actual_string_buffer_size) for _ in range(num_feature)]
ptr_string_buffers = (ctypes.c_char_p * num_feature)(*map(ctypes.addressof, string_buffers))
_safe_call(_LIB.LGBM_BoosterGetFeatureNames(
self.handle,
ctypes.c_int(num_feature),
ctypes.byref(tmp_out_len),
ctypes.c_size_t(actual_string_buffer_size),
ctypes.byref(required_string_buffer_size),
ptr_string_buffers))
return [string_buffers[i].value.decode('utf-8') for i in range(num_feature)]
def feature_importance(self, importance_type='split', iteration=None):
"""Get feature importances.
Parameters
----------
importance_type : string, optional (default="split")
How the importance is calculated.
If "split", result contains numbers of times the feature is used in a model.
If "gain", result contains total gains of splits which use the feature.
iteration : int or None, optional (default=None)
Limit number of iterations in the feature importance calculation.
If None, if the best iteration exists, it is used; otherwise, all trees are used.
If <= 0, all trees are used (no limits).
Returns
-------
result : numpy array
Array with feature importances.
"""
if iteration is None:
iteration = self.best_iteration
importance_type_int = FEATURE_IMPORTANCE_TYPE_MAPPER[importance_type]
result = np.zeros(self.num_feature(), dtype=np.float64)
_safe_call(_LIB.LGBM_BoosterFeatureImportance(
self.handle,
ctypes.c_int(iteration),
ctypes.c_int(importance_type_int),
result.ctypes.data_as(ctypes.POINTER(ctypes.c_double))))
if importance_type_int == 0:
return result.astype(np.int32)
else:
return result
def get_split_value_histogram(self, feature, bins=None, xgboost_style=False):
"""Get split value histogram for the specified feature.
Parameters
----------
feature : int or string
The feature name or index the histogram is calculated for.
If int, interpreted as index.
If string, interpreted as name.
.. warning::
Categorical features are not supported.
bins : int, string or None, optional (default=None)
The maximum number of bins.
If None, or int and > number of unique split values and ``xgboost_style=True``,
the number of bins equals number of unique split values.
If string, it should be one from the list of the supported values by ``numpy.histogram()`` function.
xgboost_style : bool, optional (default=False)
Whether the returned result should be in the same form as it is in XGBoost.
If False, the returned value is tuple of 2 numpy arrays as it is in ``numpy.histogram()`` function.
If True, the returned value is matrix, in which the first column is the right edges of non-empty bins
and the second one is the histogram values.
Returns
-------
result_tuple : tuple of 2 numpy arrays
If ``xgboost_style=False``, the values of the histogram of used splitting values for the specified feature
and the bin edges.
result_array_like : numpy array or pandas DataFrame (if pandas is installed)
If ``xgboost_style=True``, the histogram of used splitting values for the specified feature.
"""
def add(root):
"""Recursively add thresholds."""
if 'split_index' in root: # non-leaf
if feature_names is not None and isinstance(feature, str):
split_feature = feature_names[root['split_feature']]
else:
split_feature = root['split_feature']
if split_feature == feature:
if isinstance(root['threshold'], str):
raise LightGBMError('Cannot compute split value histogram for the categorical feature')
else:
values.append(root['threshold'])
add(root['left_child'])
add(root['right_child'])
model = self.dump_model()
feature_names = model.get('feature_names')
tree_infos = model['tree_info']
values = []
for tree_info in tree_infos:
add(tree_info['tree_structure'])
if bins is None or isinstance(bins, int) and xgboost_style:
n_unique = len(np.unique(values))
bins = max(min(n_unique, bins) if bins is not None else n_unique, 1)
hist, bin_edges = np.histogram(values, bins=bins)
if xgboost_style:
ret = np.column_stack((bin_edges[1:], hist))
ret = ret[ret[:, 1] > 0]
if PANDAS_INSTALLED:
return pd_DataFrame(ret, columns=['SplitValue', 'Count'])
else:
return ret
else:
return hist, bin_edges
def __inner_eval(self, data_name, data_idx, feval=None):
"""Evaluate training or validation data."""
if data_idx >= self.__num_dataset:
raise ValueError("Data_idx should be smaller than number of dataset")
self.__get_eval_info()
ret = []
if self.__num_inner_eval > 0:
result = np.zeros(self.__num_inner_eval, dtype=np.float64)
tmp_out_len = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetEval(
self.handle,
ctypes.c_int(data_idx),
ctypes.byref(tmp_out_len),
result.ctypes.data_as(ctypes.POINTER(ctypes.c_double))))
if tmp_out_len.value != self.__num_inner_eval:
raise ValueError("Wrong length of eval results")
for i in range(self.__num_inner_eval):
ret.append((data_name, self.__name_inner_eval[i],
result[i], self.__higher_better_inner_eval[i]))
if callable(feval):
feval = [feval]
if feval is not None:
if data_idx == 0:
cur_data = self.train_set
else:
cur_data = self.valid_sets[data_idx - 1]
for eval_function in feval:
if eval_function is None:
continue
feval_ret = eval_function(self.__inner_predict(data_idx), cur_data)
if isinstance(feval_ret, list):
for eval_name, val, is_higher_better in feval_ret:
ret.append((data_name, eval_name, val, is_higher_better))
else:
eval_name, val, is_higher_better = feval_ret
ret.append((data_name, eval_name, val, is_higher_better))
return ret
def __inner_predict(self, data_idx):
"""Predict for training and validation dataset."""
if data_idx >= self.__num_dataset:
raise ValueError("Data_idx should be smaller than number of dataset")
if self.__inner_predict_buffer[data_idx] is None:
if data_idx == 0:
n_preds = self.train_set.num_data() * self.__num_class
else:
n_preds = self.valid_sets[data_idx - 1].num_data() * self.__num_class
self.__inner_predict_buffer[data_idx] = np.zeros(n_preds, dtype=np.float64)
        # avoid predicting multiple times in one iteration
if not self.__is_predicted_cur_iter[data_idx]:
tmp_out_len = ctypes.c_int64(0)
data_ptr = self.__inner_predict_buffer[data_idx].ctypes.data_as(ctypes.POINTER(ctypes.c_double))
_safe_call(_LIB.LGBM_BoosterGetPredict(
self.handle,
ctypes.c_int(data_idx),
ctypes.byref(tmp_out_len),
data_ptr))
if tmp_out_len.value != len(self.__inner_predict_buffer[data_idx]):
raise ValueError(f"Wrong length of predict results for data {data_idx}")
self.__is_predicted_cur_iter[data_idx] = True
return self.__inner_predict_buffer[data_idx]
def __get_eval_info(self):
"""Get inner evaluation count and names."""
if self.__need_reload_eval_info:
self.__need_reload_eval_info = False
out_num_eval = ctypes.c_int(0)
# Get num of inner evals
_safe_call(_LIB.LGBM_BoosterGetEvalCounts(
self.handle,
ctypes.byref(out_num_eval)))
self.__num_inner_eval = out_num_eval.value
if self.__num_inner_eval > 0:
# Get name of eval metrics
tmp_out_len = ctypes.c_int(0)
reserved_string_buffer_size = 255
required_string_buffer_size = ctypes.c_size_t(0)
string_buffers = [
ctypes.create_string_buffer(reserved_string_buffer_size) for _ in range(self.__num_inner_eval)
]
ptr_string_buffers = (ctypes.c_char_p * self.__num_inner_eval)(*map(ctypes.addressof, string_buffers))
_safe_call(_LIB.LGBM_BoosterGetEvalNames(
self.handle,
ctypes.c_int(self.__num_inner_eval),
ctypes.byref(tmp_out_len),
ctypes.c_size_t(reserved_string_buffer_size),
ctypes.byref(required_string_buffer_size),
ptr_string_buffers))
if self.__num_inner_eval != tmp_out_len.value:
raise ValueError("Length of eval names doesn't equal with num_evals")
actual_string_buffer_size = required_string_buffer_size.value
# if buffer length is not long enough, reallocate buffers
if reserved_string_buffer_size < actual_string_buffer_size:
string_buffers = [
ctypes.create_string_buffer(actual_string_buffer_size) for _ in range(self.__num_inner_eval)
]
ptr_string_buffers = (ctypes.c_char_p * self.__num_inner_eval)(*map(ctypes.addressof, string_buffers))
_safe_call(_LIB.LGBM_BoosterGetEvalNames(
self.handle,
ctypes.c_int(self.__num_inner_eval),
ctypes.byref(tmp_out_len),
ctypes.c_size_t(actual_string_buffer_size),
ctypes.byref(required_string_buffer_size),
ptr_string_buffers))
self.__name_inner_eval = [
string_buffers[i].value.decode('utf-8') for i in range(self.__num_inner_eval)
]
self.__higher_better_inner_eval = [
name.startswith(('auc', 'ndcg@', 'map@', 'average_precision')) for name in self.__name_inner_eval
]
def attr(self, key):
"""Get attribute string from the Booster.
Parameters
----------
key : string
The name of the attribute.
Returns
-------
value : string or None
The attribute value.
Returns None if attribute does not exist.
"""
return self.__attr.get(key, None)
def set_attr(self, **kwargs):
"""Set attributes to the Booster.
Parameters
----------
**kwargs
The attributes to set.
Setting a value to None deletes an attribute.
Returns
-------
self : Booster
Booster with set attributes.
"""
for key, value in kwargs.items():
if value is not None:
if not isinstance(value, str):
raise ValueError("Only string values are accepted")
self.__attr[key] = value
else:
self.__attr.pop(key, None)
return self
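# --- Illustrative usage sketch (not part of LightGBM itself) ---
# A hypothetical helper showing how the Booster save/load methods above compose:
# it writes a trained Booster to disk and rebuilds it from the file. The `bst`
# argument (an already-trained Booster) and the default path are assumptions
# made purely for this example.
def _example_save_and_reload(bst, path='model.txt'):
    """Save ``bst`` with all iterations and gain importances, then reload it from disk."""
    bst.save_model(path, num_iteration=-1, importance_type='gain')  # <= 0 keeps all iterations
    reloaded = Booster(model_file=path)  # assumes the constructor's model_file parameter
    return reloaded.num_feature() == bst.num_feature()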
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (c) 2014 Etherios, Inc. All rights reserved.
# Etherios, Inc. is a Division of Digi International.
from epoxy.component import Component, Dependency
from epoxy.configuration import YamlConfigurationLoader
from epoxy.core import ComponentManager
from epoxy import core as epoxy_core
from epoxy.settings import StringSetting
import os
import unittest
import mock
VALID_TEST_YAML = os.path.join(os.path.dirname(__file__), "test_valid.yaml")
INVALID_TEST_YAML = os.path.join(os.path.dirname(__file__), "test_invalid.yaml")
class TestComponent(Component):
_comp_instantiation_counter = 0
previous = Dependency(required=False)
name = StringSetting(required=True,
help="Name of this component")
def __init__(self):
self.is_main = False
if self.previous is None:
self.count = 0
else:
self.count = self.previous.count + 1
def main(self):
self.is_main = True
class TestDependencyGraphResolution(unittest.TestCase):
def setUp(self):
self.mgr = ComponentManager()
self.loader = YamlConfigurationLoader(VALID_TEST_YAML)
self.invalid_loader = YamlConfigurationLoader(INVALID_TEST_YAML)
self.log = mock.Mock()
epoxy_core.log = self.log
def test_graph_ordering(self):
graph = self.mgr.build_component_graph(self.loader.load_configuration())
ordering = graph.get_ordering()
o = [x.name for x in ordering]
self.assertEqual(len(ordering), 5) # our 4 plus component_manager
self.assert_(o.index("b") > o.index("a"))
self.assert_(o.index("c") > o.index("a"))
self.assert_(o.index("d") > o.index("c"))
def test_graph_construction(self):
self.mgr.launch_configuration(self.loader.load_configuration())
a = self.mgr.components["a"]
b = self.mgr.components["b"]
c = self.mgr.components["c"]
d = self.mgr.components["d"]
self.assertEqual(a.previous, None)
self.assertEqual(b.previous, a)
self.assertEqual(c.previous, a)
self.assertEqual(d.previous, c)
def test_subgraph_construction(self):
self.mgr.launch_subgraph(self.loader.load_configuration(), 'd:main')
a = self.mgr.components["a"]
with self.assertRaises(KeyError):
self.mgr.components["b"]
c = self.mgr.components["c"]
d = self.mgr.components["d"]
self.assertEqual(a.previous, None)
self.assertEqual(c.previous, a)
self.assertEqual(d.previous, c)
self.assertFalse(a.is_main)
self.assertFalse(c.is_main)
self.assertTrue(d.is_main)
def test_subgraph_bad_entry_point(self):
with self.assertRaises(AttributeError):
self.mgr.launch_subgraph(self.loader.load_configuration(), 'd:fake')
last_log_message = self.log.call_args_list[0][0][0]
self.assertEqual(last_log_message, "Bad entry point 'd:fake'")
def test_bad_entry_point(self):
cfg = self.invalid_loader.load_configuration()
with self.assertRaises(AttributeError):
self.mgr.launch_configuration(cfg)
last_log_message = self.log.call_args_list[0][0][0]
self.assertEqual(last_log_message, "Bad entry point 'a:fake_method'")
def test_entry_point(self):
configuration = self.loader.load_configuration()
# Add entry-point to configuration
configuration['entry-point'] = 'd:main'
self.mgr.launch_configuration(configuration)
a = self.mgr.components["a"]
b = self.mgr.components["b"]
c = self.mgr.components["c"]
d = self.mgr.components["d"]
self.assertFalse(a.is_main)
self.assertFalse(b.is_main)
self.assertFalse(c.is_main)
self.assertTrue(d.is_main)
def test_invalid_class(self):
configuration = self.loader.load_configuration()
# Change class to invalid component
invalid_comp = 'epoxy.test.test_core:InvalidComponent'
configuration['components']['a']['class'] = invalid_comp
with self.assertRaises(AttributeError):
self.mgr.launch_subgraph(configuration, 'd:main')
last_logged_msg = self.log.call_args_list[0][0][0]
self.assertEqual(last_logged_msg,
"Class path '%s' is invalid, check your epoxy config" % invalid_comp)
def test_missing_component(self):
configuration = self.loader.load_configuration()
# Delete required component
del configuration['components']['a']
with self.assertRaises(ValueError):
self.mgr.launch_subgraph(configuration, 'd:main')
def test_cycle_detection(self):
configuration = self.loader.load_configuration()
        # Introduce a dependency cycle: a now depends on d
configuration['components']['a']['dependencies'] = {'previous': 'd'}
with self.assertRaises(ValueError):
self.mgr.launch_configuration(configuration)
def test_subgraph_cycle_detection(self):
configuration = self.loader.load_configuration()
        # Introduce a dependency cycle: a now depends on d
configuration['components']['a']['dependencies'] = {'previous': 'd'}
with self.assertRaises(ValueError):
self.mgr.launch_subgraph(configuration, 'd:main')
def test_settings(self):
config = self.loader.load_configuration()
self.mgr.launch_configuration(config)
a = self.mgr.components["a"]
b = self.mgr.components["b"]
c = self.mgr.components["c"]
d = self.mgr.components["d"]
self.assertEqual(a.name, 'alfred')
self.assertEqual(b.name, 'barry')
self.assertEqual(c.name, 'charles')
self.assertEqual(d.name, 'daniel')
if __name__ == '__main__':
unittest.main()
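# --- Illustrative sketch (not part of the original test module) ---
# A rough guess at the shape of test_valid.yaml, reconstructed only from what the
# tests above assert: four TestComponent instances a-d, 'previous' dependencies
# b->a, c->a, d->c, and the names alfred/barry/charles/daniel. The exact epoxy
# schema (for example, whether settings nest under a dedicated key) is an
# assumption and may differ from the real fixture file.
#
# components:
#   a:
#     class: epoxy.test.test_core:TestComponent
#     settings: {name: alfred}
#   b:
#     class: epoxy.test.test_core:TestComponent
#     dependencies: {previous: a}
#     settings: {name: barry}
#   c:
#     class: epoxy.test.test_core:TestComponent
#     dependencies: {previous: a}
#     settings: {name: charles}
#   d:
#     class: epoxy.test.test_core:TestComponent
#     dependencies: {previous: c}
#     settings: {name: daniel}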
|
#!/usr/bin/env python
# __author__ = 'helton'
import sip
sip.setapi('QVariant', 2)
from math import cos, pi, sin
from PyQt4 import QtCore, QtGui
class RenderArea(QtGui.QWidget):
def __init__(self, path, parent=None):
super(RenderArea, self).__init__(parent)
self.path = path
self.penWidth = 1
self.rotationAngle = 0
self.setBackgroundRole(QtGui.QPalette.Base)
def minimumSizeHint(self):
return QtCore.QSize(50, 50)
def sizeHint(self):
return QtCore.QSize(100, 100)
def setFillRule(self, rule):
self.path.setFillRule(rule)
self.update()
def setFillGradient(self, color1, color2):
self.fillColor1 = color1
self.fillColor2 = color2
self.update()
def setPenWidth(self, width):
self.penWidth = width
self.update()
def setPenColor(self, color):
self.penColor = color
self.update()
def setRotationAngle(self, degrees):
self.rotationAngle = degrees
self.update()
def paintEvent(self, event):
painter = QtGui.QPainter(self)
painter.setRenderHint(QtGui.QPainter.Antialiasing)
painter.scale(self.width() / 100.0, self.height() / 100.0)
painter.translate(50.0, 50.0)
painter.rotate(-self.rotationAngle)
painter.translate(-50.0, -50.0)
painter.setPen(QtGui.QPen(self.penColor, self.penWidth,
QtCore.Qt.SolidLine, QtCore.Qt.RoundCap, QtCore.Qt.RoundJoin))
gradient = QtGui.QLinearGradient(0, 0, 0, 100)
gradient.setColorAt(0.0, self.fillColor1)
gradient.setColorAt(1.0, self.fillColor2)
painter.setBrush(QtGui.QBrush(gradient))
painter.drawPath(self.path)
class Window(QtGui.QWidget):
NumRenderAreas = 9
def __init__(self):
super(Window, self).__init__()
rectPath = QtGui.QPainterPath()
rectPath.moveTo(20.0, 30.0)
rectPath.lineTo(80.0, 30.0)
rectPath.lineTo(80.0, 70.0)
rectPath.lineTo(20.0, 70.0)
rectPath.closeSubpath()
roundRectPath = QtGui.QPainterPath()
roundRectPath.moveTo(80.0, 35.0)
roundRectPath.arcTo(70.0, 30.0, 10.0, 10.0, 0.0, 90.0)
roundRectPath.lineTo(25.0, 30.0)
roundRectPath.arcTo(20.0, 30.0, 10.0, 10.0, 90.0, 90.0)
roundRectPath.lineTo(20.0, 65.0)
roundRectPath.arcTo(20.0, 60.0, 10.0, 10.0, 180.0, 90.0)
roundRectPath.lineTo(75.0, 70.0)
roundRectPath.arcTo(70.0, 60.0, 10.0, 10.0, 270.0, 90.0)
roundRectPath.closeSubpath()
ellipsePath = QtGui.QPainterPath()
ellipsePath.moveTo(80.0, 50.0)
ellipsePath.arcTo(20.0, 30.0, 60.0, 40.0, 0.0, 360.0)
piePath = QtGui.QPainterPath()
piePath.moveTo(50.0, 50.0)
piePath.lineTo(65.0, 32.6795)
piePath.arcTo(20.0, 30.0, 60.0, 40.0, 60.0, 240.0)
piePath.closeSubpath()
polygonPath = QtGui.QPainterPath()
polygonPath.moveTo(10.0, 80.0)
polygonPath.lineTo(20.0, 10.0)
polygonPath.lineTo(80.0, 30.0)
polygonPath.lineTo(90.0, 70.0)
polygonPath.closeSubpath()
groupPath = QtGui.QPainterPath()
groupPath.moveTo(60.0, 40.0)
groupPath.arcTo(20.0, 20.0, 40.0, 40.0, 0.0, 360.0)
groupPath.moveTo(40.0, 40.0)
groupPath.lineTo(40.0, 80.0)
groupPath.lineTo(80.0, 80.0)
groupPath.lineTo(80.0, 40.0)
groupPath.closeSubpath()
textPath = QtGui.QPainterPath()
timesFont = QtGui.QFont("Times", 50)
timesFont.setStyleStrategy(QtGui.QFont.ForceOutline)
textPath.addText(10, 70, timesFont, "Qt")
bezierPath = QtGui.QPainterPath()
bezierPath.moveTo(20, 30)
bezierPath.cubicTo(80, 0, 50, 50, 80, 80)
starPath = QtGui.QPainterPath()
starPath.moveTo(90, 50)
for i in range(1, 5):
starPath.lineTo(50 + 40 * cos(0.8 * i * pi),
50 + 40 * sin(0.8 * i * pi))
starPath.closeSubpath()
self.renderAreas = [RenderArea(rectPath), RenderArea(roundRectPath),
RenderArea(ellipsePath), RenderArea(piePath),
RenderArea(polygonPath), RenderArea(groupPath),
RenderArea(textPath), RenderArea(bezierPath),
RenderArea(starPath)]
assert len(self.renderAreas) == 9
self.fillRuleComboBox = QtGui.QComboBox()
self.fillRuleComboBox.addItem("Odd Even", QtCore.Qt.OddEvenFill)
self.fillRuleComboBox.addItem("Winding", QtCore.Qt.WindingFill)
fillRuleLabel = QtGui.QLabel("Fill &Rule:")
fillRuleLabel.setBuddy(self.fillRuleComboBox)
self.fillColor1ComboBox = QtGui.QComboBox()
self.populateWithColors(self.fillColor1ComboBox)
self.fillColor1ComboBox.setCurrentIndex(
self.fillColor1ComboBox.findText("mediumslateblue"))
self.fillColor2ComboBox = QtGui.QComboBox()
self.populateWithColors(self.fillColor2ComboBox)
self.fillColor2ComboBox.setCurrentIndex(
self.fillColor2ComboBox.findText("cornsilk"))
fillGradientLabel = QtGui.QLabel("&Fill Gradient:")
fillGradientLabel.setBuddy(self.fillColor1ComboBox)
fillToLabel = QtGui.QLabel("to")
fillToLabel.setSizePolicy(QtGui.QSizePolicy.Fixed,
QtGui.QSizePolicy.Fixed)
self.penWidthSpinBox = QtGui.QSpinBox()
self.penWidthSpinBox.setRange(0, 20)
penWidthLabel = QtGui.QLabel("&Pen Width:")
penWidthLabel.setBuddy(self.penWidthSpinBox)
self.penColorComboBox = QtGui.QComboBox()
self.populateWithColors(self.penColorComboBox)
self.penColorComboBox.setCurrentIndex(
self.penColorComboBox.findText('darkslateblue'))
penColorLabel = QtGui.QLabel("Pen &Color:")
penColorLabel.setBuddy(self.penColorComboBox)
self.rotationAngleSpinBox = QtGui.QSpinBox()
self.rotationAngleSpinBox.setRange(0, 359)
self.rotationAngleSpinBox.setWrapping(True)
self.rotationAngleSpinBox.setSuffix('\xB0')
rotationAngleLabel = QtGui.QLabel("&Rotation Angle:")
rotationAngleLabel.setBuddy(self.rotationAngleSpinBox)
self.fillRuleComboBox.activated.connect(self.fillRuleChanged)
self.fillColor1ComboBox.activated.connect(self.fillGradientChanged)
self.fillColor2ComboBox.activated.connect(self.fillGradientChanged)
self.penColorComboBox.activated.connect(self.penColorChanged)
for i in range(Window.NumRenderAreas):
self.penWidthSpinBox.valueChanged.connect(self.renderAreas[i].setPenWidth)
self.rotationAngleSpinBox.valueChanged.connect(self.renderAreas[i].setRotationAngle)
topLayout = QtGui.QGridLayout()
for i in range(Window.NumRenderAreas):
topLayout.addWidget(self.renderAreas[i], i / 3, i % 3)
mainLayout = QtGui.QGridLayout()
mainLayout.addLayout(topLayout, 0, 0, 1, 4)
mainLayout.addWidget(fillRuleLabel, 1, 0)
mainLayout.addWidget(self.fillRuleComboBox, 1, 1, 1, 3)
mainLayout.addWidget(fillGradientLabel, 2, 0)
mainLayout.addWidget(self.fillColor1ComboBox, 2, 1)
mainLayout.addWidget(fillToLabel, 2, 2)
mainLayout.addWidget(self.fillColor2ComboBox, 2, 3)
mainLayout.addWidget(penWidthLabel, 3, 0)
mainLayout.addWidget(self.penWidthSpinBox, 3, 1, 1, 3)
mainLayout.addWidget(penColorLabel, 4, 0)
mainLayout.addWidget(self.penColorComboBox, 4, 1, 1, 3)
mainLayout.addWidget(rotationAngleLabel, 5, 0)
mainLayout.addWidget(self.rotationAngleSpinBox, 5, 1, 1, 3)
self.setLayout(mainLayout)
self.fillRuleChanged()
self.fillGradientChanged()
self.penColorChanged()
self.penWidthSpinBox.setValue(2)
self.setWindowTitle("Painter Paths")
def fillRuleChanged(self):
rule = QtCore.Qt.FillRule(self.currentItemData(self.fillRuleComboBox))
for i in range(Window.NumRenderAreas):
self.renderAreas[i].setFillRule(rule)
def fillGradientChanged(self):
color1 = QtGui.QColor(self.currentItemData(self.fillColor1ComboBox))
color2 = QtGui.QColor(self.currentItemData(self.fillColor2ComboBox))
for i in range(Window.NumRenderAreas):
self.renderAreas[i].setFillGradient(color1, color2)
def penColorChanged(self):
color = QtGui.QColor(self.currentItemData(self.penColorComboBox))
for i in range(Window.NumRenderAreas):
self.renderAreas[i].setPenColor(color)
def populateWithColors(self, comboBox):
colorNames = QtGui.QColor.colorNames()
for name in colorNames:
comboBox.addItem(name, name)
def currentItemData(self, comboBox):
return comboBox.itemData(comboBox.currentIndex())
if __name__ == '__main__':
import sys
app = QtGui.QApplication(sys.argv)
window = Window()
window.show()
sys.exit(app.exec_())
|
# -*- coding: utf-8 -*-
"""Storage selection (SAS) functions: example with multiple fluxes out at steady state
Runs the rSAS model for a synthetic dataset with one flux in and
multiple fluxes out and steady state flow
Theory is presented in:
Harman, C. J. (2014), Time-variable transit time distributions and transport:
Theory and application to storage-dependent transport of chloride in a watershed,
Water Resour. Res., 51, doi:10.1002/2014WR015707.
"""
from __future__ import division
import rsas
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Initializes the random number generator so we always get the same result
np.random.seed(0)
# =====================================
# Load the input data
# =====================================
data = pd.read_csv('Q1.csv', index_col=0, parse_dates=[1])
# length of the dataset
N = len(data)
# The individual timeseries can be pulled out of the dataframe
S = data['S'].values
J = data['J'].values
Q = data['Q1'].values
C_J = data['C_J'].values-2
C_Q1 = data['C_Q1'].values
ST_min = data['ST_min'].values
ST_max = data['ST_max'].values
# =========================
# Parameters needed by rsas
# =========================
# The concentration of water older than the start of observations
C_old = ((J*C_J)[J>0]).sum()/((J)[J>0]).sum()
# =========================
# Create the rsas functions
# =========================
S_dead = 10.
#lam = 0.
# Uniform
# Parameters for the rSAS function
Q_rSAS_fun_type = 'uniform'
ST_min = np.zeros(N)
ST_max = S + S_dead
Q_rSAS_fun_parameters = np.c_[ST_min, ST_max]
rSAS_fun_Q1 = rsas.create_function(Q_rSAS_fun_type, Q_rSAS_fun_parameters)
rSAS_fun = [rSAS_fun_Q1]
# Kumaraswami
## Parameters for the rSAS function
#Q_rSAS_fun_type = 'kumaraswami'
#ST_min = np.ones(N) * 0.
#ST_max = S + S_dead
#a = np.maximum(0.01, 2. + lam * (S - S.mean())/S.std())
#b = np.ones(N) * 5.
#Q_rSAS_fun_parameters = np.c_[a, b, ST_min, ST_max]
#rSAS_fun_Q1 = rsas.create_function(Q_rSAS_fun_type, Q_rSAS_fun_parameters)
#rSAS_fun = [rSAS_fun_Q1]
# =================
# Initial condition
# =================
# Unknown initial age distribution, so just set this to zeros
ST_init = np.zeros(N + 1)
# =============
# Run the model
# =============
# Run it
outputs = rsas.solve(J, Q, rSAS_fun, ST_init=ST_init,
mode='RK4', dt = 1., n_substeps=3, C_J=C_J, C_old=[C_old], verbose=False, debug=False)
# Let's pull these out to make the outputs from rsas crystal clear
# State variables: age-ranked storage of water and solutes
# ROWS of ST, MS are T - ages
# COLUMNS of ST, MS are t - times
# LAYERS of MS are s - solutes
ST = outputs['ST']
MS = outputs['MS'][:,:,0]
# Timestep-averaged backwards TTD
# ROWS of PQ are T - ages
# COLUMNS of PQ are t - times
# LAYERS of PQ are q - fluxes
PQ1m = outputs['PQ'][:,:,0]
# Timestep-averaged outflow concentration
# ROWS of C_Q are t - times
# COLUMNS of PQ are q - fluxes
C_Q1m1 = outputs['C_Q'][:,0,0]
# Timestep averaged solute load out
# ROWS of MQ are T - ages
# COLUMNS of MQ are t - times
# LAYERS of MQ are q - fluxes
# Last dimension of MS are s - solutes
MQ1m = outputs['MQ'][:,:,0,0]
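# Illustrative aside (not part of the original example): because rows of these
# arrays are ages T and columns are times t, the backward TTD at a single
# timestep is just one column. The timestep index below is an arbitrary choice
# made only for illustration.
t_index = min(20, N - 1)                  # arbitrary example timestep
TTD_at_t = PQ1m[:, t_index]               # cumulative TTD of discharge at that timestep
age_ranked_storage_at_t = ST[:, t_index]  # age-ranked storage S_T at the same timestep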
#%%
# ==================================
# Plot the rSAS function
# ==================================
STx = np.linspace(0,S.max()+S_dead,100)
Omega = np.r_[[rSAS_fun_Q1.cdf_i(STx,i) for i in range(N)]].T
import matplotlib.cm as cm
fig = plt.figure(0)
plt.clf()
for i in range(N):
plt.plot(STx, Omega[:,i], lw=1, color=cm.jet((S[i]-S.min())/S.ptp()))
plt.ylim((0,1))
plt.ylabel('$\Omega_Q(T)$')
plt.xlabel('age-ranked storage $S_T$')
plt.title('Cumulative rSAS function')
#%%
# ==================================
# Plot the transit time distribution
# ==================================
fig = plt.figure(1)
plt.clf()
plt.plot(PQ1m, lw=1)
plt.ylim((0,1))
plt.ylabel('$P_Q(T)$')
plt.xlabel('age $T$')
plt.title('Cumulative transit time distribution')
#%%
# =====================================================================
# Outflow concentration estimated using several different TTD
# =====================================================================
# Let's get the instantaneous value of the TTD at the end of each timestep
PQ1i = np.zeros((N+1, N+1))
PQ1i[:,0] = rSAS_fun_Q1.cdf_i(ST[:,0],0)
PQ1i[:,1:] = np.r_[[rSAS_fun_Q1.cdf_i(ST[:,i+1],i) for i in range(N)]].T
# Use the transit time distribution and input timeseries to estimate
# the output timeseries for the instantaneous and timestep-averaged cases
C_Q1i, C_Q1i_raw, Q1i_observed_fraction = rsas.transport(PQ1i, C_J, C_old)
C_Q1m2, C_Q1m2_raw, Q1m2_observed_fraction = rsas.transport(PQ1m, C_J, C_old)
# Plot the results
fig = plt.figure(2)
plt.clf()
plt.step(data['datetime'], C_Q1m1, 'g', ls='--', label='mean rsas internal', lw=2, where='post')
plt.step(data['datetime'], C_Q1m2, 'b', ls=':', label='mean rsas.transport', lw=2, where='post')
plt.step(data['datetime'], C_Q1m2_raw, '0.5', ls=':', label='mean rsas.transport (obs part)', lw=2, where='post')
plt.plot(data['datetime'], C_Q1i, 'b:o', label='inst. rsas.transport', lw=1)
#plt.plot(data['datetime'], data['C_Q1'], 'r.', label='observed', lw=2)
plt.ylim((-2, 0))
plt.legend(loc=0)
plt.ylabel('Concentration [-]')
plt.xlabel('time')
plt.title('Outflow concentration')
plt.show()
|
import autobahn.twisted.websocket as ws
import pytest
import urlparse
from twisted.internet import reactor
from twisted.internet.ssl import ClientContextFactory
from twisted.web import client
from .protocols import ProtocolFactory
class _UnsecureClientContextFactory(ClientContextFactory):
"""An SSL context factory that performs no cert checks."""
def getContext(self, hostname, port):
return ClientContextFactory.getContext(self)
_context_factory = _UnsecureClientContextFactory()
class _HTTP10Agent:
"""
A hacky attempt at an HTTP/1.0 version of t.w.c.Agent. Unfortunately
t.w.c.Agent only supports HTTP/1.1, so we have to create this ourselves. It
uses the old HTTPClientFactory implementation in Twisted.
Note that this only sort of implements Agent (it doesn't callback until the
response is received, and it doesn't even return the full response from
request()) and is really only useful for the purposes of these tests.
"""
def __init__(self, reactor):
self._reactor = reactor
class _FakeResponse:
def __init__(self, code):
self.code = code
def request(self, method, uri, headers=None, bodyProducer=None):
url = urlparse.urlparse(uri, scheme='http')
host = url.hostname
port = url.port
if port is None:
port = 443 if (url.scheme == 'https') else 80
# Translate from Agent's Headers object back into a dict.
if headers is not None:
old_headers = {}
for name, value_list in headers.getAllRawHeaders():
old_headers[name] = value_list[0]
headers = old_headers
f = client.HTTPClientFactory(uri, method=method, headers=headers,
timeout=2)
def gotResponse(page):
return _HTTP10Agent._FakeResponse(int(f.status))
f.deferred.addBoth(gotResponse)
if url.scheme == 'https':
self._reactor.connectSSL(host, port, f, ClientContextFactory())
else:
self._reactor.connectTCP(host, port, f)
return f.deferred
#
# Fixture Helper Functions
#
def fixture_connect(uri, protocol):
"""
Connects to the given WebSocket URI using an instance of the provided
WebSocketClientProtocol subclass.
This is intended to be called by pytest fixtures; it will block until a
connection is made and return the protocol instance that wraps the
connection.
"""
factory = ProtocolFactory(uri, protocol)
factory.setProtocolOptions(failByDrop=False,
openHandshakeTimeout=1,
closeHandshakeTimeout=1)
ws.connectWS(factory, timeout=1)
protocol = pytest.blockon(factory.connected)
pytest.blockon(protocol.opened)
return protocol
#
# Fixtures
#
@pytest.fixture
def agent():
"""Returns a t.w.c.Agent for use by tests."""
return client.Agent(reactor, _context_factory)
@pytest.fixture
def agent_10():
"""Returns an HTTP/1.0 "Agent"."""
return _HTTP10Agent(reactor)
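# --- Illustrative sketch (not part of the original module) ---
# A hypothetical fixture built on fixture_connect() above. The URI and the
# EchoProtocol subclass are assumptions made purely for this example; only
# ProtocolFactory is known to exist in .protocols.
#
# @pytest.fixture
# def echo_client():
#     from .protocols import EchoProtocol  # hypothetical WebSocketClientProtocol subclass
#     return fixture_connect("ws://127.0.0.1:9000/echo", EchoProtocol)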
|
import json
import re
from flask import Flask, flash, make_response
from flask.globals import request
from flask.templating import render_template
import requests
from twilio.rest import TwilioRestClient
PHONE_NUMBER_PATTERN = re.compile("^\\+?\\d{10,14}$")
PULL_REQUEST_OPENED = 'prOpened'
PULL_REQUEST_CLOSED = 'prClosed'
PULL_REQUEST_SYNCHRONIZE = 'prSynchronize'
PULL_REQUEST_REOPENED = 'prReopened'
REPOSITORIES = 'repositories'
REPOSITORY_PATTERN = re.compile("[A-Za-z0-9_\\.-]+/[A-Za-z0-9_\\.-]+")
SETTINGS_JSON_FILE_NAME = 'settings.json'
SETTINGS_TEMPLATE = 'settings.html'
TO_NUMBERS = 'toNumbers'
TWILIO_ACCOUNT_SID = 'twilioAccountSid'
TWILIO_AUTH_TOKEN = 'twilioAuthToken'
TWILIO_FROM_NUMBER = 'twilioFromNumber'
app = Flask(__name__)
short_urls = {}
@app.route('/')
def root():
return 'Thank you for using github-sms-notifier!'
@app.route('/admin', methods=['GET'])
def config():
settings = __read_settings()
return render_template(SETTINGS_TEMPLATE, settings=settings)
@app.route('/admin', methods=['POST'])
def save_config():
app.logger.debug(request.form)
pull_request_closed_enabled = False
if PULL_REQUEST_CLOSED in request.form:
pull_request_closed_enabled = True
pull_request_opened_enabled = False
if PULL_REQUEST_OPENED in request.form:
pull_request_opened_enabled = True
pull_request_reopened_enabled = False
if PULL_REQUEST_REOPENED in request.form:
pull_request_reopened_enabled = True
pull_request_synchronize_enabled = False
if PULL_REQUEST_SYNCHRONIZE in request.form:
pull_request_synchronize_enabled = True
settings = {TWILIO_ACCOUNT_SID: request.form[TWILIO_ACCOUNT_SID].strip(),
TWILIO_AUTH_TOKEN: request.form[TWILIO_AUTH_TOKEN].strip(),
TWILIO_FROM_NUMBER: request.form[TWILIO_FROM_NUMBER].strip(),
TO_NUMBERS: request.form[TO_NUMBERS].strip().split(), PULL_REQUEST_CLOSED: pull_request_closed_enabled,
PULL_REQUEST_OPENED: pull_request_opened_enabled, PULL_REQUEST_REOPENED: pull_request_reopened_enabled,
PULL_REQUEST_SYNCHRONIZE: pull_request_synchronize_enabled,
REPOSITORIES: request.form[REPOSITORIES].strip().split()}
errors = __validate_settings(settings)
if errors:
for error in errors:
flash(error, category='error')
else:
with open(SETTINGS_JSON_FILE_NAME, 'w+') as settings_file:
json.dump(settings, settings_file)
flash("Settings saved!")
return render_template(SETTINGS_TEMPLATE, settings=settings)
@app.route('/pullRequests', methods=['POST'])
def pull_requests():
settings = __read_settings()
if settings:
content = json.loads(request.data)
if 'pull_request' in content:
client = TwilioRestClient(settings[TWILIO_ACCOUNT_SID], settings[TWILIO_AUTH_TOKEN])
message = __build_sms_body(content)
app.logger.debug(request.data)
if message and not app.testing:
numbers = settings[TO_NUMBERS]
for number in numbers:
client.sms.messages.create(body=message, from_=settings[TWILIO_FROM_NUMBER], to=number)
else:
app.logger.warn("Not a pull request: {}".format(request.data))
else:
app.logger.warn("Cannot load settings.")
return make_response("", 204)
def __build_sms_body(request_body):
settings = __read_settings()
message_prefix = 'Pull request #' + str(request_body['number'])
message_suffix = request_body['repository']['full_name'] + ' ' + __get_short_url(
request_body['pull_request']['html_url'])
if request_body['action'] == 'opened':
if settings[PULL_REQUEST_OPENED] and __is_supported_repository(settings.get(REPOSITORIES),
request_body['repository']['full_name']):
return message_prefix + ' was opened in ' + message_suffix
elif request_body['action'] == 'closed':
if settings[PULL_REQUEST_CLOSED] and __is_supported_repository(settings.get(REPOSITORIES),
request_body['repository']['full_name']):
return message_prefix + ' was closed in ' + message_suffix
elif request_body['action'] == 'synchronize':
if settings[PULL_REQUEST_SYNCHRONIZE] and __is_supported_repository(settings.get(REPOSITORIES),
request_body['repository']['full_name']):
return message_prefix + ' was synchronized in ' + message_suffix
elif request_body['action'] == 'reopened':
if settings[PULL_REQUEST_REOPENED] and __is_supported_repository(settings.get(REPOSITORIES),
request_body['repository']['full_name']):
return message_prefix + ' was reopened in ' + message_suffix
else:
return 'Unsupported action \'' + request_body['action'] + '\' occurred on pull request #' + str(
request_body['number']) + ' in ' + message_suffix
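# Illustrative note (not part of the original app): __build_sms_body() above only
# reads a few fields of GitHub's pull_request webhook payload. A minimal payload
# shaped the way it expects would look roughly like this (values are made up):
#
# {
#   "action": "opened",
#   "number": 42,
#   "repository": {"full_name": "octocat/hello-world"},
#   "pull_request": {"html_url": "https://github.com/octocat/hello-world/pull/42"}
# }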
def __get_short_url(url):
if short_urls.get(url):
return short_urls[url]
payload = {'url': url}
r = requests.post('http://git.io', data=payload)
short_urls[url] = r.headers.get('Location')
return short_urls[url]
def __is_supported_repository(repositories_settings, notification_repository):
if not repositories_settings:
return True
for repository in repositories_settings:
if notification_repository == repository:
return True
return False
def __is_valid_phone_number(phone_number):
if PHONE_NUMBER_PATTERN.match(phone_number):
return True
else:
return False
def __is_valid_repository_name(repository_name):
if REPOSITORY_PATTERN.match(repository_name):
return True
else:
return False
def __read_settings():
settings = {}
with open(SETTINGS_JSON_FILE_NAME, 'r+') as settings_file:
try:
settings = json.load(settings_file)
except ValueError:
app.logger.warning("Cannot load configuration.")
return settings
def __validate_settings(settings):
errors = []
if not settings.get(TWILIO_ACCOUNT_SID):
errors.append('Twilio Account Sid is required')
if not settings.get(TWILIO_AUTH_TOKEN):
errors.append('Twilio Auth Token is required')
if not settings.get(TWILIO_FROM_NUMBER):
errors.append('Twilio From Number is required')
else:
if not __is_valid_phone_number(settings.get(TWILIO_FROM_NUMBER)):
errors.append("Invalid Twilio From Number: " + settings.get(TWILIO_FROM_NUMBER))
if not settings.get(TO_NUMBERS):
errors.append('Numbers to send SMS to is required')
else:
for to_number in settings.get(TO_NUMBERS):
if not __is_valid_phone_number(to_number):
errors.append("Invalid phone number: " + to_number)
if settings.get(REPOSITORIES):
for repository in settings.get(REPOSITORIES):
if not __is_valid_repository_name(repository):
errors.append("Invalid repository name format: " + repository)
return errors
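# Illustrative note (not part of the original app): a settings.json that passes
# __validate_settings() above would look roughly like this (all values are
# placeholders, not real credentials):
#
# {
#   "twilioAccountSid": "ACxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
#   "twilioAuthToken": "your-auth-token",
#   "twilioFromNumber": "+15551234567",
#   "toNumbers": ["+15557654321"],
#   "prOpened": true,
#   "prClosed": false,
#   "prReopened": true,
#   "prSynchronize": false,
#   "repositories": ["octocat/hello-world"]
# }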
if __name__ == '__main__':
app.secret_key = 'Uqtbl6HxgNWcJsuycuXtHQyR8ExiaNHm'
app.debug = True
app.run()
|
"""
flickr.py
Copyright 2004-2006 James Clarke <james@jamesclarke.info>
Portions Copyright 2007-2008 Joshua Henderson <joshhendo@gmail.com>
THIS SOFTWARE IS SUPPLIED WITHOUT WARRANTY OF ANY KIND, AND MAY BE
COPIED, MODIFIED OR DISTRIBUTED IN ANY WAY, AS LONG AS THIS NOTICE
AND ACKNOWLEDGEMENT OF AUTHORSHIP REMAIN.
2013-01-12
Removed dependency on the token.txt token file.
2007-12-17
For an up-to-date TODO list, please see:
http://code.google.com/p/flickrpy/wiki/TodoList
For information on how to use the Authentication
module, please see:
http://code.google.com/p/flickrpy/wiki/UserAuthentication
2006-12-19
Applied patches from Berco Beute and Wolfram Kriesing.
"""
__author__ = "James Clarke <james@jamesclarke.info>"
__version__ = "$Rev$"
__date__ = "$Date$"
__copyright__ = "Copyright: 2004-2010 James Clarke; Portions: 2007-2008 Joshua Henderson; Portions: 2011 Andrei Vlad Vacariu"
from urllib import urlencode, urlopen
from xml.dom import minidom
import hashlib
import os
HOST = 'https://flickr.com'
API = '/services/rest'
# set these here or using flickr.API_KEY in your application
API_TOKEN = None
API_KEY = ''
API_SECRET = ''
email = None
password = None
AUTH = False
debug = False
# The next 2 variables are only important if authentication is used
# this can be set here or using flickr.tokenPath in your application
# this is the path to the folder containing tokenFile (default: token.txt)
tokenPath = ''
# this can be set here or using flickr.tokenFile in your application
# this is the name of the file containing the stored token.
tokenFile = 'token.txt'
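# Illustrative sketch (not part of the original module): per the comments above,
# a consuming application would typically configure these module-level values
# before making any calls, e.g.
#
#     import flickr
#     flickr.API_KEY = 'your-api-key'        # placeholder value
#     flickr.API_SECRET = 'your-api-secret'  # placeholder value
#     flickr.tokenPath = '/path/to/tokens'   # only relevant if authentication is used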
class FlickrError(Exception): pass
class Photo(object):
"""Represents a Flickr Photo."""
__readonly = ['id', 'secret', 'server', 'farm', 'isfavorite', 'license', 'rotation',
'owner', 'dateposted', 'datetaken', 'takengranularity',
'title', 'description', 'ispublic', 'isfriend', 'isfamily',
'cancomment', 'canaddmeta', 'comments', 'tags', 'permcomment',
'permaddmeta', 'url']
#XXX: Hopefully None won't cause problems
def __init__(self, id, owner=None, dateuploaded=None, \
title=None, description=None, ispublic=None, \
isfriend=None, isfamily=None, cancomment=None, \
canaddmeta=None, comments=None, tags=None, secret=None, \
isfavorite=None, server=None, farm=None, license=None, \
rotation=None, url=None):
"""Must specify id, rest is optional."""
self.__loaded = False
self.__cancomment = cancomment
self.__canaddmeta = canaddmeta
self.__comments = comments
self.__dateuploaded = dateuploaded
self.__description = description
self.__id = id
self.__license = license
self.__isfamily = isfamily
self.__isfavorite = isfavorite
self.__isfriend = isfriend
self.__ispublic = ispublic
self.__owner = owner
self.__rotation = rotation
self.__secret = secret
self.__server = server
self.__farm = farm
self.__tags = tags
self.__title = title
self.__dateposted = None
self.__datetaken = None
self.__takengranularity = None
self.__permcomment = None
self.__permaddmeta = None
self.__url = None
def __setattr__(self, key, value):
if key in self.__class__.__readonly:
raise AttributeError("The attribute %s is read-only." % key)
else:
super(Photo, self).__setattr__(key, value)
def __getattr__(self, key):
if not self.__loaded:
self._load_properties()
if key in self.__class__.__readonly:
return super(Photo, self).__getattribute__("_%s__%s" % (self.__class__.__name__, key))
else:
return super(Photo, self).__getattribute__(key)
def _load_properties(self):
"""Loads the properties from Flickr."""
self.__loaded = True
method = 'flickr.photos.getInfo'
data = _doget(method, photo_id=self.id)
photo = data.rsp.photo
self.__secret = photo.secret
self.__server = photo.server
self.__farm = photo.farm
self.__isfavorite = photo.isfavorite
self.__license = photo.license
self.__rotation = photo.rotation
owner = photo.owner
self.__owner = User(owner.nsid, username=owner.username,\
realname=owner.realname,\
location=owner.location)
self.__title = photo.title.text
self.__description = photo.description.text
self.__ispublic = photo.visibility.ispublic
self.__isfriend = photo.visibility.isfriend
self.__isfamily = photo.visibility.isfamily
self.__dateposted = photo.dates.posted
self.__datetaken = photo.dates.taken
self.__takengranularity = photo.dates.takengranularity
self.__cancomment = photo.editability.cancomment
self.__canaddmeta = photo.editability.canaddmeta
self.__comments = photo.comments.text
self.__url = photo.urls.url.text
try:
self.__permcomment = photo.permissions.permcomment
self.__permaddmeta = photo.permissions.permaddmeta
except AttributeError:
self.__permcomment = None
self.__permaddmeta = None
#TODO: Implement Notes?
if hasattr(photo.tags, "tag"):
if isinstance(photo.tags.tag, list):
self.__tags = [Tag(tag.id, User(tag.author), tag.raw, tag.text) \
for tag in photo.tags.tag]
else:
tag = photo.tags.tag
self.__tags = [Tag(tag.id, User(tag.author), tag.raw, tag.text)]
def __str__(self):
return '<Flickr Photo %s>' % self.id
def setTags(self, tags):
"""Set the tags for current photo to list tags.
(flickr.photos.settags)
"""
method = 'flickr.photos.setTags'
tags = uniq(tags)
_dopost(method, auth=True, photo_id=self.id, tags=tags)
self._load_properties()
def addTags(self, tags):
"""Adds the list of tags to current tags. (flickr.photos.addtags)
"""
method = 'flickr.photos.addTags'
if isinstance(tags, list):
tags = uniq(tags)
_dopost(method, auth=True, photo_id=self.id, tags=tags)
#load properties again
self._load_properties()
def removeTag(self, tag):
"""Remove the tag from the photo must be a Tag object.
(flickr.photos.removeTag)
"""
method = 'flickr.photos.removeTag'
tag_id = ''
try:
tag_id = tag.id
except AttributeError:
raise FlickrError, "Tag object expected"
_dopost(method, auth=True, photo_id=self.id, tag_id=tag_id)
self._load_properties()
def setMeta(self, title=None, description=None):
"""Set metadata for photo. (flickr.photos.setMeta)"""
method = 'flickr.photos.setMeta'
if title is None:
title = self.title
if description is None:
description = self.description
_dopost(method, auth=True, title=title, \
description=description, photo_id=self.id)
self.__title = title
self.__description = description
def getAllContexts(self):
"""Retrieves lists of the pools/sets the photo is in"""
method = 'flickr.photos.getAllContexts'
data = _doget(method, photo_id=self.id)
d = {'pools': [], 'sets': []}
if hasattr(data.rsp, "pool"):
if isinstance(data.rsp.pool, list):
for pool in data.rsp.pool:
d["pools"].append({"id": pool.id, "title": pool.title})
else:
d["pools"].append({"id": data.rsp.pool.id, "title": data.rsp.pool.title})
if hasattr(data.rsp, "set"):
if isinstance(data.rsp.set, list):
for theset in data.rsp.set:
d["sets"].append({"id": theset.id, "title": theset.title})
else:
d["sets"].append({"id": data.rsp.set.id, "title": data.rsp.set.title})
return d
def getPoolCount(self):
"""Retrieves a count of the pools the photo is in"""
d = self.getAllContexts()
return len( d["pools"] )
def getSetCount(self):
"""Retrieves a count of the pools the photo is in"""
d = self.getAllContexts()
return len( d["sets"] )
def getURL(self, size='Medium', urlType='url'):
"""Retrieves a url for the photo. (flickr.photos.getSizes)
urlType - 'url' or 'source'
'url' - flickr page of photo
'source' - image file
"""
method = 'flickr.photos.getSizes'
data = _doget(method, photo_id=self.id)
for psize in data.rsp.sizes.size:
if psize.label == size:
return getattr(psize, urlType)
raise FlickrError, "No URL found"
def getSizes(self):
"""
Get all the available sizes of the current image, and all available
data about them.
Returns: A list of dicts with the size data.
"""
method = 'flickr.photos.getSizes'
data = _doget(method, photo_id=self.id)
ret = []
        # The given props are those that we return, along with their types;
        # returning width and height as strings would make "75" > "100" evaluate
        # to True, which is just error prone.
props = {'url':str,'width':int,'height':int,'label':str,'source':str,'text':str}
for psize in data.rsp.sizes.size:
d = {}
for prop,convert_to_type in props.items():
d[prop] = convert_to_type(getattr(psize, prop))
ret.append(d)
return ret
#def getExif(self):
#method = 'flickr.photos.getExif'
#data = _doget(method, photo_id=self.id)
#ret = []
#for exif in data.rsp.photo.exif:
#print exif.label, dir(exif)
##ret.append({exif.label:exif.})
#return ret
##raise FlickrError, "No URL found"
def getLocation(self):
"""
        Return the latitude and longitude of the picture.
Returns None if no location given for this pic.
"""
method = 'flickr.photos.geo.getLocation'
try:
data = _doget(method, photo_id=self.id)
except FlickrError: # Some other error might have occured too!?
return None
loc = data.rsp.photo.location
return [loc.latitude, loc.longitude]
def getComments(self):
""""
get list of comments for photo
returns a list of comment objects
comment text is in return [item].text
"""
method = "flickr.photos.comments.getList"
try:
data = _doget(method, photo_id=self.id)
except FlickrError: # ???? what errors might there be????
return None
return data.rsp.comments
def _getDirectURL(self, size=None):
size = "_%s" % size if size else ""
return "http://farm%s.static.flickr.com/%s/%s_%s%s.jpg" % \
(self.farm, self.server, self.id, self.secret, size)
def getThumbnail(self):
"""
Return a string representation of the URL to the thumbnail
image (not the thumbnail image page).
"""
return self._getDirectURL('t')
def getSmallSquare(self):
"""
Return a string representation of the URL to the small square
image (not the small square image page).
"""
return self._getDirectURL('s')
def getSmall(self):
"""
Return a string representation of the URL to the small
image (not the small image page).
"""
return self._getDirectURL('m')
def getMedium(self):
"""
Return a string representation of the URL to the medium
image (not the medium image page).
"""
return self._getDirectURL()
def getMedium640(self):
"""
        Return a string representation of the URL to the medium 640
        image (not the medium 640 image page).
"""
return self._getDirectURL('z')
def getLarge(self):
"""
Return a string representation of the URL to the large
image (not the large image page).
"""
return self._getDirectURL('b')
def getGalleryList(self, per_page='', page=''):
"""
        Get the list of galleries which contain the photo.
        Galleries are returned sorted by the date on which the photo
        was added to the gallery.
"""
if per_page > 500: # Max is 500
per_page = 500
method = "flickr.galleries.getListForPhoto"
try:
data = _doget(method, photo_id=self.id, per_page=per_page, \
page=page)
except FlickrError:
return None
return data.rsp.galleries.gallery
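# --- Illustrative usage sketch (not part of the original library) ---
# A hypothetical helper showing how the lazy-loading Photo accessors above are
# typically combined; the photo id passed in is assumed to be a valid Flickr id.
def _example_photo_summary(photo_id):
    photo = Photo(photo_id)
    # plain attribute access triggers _load_properties() via __getattr__
    summary = '%s by %s' % (photo.title, photo.owner.username)
    return summary, photo.getThumbnail(), photo.getURL(size='Medium', urlType='url')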
class Photoset(object):
"""A Flickr photoset."""
def __init__(self, id, title, primary, photos=0, description='', \
secret='', server=''):
self.__id = id
self.__title = title
self.__primary = primary
self.__description = description
self.__count = photos
self.__secret = secret
self.__server = server
id = property(lambda self: self.__id)
title = property(lambda self: self.__title)
description = property(lambda self: self.__description)
primary = property(lambda self: self.__primary)
def __len__(self):
return self.__count
def __str__(self):
return '<Flickr Photoset %s>' % self.id
def getPhotos(self):
"""Returns list of Photos."""
method = 'flickr.photosets.getPhotos'
data = _doget(method, photoset_id=self.id)
photos = data.rsp.photoset.photo
p = []
for photo in photos:
p.append(Photo(photo.id, title=photo.title, secret=photo.secret, \
server=photo.server))
return p
def editPhotos(self, photos, primary=None):
"""Edit the photos in this set.
photos - photos for set
        primary - primary photo (if None, the current primary photo is used)
"""
method = 'flickr.photosets.editPhotos'
if primary is None:
primary = self.primary
ids = [photo.id for photo in photos]
if primary.id not in ids:
ids.append(primary.id)
_dopost(method, auth=True, photoset_id=self.id,\
primary_photo_id=primary.id,
photo_ids=ids)
self.__count = len(ids)
return True
def addPhoto(self, photo):
"""Add a photo to this set.
photo - the photo
"""
method = 'flickr.photosets.addPhoto'
_dopost(method, auth=True, photoset_id=self.id, photo_id=photo.id)
self.__count += 1
return True
def removePhoto(self, photo):
"""Remove the photo from this set.
photo - the photo
"""
method = 'flickr.photosets.removePhoto'
_dopost(method, auth=True, photoset_id=self.id, photo_id=photo.id)
self.__count = self.__count - 1
return True
def editMeta(self, title=None, description=None):
"""Set metadata for photo. (flickr.photos.setMeta)"""
method = 'flickr.photosets.editMeta'
if title is None:
title = self.title
if description is None:
description = self.description
_dopost(method, auth=True, title=title, \
description=description, photoset_id=self.id)
self.__title = title
self.__description = description
return True
#XXX: Delete isn't handled well as the python object will still exist
def delete(self):
"""Deletes the photoset.
"""
method = 'flickr.photosets.delete'
_dopost(method, auth=True, photoset_id=self.id)
return True
def create(cls, photo, title, description=''):
"""Create a new photoset.
photo - primary photo
"""
if not isinstance(photo, Photo):
raise TypeError, "Photo expected"
method = 'flickr.photosets.create'
data = _dopost(method, auth=True, title=title,\
description=description,\
primary_photo_id=photo.id)
set = Photoset(data.rsp.photoset.id, title, Photo(photo.id),
photos=1, description=description)
return set
create = classmethod(create)
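# A minimal, hypothetical sketch of the Photoset workflow above (requires an
# authenticated token; 'primary' and 'extra' are assumed to be Photo instances):
#ps = Photoset.create(primary, title='Holiday', description='Trip photos')
#ps.addPhoto(extra)
#ps.editMeta(title='Holiday 2010')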
class User(object):
"""A Flickr user."""
def __init__(self, id, username=None, isadmin=None, ispro=None, \
realname=None, location=None, firstdate=None, count=None):
"""id required, rest optional."""
self.__loaded = False #so we don't keep loading data
self.__id = id
self.__username = username
self.__isadmin = isadmin
self.__ispro = ispro
self.__realname = realname
self.__location = location
self.__photos_firstdate = firstdate
self.__photos_count = count
    # Lazily-loading property definitions.
id = property(lambda self: self._general_getattr('id'))
username = property(lambda self: self._general_getattr('username'))
isadmin = property(lambda self: self._general_getattr('isadmin'))
ispro = property(lambda self: self._general_getattr('ispro'))
realname = property(lambda self: self._general_getattr('realname'))
location = property(lambda self: self._general_getattr('location'))
photos_firstdate = property(lambda self: \
self._general_getattr('photos_firstdate'))
photos_firstdatetaken = property(lambda self: \
self._general_getattr\
('photos_firstdatetaken'))
photos_count = property(lambda self: \
self._general_getattr('photos_count'))
icon_server= property(lambda self: self._general_getattr('icon_server'))
icon_url= property(lambda self: self._general_getattr('icon_url'))
def _general_getattr(self, var):
"""Generic get attribute function."""
if getattr(self, "_%s__%s" % (self.__class__.__name__, var)) is None \
and not self.__loaded:
self._load_properties()
return getattr(self, "_%s__%s" % (self.__class__.__name__, var))
def _load_properties(self):
"""Load User properties from Flickr."""
method = 'flickr.people.getInfo'
data = _doget(method, user_id=self.__id)
self.__loaded = True
person = data.rsp.person
self.__isadmin = person.isadmin
self.__ispro = person.ispro
self.__icon_server = person.iconserver
if int(person.iconserver) > 0:
self.__icon_url = 'http://photos%s.flickr.com/buddyicons/%s.jpg' \
% (person.iconserver, self.__id)
else:
self.__icon_url = 'http://www.flickr.com/images/buddyicon.jpg'
self.__username = person.username.text
self.__realname = getattr((getattr(person, 'realname', u'')), 'text', u'')
self.__location = getattr((getattr(person, 'location', u'')), 'text', u'')
self.__photos_count = getattr((getattr(getattr(person, 'photos', None), 'count', u'')), 'text', u'')
if self.__photos_count:
self.__photos_firstdate = person.photos.firstdate.text
self.__photos_firstdatetaken = person.photos.firstdatetaken.text
else:
self.__photos_firstdate = None
self.__photos_firstdatetaken = None
def __str__(self):
return '<Flickr User %s>' % self.id
def getPhotosets(self):
"""Returns a list of Photosets."""
method = 'flickr.photosets.getList'
data = _doget(method, user_id=self.id)
sets = []
if not getattr(data.rsp.photosets, 'photoset',None):
            return sets #N.B. returns an empty list
if isinstance(data.rsp.photosets.photoset, list):
for photoset in data.rsp.photosets.photoset:
sets.append(Photoset(photoset.id, photoset.title.text,\
Photo(photoset.primary),\
secret=photoset.secret, \
server=photoset.server, \
description=photoset.description.text,
photos=photoset.photos))
else:
photoset = data.rsp.photosets.photoset
sets.append(Photoset(photoset.id, photoset.title.text,\
Photo(photoset.primary),\
secret=photoset.secret, \
server=photoset.server, \
description=photoset.description.text,
photos=photoset.photos))
return sets
def getPublicFavorites(self, per_page='', page=''):
return favorites_getPublicList(user_id=self.id, per_page=per_page, \
page=page)
def getFavorites(self, per_page='', page=''):
return favorites_getList(user_id=self.id, per_page=per_page, \
page=page)
def getGalleries(self, per_page='', page=''):
return galleries_getList(user_id=self.id, per_page=per_page, \
page=page)
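# Illustrative usage (the username below is made up): User properties are
# loaded lazily via _load_properties on first attribute access.
#user = people_findByUsername('someuser')
#print user.realname                     # triggers flickr.people.getInfo
#print [str(ps) for ps in user.getPhotosets()]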
class Group(object):
"""Flickr Group Pool"""
def __init__(self, id, name=None, members=None, online=None,\
privacy=None, chatid=None, chatcount=None):
self.__loaded = False
self.__id = id
self.__name = name
self.__members = members
self.__online = online
self.__privacy = privacy
self.__chatid = chatid
self.__chatcount = chatcount
self.__url = None
id = property(lambda self: self._general_getattr('id'))
name = property(lambda self: self._general_getattr('name'))
members = property(lambda self: self._general_getattr('members'))
online = property(lambda self: self._general_getattr('online'))
privacy = property(lambda self: self._general_getattr('privacy'))
chatid = property(lambda self: self._general_getattr('chatid'))
chatcount = property(lambda self: self._general_getattr('chatcount'))
def _general_getattr(self, var):
"""Generic get attribute function."""
if getattr(self, "_%s__%s" % (self.__class__.__name__, var)) is None \
and not self.__loaded:
self._load_properties()
return getattr(self, "_%s__%s" % (self.__class__.__name__, var))
def _load_properties(self):
"""Loads the properties from Flickr."""
method = 'flickr.groups.getInfo'
data = _doget(method, group_id=self.id)
self.__loaded = True
group = data.rsp.group
self.__name = group.name.text
self.__description = group.description.text
self.__members = group.members.text
self.__privacy = group.privacy.text
def __str__(self):
return '<Flickr Group %s>' % self.id
def getPhotos(self, tags='', per_page='', page=''):
"""Get a list of photo objects for this group"""
method = 'flickr.groups.pools.getPhotos'
data = _doget(method, group_id=self.id, tags=tags,\
per_page=per_page, page=page)
photos = []
for photo in data.rsp.photos.photo:
photos.append(_parse_photo(photo))
return photos
def add(self, photo):
"""Adds a Photo to the group"""
method = 'flickr.groups.pools.add'
_dopost(method, auth=True, photo_id=photo.id, group_id=self.id)
return True
def remove(self, photo):
"""Remove a Photo from the group"""
method = 'flickr.groups.pools.remove'
_dopost(method, auth=True, photo_id=photo.id, group_id=self.id)
return True
class Tag(object):
def __init__(self, id, author, raw, text):
self.id = id
self.author = author
self.raw = raw
self.text = text
def __str__(self):
return '<Flickr Tag %s (%s)>' % (self.id, self.text)
class Gallery(object):
"""Represents a Flickr Gallery.
Takes gallery_id as argument.
"""
# There are other attributes a Gallery could have,
# but defining them here might create errors.
# Might be useful to define them here, though,
# if the user wants to change them when creating
# an instance.
def __init__(self, id, owner=None, title=None, description=None, \
date_create=None, date_update=None, count_photos=None, \
count_videos=None, primary_photo_id=None, \
primary_photo_server=None, primary_photo_farm=None, \
primary_photo_secret=None):
self.__loaded = False
self.__url = None
self.__id = id
self.__owner = owner
self.__title = title
self.__description = description
self.__date_create = date_create
self.__date_update = date_update
self.__count_photos = count_photos
self.__count_videos = count_videos
self.__primary_photo_id = primary_photo_id
self.__primary_photo_server = primary_photo_server
self.__primary_photo_farm = primary_photo_farm
self.__primary_photo_secret = primary_photo_secret
id = property(lambda self: self._general_getattr('id'))
url = property(lambda self: self._general_getattr('url'))
owner = property(lambda self: self._general_getattr('owner'))
title = property(lambda self: self._general_getattr('title'))
description = property(lambda self: self._general_getattr('description'))
date_create = property(lambda self: self._general_getattr('date_create'))
date_update = property(lambda self: self._general_getattr('date_update'))
count_photos = property(lambda self: self._general_getattr('count_photos'))
count_videos = property(lambda self: self._general_getattr('count_videos'))
primary_photo_id = property(lambda self: self._general_getattr('primary_photo_id'))
primary_photo_server = property(lambda self: self._general_getattr('primary_photo_server'))
primary_photo_farm = property(lambda self: self._general_getattr('primary_photo_farm'))
primary_photo_secret = property(lambda self: self._general_getattr('primary_photo_secret'))
def _general_getattr(self, var):
"""Generic get attribute function."""
if getattr(self, "_%s__%s" % (self.__class__.__name__, var)) is None \
and not self.__loaded:
self._load_properties()
return getattr(self, "_%s__%s" % (self.__class__.__name__, var))
def _load_properties(self):
"""Loads the properties from Flickr."""
method = 'flickr.galleries.getInfo'
data = _doget(method, gallery_id=self.id)
self.__loaded = True
gallery = data.rsp.gallery
self.__url = gallery.url
self.__owner = gallery.owner
self.__title = gallery.title.text
self.__description = gallery.description.text
self.__date_create = gallery.date_create
self.__date_update = gallery.date_update
self.__count_photos = gallery.count_photos
self.__count_videos = gallery.count_videos
self.__primary_photo_id = gallery.primary_photo_id
self.__primary_photo_server = gallery.primary_photo_server
self.__primary_photo_farm = gallery.primary_photo_farm
self.__primary_photo_secret = gallery.primary_photo_secret
def __str__(self):
return '<Flickr Gallery %s>' % self.id
def addPhoto(self, photo, comment=''):
"""Add a new Photo to the Gallery."""
method = 'flickr.galleries.addPhoto'
_dopost(method, auth=True, photo_id=photo.id, gallery_id=self.id, \
comment=comment)
return True
def editMeta(self, title='', description=''):
"""Modify the meta-data for a gallery.
In original API, title is required, but here, if not
specified, it will use the current title. (So it's optional)
Calling this function without any parameters will blank out the description.
"""
method = 'flickr.galleries.editMeta'
if title == '':
title = self.title
_dopost(method, auth=True, gallery_id=self.id, title=title, \
description=description)
return True
def editPhoto(self, photo, comment):
"""Change the comment for the given Photo."""
method = 'flickr.galleries.editPhoto'
_dopost(method, auth=True, gallery_id=self.id, photo_id=photo.id, \
comment=comment)
return True
def editPhotos(self, primary_photo, *photos):
"""Modify the photos in a gallery. Use this method to add,
remove and re-order photos."""
method = 'flickr.galleries.editPhotos'
photo_ids = ','.join([photo.id for photo in photos])
_dopost(method, auth=True, gallery_id=self.id, \
primary_photo_id=primary_photo.id, photo_ids=photo_ids)
return True
def getPhotos(self, per_page='', page='', **extras):
"""Return the list of photos for a gallery.
*extras (optional): A comma-delimited list of extra information
to fetch for each returned record. Currently supported fields are:
description, license, date_upload, date_taken, owner_name,
icon_server, original_format, last_update, geo, tags, machine_tags,
o_dims, views, media, path_alias, url_sq, url_t, url_s, url_m, url_o
"""
method = 'flickr.galleries.getPhotos'
extras = ','.join('%s=%s' % (i, v) for i, v in dict(extras).items())
data = _doget(method, gallery_id=self.id, per_page=per_page, \
page=page, extras=extras)
photos = {} # dict with photo instance as key and comment as value.
# if there's no comment, '' will be assigned.
for photo in data.rsp.photos.photo:
if photo.has_comment == '1':
photos[_parse_photo(photo)] = photo.comment.text
elif photo.has_comment == '0':
photos[_parse_photo(photo)] = ''
else: # Shouldn't EVER get here
raise FlickrError
return photos
#Flickr API methods
#see api docs http://www.flickr.com/services/api/
#for details of each param
#XXX: Could be Photo.search(cls)
def photos_search(user_id='', auth=False, tags='', tag_mode='', text='',\
min_upload_date='', max_upload_date='',\
min_taken_date='', max_taken_date='', \
license='', per_page='', page='', sort='',\
safe_search='', content_type='' ):
"""Returns a list of Photo objects.
    If auth=True the call is authenticated, so private photos may be included.
"""
method = 'flickr.photos.search'
data = _doget(method, auth=auth, user_id=user_id, tags=tags, text=text,\
min_upload_date=min_upload_date,\
max_upload_date=max_upload_date, \
min_taken_date=min_taken_date, \
max_taken_date=max_taken_date, \
license=license, per_page=per_page,\
page=page, sort=sort, safe_search=safe_search, \
content_type=content_type, \
tag_mode=tag_mode)
photos = []
if data.rsp.photos.__dict__.has_key('photo'):
if isinstance(data.rsp.photos.photo, list):
for photo in data.rsp.photos.photo:
photos.append(_parse_photo(photo))
else:
photos = [_parse_photo(data.rsp.photos.photo)]
return photos
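# A hedged example of calling photos_search (the tag values are made up):
#found = photos_search(tags='sunset,beach', tag_mode='all', per_page=10)
#for p in found:
#    print p.title, p.getMedium()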
def photos_search_pages(user_id='', auth=False, tags='', tag_mode='', text='',\
min_upload_date='', max_upload_date='',\
min_taken_date='', max_taken_date='', \
license='', per_page='', page='', sort=''):
"""Returns the number of pages for the previous function (photos_search())
"""
method = 'flickr.photos.search'
data = _doget(method, auth=auth, user_id=user_id, tags=tags, text=text,\
min_upload_date=min_upload_date,\
max_upload_date=max_upload_date, \
min_taken_date=min_taken_date, \
max_taken_date=max_taken_date, \
license=license, per_page=per_page,\
page=page, sort=sort)
return data.rsp.photos.pages
def photos_get_recent(extras='', per_page='', page=''):
"""http://www.flickr.com/services/api/flickr.photos.getRecent.html
"""
method = 'flickr.photos.getRecent'
data = _doget(method, extras=extras, per_page=per_page, page=page)
photos = []
if data.rsp.photos.__dict__.has_key('photo'):
if isinstance(data.rsp.photos.photo, list):
for photo in data.rsp.photos.photo:
photos.append(_parse_photo(photo))
else:
photos = [_parse_photo(data.rsp.photos.photo)]
return photos
#XXX: Could be class method in User
def people_findByEmail(email):
"""Returns User object."""
method = 'flickr.people.findByEmail'
data = _doget(method, find_email=email)
user = User(data.rsp.user.id, username=data.rsp.user.username.text)
return user
def people_findByUsername(username):
"""Returns User object."""
method = 'flickr.people.findByUsername'
data = _doget(method, username=username)
user = User(data.rsp.user.id, username=data.rsp.user.username.text)
return user
#XXX: Should probably be in User as a list User.public
def people_getPublicPhotos(user_id, per_page='', page=''):
"""Returns list of Photo objects."""
method = 'flickr.people.getPublicPhotos'
data = _doget(method, user_id=user_id, per_page=per_page, page=page)
photos = []
    if hasattr(data.rsp.photos, "photo"): # Check if there are photos at all (may have paged too far).
if isinstance(data.rsp.photos.photo, list):
for photo in data.rsp.photos.photo:
photos.append(_parse_photo(photo))
else:
photos = [_parse_photo(data.rsp.photos.photo)]
return photos
#XXX: These are also called from User
def favorites_getList(user_id='', per_page='', page=''):
"""Returns list of Photo objects."""
method = 'flickr.favorites.getList'
data = _doget(method, auth=True, user_id=user_id, per_page=per_page,\
page=page)
photos = []
if isinstance(data.rsp.photos.photo, list):
for photo in data.rsp.photos.photo:
photos.append(_parse_photo(photo))
else:
photos = [_parse_photo(data.rsp.photos.photo)]
return photos
def favorites_getPublicList(user_id, per_page='', page=''):
"""Returns list of Photo objects."""
method = 'flickr.favorites.getPublicList'
data = _doget(method, auth=False, user_id=user_id, per_page=per_page,\
page=page)
photos = []
if isinstance(data.rsp.photos.photo, list):
for photo in data.rsp.photos.photo:
photos.append(_parse_photo(photo))
else:
photos = [_parse_photo(data.rsp.photos.photo)]
return photos
def favorites_add(photo_id):
"""Add a photo to the user's favorites."""
method = 'flickr.favorites.add'
_dopost(method, auth=True, photo_id=photo_id)
return True
def favorites_remove(photo_id):
"""Remove a photo from the user's favorites."""
method = 'flickr.favorites.remove'
_dopost(method, auth=True, photo_id=photo_id)
return True
def groups_getPublicGroups():
"""Get a list of groups the auth'd user is a member of."""
method = 'flickr.groups.getPublicGroups'
data = _doget(method, auth=True)
groups = []
if isinstance(data.rsp.groups.group, list):
for group in data.rsp.groups.group:
groups.append(Group(group.id, name=group.name))
else:
group = data.rsp.groups.group
groups = [Group(group.id, name=group.name)]
return groups
def groups_pools_getGroups():
"""Get a list of groups the auth'd user can post photos to."""
method = 'flickr.groups.pools.getGroups'
data = _doget(method, auth=True)
groups = []
if isinstance(data.rsp.groups.group, list):
for group in data.rsp.groups.group:
groups.append(Group(group.id, name=group.name, \
privacy=group.privacy))
else:
group = data.rsp.groups.group
groups = [Group(group.id, name=group.name, privacy=group.privacy)]
return groups
def tags_getListUser(user_id=''):
"""Returns a list of tags for the given user (in string format)"""
method = 'flickr.tags.getListUser'
auth = user_id == ''
data = _doget(method, auth=auth, user_id=user_id)
if isinstance(data.rsp.tags.tag, list):
return [tag.text for tag in data.rsp.tags.tag]
else:
return [data.rsp.tags.tag.text]
def tags_getListUserPopular(user_id='', count=''):
"""Gets the popular tags for a user in dictionary form tag=>count"""
method = 'flickr.tags.getListUserPopular'
auth = user_id == ''
data = _doget(method, auth=auth, user_id=user_id)
result = {}
if isinstance(data.rsp.tags.tag, list):
for tag in data.rsp.tags.tag:
result[tag.text] = tag.count
else:
result[data.rsp.tags.tag.text] = data.rsp.tags.tag.count
return result
def tags_getrelated(tag):
"""Gets the related tags for given tag."""
method = 'flickr.tags.getRelated'
data = _doget(method, auth=False, tag=tag)
if isinstance(data.rsp.tags.tag, list):
return [tag.text for tag in data.rsp.tags.tag]
else:
return [data.rsp.tags.tag.text]
def contacts_getPublicList(user_id):
"""Gets the contacts (Users) for the user_id"""
method = 'flickr.contacts.getPublicList'
data = _doget(method, auth=False, user_id=user_id)
    try:
        if isinstance(data.rsp.contacts.contact, list):
            return [User(user.nsid, username=user.username) \
                    for user in data.rsp.contacts.contact]
        else:
            # A single contact comes back as a bare element, not a list.
            user = data.rsp.contacts.contact
            return [User(user.nsid, username=user.username)]
    except AttributeError:
        return "No users in the list"
    except:
        return "Unknown error"
def interestingness():
method = 'flickr.interestingness.getList'
data = _doget(method)
photos = []
if isinstance(data.rsp.photos.photo , list):
for photo in data.rsp.photos.photo:
photos.append(_parse_photo(photo))
else:
photos = [_parse_photo(data.rsp.photos.photo)]
return photos
def galleries_create(title, description, primary_photo_id=None):
"""Create a new gallery."""
method = 'flickr.galleries.create'
    if primary_photo_id is None:
        _dopost(method, auth=True, title=title, description=description)
    else:
        _dopost(method, auth=True, title=title, description=description,
                primary_photo_id=primary_photo_id)
def galleries_getList(user_id='', per_page='', page=''):
"""Returns list of Gallery objects."""
method = 'flickr.galleries.getList'
data = _doget(method, auth=False, user_id=user_id, per_page=per_page, \
page=page)
galleries = []
if isinstance(data.rsp.galleries.gallery, list):
for gallery in data.rsp.galleries.gallery:
galleries.append(_parse_gallery(gallery))
else:
galleries = [_parse_gallery(data.rsp.galleries.gallery)]
return galleries
def test_login():
method = 'flickr.test.login'
data = _doget(method, auth=True)
user = User(data.rsp.user.id, username=data.rsp.user.username.text)
return user
def test_echo():
method = 'flickr.test.echo'
data = _doget(method)
return data.rsp.stat
#useful methods
def _doget(method, auth=False, **params):
#uncomment to check you aren't killing the flickr server
#print "***** do get %s" % method
params = _prepare_params(params)
url = '%s%s/?api_key=%s&method=%s&%s%s'% \
(HOST, API, API_KEY, method, urlencode(params),
_get_auth_url_suffix(method, auth, params))
#another useful debug print statement
if debug:
print "_doget", url
return _get_data(minidom.parse(urlopen(url)))
def _dopost(method, auth=False, **params):
#uncomment to check you aren't killing the flickr server
#print "***** do post %s" % method
params = _prepare_params(params)
url = '%s%s/?api_key=%s%s'% \
(HOST, API, API_KEY, _get_auth_url_suffix(method, auth, params))
# There's no reason this can't be str(urlencode(params)). I just wanted to
# have it the same as the rest.
payload = '%s' % (urlencode(params))
#another useful debug print statement
if debug:
print "_dopost url", url
print "_dopost payload", payload
return _get_data(minidom.parse(urlopen(url, payload)))
def _prepare_params(params):
"""Convert lists to strings with ',' between items."""
for (key, value) in params.items():
if isinstance(value, list):
params[key] = ','.join([item for item in value])
return params
def _get_data(xml):
"""Given a bunch of XML back from Flickr, we turn it into a data structure
we can deal with (after checking for errors)."""
data = unmarshal(xml)
if not data.rsp.stat == 'ok':
msg = "ERROR [%s]: %s" % (data.rsp.err.code, data.rsp.err.msg)
raise FlickrError, msg
return data
def _get_api_sig(params):
"""Generate API signature."""
token = userToken()
parameters = ['api_key', 'auth_token']
for item in params.items():
parameters.append(item[0])
parameters.sort()
api_string = [API_SECRET]
for item in parameters:
for chocolate in params.items():
if item == chocolate[0]:
api_string.append(item)
api_string.append(str(chocolate[1]))
if item == 'api_key':
api_string.append('api_key')
api_string.append(API_KEY)
if item == 'auth_token':
api_string.append('auth_token')
api_string.append(token)
api_signature = hashlib.md5(''.join(api_string)).hexdigest()
return api_signature
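# Sketch of the signing scheme implemented above (values are placeholders):
# parameter names are sorted, then the secret and name/value pairs are
# concatenated and MD5-hashed. For params = {'method': 'flickr.test.echo'}:
#   api_sig = hashlib.md5(API_SECRET + 'api_key' + API_KEY
#                         + 'auth_token' + token
#                         + 'method' + 'flickr.test.echo').hexdigest()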
def _get_auth_url_suffix(method, auth, params):
"""Figure out whether we want to authorize, and if so, construct a suitable
URL suffix to pass to the Flickr API."""
authentication = False
# auth may be passed in via the API, AUTH may be set globally (in the same
# manner as API_KEY, etc). We do a few more checks than may seem necessary
# because we allow the 'auth' parameter to actually contain the
# authentication token, not just True/False.
    if auth is True or AUTH is True:
        token = userToken()
        authentication = True
    elif auth:
        token = auth
        authentication = True
    elif AUTH:
        token = AUTH
        authentication = True
# If we're not authenticating, no suffix is required.
if not authentication:
return ''
full_params = params
full_params['method'] = method
return '&auth_token=%s&api_sig=%s' % (token, _get_api_sig(full_params))
def _parse_photo(photo):
"""Create a Photo object from photo data."""
owner = User(photo.owner)
title = photo.title
ispublic = photo.ispublic
isfriend = photo.isfriend
isfamily = photo.isfamily
secret = photo.secret
server = photo.server
p = Photo(photo.id, owner=owner, title=title, ispublic=ispublic,\
isfriend=isfriend, isfamily=isfamily, secret=secret, \
server=server)
return p
def _parse_gallery(gallery):
"""Create a Gallery object from gallery data."""
# This might not work!! NEEDS TESTING
url = gallery.url
owner = User(gallery.owner)
title = gallery.title.text
description = gallery.description.text
date_create = gallery.date_create
date_update = gallery.date_update
count_photos = gallery.count_photos
count_videos = gallery.count_videos
primary_photo_id = gallery.primary_photo_id
primary_photo_server = gallery.primary_photo_server
primary_photo_farm = gallery.primary_photo_farm
primary_photo_secret = gallery.primary_photo_secret
g = Gallery(gallery.id, owner=owner, title=title, description=description, \
date_create=date_create, date_update=date_update, \
count_photos=count_photos, count_videos=count_videos, \
primary_photo_id=primary_photo_id, \
primary_photo_server=primary_photo_server, \
primary_photo_farm=primary_photo_farm, \
primary_photo_secret=primary_photo_secret)
return g
#stolen methods
class Bag: pass
#unmarshal taken and modified from pyamazon.py
#makes the xml easy to work with
def unmarshal(element):
rc = Bag()
if isinstance(element, minidom.Element):
for key in element.attributes.keys():
setattr(rc, key, element.attributes[key].value)
childElements = [e for e in element.childNodes \
if isinstance(e, minidom.Element)]
if childElements:
for child in childElements:
key = child.tagName
if hasattr(rc, key):
if type(getattr(rc, key)) <> type([]):
setattr(rc, key, [getattr(rc, key)])
setattr(rc, key, getattr(rc, key) + [unmarshal(child)])
elif isinstance(child, minidom.Element) and \
(child.tagName == 'Details'):
# make the first Details element a key
setattr(rc,key,[unmarshal(child)])
#dbg: because otherwise 'hasattr' only tests
#dbg: on the second occurence: if there's a
#dbg: single return to a query, it's not a
#dbg: list. This module should always
#dbg: return a list of Details objects.
else:
setattr(rc, key, unmarshal(child))
else:
#jec: we'll have the main part of the element stored in .text
#jec: will break if tag <text> is also present
text = "".join([e.data for e in element.childNodes \
if isinstance(e, minidom.Text)])
setattr(rc, 'text', text)
return rc
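# A small illustration of what unmarshal produces (hypothetical XML): for the
# element <user id="123"><username>alice</username></user>, unmarshal(elem)
# returns a Bag with .id == '123' and .username.text == 'alice'; repeated
# child tags are collected into a list.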
#unique items from a list from the cookbook
def uniq(alist): # Fastest without order preserving
set = {}
map(set.__setitem__, alist, [])
return set.keys()
## Only the "getList" module is complete.
## Work in Progress; Nearly Finished
class Blogs():
def getList(self,auth=True):
"""blogs.getList requires READ authentication"""
# please read documentation on how to use this
method = 'flickr.blogs.getList'
        data = _doget(method, auth=(auth is True))
bID = []
bName = []
bNeedsPword = []
bURL = []
try:
for plog in data.rsp.blogs.blog:
bID.append(plog.id)
bName.append(plog.name)
bNeedsPword.append(plog.needspassword)
bURL.append(plog.url)
except TypeError:
try:
bID.append(data.rsp.blogs.blog.id)
bName.append(data.rsp.blogs.blog.name)
bNeedsPword.append(data.rsp.blogs.blog.needspassword)
bURL.append(data.rsp.blogs.blog.url)
except AttributeError:
return "AttributeError, unexplained!"
except:
return "Unknown error!"
except AttributeError:
return "There are no blogs!"
myReturn = [bID,bName,bNeedsPword,bURL]
return myReturn
def postPhoto(self, blogID, photoID, title, description, bpassword):
"""blogs.postPhoto requires WRITE authentication"""
method = 'flickr.blogs.postPhoto'
return None
class Urls():
    def getUserPhotosURL(self, userid):
"""Returns user URL in an array (to access, use array[1])"""
method = 'flickr.urls.getUserPhotos'
data = _doget(method, user_id=userid)
return [data.rsp.user.nsid,data.rsp.user.url]
class Auth():
def getFrob(self):
"""Returns a frob that is used in authentication"""
method = 'flickr.auth.getFrob'
sig_str = API_SECRET + 'api_key' + API_KEY + 'method' + method
signature_hash = hashlib.md5(sig_str).hexdigest()
data = _doget(method, auth=False, api_sig=signature_hash)
return data.rsp.frob.text
def loginLink(self, permission, frob):
"""Generates a link that the user should be sent to"""
myAuth = Auth()
sig_str = API_SECRET + 'api_key' + API_KEY + 'frob' + frob + 'perms' + permission
signature_hash = hashlib.md5(sig_str).hexdigest()
perms = permission
link = "http://flickr.com/services/auth/?api_key=%s&perms=%s&frob=%s&api_sig=%s" % (API_KEY, perms, frob, signature_hash)
return link
def getToken(self, frob):
"""This token is what needs to be used in future API calls"""
method = 'flickr.auth.getToken'
sig_str = API_SECRET + 'api_key' + API_KEY + 'frob' + frob + 'method' + method
signature_hash = hashlib.md5(sig_str).hexdigest()
data = _doget(method, auth=False, api_sig=signature_hash,
api_key=API_KEY, frob=frob)
return data.rsp.auth.token.text
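# Illustrative three-step authentication flow using the Auth helper above
# (a sketch only; the user must visit the link and authorize before getToken):
#auth = Auth()
#frob = auth.getFrob()
#print auth.loginLink('read', frob)   # send the user to this URL first
#token = auth.getToken(frob)          # then exchange the frob for a token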
def userToken():
global API_TOKEN
    # This function lets flickr.py retrieve the saved token: once a token
    # has been obtained from Flickr it cannot be fetched again, so
    # flickr.py saves it in a file (token.txt by default).
if API_TOKEN is not None:
return API_TOKEN
if not tokenPath == '':
f = file(os.path.join(tokenPath, tokenFile), 'r')
else:
f = file(tokenFile, 'r')
API_TOKEN = f.read()
f.close()
return API_TOKEN
def getUserPhotosURL(userid):
"""Returns user URL in an array (to access, use array[1])"""
# This addition has been added upon request of
# nsteinmetz. It will be "cleaned up" at another
# time.
method = 'flickr.urls.getUserPhotos'
data = _doget(method, user_id=userid)
userurl = [data.rsp.user.nsid,data.rsp.user.url]
return userurl
if __name__ == '__main__':
print test_echo()
|
"""
A quiver plot for flow defined on an unstructured grid.
Each arrow follows the local streamlines, and the spacing
between arrows is maintained to avoid overlap.
It's slow.
"""
import matplotlib.pyplot as plt
from matplotlib import collections
import numpy as np
from shapely import geometry
from .. import utils
from ..grid import exact_delaunay
from ..model import stream_tracer
class StreamlineQuiver(object):
max_short_traces=100 # abort loop when this many traces have come up short.
short_traces=0 # count of short traces so far.
streamline_count=1000
min_clearance=6.0 # streamlines are truncated when this close to each other
seed_clearance = 12.0 # streamlines are started when the circumradius >= this
coll_args=None
#cmap='jet'
#clim=[0,1.5]
max_t=60.0
max_dist=60.
size=1.0
lw=0.8
# don't start traces outside this xxyy bounding box.
clip=None
# If True, trace long streamlines, run them out in a deterministic direction,
    # and try to keep just the part that abuts an obstacle.
pack=False
def __init__(self,g,U,**kw):
self.coll_args={}
utils.set_keywords(self,kw)
self.g=g
self.U=U
self.island_points=[] # log weird island points for debugging
self.Umag=utils.mag(U)
self.boundary=g.boundary_polygon()
self.init_tri()
self.calculate_streamlines()
NOT_STREAM=0
STREAM=1
TRUNC=2
def init_tri(self):
self.tri=tri=exact_delaunay.Triangulation()
tri.add_cell_field('outside',np.zeros(0,np.bool8))
tri.add_node_field('tip',np.zeros(0,np.bool8))
# NOT_STREAM=0: not part of a streamline
# STREAM=1: streamline without truncation
# TRUNC=2: streamline that got truncated.
tri.add_node_field('stream_code',np.zeros(0,np.int32))
tri.cell_defaults['_center']=np.nan
tri.cell_defaults['_area']=np.nan
tri.cell_defaults['outside']=False
tri.node_defaults['tip']=False
bound_cycle=self.g.boundary_cycle()
tri.bulk_init( self.g.nodes['x'][bound_cycle] )
for a,b in utils.circular_pairs(np.arange(len(bound_cycle))):
tri.add_constraint(a,b)
tri.cells['_area']=np.nan
centers=tri.cells_centroid()
for c in tri.valid_cell_iter():
if not self.boundary.intersects(geometry.Point(centers[c])):
tri.cells['outside'][c]=True
else:
tri.cells['outside'][c]=False
return tri
def calculate_streamlines(self,count=None):
if count is None:
count=self.streamline_count
        for i in range(count):
self.process_one_streamline()
if self.short_traces>self.max_short_traces:
break
def process_one_streamline(self):
xy=self.pick_starting_point()
if xy is None:
print("Stopping on seed clearance")
# should refactor stopping criteria
self.short_traces=self.max_short_traces + 1
return
if self.pack:
xy=self.pack_starting_point(xy)
trace=stream_tracer.steady_streamline_oneway(self.g,self.U,xy,
max_t=self.max_t,max_dist=self.max_dist)
else:
# max_t=20.0 was decent.
trace=stream_tracer.steady_streamline_twoways(self.g,self.U,xy,
max_t=self.max_t,max_dist=self.max_dist)
n_nodes=self.add_trace_to_tri(trace)
if n_nodes==1:
print(".",end="")
self.short_traces+=1
def pack_starting_point(self,xy):
"""
Pack the given starting point as far upstream as possible, based
on the seed_clearance.
"""
trace=stream_tracer.steady_streamline_oneway(self.g,-self.U,xy,
max_t=100*self.max_t,
max_dist=100*self.max_dist)
new_xy_t_idx=0
hint={}
for t_idx in range(len(trace.time)):
xy_i=trace.x.values[t_idx]
# quickly test clearance of this point:
rad,hint=self.tri.point_clearance(xy_i,hint=hint)
if rad < self.seed_clearance:
break
else:
new_xy_t_idx=t_idx
new_xy=trace.x.values[new_xy_t_idx]
return new_xy
def add_trace_to_tri(self,trace,min_clearance=None):
"""
        trace: a trace Dataset as returned from stream_tracer
"""
if min_clearance is None:
min_clearance=self.min_clearance
if 'root' in trace:
trace_root=trace.root.item()
else:
trace_root=0
xys=trace.x.values
if self.pack:
stops=[None,trace.stop_condition.item()]
else:
stops=trace.stop_conditions.values
if stops[0]=='leave_domain':
xys=xys[1:]
trace_root=max(0,trace_root-1)
if stops[-1]=='leave_domain':
xys=xys[:-1]
# Keep this in the order of the linestring
recent=[]
nroot=self.tri.add_node(x=xys[trace_root])
recent.append(nroot)
clearance=self.neighbor_clearance(nroot,recent)
if clearance<min_clearance:
print(".",end="")
self.tri.nodes['stream_code'][recent]=self.TRUNC
return len(recent)
stream_code=self.STREAM
if self.pack:
# Starting point is fine, only need to check as we go downstream
incrs=[1]
else:
# Check both ways.
incrs=[1,-1]
for incr in incrs:
xy_leg=xys[trace_root+incr::incr]
na=nroot
for xy in xy_leg:
if np.all(xy==self.tri.nodes['x'][na]):
# root is repeated. could happen in other cases, too.
continue
try:
nb=self.tri.add_node(x=xy)
except self.tri.DuplicateNode:
# Essentially a degenerate case of neighbor_clearance
# going to 0.
import pdb
pdb.set_trace()
print("x",end="")
stream_code=self.TRUNC
break
recent.append(nb)
clearance=self.neighbor_clearance(nb,recent)
if clearance<min_clearance:
# if it's too close, don't add an edge
print("-",end="")
stream_code=self.TRUNC
break
try:
self.tri.add_constraint(na,nb)
except self.tri.IntersectingConstraints:
print('!') # shouldn't happen..
break
na=nb
if incr>0:
self.tri.nodes['tip'][na]=True
            recent=recent[::-1] # The second iteration walks in the opposite direction from the first.
self.tri.nodes['stream_code'][recent]=stream_code
return len(recent)
def pick_starting_point(self):
"""
Pick a starting point based on the triangle with the largest circumradius.
Complicated by the presence of constrained edges at the boundary of the
grid. Using constrained centers helps to some degree.
"""
while 1:
centers=self.tri.constrained_centers()
radii=utils.dist(centers - self.tri.nodes['x'][self.tri.cells['nodes'][:,0]])
radii[ self.tri.cells['outside'] | self.tri.cells['deleted']] = 0.0
if self.clip is not None:
clipped=( (centers[:,0]<self.clip[0])
| (centers[:,0]>self.clip[1])
| (centers[:,1]<self.clip[2])
| (centers[:,1]>self.clip[3]) )
radii[clipped]=0.0
radii[ ~np.isfinite(radii)]=0.0
best=np.argmax(radii)
if radii[best]<self.seed_clearance:
return None
xy=centers[best]
print("*",end="") # xy)
if not self.boundary.intersects( geometry.Point(xy) ):
                # Either constrained_centers() did a bad job and the point isn't
                # in the cell, or this is an island (handled below).
cpoly=self.tri.cell_polygon(best)
if not cpoly.intersects( geometry.Point(xy) ):
print("Constrained center %s fell outside cell %d. Lie and mark cell 'outside'"%(str(xy),best))
self.tri.cells['outside'][best]=True
continue
else:
# Assume this is an island.
print("Island")
self.island_points.append( xy )
self.tri.cells['outside'][best]=True
continue
break
return xy
    # An alternative would have been to keep track of the recent nodes as
    # they were created.
def neighbor_clearance(self,n,recent=[]):
# This is a bit tricky, as we could get into a spiral, and have only
# neighbors that are on our own streamline.
nbrs=self.tri.node_to_nodes(n)
nbr_dists=utils.dist( self.tri.nodes['x'][n] - self.tri.nodes['x'][nbrs])
# Only worry about recent nodes when the distance is some factor
# smaller than the along-path distance.
min_dist=np.inf
# node indices from n and moving away
nodes=np.r_[ n, recent[::-1]]
recent_path=self.tri.nodes['x'][nodes]
recent_dist=utils.dist_along(recent_path)
path_dists={ rn:rd for rn,rd in zip(nodes,recent_dist)}
for nbr,dist in zip(self.tri.node_to_nodes(n),nbr_dists):
if nbr in recent:
# What is the along path distance?
path_dist=path_dists[nbr]
# if the straightline distance is not much smaller
# than the along-path distance, then we're probably
# just seeing ourselves, and not grounds for clearance
# issues
if dist > 0.5 *path_dist:
continue
# otherwise, path may have looped back, and we should bail.
min_dist=min(min_dist,dist)
return min_dist
def fig_constrained(self,num=None):
fig,ax=plt.subplots(num=num)
sel=~(self.tri.cells['outside'] | self.tri.cells['deleted'] )
self.tri.plot_edges(color='k',lw=0.3,mask=self.tri.edges['constrained'])
ax.axis('off')
ax.set_position([0,0,1,1])
return fig,ax
def segments_and_speeds(self,include_truncated=True):
"""
Extract segments starting from nodes marked as tip.
        include_truncated: include segments that are shorter than their
        trace on account of truncation due to spacing.
"""
strings=self.tri.extract_linear_strings(edge_select=self.tri.edges['constrained'])
# Order them ending with the tip, and only strings that include
# a tip (gets rid of boundary)
segs=[]
for string in strings:
node_tips=self.tri.nodes['tip'][string]
if np.all( ~node_tips): continue
if not include_truncated and np.any(self.tri.nodes['stream_code'][string]==self.TRUNC):
continue
xy=self.tri.nodes['x'][string]
if node_tips[0]:
xy=xy[::-1]
elif node_tips[-1]:
pass
else:
print("Weird - there's a tip but it's not at the tip")
segs.append(xy)
tip_cells=[self.g.select_cells_nearest(seg[-1],inside=True) for seg in segs]
speeds=self.Umag[tip_cells]
return segs,speeds
sym=2.0 * np.array( [ [1.5, 0],
[-0.5, 1],
[0, 0],
[-0.5, -1]])
diam=np.array([ [0.5,0],
[0, 0.5],
[-0.5,0],
[0,-0.5]])
def manual_arrows(self,x,y,u,v,speeds,size=1.0):
# manual arrow heads.
angles=np.arctan2(v,u)
polys=[ utils.rot(angle,self.sym) for angle in angles]
polys=np.array(polys)
polys[speeds<0.1,:]=self.diam
polys *= size
polys[...,0] += x[:,None]
polys[...,1] += y[:,None]
pcoll=collections.PolyCollection(polys,array=speeds,**self.coll_args)
return pcoll
def plot_quiver(self,ax=None,include_truncated=True):
"""
Add the quiver plot to the given axes.
The quiver is split into two collections: a line (shaft) and
a polygon (arrow head).
This method should evolve to have a calling convention closer to
the MPL quiver function in terms of color, clim, cmap. For now
it uses self.clim, self.cmap, and defines color using the speed,
which in turn is defined at the downstream tip of the arrow.
"""
if ax is None:
ax=plt.gca()
segs,speeds=self.segments_and_speeds(include_truncated=include_truncated)
return self.plot_segs_and_speeds(segs,speeds,ax)
def plot_segs_and_speeds(self,segs,speeds,ax):
speeds=np.asanyarray(speeds)
result={}
result['lcoll']=collections.LineCollection(segs,
array=speeds,
lw=self.lw,**self.coll_args)
ax.add_collection(result['lcoll'])
# Need end points, end velocity for each segments
xyuvs=[]
for seg,speed in zip(segs,speeds):
seg=np.asanyarray(seg)
seg=seg[np.isfinite(seg[:,0])]
uv=speed*utils.to_unit(seg[-1]-seg[-2])
xyuvs.append( [seg[-1,0],seg[-1,1],uv[0],uv[1]])
xyuvs=np.array(xyuvs)
pcoll=self.manual_arrows(xyuvs[:,0],xyuvs[:,1],
xyuvs[:,2],xyuvs[:,3],
speeds,size=self.size)
pcoll.set_array(speeds)
#pcoll.set_cmap(self.cmap)
#pcoll.set_clim(self.clim)
pcoll.set_lw(0)
ax.add_collection(pcoll)
result['pcoll']=pcoll
return result
def quiverkey(self,X,Y,U,label,**kw):
"""
Add a basic key for the quiver
"""
ax=kw.get('ax',None) or plt.gca()
segs=[ [ [X,Y],[X+self.max_t*U,Y]] ]
speeds=[U]
pad_x=kw.get('pad_x',10)
ax.text(segs[0][-1][0]+pad_x,segs[0][-1][1],label,
va='center')
return self.plot_segs_and_speeds(segs=segs,speeds=speeds,ax=ax)
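# A hedged usage sketch (assumes 'grid' is an unstructured grid compatible
# with stream_tracer and 'U' is the matching per-cell velocity array):
# sq = StreamlineQuiver(grid, U, max_t=30.0, min_clearance=4.0)
# fig, ax = plt.subplots()
# colls = sq.plot_quiver(ax=ax)
# sq.quiverkey(X=0.0, Y=0.0, U=0.5, label='0.5 m/s', ax=ax)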
|
import glob
from PIL import Image
import numpy as np
import os
import tqdm
import multiprocessing
import threading
# Used instead of preprocess_data.ipynb -> adjust the directories and the path separator; here \\ for Windows, replace with / for Linux.
# Multithreading does not work -> replaced by a multiprocessing pool (a plain for loop is kept below, commented out).
dest_dir = r'C:\git\python\glomeruli3\data_preprocessed'
dest_dir_masks = os.path.join( dest_dir, 'masks')
dest_dir_img = os.path.join( dest_dir, 'img')
palette = {(0, 0, 0) : 0 ,
(0, 0, 255) : 0 ,
(255, 255, 255) : 1
}
def convert_from_color_segmentation(arr_3d):
arr_2d = np.zeros((arr_3d.shape[0], arr_3d.shape[1]), dtype=np.uint8)
for c, i in palette.items():
m = np.all(arr_3d == np.array(c).reshape(1, 1, 3), axis=2)
arr_2d[m] = i
return arr_2d
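# Quick illustration of the palette mapping above (not executed here): a 1x2
# RGB mask [[(255, 255, 255), (0, 0, 255)]] maps to the label array [[1, 0]],
# i.e. white pixels become class 1 and blue/black pixels become class 0.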
images = glob.glob(r'C:\git\python\glomeruli3\data/*.jpg')
masks = glob.glob(r'C:\git\python\glomeruli3\data/*.png')
masks.sort()
images.sort()
assert( len(images) == len(masks))
def rotate(img, img_name, mask, mask_name, degree, postfix):
img = img.rotate(degree)
mask = mask.rotate(degree)
mask_arr = np.array(mask)
mask_conved = convert_from_color_segmentation(mask_arr)
img.save(os.path.join(dest_dir_img, postfix + img_name))
Image.fromarray(mask_conved).save(os.path.join(dest_dir_masks, postfix + mask_name))
return
def process(args):
image_src, mask_src = args
image_name = '_'.join(image_src.split('\\')[-3:]) # -1 for absolute directories!
mask_name = '_'.join(mask_src.split('\\')[-3:])
img = Image.open(image_src)
mask = Image.open(mask_src)
img = img.resize((512, 512), Image.NEAREST)
mask = mask.resize((512, 512), Image.NEAREST)
rotate(img, image_name, mask, mask_name, 90, "90_")
rotate(img, image_name, mask, mask_name, 180, "180_")
rotate(img, image_name, mask, mask_name, 270, "270_")
mask_arr = np.array(mask)
mask_conved = convert_from_color_segmentation(mask_arr)
img.save(os.path.join(dest_dir_img, image_name))
Image.fromarray(mask_conved).save(os.path.join(dest_dir_masks, mask_name))
# for i in range(len(masks)):
# print(str(i+1)+"/"+str(len(masks)) +": "+ images[i]+" / "+masks[i])
# process((images[i], masks[i]))
if __name__ == '__main__':
pool = multiprocessing.Pool(10)
tasks = []
for i in range(len(masks)):
tasks.append((images[i], masks[i]))
for _ in tqdm.tqdm(pool.imap_unordered(process, tasks), total=len(tasks)):
pass
|
from datetime import datetime
from mongoengine import (connect, ValidationError)
from nose.tools import (assert_true, assert_false, assert_equal,
assert_almost_equal, assert_raises)
from qirest_client.helpers import database
from qirest_client.model.subject import (Project, ImagingCollection, Subject)
from qirest_client.model.common import TumorExtent
from qirest_client.model.imaging import (
Session, Scan, Registration, Protocol,
MultiImageResource, SingleImageResource, LabelMap, SessionDetail,
Image, Point, Region, Modeling
)
from qirest_client.model.clinical import (
Biopsy, Evaluation, Surgery, PathologyReport, TumorLocation,
TNM, BreastSurgery, BreastPathology, ResidualCancerBurden,
ModifiedBloomRichardsonGrade, HormoneReceptorStatus,
SarcomaPathology, FNCLCCGrade, NecrosisPercentValue,
NecrosisPercentRange, necrosis_percent_as_score
)
class TestModel(object):
"""
Basic data model test. A more complete test is found in the qirest
server TestSeed test suite.
"""
def setup(self):
self._connection = connect(db='qiprofile_test')
self._connection.drop_database('qiprofile_test')
def tearDown(self):
self._connection.drop_database('qiprofile_test')
def test_project(self):
prj = Project()
# The project must have a name.
with assert_raises(ValidationError):
prj.validate()
prj.name = 'Breast'
prj.validate()
def test_collection(self):
# The collection must have a name.
with assert_raises(ValidationError):
ImagingCollection(project='QIN_Test').validate()
# The collection must have a project.
with assert_raises(ValidationError):
ImagingCollection(name='Breast').validate()
# A valid collection.
coll = ImagingCollection(project='QIN_Test', name='Breast')
coll.validate()
def test_subject(self):
subject = Subject()
        # The subject must have a project.
with assert_raises(ValidationError):
subject.validate()
subject.project = 'QIN_Test'
        # The subject must have a number.
with assert_raises(ValidationError):
subject.validate()
subject.number = 1
# The subject must have a collection.
with assert_raises(ValidationError):
subject.validate()
subject.collection = 'Breast'
subject.validate()
def test_race(self):
subject = Subject(project='QIN_Test', collection='Breast', number=1)
subject.races = ['White', 'Black', 'Asian', 'AIAN', 'NHOPI']
subject.validate()
subject = Subject(project='QIN_Test', collection='Breast', number=1)
subject.races = ['Invalid']
with assert_raises(ValidationError):
subject.validate()
# Races must be a list.
subject.races = 'White'
with assert_raises(ValidationError):
subject.validate()
def test_ethnicity(self):
subject = Subject(project='QIN_Test', collection='Breast', number=1)
subject.ethnicity = 'Non-Hispanic'
subject.validate()
# The ethnicity is a controlled value.
subject.ethnicity = 'Invalid'
with assert_raises(ValidationError):
subject.validate()
def test_breast_biopsy(self):
"""
This Breast biopsy test case is a variation of the Breast
surgery test case. Notably, this test case exercises multiple
tumors. There is no Sarcoma biopsy test case.
"""
subject = Subject(project='QIN_Test', collection='Breast', number=1)
# The pathology.
size = TNM.Size.parse('T3a')
size.validate()
grade = ModifiedBloomRichardsonGrade(
tubular_formation=2, nuclear_pleomorphism=1, mitotic_count=2
)
grade.validate()
tnm = TNM(tumor_type='Breast', grade=grade, size=size,
metastasis=False, resection_boundaries=1,
lymphatic_vessel_invasion=False)
tnm.validate()
estrogen1 = HormoneReceptorStatus(hormone='estrogen', positive=True,
intensity=80)
estrogen1.validate()
hormone_receptors1 = [estrogen1]
extent1 = TumorExtent(length=48, width=31, depth=19)
extent1.validate()
tumor1_pathology = BreastPathology(tnm=tnm, extent=extent1,
hormone_receptors=hormone_receptors1)
tumor1_pathology.validate()
estrogen2 = HormoneReceptorStatus(hormone='estrogen', positive=False)
estrogen2.validate()
hormone_receptors2 = [estrogen2]
extent2 = TumorExtent(length=27, width=16, depth=8)
extent2.validate()
tumor2_pathology = BreastPathology(extent=extent2,
hormone_receptors=hormone_receptors2)
tumor2_pathology.validate()
# The pathology aggregate.
pathology = PathologyReport(tumors=[tumor1_pathology, tumor2_pathology])
pathology.validate()
# Add the encounter to the subject.
date = datetime(2013, 1, 4)
biopsy = Biopsy(date=date, weight=54, pathology=pathology)
biopsy.validate()
subject.encounters = [biopsy]
subject.validate()
def test_rcb_index(self):
extent = TumorExtent(length=32, width=12)
rcb = ResidualCancerBurden(
tumor_cell_density=40,
dcis_cell_density=10,
positive_node_count=6,
largest_nodal_metastasis_length=8
)
path = BreastPathology(extent=extent, rcb=rcb)
rcb_index = path.rcb_index()
assert_almost_equal(rcb_index, 3.695, 3,
msg="The RCB index is incorrect: %f" % rcb_index)
def test_rcb_class(self):
path = BreastPathology()
for expected, rcb_index in enumerate((0, 1.2, 1.4, 3.5)):
actual = path.rcb_class(rcb_index)
assert_equal(actual, expected,
"The RCB class of RCB index %f is incorrect: %d" %
(rcb_index, actual))
def test_breast_surgery(self):
subject = Subject(project='QIN_Test', collection='Breast', number=1)
# The pathology report.
size = TNM.Size.parse('T2')
size.validate()
grade = ModifiedBloomRichardsonGrade(
tubular_formation=1, nuclear_pleomorphism=1, mitotic_count=1
)
grade.validate()
tnm = TNM(tumor_type='Breast', grade=grade, size=size,
metastasis=False, resection_boundaries=1,
lymphatic_vessel_invasion=False)
tnm.validate()
location = TumorLocation(sagittal_location='Left')
location.validate()
tumor_pathology = BreastPathology(tnm=tnm, location=location)
tumor_pathology.validate()
pathology = PathologyReport(tumors=[tumor_pathology])
pathology.validate()
# Add the encounter to the subject.
date = datetime(2013, 1, 4)
surgery = BreastSurgery(date=date, weight=54, surgery_type='Lumpectomy',
pathology=pathology)
surgery.validate()
subject.encounters = [surgery]
subject.validate()
def test_sarcoma_surgery(self):
subject = Subject(project='QIN_Test', collection='Sarcoma', number=1)
# The pathology report.
size = TNM.Size.parse('T3a')
size.validate()
grade = FNCLCCGrade(
differentiation=2, necrosis_score=1, mitotic_count=1
)
grade.validate()
tnm = TNM(tumor_type='Sarcoma', grade=grade, size=size,
metastasis=False, resection_boundaries=1,
lymphatic_vessel_invasion=False)
tnm.validate()
location = TumorLocation(body_part='Thigh', sagittal_location='Left',
coronal_location='Posterior')
location.validate()
tumor_pathology = SarcomaPathology(tnm=tnm, location=location)
tumor_pathology.validate()
pathology = PathologyReport(tumors=[tumor_pathology])
pathology.validate()
# Add the encounter to the subject.
date = datetime(2014, 6, 19)
surgery = Surgery(date=date, weight=47, pathology=pathology)
surgery.validate()
subject.encounters = [surgery]
subject.validate()
def test_tnm_size(self):
for value in ['T1', 'Tx', 'cT4', 'T1b', 'cT2a']:
size = TNM.Size.parse(value)
size.validate()
assert_equal(str(size), value, "The TNM parse is incorrect -"
" expected %s, found %s"
% (value, str(size)))
def test_necrosis_score(self):
fixture = {
0: dict(integer=0,
value=NecrosisPercentValue(value=0),
range=NecrosisPercentRange(
start=NecrosisPercentRange.LowerBound(value=0),
stop=NecrosisPercentRange.UpperBound(value=1))),
1: dict(integer=40,
value=NecrosisPercentValue(value=40),
range=NecrosisPercentRange(
start=NecrosisPercentRange.LowerBound(value=40),
stop=NecrosisPercentRange.UpperBound(value=50))),
2: dict(integer=50,
value=NecrosisPercentValue(value=50),
range=NecrosisPercentRange(
start=NecrosisPercentRange.LowerBound(value=50),
stop=NecrosisPercentRange.UpperBound(value=60)))
}
for expected, inputs in fixture.iteritems():
for in_type, in_val in inputs.iteritems():
actual = necrosis_percent_as_score(in_val)
                assert_equal(actual, expected,
                             "The necrosis score for %s is incorrect: %d" %
                             (in_val, actual))
def test_treatment(self):
# TODO - add the treatment test case.
pass
def test_session(self):
# The test subject.
subject = Subject(project='QIN_Test', collection='Breast', number=1)
# The test session.
date = datetime(2013, 1, 4)
session = Session(date=date)
session.validate()
subject.encounters = [session]
subject.validate()
def test_add_encounter(self):
# The test subject.
subject = Subject(project='QIN_Test', collection='Breast', number=1)
# Add the test encounters.
encounters = [Biopsy(date=datetime(2014, m, 1)) for m in (3, 5, 7)]
subject.add_encounter(encounters[1])
assert_equal(subject.encounters, encounters[1:2])
subject.add_encounter(encounters[0])
assert_equal(subject.encounters, encounters[0:2])
subject.add_encounter(encounters[2])
assert_equal(subject.encounters, encounters)
def test_scan(self):
# The scan protocol.
protocol = database.get_or_create(Protocol, dict(technique='T1'))
# The scan.
scan = Scan(protocol=protocol, number=1)
scan.validate()
# Validate the session detail embedded scan.
detail = SessionDetail(scans=[scan])
detail.validate()
def test_registration(self):
# The scan protocol.
scan_pcl = database.get_or_create(Protocol, dict(technique='T1'))
# The scan.
scan = Scan(protocol=scan_pcl, number=1)
scan.validate()
# The registration protocol.
reg_pcl = database.get_or_create(Protocol,
dict(technique='FLIRT'))
# The registration volumes.
vol_imgs = [Image(name="volume00%s.nii.gz" % vol) for vol in range(1, 4)]
volumes = MultiImageResource(name='reg', images=vol_imgs)
# The registration time series.
time_series_img = Image(name='reg_ts.nii.gz')
time_series = SingleImageResource(name='reg_ts', image=time_series_img)
reg = Registration(protocol=reg_pcl, volumes=volumes, time_series=time_series)
reg.validate()
# Validate the session detail and embedded scan registration.
scan.registrations = [reg]
detail = SessionDetail(scans=[scan])
detail.scans = [scan]
detail.validate()
def test_roi(self):
# The scan protocol.
scan_pcl = database.get_or_create(Protocol,
dict(technique='T1'))
# The scan.
scan = Scan(protocol=scan_pcl, number=1)
scan.validate()
# The ROI.
centroid = Point(x=200, y=230, z=400)
intensity = 31
metadata = dict(centroid=centroid, average_intensity=intensity)
mask = Image(name='lesion1.nii.gz', metadata=metadata)
label_map = LabelMap(name='lesion1_color.nii.gz',
color_table='color_table.nii.gz')
label_map.validate()
roi = Region(mask=mask, resource='roi', label_map=label_map, centroid=centroid)
roi.validate()
# Validate the session detail and embedded scan ROI.
scan.rois = [roi]
detail = SessionDetail(scans=[scan])
detail.scans = [scan]
detail.validate()
def test_modeling_protocol(self):
# The modeling protocol content.
cfg = {'Fastfit': {'model_name': 'fxr.model'},
'R1': {'r1_0_val': 0.7, 'baseline_end_idx': 1}}
mdl_pcl_key = dict(technique='BOLERO', configuration=cfg)
mdl_pcl = database.get_or_create(Protocol, mdl_pcl_key)
assert_equal(mdl_pcl.configuration, cfg,
"The fetched modeling configuration is incorrect: %s" %
mdl_pcl.configuration)
def test_modeling(self):
# The test subject.
subject = Subject(project='QIN_Test', collection='Breast', number=1)
# The modeling protocol.
cfg = {'Fastfit': {'model_name': 'fxr.model'},
'R1': {'r1_0_val': 0.7, 'baseline_end_idx': 1}}
mdl_pcl_key = dict(technique='BOLERO', configuration=cfg)
mdl_pcl = database.get_or_create(Protocol, mdl_pcl_key)
# The source protocol.
scan_pcl = database.get_or_create(Protocol, dict(technique='T1'))
source = Modeling.Source(scan=scan_pcl)
# The modeling data.
ktrans_img = Image(name='ktrans.nii.gz')
ktrans = Modeling.ParameterResult(image=ktrans_img)
modeling = Modeling(protocol=mdl_pcl, source=source, resource='pk_01',
result=dict(ktrans=ktrans))
modeling.validate()
# Validate the subject embedded session modeling.
date = datetime(2014, 3, 1)
session = Session(date=date, modelings=[modeling])
session.validate()
subject.encounters = [session]
subject.validate()
if __name__ == "__main__":
import nose
nose.main(defaultTest=__name__)
|
import os
import re
import typing
from OpenSSL import crypto
from mitmproxy import certs
from mitmproxy import exceptions
from mitmproxy import options as moptions
from mitmproxy.net import server_spec
class HostMatcher:
def __init__(self, handle, patterns=tuple()):
self.handle = handle
self.patterns = list(patterns)
self.regexes = [re.compile(p, re.IGNORECASE) for p in self.patterns]
def __call__(self, address):
if not address:
return False
host = "%s:%s" % address
if self.handle in ["ignore", "tcp"]:
return any(rex.search(host) for rex in self.regexes)
else: # self.handle == "allow"
return not any(rex.search(host) for rex in self.regexes)
def __bool__(self):
return bool(self.patterns)
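# Illustrative semantics (pattern is made up): with handle="ignore" or
# handle="tcp", __call__ returns True for a matching "host:port"; with
# handle="allow" the test is inverted, so only non-matching hosts return True.
# matcher = HostMatcher("ignore", [r"example\.com:443"])
# matcher(("example.com", 443))  # -> True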
class ProxyConfig:
def __init__(self, options: moptions.Options) -> None:
self.options = options
self.certstore: certs.CertStore
self.check_filter: typing.Optional[HostMatcher] = None
self.check_tcp: typing.Optional[HostMatcher] = None
self.upstream_server: typing.Optional[server_spec.ServerSpec] = None
self.configure(options, set(options.keys()))
options.changed.connect(self.configure)
def configure(self, options: moptions.Options, updated: typing.Any) -> None:
if options.allow_hosts and options.ignore_hosts:
raise exceptions.OptionsError("--ignore-hosts and --allow-hosts are mutually "
"exclusive; please choose one.")
if options.ignore_hosts:
self.check_filter = HostMatcher("ignore", options.ignore_hosts)
elif options.allow_hosts:
self.check_filter = HostMatcher("allow", options.allow_hosts)
else:
self.check_filter = HostMatcher(False)
if "tcp_hosts" in updated:
self.check_tcp = HostMatcher("tcp", options.tcp_hosts)
certstore_path = os.path.expanduser(options.confdir)
if not os.path.exists(os.path.dirname(certstore_path)):
raise exceptions.OptionsError(
"Certificate Authority parent directory does not exist: %s" %
os.path.dirname(certstore_path)
)
key_size = options.key_size
self.certstore = certs.CertStore.from_store(
certstore_path,
moptions.CONF_BASENAME,
key_size
)
for c in options.certs:
parts = c.split("=", 1)
if len(parts) == 1:
parts = ["*", parts[0]]
cert = os.path.expanduser(parts[1])
if not os.path.exists(cert):
raise exceptions.OptionsError(
"Certificate file does not exist: %s" % cert
)
try:
self.certstore.add_cert_file(parts[0], cert)
except crypto.Error:
raise exceptions.OptionsError(
"Invalid certificate format: %s" % cert
)
m = options.mode
if m.startswith("upstream:") or m.startswith("reverse:"):
_, spec = server_spec.parse_with_mode(options.mode)
self.upstream_server = spec
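# --- Hedged usage sketch (not part of the original module) ---
# Illustrates the HostMatcher semantics defined above: "ignore"/"tcp" matchers
# return True when any pattern matches the host, while "allow" matchers return
# True only when no pattern matches. The pattern below is an arbitrary example.
if __name__ == "__main__":
    example = ("example.com", 443)
    ignore = HostMatcher("ignore", [r"example\.com:443"])
    allow = HostMatcher("allow", [r"example\.com:443"])
    print(ignore(example))  # True: traffic to this host would be ignored
    print(allow(example))   # False: the host falls outside the allow list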
|
"""
mbed SDK
Copyright (c) 2014 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from exporters import Exporter
from os.path import splitext, basename
class CoIDE(Exporter):
NAME = 'CoIDE'
TOOLCHAIN = 'GCC_ARM'
TARGETS = [
'KL25Z',
'KL05Z',
]
# seems like CoIDE currently supports only one type
FILE_TYPES = {
'c_sources':'1',
'cpp_sources':'1',
's_sources':'1'
}
def generate(self):
self.resources.win_to_unix()
source_files = []
for r_type, n in CoIDE.FILE_TYPES.iteritems():
for file in getattr(self.resources, r_type):
source_files.append({
'name': basename(file), 'type': n, 'path': file
})
libraries = []
for lib in self.resources.libraries:
l, _ = splitext(basename(lib))
libraries.append(l[3:])
ctx = {
'name': self.program_name,
'source_files': source_files,
'include_paths': self.resources.inc_dirs,
'scatter_file': self.resources.linker_script,
'library_paths': self.resources.lib_dirs,
'object_files': self.resources.objects,
'libraries': libraries,
'symbols': self.toolchain.get_symbols()
}
target = self.target.lower()
# Project file
self.gen_file('coide_%s.coproj.tmpl' % target, ctx, '%s.coproj' % self.program_name)
|
"""
Project Name: Twitter Tagcloud
Author: Alexandru Buliga
Email: bugaaa92@gmail.com
"""
import sys
import re
import logging
import json
import redis
from threading import currentThread, enumerate, Lock, Thread
from collections import Counter, OrderedDict
from datetime import datetime
import tweepy
import resource
class TweetRetriever:
"""
    Retrieves tweets using the Twitter API provided by tweepy
Performs authentication with OAuth protocol
"""
def __init__(self, creds, stopwords):
"""
Constructor method
        @param creds: dictionary containing authentication tokens
@param stopwords: set of words that are not taken into account
"""
self.stopwords = stopwords
self.creds = creds
# Result per page constant defined here
self.RESULTS_PER_PAGE = 100
# OAuth Authentication
self.auth = tweepy.OAuthHandler(
creds['consumer_key'], creds['consumer_secret'])
self.auth.secure = True
self.auth.set_access_token(
creds['access_token'], creds['access_token_secret'])
        # Setting up the Tweepy API
self.api = tweepy.API(self.auth)
# Used to guarantee atomic access to the global counter
self.lock = Lock()
# List used to hold the word counts
self.globalCountList = []
# Setting local redis server
self.redisServer = redis.Redis('localhost')
self.redisServer.flushall()
def doWork(self, tweetList):
"""
        Function associated with a worker thread; collects all the words and
        their occurrences in tweetList and updates the global counter
@param tweetList: a list of tweets for the worker thread
"""
# Get the list of words
wordList = []
cleanWordList = []
for tweetText in tweetList:
wordList.extend(re.findall(r"[\w']+", tweetText.lower()))
# Convert the strings to ascii by uncommenting the line after next
for word in wordList:
# word = word.encode('ascii', 'ignore')
if word not in self.stopwords:
self.redisServer.incr(word)
def run(self, durationInterval, wordCount):
"""
Tweets retrieval method
@param durationInterval: the duration of the data fetch process
@param wordCount [optional]: how many results to show
"""
counter = 0
startTime = None
tweetList = []
if durationInterval <= 0:
return
# Get tweepy cursor
cursor = tweepy.Cursor(self.api.search,
q = "a",
count = self.RESULTS_PER_PAGE,
result_type = "recent",
lang = "en").items()
# Iterate all tweets in the past durationInterval seconds using Cursor
while True:
try:
tweet = cursor.next()
except tweepy.TweepError:
print "Error. Exceeded Twitter request limit.", \
"Try again in 15 minutes."
break
# Store info about the tweet
postTime = tweet.created_at
tweetList.append(tweet.text)
if startTime:
# Check if durationInterval has passed and we have to stop
if abs((postTime - startTime).total_seconds()) > durationInterval:
# Start last worker thread
Thread(target = TweetRetriever.doWork,
args = (self, tweetList)).start()
break
else:
# Mark the current time of the first retrieved tweet and count
# durationInterval seconds starting from here
startTime = postTime
counter += 1
if counter == self.RESULTS_PER_PAGE:
# Start worker thread
Thread(target = TweetRetriever.doWork,
args = (self, tweetList)).start()
counter = 0
tweetList = []
        # Wait for the worker threads to finish their work
main_thread = currentThread()
for thread in enumerate():
if thread is main_thread:
continue
thread.join()
keysList = self.redisServer.keys(pattern = '*')
for key in keysList:
self.globalCountList.append((key, int(self.redisServer.get(key))))
self.globalCountList.sort(key = lambda x: x[1], reverse = True)
if (wordCount >= 0):
# Count how many other words there are
otherWordCounter = self.globalCountList[wordCount::]
otherCount = sum(count for _, count in otherWordCounter)
# Update the global counter with the special word, other
self.globalCountList = self.globalCountList[:wordCount:]
self.globalCountList.append(('other', otherCount))
# Write results to a local JSON file
self.writeResult()
def writeResult(self):
"""
Write results to a local JSON file
"""
wcList = []
# Convert list elements to dictionary for pretty printing
for elem in self.globalCountList:
wcList.append(OrderedDict([('word', elem[0]), ('count', elem[1])]))
with open('results.json', 'w') as out_file:
json.dump(wcList, out_file, indent = 4, separators = (',', ': '))
def main():
"""
Main function definition
"""
# Disabling some ugly warnings
logging.captureWarnings(True)
# Verifying if the command-line arguments are passed
if len(sys.argv) < 2:
print "Error. Run: python tagcloud.py <duration> [<wordCount>]"
sys.exit()
# Getting the duration of the data fetch process
durationInterval = sys.argv[1]
wordCount = -1
try:
durationInterval = int(durationInterval)
except ValueError:
print "Error. Arguments must be numbers!"
sys.exit()
# If the word count argument is passed, get it
if len(sys.argv) == 3:
try:
wordCount = int(sys.argv[2])
except ValueError:
print "Error. Arguments must be numbers!"
sys.exit()
# Start retrieving tweets
tweetRetriever = TweetRetriever(resource.creds, resource.stopwords)
tweetRetriever.run(durationInterval, wordCount)
"""
Start main
"""
if __name__ == '__main__':
main()
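# --- Hedged note (not part of the original script) ---
# writeResult() produces results.json as a list of {"word": ..., "count": ...}
# objects sorted by descending count, with a trailing "other" entry when a
# wordCount limit is given. Illustrative (made-up) values:
# [
#     {"word": "giveaway", "count": 412},
#     {"word": "weekend", "count": 397},
#     {"word": "other", "count": 3170}
# ]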
|
# Wrapper module for _socket, providing some additional facilities
# implemented in Python.
"""\
This module provides socket operations and some related functions.
On Unix, it supports IP (Internet Protocol) and Unix domain sockets.
On other systems, it only supports IP. Functions specific for a
socket are available as methods of the socket object.
Functions:
socket() -- create a new socket object
socketpair() -- create a pair of new socket objects [*]
fromfd() -- create a socket object from an open file descriptor [*]
fromshare() -- create a socket object from data received from socket.share() [*]
gethostname() -- return the current hostname
gethostbyname() -- map a hostname to its IP number
gethostbyaddr() -- map an IP number or hostname to DNS info
getservbyname() -- map a service name and a protocol name to a port number
getprotobyname() -- map a protocol name (e.g. 'tcp') to a number
ntohs(), ntohl() -- convert 16, 32 bit int from network to host byte order
htons(), htonl() -- convert 16, 32 bit int from host to network byte order
inet_aton() -- convert IP addr string (123.45.67.89) to 32-bit packed format
inet_ntoa() -- convert 32-bit packed format IP to string (123.45.67.89)
socket.getdefaulttimeout() -- get the default timeout value
socket.setdefaulttimeout() -- set the default timeout value
create_connection() -- connects to an address, with an optional timeout and
optional source address.
[*] not available on all platforms!
Special objects:
SocketType -- type object for socket objects
error -- exception raised for I/O errors
has_ipv6 -- boolean value indicating if IPv6 is supported
IntEnum constants:
AF_INET, AF_UNIX -- socket domains (first argument to socket() call)
SOCK_STREAM, SOCK_DGRAM, SOCK_RAW -- socket types (second argument)
Integer constants:
Many other constants may be defined; these may be used in calls to
the setsockopt() and getsockopt() methods.
"""
import _socket
from _socket import *
import os, sys, io
from enum import IntEnum
try:
import errno
except ImportError:
errno = None
EBADF = getattr(errno, 'EBADF', 9)
EAGAIN = getattr(errno, 'EAGAIN', 11)
EWOULDBLOCK = getattr(errno, 'EWOULDBLOCK', 11)
__all__ = ["fromfd", "getfqdn", "create_connection",
"AddressFamily", "SocketKind"]
__all__.extend(os._get_exports_list(_socket))
# Set up the socket.AF_* socket.SOCK_* constants as members of IntEnums for
# nicer string representations.
# Note that _socket only knows about the integer values. The public interface
# in this module understands the enums and translates them back from integers
# where needed (e.g. .family property of a socket object).
IntEnum._convert(
'AddressFamily',
__name__,
lambda C: C.isupper() and C.startswith('AF_'))
IntEnum._convert(
'SocketKind',
__name__,
lambda C: C.isupper() and C.startswith('SOCK_'))
_LOCALHOST = '127.0.0.1'
_LOCALHOST_V6 = '::1'
def _intenum_converter(value, enum_klass):
"""Convert a numeric family value to an IntEnum member.
If it's not a known member, return the numeric value itself.
"""
try:
return enum_klass(value)
except ValueError:
return value
_realsocket = socket
# WSA error codes
if sys.platform.lower().startswith("win"):
errorTab = {}
errorTab[10004] = "The operation was interrupted."
errorTab[10009] = "A bad file handle was passed."
errorTab[10013] = "Permission denied."
    errorTab[10014] = "A fault occurred on the network."  # WSAEFAULT
errorTab[10022] = "An invalid operation was attempted."
errorTab[10035] = "The socket operation would block"
errorTab[10036] = "A blocking operation is already in progress."
errorTab[10048] = "The network address is in use."
errorTab[10054] = "The connection has been reset."
errorTab[10058] = "The network has been shut down."
errorTab[10060] = "The operation timed out."
errorTab[10061] = "Connection refused."
errorTab[10063] = "The name is too long."
errorTab[10064] = "The host is down."
errorTab[10065] = "The host is unreachable."
__all__.append("errorTab")
class socket(_socket.socket):
"""A subclass of _socket.socket adding the makefile() method."""
__slots__ = ["__weakref__", "_io_refs", "_closed"]
def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, fileno=None):
# For user code address family and type values are IntEnum members, but
# for the underlying _socket.socket they're just integers. The
# constructor of _socket.socket converts the given argument to an
# integer automatically.
_socket.socket.__init__(self, family, type, proto, fileno)
self._io_refs = 0
self._closed = False
def __enter__(self):
return self
def __exit__(self, *args):
if not self._closed:
self.close()
def __repr__(self):
"""Wrap __repr__() to reveal the real class name and socket
address(es).
"""
closed = getattr(self, '_closed', False)
s = "<%s.%s%s fd=%i, family=%s, type=%s, proto=%i" \
% (self.__class__.__module__,
self.__class__.__name__,
" [closed]" if closed else "",
self.fileno(),
self.family,
self.type,
self.proto)
if not closed:
try:
laddr = self.getsockname()
if laddr:
s += ", laddr=%s" % str(laddr)
except error:
pass
try:
raddr = self.getpeername()
if raddr:
s += ", raddr=%s" % str(raddr)
except error:
pass
s += '>'
return s
def __getstate__(self):
raise TypeError("Cannot serialize socket object")
def dup(self):
"""dup() -> socket object
Duplicate the socket. Return a new socket object connected to the same
system resource. The new socket is non-inheritable.
"""
fd = dup(self.fileno())
sock = self.__class__(self.family, self.type, self.proto, fileno=fd)
sock.settimeout(self.gettimeout())
return sock
def accept(self):
"""accept() -> (socket object, address info)
Wait for an incoming connection. Return a new socket
representing the connection, and the address of the client.
For IP sockets, the address info is a pair (hostaddr, port).
"""
fd, addr = self._accept()
# If our type has the SOCK_NONBLOCK flag, we shouldn't pass it onto the
# new socket. We do not currently allow passing SOCK_NONBLOCK to
# accept4, so the returned socket is always blocking.
type = self.type & ~globals().get("SOCK_NONBLOCK", 0)
sock = socket(self.family, type, self.proto, fileno=fd)
# Issue #7995: if no default timeout is set and the listening
# socket had a (non-zero) timeout, force the new socket in blocking
# mode to override platform-specific socket flags inheritance.
if getdefaulttimeout() is None and self.gettimeout():
sock.setblocking(True)
return sock, addr
def makefile(self, mode="r", buffering=None, *,
encoding=None, errors=None, newline=None):
"""makefile(...) -> an I/O stream connected to the socket
The arguments are as for io.open() after the filename,
except the only mode characters supported are 'r', 'w' and 'b'.
The semantics are similar too. (XXX refactor to share code?)
"""
if not set(mode) <= {"r", "w", "b"}:
raise ValueError("invalid mode %r (only r, w, b allowed)" % (mode,))
writing = "w" in mode
reading = "r" in mode or not writing
assert reading or writing
binary = "b" in mode
rawmode = ""
if reading:
rawmode += "r"
if writing:
rawmode += "w"
raw = SocketIO(self, rawmode)
self._io_refs += 1
if buffering is None:
buffering = -1
if buffering < 0:
buffering = io.DEFAULT_BUFFER_SIZE
if buffering == 0:
if not binary:
raise ValueError("unbuffered streams must be binary")
return raw
if reading and writing:
buffer = io.BufferedRWPair(raw, raw, buffering)
elif reading:
buffer = io.BufferedReader(raw, buffering)
else:
assert writing
buffer = io.BufferedWriter(raw, buffering)
if binary:
return buffer
text = io.TextIOWrapper(buffer, encoding, errors, newline)
text.mode = mode
return text
def _decref_socketios(self):
if self._io_refs > 0:
self._io_refs -= 1
if self._closed:
self.close()
def _real_close(self, _ss=_socket.socket):
# This function should not reference any globals. See issue #808164.
_ss.close(self)
def close(self):
# This function should not reference any globals. See issue #808164.
self._closed = True
if self._io_refs <= 0:
self._real_close()
def detach(self):
"""detach() -> file descriptor
Close the socket object without closing the underlying file descriptor.
The object cannot be used after this call, but the file descriptor
can be reused for other purposes. The file descriptor is returned.
"""
self._closed = True
return super().detach()
@property
def family(self):
"""Read-only access to the address family for this socket.
"""
return _intenum_converter(super().family, AddressFamily)
@property
def type(self):
"""Read-only access to the socket type.
"""
return _intenum_converter(super().type, SocketKind)
if os.name == 'nt':
def get_inheritable(self):
return os.get_handle_inheritable(self.fileno())
def set_inheritable(self, inheritable):
os.set_handle_inheritable(self.fileno(), inheritable)
else:
def get_inheritable(self):
return os.get_inheritable(self.fileno())
def set_inheritable(self, inheritable):
os.set_inheritable(self.fileno(), inheritable)
get_inheritable.__doc__ = "Get the inheritable flag of the socket"
set_inheritable.__doc__ = "Set the inheritable flag of the socket"
def fromfd(fd, family, type, proto=0):
""" fromfd(fd, family, type[, proto]) -> socket object
Create a socket object from a duplicate of the given file
descriptor. The remaining arguments are the same as for socket().
"""
nfd = dup(fd)
return socket(family, type, proto, nfd)
if hasattr(_socket.socket, "share"):
def fromshare(info):
""" fromshare(info) -> socket object
Create a socket object from the bytes object returned by
socket.share(pid).
"""
return socket(0, 0, 0, info)
__all__.append("fromshare")
if hasattr(_socket, "socketpair"):
def socketpair(family=None, type=SOCK_STREAM, proto=0):
"""socketpair([family[, type[, proto]]]) -> (socket object, socket object)
Create a pair of socket objects from the sockets returned by the platform
socketpair() function.
The arguments are the same as for socket() except the default family is
AF_UNIX if defined on the platform; otherwise, the default is AF_INET.
"""
if family is None:
try:
family = AF_UNIX
except NameError:
family = AF_INET
a, b = _socket.socketpair(family, type, proto)
a = socket(family, type, proto, a.detach())
b = socket(family, type, proto, b.detach())
return a, b
else:
# Origin: https://gist.github.com/4325783, by Geert Jansen. Public domain.
def socketpair(family=AF_INET, type=SOCK_STREAM, proto=0):
if family == AF_INET:
host = _LOCALHOST
elif family == AF_INET6:
host = _LOCALHOST_V6
else:
raise ValueError("Only AF_INET and AF_INET6 socket address families "
"are supported")
if type != SOCK_STREAM:
raise ValueError("Only SOCK_STREAM socket type is supported")
if proto != 0:
raise ValueError("Only protocol zero is supported")
# We create a connected TCP socket. Note the trick with
# setblocking(False) that prevents us from having to create a thread.
lsock = socket(family, type, proto)
try:
lsock.bind((host, 0))
lsock.listen()
# On IPv6, ignore flow_info and scope_id
addr, port = lsock.getsockname()[:2]
csock = socket(family, type, proto)
try:
csock.setblocking(False)
try:
csock.connect((addr, port))
except (BlockingIOError, InterruptedError):
pass
csock.setblocking(True)
ssock, _ = lsock.accept()
except:
csock.close()
raise
finally:
lsock.close()
return (ssock, csock)
__all__.append("socketpair")
socketpair.__doc__ = """socketpair([family[, type[, proto]]]) -> (socket object, socket object)
Create a pair of socket objects from the sockets returned by the platform
socketpair() function.
The arguments are the same as for socket() except the default family is AF_UNIX
if defined on the platform; otherwise, the default is AF_INET.
"""
_blocking_errnos = { EAGAIN, EWOULDBLOCK }
class SocketIO(io.RawIOBase):
"""Raw I/O implementation for stream sockets.
This class supports the makefile() method on sockets. It provides
the raw I/O interface on top of a socket object.
"""
# One might wonder why not let FileIO do the job instead. There are two
# main reasons why FileIO is not adapted:
    # - it wouldn't work under Windows (where you can't use read() and
# write() on a socket handle)
# - it wouldn't work with socket timeouts (FileIO would ignore the
# timeout and consider the socket non-blocking)
# XXX More docs
def __init__(self, sock, mode):
if mode not in ("r", "w", "rw", "rb", "wb", "rwb"):
raise ValueError("invalid mode: %r" % mode)
io.RawIOBase.__init__(self)
self._sock = sock
if "b" not in mode:
mode += "b"
self._mode = mode
self._reading = "r" in mode
self._writing = "w" in mode
self._timeout_occurred = False
def readinto(self, b):
"""Read up to len(b) bytes into the writable buffer *b* and return
the number of bytes read. If the socket is non-blocking and no bytes
are available, None is returned.
If *b* is non-empty, a 0 return value indicates that the connection
was shutdown at the other end.
"""
self._checkClosed()
self._checkReadable()
if self._timeout_occurred:
raise OSError("cannot read from timed out object")
while True:
try:
return self._sock.recv_into(b)
except timeout:
self._timeout_occurred = True
raise
except InterruptedError:
continue
except error as e:
if e.args[0] in _blocking_errnos:
return None
raise
def write(self, b):
"""Write the given bytes or bytearray object *b* to the socket
and return the number of bytes written. This can be less than
len(b) if not all data could be written. If the socket is
non-blocking and no bytes could be written None is returned.
"""
self._checkClosed()
self._checkWritable()
try:
return self._sock.send(b)
except error as e:
# XXX what about EINTR?
if e.args[0] in _blocking_errnos:
return None
raise
def readable(self):
"""True if the SocketIO is open for reading.
"""
if self.closed:
raise ValueError("I/O operation on closed socket.")
return self._reading
def writable(self):
"""True if the SocketIO is open for writing.
"""
if self.closed:
raise ValueError("I/O operation on closed socket.")
return self._writing
def seekable(self):
"""True if the SocketIO is open for seeking.
"""
if self.closed:
raise ValueError("I/O operation on closed socket.")
return super().seekable()
def fileno(self):
"""Return the file descriptor of the underlying socket.
"""
self._checkClosed()
return self._sock.fileno()
@property
def name(self):
if not self.closed:
return self.fileno()
else:
return -1
@property
def mode(self):
return self._mode
def close(self):
"""Close the SocketIO object. This doesn't close the underlying
socket, except if all references to it have disappeared.
"""
if self.closed:
return
io.RawIOBase.close(self)
self._sock._decref_socketios()
self._sock = None
def getfqdn(name=''):
"""Get fully qualified domain name from name.
An empty argument is interpreted as meaning the local host.
First the hostname returned by gethostbyaddr() is checked, then
possibly existing aliases. In case no FQDN is available, hostname
from gethostname() is returned.
"""
name = name.strip()
if not name or name == '0.0.0.0':
name = gethostname()
try:
hostname, aliases, ipaddrs = gethostbyaddr(name)
except error:
pass
else:
aliases.insert(0, hostname)
for name in aliases:
if '.' in name:
break
else:
name = hostname
return name
_GLOBAL_DEFAULT_TIMEOUT = object()
def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT,
source_address=None):
"""Connect to *address* and return the socket object.
Convenience function. Connect to *address* (a 2-tuple ``(host,
port)``) and return the socket object. Passing the optional
*timeout* parameter will set the timeout on the socket instance
before attempting to connect. If no *timeout* is supplied, the
global default timeout setting returned by :func:`getdefaulttimeout`
is used. If *source_address* is set it must be a tuple of (host, port)
for the socket to bind as a source address before making the connection.
    A host of '' or port 0 tells the OS to use the default.
"""
host, port = address
err = None
for res in getaddrinfo(host, port, 0, SOCK_STREAM):
af, socktype, proto, canonname, sa = res
sock = None
try:
sock = socket(af, socktype, proto)
if timeout is not _GLOBAL_DEFAULT_TIMEOUT:
sock.settimeout(timeout)
if source_address:
sock.bind(source_address)
sock.connect(sa)
return sock
except error as _:
err = _
if sock is not None:
sock.close()
if err is not None:
raise err
else:
raise error("getaddrinfo returns an empty list")
def getaddrinfo(host, port, family=0, type=0, proto=0, flags=0):
"""Resolve host and port into list of address info entries.
Translate the host/port argument into a sequence of 5-tuples that contain
all the necessary arguments for creating a socket connected to that service.
host is a domain name, a string representation of an IPv4/v6 address or
None. port is a string service name such as 'http', a numeric port number or
None. By passing None as the value of host and port, you can pass NULL to
the underlying C API.
The family, type and proto arguments can be optionally specified in order to
narrow the list of addresses returned. Passing zero as a value for each of
these arguments selects the full range of results.
"""
# We override this function since we want to translate the numeric family
# and socket type values to enum constants.
addrlist = []
for res in _socket.getaddrinfo(host, port, family, type, proto, flags):
af, socktype, proto, canonname, sa = res
addrlist.append((_intenum_converter(af, AddressFamily),
_intenum_converter(socktype, SocketKind),
proto, canonname, sa))
return addrlist
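# --- Hedged usage sketch (not part of the original module) ---
# A minimal illustration of create_connection() and makefile() defined above;
# "example.com" is an arbitrary placeholder host and the snippet assumes
# outbound network access is available.
if __name__ == "__main__":
    conn = create_connection(("example.com", 80), timeout=5)
    try:
        conn.sendall(b"HEAD / HTTP/1.0\r\nHost: example.com\r\n\r\n")
        reader = conn.makefile("rb")
        print(reader.readline())  # e.g. b'HTTP/1.0 200 OK\r\n'
        reader.close()
    finally:
        conn.close()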
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import datetime
from oslo.serialization import jsonutils
from oslo.utils import timeutils
from keystoneclient import exceptions
from keystoneclient.tests.v2_0 import utils
from keystoneclient.v2_0 import client
class AuthenticateAgainstKeystoneTests(utils.TestCase):
def setUp(self):
super(AuthenticateAgainstKeystoneTests, self).setUp()
self.TEST_RESPONSE_DICT = {
"access": {
"token": {
"expires": "2020-01-01T00:00:10.000123Z",
"id": self.TEST_TOKEN,
"tenant": {
"id": self.TEST_TENANT_ID
},
},
"user": {
"id": self.TEST_USER
},
"serviceCatalog": self.TEST_SERVICE_CATALOG,
},
}
self.TEST_REQUEST_BODY = {
"auth": {
"passwordCredentials": {
"username": self.TEST_USER,
"password": self.TEST_TOKEN,
},
"tenantId": self.TEST_TENANT_ID,
},
}
def test_authenticate_success_expired(self):
resp_a = copy.deepcopy(self.TEST_RESPONSE_DICT)
resp_b = copy.deepcopy(self.TEST_RESPONSE_DICT)
headers = {'Content-Type': 'application/json'}
# Build an expired token
resp_a['access']['token']['expires'] = (
(timeutils.utcnow() - datetime.timedelta(1)).isoformat())
# Build a new response
TEST_TOKEN = "abcdef"
resp_b['access']['token']['expires'] = '2020-01-01T00:00:10.000123Z'
resp_b['access']['token']['id'] = TEST_TOKEN
# return expired first, and then the new response
self.stub_auth(response_list=[{'json': resp_a, 'headers': headers},
{'json': resp_b, 'headers': headers}])
cs = client.Client(tenant_id=self.TEST_TENANT_ID,
auth_url=self.TEST_URL,
username=self.TEST_USER,
password=self.TEST_TOKEN)
self.assertEqual(cs.management_url,
self.TEST_RESPONSE_DICT["access"]["serviceCatalog"][3]
['endpoints'][0]["adminURL"])
self.assertEqual(cs.auth_token, TEST_TOKEN)
self.assertRequestBodyIs(json=self.TEST_REQUEST_BODY)
def test_authenticate_failure(self):
_auth = 'auth'
_cred = 'passwordCredentials'
_pass = 'password'
self.TEST_REQUEST_BODY[_auth][_cred][_pass] = 'bad_key'
error = {"unauthorized": {"message": "Unauthorized",
"code": "401"}}
self.stub_auth(status_code=401, json=error)
# Workaround for issue with assertRaises on python2.6
# where with assertRaises(exceptions.Unauthorized): doesn't work
# right
def client_create_wrapper():
client.Client(username=self.TEST_USER,
password="bad_key",
tenant_id=self.TEST_TENANT_ID,
auth_url=self.TEST_URL)
self.assertRaises(exceptions.Unauthorized, client_create_wrapper)
self.assertRequestBodyIs(json=self.TEST_REQUEST_BODY)
def test_auth_redirect(self):
self.stub_auth(status_code=305, text='Use Proxy',
headers={'Location': self.TEST_ADMIN_URL + "/tokens"})
self.stub_auth(base_url=self.TEST_ADMIN_URL,
json=self.TEST_RESPONSE_DICT)
cs = client.Client(username=self.TEST_USER,
password=self.TEST_TOKEN,
tenant_id=self.TEST_TENANT_ID,
auth_url=self.TEST_URL)
self.assertEqual(cs.management_url,
self.TEST_RESPONSE_DICT["access"]["serviceCatalog"][3]
['endpoints'][0]["adminURL"])
self.assertEqual(cs.auth_token,
self.TEST_RESPONSE_DICT["access"]["token"]["id"])
self.assertRequestBodyIs(json=self.TEST_REQUEST_BODY)
def test_authenticate_success_password_scoped(self):
self.stub_auth(json=self.TEST_RESPONSE_DICT)
cs = client.Client(username=self.TEST_USER,
password=self.TEST_TOKEN,
tenant_id=self.TEST_TENANT_ID,
auth_url=self.TEST_URL)
self.assertEqual(cs.management_url,
self.TEST_RESPONSE_DICT["access"]["serviceCatalog"][3]
['endpoints'][0]["adminURL"])
self.assertEqual(cs.auth_token,
self.TEST_RESPONSE_DICT["access"]["token"]["id"])
self.assertRequestBodyIs(json=self.TEST_REQUEST_BODY)
def test_authenticate_success_password_unscoped(self):
del self.TEST_RESPONSE_DICT['access']['serviceCatalog']
del self.TEST_REQUEST_BODY['auth']['tenantId']
self.stub_auth(json=self.TEST_RESPONSE_DICT)
cs = client.Client(username=self.TEST_USER,
password=self.TEST_TOKEN,
auth_url=self.TEST_URL)
self.assertEqual(cs.auth_token,
self.TEST_RESPONSE_DICT["access"]["token"]["id"])
self.assertFalse('serviceCatalog' in cs.service_catalog.catalog)
self.assertRequestBodyIs(json=self.TEST_REQUEST_BODY)
def test_auth_url_token_authentication(self):
fake_token = 'fake_token'
fake_url = '/fake-url'
fake_resp = {'result': True}
self.stub_auth(json=self.TEST_RESPONSE_DICT)
self.stub_url('GET', [fake_url], json=fake_resp,
base_url=self.TEST_ADMIN_IDENTITY_ENDPOINT)
cl = client.Client(auth_url=self.TEST_URL,
token=fake_token)
json_body = jsonutils.loads(self.requests.last_request.body)
self.assertEqual(json_body['auth']['token']['id'], fake_token)
resp, body = cl.get(fake_url)
self.assertEqual(fake_resp, body)
token = self.requests.last_request.headers.get('X-Auth-Token')
self.assertEqual(self.TEST_TOKEN, token)
def test_authenticate_success_token_scoped(self):
del self.TEST_REQUEST_BODY['auth']['passwordCredentials']
self.TEST_REQUEST_BODY['auth']['token'] = {'id': self.TEST_TOKEN}
self.stub_auth(json=self.TEST_RESPONSE_DICT)
cs = client.Client(token=self.TEST_TOKEN,
tenant_id=self.TEST_TENANT_ID,
auth_url=self.TEST_URL)
self.assertEqual(cs.management_url,
self.TEST_RESPONSE_DICT["access"]["serviceCatalog"][3]
['endpoints'][0]["adminURL"])
self.assertEqual(cs.auth_token,
self.TEST_RESPONSE_DICT["access"]["token"]["id"])
self.assertRequestBodyIs(json=self.TEST_REQUEST_BODY)
def test_authenticate_success_token_scoped_trust(self):
del self.TEST_REQUEST_BODY['auth']['passwordCredentials']
self.TEST_REQUEST_BODY['auth']['token'] = {'id': self.TEST_TOKEN}
self.TEST_REQUEST_BODY['auth']['trust_id'] = self.TEST_TRUST_ID
response = self.TEST_RESPONSE_DICT.copy()
response['access']['trust'] = {"trustee_user_id": self.TEST_USER,
"id": self.TEST_TRUST_ID}
self.stub_auth(json=response)
cs = client.Client(token=self.TEST_TOKEN,
tenant_id=self.TEST_TENANT_ID,
trust_id=self.TEST_TRUST_ID,
auth_url=self.TEST_URL)
self.assertTrue(cs.auth_ref.trust_scoped)
self.assertEqual(cs.auth_ref.trust_id, self.TEST_TRUST_ID)
self.assertEqual(cs.auth_ref.trustee_user_id, self.TEST_USER)
self.assertRequestBodyIs(json=self.TEST_REQUEST_BODY)
def test_authenticate_success_token_unscoped(self):
del self.TEST_REQUEST_BODY['auth']['passwordCredentials']
del self.TEST_REQUEST_BODY['auth']['tenantId']
del self.TEST_RESPONSE_DICT['access']['serviceCatalog']
self.TEST_REQUEST_BODY['auth']['token'] = {'id': self.TEST_TOKEN}
self.stub_auth(json=self.TEST_RESPONSE_DICT)
cs = client.Client(token=self.TEST_TOKEN,
auth_url=self.TEST_URL)
self.assertEqual(cs.auth_token,
self.TEST_RESPONSE_DICT["access"]["token"]["id"])
self.assertFalse('serviceCatalog' in cs.service_catalog.catalog)
self.assertRequestBodyIs(json=self.TEST_REQUEST_BODY)
def test_allow_override_of_auth_token(self):
fake_url = '/fake-url'
fake_token = 'fake_token'
fake_resp = {'result': True}
self.stub_auth(json=self.TEST_RESPONSE_DICT)
self.stub_url('GET', [fake_url], json=fake_resp,
base_url=self.TEST_ADMIN_IDENTITY_ENDPOINT)
cl = client.Client(username='exampleuser',
password='password',
tenant_name='exampleproject',
auth_url=self.TEST_URL)
self.assertEqual(cl.auth_token, self.TEST_TOKEN)
# the token returned from the authentication will be used
resp, body = cl.get(fake_url)
self.assertEqual(fake_resp, body)
token = self.requests.last_request.headers.get('X-Auth-Token')
self.assertEqual(self.TEST_TOKEN, token)
# then override that token and the new token shall be used
cl.auth_token = fake_token
resp, body = cl.get(fake_url)
self.assertEqual(fake_resp, body)
token = self.requests.last_request.headers.get('X-Auth-Token')
self.assertEqual(fake_token, token)
# if we clear that overridden token then we fall back to the original
del cl.auth_token
resp, body = cl.get(fake_url)
self.assertEqual(fake_resp, body)
token = self.requests.last_request.headers.get('X-Auth-Token')
self.assertEqual(self.TEST_TOKEN, token)
|
import praw
import urllib.request
import json
import requests
import requests.auth
import os.path
import re
from imgurpython import ImgurClient
from bs4 import BeautifulSoup
imgur_gif_regex = re.compile(r"https?://i\.imgur\.com/[A-Za-z0-9]+\.gif")
def gyazo_link_parser(link):
"""
Parses Gyazo links into their raw (.png or .gif) form (i.gyazo)
"""
# opens the gyazo link
response = urllib.request.urlopen(link)
    # reads the response
html = response.read()
# parses the html using beautifulsoup, and gives me the image link
    parsed = BeautifulSoup(html, "html.parser")
return parsed.img['src']
# old method of handling gyazo links
#title = parsed.title.string
#print(str(title))
#return "http://i.gyazo.com/" + title.replace("Gyazo - ", "")
def imgur_uploader(link, imgur_client):
"""
    Uploads the passed image to imgur, then returns the link from the JSON/dict
    it provides. I'm calling it JSON.
"""
# tries to upload the image to imgur
try:
uploaded_image = imgur_client.upload_from_url(url=link, config=None, anon=True)
except:
# if it crashes, it'll just return False
print("Error when uploading the image to imgur.")
return False
else:
# otherwise, yay, we return a link
print("Successful convert of", link, "to an imgur link", uploaded_image["link"])
if len(imgur_gif_regex.findall(uploaded_image["link"])) != 0:
return uploaded_image["link"] + "v"
return uploaded_image["link"]
def comment_prep(content):
"""
    Prepares the comment text so we have some basic context.
"""
# same comment structure, so we'll just do it in a function
text = "Imgur link: " + content
text += "\n\n\n------\n"
text += "This action was performed by a bot. Message +/u/arrivance for further details."
return text
def comment_poster(comment, content):
try:
comment.reply(content)
except praw.errors.RateLimitExceeded as e:
print("Rate limit exceeded:", e)
except praw.errors.APIException as e:
print("API Exception:", e)
except:
print("Other unknown fault.")
else:
print("Successfully commented on comment ID", comment.id)
def file_checker(filename):
    return os.path.isfile(filename)
def file_maker(filename, structure):
with open(filename, "w") as data_file:
        json.dump(structure, data_file)
return True
def reddit_oauth_token(login_details, user_agent):
client_auth = requests.auth.HTTPBasicAuth(login_details["reddit_client_id"], login_details["reddit_client_secret"])
post_data = {"grant_type": "password", "username": login_details["reddit_user"], "password": login_details["reddit_pass"]}
headers = {"User-Agent": user_agent}
print("Attempting to get the access_token from reddit...")
response = requests.post("https://www.reddit.com/api/v1/access_token", auth=client_auth, data=post_data, headers=headers)
access_token = response.json()["access_token"]
print("access_token succesfully gotten:", access_token)
return access_token
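# --- Hedged usage sketch (not part of the original script) ---
# Illustrates the intended flow of the helpers above: resolve a Gyazo page to
# its raw image URL, then build the reply text. The Gyazo URL is a hypothetical
# placeholder, the call needs live network access, and the imgur upload and
# reddit posting steps are skipped here.
if __name__ == "__main__":
    example_link = "https://gyazo.com/0123456789abcdef0123456789abcdef"  # hypothetical
    raw_image = gyazo_link_parser(example_link)
    print(comment_prep(raw_image))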
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class MonitorsOperations:
"""MonitorsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.elastic.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
**kwargs
) -> AsyncIterable["_models.ElasticMonitorResourceListResponse"]:
"""List all monitors under the specified subscription.
List all monitors under the specified subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ElasticMonitorResourceListResponse or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.elastic.models.ElasticMonitorResourceListResponse]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ElasticMonitorResourceListResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ElasticMonitorResourceListResponse', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ResourceProviderDefaultErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Elastic/monitors'} # type: ignore
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs
) -> AsyncIterable["_models.ElasticMonitorResourceListResponse"]:
"""List all monitors under the specified resource group.
List all monitors under the specified resource group.
:param resource_group_name: The name of the resource group to which the Elastic resource
belongs.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ElasticMonitorResourceListResponse or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.elastic.models.ElasticMonitorResourceListResponse]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ElasticMonitorResourceListResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ElasticMonitorResourceListResponse', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ResourceProviderDefaultErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Elastic/monitors'} # type: ignore
async def get(
self,
resource_group_name: str,
monitor_name: str,
**kwargs
) -> "_models.ElasticMonitorResource":
"""Get the properties of a specific monitor resource.
Get the properties of a specific monitor resource.
:param resource_group_name: The name of the resource group to which the Elastic resource
belongs.
:type resource_group_name: str
:param monitor_name: Monitor resource name.
:type monitor_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ElasticMonitorResource, or the result of cls(response)
:rtype: ~azure.mgmt.elastic.models.ElasticMonitorResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ElasticMonitorResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'monitorName': self._serialize.url("monitor_name", monitor_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ResourceProviderDefaultErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ElasticMonitorResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Elastic/monitors/{monitorName}'} # type: ignore
async def _create_initial(
self,
resource_group_name: str,
monitor_name: str,
body: Optional["_models.ElasticMonitorResource"] = None,
**kwargs
) -> "_models.ElasticMonitorResource":
cls = kwargs.pop('cls', None) # type: ClsType["_models.ElasticMonitorResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'monitorName': self._serialize.url("monitor_name", monitor_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
if body is not None:
body_content = self._serialize.body(body, 'ElasticMonitorResource')
else:
body_content = None
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ResourceProviderDefaultErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ElasticMonitorResource', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ElasticMonitorResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Elastic/monitors/{monitorName}'} # type: ignore
async def begin_create(
self,
resource_group_name: str,
monitor_name: str,
body: Optional["_models.ElasticMonitorResource"] = None,
**kwargs
) -> AsyncLROPoller["_models.ElasticMonitorResource"]:
"""Create a monitor resource.
Create a monitor resource.
:param resource_group_name: The name of the resource group to which the Elastic resource
belongs.
:type resource_group_name: str
:param monitor_name: Monitor resource name.
:type monitor_name: str
:param body: Elastic monitor resource model.
:type body: ~azure.mgmt.elastic.models.ElasticMonitorResource
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ElasticMonitorResource or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.elastic.models.ElasticMonitorResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ElasticMonitorResource"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_initial(
resource_group_name=resource_group_name,
monitor_name=monitor_name,
body=body,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ElasticMonitorResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'monitorName': self._serialize.url("monitor_name", monitor_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Elastic/monitors/{monitorName}'} # type: ignore
async def update(
self,
resource_group_name: str,
monitor_name: str,
body: Optional["_models.ElasticMonitorResourceUpdateParameters"] = None,
**kwargs
) -> "_models.ElasticMonitorResource":
"""Update a monitor resource.
Update a monitor resource.
:param resource_group_name: The name of the resource group to which the Elastic resource
belongs.
:type resource_group_name: str
:param monitor_name: Monitor resource name.
:type monitor_name: str
:param body: Elastic resource model update parameters.
:type body: ~azure.mgmt.elastic.models.ElasticMonitorResourceUpdateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ElasticMonitorResource, or the result of cls(response)
:rtype: ~azure.mgmt.elastic.models.ElasticMonitorResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ElasticMonitorResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'monitorName': self._serialize.url("monitor_name", monitor_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
if body is not None:
body_content = self._serialize.body(body, 'ElasticMonitorResourceUpdateParameters')
else:
body_content = None
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ResourceProviderDefaultErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ElasticMonitorResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Elastic/monitors/{monitorName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
monitor_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01-preview"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'monitorName': self._serialize.url("monitor_name", monitor_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ResourceProviderDefaultErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Elastic/monitors/{monitorName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
monitor_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Delete a monitor resource.
Delete a monitor resource.
:param resource_group_name: The name of the resource group to which the Elastic resource
belongs.
:type resource_group_name: str
:param monitor_name: Monitor resource name.
:type monitor_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
monitor_name=monitor_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'monitorName': self._serialize.url("monitor_name", monitor_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Elastic/monitors/{monitorName}'} # type: ignore
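# Hedged usage sketch (not part of the generated code): assuming the async
# management client exposes this operations class as `client.monitors`, the
# long-running delete is typically driven like this:
#
#     poller = await client.monitors.begin_delete(
#         resource_group_name="my-rg", monitor_name="my-monitor")
#     await poller.result()  # completes once the monitor has been deleted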
|
#
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytz
import numbers
from hashlib import md5
from datetime import datetime
from catalyst.protocol import DATASOURCE_TYPE
from six import iteritems, b
def hash_args(*args, **kwargs):
"""Define a unique string for any set of representable args."""
arg_string = '_'.join([str(arg) for arg in args])
kwarg_string = '_'.join([str(key) + '=' + str(value)
for key, value in iteritems(kwargs)])
combined = ':'.join([arg_string, kwarg_string])
hasher = md5()
hasher.update(b(combined))
return hasher.hexdigest()
def assert_datasource_protocol(event):
"""Assert that an event meets the protocol for datasource outputs."""
assert event.type in DATASOURCE_TYPE
# Done packets have no dt.
if not event.type == DATASOURCE_TYPE.DONE:
assert isinstance(event.dt, datetime)
assert event.dt.tzinfo == pytz.utc
def assert_trade_protocol(event):
"""Assert that an event meets the protocol for datasource TRADE outputs."""
assert_datasource_protocol(event)
assert event.type == DATASOURCE_TYPE.TRADE
assert isinstance(event.price, numbers.Real)
assert isinstance(event.volume, numbers.Integral)
assert isinstance(event.dt, datetime)
def assert_datasource_unframe_protocol(event):
"""Assert that an event is valid output of zp.DATASOURCE_UNFRAME."""
assert event.type in DATASOURCE_TYPE
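# Hedged usage sketch (not part of the original module): hash_args produces a
# deterministic md5 digest for any representable mix of args and kwargs, so the
# same inputs always map to the same cache key.
if __name__ == '__main__':
    key_a = hash_args('AAPL', 5, window=20)
    key_b = hash_args('AAPL', 5, window=20)
    assert key_a == key_b
    print(key_a)  # 32-character hex digest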
|
# -*- coding: gbk -*-
import os
import subprocess
import sys
import commands
import time
def walk_in_dir(root_path):
assd=""
try:
for root, dirs, files in os.walk(root_path, True):
for name in files:
if len(name)>0:
if name[0]!="$":
front_name,ext_name=os.path.splitext(name)
if ext_name.lower() in [".rar",".7z",".zip"]:
zipfilepath=root+os.path.sep+name
cmd = '"'+'./7za.exe'+'" l "'+ windows_cmd_sep_copy(zipfilepath)+'"'
# assd=cmd.decode("gbk")
#print assd
#print type(assd)
#time.sleep(1)
run_result=run_in_subprocesspopen(cmd)
if run_result is not None:
if run_result["model"]!=[]:
print root+os.path.sep+name+" find model"
print "\n".join(run_result["model"])+"\n"
if run_result["motion"]!=[]:
print root+os.path.sep+name+" find motion"
print "\n".join(run_result["motion"])+"\n"
else:
print "unzip error "+root+os.path.sep+name
if ext_name.lower() in [".pmd",".mpo",".pmx",".x"]:
print root+os.path.sep+name+" find model"
print ""
if ext_name.lower() in [".vmd"]:
print root+os.path.sep+name+" find motion"
print ""
except Exception,e:
print str(e)
print assd
def which_platform():
import platform
pythonVersion = platform.python_version()
uname = platform.uname()
if len(uname)>0:
return uname[0]
def run_in_subprocesspopen(cmd):
try:
flag_mmd_cell={}
flag_mmd_cell["model"]=[]
flag_mmd_cell["motion"]=[]
res = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.STDOUT)
k=0
result = res.stdout.readlines()
for i in result:
line_c=i.strip()
if len(line_c)>6:
line_array=line_c.split( )
if line_c[0:5]=="-----" and len(line_array)==5:
if k==0:
k=1
first_part=" ".join(line_array[0:4])+" "
file_part_len=len(first_part)
else:
k+=1
else:
if k==1:
if len(line_c)>(file_part_len+2):
part_line_c=line_c[file_part_len+1:]
front_name,ext_name=os.path.splitext(part_line_c)
if ext_name.lower() in [".pmd",".mpo",".pmx",".x"]:
flag_mmd_cell["model"].append(part_line_c)
if ext_name.lower() in [".vmd"]:
flag_mmd_cell["motion"].append(part_line_c)
return flag_mmd_cell
except:
s=sys.exc_info()
print "Error '%s' happened on line %d" % (s[1],s[2].tb_lineno)
return None
def windows_cmd_sep_copy(org_path):
path=org_path.replace('\\','\\\\')
return path.encode("gbk")
if len(sys.argv)==2:
#walk_in_dir(u"e:\\")
walk_in_dir(sys.argv[1].decode("gbk"))
else:
print "Usage:search_run.py {dir path}"
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Fix.error'
db.delete_column('lint_fix', 'error')
# Adding field 'Fix.solution'
db.add_column('lint_fix', 'solution', self.gf('django.db.models.fields.TextField')(default=''), keep_default=False)
def backwards(self, orm):
# User chose to not deal with backwards NULL issues for 'Fix.error'
raise RuntimeError("Cannot reverse this migration. 'Fix.error' and its values cannot be restored.")
# Deleting field 'Fix.solution'
db.delete_column('lint_fix', 'solution')
models = {
'lint.fix': {
'Meta': {'object_name': 'Fix'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line': ('django.db.models.fields.PositiveIntegerField', [], {}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'report': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fixes'", 'to': "orm['lint.Report']"}),
'solution': ('django.db.models.fields.TextField', [], {}),
'source': ('django.db.models.fields.TextField', [], {})
},
'lint.report': {
'Meta': {'ordering': "['-created_on']", 'object_name': 'Report'},
'created_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'error': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'stage': ('django.db.models.fields.CharField', [], {'default': "'waiting'", 'max_length': '10'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
}
}
complete_apps = ['lint']
|
from gitlab.base import RequiredOptional, RESTManager, RESTObject
from gitlab.mixins import CRUDMixin, NoUpdateMixin, ObjectDeleteMixin, SaveMixin
__all__ = [
"Hook",
"HookManager",
"ProjectHook",
"ProjectHookManager",
"GroupHook",
"GroupHookManager",
]
class Hook(ObjectDeleteMixin, RESTObject):
_url = "/hooks"
_short_print_attr = "url"
class HookManager(NoUpdateMixin, RESTManager):
_path = "/hooks"
_obj_cls = Hook
_create_attrs = RequiredOptional(required=("url",))
class ProjectHook(SaveMixin, ObjectDeleteMixin, RESTObject):
_short_print_attr = "url"
class ProjectHookManager(CRUDMixin, RESTManager):
_path = "/projects/%(project_id)s/hooks"
_obj_cls = ProjectHook
_from_parent_attrs = {"project_id": "id"}
_create_attrs = RequiredOptional(
required=("url",),
optional=(
"push_events",
"issues_events",
"confidential_issues_events",
"merge_requests_events",
"tag_push_events",
"note_events",
"job_events",
"pipeline_events",
"wiki_page_events",
"enable_ssl_verification",
"token",
),
)
_update_attrs = RequiredOptional(
required=("url",),
optional=(
"push_events",
"issues_events",
"confidential_issues_events",
"merge_requests_events",
"tag_push_events",
"note_events",
"job_events",
"pipeline_events",
"wiki_events",
"enable_ssl_verification",
"token",
),
)
class GroupHook(SaveMixin, ObjectDeleteMixin, RESTObject):
_short_print_attr = "url"
class GroupHookManager(CRUDMixin, RESTManager):
_path = "/groups/%(group_id)s/hooks"
_obj_cls = GroupHook
_from_parent_attrs = {"group_id": "id"}
_create_attrs = RequiredOptional(
required=("url",),
optional=(
"push_events",
"issues_events",
"confidential_issues_events",
"merge_requests_events",
"tag_push_events",
"note_events",
"confidential_note_events",
"job_events",
"pipeline_events",
"wiki_page_events",
"deployment_events",
"releases_events",
"subgroup_events",
"enable_ssl_verification",
"token",
),
)
_update_attrs = RequiredOptional(
required=("url",),
optional=(
"push_events",
"issues_events",
"confidential_issues_events",
"merge_requests_events",
"tag_push_events",
"note_events",
"confidential_note_events",
"job_events",
"pipeline_events",
"wiki_page_events",
"deployment_events",
"releases_events",
"subgroup_events",
"enable_ssl_verification",
"token",
),
)
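# Hedged usage sketch (not part of this module); assumes a configured
# python-gitlab client:
#
#     import gitlab
#     gl = gitlab.Gitlab("https://gitlab.example.com", private_token="TOKEN")
#     project = gl.projects.get(42)
#     hook = project.hooks.create({"url": "https://ci.example.com/hook",
#                                  "push_events": True})
#     hook.merge_requests_events = True
#     hook.save()  # ProjectHook mixes in SaveMixin, so the update is persisted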
|
#!/usr/bin/env python
"""
Initialization of the project global variables
"""
import cv2
import os
from var import variables
def init():
initvariables()
def initvariables():
variables.app_path = os.path.dirname(os.path.realpath(__file__))
variables.datasets_path = variables.app_path + '/data'
"""
variables.datasets_name = {
1: 'grazptz1',
2: 'grazptz2',
3: 'pets091',
4: 'pets092',
5: 'pets093',
6: 'pets094',
7: 'pets095',
8: 'pets096',
9: 'pets097',
10: 'pets098',
11: 'pets099',
12: 'oxtown'}
"""
"""
variables.datasets_name = {
1: 'caviar01',
2: 'caviar02',
3: 'caviar03',
4: 'caviar04',
5: 'caviar05'
}
"""
variables.datasets_name = {
1: 'pets01_crop',
2: 'pets091',
3: 'ewap01',
4: 'oxtown',
5: 'grazptz1',
6: 'pets094',
7: 'pets095'
}
variables.app_window_name = 'Main Window'
variables.app_window = cv2.namedWindow(
variables.app_window_name, cv2.WINDOW_NORMAL)
variables.app_window_trackbar_name = 'Main Background Window'
variables.app_window_trackbar = cv2.namedWindow(
variables.app_window_trackbar_name, cv2.WINDOW_NORMAL)
variables.app_window_trackbar_name_2 = 'Secondary Background Window'
variables.app_window_trackbar_2 = cv2.namedWindow(
variables.app_window_trackbar_name_2, cv2.WINDOW_NORMAL)
|
#!/usr/bin/python
# -*- encoding: utf8 -*-
# Author: https://github.com/alexwchan
"""imessage_export.py - this script takes the chat.db or sms.db SQL database
used to store messages on OS X or iOS and spits out a set of JSON files, one
for each thread.
The script is fairly rudimentary, and doesn't do anything to catch or
compensate for errors in the SQL, or malicious input. There also isn't any
support for merging and/or incremental updates (yet).
Use at your own risk, and back up your iMessage database before proceeding.
This depends on the following modules from my drabbles/ repo:
* confirmation.py
* filesequence.py
"""
import argparse
import datetime
from collections import namedtuple
import json
import os
import re
import sqlite3
import sys
from unidecode import unidecode
import confirmation
import filesequence
#------------------------------------------------------------------------------
# Utility functions
#------------------------------------------------------------------------------
def slugify(ustr):
"""Convert Unicode string into an ASCII slug.
Written by Dr. Drang: http://www.leancrew.com/all-this/2014/10/asciifying/
"""
ustr = re.sub(u'[–—/:;,.]', '-', ustr) # replace separating punctuation
astr = unidecode(ustr).lower() # best ASCII subs, lowercase
astr = re.sub(r'[^a-z0-9 -]', '', astr) # delete any other characters
astr = astr.replace(' ', '-') # spaces to hyphens
astr = re.sub(r'-+', '-', astr) # condense repeated hyphens
return astr
def imessage_date_str(date_int):
"""Dates in iMessage are stored as an integer, which counts the number of
seconds since 1 Jan 2001. This function takes an int as input, and returns
an ISO 8601 formatted date string.
"""
start = datetime.datetime(2001, 1, 1)
diff = datetime.timedelta(seconds=int(date_int))
return str(start + diff)
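# Hedged usage sketch (illustrative only, not part of the original script):
#
#     >>> slugify(u'Chat with: Alice')
#     'chat-with-alice'
#     >>> imessage_date_str(0)
#     '2001-01-01 00:00:00'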
def cp_attachment(src, dst):
"""Copy an attachment to the attachments/ directory. If a file by that name
already exists, use the MD5 checksum to see if they're the same, and if
not, go to the next sequential filename.
"""
if src[0] == '~':
src = os.environ['HOME'] + src[1:]
safe_dst = filesequence.safe_file_copy(src, dst)
return safe_dst
#------------------------------------------------------------------------------
# Set up namedtuple instances
#------------------------------------------------------------------------------
Attachment = namedtuple('Attachment', ['guid', 'filename'])
Message = namedtuple('Message', ['guid', 'text', 'handle_id', 'subject', 'date', 'is_from_me', 'attachments'])
def message_dict(message):
"""Convert a Message instance to a dict which can be written to JSON."""
msg_dict = dict()
msg_dict['guid'] = message.guid
msg_dict['text'] = message.text
msg_dict['handle_id'] = message.handle_id
msg_dict['date'] = message.date
msg_dict['is_from_me'] = bool(message.is_from_me)
if message.subject is not None:
msg_dict['subject'] = message.subject
if message.attachments:
msg_dict['attachments'] = message.attachments
return msg_dict
#------------------------------------------------------------------------------
# Functions for reading the SQL database
#------------------------------------------------------------------------------
def handles(cursor):
"""Returns a dict of handles that form (ROWID, id) pairs."""
sql_handles = cursor.execute("SELECT ROWID, id FROM handle").fetchall()
all_handles = dict()
for handle in sql_handles:
rowid, contact_id = handle
all_handles[rowid] = contact_id
return all_handles
def chats(cursor):
"""Returns a dict of chats that form (ROWID, guid) pairs."""
sql_chats = cursor.execute("SELECT ROWID, guid FROM chat").fetchall()
all_chats = dict()
for chat in sql_chats:
rowid, guid = chat
all_chats[rowid] = guid
return all_chats
def attachments(cursor):
"""Returns a dict of attachments, in which the keys are the ROWIDs and the
values are Attachment() namedtuple instances.
"""
sql_attachments = cursor.execute("SELECT ROWID, guid, filename "
"from attachment").fetchall()
all_attachments = dict()
for attachment in sql_attachments:
ROWID, guid, filename = attachment
new_attach = Attachment(guid, filename)
all_attachments[ROWID] = new_attach
return all_attachments
def messages(cursor):
"""Returns a dict of messages, in which the keys are the ROWIDs and the
values are Message() instances.
"""
sql_messages = cursor.execute("SELECT ROWID, guid, text, handle_id, "
"subject, date, is_from_me from "
"message").fetchall()
all_messages = dict()
for message in sql_messages:
ROWID, args = message[0], message[1:]
new_message = Message(*args, attachments=[])
all_messages[ROWID] = new_message
return all_messages
def join_table(cursor, row1, row2):
"""Returns a list of joins, in which the values are a dict of IDs from each
table. row1 and row2 must be specified in the order the table is named.
"""
sql_joins = cursor.execute("SELECT * from %s_%s_join" % (row1, row2))
all_joins = list()
for join in sql_joins:
all_joins.append(join)
return all_joins
#------------------------------------------------------------------------------
# Join everything together: given a list of messages, chats, handles and
# attachments, create a collection of human-readable threads
#------------------------------------------------------------------------------
def unify_message_threads(sql_path, output_dir):
conn = sqlite3.connect(sql_path)
cursor = conn.cursor()
all_messages = messages(cursor)
all_attachments = attachments(cursor)
attachment_dir = os.path.join(output_dir, 'attachments')
thread_dir = os.path.join(output_dir, 'threads')
for output_dir in [attachment_dir, thread_dir]:
if not os.path.isdir(output_dir):
#os.makedirs(output_dir) #permission always gets denied
os.system("sudo mkdir -p ./" + output_dir) #changed this, should work - Connor
# Go through the message_attachment_join table, and append each attachment
# to the associated message.
join_msg_attach = join_table(cursor, 'message', 'attachment')
for join in join_msg_attach:
msg_id, attach_id = join
msg = all_messages[msg_id]
new_attachment = all_attachments[attach_id]
# Update the list of attachments
updated_attachments = msg.attachments
updated_attachments.append(new_attachment)
msg = msg._replace(attachments=updated_attachments)
all_messages[msg_id] = msg
# Replace the handle_id in each Message instance with the actual handle,
# and then discard the all_handles list: we won't use it again
all_handles = handles(cursor)
group_msgs_rowids = list()
for ROWID in all_messages:
msg = all_messages[ROWID]
# For reasons unknown, anything in a group message has handle_id 0. I
# haven't worked out how to determine who sent a particular message, so
# for now I'm just dropping anything with handle_id 0.
try:
true_handle = all_handles[msg.handle_id]
except KeyError:
group_msgs_rowids.append(ROWID)
continue
msg = msg._replace(handle_id=true_handle)
# While we're going through the messages, let's also replace the date
# string with something more human friendly
msg = msg._replace(date=imessage_date_str(msg.date))
# And we'll copy all the attachments into an 'attachments' folder to
# make them easier to find
"""
new_attachments = []
for attachment in msg.attachments:
dst = cp_attachment(attachment.filename, attachment_dir)
new_attachments.append(os.path.basename(dst))
msg = msg._replace(attachments=new_attachments)
"""
all_messages[ROWID] = msg
# Clean up all the group messages
for ROWID in group_msgs_rowids:
del all_messages[ROWID]
# Create a list of threads, go through and assign messages to that thread
all_threads = dict()
all_chats = chats(cursor)
join_chat_message = join_table(cursor, 'chat', 'message')
for join in join_chat_message:
chat_id, msg_id = join
chat_guid = all_chats[chat_id]
# Skip group messages
try:
msg = message_dict(all_messages[msg_id])
except KeyError:
continue
chat_messages = all_threads.get(chat_guid, [])
chat_messages.append(msg)
all_threads[chat_guid] = chat_messages
return all_threads
#------------------------------------------------------------------------------
# Mainline program flow
#------------------------------------------------------------------------------
def main(sql_path, output_dir):
"""Main program flow. This function gets input from the user about where
the SQL database is and where to save the files, checks that it's not about
to overwrite any existing exports, and then runs the export.
"""
"""
#--------------------------------------------------------------------------
# Set up the options for argparse
#--------------------------------------------------------------------------
parser = argparse.ArgumentParser(
description="A script for exporting threads and attachments from the "
"iMessage database to JSON.",
epilog="You should back up your iMessage database BEFORE using this "
"script. I am not responsible for any damage caused to your "
"database by this script."
)
parser.add_argument('-i', '--input', dest='sql_path',
help="path to the chat.db or sms.db SQL file from "
"the iMessage database")
parser.add_argument('-o', '--output', dest='output_dir',
help="path to the output directory to write the "
"thread and attachment data")
args = parser.parse_args()
"""
#--------------------------------------------------------------------------
# Validate user input: check that we have both an input and output file,
# that the input file exists, and that we don't already have export in
# the output directory.
#--------------------------------------------------------------------------
if (sql_path is None) and (output_dir is None):
print("Use the -h or --help flag for a help message.")
sys.exit(1)
if (sql_path is None) or (output_dir is None):
print("Please supply both --input and --output arguments. Use the -h "
"or --help flag for a help message.")
sys.exit(1)
if not os.path.exists(sql_path):
print("The database file %s does not exist." % sql_path)
sys.exit(1)
attach_dir = os.path.join(output_dir, 'attachments')
thread_dir = os.path.join(output_dir, 'threads')
os.system("sudo mkdir -p " + attach_dir) #had to add these
os.system("sudo mkdir -p " + thread_dir) #had to add these
os.system("sudo chmod -R 777 " + attach_dir) #had to add these
os.system("sudo chmod -R 777 " + thread_dir) #had to add these
if os.path.isdir(attach_dir) or os.path.isdir(thread_dir):
result = confirmation.twostep_confirm(
"There is already an export of the iMessage database in the "
"directory %s. Part of this export may be overwritten if you "
"proceed with the script." % output_dir,
'continue'
)
if not result:
print("Okay, stopping. Nothing has been changed.")
sys.exit(1)
#--------------------------------------------------------------------------
# Assuming we've validated the output, go ahead and export the database.
#--------------------------------------------------------------------------
unified_threads = unify_message_threads(sql_path, output_dir)
#for thread_guid, messages in unified_threads.iteritems():
for thread_guid, messages in unified_threads.items():
output_dict = dict({
'guid': thread_guid,
'messages': messages
})
outfile = os.path.join(thread_dir,
"thread_%s.json" % slugify(thread_guid))
with open(outfile, 'w') as ff:
json.dump(output_dict, ff, sort_keys=True, indent=2)
if __name__ == '__main__':
    # The argparse block above is commented out, so read the database path and
    # output directory straight from the command line.
    if len(sys.argv) != 3:
        print("Usage: imessage_export.py <chat.db path> <output directory>")
        sys.exit(1)
    main(sys.argv[1], sys.argv[2])
|
##########################################################################
#
# Copyright (c) 2011-2012, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import IECore
import os
import re
## Implements a Preset that represents changes between two Parameter objects.
# The comparison of elements in a ClassVectorParameter takes into consideration both the parameter name and
# the loaded class name in order to decide whether two elements are the "same". We do that to try to work around the fact
# that the parameter names ("p0", "p1", etc) are very simple and tend to reappear after a sequence of removal/addition
# operations in a ClassVectorParameter. The method is not 100% safe but should work for most cases.
# \todo Consider adding a protected member that is responsible for that comparison and enable derived classes to
# do other kinds of comparisons, for example, using additional parameters such as user labels.
#
class RelativePreset( IECore.Preset ) :
## \param currParameter, IECore.Parameter, represents the parameter state after all changes have been made.
## \param oldParameter, IECore.Parameter, represents the parameter state before any changes.
## \param compareFilter, callable function that receives currParameter and oldParameter child and it should
## return a boolean to indicate if the difference should be computed or not.
def __init__( self, currParameter=None, oldParameter=None, compareFilter = None ) :
IECore.Preset.__init__( self )
self.__data = IECore.CompoundObject()
if compareFilter is None :
self.__compareFilter = lambda x,y: True
else :
self.__compareFilter = compareFilter
# accepts no parameters at all.
if currParameter is None and oldParameter is None :
return
if not isinstance( currParameter, IECore.Parameter ) :
raise TypeError, "Parameter currParameter must be a IECore.Parameter object!"
if not oldParameter is None :
if not isinstance( oldParameter, IECore.Parameter ) :
raise TypeError, "Parameter oldParameter must be a IECore.Parameter object!"
if currParameter.typeId() != oldParameter.typeId() :
raise TypeError, "Mismatching types for currParameter and oldParameter!"
self.__grabParameterChanges( currParameter, oldParameter, self.__data )
## \see IECore.Preset.applicableTo
def applicableTo( self, parameterised, rootParameter ) :
return RelativePreset.__applicableTo( rootParameter, self.__data )
def getDiffData( self ):
"""Returns a IECore.CompoundObject instance that contains the description of all the differences between the two parameters provided when creating this preset."""
return self.__data.copy()
def setDiffData( self, data ):
"""Use this function to recreate a RelativePreset from data previously returned by getDiffData()."""
if not isinstance( data, IECore.CompoundObject ):
raise TypeError, "Invalid data type! Must be a IECore.CompoundObject"
self.__data = data.copy()
## \see IECore.Preset.__call__
def __call__( self, parameterised, rootParameter ) :
if not self.applicableTo( parameterised, rootParameter ) :
raise RuntimeError, "Sorry, this preset is not applicable to the given parameter."
if len( self.__data ) :
self.__applyParameterChanges( rootParameter, self.__data )
def __grabParameterChanges( self, currParameter, oldParameter, data, paramPath = "" ) :
if not oldParameter is None:
if currParameter.staticTypeId() != oldParameter.staticTypeId() :
raise Exception, "Incompatible parameter %s!" % paramPath
if not self.__compareFilter( currParameter, oldParameter ) :
return
if isinstance( currParameter, IECore.ClassParameter ) :
self.__grabClassParameterChanges( currParameter, oldParameter, data, paramPath )
elif isinstance( currParameter, IECore.ClassVectorParameter ) :
self.__grabClassVectorParameterChanges( currParameter, oldParameter, data, paramPath )
elif isinstance( currParameter, IECore.CompoundParameter ) :
self.__grabCompoundParameterChanges( currParameter, oldParameter, data, paramPath )
else :
self.__grabSimpleParameterChanges( currParameter, oldParameter, data, paramPath )
def __grabCompoundParameterChanges( self, currParameter, oldParameter, data, paramPath ) :
for p in currParameter.keys() :
newData = IECore.CompoundObject()
childOldParam = None
if not oldParameter is None :
if p in oldParameter.keys() :
childOldParam = oldParameter[p]
self.__grabParameterChanges(
currParameter[p],
childOldParam,
newData,
paramPath + "." + p
)
if len(newData) :
data[p] = newData
if len(data):
data["_type_"] = IECore.StringData( "CompoundParameter" )
def __grabSimpleParameterChanges( self, currParameter, oldParameter, data, paramPath ) :
if not oldParameter is None :
if currParameter.getValue() == oldParameter.getValue() :
return
data["_type_"] = IECore.StringData( currParameter.typeName() )
data["_value_"] = currParameter.getValue().copy()
def __grabClassParameterChanges( self, currParameter, oldParameter, data, paramPath ) :
c = currParameter.getClass( True )
className = c[1]
classVersion = c[2]
classNameFilter = "*"
try :
classNameFilter = currParameter.userData()["UI"]["classNameFilter"].value
except :
pass
oldClassName = None
oldClassVersion = None
childOldParam = None
if not oldParameter is None :
oldClass = oldParameter.getClass( True )
oldClassName = oldClass[1]
oldClassVersion = oldClass[2]
if oldClass[0] :
childOldParam = oldClass[0].parameters()
classValue = IECore.CompoundObject()
if c[0] :
self.__grabParameterChanges(
c[0].parameters(),
childOldParam,
classValue,
paramPath
)
if len(classValue):
data["_classValue_"] = classValue
if len(data) or className != oldClassName or classVersion != oldClassVersion :
data["_className_"] = IECore.StringData(className)
data["_classVersion_"] = IECore.IntData(classVersion)
data["_classNameFilter_"] = IECore.StringData(classNameFilter)
data["_type_"] = IECore.StringData( "ClassParameter" )
def __grabClassVectorParameterChanges( self, currParameter, oldParameter, data, paramPath ) :
classes = currParameter.getClasses( True )
classNameFilter = "*"
try :
classNameFilter = currParameter.userData()["UI"]["classNameFilter"].value
except :
pass
classNameFilter = IECore.StringData( classNameFilter )
classNames = IECore.StringVectorData()
classVersions = IECore.IntVectorData()
classOrder = IECore.StringVectorData()
values = IECore.CompoundObject()
for c in classes:
pName = c[1]
classOrder.append( pName )
classNames.append( c[2] )
classVersions.append( c[3] )
v = IECore.CompoundObject()
childOldParam = None
if not oldParameter is None and pName in oldParameter.keys() :
oldClass = oldParameter.getClass( pName )
if oldClass :
childOldParam = oldClass.parameters()
self.__grabParameterChanges(
c[0].parameters(),
childOldParam,
v,
paramPath + "." + pName
)
if len(v) :
values[c[1]] = v
removedParams = []
if not oldParameter is None :
removedParams = list( set( oldParameter.keys() ).difference( classOrder ) )
if removedParams :
data["_removedParamNames_"] = IECore.StringVectorData( removedParams )
data["_removedClassNames_"] = IECore.StringVectorData()
for pName in removedParams :
oldClass = oldParameter.getClass( pName, True )
data["_removedClassNames_"].append( oldClass[1] )
modifiedParams = IECore.StringVectorData()
modifiedClassNames = IECore.StringVectorData()
modifiedClassVersions = IECore.IntVectorData()
addedParam = IECore.BoolVectorData()
for i in xrange(0,len(classOrder)):
pName = classOrder[i]
cName = classNames[i]
cVersion = classVersions[i]
oldClassName = None
oldClassVersion = None
if not oldParameter is None :
try:
oldClass = oldParameter.getClass( pName, True )
oldClassName = oldClass[1]
oldClassVersion = oldClass[2]
except Exception, e:
# added parameter...
pass
if cName != oldClassName or cVersion != oldClassVersion :
modifiedParams.append( pName )
modifiedClassNames.append( cName )
modifiedClassVersions.append( cVersion )
added = (oldClassName is None)
# if we are changing the class type, we have to mark as if we
# were removing it too
if cName != oldClassName and not oldClassName is None:
if not "_removedParamNames_" in data :
data["_removedParamNames_"] = IECore.StringVectorData()
data["_removedClassNames_"] = IECore.StringVectorData()
data["_removedParamNames_"].append(pName)
data["_removedClassNames_"].append(oldClassName)
removedParams.append(pName)
added = True
addedParam.append( added )
if len(modifiedParams) :
data["_modifiedParamsNames_"] = modifiedParams
data["_modifiedClassNames_"] = modifiedClassNames
data["_modifiedClassVersions_"] = modifiedClassVersions
data["_addedParam_"] = addedParam
# get all non-new parameters
parameterOrder = filter( lambda n: not n in modifiedParams or not addedParam[ modifiedParams.index(n) ], classOrder )
baseOrder = parameterOrder
if not oldParameter is None :
# get all non-deleted original parameters
baseOrder = filter( lambda n: not n in removedParams, oldParameter.keys() )
if baseOrder != parameterOrder :
if len(baseOrder) != len(parameterOrder):
raise Exception, "Unexpected error: mismatched parameter lists!"
# clamp to the smallest list containing the differences
for start in xrange(0,len(baseOrder)):
if baseOrder[start] != parameterOrder[start] :
break
for endPos in xrange(len(baseOrder),0,-1):
if baseOrder[endPos-1] != parameterOrder[endPos-1] :
break
data["_modifiedOrder_"] = IECore.StringVectorData( parameterOrder[start:endPos] )
if len(values):
# keep the original classes to which the parameters were edited
for pName in values.keys() :
values[pName]["_class_"] = IECore.StringData( classNames[classOrder.index(pName)] )
data["_values_"] = values
if len(data):
data["_classNameFilter_" ] = classNameFilter
data["_type_"] = IECore.StringData( "ClassVectorParameter" )
data["_paramNames_"] = classOrder
data["_classNames_"] = classNames
@staticmethod
def __applyParameterChanges( parameter, data, paramPath = "" ) :
if isinstance( parameter, IECore.ClassParameter ) :
RelativePreset.__applyClassParameterChanges( parameter, data, paramPath )
elif isinstance( parameter, IECore.ClassVectorParameter ) :
RelativePreset.__applyClassVectorChanges( parameter, data, paramPath )
elif isinstance( parameter, IECore.CompoundParameter ) :
RelativePreset.__applyCompoundParameterChanges( parameter, data, paramPath )
elif isinstance( parameter, IECore.Parameter ) :
RelativePreset.__applySimpleParameterChanges( parameter, data, paramPath )
else :
IECore.msg(
IECore.Msg.Level.Warning,
"IECore.RelativePreset",
"Unrecognized type (%s) for parameter %s. Not affected by preset." % ( parameter.typeName(), parameter.name )
)
@staticmethod
def __applyCompoundParameterChanges( parameter, data, paramPath ) :
if data["_type_"].value != "CompoundParameter" :
IECore.msg(
IECore.Msg.Level.Warning,
"IECore.RelativePreset",
"Unable to set preset on '%s'. Expected %s but found CompoundParameter."
% ( paramPath, data["_type_"].value )
)
return
for p in data.keys() :
if p in [ "_type_", "_class_" ] :
continue
if paramPath :
newParamPath = paramPath + "." + p
else :
newParamPath = p
if p not in parameter :
IECore.msg(
IECore.Msg.Level.Warning,
"IECore.RelativePreset",
"Could not find parameter '%s'. Preset value ignored." % newParamPath
)
continue
RelativePreset.__applyParameterChanges( parameter[p], data[p], newParamPath )
@staticmethod
def __applySimpleParameterChanges( parameter, data, paramPath ) :
if data["_type_"].value != parameter.typeName() :
IECore.msg(
IECore.Msg.Level.Warning,
"IECore.RelativePreset",
"Unable to set preset on '%s'. Expected %s but found %s."
% ( paramPath, data["_type_"].value, parameter.typeName() )
)
return
try:
parameter.setValue( data["_value_"] )
except Exception, e:
IECore.msg( IECore.Msg.Level.Warning, "IECore.RelativePreset", str(e) )
@staticmethod
def __applyClassParameterChanges( parameter, data, paramPath ) :
if data["_type_"].value != "ClassParameter" :
IECore.msg(
IECore.Msg.Level.Warning,
"IECore.RelativePreset",
"Unable to set preset on '%s'. Expected %s but found ClassParameter."
% ( paramPath, data["_type_"].value )
)
return
c = parameter.getClass( True )
className = data["_className_"].value
classVersion = data["_classVersion_"].value
if c[1] != className or c[2] != classVersion :
parameter.setClass( className, classVersion )
c = parameter.getClass( False )
if c and '_classValue_' in data :
RelativePreset.__applyParameterChanges( c.parameters(), data["_classValue_"], paramPath )
@staticmethod
def __applyClassVectorChanges( parameter, data, paramPath ) :
if data["_type_"].value != "ClassVectorParameter" :
IECore.msg(
IECore.Msg.Level.Warning,
"IECore.RelativePreset",
"Unable to set preset on '%s'. Expected %s but found ClassVectorParameter."
% ( paramPath, data["_type_"].value )
)
return
# remove parameters if they match in parameter name and class name
if "_removedParamNames_" in data :
for (i,pName) in enumerate( data["_removedParamNames_"] ):
if pName in parameter.keys() :
c = parameter.getClass( pName, True )
if c and c[1] == data["_removedClassNames_"][i] :
parameter.removeClass( pName )
paramRemaps = {}
if "_modifiedParamsNames_" in data :
modifiedParams = data["_modifiedParamsNames_"]
modifiedClassNames = data["_modifiedClassNames_"]
modifiedClassVersions = data["_modifiedClassVersions_"]
addedParam = data["_addedParam_"]
addedCount = 0
# first modify items
for i in range( len( modifiedClassNames ) ) :
if addedParam[i] :
addedCount += 1
else :
# must find an existing matching parameter, no matter what
if modifiedParams[i] in parameter:
c = parameter.getClass( modifiedParams[i], True )
if modifiedClassNames[i] == c[1] :
if modifiedClassVersions[i] != c[2] :
parameter.setClass( modifiedParams[i], modifiedClassNames[i], modifiedClassVersions[i] )
else :
IECore.msg(
IECore.Msg.Level.Warning,
"IECore.RelativePreset",
"Parameter '%s.%s' has a different class. Expected %s but found %s. Ignoring class change on this parameter."
% ( paramPath, modifiedParams[i], modifiedClassNames[i], c[1] )
)
else :
IECore.msg(
IECore.Msg.Level.Warning,
"IECore.RelativePreset",
"Unable to find parameter '%s.%s' in %s. Ignoring class change on this parameter."
% ( paramPath, modifiedParams[i], parameter.name )
)
# get a list of classes before the addition of new items
newOrder = False
newClassList = map( lambda c: c[1:], parameter.getClasses( True ) )
newParamList = map( lambda c: c[0], newClassList )
# compare each class with whatever existed when we created the RelativePreset and see which ones are the same
sameClasses = set()
for c in newClassList :
if '_modifiedParamsNames_' in data :
# If the preset has added this parameter it should not match current parameters in the vector, no matter if the class matches. Is it always the case?
if c[0] in data['_modifiedParamsNames_'] :
if data['_addedParam_'][ data['_modifiedParamsNames_'].index(c[0]) ] :
continue
try :
i = data['_paramNames_'].index(c[0])
except :
continue
if c[1] == data['_classNames_'][i] :
sameClasses.add( c[0] )
if "_modifiedOrder_" in data :
# there was some kind of change in the order of parameters as well...
modifiedOrder = filter( lambda pName: pName in sameClasses, data["_modifiedOrder_"] )
# find the range of parameters that lie between the reordered parameters in the current vector
firstParam = None
lastParam = None
for (i,pName) in enumerate(newParamList) :
if pName in modifiedOrder :
if firstParam is None:
firstParam = i
lastParam = i
if firstParam != lastParam :
# adds one by one the unknown parameters that lied between the reordered parameters.
for pName in newParamList[firstParam:lastParam+1] :
if not pName in modifiedOrder :
modifiedOrder.insert( modifiedOrder.index(baseParam)+1, pName )
baseParam = pName
def classOrder( c1, c2 ):
# if both elements were on the original reordering operation we use their relationship
if c1[0] in modifiedOrder and c2[0] in modifiedOrder:
i1 = modifiedOrder.index( c1[0] )
i2 = modifiedOrder.index( c2[0] )
return cmp( i1, i2 )
# otherwise we use the current order.
i1 = newParamList.index( c1[0] )
i2 = newParamList.index( c2[0] )
return cmp( i1, i2 )
newClassList.sort( classOrder )
newParamList = map( lambda c: c[0], newClassList )
newOrder = True
if "_modifiedParamsNames_" in data :
# now add items to the appropriate spot in the newClassList and newParamList
if addedCount :
newOrder = True
prevActualParam = None
lastActualParamInsertion = None
currClasses = parameter.getClasses( True )
for pName in data["_paramNames_"] :
if pName in sameClasses :
if pName in newParamList :
prevActualParam = pName
continue
if pName in modifiedParams :
i = modifiedParams.index(pName)
if addedParam[ i ] :
if prevActualParam is None :
if lastActualParamInsertion is None :
# Here we assume that the new parameter should
# go to the top because its predecessors don't exist on the
# new vector. Maybe it could also print a warning message..
lastActualParamInsertion = 0
else :
lastActualParamInsertion += 1
else :
lastActualParamInsertion = newParamList.index( prevActualParam ) + 1
prevActualParam = None
if pName in parameter:
newParamName = parameter.newParameterName()
if not re.match("^p[0-9]+$", pName) :
IECore.msg(
IECore.Msg.Level.Warning,
"IECore.RelativePreset",
"Custom parameter %s.%s is being renamed to %s..."
% ( paramPath, pName, newParamName )
)
paramRemaps[ pName ] = newParamName
pName = newParamName
# add the parameter to the vector, so that next calls to parameter.newParameterName() will work.
parameter.setClass( pName, modifiedClassNames[i], modifiedClassVersions[i] )
# update our official new arrays
newParamList.insert(lastActualParamInsertion, pName)
newClassList.insert(lastActualParamInsertion, (pName,modifiedClassNames[i], modifiedClassVersions[i]) )
# update parameters with new order
if newOrder :
parameter.setClasses( newClassList )
if "_values_" in data :
for paramName in data["_values_"].keys() :
remapedParamName = paramRemaps.get( paramName, paramName )
presetValue = data["_values_"][paramName]
if remapedParamName in parameter.keys() :
c = parameter.getClass( remapedParamName, True )
if c[1] == presetValue["_class_"].value :
RelativePreset.__applyParameterChanges(
c[0].parameters(),
presetValue,
paramPath + "." + remapedParamName
)
else :
IECore.msg(
IECore.Msg.Level.Warning,
"IECore.RelativePreset",
"Ignoring preset values for parameter %s.%s. Expected class %s but found %s."
% ( paramPath, remapedParamName, presetValue["_class_"].value, c[1] )
)
else :
IECore.msg(
IECore.Msg.Level.Warning,
"IECore.RelativePreset",
"Unable to find parameter '%s.%s' in %s. Ignoring this preset changes."
% ( paramPath, remapedParamName, parameter.name )
)
@staticmethod
def __applicableTo( parameter, data ) :
if len(data) == 0 :
return True
if parameter.staticTypeId() == IECore.TypeId.CompoundParameter :
if data["_type_"].value != "CompoundParameter":
return False
elif isinstance( parameter, IECore.ClassParameter ) :
if data["_type_"].value != "ClassParameter":
return False
classNameFilter = "*"
try :
classNameFilter = parameter.userData()["UI"]["classNameFilter"].value
except :
pass
if classNameFilter != data["_classNameFilter_"].value:
return False
elif isinstance( parameter, IECore.ClassVectorParameter ) :
if data["_type_"].value != "ClassVectorParameter":
return False
classNameFilter = "*"
try :
classNameFilter = parameter.userData()["UI"]["classNameFilter"].value
except :
pass
if classNameFilter != data["_classNameFilter_"].value:
return False
else :
if data["_type_"].value != parameter.typeName():
return False
if not parameter.valueValid( data["_value_"] )[0]:
return False
return True
IECore.registerRunTimeTyped( RelativePreset )
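# Hedged usage sketch (not part of the original module): capture the difference
# between two parameter states and re-apply it elsewhere. `op_before`,
# `op_after` and `other_op` are assumed to be IECore.Parameterised instances.
#
#     preset = RelativePreset( currParameter=op_after.parameters(),
#                              oldParameter=op_before.parameters() )
#     if preset.applicableTo( other_op, other_op.parameters() ) :
#         preset( other_op, other_op.parameters() )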
|
"""
Tests for the conditional_logit.py file. These tests do not include tests of
the functions that perform the mathematical calculations necessary to estimate
the MNL model.
"""
import warnings
import unittest
from collections import OrderedDict
import numpy as np
import numpy.testing as npt
import pandas as pd
import pylogit.conditional_logit as mnl
class HelperFuncTests(unittest.TestCase):
"""
Defines the tests for the 'helper' functions for estimating the MNL model.
"""
def setUp(self):
# Set up the fake arguments
self.fake_beta = np.arange(3)
self.fake_args = ["foo", 1]
self.fake_kwargs = {"fake_arg_1": "bar",
"fake_arg_2": 2,
"fake_arg_3": True}
self.fake_design = np.arange(6).reshape((2, 3))
self.fake_index = self.fake_design.dot(self.fake_beta)
def test_split_param_vec(self):
"""
Ensures that split_param_vec returns (None, None, index_coefs)
when called from within conditional_logit.py.
"""
# Store the results of split_param_vec()
split_results = mnl.split_param_vec(self.fake_beta,
return_all_types=False,
*self.fake_args,
**self.fake_kwargs)
# Check for expected results.
self.assertIsNone(split_results[0])
self.assertIsNone(split_results[1])
npt.assert_allclose(split_results[2], self.fake_beta)
# Store the results of split_param_vec()
split_results = mnl.split_param_vec(self.fake_beta,
return_all_types=True,
*self.fake_args,
**self.fake_kwargs)
# Check for expected results.
self.assertIsNone(split_results[0])
self.assertIsNone(split_results[1])
self.assertIsNone(split_results[2])
npt.assert_allclose(split_results[3], self.fake_beta)
return None
def test_mnl_utility_transform(self):
"""
Ensures that mnl_utility_transform returns a 2D version of the 1D
index array that is passed to it.
"""
# Get the results of _mnl_utility_transform()
transform_results = mnl._mnl_utility_transform(self.fake_index,
*self.fake_args,
**self.fake_kwargs)
# Check to make sure the results are as expected
self.assertIsInstance(transform_results, np.ndarray)
self.assertEqual(transform_results.shape, (2, 1))
npt.assert_allclose(transform_results, self.fake_index[:, None])
return None
def test_mnl_transform_deriv_c(self):
"""
Ensures that mnl_transform_deriv_c returns None.
"""
derivative_results = mnl._mnl_transform_deriv_c(self.fake_index,
*self.fake_args,
**self.fake_kwargs)
self.assertIsNone(derivative_results)
return None
def test_mnl_transform_deriv_alpha(self):
"""
Ensures that mnl_transform_deriv_alpha returns None.
"""
derivative_results = mnl._mnl_transform_deriv_alpha(self.fake_index,
*self.fake_args,
**self.fake_kwargs)
self.assertIsNone(derivative_results)
return None
class ChoiceObjectTests(unittest.TestCase):
"""
Defines the tests for the MNL model object's `__init__` function and its
other methods.
"""
def setUp(self):
# Create fake versions of the needed arguments for the MNL constructor
self.fake_df = pd.DataFrame({"obs_id": [1, 1, 2, 2, 3, 3],
"alt_id": [1, 2, 1, 2, 1, 2],
"choice": [0, 1, 0, 1, 1, 0],
"x": range(6)})
self.fake_specification = OrderedDict()
self.fake_specification["x"] = [[1, 2]]
self.fake_names = OrderedDict()
self.fake_names["x"] = ["x (generic coefficient)"]
self.alt_id_col = "alt_id"
self.obs_id_col = "obs_id"
self.choice_col = "choice"
self.fake_beta = np.array([1])
return None
def test_outside_intercept_error_in_constructor(self):
"""
Ensures that a ValueError is raised when the 'intercept_ref_pos' kwarg
is passed to the MNL model constructor. This prevents people from
expecting the use of outside intercept parameters to work with the MNL
model.
"""
# Create a variable for the standard arguments to this function.
standard_args = [self.fake_df,
self.alt_id_col,
self.obs_id_col,
self.choice_col,
self.fake_specification]
# Create a variable for the kwargs being passed to the constructor
kwarg_map = {"intercept_ref_pos": 2}
self.assertRaises(ValueError,
mnl.MNL,
*standard_args,
**kwarg_map)
return None
def test_shape_ignore_msg_in_constructor(self):
"""
Ensures that a UserWarning is raised when the 'shape_ref_pos' or
'shape_names' keyword arguments are passed to the MNL model
constructor. This warns people against expecting the MNL to work with
shape parameters, and alerts them to the fact they are using an MNL
model when they might have been expecting to instantiate a different
choice model.
"""
# Create a variable for the standard arguments to this function.
standard_args = [self.fake_df,
self.alt_id_col,
self.obs_id_col,
self.choice_col,
self.fake_specification]
# Create a variable for the kwargs being passed to the constructor
kwarg_map_1 = {"shape_ref_pos": 2}
kwarg_map_2 = {"shape_names": OrderedDict([("x", ["foo"])])}
# Test to ensure that the shape ignore message is printed when using
# either of these two kwargs
with warnings.catch_warnings(record=True) as context:
# Use this filter to always trigger the UserWarnings
warnings.simplefilter('always', UserWarning)
for pos, bad_kwargs in enumerate([kwarg_map_1, kwarg_map_2]):
# Create an MNL model object with the irrelevant kwargs.
# This should trigger a UserWarning
mnl_obj = mnl.MNL(*standard_args, **bad_kwargs)
# Check that the warning has been created.
self.assertEqual(len(context), pos + 1)
self.assertIsInstance(context[-1].category, type(UserWarning))
self.assertIn(mnl._shape_ignore_msg, str(context[-1].message))
return None
def test_outside_intercept_error_in_fit_mle(self):
"""
Ensures that a ValueError is raised when users try to use any other
type of initial value input methods other than the `init_vals`
argument of `fit_mle()`. This prevents people from expecting the use
of outside intercept or shape parameters to work with the MNL model.
"""
# Create a variable for the standard arguments to the MNL constructor.
standard_args = [self.fake_df,
self.alt_id_col,
self.obs_id_col,
self.choice_col,
self.fake_specification]
# Create the mnl model object whose coefficients will be estimated.
base_mnl = mnl.MNL(*standard_args)
# Create a variable for the arguments to the fit_mle function.
fit_args = [self.fake_beta]
# Create variables for the incorrect kwargs.
# The print_res = False arguments are to make sure strings aren't
# printed to the console unnecessarily.
kwarg_map_1 = {"init_shapes": np.array([1, 2]),
"print_res": False}
kwarg_map_2 = {"init_intercepts": np.array([1]),
"print_res": False}
kwarg_map_3 = {"init_coefs": np.array([1]),
"print_res": False}
# Test to ensure that the kwarg ignore message is printed when using
# any of these three incorrect kwargs
for kwargs in [kwarg_map_1, kwarg_map_2, kwarg_map_3]:
self.assertRaises(ValueError, base_mnl.fit_mle,
*fit_args, **kwargs)
return None
def test_ridge_warning_in_fit_mle(self):
"""
Ensure that a UserWarning is raised when one passes the ridge keyword
argument to the `fit_mle` method of an MNL model object.
"""
# Create a variable for the standard arguments to the MNL constructor.
standard_args = [self.fake_df,
self.alt_id_col,
self.obs_id_col,
self.choice_col,
self.fake_specification]
# Create the mnl model object whose coefficients will be estimated.
base_mnl = mnl.MNL(*standard_args)
# Create a variable for the fit_mle function's kwargs.
# The print_res = False arguments are to make sure strings aren't
# printed to the console unnecessarily.
kwargs = {"ridge": 0.5,
"print_res": False}
# Test to make sure that the ridge warning message is printed when
# using the ridge keyword argument
with warnings.catch_warnings(record=True) as w:
# Use this filter to always trigger the UserWarnings
warnings.simplefilter('always', UserWarning)
base_mnl.fit_mle(self.fake_beta, **kwargs)
self.assertGreaterEqual(len(w), 1)
self.assertIsInstance(w[0].category, type(UserWarning))
self.assertIn(mnl._ridge_warning_msg, str(w[0].message))
return None
def test_check_length_of_initial_values(self):
"""
Ensure that a ValueError is raised when one passes an init_vals
argument of the wrong length.
"""
# Create a variable for the standard arguments to the MNL constructor.
standard_args = [self.fake_df,
self.alt_id_col,
self.obs_id_col,
self.choice_col,
self.fake_specification]
# Create the mnl model object whose coefficients will be estimated.
base_mnl = mnl.MNL(*standard_args)
# Create the EstimationObj
mapping_res = base_mnl.get_mappings_for_fit()
ridge = None
zero_vector = np.zeros(1)
split_params = mnl.split_param_vec
mnl_estimator = mnl.MNLEstimator(base_mnl,
mapping_res,
ridge,
zero_vector,
split_params)
# Alias the function to be checked
func = mnl_estimator.check_length_of_initial_values
for i in [2, 3]:
init_vals = np.ones(i)
self.assertRaises(ValueError, func, init_vals)
self.assertIsNone(func(np.ones(1)))
return None
def test_just_point_kwarg(self):
# Create a variable for the standard arguments to the MNL constructor.
standard_args = [self.fake_df,
self.alt_id_col,
self.obs_id_col,
self.choice_col,
self.fake_specification]
# Create the mnl model object whose coefficients will be estimated.
base_mnl = mnl.MNL(*standard_args)
# Alias the function being tested
func = base_mnl.fit_mle
# Get the necessary kwargs
kwargs = {"just_point": True}
# Get the function results
func_result = func(self.fake_beta, **kwargs)
# Perform the desired tests to make sure we get back a dictionary with
# an "x" key in it and a value that is a ndarray.
self.assertIsInstance(func_result, dict)
self.assertIn("x", func_result)
self.assertIsInstance(func_result["x"], np.ndarray)
return None
|
import tensorflow as tf
import tensorblock as tb
import numpy as np
class recipe_input:
####### Add Input
def addInput( self , **args ):
pars = { **self.defs_input , **args }
pars['name'] = self.add_label(
self.inputs , 'Input' , pars['name'] , add_order = True )
pars = self.parse_input_pars( pars )
if pars['share'] is not None:
self.inputs.append( [ self.node( pars['share'] ) , pars ] )
else:
if pars['tensor'] is None:
with tf.variable_scope( self.folder + pars['name'] , reuse = False ):
self.inputs.append( [ tb.vars.placeholder( shape = pars['shape'] ,
dtype = pars['dtype'] ) , pars ] )
else: self.inputs.append( [ pars['tensor'] , pars ] )
self.curr_input = pars['name']
return self.inputs[-1][0]
####### Add Variable
def addVariable( self , **args ):
pars = { **self.defs_variable , **args }
pars['name'] = self.add_label(
self.variables , 'Variable' , pars['name'] , add_order = True )
pars = self.parse_input_pars( pars )
if pars['share'] is not None:
self.variables.append( [ self.node( pars['share'] ) , pars ] )
else:
if pars['tensor'] is None:
with tf.variable_scope( self.folder + pars['name'] , reuse = False ):
self.variables.append( [ pars['type']( pars['shape'] , pars ) , pars ] )
else:
if callable( pars['tensor'] ):
with tf.variable_scope( self.folder + pars['name'] , reuse = False ):
self.variables.append( [ pars['tensor']( pars['shape'] , pars ) , pars ] )
else:
if isinstance( pars['tensor'] , np.ndarray ):
self.variables.append( [ tb.vars.numpy( pars['tensor'] , pars ) , pars ] )
else:
self.variables.append( [ pars['tensor'] , pars ] )
return self.variables[-1][0]
####### Parse Pars
def parse_input_pars( self , pars ):
if pars['tensor'] is not None:
pars['first_none'] = False
if isinstance( pars['tensor'] , np.ndarray ):
pars['shape'] = pars['tensor'].shape
else:
pars['shape'] = tb.aux.tf_shape( pars['tensor'] )
if pars['copy'] is not None: # Copying
pars['type'] = tb.vars.copy
pars['shape'] = self.node( pars['copy'] )
copy_pars = self.pars( pars['copy'] )
pars['out_sides'] = copy_pars['out_sides']
pars['out_channels'] = copy_pars['out_channels']
else: # Nothing
pars['shape'] = list( pars['shape'] )
if pars['first_none'] and len( pars['shape'] ) > 1: pars['shape'][0] = None
shape = pars['shape']
if pars['out_sides'] is None:
if len( shape ) == 2: pars['out_sides'] = shape[1:2] ;
if len( shape ) == 4: pars['out_sides'] = shape[1:3] ;
if len( shape ) == 5: pars['out_sides'] = shape[1:4] ;
if pars['out_channels'] is None:
if len( shape ) == 2: pars['out_channels'] = 1
else: pars['out_channels'] = shape[-1]
return pars
|
from PyQt5.QtCore import QStandardPaths
import os
import json
import urllib.request
import gzip
import collections
class ReferenceCatalogues:
def __init__(self, database):
self.database = database
c = database.cursor()
cats = c.execute('SELECT id, "table", "name", spectra_url, gzipped, file_column, sptype_column FROM spectra_catalogues ORDER BY id ASC')
self.catalogues = collections.OrderedDict([(c[2], {'id':c[0],'table':c[1],'name':c[2],'url':c[3],'gzipped':c[4]==1, 'columns': {'sptype': c[6], 'file':c[5]} }) for c in cats])
def spectra(self, catalog):
cat_info = self.catalogues[catalog]
query = "SELECT {0}, {1} FROM {2} WHERE {1} <> '' ORDER BY {1} ASC".format(cat_info['columns']['file'], cat_info['columns']['sptype'], cat_info['table'])
c = self.database.cursor()
return [{'catalog': catalog, 'sptype': r[1], 'file': r[0]} for r in c.execute(query)]
def fits(self, entry):
catname = entry['catalog']
catalog = self.catalogues[catname]
return ReferenceCatalogues.get_fits(catname, entry['file'], catalog['url'], catalog['gzipped'])
    @staticmethod
    def get_fits(catname, filename, url, is_gzipped):
cache_path = os.path.join(QStandardPaths.writableLocation(QStandardPaths.CacheLocation), catname)
file_path = os.path.join(cache_path, '{}.gz'.format(filename))
try:
os.makedirs(cache_path)
except FileExistsError:
pass
if not os.path.exists(file_path):
if is_gzipped:
urllib.request.urlretrieve(url.format("{}.gz".format(filename)), file_path )
else:
request = urllib.request.urlopen(url.format(filename))
with gzip.open(file_path, 'wb') as f:
f.write(request.read())
return file_path
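# Minimal usage sketch (illustrative only; assumes an sqlite3 connection whose
# schema provides the spectra_catalogues table queried above):
#
#   import sqlite3
#   db = sqlite3.connect('catalogues.db')
#   refs = ReferenceCatalogues(db)
#   first_catalogue = next(iter(refs.catalogues))
#   for entry in refs.spectra(first_catalogue):
#       path = refs.fits(entry)   # downloads on first use, then serves the gzipped cache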
|
from contextlib import contextmanager
import logging
from ..image import Image
from ..png import FormatError
from . import Rect
logger = logging.getLogger(__name__)
class SpriteNode(Rect):
def __init__(self, im, width, height, fname=None, pad=(0, 0)):
Rect.__init__(self, (0, 0, width, height))
self.im = im
self.fname = fname
(self.pad_x, self.pad_y) = pad
self.close = im.close
def __str__(self):
clsnam = type(self).__name__
arg = self.fname if self.fname else self.im
args = (clsnam, arg, self.width, self.height)
return "<%s %s (%dx%d)>" % args
def calc_box(self, pos):
x1, y1 = pos
return (x1, y1, x1 + self.width, y1 + self.height)
@classmethod
def from_image(cls, im, *args, **kwds):
args = im.size + args
return cls(im, *args, **kwds)
@classmethod
def load_file(cls, fo, fname=None, pad=(0, 0), **kwds):
if not hasattr(fo, "read"):
if not fname:
fname = fo
fo = open(fo, "rb")
elif not fname and hasattr(fo, "name"):
fname = fo.name
return cls.from_image(Image.load(fo), fname=fname, pad=pad)
@contextmanager
def open_sprites(fnames, **kwds):
fs = [(fn, open(str(fn), "rb")) for fn in fnames]
sprites = []
try:
for fn, fo in fs:
try:
sprites.append(SpriteNode.load_file(fo, fname=fn, **kwds))
            except FormatError as e:
                logger.warning('%s: invalid image file: %s', fn, e)
yield sprites
finally:
for fn, fo in fs:
fo.close()
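# Illustrative use of the context manager above (file names are placeholders):
#
#   with open_sprites(["icons/a.png", "icons/b.png"], pad=(1, 1)) as sprites:
#       for node in sprites:
#           print(node, node.calc_box((0, 0)))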
|
'''Game main module.
Contains the entry point used by the run_game.py script.
Feel free to put all your game code here, or in other modules in this "gamelib"
package.
'''
import json
import pygame
import data
import engine
class Game(object):
'''A class that delegates its engine functionalities to a hot-swappable
module'''
FPS = 60.0
def __init__(self):
self.running = False
self.data = { "gamestate": "newtitle" }
# Swapping state
self.swapped = False
# Error states
        self.input_handling_error = None
self.simulate_error = None
self.render_error = None
def run(self):
engine.init()
clock = pygame.time.Clock()
self.running = True
dt = 0
frames = 0
while self.running:
self.handle_input()
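            # If the engine module was hot-swapped while handling input, skip the
            # rest of this frame so the freshly reloaded engine starts cleanly on
            # the next iteration.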
if self.swapped:
self.swapped = False
continue
self.simulate(dt)
self.render()
dt = clock.tick(self.FPS)
frames += 1
# Report framerate on exit
ticks = pygame.time.get_ticks()
framerate = frames / (ticks / 1000.0)
print("Framerate was {0}".format(framerate))
def handle_input(self):
try:
engine.handle_input(self, self.data)
self.input_handling_error = None
except Exception as error:
if self.input_handling_error != error.message:
print("Unable to handle input, reason:")
print(error)
self.input_handling_error = error.message
def simulate(self, dt):
try:
engine.simulate(self, self.data, dt)
self.simulate_error = None
except Exception as error:
if self.simulate_error != error.message:
print("Unable to render, reason:")
print(error)
self.simulate_error = error.message
def render(self):
try:
engine.render(self.data)
self.render_error = None
except Exception as error:
if self.render_error != error.message:
print("Unable to render, reason:")
print(error)
self.render_error = error.message
def quit(self):
self.dump_data()
self.running = False
def request_swap(self):
try:
print("Attempting to swap engine...")
reload(engine)
print("Engine swapped. Reinitializing engine...")
engine.init()
print("Engine reinitialized\n")
except Exception as error:
print("Errors were thrown in the engine swap:")
print(error)
def dump_data(self):
print("Saving the gamestate...")
try:
with open("gamestate.json", "wt") as fout:
json_data = json.dumps(self.data, indent=4)
print(json_data)
fout.write(json_data)
print("Gamestate saved\n")
except Exception as error:
print("Unable to dump the data, reason:")
print(error)
def load_data(self):
print("Restoring the gamestate...")
try:
with open("gamestate.json", "rt") as fin:
new_data = json.load(fin)
self.data = new_data
print("Gamestate restored")
except Exception as error:
print("Unable to load the data, reason:")
print(error)
def main():
game = Game()
# game.load_data()
game.run()
|
import argparse
import os
import codecs
import sys
import StringIO
import second_layer
from tree import read_conll
import tree
from features import convert_toNumbers
import json
import time
def train(args):
"""
main() to launch everything
"""
if not args.no_ccprop:
if not os.path.exists(os.path.join(args.output,u"ccprop")):
os.makedirs(os.path.join(args.output,u"ccprop"))
cc_trainf=codecs.open(os.path.join(args.output,u"ccprop","train.txt"),"wt",u"utf-8")
ccprop=second_layer.ConjPropagation()
else:
ccprop=None
if not args.no_rel:
if not os.path.exists(os.path.join(args.output,u"rel")):
os.makedirs(os.path.join(args.output,u"rel"))
rel_trainf=codecs.open(os.path.join(args.output,u"rel","train.txt"),"wt",u"utf-8")
rel=second_layer.Relativizers()
else:
rel=None
count=0
print >> sys.stderr, "collecting training data"
for comments,sent in read_conll(args.input):
t=tree.Tree(sent)
if rel is not None:
rel.learn(t,rel_trainf)
if ccprop is not None:
ccprop.learn(t,cc_trainf)
count+=1
print >> sys.stderr, "sentences:",count
print >> sys.stderr, "converting training files"
if not args.no_ccprop:
cc_trainf.close()
convert_toNumbers(False,u"ccprop",args.output)
if not args.no_rel:
rel_trainf.close()
convert_toNumbers(False,u"rel",args.output)
## TODO svm
if __name__=="__main__":
parser = argparse.ArgumentParser(description='Trains the parser in a multi-core setting.')
g=parser.add_argument_group("Input/Output")
g.add_argument('input', nargs='?', help='Training file name, or nothing for training on stdin')
g.add_argument('-o', '--output', required=True, help='Name of the output model.')
g=parser.add_argument_group("Training config")
g.add_argument('--no_ccprop', required=False, dest='no_ccprop', action="store_true", default=False, help='Do not train conjunct propagation model. (default %(default)d)')
g.add_argument('--no_rel', required=False, dest='no_rel', action="store_true", default=False, help='Do not train relativizer prediction model. (default %(default)d)')
# g.add_argument('-p', '--processes', type=int, default=4, help='How many training workers to run? (default %(default)d)')
# g.add_argument('--max_sent', type=int, default=0, help='How many sentences to read from the input? 0 for all. (default %(default)d)')
# g=parser.add_argument_group("Training algorithm choices")
# g.add_argument('-i', '--iterations', type=int, default=10, help='How many iterations to run? If you want more than one, you must give the input as a file. (default %(default)d)')
# g.add_argument('--dim', type=int, default=5000000, help='Dimensionality of the trained vector. (default %(default)d)')
# g.add_argument('--beam_size', type=int, default=40, help='Size of the beam. (default %(default)d)')
args = parser.parse_args()
train(args)
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Aladom SAS & Hosting Dvpt SAS
from django import forms
from django.utils.translation import ugettext_lazy as _
from .models import (
CampaignMailHeader, MailHeader, Subscription, SubscriptionType,
CampaignStaticAttachment, MailStaticAttachment,
)
__all__ = [
'CampaignMailHeaderForm', 'MailHeaderForm', 'SubscriptionsManagementForm',
'CampaignStaticAttachmentForm', 'MailStaticAttachmentForm',
]
class CampaignMailHeaderForm(forms.ModelForm):
class Meta:
model = CampaignMailHeader
fields = '__all__'
widgets = {
'value': forms.Textarea(attrs={'rows': 1}),
}
help_texts = {
'value': _("May contain template variables."),
}
class MailHeaderForm(forms.ModelForm):
class Meta:
model = MailHeader
fields = '__all__'
widgets = {
'value': forms.Textarea(attrs={'rows': 1}),
}
class SubscriptionsManagementForm(forms.Form):
def __init__(self, *args, **kwargs):
self.email = kwargs.pop('email')
subscription_types = SubscriptionType.objects.all()
for subscription_type in subscription_types:
self.base_fields['subscribed_{}'.format(subscription_type.pk)] = (
forms.BooleanField(
label=subscription_type.name,
help_text=subscription_type.description,
initial=subscription_type.is_subscribed(self.email),
required=False,
)
)
super().__init__(*args, **kwargs)
def save(self):
subscriptions = {
s.subscription_type_id: s
for s in Subscription.objects.filter(email=self.email)
}
for field, value in self.cleaned_data.items():
pk = int(field.split('_')[-1])
if pk not in subscriptions:
Subscription(email=self.email, subscription_type_id=pk,
subscribed=value).save()
elif subscriptions[pk].subscribed != value:
subscriptions[pk].subscribed = value
subscriptions[pk].save()
class CampaignStaticAttachmentForm(forms.ModelForm):
class Meta:
model = CampaignStaticAttachment
fields = '__all__'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['attachment'].widget.choices.insert(0, ('', '------'))
class MailStaticAttachmentForm(forms.ModelForm):
class Meta:
model = MailStaticAttachment
fields = '__all__'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['attachment'].widget.choices.insert(0, ('', '------'))
|
from Cantera import exceptions
from Cantera.num import array
from Cantera.elements import elementMoles
def det3(A):
"""Determinant of a 3x3 matrix."""
return (A[0,0]*(A[1,1]*A[2,2] - A[1,2]*A[2,1])
- A[0,1]*(A[1,0]*A[2,2] - A[1,2]*A[2,0])
+ A[0,2]*(A[1,0]*A[2,1] - A[2,0]*A[1,1]))
def stoich_fuel_to_oxidizer(mix, fuel, oxidizer):
"""Fuel to oxidizer ratio for stoichiometric combustion.
This function only works for fuels composed of carbon, hydrogen,
    and/or oxygen. The molar fuel to oxidizer ratio that results in
    complete combustion (all C to CO2 and all H to H2O) is returned.
    """
# fuel
mix.setMoleFractions(fuel)
f_carbon = elementMoles(mix, 'C')
f_oxygen = elementMoles(mix, 'O')
f_hydrogen = elementMoles(mix, 'H')
#oxidizer
mix.setMoleFractions(oxidizer)
o_carbon = elementMoles(mix, 'C')
o_oxygen = elementMoles(mix, 'O')
o_hydrogen = elementMoles(mix, 'H')
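    # Solve the element balances for [n_CO2, n_H2O, r], where r is the moles of
    # oxidizer per mole of fuel mixture:
    #   C:  n_CO2             - r*o_C = f_C
    #   H:            2*n_H2O - r*o_H = f_H
    #   O:  2*n_CO2 + n_H2O   - r*o_O = f_O
    # r is extracted below by Cramer's rule (third column of A replaced by B).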
B = array([f_carbon, f_hydrogen, f_oxygen],'d')
A = array([[1.0, 0.0, -o_carbon],
[0.0, 2.0, -o_hydrogen],
[2.0, 1.0, -o_oxygen]], 'd')
num = array(A,'d')
num[:,2] = B
r = det3(num)/det3(A)
if r <= 0.0:
        raise exceptions.CanteraError('negative or zero computed stoichiometric fuel/oxidizer ratio!')
return 1.0/r
if __name__ == "__main__":
    from Cantera import *  # assumed to provide GRI30() in the legacy Cantera 1.x API
    g = GRI30()
print stoich_fuel_to_oxidizer(g, 'CH4:1', 'O2:1')
|
from __future__ import unicode_literals
import datetime
import requests
from requests_oauthlib import OAuth1
from oauthlib.oauth1 import (SIGNATURE_RSA, SIGNATURE_TYPE_AUTH_HEADER,
SIGNATURE_HMAC)
from six.moves.urllib.parse import urlencode, parse_qs
from .constants import (XERO_BASE_URL, XERO_PARTNER_BASE_URL,
REQUEST_TOKEN_URL, AUTHORIZE_URL, ACCESS_TOKEN_URL)
from .exceptions import *
OAUTH_EXPIRY_SECONDS = 3600 # Default unless a response reports differently
class PrivateCredentials(object):
"""An object wrapping the 2-step OAuth process for Private Xero API access.
Usage:
1) Construct a PrivateCredentials() instance:
>>> from xero.auth import PrivateCredentials
>>> credentials = PrivateCredentials(<consumer_key>, <rsa_key>)
rsa_key should be a multi-line string, starting with:
-----BEGIN RSA PRIVATE KEY-----\n
2) Use the credentials:
>>> from xero import Xero
>>> xero = Xero(credentials)
>>> xero.contacts.all()
...
"""
def __init__(self, consumer_key, rsa_key):
self.consumer_key = consumer_key
self.rsa_key = rsa_key
self.base_url = XERO_BASE_URL
# Private API uses consumer key as the OAuth token.
self.oauth_token = consumer_key
self.oauth = OAuth1(
self.consumer_key,
resource_owner_key=self.oauth_token,
rsa_key=self.rsa_key,
signature_method=SIGNATURE_RSA,
signature_type=SIGNATURE_TYPE_AUTH_HEADER,
)
class PublicCredentials(object):
"""An object wrapping the 3-step OAuth process for Public Xero API access.
Usage:
1) Construct a PublicCredentials() instance:
>>> from xero import PublicCredentials
>>> credentials = PublicCredentials(<consumer_key>, <consumer_secret>)
2) Visit the authentication URL:
>>> credentials.url
If a callback URI was provided (e.g., https://example.com/oauth),
the user will be redirected to a URL of the form:
https://example.com/oauth?oauth_token=<token>&oauth_verifier=<verifier>&org=<organization ID>
from which the verifier can be extracted. If no callback URI is
provided, the verifier will be shown on the screen, and must be
manually entered by the user.
3) Verify the instance:
>>> credentials.verify(<verifier string>)
4) Use the credentials.
>>> from xero import Xero
>>> xero = Xero(credentials)
>>> xero.contacts.all()
...
"""
def __init__(self, consumer_key, consumer_secret,
callback_uri=None, verified=False,
oauth_token=None, oauth_token_secret=None,
oauth_expires_at=None, oauth_authorization_expires_at=None):
""" Construct the auth instance.
Must provide the consumer key and secret.
A callback URL may be provided as an option. If provided, the
        Xero verification process will redirect to that URL when
        verification is complete.
        """
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret
self.callback_uri = callback_uri
self.verified = verified
self._oauth = None
self.oauth_expires_at = oauth_expires_at
self.oauth_authorization_expires_at = oauth_authorization_expires_at
self.base_url = XERO_BASE_URL
self._signature_method = SIGNATURE_HMAC
# These are not strictly used by Public Credentials, but
# are reserved for use by other credentials (i.e. Partner)
self.rsa_key = None
self.client_cert = None
self.oauth_session_handle = None
self.oauth_token = None
self.oauth_token_secret = None
self._init_credentials(oauth_token, oauth_token_secret)
def _init_credentials(self, oauth_token, oauth_token_secret):
""" Depending on the state passed in, get self._oauth up and running.
"""
if oauth_token and oauth_token_secret:
if self.verified:
# If provided, this is a fully verified set of
# credentials. Store the oauth_token and secret
# and initialize OAuth around those
self._init_oauth(oauth_token, oauth_token_secret)
else:
                # If provided, we are reconstructing an initialized
# (but non-verified) set of public credentials.
self.oauth_token = oauth_token
self.oauth_token_secret = oauth_token_secret
else:
# This is a brand new set of credentials - we need to generate
# an oauth token so it's available for the url property.
oauth = OAuth1(
self.consumer_key,
client_secret=self.consumer_secret,
callback_uri=self.callback_uri,
rsa_key=self.rsa_key,
signature_method=self._signature_method
)
url = self.base_url + REQUEST_TOKEN_URL
response = requests.post(url=url, auth=oauth, cert=self.client_cert)
self._process_oauth_response(response)
def _init_oauth(self, oauth_token, oauth_token_secret):
""" Store and initialize a verified set of OAuth credentials.
"""
self.oauth_token = oauth_token
self.oauth_token_secret = oauth_token_secret
self._oauth = OAuth1(
self.consumer_key,
client_secret=self.consumer_secret,
resource_owner_key=self.oauth_token,
resource_owner_secret=self.oauth_token_secret,
rsa_key=self.rsa_key,
signature_method=self._signature_method
)
def _process_oauth_response(self, response):
""" Extracts the fields from an oauth response.
"""
if response.status_code == 200:
credentials = parse_qs(response.text)
# Initialize the oauth credentials
self._init_oauth(
credentials.get('oauth_token')[0],
credentials.get('oauth_token_secret')[0]
)
# If tokens are refreshable, we'll get a session handle
self.oauth_session_handle = credentials.get(
'oauth_session_handle', [None])[0]
# Calculate token/auth expiry
oauth_expires_in = credentials.get(
'oauth_expires_in',
[OAUTH_EXPIRY_SECONDS])[0]
oauth_authorisation_expires_in = credentials.get(
'oauth_authorization_expires_in',
[OAUTH_EXPIRY_SECONDS])[0]
self.oauth_expires_at = datetime.datetime.now() + \
datetime.timedelta(seconds=int(
oauth_expires_in))
self.oauth_authorization_expires_at = \
datetime.datetime.now() + \
datetime.timedelta(seconds=int(
oauth_authorisation_expires_in))
else:
self._handle_error_response(response)
def _handle_error_response(self, response):
""" Raise exceptions for error codes.
"""
if response.status_code == 400:
raise XeroBadRequest(response)
elif response.status_code == 401:
raise XeroUnauthorized(response)
elif response.status_code == 403:
raise XeroForbidden(response)
elif response.status_code == 404:
raise XeroNotFound(response)
elif response.status_code == 500:
raise XeroInternalError(response)
elif response.status_code == 501:
raise XeroNotImplemented(response)
elif response.status_code == 503:
# Two 503 responses are possible. Rate limit errors
# return encoded content; offline errors don't.
# If you parse the response text and there's nothing
# encoded, it must be a not-available error.
payload = parse_qs(response.text)
if payload:
raise XeroRateLimitExceeded(response, payload)
else:
raise XeroNotAvailable(response)
else:
raise XeroExceptionUnknown(response)
@property
def state(self):
""" Obtain the useful state of this credentials object so that we can
reconstruct it independently.
"""
return dict(
(attr, getattr(self, attr))
for attr in (
'consumer_key', 'consumer_secret', 'callback_uri',
'verified', 'oauth_token', 'oauth_token_secret',
'oauth_session_handle', 'oauth_expires_at',
'oauth_authorization_expires_at'
)
if getattr(self, attr) is not None
)
def verify(self, verifier):
""" Verify an OAuth token
"""
# Construct the credentials for the verification request
oauth = OAuth1(
self.consumer_key,
client_secret=self.consumer_secret,
resource_owner_key=self.oauth_token,
resource_owner_secret=self.oauth_token_secret,
verifier=verifier,
rsa_key=self.rsa_key,
signature_method=self._signature_method
)
        # Make the verification request, getting back an access token
url = self.base_url + ACCESS_TOKEN_URL
response = requests.post(url=url, auth=oauth, cert=self.client_cert)
self._process_oauth_response(response)
self.verified = True
@property
def url(self):
""" Returns the URL that can be visited to obtain a verifier code.
"""
# The authorize url is always api.xero.com
url = XERO_BASE_URL + AUTHORIZE_URL + '?' + \
urlencode({'oauth_token': self.oauth_token})
return url
@property
def oauth(self):
"Returns the requests-compatible OAuth object"
if self._oauth is None:
raise XeroNotVerified("OAuth credentials haven't been verified")
return self._oauth
def expired(self, now=None):
""" Check whether expired.
"""
if now is None:
now = datetime.datetime.now()
# Credentials states from older versions might not have
# oauth_expires_at available
if self.oauth_expires_at is None:
raise XeroException(None, "Expiry time is not available")
# Allow a bit of time for clock differences and round trip times
# to prevent false negatives. If users want the precise expiry,
# they can use self.oauth_expires_at
CONSERVATIVE_SECONDS = 30
return self.oauth_expires_at <= \
(now + datetime.timedelta(seconds=CONSERVATIVE_SECONDS))
class PartnerCredentials(PublicCredentials):
"""An object wrapping the 3-step OAuth process for Partner Xero API access.
Usage is very similar to Public Credentials with the following changes:
1) You'll need to pass the private key for your RSA certificate.
>>> rsa_key = "-----BEGIN RSA PRIVATE KEY----- ..."
2) You'll need to pass a tuple to the Entrust certificate pair.
>>> client_cert = ('/path/to/entrust-cert.pem',
'/path/to/entrust-private-nopass.pem')
3) Once a token has expired, you can refresh it to get another 30 mins
>>> credentials = PartnerCredentials(**state)
>>> if credentials.expired():
credentials.refresh()
4) Authorization expiry and token expiry become different things.
oauth_expires_at tells when the current token expires (~30 min window)
oauth_authorization_expires_at tells when the overall access
permissions expire (~10 year window)
"""
def __init__(self, consumer_key, consumer_secret, rsa_key, client_cert,
callback_uri=None, verified=False,
oauth_token=None, oauth_token_secret=None,
oauth_expires_at=None, oauth_authorization_expires_at=None,
oauth_session_handle=None):
"""Construct the auth instance.
Must provide the consumer key and secret.
A callback URL may be provided as an option. If provided, the
        Xero verification process will redirect to that URL when
        verification is complete.
        """
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret
self.callback_uri = callback_uri
self.verified = verified
self._oauth = None
self.oauth_expires_at = oauth_expires_at
self.oauth_authorization_expires_at = oauth_authorization_expires_at
self._signature_method = SIGNATURE_RSA
self.base_url = XERO_PARTNER_BASE_URL
self.rsa_key = rsa_key
self.client_cert = client_cert
self.oauth_session_handle = oauth_session_handle
self._init_credentials(oauth_token, oauth_token_secret)
def refresh(self):
"Refresh an expired token"
# Construct the credentials for the verification request
oauth = OAuth1(
self.consumer_key,
client_secret=self.consumer_secret,
resource_owner_key=self.oauth_token,
resource_owner_secret=self.oauth_token_secret,
rsa_key=self.rsa_key,
signature_method=self._signature_method
)
# Make the verification request, getting back an access token
params = {'oauth_session_handle': self.oauth_session_handle}
response = requests.post(url=self.base_url + ACCESS_TOKEN_URL,
params=params, auth=oauth, cert=self.client_cert)
self._process_oauth_response(response)
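# Illustrative persistence round trip using the ``state`` property above
# (variable names are placeholders, not part of this module):
#
#   credentials = PublicCredentials(consumer_key, consumer_secret)
#   saved = credentials.state                # e.g. stash in a session store
#   ...
#   credentials = PublicCredentials(**saved)
#   credentials.verify(verifier)             # after the user authorizes at credentials.url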
|
"""
Tests for the game class
"""
import unittest
import warnings
import numpy as np
from hypothesis import given
from hypothesis.extra.numpy import arrays
from hypothesis.strategies import integers
import pytest
import nashpy as nash
import nashpy.learning
class TestGame(unittest.TestCase):
"""
Tests for the game class
"""
@given(A=arrays(np.int8, (4, 5)), B=arrays(np.int8, (4, 5)))
def test_bi_matrix_init(self, A, B):
"""Test that can create a bi matrix game"""
g = nash.Game(A, B)
self.assertEqual(g.payoff_matrices, (A, B))
        if np.array_equal(A, -B):  # the game is zero sum when B == -A
self.assertTrue(g.zero_sum)
else:
self.assertFalse(g.zero_sum)
# Can also init with lists
A = A.tolist()
B = B.tolist()
g = nash.Game(A, B)
self.assertTrue(np.array_equal(g.payoff_matrices[0], np.asarray(A)))
self.assertTrue(np.array_equal(g.payoff_matrices[1], np.asarray(B)))
def test_incorrect_dimensions_init(self):
"""Tests that ValueError is raised for unequal dimensions"""
A = np.array([[1, 2, 3], [4, 5, 6]])
B = np.array([[1, 2], [3, 4]])
with pytest.raises(ValueError):
nash.Game(A, B)
def test_bi_matrix_repr(self):
"""Test that can create a bi matrix game"""
A = np.array([[1, 2], [2, 1]])
B = np.array([[2, 1], [1, 2]])
g = nash.Game(A, B)
string_repr = """Bi matrix game with payoff matrices:
Row player:
[[1 2]
[2 1]]
Column player:
[[2 1]
[1 2]]"""
self.assertEqual(g.__repr__(), string_repr)
@given(A=arrays(np.int8, (4, 5)))
def test_zero_sum_game_init(self, A):
"""Test that can create a zero sum game"""
g = nash.Game(A)
self.assertTrue(np.array_equal(g.payoff_matrices[0], A))
self.assertTrue(np.array_equal(g.payoff_matrices[0], -g.payoff_matrices[1]))
self.assertTrue(g.zero_sum)
# Can also init with lists
A = A.tolist()
g = nash.Game(A)
self.assertTrue(np.array_equal(g.payoff_matrices[0], np.asarray(A)))
self.assertTrue(np.array_equal(g.payoff_matrices[0], -g.payoff_matrices[1]))
self.assertTrue(g.zero_sum)
def test_zero_sum_repr(self):
"""Test that can create a bi matrix game"""
A = np.array([[1, -1], [-1, 1]])
g = nash.Game(A)
string_repr = """Zero sum game with payoff matrices:
Row player:
[[ 1 -1]
[-1 1]]
Column player:
[[-1 1]
[ 1 -1]]"""
self.assertEqual(g.__repr__(), string_repr)
@given(A=arrays(np.int8, (4, 5)))
def test_zero_sum_property_from_bi_matrix(self, A):
"""Test that can create a zero sum game"""
B = -A
g = nash.Game(A, B)
self.assertTrue(g.zero_sum)
@given(A=arrays(np.int8, (3, 4)), B=arrays(np.int8, (3, 4)))
def test_property_support_enumeration(self, A, B):
"""Property based test for the equilibria calculation"""
g = nash.Game(A, B)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
for equilibrium in g.support_enumeration():
for i, s in enumerate(equilibrium):
# Test that have a probability vector (subject to numerical
# error)
self.assertAlmostEqual(s.sum(), 1)
# Test that it is of the correct size
self.assertEqual(s.size, [3, 4][i])
# Test that it is non negative
self.assertTrue(all(s >= 0))
def test_support_enumeration_for_bi_matrix(self):
"""Test for the equilibria calculation support enumeration"""
A = np.array([[160, 205, 44], [175, 180, 45], [201, 204, 50], [120, 207, 49]])
B = np.array([[2, 2, 2], [1, 0, 0], [3, 4, 1], [4, 1, 2]])
g = nash.Game(A, B)
expected_equilibria = [
(np.array([0, 0, 3 / 4, 1 / 4]), np.array([1 / 28, 27 / 28, 0]))
]
for obtained, expected in zip(g.support_enumeration(), expected_equilibria):
for s1, s2 in zip(obtained, expected):
self.assertTrue(
np.array_equal(s1, s2),
msg="obtained: {} !=expected: {}".format(obtained, expected),
)
A = np.array([[1, 0], [-2, 3]])
B = np.array([[3, 2], [-1, 0]])
g = nash.Game(A, B)
expected_equilibria = [
(np.array([1, 0]), np.array([1, 0])),
(np.array([0, 1]), np.array([0, 1])),
(np.array([1 / 2, 1 / 2]), np.array([1 / 2, 1 / 2])),
]
for obtained, expected in zip(g.support_enumeration(), expected_equilibria):
for s1, s2 in zip(obtained, expected):
self.assertTrue(
np.array_equal(s1, s2),
msg="obtained: {} !=expected: {}".format(obtained, expected),
)
A = np.array([[2, 1], [0, 2]])
B = np.array([[2, 0], [1, 2]])
g = nash.Game(A, B)
expected_equilibria = [
(np.array([1, 0]), np.array([1, 0])),
(np.array([0, 1]), np.array([0, 1])),
(np.array([1 / 3, 2 / 3]), np.array([1 / 3, 2 / 3])),
]
for obtained, expected in zip(g.support_enumeration(), expected_equilibria):
for s1, s2 in zip(obtained, expected):
self.assertTrue(
np.array_equal(s1, s2),
msg="obtained: {} !=expected: {}".format(obtained, expected),
)
def test_support_enumeration_for_degenerate_bi_matrix_game(self):
"""Test for the equilibria calculation support enumeration with a
degenerate game"""
A = np.array([[-1, 0], [-1, 1]])
B = np.array([[1, 0], [1, -1]])
g = nash.Game(A, B)
expected_equilibria = [
(np.array([1, 0]), np.array([1, 0])),
(np.array([0, 1]), np.array([1, 0])),
]
with warnings.catch_warnings(record=True) as w:
obtained_equilibria = list(g.support_enumeration())
for obtained, expected in zip(obtained_equilibria, expected_equilibria):
for s1, s2 in zip(obtained, expected):
self.assertTrue(
np.array_equal(s1, s2),
msg="obtained: {} !=expected: {}".format(obtained, expected),
)
self.assertGreater(len(w), 0)
self.assertEqual(w[-1].category, RuntimeWarning)
A = np.array([[3, 3], [2, 5], [0, 6]])
B = np.array([[3, 3], [2, 6], [3, 1]])
g = nash.Game(A, B)
expected_equilibria = [
(np.array([1, 0, 0]), np.array([1, 0])),
(np.array([0, 1 / 3, 2 / 3]), np.array([1 / 3, 2 / 3])),
]
with warnings.catch_warnings(record=True) as w:
obtained_equilibria = list(g.support_enumeration())
for obtained, expected in zip(obtained_equilibria, expected_equilibria):
for s1, s2 in zip(obtained, expected):
self.assertTrue(
np.allclose(s1, s2),
msg="obtained: {} !=expected: {}".format(obtained, expected),
)
self.assertGreater(len(w), 0)
self.assertEqual(w[-1].category, RuntimeWarning)
A = np.array([[0, 0], [0, 0]])
B = np.array([[0, 0], [0, 0]])
g = nash.Game(A, B)
expected_equilibria = [
(np.array([1, 0]), np.array([1, 0])),
(np.array([1, 0]), np.array([0, 1])),
(np.array([0, 1]), np.array([1, 0])),
(np.array([0, 1]), np.array([0, 1])),
]
with warnings.catch_warnings(record=True) as w:
obtained_equilibria = list(g.support_enumeration())
for obtained, expected in zip(obtained_equilibria, expected_equilibria):
for s1, s2 in zip(obtained, expected):
self.assertTrue(
np.allclose(s1, s2),
msg="obtained: {} !=expected: {}".format(obtained, expected),
)
self.assertGreater(len(w), 0)
self.assertEqual(w[-1].category, RuntimeWarning)
def test_support_enumeration_for_deg_bi_matrix_game_with_non_deg(self):
A = np.array([[0, 0], [0, 0]])
g = nash.Game(A)
with warnings.catch_warnings(record=True) as w:
obtained_equilibria = list(g.support_enumeration(non_degenerate=True))
self.assertEqual(len(obtained_equilibria), 4)
self.assertGreater(len(w), 0)
self.assertEqual(w[-1].category, RuntimeWarning)
def test_support_enumeration_for_deg_bi_matrix_game_with_low_tol(self):
A = np.array([[0, 0], [0, 0]])
g = nash.Game(A)
with warnings.catch_warnings(record=True) as w:
obtained_equilibria = list(g.support_enumeration(tol=0))
self.assertEqual(len(obtained_equilibria), 4)
self.assertGreater(len(w), 0)
self.assertEqual(w[-1].category, RuntimeWarning)
def test_support_enumeration_for_particular_game(self):
"""
This particular game was raised in
https://github.com/drvinceknight/Nashpy/issues/67. Two users reported
that it
did not return any equilibria under support enumeration. I was unable to
reproduce this error locally as I was using a pre compiled install of
numpy. However when using a pip installed version I was able to
reproduce the error.
Rounding the particular input matrices to 5 decimal places however fixes
the error. This is an underlying precision error related to numpy (I
think).
"""
A = [
[52.46337363, 69.47195938, 0.0, 54.14372075],
[77.0, 88.0, 84.85714286, 92.4],
[77.78571429, 87.35294118, 93.5, 91.38461538],
[66.37100751, 43.4530444, 0.0, 60.36191831],
]
B = [
[23.52690518, 17.35459006, 88.209, 20.8021711],
[16.17165, 0.0, 14.00142857, 6.46866],
[0.0, 5.76529412, 0.0, 0.0],
[15.68327304, 40.68156322, 84.00857143, 11.06596804],
]
A = np.round(A, 5)
B = np.round(B, 5)
game = nash.Game(A, B)
eqs = list(game.support_enumeration())
assert len(eqs) == 1
row_strategy, col_strategy = eqs[0]
expected_row_strategy, expected_column_strategy = (
np.array([7.33134761e-17, 2.62812089e-01, 7.37187911e-01, 0.00000000e00]),
np.array([0.4516129, 0.5483871, 0.0, 0.0]),
)
assert np.all(np.isclose(row_strategy, expected_row_strategy))
assert np.all(np.isclose(col_strategy, expected_column_strategy))
def test_vertex_enumeration_for_bi_matrix(self):
"""Test for the equilibria calculation using vertex enumeration"""
A = np.array([[160, 205, 44], [175, 180, 45], [201, 204, 50], [120, 207, 49]])
B = np.array([[2, 2, 2], [1, 0, 0], [3, 4, 1], [4, 1, 2]])
g = nash.Game(A, B)
expected_equilibria = [
(np.array([0, 0, 3 / 4, 1 / 4]), np.array([1 / 28, 27 / 28, 0]))
]
for obtained, expected in zip(g.vertex_enumeration(), expected_equilibria):
for s1, s2 in zip(obtained, expected):
self.assertTrue(
all(np.isclose(s1, s2)),
msg="obtained: {} !=expected: {}".format(obtained, expected),
)
A = np.array([[1, 0], [-2, 3]])
B = np.array([[3, 2], [-1, 0]])
g = nash.Game(A, B)
expected_equilibria = [
(np.array([1, 0]), np.array([1, 0])),
(np.array([0, 1]), np.array([0, 1])),
(np.array([1 / 2, 1 / 2]), np.array([1 / 2, 1 / 2])),
]
for obtained, expected in zip(g.vertex_enumeration(), expected_equilibria):
for s1, s2 in zip(obtained, expected):
self.assertTrue(
all(np.isclose(s1, s2)),
msg="obtained: {} !=expected: {}".format(obtained, expected),
)
A = np.array([[2, 1], [0, 2]])
B = np.array([[2, 0], [1, 2]])
g = nash.Game(A, B)
expected_equilibria = [
(np.array([1, 0]), np.array([1, 0])),
(np.array([0, 1]), np.array([0, 1])),
(np.array([1 / 3, 2 / 3]), np.array([1 / 3, 2 / 3])),
]
for obtained, expected in zip(g.vertex_enumeration(), expected_equilibria):
for s1, s2 in zip(obtained, expected):
self.assertTrue(
all(np.isclose(s1, s2)),
msg="obtained: {} !=expected: {}".format(obtained, expected),
)
def test_lemke_howson_for_bi_matrix(self):
"""Test for the equilibria calculation using lemke howson"""
A = np.array([[160, 205, 44], [175, 180, 45], [201, 204, 50], [120, 207, 49]])
B = np.array([[2, 2, 2], [1, 0, 0], [3, 4, 1], [4, 1, 2]])
g = nash.Game(A, B)
expected_equilibria = (
np.array([0, 0, 3 / 4, 1 / 4]),
np.array([1 / 28, 27 / 28, 0]),
)
equilibria = g.lemke_howson(initial_dropped_label=4)
for eq, expected in zip(equilibria, expected_equilibria):
self.assertTrue(all(np.isclose(eq, expected)))
def test_particular_lemke_howson_raises_warning(self):
"""
This is a degenerate game so the algorithm fails.
This was raised in
https://github.com/drvinceknight/Nashpy/issues/35
"""
A = np.array([[-1, -1, -1], [0, 0, 0], [-1, -1, -10000]])
B = np.array([[-1, -1, -1], [0, 0, 0], [-1, -1, -10000]])
game = nash.Game(A, B)
with warnings.catch_warnings(record=True) as w:
eqs = game.lemke_howson(initial_dropped_label=0)
self.assertEqual(len(eqs[0]), 2)
self.assertEqual(len(eqs[1]), 4)
self.assertGreater(len(w), 0)
self.assertEqual(w[-1].category, RuntimeWarning)
def test_lemke_howson_enumeration(self):
"""Test for the enumeration of equilibrium using Lemke Howson"""
A = np.array([[3, 1], [0, 2]])
B = np.array([[2, 1], [0, 3]])
g = nash.Game(A, B)
expected_equilibria = [
(np.array([1, 0]), np.array([1, 0])),
(np.array([0, 1]), np.array([0, 1])),
] * 2
equilibria = g.lemke_howson_enumeration()
for equilibrium, expected_equilibrium in zip(equilibria, expected_equilibria):
for strategy, expected_strategy in zip(equilibrium, expected_equilibrium):
self.assertTrue(all(np.isclose(strategy, expected_strategy)))
A = np.array([[3, 1], [1, 3]])
B = np.array([[1, 3], [3, 1]])
g = nash.Game(A, B)
expected_equilibria = [(np.array([1 / 2, 1 / 2]), np.array([1 / 2, 1 / 2]))] * 4
equilibria = g.lemke_howson_enumeration()
for equilibrium, expected_equilibrium in zip(equilibria, expected_equilibria):
for strategy, expected_strategy in zip(equilibrium, expected_equilibrium):
self.assertTrue(all(np.isclose(strategy, expected_strategy)))
def test_get_item(self):
"""Test solve indifference"""
A = np.array([[1, -1], [-1, 1]])
g = nash.Game(A)
row_strategy = [0, 1]
column_strategy = [1, 0]
self.assertTrue(
np.array_equal(g[row_strategy, column_strategy], np.array((-1, 1)))
)
row_strategy = [1 / 2, 1 / 2]
column_strategy = [1 / 2, 1 / 2]
self.assertTrue(
np.array_equal(g[row_strategy, column_strategy], np.array((0, 0)))
)
@given(
A=arrays(np.int8, (4, 5)),
B=arrays(np.int8, (4, 5)),
seed=integers(min_value=0, max_value=2 ** 32 - 1),
)
def test_fictitious_play(self, A, B, seed):
"""Test for the fictitious play algorithm"""
g = nash.Game(A, B)
iterations = 25
np.random.seed(seed)
expected_outcome = tuple(
nashpy.learning.fictitious_play.fictitious_play(
*g.payoff_matrices, iterations=iterations
)
)
np.random.seed(seed)
outcome = tuple(g.fictitious_play(iterations=iterations))
assert len(outcome) == iterations + 1
assert len(expected_outcome) == iterations + 1
for plays, expected_plays in zip(outcome, expected_outcome):
row_play, column_play = plays
expected_row_play, expected_column_play = expected_plays
assert np.array_equal(row_play, expected_row_play)
assert np.array_equal(column_play, expected_column_play)
# assert expected_outcome == outcome
@given(
A=arrays(np.int8, (4, 3), elements=integers(1, 20)),
B=arrays(np.int8, (4, 3), elements=integers(1, 20)),
seed=integers(min_value=0, max_value=2 ** 32 - 1),
)
def test_stochastic_fictitious_play(self, A, B, seed):
"""Test for the stochastic fictitious play algorithm"""
np.random.seed(seed)
iterations = 10
g = nash.Game(A, B)
expected_outcome = tuple(
nashpy.learning.stochastic_fictitious_play.stochastic_fictitious_play(
*g.payoff_matrices, iterations=iterations
)
)
np.random.seed(seed)
outcome = tuple(g.stochastic_fictitious_play(iterations=iterations))
assert len(outcome) == iterations + 1
assert len(expected_outcome) == iterations + 1
for (plays, distributions), (
expected_plays,
expected_distributions,
) in zip(outcome, expected_outcome):
row_play, column_play = plays
expected_row_play, expected_column_play = expected_plays
row_dist, column_dist = distributions
expected_row_dist, expected_column_dist = expected_distributions
assert np.allclose(column_dist, expected_column_dist)
assert np.allclose(row_dist, expected_row_dist)
assert np.allclose(column_play, expected_column_play)
assert np.allclose(row_play, expected_row_play)
def test_replicator_dynamics(self):
"""Test for the replicator dynamics algorithm"""
A = np.array([[3, 2], [4, 1]])
game = nash.Game(A)
y0 = np.array([0.9, 0.1])
timepoints = np.linspace(0, 10, 100)
xs = game.replicator_dynamics(y0, timepoints)
expected_xs = np.array([[0.50449178, 0.49550822]])
assert np.allclose(xs[-1], expected_xs)
def test_replicator_dynamics_5x5(self):
"""Test for the replicator dynamics algorithm with a 5x5 matrix"""
A = np.array(
[
[3, 2, 4, 2, 3],
[5, 1, 1, 3, 2],
[6, 2, 3, 2, 1],
[1, 3, 4, 7, 2],
[1, 4, 4, 1, 3],
]
)
game = nash.Game(A)
y0 = np.array([0.1, 0.1, 0.3, 0.2, 0.3])
timepoints = np.linspace(0, 10, 100)
xs = game.replicator_dynamics(y0, timepoints)
expected_xs = np.array(
[
[
-5.35867454e-13,
-2.93213324e-11,
-9.66651436e-13,
1.00000000e00,
-1.78136715e-14,
]
]
)
assert np.allclose(xs[-1], expected_xs)
def test_asymmetric_replicator_dynamics(self):
"""Test for asymmetric replicator dynamics algorithm"""
A = np.array([[5, 1], [4, 2]])
B = np.array([[3, 5], [2, 1]])
game = nash.Game(A, B)
x0 = np.array([0.6, 0.4])
y0 = np.array([0.5, 0.5])
timepoints = np.linspace(0, 100, 100)
xs_A, xs_B = game.asymmetric_replicator_dynamics(
x0=x0, y0=y0, timepoints=timepoints
)
expected_A_xs = np.array([0.17404745, 0.82595255])
expected_B_xs = np.array([0.28121086, 0.71878914])
assert np.allclose(xs_A[-1], expected_A_xs)
assert np.allclose(xs_B[-1], expected_B_xs)
def test_is_best_response(self):
"""Test for the best response check"""
A = np.array([[3, 0], [5, 1]])
B = np.array([[3, 5], [0, 1]])
game = nash.Game(A, B)
sigma_r = np.array([0, 1])
sigma_c = np.array([1, 0])
row_check, column_check = game.is_best_response(sigma_r, sigma_c)
assert row_check is True
assert column_check is False
|
import sys
from optparse import OptionParser
import ROOT
from ROOT import *
ROOT.gROOT.SetBatch(ROOT.kTRUE)
from Plotting_Header import *
import numpy as np
from array import array  # needed for the array("d", ...) calls below
import timeit
parser = OptionParser()
parser.add_option("-d", "--dir", action="store", dest="pwd", default="/home/storage/andrzejnovak/80Trees/", help="directory where files are")
parser.add_option("-s", "--stacked", action="store_true", dest="stack", default=False, help="stack distributions",)
parser.add_option("-n", "--normed", action="store_true", dest="norm", default=False, help="Normalize distributions to 1",)
parser.add_option("-v", "--variables", action="store", dest="vars", default=["ZPRIMEM"], help="[LepPt,METPt, TAGPt, TPRIMEM, WPt, lepJetPt, TRPIMEM, ZPRIMEM]")
parser.add_option("--all", action="store_true", dest="all", default=False, help="[LepPt,METPt, TAGPt, TPRIMEM, WPt, lepJetPt, TRPIMEM, ZPRIMEM]")
parser.add_option("-c", "--lepcut", action="store", dest="lepcut", default="", help="lep cut")
parser.add_option("--diff", action="store_true", dest="diff", default=False, help="difference",)
(options, args) = parser.parse_args()
if options.all == True: options.vars = ["LepPt", "METPt", "TAGPt", "WPt", "lepJetPt", "TPRIMEM", "ZPRIMEM"]
def varplot(varname, xmin=None, xmax=None, pwd="/home/storage/andrzejnovak/March/", cut= "(LepPt<50.)", normed=False, stacked=False):
if xmax is None:
tosize = TFile("root://cmsxrootd.fnal.gov/"+pwd+"SM.root")
xmax, xmin = tosize.Get("tree_T1").GetMaximum(varname)*0.4, tosize.Get("tree_T1").GetMinimum(varname)
VAR = [varname, 50, xmin, xmax]
YT = "events / "+str((VAR[3]-VAR[2])/VAR[1])+" GeV"
XT = varname+" (GeV)"
H = "Type 1 (e) control region"
Cut = cut
treename="tree_T1"
lumi = str(12700)
W = TH1F("W", "", VAR[1], VAR[2], VAR[3])
W.SetLineColor(kGreen-6)
W.SetLineWidth(2)
quickplot(pwd+"WJetsToQQ.root", treename, W, VAR[0], Cut, "("+lumi+"*weight)")
#for w in ["100To200", "200To400", "400To600", "600To800", "800To1200", "1200To2500", "2500ToInf"]:
for w in ["200To400", "400To600", "600To800"]:
quickplot(pwd+"WJetsToLNu_HT-"+w+".root", treename, W, VAR[0], Cut, "("+lumi+"*weight)")
TT = TH1F("TT", "", VAR[1], VAR[2], VAR[3])
TT.SetLineColor(kRed-4)
TT.SetLineWidth(2)
quickplot(pwd+"TT.root", treename,TT, VAR[0], Cut, "("+lumi+"*weight)")
ntt = TT.Integral()
nw = W.Integral()
return nw, ntt
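# rate(window): for the given selection window, scan TAGTau32 cut values from
# 0.60 to 0.99, compare the W-jet yield after each cut to the uncut yield in the
# same window, and print the cuts whose relative rate exceeds 30% (cf. the
# "Wjet 30% threshold" plot titles below); the uncut W-jet yield is returned.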
def rate(window):
n = 50
xs, cuts, rates = [], [], []
for i in range(60,100,1):
c = window + "&TAGTau32<"
taucut = float(i)/100
cuts.append(c+str(taucut))
xs.append(taucut)
wtot, tttot = varplot("TAGM", pwd=options.pwd, cut=window, stacked=options.stack, normed=options.norm)
if wtot < 5: return float(0)
for i, cut in enumerate(cuts):
a, b = varplot("TAGM", pwd=options.pwd,cut =cut, stacked=options.stack, normed=options.norm)
rate = a/wtot
rates.append(rate)
if rate > 0.3:
print rate, xs[i]
return wtot #xs[i]
def matrix(lepcut=""):
mbins = [[90,110],[110,130],[130,150],[150,170],[170,190],[190,210],[210,230],[230,300]]
ptbins = [[380,430],[430,480],[480,530],[530,580],[580,630],[630,680],[680,730],[780,830],[830,880],[880,930],[930,980],[980,2000]]
mbins = [[150,170],[170,190],[190,210],[210,230],[230,300]]
ptbins = [[580,630],[630,680],[680,730],[780,830],[830,880],[880,930],[930,980],[980,2000]]
m, pt, tau32 = [], [], []
start_time = timeit.default_timer()
for i,mbin in enumerate(mbins):
window = lepcut+"TAGM>"+str(mbin[0])+"&TAGM<"+str(mbin[1])
for j, ptbin in enumerate(ptbins):
newwindow = window + "&TAGPt>"+str(ptbin[0])+"&TAGPt<"+str(ptbin[1])
print newwindow
tau32.append(rate(newwindow))
pt.append(ptbin[0])
m.append(mbin[0])
t = timeit.default_timer()
perloop = (t-start_time)/((i+1)*(j+1))
print perloop*(len(mbins)*len(ptbins)-(i+1)*(j+1))/60, "min left"
return m, pt, tau32
def makeplot(lepcut=""):
m, pt, tau32 = matrix(lepcut=lepcut)
x = array("d", m)
y = array("d", pt)
z = array("d", tau32)
C = TCanvas("C", "", 800, 800)
C.cd()
P = TGraph2D(len(m), x,y,z)
P.SetTitle("Wjet 30\% threshold")
#P.SetMarkerStyle(21)
P.GetXaxis().SetTitle("TAGM")
P.GetYaxis().SetTitle("TAGPt")
P.GetZaxis().SetTitle("TagTau32 Cut")
P.Draw("surf1")
C.SaveAs("outputs/"+lepcut+"Wcount.png")
C.SaveAs("outputs/"+lepcut+"Wcount.root")
def makediff():
m, pt, tau32_1 = matrix(lepcut="LepType>0&")
x = array("d", m)
y = array("d", pt)
m, pt, tau32_2 = matrix(lepcut="LepType<0&")
z = array("d", np.array(tau32_1)-np.array(tau32_2))
C = TCanvas("C", "", 800, 800)
C.cd()
P = TGraph2D(len(m), x,y,z)
P.SetTitle("Wjet 30\% threshold")
#P.SetMarkerStyle(21)
P.GetXaxis().SetTitle("TAGM")
P.GetYaxis().SetTitle("TAGPt")
P.GetZaxis().SetTitle("TagTau32 Cut")
P.Draw("surf1")
C.SaveAs("outputs/"+"ElMUDiff"+"Wcut.png")
C.SaveAs("outputs/"+"ElMUDiff"+"Wcut.root")
if options.diff == True:
makediff()
else:
makeplot(options.lepcut)
|
"""
Test and verify the memory-mapped command bridge (memmap_command_bridge).
Unlike the actual designs this test does not use a generic peripheral
but instead a specific peripheral / slave is used for each bus type,
other tests verify the generic ability.
"""
from __future__ import print_function, division
from random import randint
import traceback
import myhdl
from myhdl import (Signal, intbv, always_seq, always_comb,
instance, delay, StopSimulation,)
from rhea import Global, Clock, Reset, Signals
from rhea.system import Barebone, FIFOBus
from rhea.cores.memmap import command_bridge
from rhea.cores.fifo import fifo_fast
from rhea.utils import CommandPacket
from rhea.utils.test import run_testbench, tb_args, tb_default_args
@myhdl.block
def memmap_peripheral_bb(clock, reset, bb):
""" Emulate Barebone memory-mapped reads and writes"""
assert isinstance(bb, Barebone)
mem = {}
@always_seq(clock.posedge, reset=reset)
def beh_writes():
addr = int(bb.address)
bb.done.next = not (bb.write or bb.read)
if bb.write:
mem[addr] = int(bb.write_data)
@always_comb
def beh_reads():
addr = int(bb.address)
if bb.read:
if addr not in mem:
mem[addr] = 0
bb.read_data.next = mem[addr]
else:
bb.read_data.next = 0
return beh_writes, beh_reads
def test_memmap_command_bridge(args=None):
nloops = 37
args = tb_default_args(args)
clock = Clock(0, frequency=50e6)
reset = Reset(0, active=1, async=False)
glbl = Global(clock, reset)
fifobus = FIFOBus()
memmap = Barebone(glbl, data_width=32, address_width=28)
fifobus.clock = clock
@myhdl.block
def bench_command_bridge():
tbclk = clock.gen()
tbdut = command_bridge(glbl, fifobus, memmap)
readpath, writepath = FIFOBus(), FIFOBus()
readpath.clock = writepath.clock = clock
tbmap = fifobus.assign_read_write_paths(readpath, writepath)
tbftx = fifo_fast(glbl, writepath) # user write path
tbfrx = fifo_fast(glbl, readpath) # user read path
# @todo: add other bus types
tbmem = memmap_peripheral_bb(clock, reset, memmap)
# save the data read ...
read_value = []
@instance
def tbstim():
yield reset.pulse(32)
fifobus.read.next = False
fifobus.write.next = False
assert not fifobus.full
assert fifobus.empty
assert fifobus.read_data == 0
fifobus.write_data.next = 0
try:
# test a single address
pkt = CommandPacket(True, 0x0000)
yield pkt.put(readpath)
yield pkt.get(writepath, read_value, [0])
pkt = CommandPacket(False, 0x0000, [0x5555AAAA])
yield pkt.put(readpath)
yield pkt.get(writepath, read_value, [0x5555AAAA])
# test a bunch of random addresses
for ii in range(nloops):
randaddr = randint(0, (2**20)-1)
randdata = randint(0, (2**32)-1)
pkt = CommandPacket(False, randaddr, [randdata])
yield pkt.put(readpath)
yield pkt.get(writepath, read_value, [randdata])
except Exception as err:
print("Error: {}".format(str(err)))
traceback.print_exc()
yield delay(2000)
raise StopSimulation
wp_read, wp_valid = Signals(bool(0), 2)
wp_read_data = Signal(intbv(0)[8:])
wp_empty, wp_full = Signals(bool(0), 2)
@always_comb
def tbmon():
wp_read.next = writepath.read
wp_read_data.next = writepath.read_data
wp_valid.next = writepath.read_valid
wp_full.next = writepath.full
wp_empty.next = writepath.empty
return tbclk, tbdut, tbmap, tbftx, tbfrx, tbmem, tbstim, tbmon
run_testbench(bench_command_bridge, args=args)
if __name__ == '__main__':
test_memmap_command_bridge(tb_args())
|
# -*- coding: utf-8 -*-
"""
Text normalization
"""
import re
import warnings
from pythainlp import thai_above_vowels as above_v
from pythainlp import thai_below_vowels as below_v
from pythainlp import thai_follow_vowels as follow_v
from pythainlp import thai_lead_vowels as lead_v
from pythainlp import thai_tonemarks as tonemarks
_DANGLING_CHARS = f"{above_v}{below_v}{tonemarks}\u0e3a\u0e4c\u0e4d\u0e4e"
_RE_REMOVE_DANGLINGS = re.compile(f"^[{_DANGLING_CHARS}]+")
_ZERO_WIDTH_CHARS = "\u200b\u200c" # ZWSP, ZWNJ
_REORDER_PAIRS = [
("\u0e40\u0e40", "\u0e41"), # Sara E + Sara E -> Sara Ae
(
f"([{tonemarks}\u0e4c]+)([{above_v}{below_v}]+)",
"\\2\\1",
), # TONE/Thanthakhat + ABV/BLW VOWEL -> ABV/BLW VOWEL + TONE/Thanthakhat
(
f"\u0e4d([{tonemarks}]*)\u0e32",
"\\1\u0e33",
), # Nikhahit + TONEMARK + Sara Aa -> TONEMARK + Sara Am
(
f"([{follow_v}]+)([{tonemarks}]+)",
"\\2\\1",
), # FOLLOW VOWEL + TONEMARK+ -> TONEMARK + FOLLOW VOWEL
]
# VOWELS + Phinthu, Thanthakhat, Nikhahit, Yamakkan
_NOREPEAT_CHARS = (
f"{follow_v}{lead_v}{above_v}{below_v}\u0e3a\u0e4c\u0e4d\u0e4e"
)
_NOREPEAT_PAIRS = list(
zip([f"({ch}[ ]*)+{ch}" for ch in _NOREPEAT_CHARS], _NOREPEAT_CHARS)
)
_RE_TONEMARKS = re.compile(f"[{tonemarks}]+")
_RE_REMOVE_NEWLINES = re.compile("[ \n]*\n[ \n]*")
def _last_char(matchobj): # to be used with _RE_NOREPEAT_TONEMARKS
return matchobj.group(0)[-1]
def remove_dangling(text: str) -> str:
"""
Remove Thai non-base characters at the beginning of text.
This is a common "typo", especially for input field in a form,
as these non-base characters can be visually hidden from user
who may accidentally typed them in.
A character to be removed should be both:
* tone mark, above vowel, below vowel, or non-base sign AND
* located at the beginning of the text
:param str text: input text
:return: text without dangling Thai characters at the beginning
:rtype: str
"""
return _RE_REMOVE_DANGLINGS.sub("", text)
def remove_dup_spaces(text: str) -> str:
"""
Remove duplicate spaces. Replace multiple spaces with one space.
Multiple newline characters and empty lines will be replaced
with one newline character.
:param str text: input text
:return: text without duplicated spaces and newlines
:rtype: str
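    :Example:
    ::
        from pythainlp.util import remove_dup_spaces
        remove_dup_spaces("  hello    world  ")
        # output: hello world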
"""
while " " in text:
text = text.replace(" ", " ")
text = _RE_REMOVE_NEWLINES.sub("\n", text)
text = text.strip()
return text
def remove_tonemark(text: str) -> str:
"""
Remove all Thai tone marks from the text.
Thai script has four tone marks indicating four tones as follows:
* Down tone (Thai: ไม้เอก _่ )
* Falling tone (Thai: ไม้โท _้ )
* High tone (Thai: ไม้ตรี _๊ )
* Rising tone (Thai: ไม้จัตวา _๋ )
Putting wrong tone mark is a common mistake in Thai writing.
    By removing tone marks from the string, it can be used for
    approximate string matching.
:param str text: input text
:return: text without Thai tone marks
:rtype: str
:Example:
::
        from pythainlp.util import remove_tonemark
        remove_tonemark('สองพันหนึ่งร้อยสี่สิบเจ็ดล้านสี่แสนแปดหมื่นสามพันหกร้อยสี่สิบเจ็ด')
# output: สองพันหนึงรอยสีสิบเจ็ดลานสีแสนแปดหมืนสามพันหกรอยสีสิบเจ็ด
"""
for ch in tonemarks:
while ch in text:
text = text.replace(ch, "")
return text
def remove_zw(text: str) -> str:
"""
Remove zero-width characters.
These non-visible characters may cause unexpected result from the
user's point of view. Removing them can make string matching more robust.
Characters to be removed:
* Zero-width space (ZWSP)
    * Zero-width non-joiner (ZWNJ)
:param str text: input text
:return: text without zero-width characters
:rtype: str
"""
for ch in _ZERO_WIDTH_CHARS:
while ch in text:
text = text.replace(ch, "")
return text
def reorder_vowels(text: str) -> str:
"""
Reorder vowels and tone marks to the standard logical order/spelling.
Characters in input text will be reordered/transformed,
according to these rules:
* Sara E + Sara E -> Sara Ae
* Nikhahit + Sara Aa -> Sara Am
* tone mark + non-base vowel -> non-base vowel + tone mark
* follow vowel + tone mark -> tone mark + follow vowel
:param str text: input text
:return: text with vowels and tone marks in the standard logical order
:rtype: str
"""
for pair in _REORDER_PAIRS:
text = re.sub(pair[0], pair[1], text)
return text
def remove_repeat_vowels(text: str) -> str:
"""
Remove repeating vowels, tone marks, and signs.
This function will call reorder_vowels() first, to make sure that
double Sara E will be converted to Sara Ae and not be removed.
:param str text: input text
:return: text without repeating Thai vowels, tone marks, and signs
:rtype: str
"""
text = reorder_vowels(text)
for pair in _NOREPEAT_PAIRS:
text = re.sub(pair[0], pair[1], text)
# remove repeating tone marks, use last tone mark
text = _RE_TONEMARKS.sub(_last_char, text)
return text
def normalize(text: str) -> str:
"""
Normalize and clean Thai text with normalizing rules as follows:
* Remove zero-width spaces
* Remove duplicate spaces
* Reorder tone marks and vowels to standard order/spelling
* Remove duplicate vowels and signs
* Remove duplicate tone marks
* Remove dangling non-base characters at the beginning of text
normalize() simply call remove_zw(), remove_dup_spaces(),
remove_repeat_vowels(), and remove_dangling(), in that order.
If a user wants to customize the selection or the order of rules
to be applied, they can choose to call those functions by themselves.
Note: for Unicode normalization, see unicodedata.normalize().
:param str text: input text
    :return: normalized text according to the rules
:rtype: str
:Example:
::
from pythainlp.util import normalize
normalize('เเปลก') # starts with two Sara E
# output: แปลก
normalize('นานาาา')
# output: นานา
"""
text = remove_zw(text)
text = remove_dup_spaces(text)
text = remove_repeat_vowels(text)
text = remove_dangling(text)
return text
def delete_tone(text: str) -> str:
"""
DEPRECATED: Please use remove_tonemark().
"""
warnings.warn(
"delete_tone is deprecated, use remove_tonemark instead",
DeprecationWarning,
)
return remove_tonemark(text)
|
#import all of the things we will be using
from django.db import models
from tagging.fields import TagField
# to help with translation of field names
from django.utils.translation import ugettext_lazy as _
# to have a generic foreign key for any model
from django.contrib.contenttypes import generic
# stores model info so this can be applied to any model
from django.contrib.contenttypes.models import ContentType
class Book(models.Model):
"""
The details of a Book
"""
# fields that describe this book
name = models.CharField(_('name'), max_length=48)
isbn = models.CharField(_('isbn'), max_length=16)
url = models.URLField(_('url'), verify_exists=False, blank=True)
description = models.TextField(_('description'))
# to add to any model
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object = generic.GenericForeignKey('content_type',
'object_id')
# for the list of tags for this book
tags = TagField()
# misc fields
    deleted = models.BooleanField(default=False)
created = models.DateTimeField(auto_now_add=True)
# so that {{book.get_absolute_url}} outputs the whole url
@models.permalink
def get_absolute_url(self):
return ("book_details", [self.pk])
# outputs name when printing this object as a string
def __unicode__(self):
return self.name
|
# Copyright 2006 Lukas Lalinsky
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import ctypes
from quodlibet.util import load_library
try:
_libxine, name = load_library(["libxine.so.2", "libxine.so.1"])
except OSError as e:
raise ImportError(e)
if name.endswith("2"):
_version = 2
else:
_version = 1
class xine_event_t(ctypes.Structure):
if _version == 1:
_fields_ = [
('type', ctypes.c_int),
('stream', ctypes.c_void_p),
('data', ctypes.c_void_p),
('data_length', ctypes.c_int),
]
elif _version == 2:
_fields_ = [
('stream', ctypes.c_void_p),
('data', ctypes.c_void_p),
('data_length', ctypes.c_int),
('type', ctypes.c_int),
]
class xine_ui_message_data_t(ctypes.Structure):
_fields_ = [
('compatibility_num_buttons', ctypes.c_int),
('compatibility_str_len', ctypes.c_int),
('compatibility_str', 256 * ctypes.c_char),
('type', ctypes.c_int),
('explanation', ctypes.c_int),
('num_parameters', ctypes.c_int),
('parameters', ctypes.c_void_p),
('messages', ctypes.c_char),
]
# event listener callback type
xine_event_listener_cb_t = ctypes.CFUNCTYPE(
ctypes.c_void_p, ctypes.c_void_p,
ctypes.POINTER(xine_event_t))
# event types
XINE_EVENT_UI_PLAYBACK_FINISHED = 1
XINE_EVENT_UI_CHANNELS_CHANGED = 2
XINE_EVENT_UI_SET_TITLE = 3
XINE_EVENT_UI_MESSAGE = 4
XINE_EVENT_FRAME_FORMAT_CHANGE = 5
XINE_EVENT_AUDIO_LEVEL = 6
XINE_EVENT_QUIT = 7
XINE_EVENT_PROGRESS = 8
# stream parameters
XINE_PARAM_SPEED = 1 # see below
XINE_PARAM_AV_OFFSET = 2 # unit: 1/90000 sec
XINE_PARAM_AUDIO_CHANNEL_LOGICAL = 3 # -1 => auto, -2 => off
XINE_PARAM_SPU_CHANNEL = 4
XINE_PARAM_VIDEO_CHANNEL = 5
XINE_PARAM_AUDIO_VOLUME = 6 # 0..100
XINE_PARAM_AUDIO_MUTE = 7 # 1=>mute, 0=>unmute
XINE_PARAM_AUDIO_COMPR_LEVEL = 8 # <100=>off, % compress otherw
XINE_PARAM_AUDIO_AMP_LEVEL = 9 # 0..200, 100=>100% (default)
XINE_PARAM_AUDIO_REPORT_LEVEL = 10 # 1=>send events, 0=> don't
XINE_PARAM_VERBOSITY = 11 # control console output
XINE_PARAM_SPU_OFFSET = 12 # unit: 1/90000 sec
XINE_PARAM_IGNORE_VIDEO = 13 # disable video decoding
XINE_PARAM_IGNORE_AUDIO = 14 # disable audio decoding
XINE_PARAM_IGNORE_SPU = 15 # disable spu decoding
XINE_PARAM_BROADCASTER_PORT = 16 # 0: disable, x: server port
XINE_PARAM_METRONOM_PREBUFFER = 17 # unit: 1/90000 sec
XINE_PARAM_EQ_30HZ = 18 # equalizer gains -100..100
XINE_PARAM_EQ_60HZ = 19 # equalizer gains -100..100
XINE_PARAM_EQ_125HZ = 20 # equalizer gains -100..100
XINE_PARAM_EQ_250HZ = 21 # equalizer gains -100..100
XINE_PARAM_EQ_500HZ = 22 # equalizer gains -100..100
XINE_PARAM_EQ_1000HZ = 23 # equalizer gains -100..100
XINE_PARAM_EQ_2000HZ = 24 # equalizer gains -100..100
XINE_PARAM_EQ_4000HZ = 25 # equalizer gains -100..100
XINE_PARAM_EQ_8000HZ = 26 # equalizer gains -100..100
XINE_PARAM_EQ_16000HZ = 27 # equalizer gains -100..100
XINE_PARAM_AUDIO_CLOSE_DEVICE = 28 # force closing audio device
XINE_PARAM_AUDIO_AMP_MUTE = 29 # 1=>mute, 0=>unmute
XINE_PARAM_FINE_SPEED = 30 # 1.000.000 => normal speed
XINE_PARAM_EARLY_FINISHED_EVENT = 31 # send event when demux finish
XINE_PARAM_GAPLESS_SWITCH = 32 # next stream only gapless swi
XINE_PARAM_DELAY_FINISHED_EVENT = 33 # 1/10sec,0=>disable,-1=>forev
# speeds
XINE_SPEED_PAUSE = 0
XINE_SPEED_SLOW_4 = 1
XINE_SPEED_SLOW_2 = 2
XINE_SPEED_NORMAL = 4
XINE_SPEED_FAST_2 = 8
XINE_SPEED_FAST_4 = 16
# metadata
XINE_META_INFO_TITLE = 0
XINE_META_INFO_COMMENT = 1
XINE_META_INFO_ARTIST = 2
XINE_META_INFO_GENRE = 3
XINE_META_INFO_ALBUM = 4
XINE_META_INFO_YEAR = 5
XINE_META_INFO_VIDEOCODEC = 6
XINE_META_INFO_AUDIOCODEC = 7
XINE_META_INFO_SYSTEMLAYER = 8
XINE_META_INFO_INPUT_PLUGIN = 9
# statuses
XINE_STATUS_IDLE = 0
XINE_STATUS_STOP = 1
XINE_STATUS_PLAY = 2
XINE_STATUS_QUIT = 3
XINE_MSG_NO_ERROR = 0 # (messages to UI)
XINE_MSG_GENERAL_WARNING = 1 # (warning message)
XINE_MSG_UNKNOWN_HOST = 2 # (host name)
XINE_MSG_UNKNOWN_DEVICE = 3 # (device name)
XINE_MSG_NETWORK_UNREACHABLE = 4 # none
XINE_MSG_CONNECTION_REFUSED = 5 # (host name)
XINE_MSG_FILE_NOT_FOUND = 6 # (file name or mrl)
XINE_MSG_READ_ERROR = 7 # (device/file/mrl)
XINE_MSG_LIBRARY_LOAD_ERROR = 8 # (library/decoder)
XINE_MSG_ENCRYPTED_SOURCE = 9 # none
XINE_MSG_SECURITY = 10 # (security message)
XINE_MSG_AUDIO_OUT_UNAVAILABLE = 11 # none
XINE_MSG_PERMISSION_ERROR = 12 # (file name or mrl)
XINE_MSG_FILE_EMPTY = 13 # file is empty
XINE_MSG_AUTHENTICATION_NEEDED = 14 # (mrl, likely http); added in 1.2
# xine_t *xine_new(void)
xine_new = _libxine.xine_new
xine_new.restype = ctypes.c_void_p
# void xine_init(xine_t *self)
xine_init = _libxine.xine_init
xine_init.argtypes = [ctypes.c_void_p]
# void xine_exit(xine_t *self)
xine_exit = _libxine.xine_exit
xine_exit.argtypes = [ctypes.c_void_p]
# void xine_config_load(xine_t *self, const char *cfg_filename)
xine_config_load = _libxine.xine_config_load
xine_config_load.argtypes = [ctypes.c_void_p, ctypes.c_char_p]
# const char *xine_get_homedir(void)
xine_get_homedir = _libxine.xine_get_homedir
xine_get_homedir.restype = ctypes.c_char_p
# xine_audio_port_t *xine_open_audio_driver(xine_t *self, const char *id,
# void *data)
xine_open_audio_driver = _libxine.xine_open_audio_driver
xine_open_audio_driver.argtypes = [ctypes.c_void_p,
ctypes.c_char_p, ctypes.c_void_p]
xine_open_audio_driver.restype = ctypes.c_void_p
# void xine_close_audio_driver(xine_t *self, xine_audio_port_t *driver)
xine_close_audio_driver = _libxine.xine_close_audio_driver
xine_close_audio_driver.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
# xine_stream_t *xine_stream_new(xine_t *self,
# xine_audio_port_t *ao, xine_video_port_t *vo)
xine_stream_new = _libxine.xine_stream_new
xine_stream_new.argtypes = [ctypes.c_void_p, ctypes.c_void_p,
ctypes.c_void_p]
xine_stream_new.restype = ctypes.c_void_p
# void xine_close(xine_stream_t *stream)
xine_close = _libxine.xine_close
xine_close.argtypes = [ctypes.c_void_p]
# int xine_open (xine_stream_t *stream, const char *mrl)
xine_open = _libxine.xine_open
xine_open.argtypes = [ctypes.c_void_p, ctypes.c_char_p]
xine_open.restype = ctypes.c_int
# int xine_play(xine_stream_t *stream, int start_pos, int start_time)
xine_play = _libxine.xine_play
xine_play.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_int]
xine_play.restype = ctypes.c_int
# void xine_stop(xine_stream_t *stream)
xine_stop = _libxine.xine_stop
xine_stop.argtypes = [ctypes.c_void_p]
# void xine_dispose(xine_stream_t *stream)
xine_dispose = _libxine.xine_dispose
xine_dispose.argtypes = [ctypes.c_void_p]
# xine_event_queue_t *xine_event_new_queue(xine_stream_t *stream)
xine_event_new_queue = _libxine.xine_event_new_queue
xine_event_new_queue.argtypes = [ctypes.c_void_p]
xine_event_new_queue.restype = ctypes.c_void_p
# void xine_event_dispose_queue(xine_event_queue_t *queue)
xine_event_dispose_queue = _libxine.xine_event_dispose_queue
xine_event_dispose_queue.argtypes = [ctypes.c_void_p]
# void xine_event_create_listener_thread(xine_event_queue_t *queue,
# xine_event_listener_cb_t callback,
# void *user_data)
xine_event_create_listener_thread = _libxine.xine_event_create_listener_thread
xine_event_create_listener_thread.argtypes = [ctypes.c_void_p,
ctypes.c_void_p, ctypes.c_void_p]
xine_usec_sleep = _libxine.xine_usec_sleep
xine_usec_sleep.argtypes = [ctypes.c_int]
xine_set_param = _libxine.xine_set_param
xine_set_param.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_int]
xine_get_param = _libxine.xine_get_param
xine_get_param.argtypes = [ctypes.c_void_p, ctypes.c_int]
xine_get_param.restype = ctypes.c_int
xine_get_meta_info = _libxine.xine_get_meta_info
xine_get_meta_info.argtypes = [ctypes.c_void_p, ctypes.c_int]
xine_get_meta_info.restype = ctypes.c_char_p
xine_get_status = _libxine.xine_get_status
xine_get_status.argtypes = [ctypes.c_void_p]
xine_get_status.restype = ctypes.c_int
xine_get_pos_length = _libxine.xine_get_pos_length
xine_get_pos_length.argtypes = [ctypes.c_void_p,
ctypes.POINTER(ctypes.c_int), ctypes.POINTER(ctypes.c_int),
ctypes.POINTER(ctypes.c_int)]
xine_get_version_string = _libxine.xine_get_version_string
xine_get_version_string.restype = ctypes.c_char_p
xine_get_file_extensions = _libxine.xine_get_file_extensions
xine_get_file_extensions.argtypes = [ctypes.c_void_p]
xine_get_file_extensions.restype = ctypes.c_char_p
xine_get_mime_types = _libxine.xine_get_mime_types
xine_get_mime_types.argtypes = [ctypes.c_void_p]
xine_get_mime_types.restype = ctypes.c_char_p
xine_list_input_plugins = _libxine.xine_list_input_plugins
xine_list_input_plugins.argtypes = [ctypes.c_void_p]
xine_list_input_plugins.restype = ctypes.POINTER(ctypes.c_char_p)
xine_check_version = _libxine.xine_check_version
xine_check_version.argtypes = [ctypes.c_int, ctypes.c_int,
ctypes.c_int]
xine_check_version.restype = ctypes.c_int
_callbacks = []
def xine_event_create_listener_thread(queue, callback, user_data):
cb = xine_event_listener_cb_t(callback)
_callbacks.append(cb)
_libxine.xine_event_create_listener_thread(queue, cb, user_data)
def xine_get_pos_length(stream):
_pos_stream = ctypes.c_int()
_pos_time = ctypes.c_int()
_length_time = ctypes.c_int()
result = _libxine.xine_get_pos_length(stream, ctypes.byref(_pos_stream),
ctypes.byref(_pos_time), ctypes.byref(_length_time))
if result:
return _pos_stream.value, _pos_time.value, _length_time.value
else:
return 0, 0, 0
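
# --- Hedged usage sketch (not part of the original bindings) ---
# A minimal example of driving the bindings above. The "auto" audio driver id,
# the config path, and the MRL are assumptions for illustration only; they are
# not verified against any particular libxine build.
if __name__ == "__main__":
    xine = xine_new()
    xine_config_load(xine, xine_get_homedir() + b"/.xine/config")
    xine_init(xine)
    audio_port = xine_open_audio_driver(xine, b"auto", None)  # assumed driver id
    stream = xine_stream_new(xine, audio_port, None)
    if xine_open(stream, b"/path/to/some/file.ogg"):  # hypothetical MRL
        xine_play(stream, 0, 0)
        xine_usec_sleep(2 * 1000 * 1000)  # let it play for ~2 seconds
        print(xine_get_pos_length(stream))  # (pos_stream, pos_time_ms, length_ms)
        xine_stop(stream)
        xine_close(stream)
    xine_dispose(stream)
    xine_close_audio_driver(xine, audio_port)
    xine_exit(xine)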
|
from unittest import TestCase
import unittest
import ezyrb.vtkhandler as vh
import numpy as np
import filecmp
import os
class TestVtkHandler(TestCase):
def test_vtk_instantiation(self):
vtk_handler = vh.VtkHandler()
def test_vtk_default_infile_member(self):
vtk_handler = vh.VtkHandler()
        assert vtk_handler.infile is None
def test_vtk_default_extension_member(self):
vtk_handler = vh.VtkHandler()
assert vtk_handler.extension == '.vtk'
def test_vtk_parse_failing_filename_type(self):
vtk_handler = vh.VtkHandler()
with self.assertRaises(TypeError):
output = vtk_handler.parse(5.2)
def test_vtk_parse_failing_output_name_type(self):
vtk_handler = vh.VtkHandler()
with self.assertRaises(TypeError):
output = vtk_handler.parse('tests/test_datasets/matlab_output_test.mat', 5.2)
def test_vtk_parse_failing_check_extension(self):
vtk_handler = vh.VtkHandler()
with self.assertRaises(ValueError):
output = vtk_handler.parse('tests/test_datasets/matlab_output_test.mat')
def test_vtk_parse_infile(self):
vtk_handler = vh.VtkHandler()
output = vtk_handler.parse('tests/test_datasets/matlab_field_test_bin.vtk', 'Pressure')
assert vtk_handler.infile == 'tests/test_datasets/matlab_field_test_bin.vtk'
def test_vtk_parse_shape(self):
vtk_handler = vh.VtkHandler()
output = vtk_handler.parse('tests/test_datasets/matlab_field_test_bin.vtk', 'Pressure')
assert output.shape == (2500, 1)
def test_vtk_parse_check_data_format_1(self):
vtk_handler = vh.VtkHandler()
output = vtk_handler.parse('tests/test_datasets/matlab_field_test_bin.vtk', 'Pressure')
assert vtk_handler.cell_data == False
def test_vtk_parse_check_data_format_2(self):
vtk_handler = vh.VtkHandler()
output = vtk_handler.parse('tests/test_datasets/openfoam_output_test.vtk', 'p')
assert vtk_handler.cell_data == True
def test_vtk_parse_coords_1(self):
vtk_handler = vh.VtkHandler()
output = vtk_handler.parse('tests/test_datasets/matlab_field_test_bin.vtk', 'Pressure')
np.testing.assert_almost_equal(output[33][0], 3.7915385)
def test_vtk_parse_coords_2(self):
vtk_handler = vh.VtkHandler()
output = vtk_handler.parse('tests/test_datasets/matlab_field_test_bin.vtk', 'Pressure')
np.testing.assert_almost_equal(output[0][0], 8.2308226)
def test_vtk_write_failing_filename_type(self):
vtk_handler = vh.VtkHandler()
output = vtk_handler.parse('tests/test_datasets/matlab_field_test_bin.vtk', 'Pressure')
with self.assertRaises(TypeError):
vtk_handler.write(output, 4.)
def test_vtk_write_failing_check_extension(self):
vtk_handler = vh.VtkHandler()
output = vtk_handler.parse('tests/test_datasets/matlab_field_test_bin.vtk', 'Pressure')
with self.assertRaises(ValueError):
vtk_handler.write(output, 'tests/test_datasets/matlab_output_test_out.mat')
def test_vtk_write_failing_infile_instantiation(self):
vtk_handler = vh.VtkHandler()
output = np.zeros((40, 3))
with self.assertRaises(RuntimeError):
vtk_handler.write(output, 'tests/test_datasets/matlab_field_test_out.vtk')
def test_vtk_write_default_output_name(self):
vtk_handler = vh.VtkHandler()
output = vtk_handler.parse('tests/test_datasets/matlab_field_test_bin.vtk', 'Pressure')
outfilename = 'tests/test_datasets/matlab_field_test_out_bin.vtk'
vtk_handler.write(output, outfilename, write_bin=True)
os.remove(outfilename)
def test_vtk_write_comparison_bin_1(self):
import vtk
vtk_handler = vh.VtkHandler()
output = vtk_handler.parse('tests/test_datasets/matlab_field_test_bin.vtk', 'Pressure')
output[0] = [1.1]
output[1] = [1.1]
output[2] = [1.1]
output[11] = [1.1]
output[12] = [1.1]
output[13] = [1.1]
output[30] = [1.1]
output[31] = [1.1]
output[32] = [1.1]
outfilename = 'tests/test_datasets/matlab_field_test_out_bin.vtk'
if vtk.VTK_MAJOR_VERSION <= 5:
outfilename_expected = 'tests/test_datasets/matlab_field_test_out_true_bin_version5.vtk'
else:
outfilename_expected = 'tests/test_datasets/matlab_field_test_out_true_bin_version6.vtk'
vtk_handler.write(output, outfilename, 'Pressure', write_bin=True)
self.assertTrue(filecmp.cmp(outfilename, outfilename_expected))
os.remove(outfilename)
def test_vtk_write_comparison_bin_ascii(self):
import vtk
vtk_handler = vh.VtkHandler()
output = vtk_handler.parse('tests/test_datasets/openfoam_output_test.vtk', 'p')
output[0] = [1.1]
output[1] = [1.1]
output[2] = [1.1]
output[11] = [1.1]
output[12] = [1.1]
output[13] = [1.1]
output[30] = [1.1]
output[31] = [1.1]
output[32] = [1.1]
outfilename = 'tests/test_datasets/openfoam_output_test_out.vtk'
if vtk.VTK_MAJOR_VERSION <= 5:
outfilename_expected = 'tests/test_datasets/openfoam_output_test_out_true_version5.vtk'
else:
outfilename_expected = 'tests/test_datasets/openfoam_output_test_out_true_version6.vtk'
vtk_handler.write(output, outfilename, 'p')
self.assertTrue(filecmp.cmp(outfilename, outfilename_expected))
os.remove(outfilename)
def test_vtk_write_comparison_ascii(self):
import vtk
vtk_handler = vh.VtkHandler()
output = vtk_handler.parse('tests/test_datasets/matlab_field_test_ascii.vtk', 'Pressure')
output[0] = [1.1]
output[1] = [1.1]
output[2] = [1.1]
output[11] = [1.1]
output[12] = [1.1]
output[13] = [1.1]
output[30] = [1.1]
output[31] = [1.1]
output[32] = [1.1]
outfilename = 'tests/test_datasets/matlab_field_test_out_ascii.vtk'
if vtk.VTK_MAJOR_VERSION <= 5:
outfilename_expected = 'tests/test_datasets/matlab_field_test_out_true_ascii_version5.vtk'
else:
outfilename_expected = 'tests/test_datasets/matlab_field_test_out_true_ascii_version6.vtk'
vtk_handler.write(output, outfilename, 'Pressure')
self.assertTrue(filecmp.cmp(outfilename, outfilename_expected))
os.remove(outfilename)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2020-11-10 00:02
from __future__ import unicode_literals
from django.db import migrations
import django.db.models.deletion
import sentry.db.models.fields.foreignkey
from sentry.models.platformexternalissue import PlatformExternalIssue
from sentry.utils.query import RangeQuerySetWrapperWithProgressBar
def backfill_platformexternalissue_project_id(apps, schema_editor):
"""
Fill the PlatformExternalIssue.project_id from related Group.project_id.
"""
PlatformExternalIssue = apps.get_model("sentry", "PlatformExternalIssue")
Group = apps.get_model("sentry", "Group")
external_issues_with_group = PlatformExternalIssue.objects.filter(
project_id__isnull=True
).select_related("group")
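    # RangeQuerySetWrapperWithProgressBar iterates the queryset in primary-key
    # ranges (step=1000 below), so the backfill never loads the whole table
    # into memory and reports progress as it goes.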
for external_issue in RangeQuerySetWrapperWithProgressBar(
queryset=external_issues_with_group, step=1000
):
try:
PlatformExternalIssue.objects.filter(id=external_issue.id).update(
project_id=external_issue.group.project_id
)
except Group.DoesNotExist:
pass
class Migration(migrations.Migration):
# This flag is used to mark that a migration shouldn't be automatically run in
# production. We set this to True for operations that we think are risky and want
# someone from ops to run manually and monitor.
# General advice is that if in doubt, mark your migration as `is_dangerous`.
# Some things you should always mark as dangerous:
# - Large data migrations. Typically we want these to be run manually by ops so that
# they can be monitored. Since data migrations will now hold a transaction open
# this is even more important.
# - Adding columns to highly active tables, even ones that are NULL.
is_dangerous = True
# This flag is used to decide whether to run this migration in a transaction or not.
# By default we prefer to run in a transaction, but for migrations where you want
# to `CREATE INDEX CONCURRENTLY` this needs to be set to False. Typically you'll
# want to create an index concurrently when adding one to an existing table.
atomic = False
dependencies = [
("sentry", "0126_make_platformexternalissue_group_id_flexfk"),
]
operations = [
migrations.RunPython(backfill_platformexternalissue_project_id, migrations.RunPython.noop),
]
|
# -*- coding: latin-1 -*-
from south.db import db
from django.db import models
from pykeg.core.models import *
class Migration:
def forwards(self, orm):
# Adding model 'ThermoSummaryLog'
db.create_table('core_thermosummarylog', (
('id', orm['core.thermosummarylog:id']),
('sensor', orm['core.thermosummarylog:sensor']),
('date', orm['core.thermosummarylog:date']),
('period', orm['core.thermosummarylog:period']),
('num_readings', orm['core.thermosummarylog:num_readings']),
('min_temp', orm['core.thermosummarylog:min_temp']),
('max_temp', orm['core.thermosummarylog:max_temp']),
('mean_temp', orm['core.thermosummarylog:mean_temp']),
))
db.send_create_signal('core', ['ThermoSummaryLog'])
def backwards(self, orm):
# Deleting model 'ThermoSummaryLog'
db.delete_table('core_thermosummarylog')
models = {
'auth.group': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)"},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'core.authenticationtoken': {
'Meta': {'unique_together': "(('auth_device', 'token_value'),)"},
'auth_device': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'expires': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pin': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'token_value': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'core.bac': {
'bac': ('django.db.models.fields.FloatField', [], {}),
'drink': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Drink']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'rectime': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'core.beerstyle': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'core.beertype': {
'abv': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'brewer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Brewer']"}),
'calories_oz': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'carbs_oz': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'style': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.BeerStyle']"})
},
'core.brewer': {
'comment': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
'distribution': ('django.db.models.fields.CharField', [], {'default': "'unknown'", 'max_length': '128'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'origin_city': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'origin_country': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128'}),
'origin_state': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128'}),
'url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'core.config': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'core.drink': {
'endtime': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keg': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Keg']", 'null': 'True', 'blank': 'True'}),
'starttime': ('django.db.models.fields.DateTimeField', [], {}),
'status': ('django.db.models.fields.CharField', [], {'default': "'valid'", 'max_length': '128'}),
'ticks': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'volume_ml': ('django.db.models.fields.FloatField', [], {})
},
'core.drinkingsessiongroup': {
'endtime': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'starttime': ('django.db.models.fields.DateTimeField', [], {})
},
'core.keg': {
'description': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'enddate': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'origcost': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'size': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.KegSize']"}),
'startdate': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.BeerType']"})
},
'core.kegsize': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'volume_ml': ('django.db.models.fields.FloatField', [], {})
},
'core.kegtap': {
'current_keg': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Keg']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_tick_delta': ('django.db.models.fields.PositiveIntegerField', [], {'default': '100'}),
'meter_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'ml_per_tick': ('django.db.models.fields.FloatField', [], {'default': '0.45454545454545453'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'temperature_sensor': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.ThermoSensor']", 'null': 'True', 'blank': 'True'})
},
'core.relaylog': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'time': ('django.db.models.fields.DateTimeField', [], {})
},
'core.thermolog': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sensor': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.ThermoSensor']"}),
'temp': ('django.db.models.fields.FloatField', [], {}),
'time': ('django.db.models.fields.DateTimeField', [], {})
},
'core.thermosensor': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nice_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'raw_name': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'core.thermosummarylog': {
'date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_temp': ('django.db.models.fields.FloatField', [], {}),
'mean_temp': ('django.db.models.fields.FloatField', [], {}),
'min_temp': ('django.db.models.fields.FloatField', [], {}),
'num_readings': ('django.db.models.fields.PositiveIntegerField', [], {}),
'period': ('django.db.models.fields.CharField', [], {'default': "'daily'", 'max_length': '64'}),
'sensor': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.ThermoSensor']"})
},
'core.userdrinkingsession': {
'endtime': ('django.db.models.fields.DateTimeField', [], {}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.DrinkingSessionGroup']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'starttime': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'core.userdrinkingsessionassignment': {
'drink': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Drink']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'session': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.UserDrinkingSession']"})
},
'core.userlabel': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'labelname': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'core.userpicture': {
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'core.userprofile': {
'gender': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'labels': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['core.UserLabel']"}),
'mugshot': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.UserPicture']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'}),
'weight': ('django.db.models.fields.FloatField', [], {})
}
}
complete_apps = ['core']
|
'''
In this lab, you will learn how to:
Package up TensorFlow model
Run training locally
Run training on cloud
Deploy model to cloud
Invoke model to carry out predictions
'''
'''
Scaling up ML using Cloud ML Engine
In this notebook, we take a previously developed TensorFlow model to predict taxifare rides and package it up so that it can be run in Cloud MLE. For now, we'll run this on a small dataset. The model that was developed is rather simplistic, and therefore, the accuracy of the model is not great either. However, this notebook illustrates how to package up a TensorFlow model to run it within Cloud ML.
Later in the course, we will look at ways to make a more effective machine learning model.
Environment variables for project and bucket
Note that:
Your project id is the unique string that identifies your project (not the project name). You can find this from the GCP Console dashboard's Home page. My dashboard reads: Project ID: cloud-training-demos
Cloud training often involves saving and restoring model files. If you don't have a bucket already, I suggest that you create one from the GCP console (because it will dynamically check whether the bucket name you want is available). A common pattern is to prefix the bucket name with the project id, so that it is unique. Also, for cost reasons, you might want to use a single-region bucket.
Change the cell below to reflect your Project ID and bucket name.
'''
import os
PROJECT = 'cloud-training-demos' # REPLACE WITH YOUR PROJECT ID
BUCKET = 'cloud-training-demos-ml' # REPLACE WITH YOUR BUCKET NAME
REGION = 'us-central1' # REPLACE WITH YOUR BUCKET REGION e.g. us-central1
# for bash
os.environ['PROJECT'] = PROJECT
os.environ['BUCKET'] = BUCKET
os.environ['REGION'] = REGION
os.environ['TFVERSION'] = '1.7' # Tensorflow version
%%bash
gcloud config set project $PROJECT
gcloud config set compute/region $REGION
%%bash
PROJECT_ID=$PROJECT
AUTH_TOKEN=$(gcloud auth print-access-token)
SVC_ACCOUNT=$(curl -X GET -H "Content-Type: application/json" \
-H "Authorization: Bearer $AUTH_TOKEN" \
https://ml.googleapis.com/v1/projects/${PROJECT_ID}:getConfig \
| python -c "import json; import sys; response = json.load(sys.stdin); \
print response['serviceAccount']")
echo "Authorizing the Cloud ML Service account $SVC_ACCOUNT to access files in $BUCKET"
gsutil -m defacl ch -u $SVC_ACCOUNT:R gs://$BUCKET
gsutil -m acl ch -u $SVC_ACCOUNT:R -r gs://$BUCKET # error message (if bucket is empty) can be ignored
gsutil -m acl ch -u $SVC_ACCOUNT:W gs://$BUCKET
'''
Packaging up the code
Take your code and put into a standard Python package structure. model.py and task.py contain the Tensorflow code from earlier (explore the directory structure).
'''
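'''
As a rough sketch (the authoritative listing comes from the `find` command
below, and the file roles here are inferred rather than verified), a trainer
package for Cloud ML Engine typically looks like:
    taxifare/
        setup.py          # only needed if you build a pip-installable package
        trainer/
            __init__.py
            model.py      # input functions, feature columns, train/eval logic
            task.py       # parses command-line flags and calls into model.py
'''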
!find taxifare
!cat taxifare/trainer/model.py
'''
Find absolute paths to your data
Note the absolute paths below. /content is mapped in Datalab to where the home icon takes you
'''
%%bash
echo $PWD
rm -rf $PWD/taxi_trained
head -1 $PWD/taxi-train.csv
head -1 $PWD/taxi-valid.csv
'''
Running the Python module from the command-line
'''
%%bash
rm -rf taxifare.tar.gz taxi_trained
export PYTHONPATH=${PYTHONPATH}:${PWD}/taxifare
python -m trainer.task \
--train_data_paths="${PWD}/taxi-train*" \
--eval_data_paths=${PWD}/taxi-valid.csv \
--output_dir=${PWD}/taxi_trained \
--train_steps=1000 --job-dir=./tmp
%%bash
ls $PWD/taxi_trained/export/exporter/
%%writefile ./test.json
{"pickuplon": -73.885262,"pickuplat": 40.773008,"dropofflon": -73.987232,"dropofflat": 40.732403,"passengers": 2}
%%bash
model_dir=$(ls ${PWD}/taxi_trained/export/exporter)
gcloud ml-engine local predict \
--model-dir=${PWD}/taxi_trained/export/exporter/${model_dir} \
--json-instances=./test.json
'''
Running locally using gcloud
'''
%%bash
rm -rf taxifare.tar.gz taxi_trained
gcloud ml-engine local train \
--module-name=trainer.task \
--package-path=${PWD}/taxifare/trainer \
-- \
--train_data_paths=${PWD}/taxi-train.csv \
--eval_data_paths=${PWD}/taxi-valid.csv \
--train_steps=1000 \
--output_dir=${PWD}/taxi_trained
'''
When I ran it (due to random seeds, your results will be different), the average_loss (Mean Squared Error) on the evaluation dataset was 187, meaning that the RMSE was around 13.
'''
from google.datalab.ml import TensorBoard
TensorBoard().start('./taxi_trained')
for pid in TensorBoard.list()['pid']:
TensorBoard().stop(pid)
print 'Stopped TensorBoard with pid {}'.format(pid)
'''
If the above step (to stop TensorBoard) appears stalled, just move on to the next step. You don't need to wait for it to return.
'''
!ls $PWD/taxi_trained
'''
Submit training job using gcloud
First copy the training data to the cloud. Then, launch a training job.
After you submit the job, go to the cloud console (http://console.cloud.google.com) and select Machine Learning | Jobs to monitor progress.
Note: Don't be concerned if the notebook stalls (with a blue progress bar) or returns with an error about being unable to refresh auth tokens. This is a long-lived Cloud job and work is going on in the cloud. Use the Cloud Console link (above) to monitor the job.
https://cloud.google.com/ml-engine/docs/tensorflow/getting-started-training-prediction
'''
%%bash
echo $BUCKET
gsutil -m rm -rf gs://${BUCKET}/taxifare/smallinput/
gsutil -m cp ${PWD}/*.csv gs://${BUCKET}/taxifare/smallinput/
%%bash
OUTDIR=gs://${BUCKET}/taxifare/smallinput/taxi_trained
JOBNAME=lab3a_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
gcloud ml-engine jobs submit training $JOBNAME \
--region=$REGION \
--module-name=trainer.task \
--package-path=${PWD}/taxifare/trainer \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--scale-tier=BASIC \
--runtime-version=$TFVERSION \
-- \
--train_data_paths="gs://${BUCKET}/taxifare/smallinput/taxi-train*" \
--eval_data_paths="gs://${BUCKET}/taxifare/smallinput/taxi-valid*" \
--output_dir=$OUTDIR \
--train_steps=10000
'''
Job [lab3a_180607_192245] submitted successfully.
Your job is still active. You may view the status of your job with the command (on the Google Cloud console):
$ gcloud ml-engine jobs describe lab3a_180607_192245
or continue streaming the logs with the command:
$ gcloud ml-engine jobs stream-logs lab3a_180607_192245
Use the Cloud Console link to monitor the job and do NOT proceed until the job is done.
'''
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"""Replace existing User FKs with their PootleProfile counterparts.
        This will create temporary DB fields to copy data over and avoid
integrity errors while we are at it.
"""
db.add_column('evernote_auth_evernoteaccount', 'new_user_id',
models.IntegerField(null=True, default=None))
db.execute('''
UPDATE evernote_auth_evernoteaccount as EA
JOIN auth_user AS U on EA.user_id = U.id
JOIN pootle_app_pootleprofile PP on U.id = PP.user_id
SET EA.new_user_id = PP.id;
''')
db.delete_unique('evernote_auth_evernoteaccount', 'user_id')
db.execute('''
UPDATE evernote_auth_evernoteaccount SET user_id = new_user_id;
''')
db.create_unique('evernote_auth_evernoteaccount', 'user_id')
db.delete_column('evernote_auth_evernoteaccount', 'new_user_id')
def backwards(self, orm):
"Write your backwards methods here."
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'evernote_auth.evernoteaccount': {
'Meta': {'object_name': 'EvernoteAccount'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'evernote_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'evernote_account'", 'unique': 'True', 'to': u"orm['auth.User']"}),
'user_autocreated': ('django.db.models.fields.BooleanField', [], {})
},
'pootle_app.directory': {
'Meta': {'ordering': "['name']", 'object_name': 'Directory'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'child_dirs'", 'null': 'True', 'to': "orm['pootle_app.Directory']"}),
'pootle_path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
u'pootle_language.language': {
'Meta': {'ordering': "['code']", 'object_name': 'Language', 'db_table': "'pootle_app_language'"},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'directory': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['pootle_app.Directory']", 'unique': 'True'}),
'fullname': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nplurals': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'pluralequation': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'specialchars': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
u'pootle_profile.pootleprofile': {
'Meta': {'object_name': 'PootleProfile', 'db_table': "'pootle_app_pootleprofile'"},
'alt_src_langs': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'user_alt_src_langs'", 'blank': 'True', 'db_index': 'True', 'to': u"orm['pootle_language.Language']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'input_height': ('django.db.models.fields.SmallIntegerField', [], {'default': '5'}),
'rate': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'review_rate': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'score': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'ui_lang': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'unit_rows': ('django.db.models.fields.SmallIntegerField', [], {'default': '9'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['auth', 'pootle_profile', 'evernote_auth']
symmetrical = True
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import uuid
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import netutils
from oslo_utils import uuidutils
import six
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.engine import attributes
from heat.engine.clients import progress
from heat.engine import constraints
from heat.engine import function
from heat.engine import properties
from heat.engine import resource
from heat.engine.resources.openstack.neutron import subnet
from heat.engine.resources import stack_user
from heat.engine import support
from heat.rpc import api as rpc_api
cfg.CONF.import_opt('default_software_config_transport', 'heat.common.config')
cfg.CONF.import_opt('stack_scheduler_hints', 'heat.common.config')
LOG = logging.getLogger(__name__)
class Server(stack_user.StackUser):
PROPERTIES = (
NAME, IMAGE, BLOCK_DEVICE_MAPPING, BLOCK_DEVICE_MAPPING_V2,
FLAVOR, FLAVOR_UPDATE_POLICY, IMAGE_UPDATE_POLICY, KEY_NAME,
ADMIN_USER, AVAILABILITY_ZONE, SECURITY_GROUPS, NETWORKS,
SCHEDULER_HINTS, METADATA, USER_DATA_FORMAT, USER_DATA,
RESERVATION_ID, CONFIG_DRIVE, DISK_CONFIG, PERSONALITY,
ADMIN_PASS, SOFTWARE_CONFIG_TRANSPORT
) = (
'name', 'image', 'block_device_mapping', 'block_device_mapping_v2',
'flavor', 'flavor_update_policy', 'image_update_policy', 'key_name',
'admin_user', 'availability_zone', 'security_groups', 'networks',
'scheduler_hints', 'metadata', 'user_data_format', 'user_data',
'reservation_id', 'config_drive', 'diskConfig', 'personality',
'admin_pass', 'software_config_transport'
)
_BLOCK_DEVICE_MAPPING_KEYS = (
BLOCK_DEVICE_MAPPING_DEVICE_NAME, BLOCK_DEVICE_MAPPING_VOLUME_ID,
BLOCK_DEVICE_MAPPING_SNAPSHOT_ID,
BLOCK_DEVICE_MAPPING_VOLUME_SIZE,
BLOCK_DEVICE_MAPPING_DELETE_ON_TERM,
) = (
'device_name', 'volume_id',
'snapshot_id',
'volume_size',
'delete_on_termination',
)
_BLOCK_DEVICE_MAPPING_V2_KEYS = (
BLOCK_DEVICE_MAPPING_DEVICE_NAME,
BLOCK_DEVICE_MAPPING_VOLUME_ID,
BLOCK_DEVICE_MAPPING_IMAGE_ID,
BLOCK_DEVICE_MAPPING_SNAPSHOT_ID,
BLOCK_DEVICE_MAPPING_SWAP_SIZE,
BLOCK_DEVICE_MAPPING_DEVICE_TYPE,
BLOCK_DEVICE_MAPPING_DISK_BUS,
BLOCK_DEVICE_MAPPING_BOOT_INDEX,
BLOCK_DEVICE_MAPPING_VOLUME_SIZE,
BLOCK_DEVICE_MAPPING_DELETE_ON_TERM,
) = (
'device_name',
'volume_id',
'image_id',
'snapshot_id',
'swap_size',
'device_type',
'disk_bus',
'boot_index',
'volume_size',
'delete_on_termination',
)
_NETWORK_KEYS = (
NETWORK_UUID, NETWORK_ID, NETWORK_FIXED_IP, NETWORK_PORT,
) = (
'uuid', 'network', 'fixed_ip', 'port',
)
_SOFTWARE_CONFIG_FORMATS = (
HEAT_CFNTOOLS, RAW, SOFTWARE_CONFIG
) = (
'HEAT_CFNTOOLS', 'RAW', 'SOFTWARE_CONFIG'
)
_SOFTWARE_CONFIG_TRANSPORTS = (
POLL_SERVER_CFN, POLL_SERVER_HEAT, POLL_TEMP_URL, ZAQAR_MESSAGE
) = (
'POLL_SERVER_CFN', 'POLL_SERVER_HEAT', 'POLL_TEMP_URL', 'ZAQAR_MESSAGE'
)
ATTRIBUTES = (
NAME_ATTR, ADDRESSES, NETWORKS_ATTR, FIRST_ADDRESS,
INSTANCE_NAME, ACCESSIPV4, ACCESSIPV6, CONSOLE_URLS,
) = (
'name', 'addresses', 'networks', 'first_address',
'instance_name', 'accessIPv4', 'accessIPv6', 'console_urls',
)
properties_schema = {
NAME: properties.Schema(
properties.Schema.STRING,
_('Server name.'),
update_allowed=True
),
IMAGE: properties.Schema(
properties.Schema.STRING,
_('The ID or name of the image to boot with.'),
constraints=[
constraints.CustomConstraint('glance.image')
],
update_allowed=True
),
BLOCK_DEVICE_MAPPING: properties.Schema(
properties.Schema.LIST,
_('Block device mappings for this server.'),
schema=properties.Schema(
properties.Schema.MAP,
schema={
BLOCK_DEVICE_MAPPING_DEVICE_NAME: properties.Schema(
properties.Schema.STRING,
_('A device name where the volume will be '
'attached in the system at /dev/device_name. '
'This value is typically vda.'),
required=True
),
BLOCK_DEVICE_MAPPING_VOLUME_ID: properties.Schema(
properties.Schema.STRING,
_('The ID of the volume to boot from. Only one '
'of volume_id or snapshot_id should be '
'provided.'),
constraints=[
constraints.CustomConstraint('cinder.volume')
]
),
BLOCK_DEVICE_MAPPING_SNAPSHOT_ID: properties.Schema(
properties.Schema.STRING,
_('The ID of the snapshot to create a volume '
'from.'),
constraints=[
constraints.CustomConstraint('cinder.snapshot')
]
),
BLOCK_DEVICE_MAPPING_VOLUME_SIZE: properties.Schema(
properties.Schema.INTEGER,
_('The size of the volume, in GB. It is safe to '
'leave this blank and have the Compute service '
'infer the size.')
),
BLOCK_DEVICE_MAPPING_DELETE_ON_TERM: properties.Schema(
properties.Schema.BOOLEAN,
_('Indicate whether the volume should be deleted '
'when the server is terminated.')
),
},
)
),
BLOCK_DEVICE_MAPPING_V2: properties.Schema(
properties.Schema.LIST,
_('Block device mappings v2 for this server.'),
schema=properties.Schema(
properties.Schema.MAP,
schema={
BLOCK_DEVICE_MAPPING_DEVICE_NAME: properties.Schema(
properties.Schema.STRING,
_('A device name where the volume will be '
'attached in the system at /dev/device_name. '
'This value is typically vda.'),
),
BLOCK_DEVICE_MAPPING_VOLUME_ID: properties.Schema(
properties.Schema.STRING,
_('The volume_id can be boot or non-boot device '
'to the server.'),
constraints=[
constraints.CustomConstraint('cinder.volume')
]
),
BLOCK_DEVICE_MAPPING_IMAGE_ID: properties.Schema(
properties.Schema.STRING,
_('The ID of the image to create a volume from.'),
constraints=[
constraints.CustomConstraint('glance.image')
],
),
BLOCK_DEVICE_MAPPING_SNAPSHOT_ID: properties.Schema(
properties.Schema.STRING,
_('The ID of the snapshot to create a volume '
'from.'),
constraints=[
constraints.CustomConstraint('cinder.snapshot')
]
),
BLOCK_DEVICE_MAPPING_SWAP_SIZE: properties.Schema(
properties.Schema.INTEGER,
_('The size of the swap, in MB.')
),
BLOCK_DEVICE_MAPPING_DEVICE_TYPE: properties.Schema(
properties.Schema.STRING,
_('Device type: at the moment we can make distinction'
' only between disk and cdrom.'),
constraints=[
constraints.AllowedValues(['cdrom', 'disk']),
],
),
BLOCK_DEVICE_MAPPING_DISK_BUS: properties.Schema(
properties.Schema.STRING,
_('Bus of the device: hypervisor driver chooses a '
'suitable default if omitted.'),
constraints=[
constraints.AllowedValues(['ide', 'lame_bus',
'scsi', 'usb',
'virtio']),
],
),
BLOCK_DEVICE_MAPPING_BOOT_INDEX: properties.Schema(
properties.Schema.INTEGER,
_('Integer used for ordering the boot disks.'),
),
BLOCK_DEVICE_MAPPING_VOLUME_SIZE: properties.Schema(
properties.Schema.INTEGER,
_('Size of the block device in GB. If it is omitted, '
'hypervisor driver calculates size.'),
),
BLOCK_DEVICE_MAPPING_DELETE_ON_TERM: properties.Schema(
properties.Schema.BOOLEAN,
_('Indicate whether the volume should be deleted '
'when the server is terminated.')
),
},
),
support_status=support.SupportStatus(version='2015.1')
),
FLAVOR: properties.Schema(
properties.Schema.STRING,
_('The ID or name of the flavor to boot onto.'),
required=True,
update_allowed=True,
constraints=[
constraints.CustomConstraint('nova.flavor')
]
),
FLAVOR_UPDATE_POLICY: properties.Schema(
properties.Schema.STRING,
_('Policy on how to apply a flavor update; either by requesting '
'a server resize or by replacing the entire server.'),
default='RESIZE',
constraints=[
constraints.AllowedValues(['RESIZE', 'REPLACE']),
],
update_allowed=True
),
IMAGE_UPDATE_POLICY: properties.Schema(
properties.Schema.STRING,
_('Policy on how to apply an image-id update; either by '
'requesting a server rebuild or by replacing the entire server'),
default='REBUILD',
constraints=[
constraints.AllowedValues(['REBUILD', 'REPLACE',
'REBUILD_PRESERVE_EPHEMERAL']),
],
update_allowed=True
),
KEY_NAME: properties.Schema(
properties.Schema.STRING,
_('Name of keypair to inject into the server.'),
constraints=[
constraints.CustomConstraint('nova.keypair')
]
),
ADMIN_USER: properties.Schema(
properties.Schema.STRING,
_('Name of the administrative user to use on the server.'),
support_status=support.SupportStatus(
status=support.HIDDEN,
version='5.0.0',
message=_('The default cloud-init user set up for each image '
'(e.g. "ubuntu" for Ubuntu 12.04+, "fedora" for '
'Fedora 19+ and "cloud-user" for CentOS/RHEL 6.5).'),
previous_status=support.SupportStatus(
status=support.DEPRECATED,
version='2014.1',
previous_status=support.SupportStatus(version='2013.2')
)
)
),
AVAILABILITY_ZONE: properties.Schema(
properties.Schema.STRING,
_('Name of the availability zone for server placement.')
),
SECURITY_GROUPS: properties.Schema(
properties.Schema.LIST,
_('List of security group names or IDs. Cannot be used if '
'neutron ports are associated with this server; assign '
'security groups to the ports instead.'),
default=[]
),
NETWORKS: properties.Schema(
properties.Schema.LIST,
_('An ordered list of nics to be added to this server, with '
'information about connected networks, fixed ips, port etc.'),
schema=properties.Schema(
properties.Schema.MAP,
schema={
NETWORK_UUID: properties.Schema(
properties.Schema.STRING,
_('ID of network to create a port on.'),
support_status=support.SupportStatus(
status=support.HIDDEN,
version='5.0.0',
previous_status=support.SupportStatus(
status=support.DEPRECATED,
message=_('Use property %s.') % NETWORK_ID,
version='2014.1'
)
),
constraints=[
constraints.CustomConstraint('neutron.network')
]
),
NETWORK_ID: properties.Schema(
properties.Schema.STRING,
_('Name or ID of network to create a port on.'),
constraints=[
constraints.CustomConstraint('neutron.network')
]
),
NETWORK_FIXED_IP: properties.Schema(
properties.Schema.STRING,
_('Fixed IP address to specify for the port '
'created on the requested network.'),
constraints=[
constraints.CustomConstraint('ip_addr')
]
),
NETWORK_PORT: properties.Schema(
properties.Schema.STRING,
_('ID of an existing port to associate with this '
'server.'),
constraints=[
constraints.CustomConstraint('neutron.port')
]
),
},
),
update_allowed=True
),
SCHEDULER_HINTS: properties.Schema(
properties.Schema.MAP,
_('Arbitrary key-value pairs specified by the client to help '
'boot a server.')
),
METADATA: properties.Schema(
properties.Schema.MAP,
_('Arbitrary key/value metadata to store for this server. Both '
'keys and values must be 255 characters or less. Non-string '
'values will be serialized to JSON (and the serialized '
'string must be 255 characters or less).'),
update_allowed=True
),
USER_DATA_FORMAT: properties.Schema(
properties.Schema.STRING,
_('How the user_data should be formatted for the server. For '
'HEAT_CFNTOOLS, the user_data is bundled as part of the '
'heat-cfntools cloud-init boot configuration data. For RAW '
'the user_data is passed to Nova unmodified. '
'For SOFTWARE_CONFIG user_data is bundled as part of the '
'software config data, and metadata is derived from any '
'associated SoftwareDeployment resources.'),
default=HEAT_CFNTOOLS,
constraints=[
constraints.AllowedValues(_SOFTWARE_CONFIG_FORMATS),
]
),
SOFTWARE_CONFIG_TRANSPORT: properties.Schema(
properties.Schema.STRING,
_('How the server should receive the metadata required for '
'software configuration. POLL_SERVER_CFN will allow calls to '
'the cfn API action DescribeStackResource authenticated with '
'the provided keypair. POLL_SERVER_HEAT will allow calls to '
'the Heat API resource-show using the provided keystone '
'credentials. POLL_TEMP_URL will create and populate a '
'Swift TempURL with metadata for polling.'),
default=cfg.CONF.default_software_config_transport,
constraints=[
constraints.AllowedValues(_SOFTWARE_CONFIG_TRANSPORTS),
]
),
USER_DATA: properties.Schema(
properties.Schema.STRING,
_('User data script to be executed by cloud-init.'),
default=''
),
RESERVATION_ID: properties.Schema(
properties.Schema.STRING,
_('A UUID for the set of servers being requested.')
),
CONFIG_DRIVE: properties.Schema(
properties.Schema.BOOLEAN,
_('If True, enable config drive on the server.')
),
DISK_CONFIG: properties.Schema(
properties.Schema.STRING,
_('Control how the disk is partitioned when the server is '
'created.'),
constraints=[
constraints.AllowedValues(['AUTO', 'MANUAL']),
]
),
PERSONALITY: properties.Schema(
properties.Schema.MAP,
_('A map of files to create/overwrite on the server upon boot. '
'Keys are file names and values are the file contents.'),
default={}
),
ADMIN_PASS: properties.Schema(
properties.Schema.STRING,
_('The administrator password for the server.'),
update_allowed=True
),
}
attributes_schema = {
NAME_ATTR: attributes.Schema(
_('Name of the server.'),
type=attributes.Schema.STRING
),
ADDRESSES: attributes.Schema(
_('A dict of all network addresses with corresponding port_id. '
'Each network will have two keys in dict, they are network '
'name and network id. '
'The port ID may be obtained through the following expression: '
'"{get_attr: [<server>, addresses, <network name_or_id>, 0, '
'port]}".'),
type=attributes.Schema.MAP
),
NETWORKS_ATTR: attributes.Schema(
_('A dict of assigned network addresses of the form: '
'{"public": [ip1, ip2...], "private": [ip3, ip4], '
'"public_uuid": [ip1, ip2...], "private_uuid": [ip3, ip4]}. '
'Each network will have two keys in dict, they are network '
'name and network id. '),
type=attributes.Schema.MAP
),
FIRST_ADDRESS: attributes.Schema(
_('Convenience attribute to fetch the first assigned network '
'address, or an empty string if nothing has been assigned at '
'this time. Result may not be predictable if the server has '
'addresses from more than one network.'),
support_status=support.SupportStatus(
status=support.HIDDEN,
version='5.0.0',
message=_('Use the networks attribute instead of '
'first_address. For example: "{get_attr: '
'[<server name>, networks, <network name>, 0]}"'),
previous_status=support.SupportStatus(
status=support.DEPRECATED,
version='2014.2',
previous_status=support.SupportStatus(version='2013.2')
)
)
),
INSTANCE_NAME: attributes.Schema(
_('AWS compatible instance name.'),
type=attributes.Schema.STRING
),
ACCESSIPV4: attributes.Schema(
_('The manually assigned alternative public IPv4 address '
'of the server.'),
type=attributes.Schema.STRING
),
ACCESSIPV6: attributes.Schema(
_('The manually assigned alternative public IPv6 address '
'of the server.'),
type=attributes.Schema.STRING
),
CONSOLE_URLS: attributes.Schema(
_("URLs of server's consoles. "
"To get a specific console type, the requested type "
"can be specified as parameter to the get_attr function, "
"e.g. get_attr: [ <server>, console_urls, novnc ]. "
"Currently supported types are "
"novnc, xvpvnc, spice-html5, rdp-html5, serial."),
support_status=support.SupportStatus(version='2015.1'),
type=attributes.Schema.MAP
),
}
    # Server host name is limited to 53 characters due to the typical default
    # linux HOST_NAME_MAX of 64, minus the .novalocal appended to the name
physical_resource_name_limit = 53
default_client_name = 'nova'
entity = 'servers'
def translation_rules(self):
return [properties.TranslationRule(
self.properties,
properties.TranslationRule.REPLACE,
source_path=[self.NETWORKS, self.NETWORK_ID],
value_name=self.NETWORK_UUID)]
def __init__(self, name, json_snippet, stack):
super(Server, self).__init__(name, json_snippet, stack)
if self.user_data_software_config():
self._register_access_key()
def _server_name(self):
name = self.properties[self.NAME]
if name:
return name
return self.physical_resource_name()
def _config_drive(self):
# This method is overridden by the derived CloudServer resource
return self.properties[self.CONFIG_DRIVE]
def _populate_deployments_metadata(self, meta):
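        # Seed the server metadata with os-collect-config connection details
        # for whichever software config transport is configured: Heat API
        # credentials, a Zaqar queue, CFN API keys, or a Swift TempURL.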
meta['deployments'] = meta.get('deployments', [])
if self.transport_poll_server_heat():
meta['os-collect-config'] = {'heat': {
'user_id': self._get_user_id(),
'password': self.password,
'auth_url': self.context.auth_url,
'project_id': self.stack.stack_user_project_id,
'stack_id': self.stack.identifier().stack_path(),
'resource_name': self.name}
}
if self.transport_zaqar_message():
queue_id = self.physical_resource_name()
self.data_set('metadata_queue_id', queue_id)
zaqar_plugin = self.client_plugin('zaqar')
zaqar = zaqar_plugin.create_for_tenant(
self.stack.stack_user_project_id)
queue = zaqar.queue(queue_id)
queue.post({'body': meta, 'ttl': zaqar_plugin.DEFAULT_TTL})
meta['os-collect-config'] = {'zaqar': {
'user_id': self._get_user_id(),
'password': self.password,
'auth_url': self.context.auth_url,
'project_id': self.stack.stack_user_project_id,
'queue_id': queue_id}
}
elif self.transport_poll_server_cfn():
meta['os-collect-config'] = {'cfn': {
'metadata_url': '%s/v1/' % cfg.CONF.heat_metadata_server_url,
'access_key_id': self.access_key,
'secret_access_key': self.secret_key,
'stack_name': self.stack.name,
'path': '%s.Metadata' % self.name}
}
elif self.transport_poll_temp_url():
container = self.physical_resource_name()
object_name = str(uuid.uuid4())
self.client('swift').put_container(container)
url = self.client_plugin('swift').get_temp_url(
container, object_name, method='GET')
put_url = self.client_plugin('swift').get_temp_url(
container, object_name)
self.data_set('metadata_put_url', put_url)
self.data_set('metadata_object_name', object_name)
meta['os-collect-config'] = {'request': {
'metadata_url': url}
}
self.client('swift').put_object(
container, object_name, jsonutils.dumps(meta))
self.metadata_set(meta)
def _register_access_key(self):
'''
Access is limited to this resource, which created the keypair
'''
def access_allowed(resource_name):
return resource_name == self.name
if self.transport_poll_server_cfn():
self.stack.register_access_allowed_handler(
self.access_key, access_allowed)
elif self.transport_poll_server_heat():
self.stack.register_access_allowed_handler(
self._get_user_id(), access_allowed)
def _create_transport_credentials(self):
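        # Create the credentials the selected transport needs: an ec2 keypair
        # for the CFN polling transport, or a stack user with a generated
        # password for the Heat API and Zaqar transports.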
if self.transport_poll_server_cfn():
self._create_user()
self._create_keypair()
elif (self.transport_poll_server_heat() or
self.transport_zaqar_message()):
self.password = uuid.uuid4().hex
self._create_user()
self._register_access_key()
@property
def access_key(self):
return self.data().get('access_key')
@property
def secret_key(self):
return self.data().get('secret_key')
@property
def password(self):
return self.data().get('password')
@password.setter
def password(self, password):
if password is None:
self.data_delete('password')
else:
self.data_set('password', password, True)
def user_data_raw(self):
return self.properties[self.USER_DATA_FORMAT] == self.RAW
def user_data_software_config(self):
return self.properties[
self.USER_DATA_FORMAT] == self.SOFTWARE_CONFIG
def transport_poll_server_cfn(self):
return self.properties[
self.SOFTWARE_CONFIG_TRANSPORT] == self.POLL_SERVER_CFN
def transport_poll_server_heat(self):
return self.properties[
self.SOFTWARE_CONFIG_TRANSPORT] == self.POLL_SERVER_HEAT
def transport_poll_temp_url(self):
return self.properties[
self.SOFTWARE_CONFIG_TRANSPORT] == self.POLL_TEMP_URL
def transport_zaqar_message(self):
return self.properties.get(
self.SOFTWARE_CONFIG_TRANSPORT) == self.ZAQAR_MESSAGE
def get_software_config(self, ud_content):
try:
sc = self.rpc_client().show_software_config(
self.context, ud_content)
return sc[rpc_api.SOFTWARE_CONFIG_CONFIG]
except Exception as ex:
self.rpc_client().ignore_error_named(ex, 'NotFound')
return ud_content
def handle_create(self):
security_groups = self.properties[self.SECURITY_GROUPS]
user_data_format = self.properties[self.USER_DATA_FORMAT]
ud_content = self.properties[self.USER_DATA]
if self.user_data_software_config() or self.user_data_raw():
if uuidutils.is_uuid_like(ud_content):
# attempt to load the userdata from software config
ud_content = self.get_software_config(ud_content)
metadata = self.metadata_get(True) or {}
if self.user_data_software_config():
self._create_transport_credentials()
self._populate_deployments_metadata(metadata)
userdata = self.client_plugin().build_userdata(
metadata,
ud_content,
instance_user=None,
user_data_format=user_data_format)
flavor = self.properties[self.FLAVOR]
availability_zone = self.properties[self.AVAILABILITY_ZONE]
image = self.properties[self.IMAGE]
if image:
image = self.client_plugin('glance').get_image_id(image)
flavor_id = self.client_plugin().get_flavor_id(flavor)
instance_meta = self.properties[self.METADATA]
if instance_meta is not None:
instance_meta = self.client_plugin().meta_serialize(
instance_meta)
scheduler_hints = self.properties[self.SCHEDULER_HINTS]
if cfg.CONF.stack_scheduler_hints:
if scheduler_hints is None:
scheduler_hints = {}
scheduler_hints['heat_root_stack_id'] = self.stack.root_stack_id()
scheduler_hints['heat_stack_id'] = self.stack.id
scheduler_hints['heat_stack_name'] = self.stack.name
scheduler_hints['heat_path_in_stack'] = self.stack.path_in_stack()
scheduler_hints['heat_resource_name'] = self.name
nics = self._build_nics(self.properties[self.NETWORKS])
block_device_mapping = self._build_block_device_mapping(
self.properties[self.BLOCK_DEVICE_MAPPING])
block_device_mapping_v2 = self._build_block_device_mapping_v2(
self.properties[self.BLOCK_DEVICE_MAPPING_V2])
reservation_id = self.properties[self.RESERVATION_ID]
disk_config = self.properties[self.DISK_CONFIG]
admin_pass = self.properties[self.ADMIN_PASS] or None
personality_files = self.properties[self.PERSONALITY]
key_name = self.properties[self.KEY_NAME]
server = None
try:
server = self.client().servers.create(
name=self._server_name(),
image=image,
flavor=flavor_id,
key_name=key_name,
security_groups=security_groups,
userdata=userdata,
meta=instance_meta,
scheduler_hints=scheduler_hints,
nics=nics,
availability_zone=availability_zone,
block_device_mapping=block_device_mapping,
block_device_mapping_v2=block_device_mapping_v2,
reservation_id=reservation_id,
config_drive=self._config_drive(),
disk_config=disk_config,
files=personality_files,
admin_pass=admin_pass)
finally:
# Avoid a race condition where the thread could be canceled
# before the ID is stored
if server is not None:
self.resource_id_set(server.id)
return server.id
def check_create_complete(self, server_id):
return self.client_plugin()._check_active(server_id)
def handle_check(self):
server = self.client().servers.get(self.resource_id)
status = self.client_plugin().get_status(server)
checks = [{'attr': 'status', 'expected': 'ACTIVE', 'current': status}]
self._verify_check_conditions(checks)
@classmethod
def _build_block_device_mapping(cls, bdm):
if not bdm:
return None
bdm_dict = {}
for mapping in bdm:
mapping_parts = []
snapshot_id = mapping.get(cls.BLOCK_DEVICE_MAPPING_SNAPSHOT_ID)
if snapshot_id:
mapping_parts.append(snapshot_id)
mapping_parts.append('snap')
else:
volume_id = mapping.get(cls.BLOCK_DEVICE_MAPPING_VOLUME_ID)
mapping_parts.append(volume_id)
mapping_parts.append('')
volume_size = mapping.get(cls.BLOCK_DEVICE_MAPPING_VOLUME_SIZE)
delete = mapping.get(cls.BLOCK_DEVICE_MAPPING_DELETE_ON_TERM)
if volume_size:
mapping_parts.append(str(volume_size))
else:
mapping_parts.append('')
if delete:
mapping_parts.append(str(delete))
device_name = mapping.get(cls.BLOCK_DEVICE_MAPPING_DEVICE_NAME)
bdm_dict[device_name] = ':'.join(mapping_parts)
return bdm_dict
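    # Illustrative sketch (assumed property spellings, not from the original
    # plugin): a v1 mapping with device_name 'vdb', snapshot_id 'snap-1234',
    # volume_size 10 and delete_on_termination True is flattened into the
    # legacy nova boot form {'vdb': 'snap-1234:snap:10:True'}; volume-backed
    # mappings use '<volume_id>:' (empty type field) in place of the
    # snapshot part.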
@classmethod
def _build_block_device_mapping_v2(cls, bdm_v2):
if not bdm_v2:
return None
bdm_v2_list = []
for mapping in bdm_v2:
bmd_dict = None
if mapping.get(cls.BLOCK_DEVICE_MAPPING_VOLUME_ID):
bmd_dict = {
'uuid': mapping.get(cls.BLOCK_DEVICE_MAPPING_VOLUME_ID),
'source_type': 'volume',
'destination_type': 'volume',
'boot_index': 0,
'delete_on_termination': False,
}
elif mapping.get(cls.BLOCK_DEVICE_MAPPING_SNAPSHOT_ID):
bmd_dict = {
'uuid': mapping.get(cls.BLOCK_DEVICE_MAPPING_SNAPSHOT_ID),
'source_type': 'snapshot',
'destination_type': 'volume',
'boot_index': 0,
'delete_on_termination': False,
}
elif mapping.get(cls.BLOCK_DEVICE_MAPPING_IMAGE_ID):
bmd_dict = {
'uuid': mapping.get(cls.BLOCK_DEVICE_MAPPING_IMAGE_ID),
'source_type': 'image',
'destination_type': 'volume',
'boot_index': 0,
'delete_on_termination': False,
}
elif mapping.get(cls.BLOCK_DEVICE_MAPPING_SWAP_SIZE):
bmd_dict = {
'source_type': 'blank',
'destination_type': 'local',
'boot_index': -1,
'delete_on_termination': True,
'guest_format': 'swap',
'volume_size': mapping.get(
cls.BLOCK_DEVICE_MAPPING_SWAP_SIZE),
}
update_props = (cls.BLOCK_DEVICE_MAPPING_DEVICE_NAME,
cls.BLOCK_DEVICE_MAPPING_DEVICE_TYPE,
cls.BLOCK_DEVICE_MAPPING_DISK_BUS,
cls.BLOCK_DEVICE_MAPPING_BOOT_INDEX,
cls.BLOCK_DEVICE_MAPPING_VOLUME_SIZE,
cls.BLOCK_DEVICE_MAPPING_DELETE_ON_TERM)
for update_prop in update_props:
if mapping.get(update_prop) is not None:
bmd_dict[update_prop] = mapping.get(update_prop)
if bmd_dict:
bdm_v2_list.append(bmd_dict)
return bdm_v2_list
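    # Illustrative sketch (assumed values): a v2 mapping that only names a
    # volume id produces a dict such as
    # {'uuid': 'vol-1234', 'source_type': 'volume',
    #  'destination_type': 'volume', 'boot_index': 0,
    #  'delete_on_termination': False},
    # with any of the update_props handled above copied over when present.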
def _build_nics(self, networks):
if not networks:
return None
nics = []
for net_data in networks:
nic_info = {}
net_identifier = (net_data.get(self.NETWORK_UUID) or
net_data.get(self.NETWORK_ID))
if net_identifier:
if self.is_using_neutron():
net_id = (self.client_plugin(
'neutron').resolve_network(
net_data, self.NETWORK_ID, self.NETWORK_UUID))
else:
net_id = (self.client_plugin(
'nova').get_nova_network_id(net_identifier))
nic_info['net-id'] = net_id
if net_data.get(self.NETWORK_FIXED_IP):
ip = net_data[self.NETWORK_FIXED_IP]
if netutils.is_valid_ipv6(ip):
nic_info['v6-fixed-ip'] = ip
else:
nic_info['v4-fixed-ip'] = ip
if net_data.get(self.NETWORK_PORT):
nic_info['port-id'] = net_data[self.NETWORK_PORT]
nics.append(nic_info)
return nics
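    # Illustrative sketch (assumed values): a template network entry with a
    # network reference and fixed_ip '10.0.0.5' becomes a nova NIC spec like
    # {'net-id': '<resolved network uuid>', 'v4-fixed-ip': '10.0.0.5'},
    # while a port-based entry becomes {'port-id': '<port uuid>'}.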
def _add_port_for_address(self, server):
"""Method adds port id to list of addresses.
This method is used only for resolving attributes.
"""
nets = copy.deepcopy(server.addresses)
ifaces = server.interface_list()
ip_mac_mapping_on_port_id = dict(((iface.fixed_ips[0]['ip_address'],
iface.mac_addr), iface.port_id)
for iface in ifaces)
for net_name in nets:
for addr in nets[net_name]:
addr['port'] = ip_mac_mapping_on_port_id.get(
(addr['addr'], addr['OS-EXT-IPS-MAC:mac_addr']))
return self._extend_networks(nets)
def _extend_networks(self, networks):
"""Method adds same networks with replaced name on network id.
This method is used only for resolving attributes.
"""
nets = copy.deepcopy(networks)
for key in list(nets.keys()):
try:
net_id = self.client_plugin().get_net_id_by_label(key)
except (exception.NovaNetworkNotFound,
exception.PhysicalResourceNameAmbiguity):
net_id = None
if net_id:
nets[net_id] = nets[key]
return nets
def _resolve_attribute(self, name):
if name == self.FIRST_ADDRESS:
return self.client_plugin().server_to_ipaddress(
self.resource_id) or ''
if name == self.NAME_ATTR:
return self._server_name()
try:
server = self.client().servers.get(self.resource_id)
except Exception as e:
self.client_plugin().ignore_not_found(e)
return ''
if name == self.ADDRESSES:
return self._add_port_for_address(server)
if name == self.NETWORKS_ATTR:
return self._extend_networks(server.networks)
if name == self.INSTANCE_NAME:
return getattr(server, 'OS-EXT-SRV-ATTR:instance_name', None)
if name == self.ACCESSIPV4:
return server.accessIPv4
if name == self.ACCESSIPV6:
return server.accessIPv6
if name == self.CONSOLE_URLS:
return self.client_plugin('nova').get_console_urls(server)
def add_dependencies(self, deps):
super(Server, self).add_dependencies(deps)
# Depend on any Subnet in this template with the same
# network_id as the networks attached to this server.
        # It is not known which subnet a server might be assigned
        # to, so all subnets in a network should be created before
        # the servers in that network.
nets = self.properties[self.NETWORKS]
if not nets:
return
for res in six.itervalues(self.stack):
if res.has_interface('OS::Neutron::Subnet'):
subnet_net = (res.properties.get(subnet.Subnet.NETWORK_ID)
or res.properties.get(subnet.Subnet.NETWORK))
for net in nets:
# worry about network_id because that could be the match
# assigned to the subnet as well and could have been
# created by this stack. Regardless, the server should
# still wait on the subnet.
net_id = (net.get(self.NETWORK_ID) or
net.get(self.NETWORK_UUID))
if net_id and net_id == subnet_net:
deps += (self, res)
break
def _get_network_matches(self, old_networks, new_networks):
        # normalize new_networks so entries are comparable with old_networks
for new_net in new_networks:
for key in ('port', 'network', 'fixed_ip', 'uuid'):
# if new_net.get(key) is '', convert to None
if not new_net.get(key):
new_net[key] = None
for old_net in old_networks:
for key in ('port', 'network', 'fixed_ip', 'uuid'):
# if old_net.get(key) is '', convert to None
if not old_net.get(key):
old_net[key] = None
# find matches and remove them from old and new networks
not_updated_networks = []
for net in old_networks:
if net in new_networks:
new_networks.remove(net)
not_updated_networks.append(net)
for net in not_updated_networks:
old_networks.remove(net)
return not_updated_networks
def _get_network_id(self, net):
net_id = None
if net.get(self.NETWORK_ID):
if self.is_using_neutron():
net_id = self.client_plugin(
'neutron').resolve_network(
net,
self.NETWORK_ID, self.NETWORK_UUID)
else:
net_id = self.client_plugin(
'nova').get_nova_network_id(net.get(self.NETWORK_ID))
return net_id
def update_networks_matching_iface_port(self, nets, interfaces):
def find_equal(port, net_id, ip, nets):
for net in nets:
if (net.get('port') == port or
(net.get('fixed_ip') == ip and
(self._get_network_id(net) == net_id or
net.get('uuid') == net_id))):
return net
def find_poor_net(net_id, nets):
for net in nets:
if (not net.get('port') and not net.get('fixed_ip') and
(self._get_network_id(net) == net_id or
net.get('uuid') == net_id)):
return net
for iface in interfaces:
# get interface properties
props = {'port': iface.port_id,
'net_id': iface.net_id,
'ip': iface.fixed_ips[0]['ip_address'],
'nets': nets}
# try to match by port or network_id with fixed_ip
net = find_equal(**props)
if net is not None:
net['port'] = props['port']
continue
# find poor net that has only network_id
net = find_poor_net(props['net_id'], nets)
if net is not None:
net['port'] = props['port']
def _update_flavor(self, prop_diff):
flavor_update_policy = (
prop_diff.get(self.FLAVOR_UPDATE_POLICY) or
self.properties[self.FLAVOR_UPDATE_POLICY])
flavor = prop_diff[self.FLAVOR]
if flavor_update_policy == 'REPLACE':
raise resource.UpdateReplace(self.name)
flavor_id = self.client_plugin().get_flavor_id(flavor)
handler_args = {'args': (flavor_id,)}
checker_args = {'args': (flavor_id, flavor)}
prg_resize = progress.ServerUpdateProgress(self.resource_id,
'resize',
handler_extra=handler_args,
checker_extra=checker_args)
prg_verify = progress.ServerUpdateProgress(self.resource_id,
'verify_resize')
return prg_resize, prg_verify
def _update_image(self, prop_diff):
image_update_policy = (
prop_diff.get(self.IMAGE_UPDATE_POLICY) or
self.properties[self.IMAGE_UPDATE_POLICY])
if image_update_policy == 'REPLACE':
raise resource.UpdateReplace(self.name)
image = prop_diff[self.IMAGE]
image_id = self.client_plugin('glance').get_image_id(image)
preserve_ephemeral = (
image_update_policy == 'REBUILD_PRESERVE_EPHEMERAL')
password = (prop_diff.get(self.ADMIN_PASS) or
self.properties[self.ADMIN_PASS])
kwargs = {'password': password,
'preserve_ephemeral': preserve_ephemeral}
prg = progress.ServerUpdateProgress(self.resource_id,
'rebuild',
handler_extra={'args': (image_id,),
'kwargs': kwargs})
return prg
def _update_networks(self, server, prop_diff):
updaters = []
new_networks = prop_diff.get(self.NETWORKS)
attach_first_free_port = False
if not new_networks:
new_networks = []
attach_first_free_port = True
old_networks = self.properties[self.NETWORKS]
if not server:
server = self.client().servers.get(self.resource_id)
interfaces = server.interface_list()
        # if old_networks is None, the server was attached to the first
        # free port, so we should detach this interface.
if old_networks is None:
for iface in interfaces:
updaters.append(
progress.ServerUpdateProgress(
self.resource_id, 'interface_detach',
complete=True,
handler_extra={'args': (iface.port_id,)})
)
# if we have any information in networks field, we should:
# 1. find similar networks, if they exist
# 2. remove these networks from new_networks and old_networks
# lists
# 3. detach unmatched networks, which were present in old_networks
# 4. attach unmatched networks, which were present in new_networks
else:
            # remove unchanged networks from the old and new network lists,
            # and also get a list of these networks
not_updated_networks = self._get_network_matches(
old_networks, new_networks)
self.update_networks_matching_iface_port(
old_networks + not_updated_networks, interfaces)
            # according to the nova interface-detach command, the detached
            # port will be deleted
for net in old_networks:
if net.get(self.NETWORK_PORT):
updaters.append(
progress.ServerUpdateProgress(
self.resource_id, 'interface_detach',
complete=True,
handler_extra={'args':
(net.get(self.NETWORK_PORT),)})
)
handler_kwargs = {'port_id': None, 'net_id': None, 'fip': None}
        # the attach section is the same for both variants
        # described above
for net in new_networks:
if net.get(self.NETWORK_PORT):
handler_kwargs['port_id'] = net.get(self.NETWORK_PORT)
elif net.get(self.NETWORK_ID):
handler_kwargs['net_id'] = self._get_network_id(net)
handler_kwargs['fip'] = net.get('fixed_ip')
elif net.get(self.NETWORK_UUID):
handler_kwargs['net_id'] = net['uuid']
handler_kwargs['fip'] = net.get('fixed_ip')
updaters.append(
progress.ServerUpdateProgress(
self.resource_id, 'interface_attach',
complete=True,
handler_extra={'kwargs': handler_kwargs})
)
        # if new_networks is None, attach the first free port, mirroring
        # the behavior during instance creation
if attach_first_free_port:
updaters.append(
progress.ServerUpdateProgress(
self.resource_id, 'interface_attach',
complete=True)
)
return updaters
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
if 'Metadata' in tmpl_diff:
self.metadata_set(tmpl_diff['Metadata'])
updaters = []
server = None
if self.METADATA in prop_diff:
server = self.client().servers.get(self.resource_id)
self.client_plugin().meta_update(server,
prop_diff[self.METADATA])
if self.FLAVOR in prop_diff:
updaters.extend(self._update_flavor(prop_diff))
if self.IMAGE in prop_diff:
updaters.append(self._update_image(prop_diff))
elif self.ADMIN_PASS in prop_diff:
if not server:
server = self.client().servers.get(self.resource_id)
server.change_password(prop_diff[self.ADMIN_PASS])
if self.NAME in prop_diff:
if not server:
server = self.client().servers.get(self.resource_id)
self.client_plugin().rename(server, prop_diff[self.NAME])
if self.NETWORKS in prop_diff:
updaters.extend(self._update_networks(server, prop_diff))
# NOTE(pas-ha) optimization is possible (starting first task
# right away), but we'd rather not, as this method already might
# have called several APIs
return updaters
def check_update_complete(self, updaters):
'''Push all updaters to completion in list order.'''
for prg in updaters:
if not prg.called:
handler = getattr(self.client_plugin(), prg.handler)
prg.called = handler(*prg.handler_args,
**prg.handler_kwargs)
return False
if not prg.complete:
check_complete = getattr(self.client_plugin(), prg.checker)
prg.complete = check_complete(*prg.checker_args,
**prg.checker_kwargs)
break
return all(prg.complete for prg in updaters)
def metadata_update(self, new_metadata=None):
'''
Refresh the metadata if new_metadata is None
'''
if new_metadata is None:
# Re-resolve the template metadata and merge it with the
# current resource metadata. This is necessary because the
# attributes referenced in the template metadata may change
# and the resource itself adds keys to the metadata which
# are not specified in the template (e.g the deployments data)
meta = self.metadata_get(refresh=True) or {}
tmpl_meta = self.t.metadata()
meta.update(tmpl_meta)
self.metadata_set(meta)
@staticmethod
def _check_maximum(count, maximum, msg):
'''
Check a count against a maximum, unless maximum is -1 which indicates
that there is no limit
'''
if maximum != -1 and count > maximum:
raise exception.StackValidationFailed(message=msg)
def _validate_block_device_mapping(self):
# either volume_id or snapshot_id needs to be specified, but not both
# for block device mapping.
bdm = self.properties[self.BLOCK_DEVICE_MAPPING] or []
bootable_vol = False
for mapping in bdm:
device_name = mapping[self.BLOCK_DEVICE_MAPPING_DEVICE_NAME]
if device_name == 'vda':
bootable_vol = True
volume_id = mapping.get(self.BLOCK_DEVICE_MAPPING_VOLUME_ID)
snapshot_id = mapping.get(self.BLOCK_DEVICE_MAPPING_SNAPSHOT_ID)
if volume_id is not None and snapshot_id is not None:
raise exception.ResourcePropertyConflict(
self.BLOCK_DEVICE_MAPPING_VOLUME_ID,
self.BLOCK_DEVICE_MAPPING_SNAPSHOT_ID)
if volume_id is None and snapshot_id is None:
msg = _('Either volume_id or snapshot_id must be specified for'
' device mapping %s') % device_name
raise exception.StackValidationFailed(message=msg)
bdm_v2 = self.properties[self.BLOCK_DEVICE_MAPPING_V2] or []
if bdm and bdm_v2:
raise exception.ResourcePropertyConflict(
self.BLOCK_DEVICE_MAPPING, self.BLOCK_DEVICE_MAPPING_V2)
for mapping in bdm_v2:
volume_id = mapping.get(self.BLOCK_DEVICE_MAPPING_VOLUME_ID)
snapshot_id = mapping.get(self.BLOCK_DEVICE_MAPPING_SNAPSHOT_ID)
image_id = mapping.get(self.BLOCK_DEVICE_MAPPING_IMAGE_ID)
swap_size = mapping.get(self.BLOCK_DEVICE_MAPPING_SWAP_SIZE)
property_tuple = (volume_id, snapshot_id, image_id, swap_size)
if property_tuple.count(None) < 3:
raise exception.ResourcePropertyConflict(
self.BLOCK_DEVICE_MAPPING_VOLUME_ID,
self.BLOCK_DEVICE_MAPPING_SNAPSHOT_ID,
self.BLOCK_DEVICE_MAPPING_IMAGE_ID,
self.BLOCK_DEVICE_MAPPING_SWAP_SIZE)
if property_tuple.count(None) == 4:
msg = _('Either volume_id, snapshot_id, image_id or '
'swap_size must be specified.')
raise exception.StackValidationFailed(message=msg)
if any((volume_id, snapshot_id, image_id)):
bootable_vol = True
return bootable_vol
def _validate_network(self, network):
if (network.get(self.NETWORK_ID) is None
and network.get(self.NETWORK_PORT) is None
and network.get(self.NETWORK_UUID) is None):
msg = _('One of the properties "%(id)s", "%(port_id)s", '
'"%(uuid)s" should be set for the '
'specified network of server "%(server)s".'
'') % dict(id=self.NETWORK_ID,
port_id=self.NETWORK_PORT,
uuid=self.NETWORK_UUID,
server=self.name)
raise exception.StackValidationFailed(message=msg)
if network.get(self.NETWORK_UUID) and network.get(self.NETWORK_ID):
msg = _('Properties "%(uuid)s" and "%(id)s" are both set '
'to the network "%(network)s" for the server '
'"%(server)s". The "%(uuid)s" property is deprecated. '
'Use only "%(id)s" property.'
'') % dict(uuid=self.NETWORK_UUID,
id=self.NETWORK_ID,
network=network[self.NETWORK_ID],
server=self.name)
raise exception.StackValidationFailed(message=msg)
elif network.get(self.NETWORK_UUID):
LOG.info(_LI('For the server "%(server)s" the "%(uuid)s" '
'property is set to network "%(network)s". '
'"%(uuid)s" property is deprecated. Use '
'"%(id)s" property instead.'),
dict(uuid=self.NETWORK_UUID,
id=self.NETWORK_ID,
network=network[self.NETWORK_ID],
server=self.name))
def validate(self):
'''
Validate any of the provided params
'''
super(Server, self).validate()
bootable_vol = self._validate_block_device_mapping()
# make sure the image exists if specified.
image = self.properties[self.IMAGE]
if not image and not bootable_vol:
msg = _('Neither image nor bootable volume is specified for'
' instance %s') % self.name
raise exception.StackValidationFailed(message=msg)
        # the network properties 'uuid' and 'network' shouldn't both be
        # used at once for any network
networks = self.properties[self.NETWORKS] or []
# record if any networks include explicit ports
networks_with_port = False
for network in networks:
networks_with_port = (networks_with_port or
network.get(self.NETWORK_PORT))
self._validate_network(network)
# retrieve provider's absolute limits if it will be needed
metadata = self.properties[self.METADATA]
personality = self.properties[self.PERSONALITY]
if metadata is not None or personality:
limits = self.client_plugin().absolute_limits()
        # if 'security_groups' is present for the server and an explicit
        # 'port' is set in one or more entries in 'networks', raise a
        # validation error
if networks_with_port and self.properties[self.SECURITY_GROUPS]:
raise exception.ResourcePropertyConflict(
self.SECURITY_GROUPS,
"/".join([self.NETWORKS, self.NETWORK_PORT]))
# verify that the number of metadata entries is not greater
# than the maximum number allowed in the provider's absolute
# limits
if metadata is not None:
msg = _('Instance metadata must not contain greater than %s '
'entries. This is the maximum number allowed by your '
'service provider') % limits['maxServerMeta']
self._check_maximum(len(metadata),
limits['maxServerMeta'], msg)
# verify the number of personality files and the size of each
# personality file against the provider's absolute limits
if personality:
msg = _("The personality property may not contain "
"greater than %s entries.") % limits['maxPersonality']
self._check_maximum(len(personality),
limits['maxPersonality'], msg)
for path, contents in personality.items():
msg = (_("The contents of personality file \"%(path)s\" "
"is larger than the maximum allowed personality "
"file size (%(max_size)s bytes).") %
{'path': path,
'max_size': limits['maxPersonalitySize']})
self._check_maximum(len(bytes(contents.encode('utf-8'))),
limits['maxPersonalitySize'], msg)
def _delete_temp_url(self):
object_name = self.data().get('metadata_object_name')
if not object_name:
return
try:
container = self.physical_resource_name()
swift = self.client('swift')
swift.delete_object(container, object_name)
headers = swift.head_container(container)
if int(headers['x-container-object-count']) == 0:
swift.delete_container(container)
except Exception as ex:
self.client_plugin('swift').ignore_not_found(ex)
def _delete_queue(self):
queue_id = self.data().get('metadata_queue_id')
if not queue_id:
return
client_plugin = self.client_plugin('zaqar')
zaqar = client_plugin.create_for_tenant(
self.stack.stack_user_project_id)
try:
zaqar.queue(queue_id).delete()
except Exception as ex:
client_plugin.ignore_not_found(ex)
self.data_delete('metadata_queue_id')
def handle_snapshot_delete(self, state):
if state[0] != self.FAILED:
image_id = self.client().servers.create_image(
self.resource_id, self.physical_resource_name())
return progress.ServerDeleteProgress(
self.resource_id, image_id, False)
return self.handle_delete()
def handle_delete(self):
if self.resource_id is None:
return
if self.user_data_software_config():
self._delete_user()
self._delete_temp_url()
self._delete_queue()
try:
self.client().servers.delete(self.resource_id)
except Exception as e:
self.client_plugin().ignore_not_found(e)
return
return progress.ServerDeleteProgress(self.resource_id)
def check_delete_complete(self, prg):
if not prg:
return True
if not prg.image_complete:
image = self.client().images.get(prg.image_id)
if image.status in ('DELETED', 'ERROR'):
raise exception.Error(image.status)
elif image.status == 'ACTIVE':
prg.image_complete = True
if not self.handle_delete():
return True
return False
return self.client_plugin().check_delete_server_complete(
prg.server_id)
def handle_suspend(self):
'''
Suspend a server - note we do not wait for the SUSPENDED state,
this is polled for by check_suspend_complete in a similar way to the
create logic so we can take advantage of coroutines
'''
if self.resource_id is None:
raise exception.Error(_('Cannot suspend %s, resource_id not set') %
self.name)
try:
server = self.client().servers.get(self.resource_id)
except Exception as e:
if self.client_plugin().is_not_found(e):
raise exception.NotFound(_('Failed to find server %s') %
self.resource_id)
else:
raise
else:
            # if the server has already been suspended successfully,
            # there is no need to suspend again
if self.client_plugin().get_status(server) != 'SUSPENDED':
LOG.debug('suspending server %s' % self.resource_id)
server.suspend()
return server.id
def check_suspend_complete(self, server_id):
cp = self.client_plugin()
server = cp.fetch_server(server_id)
if not server:
return False
status = cp.get_status(server)
LOG.debug('%(name)s check_suspend_complete status = %(status)s'
% {'name': self.name, 'status': status})
if status in list(cp.deferred_server_statuses + ['ACTIVE']):
return status == 'SUSPENDED'
else:
exc = resource.ResourceUnknownStatus(
result=_('Suspend of server %s failed') % server.name,
resource_status=status)
raise exc
def handle_resume(self):
'''
Resume a server - note we do not wait for the ACTIVE state,
this is polled for by check_resume_complete in a similar way to the
create logic so we can take advantage of coroutines
'''
if self.resource_id is None:
raise exception.Error(_('Cannot resume %s, resource_id not set') %
self.name)
try:
server = self.client().servers.get(self.resource_id)
except Exception as e:
if self.client_plugin().is_not_found(e):
raise exception.NotFound(_('Failed to find server %s') %
self.resource_id)
else:
raise
else:
            # if the server has already been resumed successfully,
            # there is no need to resume again
if self.client_plugin().get_status(server) != 'ACTIVE':
LOG.debug('resuming server %s' % self.resource_id)
server.resume()
return server.id
def check_resume_complete(self, server_id):
return self.client_plugin()._check_active(server_id)
def handle_snapshot(self):
image_id = self.client().servers.create_image(
self.resource_id, self.physical_resource_name())
self.data_set('snapshot_image_id', image_id)
return image_id
def check_snapshot_complete(self, image_id):
image = self.client().images.get(image_id)
if image.status == 'ACTIVE':
return True
elif image.status == 'ERROR' or image.status == 'DELETED':
raise exception.Error(image.status)
return False
def handle_delete_snapshot(self, snapshot):
image_id = snapshot['resource_data'].get('snapshot_image_id')
try:
self.client().images.delete(image_id)
except Exception as e:
self.client_plugin().ignore_not_found(e)
def handle_restore(self, defn, restore_data):
image_id = restore_data['resource_data']['snapshot_image_id']
props = function.resolve(self.properties.data)
props[self.IMAGE] = image_id
return defn.freeze(properties=props)
def resource_mapping():
return {
'OS::Nova::Server': Server,
}
|
from __future__ import unicode_literals
import json
from copy import deepcopy
import pytest
import sure # noqa
import moto.server as server
from moto import mock_eks
from moto.core import ACCOUNT_ID
from moto.eks.exceptions import ResourceInUseException, ResourceNotFoundException
from moto.eks.models import (
CLUSTER_EXISTS_MSG,
CLUSTER_IN_USE_MSG,
CLUSTER_NOT_FOUND_MSG,
NODEGROUP_EXISTS_MSG,
NODEGROUP_NOT_FOUND_MSG,
)
from moto.eks.responses import DEFAULT_MAX_RESULTS, DEFAULT_NEXT_TOKEN
from tests.test_eks.test_eks import all_arn_values_should_be_valid
from tests.test_eks.test_eks_constants import (
AddonAttributes,
ClusterAttributes,
DEFAULT_ENCODING,
DEFAULT_HTTP_HEADERS,
DEFAULT_REGION,
Endpoints,
FargateAttributes,
HttpHeaders,
NodegroupAttributes,
NODEROLE_ARN_KEY,
NODEROLE_ARN_VALUE,
PARTITIONS,
RegExTemplates,
ResponseAttributes,
ROLE_ARN_KEY,
ROLE_ARN_VALUE,
SERVICE,
StatusCodes,
SUBNETS_KEY,
SUBNETS_VALUE,
)
"""
Test the different server responses
"""
NAME_LIST = ["foo", "bar", "baz", "qux"]
class TestCluster:
cluster_name = "example_cluster"
data = {ClusterAttributes.NAME: cluster_name, ROLE_ARN_KEY: ROLE_ARN_VALUE}
endpoint = Endpoints.CREATE_CLUSTER
expected_arn_values = [
PARTITIONS,
DEFAULT_REGION,
ACCOUNT_ID,
cluster_name,
]
class TestNodegroup:
cluster_name = TestCluster.cluster_name
nodegroup_name = "example_nodegroup"
data = {
ClusterAttributes.CLUSTER_NAME: cluster_name,
NodegroupAttributes.NODEGROUP_NAME: nodegroup_name,
NODEROLE_ARN_KEY: NODEROLE_ARN_VALUE,
SUBNETS_KEY: SUBNETS_VALUE,
}
endpoint = Endpoints.CREATE_NODEGROUP.format(clusterName=cluster_name)
expected_arn_values = [
PARTITIONS,
DEFAULT_REGION,
ACCOUNT_ID,
cluster_name,
nodegroup_name,
None,
]
@pytest.fixture(autouse=True)
def test_client():
backend = server.create_backend_app(SERVICE)
yield backend.test_client()
@pytest.fixture(scope="function")
def create_cluster(test_client):
def create_and_verify_cluster(client, name):
"""Creates one valid cluster and verifies return status code 200."""
data = deepcopy(TestCluster.data)
data.update(name=name)
response = client.post(
TestCluster.endpoint, data=json.dumps(data), headers=DEFAULT_HTTP_HEADERS,
)
response.status_code.should.equal(StatusCodes.OK)
return json.loads(response.data.decode(DEFAULT_ENCODING))[
ResponseAttributes.CLUSTER
]
def _execute(name=TestCluster.cluster_name):
return create_and_verify_cluster(test_client, name=name)
yield _execute
@pytest.fixture(scope="function", autouse=True)
def create_nodegroup(test_client):
def create_and_verify_nodegroup(client, name):
"""Creates one valid nodegroup and verifies return status code 200."""
data = deepcopy(TestNodegroup.data)
data.update(nodegroupName=name)
response = client.post(
TestNodegroup.endpoint, data=json.dumps(data), headers=DEFAULT_HTTP_HEADERS,
)
response.status_code.should.equal(StatusCodes.OK)
return json.loads(response.data.decode(DEFAULT_ENCODING))[
ResponseAttributes.NODEGROUP
]
def _execute(name=TestNodegroup.nodegroup_name):
return create_and_verify_nodegroup(test_client, name=name)
yield _execute
@mock_eks
def test_eks_create_single_cluster(create_cluster):
result_cluster = create_cluster()
result_cluster[ClusterAttributes.NAME].should.equal(TestCluster.cluster_name)
all_arn_values_should_be_valid(
expected_arn_values=TestCluster.expected_arn_values,
pattern=RegExTemplates.CLUSTER_ARN,
arn_under_test=result_cluster[ClusterAttributes.ARN],
)
@mock_eks
def test_eks_create_multiple_clusters_with_same_name(test_client, create_cluster):
create_cluster()
expected_exception = ResourceInUseException
expected_msg = CLUSTER_EXISTS_MSG.format(clusterName=TestCluster.cluster_name)
expected_data = {
ClusterAttributes.CLUSTER_NAME: TestCluster.cluster_name,
NodegroupAttributes.NODEGROUP_NAME: None,
AddonAttributes.ADDON_NAME: None,
ResponseAttributes.MESSAGE: expected_msg,
}
response = test_client.post(
TestCluster.endpoint,
data=json.dumps(TestCluster.data),
headers=DEFAULT_HTTP_HEADERS,
)
should_return_expected_exception(response, expected_exception, expected_data)
@mock_eks
def test_eks_create_nodegroup_without_cluster(test_client):
expected_exception = ResourceNotFoundException
expected_msg = CLUSTER_NOT_FOUND_MSG.format(clusterName=TestCluster.cluster_name)
expected_data = {
ClusterAttributes.CLUSTER_NAME: None,
NodegroupAttributes.NODEGROUP_NAME: None,
FargateAttributes.PROFILE_NAME: None,
AddonAttributes.ADDON_NAME: None,
ResponseAttributes.MESSAGE: expected_msg,
}
endpoint = Endpoints.CREATE_NODEGROUP.format(clusterName=TestCluster.cluster_name)
response = test_client.post(
endpoint, data=json.dumps(TestNodegroup.data), headers=DEFAULT_HTTP_HEADERS
)
should_return_expected_exception(response, expected_exception, expected_data)
@mock_eks
def test_eks_create_nodegroup_on_existing_cluster(create_cluster, create_nodegroup):
create_cluster()
result_data = create_nodegroup()
result_data[NodegroupAttributes.NODEGROUP_NAME].should.equal(
TestNodegroup.nodegroup_name
)
all_arn_values_should_be_valid(
expected_arn_values=TestNodegroup.expected_arn_values,
pattern=RegExTemplates.NODEGROUP_ARN,
arn_under_test=result_data[NodegroupAttributes.ARN],
)
@mock_eks
def test_eks_create_multiple_nodegroups_with_same_name(
test_client, create_cluster, create_nodegroup
):
create_cluster()
create_nodegroup()
expected_exception = ResourceInUseException
expected_msg = NODEGROUP_EXISTS_MSG.format(
clusterName=TestNodegroup.cluster_name,
nodegroupName=TestNodegroup.nodegroup_name,
)
expected_data = {
ClusterAttributes.CLUSTER_NAME: TestNodegroup.cluster_name,
NodegroupAttributes.NODEGROUP_NAME: TestNodegroup.nodegroup_name,
AddonAttributes.ADDON_NAME: None,
ResponseAttributes.MESSAGE: expected_msg,
}
response = test_client.post(
TestNodegroup.endpoint,
data=json.dumps(TestNodegroup.data),
headers=DEFAULT_HTTP_HEADERS,
)
should_return_expected_exception(response, expected_exception, expected_data)
@mock_eks
def test_eks_list_clusters(test_client, create_cluster):
[create_cluster(name) for name in NAME_LIST]
response = test_client.get(
Endpoints.LIST_CLUSTERS.format(
maxResults=DEFAULT_MAX_RESULTS, nextToken=DEFAULT_NEXT_TOKEN
)
)
result_data = json.loads(response.data.decode(DEFAULT_ENCODING))[
ResponseAttributes.CLUSTERS
]
response.status_code.should.equal(StatusCodes.OK)
len(result_data).should.equal(len(NAME_LIST))
sorted(result_data).should.equal(sorted(NAME_LIST))
@mock_eks
def test_eks_list_nodegroups(test_client, create_cluster, create_nodegroup):
create_cluster()
[create_nodegroup(name) for name in NAME_LIST]
response = test_client.get(
Endpoints.LIST_NODEGROUPS.format(
clusterName=TestCluster.cluster_name,
maxResults=DEFAULT_MAX_RESULTS,
nextToken=DEFAULT_NEXT_TOKEN,
)
)
result_data = json.loads(response.data.decode(DEFAULT_ENCODING))[
ResponseAttributes.NODEGROUPS
]
response.status_code.should.equal(StatusCodes.OK)
sorted(result_data).should.equal(sorted(NAME_LIST))
len(result_data).should.equal(len(NAME_LIST))
@mock_eks
def test_eks_describe_existing_cluster(test_client, create_cluster):
create_cluster()
response = test_client.get(
Endpoints.DESCRIBE_CLUSTER.format(clusterName=TestCluster.cluster_name)
)
result_data = json.loads(response.data.decode(DEFAULT_ENCODING))[
ResponseAttributes.CLUSTER
]
response.status_code.should.equal(StatusCodes.OK)
result_data[ClusterAttributes.NAME].should.equal(TestCluster.cluster_name)
all_arn_values_should_be_valid(
expected_arn_values=TestCluster.expected_arn_values,
pattern=RegExTemplates.CLUSTER_ARN,
arn_under_test=result_data[ClusterAttributes.ARN],
)
@mock_eks
def test_eks_describe_nonexisting_cluster(test_client):
expected_exception = ResourceNotFoundException
expected_msg = CLUSTER_NOT_FOUND_MSG.format(clusterName=TestCluster.cluster_name)
expected_data = {
ClusterAttributes.CLUSTER_NAME: None,
NodegroupAttributes.NODEGROUP_NAME: None,
FargateAttributes.PROFILE_NAME: None,
AddonAttributes.ADDON_NAME: None,
ResponseAttributes.MESSAGE: expected_msg,
}
response = test_client.get(
Endpoints.DESCRIBE_CLUSTER.format(clusterName=TestCluster.cluster_name)
)
should_return_expected_exception(response, expected_exception, expected_data)
@mock_eks
def test_eks_describe_existing_nodegroup(test_client, create_cluster, create_nodegroup):
create_cluster()
create_nodegroup()
response = test_client.get(
Endpoints.DESCRIBE_NODEGROUP.format(
clusterName=TestNodegroup.cluster_name,
nodegroupName=TestNodegroup.nodegroup_name,
)
)
result_data = json.loads(response.data.decode(DEFAULT_ENCODING))[
ResponseAttributes.NODEGROUP
]
response.status_code.should.equal(StatusCodes.OK)
result_data[ClusterAttributes.CLUSTER_NAME].should.equal(TestNodegroup.cluster_name)
result_data[NodegroupAttributes.NODEGROUP_NAME].should.equal(
TestNodegroup.nodegroup_name
)
all_arn_values_should_be_valid(
expected_arn_values=TestNodegroup.expected_arn_values,
pattern=RegExTemplates.NODEGROUP_ARN,
arn_under_test=result_data[NodegroupAttributes.ARN],
)
@mock_eks
def test_eks_describe_nonexisting_nodegroup(test_client, create_cluster):
create_cluster()
expected_exception = ResourceNotFoundException
expected_msg = NODEGROUP_NOT_FOUND_MSG.format(
clusterName=TestNodegroup.cluster_name,
nodegroupName=TestNodegroup.nodegroup_name,
)
expected_data = {
ClusterAttributes.CLUSTER_NAME: TestNodegroup.cluster_name,
NodegroupAttributes.NODEGROUP_NAME: TestNodegroup.nodegroup_name,
FargateAttributes.PROFILE_NAME: None,
AddonAttributes.ADDON_NAME: None,
ResponseAttributes.MESSAGE: expected_msg,
}
response = test_client.get(
Endpoints.DESCRIBE_NODEGROUP.format(
clusterName=TestCluster.cluster_name,
nodegroupName=TestNodegroup.nodegroup_name,
)
)
should_return_expected_exception(response, expected_exception, expected_data)
@mock_eks
def test_eks_describe_nodegroup_nonexisting_cluster(test_client):
expected_exception = ResourceNotFoundException
expected_msg = CLUSTER_NOT_FOUND_MSG.format(clusterName=TestNodegroup.cluster_name)
expected_data = {
ClusterAttributes.CLUSTER_NAME: TestNodegroup.cluster_name,
NodegroupAttributes.NODEGROUP_NAME: TestNodegroup.nodegroup_name,
FargateAttributes.PROFILE_NAME: None,
AddonAttributes.ADDON_NAME: None,
ResponseAttributes.MESSAGE: expected_msg,
}
response = test_client.get(
Endpoints.DESCRIBE_NODEGROUP.format(
clusterName=TestCluster.cluster_name,
nodegroupName=TestNodegroup.nodegroup_name,
)
)
should_return_expected_exception(response, expected_exception, expected_data)
@mock_eks
def test_eks_delete_cluster(test_client, create_cluster):
create_cluster()
response = test_client.delete(
Endpoints.DELETE_CLUSTER.format(clusterName=TestCluster.cluster_name)
)
result_data = json.loads(response.data.decode(DEFAULT_ENCODING))[
ResponseAttributes.CLUSTER
]
response.status_code.should.equal(StatusCodes.OK)
result_data[ClusterAttributes.NAME].should.equal(TestCluster.cluster_name)
all_arn_values_should_be_valid(
expected_arn_values=TestCluster.expected_arn_values,
pattern=RegExTemplates.CLUSTER_ARN,
arn_under_test=result_data[ClusterAttributes.ARN],
)
@mock_eks
def test_eks_delete_nonexisting_cluster(test_client):
expected_exception = ResourceNotFoundException
expected_msg = CLUSTER_NOT_FOUND_MSG.format(clusterName=TestCluster.cluster_name)
expected_data = {
ClusterAttributes.CLUSTER_NAME: None,
NodegroupAttributes.NODEGROUP_NAME: None,
FargateAttributes.PROFILE_NAME: None,
AddonAttributes.ADDON_NAME: None,
ResponseAttributes.MESSAGE: expected_msg,
}
response = test_client.delete(
Endpoints.DELETE_CLUSTER.format(clusterName=TestCluster.cluster_name)
)
should_return_expected_exception(response, expected_exception, expected_data)
@mock_eks
def test_eks_delete_cluster_with_nodegroups(
test_client, create_cluster, create_nodegroup
):
create_cluster()
create_nodegroup()
expected_exception = ResourceInUseException
expected_msg = CLUSTER_IN_USE_MSG.format(clusterName=TestCluster.cluster_name)
expected_data = {
ClusterAttributes.CLUSTER_NAME: TestCluster.cluster_name,
NodegroupAttributes.NODEGROUP_NAME: TestNodegroup.nodegroup_name,
AddonAttributes.ADDON_NAME: None,
ResponseAttributes.MESSAGE: expected_msg,
}
response = test_client.delete(
Endpoints.DELETE_CLUSTER.format(clusterName=TestCluster.cluster_name)
)
should_return_expected_exception(response, expected_exception, expected_data)
@mock_eks
def test_eks_delete_nodegroup(test_client, create_cluster, create_nodegroup):
create_cluster()
create_nodegroup()
response = test_client.delete(
Endpoints.DELETE_NODEGROUP.format(
clusterName=TestNodegroup.cluster_name,
nodegroupName=TestNodegroup.nodegroup_name,
)
)
result_data = json.loads(response.data.decode(DEFAULT_ENCODING))[
ResponseAttributes.NODEGROUP
]
response.status_code.should.equal(StatusCodes.OK)
result_data[ClusterAttributes.CLUSTER_NAME].should.equal(TestNodegroup.cluster_name)
result_data[NodegroupAttributes.NODEGROUP_NAME].should.equal(
TestNodegroup.nodegroup_name
)
all_arn_values_should_be_valid(
expected_arn_values=TestNodegroup.expected_arn_values,
pattern=RegExTemplates.NODEGROUP_ARN,
arn_under_test=result_data[NodegroupAttributes.ARN],
)
@mock_eks
def test_eks_delete_nonexisting_nodegroup(test_client, create_cluster):
create_cluster()
expected_exception = ResourceNotFoundException
expected_msg = NODEGROUP_NOT_FOUND_MSG.format(
clusterName=TestNodegroup.cluster_name,
nodegroupName=TestNodegroup.nodegroup_name,
)
expected_data = {
ClusterAttributes.CLUSTER_NAME: TestNodegroup.cluster_name,
NodegroupAttributes.NODEGROUP_NAME: TestNodegroup.nodegroup_name,
FargateAttributes.PROFILE_NAME: None,
AddonAttributes.ADDON_NAME: None,
ResponseAttributes.MESSAGE: expected_msg,
}
response = test_client.delete(
Endpoints.DELETE_NODEGROUP.format(
clusterName=TestNodegroup.cluster_name,
nodegroupName=TestNodegroup.nodegroup_name,
)
)
should_return_expected_exception(response, expected_exception, expected_data)
@mock_eks
def test_eks_delete_nodegroup_nonexisting_cluster(test_client):
expected_exception = ResourceNotFoundException
expected_msg = CLUSTER_NOT_FOUND_MSG.format(
clusterName=TestNodegroup.cluster_name,
nodegroupName=TestNodegroup.nodegroup_name,
)
expected_data = {
ClusterAttributes.CLUSTER_NAME: None,
NodegroupAttributes.NODEGROUP_NAME: None,
FargateAttributes.PROFILE_NAME: None,
AddonAttributes.ADDON_NAME: None,
ResponseAttributes.MESSAGE: expected_msg,
}
response = test_client.delete(
Endpoints.DELETE_NODEGROUP.format(
clusterName=TestNodegroup.cluster_name,
nodegroupName=TestNodegroup.nodegroup_name,
)
)
should_return_expected_exception(response, expected_exception, expected_data)
def should_return_expected_exception(response, expected_exception, expected_data):
result_data = json.loads(response.data.decode(DEFAULT_ENCODING))
response.status_code.should.equal(expected_exception.STATUS)
response.headers.get(HttpHeaders.ErrorType).should.equal(expected_exception.TYPE)
result_data.should.equal(expected_data)
|
# Logger bot configuration.
{
# These messages will disappear after the bot has been run
# Most of these settings can be changed from within the bot
# itself. See the help command.
'active_servers': set(),
'admin_commands': {'help', 'ignore_server', 'listen_on', 'leave', 'join'},
'admin_roles': set(),
'admins': set(),
# Discord user for the bot.
'bot_user': 'logger@example.com',
    'bot_password': 'Password for Discord user',
# MySQL database connection.
'db_host': 'localhost',
'db_user': 'logger',
'db_password': 'Password for database user',
'db_schema': 'discord',
'ignores': set(),
# Set of user ids that are masters of bot, and can do any command.
'masters': {'your-user-id-number'},
'noisy_deny': True,
'protected_servers': set(),
# Character used to triggering commands in the bot. Setting it
# to '!', means commands start with an ! character (e.g, !help).
'trigger': '!',
'user_commands': {'help', 'leave', 'join'},
}
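# Example (illustrative, not part of the shipped configuration): with
# 'trigger' set to '!', a user whose id is listed in 'masters' or whose
# role is in 'admin_roles' can invoke a command such as "!help" in any
# channel the bot listens on; other users are limited to 'user_commands'.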
|
import numpy as np
from .stats import Pdf, pdf_from_samples, multiply_pdfs, divide_pdfs
"""
Scaling relationships and related equations for earthquake magnitude
calculations.
"""
"""
Normalized slip distribution from Biasi and Weldon, 2006
"""
Dn_x = np.array(
[ 0. , 0.03852144, 0.07704287, 0.11556431, 0.15408574,
0.19260718, 0.23112861, 0.26965005, 0.30817149, 0.34669292,
0.38521436, 0.42373579, 0.46225723, 0.50077866, 0.5393001 ,
0.57782153, 0.61634297, 0.65486441, 0.69338584, 0.73190728,
0.77042871, 0.80895015, 0.84747158, 0.88599302, 0.92451446,
0.96303589, 1.00155733, 1.04007876, 1.0786002 , 1.11712163,
1.15564307, 1.19416451, 1.23268594, 1.27120738, 1.30972881,
1.34825025, 1.38677168, 1.42529312, 1.46381456, 1.50233599,
1.54085743, 1.57937886, 1.6179003 , 1.65642173, 1.69494317,
1.7334646 , 1.77198604, 1.81050748, 1.84902891, 1.88755035,
1.92607178, 1.96459322, 2.00311465, 2.04163609, 2.08015753,
2.11867896, 2.1572004 , 2.19572183, 2.23424327, 2.2727647 ,
2.31128614, 2.34980758, 2.38832901, 2.42685045, 2.46537188,
2.50389332, 2.54241475, 2.58093619, 2.61945762, 2.65797906,
2.6965005 , 2.73502193, 2.77354337, 2.8120648 , 2.85058624,
2.88910767, 2.92762911, 2.96615055, 3.00467198, 3.04319342,
3.08171485, 3.12023629, 3.15875772, 3.19727916, 3.2358006 ,
3.27432203, 3.31284347, 3.3513649 , 3.38988634, 3.42840777,
3.46692921, 3.50545064, 3.54397208, 3.58249352, 3.62101495,
3.65953639, 3.69805782, 3.73657926, 3.77510069, 3.81362213])
Dn_y = np.array(
[ 3.56431234e-01, 4.07514412e-01, 4.49469325e-01, 4.80250978e-01,
4.99600050e-01, 5.08967345e-01, 5.11056831e-01, 5.09135209e-01,
5.06305810e-01, 5.04929021e-01, 5.06305202e-01, 5.10647854e-01,
5.17294850e-01, 5.25056042e-01, 5.32585263e-01, 5.38688051e-01,
5.42518154e-01, 5.43657945e-01, 5.42107125e-01, 5.38215229e-01,
5.32589131e-01, 5.25993774e-01, 5.19250549e-01, 5.13129949e-01,
5.08236899e-01, 5.04898081e-01, 5.03074847e-01, 5.02334004e-01,
5.01903866e-01, 5.00822254e-01, 4.98152675e-01, 4.93216557e-01,
4.85776256e-01, 4.76112653e-01, 4.64970884e-01, 4.53387277e-01,
4.42445033e-01, 4.33023117e-01, 4.25598012e-01, 4.20136711e-01,
4.16092401e-01, 4.12492219e-01, 4.08093894e-01, 4.01583982e-01,
3.91790171e-01, 3.77880214e-01, 3.59519131e-01, 3.36956396e-01,
3.11019404e-01, 2.83002312e-01, 2.54461304e-01, 2.26954105e-01,
2.01783046e-01, 1.79805426e-01, 1.61356306e-01, 1.46292387e-01,
1.34126853e-01, 1.24201482e-01, 1.15842979e-01, 1.08470898e-01,
1.01650879e-01, 9.51051805e-02, 8.86970782e-02, 8.24006991e-02,
7.62618151e-02, 7.03540397e-02, 6.47382510e-02, 5.94357659e-02,
5.44230300e-02, 4.96471997e-02, 4.50527124e-02, 4.06047119e-02,
3.62987575e-02, 3.21550847e-02, 2.82040784e-02, 2.44727150e-02,
2.09786579e-02, 1.77325398e-02, 1.47440829e-02, 1.20266593e-02,
9.59725861e-03, 7.47225770e-03, 5.66159378e-03, 4.16411755e-03,
2.96568107e-03, 2.04006393e-03, 1.35194170e-03, 8.60866657e-04,
5.25372416e-04, 3.06545806e-04, 1.70626053e-04, 9.04155999e-05,
4.55329491e-05, 2.17590136e-05, 9.85449333e-06, 4.22528115e-06,
1.71367970e-06, 6.56980895e-07, 2.37946616e-07, 8.13790788e-08])
Dn = Pdf(Dn_x, Dn_y)
Dn_sb = multiply_pdfs(Dn, Pdf([Dn_x.min(), Dn_x.max()],
[Dn_x.min(), Dn_x.max()]))
"""
Probability distribution for an earthquake breaking the surface given
Gutenberg-Richter prior; to be used as a p(M) prior for paleoseismic magnitudes
from Biasi and Weldon 2006
"""
gr_pm_x = [5.000, 5.001, 5.057, 5.097, 5.192, 5.300, 5.392, 5.499, 5.597,
5.753, 5.922, 6.021, 6.211, 6.353, 6.533, 6.604, 6.771, 6.999,
7.280, 7.507, 7.726, 7.953, 8.182]
gr_pm_y = [0.000, 0.030, 0.050, 0.063, 0.081, 0.089, 0.089, 0.085, 0.079,
0.067, 0.054, 0.047, 0.035, 0.027, 0.020, 0.018, 0.013, 0.008,
0.005, 0.003, 0.002, 9.785e-4, 0.00]
"""
Conversion functions
"""
def _exp_10(x):
return 10**x
log_fn = {'e': np.log,
'10': np.log10}
exp_fn = {'e': np.exp,
'10': _exp_10}
M_from_D_coeffs = {'BW_2006': {'a': 6.94,
'b': 1.14,
'log_base': '10'},
# WC_1994 are for Average Displacement, not max.
'WC_1994_all': {'a': 6.93,
'b': 0.82,
'log_base': '10'},
'WC_1994_SS': {'a': 7.04,
'b': 0.89,
'log_base': '10'},
'WC_1994_R': {'a': 6.64,
'b': 0.13,
'log_base': '10'},
'WC_1994_N': {'a': 6.78,
'b': 0.65,
'log_base': '10'},
}
M_from_L_coeffs = {'Stirling_2002_instr': {'a': 5.45,
'a_err': 0.08,
'b': 0.95,
'b_err': 0.06,
'log_base': '10'},
'Stirling_2002_pre_instr': {'a': 5.89,
'a_err': 0.11,
'b': 0.79,
'b_err': 0.06,
'log_base': '10'},
'WC_1994_all': {'a': 5.08,
'a_err': 0.1,
'b': 1.16,
'b_err': 0.07,
'log_base': '10'},
'WC_1994_SS': {'a': 5.16,
'a_err': 0.13,
'b': 1.12,
'b_err': 0.08,
'log_base': '10'},
'WC_1994_R': {'a': 5.00,
'a_err': 0.22,
'b': 1.22,
'b_err': 0.16,
'log_base': '10'},
'WC_1994_N': {'a': 4.86,
'a_err': 0.34,
'b': 1.32,
'b_err': 0.26,
'log_base': '10'},
}
def M_from_D(D, ref='BW_2006', a=None, b=None, base='e'):
"""
Moment magnitude from displacement, using the specified scaling
    (keyword 'ref', or parameters 'a', 'b' and 'base').
General relationship is M = a + b * log(D).
Parameters
----------
D : Scalar or vector values for displacement (in meters)
ref : string indicating scaling relationship.
'BW_2006' is Biasi and Weldon (2006) (default).
'WC_1994_all' is Wells and Coppersmith (1994) for all events.
'WC_1994_SS' is Wells and Coppersmith (1994) for strike-slip events.
'WC_1994_R' is Wells and Coppersmith (1994) for reverse events.
'WC_1994_N' is Wells and Coppersmith (1994) for normal events.
`ref=None` will allow you to enter your own coefficients and base.
a : Scalar, or vector of same length as D.
b : Scalar, or vector of same length as D.
base : String, base for logarithm, default 'e'.
'e' is natural log.
'10' is log10.
Returns
-------
M : Scalar or vector of calculated magnitude, with shape of D.
"""
if ref is not None:
# consider warning if ref is not None and a, b, log are inputs
a = M_from_D_coeffs[ref]['a']
b = M_from_D_coeffs[ref]['b']
base = M_from_D_coeffs[ref]['log_base']
else:
pass
return a + b * log_fn[base](D)
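# Illustrative check (not part of the original module): with the default
# Biasi and Weldon (2006) coefficients (a=6.94, b=1.14, log10), a 2 m
# average displacement gives M = 6.94 + 1.14 * log10(2.0) ~= 7.28, i.e.
#     >>> round(M_from_D(2.0), 2)
#     7.28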
def D_from_M(M, ref='BW_2006', a=None, b=None, base='e'):
"""
    Displacement from moment magnitude, using the specified scaling
    (keyword 'ref', or parameters 'a', 'b' and 'base').
General relationship is D = base ** ((M - a) / b)
Parameters
----------
M : Scalar or vector values for moment magnitude
ref : string indicating scaling relationship.
'BW_2006' is Biasi and Weldon (2006) (default).
'WC_1994_all' is Wells and Coppersmith (1994) for all events.
'WC_1994_SS' is Wells and Coppersmith (1994) for strike-slip events.
'WC_1994_R' is Wells and Coppersmith (1994) for reverse events.
'WC_1994_N' is Wells and Coppersmith (1994) for normal events.
`ref=None` will allow you to enter your own coefficients and base.
a : Scalar, or vector of same length as M.
b : Scalar, or vector of same length as M.
base : String, base for exponent, default 'e'.
'e' is e.
'10' is 10.
Returns
-------
D : Scalar or vector of calculated displacement (in meters),
with shape of M.
"""
if ref is not None:
a = M_from_D_coeffs[ref]['a']
b = M_from_D_coeffs[ref]['b']
base = M_from_D_coeffs[ref]['log_base']
return exp_fn[base]((M - a) / b)
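# Illustrative check (assumed values): inverting the same BW_2006 fit,
# D = 10 ** ((M - 6.94) / 1.14), so an M 7.0 event implies roughly 1.13 m
# of average displacement:
#     >>> round(D_from_M(7.0), 2)
#     1.13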
def M_from_L(L, ref='Stirling_2002_instr', unit='km', a=None, b=None, base='e',
a_err=None, b_err=None, mc=False):
"""
Moment magnitude from length, using the specified scaling
    (keyword 'ref', or parameters 'a', 'b' and 'base').
    General relationship is M = a + b * log(L).
Parameters
----------
    L : Scalar or vector values for rupture length (in km by default; see
        'unit')
ref : string indicating scaling relationship.
'Stirling_2002_instr' is from Stirling et al. 2002, instrumental data.
'WC_1994_all' is Wells and Coppersmith (1994) for all events.
'WC_1994_SS' is Wells and Coppersmith (1994) for strike-slip events.
'WC_1994_R' is Wells and Coppersmith (1994) for reverse events.
'WC_1994_N' is Wells and Coppersmith (1994) for normal events.
`ref=None` will allow you to enter your own coefficients and base.
unit : Unit of length measure. Default is 'km'. 'm' also works.
    a : Scalar, or vector of same length as L.
    a_err : Standard error of `a`. Scalar.
    b : Scalar, or vector of same length as L.
    b_err : Standard error of `b`. Scalar.
    base : String, base for logarithm, default 'e'.
'e' is natural log.
'10' is log10.
    mc : Boolean that indicates whether to sample the coefficients a and b
including uncertainties `a_err` and `b_err` through Monte Carlo
techniques.
Returns
-------
M : Scalar or vector of calculated magnitude, with shape of L.
"""
# unit conversion
if unit == 'm':
L = L * 1000.
if ref is not None:
a = M_from_L_coeffs[ref]['a']
b = M_from_L_coeffs[ref]['b']
base = M_from_L_coeffs[ref]['log_base']
try:
a_err = M_from_L_coeffs[ref]['a_err']
b_err = M_from_L_coeffs[ref]['b_err']
except KeyError:
pass
    if mc:
A = a if a_err is None else np.random.normal(a, a_err, len(L))
B = b if b_err is None else np.random.normal(b, b_err, len(L))
else:
A = a
B = b
return A + B * log_fn[base](L)
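# Illustrative check (assumed values): a 30 km rupture with the default
# Stirling et al. (2002) instrumental coefficients (a=5.45, b=0.95, log10)
# gives M = 5.45 + 0.95 * log10(30.) ~= 6.85. Note that the default
# mc=False is required for a scalar length, since the Monte Carlo branch
# draws len(L) coefficient samples:
#     >>> round(M_from_L(30.), 2)
#     6.85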
"""
Estimation functions
"""
def p_D_M(D, M, ref='BW_2006', sample_bias_corr=False):
"""
Likelihood of predicted D given M, as defined by Biasi and Weldon (2006).
Parameters
----------
D : Scalar or array of displacement values (in meters).
M : Scalar or array of magnitudes.
ref: Displacement-magnitude scaling reference (string).
'BW_2006' is Biasi and Weldon (2006).
'WC_1994_all' is Wells and Coppersmith (1994).
Returns
-------
p_D_M : Calculated likelihood. If scalar, simply returns the likelihood.
If not, returns an improper pdf (a `culpable.stats.Pdf`) which
is an interpolation class. Actual likelihoods are `p_D_M.y`, and
corresponding magnitudes (i.e. the prior p_M) are `p_D_M.x`.
"""
D_ave = D_from_M(M, ref=ref)
D = np.abs(D)
    if sample_bias_corr:
Dn_ = Dn_sb
else:
Dn_ = Dn
if np.isscalar(D):
D_score = D / D_ave
p_D_M = Dn_(D_score)
else:
D_score = np.array([d / D_ave for d in D])
p_D_M = Dn_(D_score)
p_D_M = np.mean(p_D_M, axis=0)
if np.isscalar(p_D_M):
        p_D_M = float(p_D_M)
else:
p_D_M = Pdf(M, p_D_M, normalize=True)
return p_D_M
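# Usage sketch (illustrative values, not from the original module): evaluate
# the likelihood of a 1.5 m observation over a coarse magnitude grid; with a
# vector of magnitudes the result is a Pdf whose .x are the magnitudes and
# .y the likelihoods:
#     >>> M_grid = np.arange(6.5, 8.01, 0.5)
#     >>> lik = p_D_M(1.5, M_grid)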
def _make_p_M_x(p_M_min=5., p_M_max=8.5, M_step=0.1, n_M=None):
"""
Makes the X values (i.e., the magnitudes) for a p_M distribution.
"""
if n_M is not None:
p_M_x = np.linspace(p_M_min, p_M_max, num=n_M)
else:
if M_step is None:
M_step = 0.1 # in case it's passed as None from another function
p_M_x = np.arange(p_M_min, p_M_max + M_step, M_step)
return p_M_x
def make_p_M_uniform(p_M_min=5., p_M_max=8.5, M_step=0.1, n_M=None):
"""
Creates a uniform PDF between the minimum and maximum magnitudes given
by p_M_min and p_M_max.
Parameters
----------
p_M_min : Minimum magnitude.
p_M_max : Maximum magnitude.
M_step : Width of steps in interpolation (no effect on final results).
n_M : number of points in interpolation (no effect on final results).
Returns
-------
p_M : Pdf function with a uniform distribution between p_M_min and p_M_max
"""
p_M_x = _make_p_M_x(p_M_min=p_M_min, p_M_max=p_M_max, M_step=M_step,
n_M=n_M)
return Pdf(p_M_x, np.ones(len(p_M_x)) * 1 / len(p_M_x))
def make_p_M_gr_surface_break(p_M_min=5., p_M_max=8.5, M_step=0.1, n_M=None):
"""
Creates a PDF based on a Gutenberg-Richter distribution that is then
modified to account for the decreasing likelihood of surface rupture
with decreasing magnitude (distribution from Biasi and Weldon 2006,
    figure 8b).
Returns:
--------
p_M : Pdf class with a modified Gutenberg-Richter distribution.
"""
p_M_x = _make_p_M_x(p_M_min=p_M_min, p_M_max=p_M_max, M_step=M_step,
n_M=n_M)
p_M_gr_sb = Pdf(gr_pm_x, gr_pm_y)
p_M_gr_sb_y = p_M_gr_sb(p_M_x)
return Pdf(p_M_x, p_M_gr_sb_y)
def make_p_M(p_M_type='uniform', p_M_min=None, p_M_max=None, M_step=None,
n_M=None):
"""
    Creates a PDF of magnitudes to use as the prior p(M).
Parameters
----------
p_M_type : Type of prior. Current values are 'uniform' and
'GR_surface_break' (i.e., a Gutenberg-Richter with WC 1994's
correction for the likelihood of events of different sizes
breaking the surface, as reported in BW 2006).
p_M_min : Minimum magnitude.
p_M_max : Maximum magnitude.
M_step : Width of steps in interpolation (no effect on final results).
n_M : number of points in interpolation (no effect on final results).
Returns
-------
p_M : Pdf function with a uniform distribution between p_M_min and p_M_max
"""
if p_M_type == 'uniform':
p_M = make_p_M_uniform(p_M_min=p_M_min, p_M_max=p_M_max,
M_step=M_step, n_M=n_M)
elif p_M_type == 'GR_surface_break':
p_M = make_p_M_gr_surface_break(p_M_min=p_M_min, p_M_max=p_M_max,
M_step=M_step, n_M=n_M)
return p_M
def p_M_D(D, p_M=None, p_M_min=None, p_M_max=None, M_step=None, n_M=None,
ref='BW_2006', p_M_type='uniform', sample_bias_corr=False):
"""
Calculates p(M|D), the posterior probability of an earthquake having a
magnitude of M given observed displacement D, based on Biasi and Weldon
2006 (but with optional sample bias correction).
Either a `p_M` Pdf object should be passed, or the additional parameters
necessary to construct one; see `make_p_M`.
Parameters
----------
D : Scalar or vector of displacements in meters (floats).
p_M : Prior magnitude distribution p(M), in the Pdf class from
culpable.stats.
p_M_type : Type of prior. Current values are 'uniform' and
'GR_surface_break' (i.e., a Gutenberg-Richter with WC 1994's
correction for the likelihood of events of different sizes
breaking the surface, as reported in BW 2006).
p_M_min : Minimum prior magnitude; only needed if `p_M` is not given.
p_M_max : Maximum prior magnitude; only needed if `p_M` is not given.
M_step : Spacing for `p_M`; only needed if `p_M` is not given.
n_M : number of points for `p_M`; only needed if `p_M` is not given.
ref : Reference for magnitude-displacement scaling relationships. See
`M_from_D` for a list of implemented relationships.
    sample_bias_corr : Boolean indicating whether to correct for preferential
                       sampling of scarps proportionally to the offset at a
                       point relative to the min and max offsets.
Returns
    -------
p_M_D : Pdf function of the posterior magnitude estimation p(M|D).
"""
if p_M is None:
p_M = make_p_M(p_M_type=p_M_type, p_M_min=p_M_min, p_M_max=p_M_max,
M_step=M_step, n_M=n_M)
else:
#TODO: maybe add some logic for dealing with non `Pdf` priors
pass
p_D = Pdf(p_M.x, [np.trapz(Dn_y, Dn_x * D_from_M(M, ref=ref))
for M in p_M.x])
p_D_M_ = p_D_M(D, p_M.x, ref=ref, sample_bias_corr=sample_bias_corr)
p_M_D_ = multiply_pdfs(p_M, p_D_M_, step=M_step)
p_M_D_ = divide_pdfs(p_M_D_, p_D, step=M_step)
return p_M_D_
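# Illustrative usage of p_M_D (a sketch; the displacement values are made up
# and assume the module-level Biasi and Weldon 2006 scaling data are loaded):
#   >>> D_obs = [0.8, 1.2, 1.5]  # point offsets in meters
#   >>> post_D = p_M_D(D_obs, p_M_min=5., p_M_max=8.5, M_step=0.1,
#   ...                p_M_type='uniform', ref='BW_2006')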
def p_M_L(L, p_M=None, p_M_min=None, p_M_max=None, M_step=None, n_M=None,
p_M_type='uniform', ref='WC_1994_all', mc=True):
"""
Calculates p(M|L), the posterior probability of an earthquake having a
magnitude of M given observed length L.
Either a `p_M` Pdf object should be passed, or the additional parameters
necessary to construct one; see `make_p_M`.
Parameters
----------
L : Scalar or vector of lengths in kilometers (floats).
p_M : Prior magnitude distribution p(M), in the Pdf class from
culpable.stats.
p_M_type : Type of prior. Current values are 'uniform' and
'GR_surface_break' (i.e., a Gutenberg-Richter with WC 1994's
correction for the likelihood of events of different sizes
breaking the surface, as reported in BW 2006).
p_M_min : Minimum prior magnitude; only needed if `p_M` is not given.
p_M_max : Maximum prior magnitude; only needed if `p_M` is not given.
M_step : Spacing for `p_M`; only needed if `p_M` is not given.
n_M : number of points for `p_M`; only needed if `p_M` is not given.
ref : Reference for magnitude-length scaling relationships. See `M_from_L`
for a list of implemented relationships.
mc : Boolean that describes whether to propagate the uncertainty (standard
errors) in the scaling relationship to the posterior using a Monte
Carlo simulation.
Returns
    -------
    p_M_L : Pdf of the posterior magnitude distribution p(M|L).
"""
if p_M is None:
p_M = make_p_M(p_M_type=p_M_type, p_M_min=p_M_min, p_M_max=p_M_max,
M_step=M_step, n_M=n_M)
p_M_L_samples = M_from_L(L, ref=ref, mc=mc)
p_M_L_ = pdf_from_samples(p_M_L_samples, x_min=p_M.x.min(),
x_max=p_M.x.max())
p_M_L_ = multiply_pdfs(p_M, p_M_L_)
return p_M_L_
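# Illustrative usage of p_M_L (a sketch; the rupture length is hypothetical):
#   >>> post_L = p_M_L(35., p_M_min=5., p_M_max=8.5, M_step=0.1,
#   ...                ref='WC_1994_all', mc=True)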
def p_M_DL(D, L, p_M=None, p_M_min=None, p_M_max=None, M_step=None, n_M=None,
p_M_type='uniform', D_ref='BW_2006', L_ref='WC_1994_all',
L_mc=True, sample_bias_corr=False):
"""
Calculates p(M|D,L), the posterior probability of an earthquake having a
magnitude of M given observed offset/displacement D and rupture length L.
Either a `p_M` Pdf object should be passed, or the additional parameters
necessary to construct one; see `make_p_M`.
Parameters
----------
D : Scalar or vector of displacement in meters (floats).
L : Scalar or vector of lengths in kilometers (floats).
p_M : Prior magnitude distribution p(M), in the Pdf class from
culpable.stats.
p_M_type : Type of prior. Current values are 'uniform' and
'GR_surface_break' (i.e., a Gutenberg-Richter with WC 1994's
correction for the likelihood of events of different sizes
breaking the surface, as reported in BW 2006).
p_M_min : Minimum prior magnitude; only needed if `p_M` is not given.
    p_M_max : Maximum prior magnitude; only needed if `p_M` is not given.
M_step : Spacing for `p_M`; only needed if `p_M` is not given.
n_M : number of points for `p_M`; only needed if `p_M` is not given.
D_ref : Reference for magnitude-displacement scaling relationships. See
`M_from_D` for a list of implemented relationships.
L_ref : Reference for magnitude-length scaling relationships. See
`M_from_L` for a list of implemented relationships.
    L_mc : Boolean that describes whether to propagate the uncertainty
           (standard errors) in the length scaling relationship to the
           posterior using a Monte Carlo simulation.
    sample_bias_corr : Boolean indicating whether to correct for preferential
                       sampling of scarps proportionally to the offset at a
                       point relative to the min and max offsets.
Returns
    -------
    p_M_DL : Pdf of the posterior magnitude distribution p(M|D,L).
"""
if p_M is None:
p_M = make_p_M(p_M_type=p_M_type, p_M_min=p_M_min, p_M_max=p_M_max,
M_step=M_step, n_M=n_M)
p_M_D_ = p_M_D(D, p_M, ref=D_ref, sample_bias_corr=sample_bias_corr)
p_M_L_samples = M_from_L(L, ref=L_ref, mc=L_mc)
p_M_L_ = pdf_from_samples(p_M_L_samples, x_min=p_M.x.min(),
x_max=p_M.x.max())
return multiply_pdfs(p_M_L_, p_M_D_)
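# Illustrative usage of p_M_DL, combining a point offset with a rupture length
# (a sketch; the numbers are hypothetical):
#   >>> post_DL = p_M_DL(D=1.2, L=35., p_M_min=5., p_M_max=8.5, M_step=0.1,
#   ...                  D_ref='BW_2006', L_ref='WC_1994_all')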
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
def get_options(option_type):
"""
option_type: string
        'training', 'display', or 'visualize'
"""
# name
tf.app.flags.DEFINE_string("training_name","tc_v1","name of next training in log")
# Common
tf.app.flags.DEFINE_string("env_type", "gym", "environment type (lab or gym or maze)")
tf.app.flags.DEFINE_string("env_name", "CartPole-v1", "environment name (for lab)")
tf.app.flags.DEFINE_integer("env_max_steps", 400000, "max number of steps in environment")
tf.app.flags.DEFINE_boolean("use_base", False, "whether to use base A3C for aux network")
tf.app.flags.DEFINE_boolean("use_pixel_change", False, "whether to use pixel change")
tf.app.flags.DEFINE_boolean("use_value_replay", False, "whether to use value function replay")
tf.app.flags.DEFINE_boolean("use_reward_prediction", False, "whether to use reward prediction")
tf.app.flags.DEFINE_boolean("use_temporal_coherence", True, "whether to use temporal coherence")
tf.app.flags.DEFINE_boolean("use_proportionality", False, "whether to use proportionality")
tf.app.flags.DEFINE_boolean("use_causality", False, "whether to use causality")
tf.app.flags.DEFINE_boolean("use_repeatability", False, "whether to use repeatability")
tf.app.flags.DEFINE_string("checkpoint_dir", "/tmp/StRADRL/checkpoints", "checkpoint directory")
# For training
if option_type == 'training':
tf.app.flags.DEFINE_string("temp_dir", "/tmp/StRADRL/tensorboard/", "base directory for tensorboard")
tf.app.flags.DEFINE_string("log_dir", "/tmp/StRADRL/log/", "base directory for logs")
tf.app.flags.DEFINE_integer("max_time_step", 10**6, "max time steps")
tf.app.flags.DEFINE_integer("save_interval_step", 10**4, "saving interval steps")
tf.app.flags.DEFINE_boolean("grad_norm_clip", 40.0, "gradient norm clipping")
#base
tf.app.flags.DEFINE_float("initial_learning_rate", 1e-3, "learning rate")
tf.app.flags.DEFINE_float("gamma", 0.99, "discount factor for rewards")
tf.app.flags.DEFINE_float("entropy_beta", 0.01, "entropy regurarlization constant")
tf.app.flags.DEFINE_float("value_lambda", 0.5, "value ratio for base loss")
tf.app.flags.DEFINE_float("base_lambda", 0.97, "generalized adv. est. lamba for short-long sight")
# auxiliary
tf.app.flags.DEFINE_integer("parallel_size", 1, "parallel thread size")
tf.app.flags.DEFINE_float("aux_initial_learning_rate", 1e-3, "learning rate")
tf.app.flags.DEFINE_float("aux_lambda", 0.0, "generalized adv. est. lamba for short-long sight (aux)")
tf.app.flags.DEFINE_float("gamma_pc", 0.9, "discount factor for pixel control")
tf.app.flags.DEFINE_float("pixel_change_lambda", 0.0001, "pixel change lambda") # 0.05, 0.01 ~ 0.1 for lab, 0.0001 ~ 0.01 for gym
tf.app.flags.DEFINE_float("temporal_coherence_lambda", 1., "temporal coherence lambda")
tf.app.flags.DEFINE_float("proportionality_lambda", 100., "proportionality lambda")
tf.app.flags.DEFINE_float("causality_lambda", 1., "causality lambda")
tf.app.flags.DEFINE_float("repeatability_lambda", 100., "repeatability lambda")
tf.app.flags.DEFINE_integer("experience_history_size", 100000, "experience replay buffer size")
# queuer
tf.app.flags.DEFINE_integer("local_t_max", 20, "repeat step size")
tf.app.flags.DEFINE_integer("queue_length", 5, "max number of batches (of length local_t_max) in queue")
tf.app.flags.DEFINE_integer("env_runner_sync", 1, "number of env episodes before sync to global")
tf.app.flags.DEFINE_float("action_freq", 0, "number of actions per second in env")
# For display
if option_type == 'display':
tf.app.flags.DEFINE_string("frame_save_dir", "/tmp/StRADRL_frames", "frame save directory")
tf.app.flags.DEFINE_boolean("recording", False, "whether to record movie")
tf.app.flags.DEFINE_boolean("frame_saving", False, "whether to save frames")
return tf.app.flags.FLAGS
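# Illustrative usage (a sketch): the returned FLAGS object exposes every option
# defined above as an attribute once tf.app parses the command line.
#   >>> FLAGS = get_options('training')
#   >>> FLAGS.env_name, FLAGS.initial_learning_rate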
|
# -*- coding: utf-8 -*-
'''
Scikit-learn classifier wrappers registered as ml.* routes
'''
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.preprocessing import StandardScaler
from ...core.routes import register
from .base import BaseMl, BaseMlSk
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import AdaBoostClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.lda import LDA
@register('ml.gaussianNB')
class SKGaussianNB(BaseMlSk):
'''
Gaussian Naive Bayes (from sklearn)
'''
init_kwargs = ('class_count_', 'theta_', 'sigma_',
'classifier', 'data', 'action')
run_kwargs = ()
classifier_class = GaussianNB
@register('ml.AdaBoost')
class AdaBoost(BaseMlSk):
'''
AdaBoost (from sklearn)
'''
init_kwargs = ('base_estimator', 'n_estimators',
'learning_rate', 'algorithm', 'random_state',
'classifier', 'data', 'action')
run_kwargs = ()
classifier_class = AdaBoostClassifier
@register('ml.KNeighborsClassifier')
class KNeighbors(BaseMlSk):
    '''
    K-nearest neighbors classifier (from sklearn)
    '''
    init_kwargs = ('n_neighbors', 'weights',
        'algorithm', 'leaf_size', 'metric', 'p', 'metric_params',
        'classifier', 'data', 'action')
    run_kwargs = ()
    classifier_class = KNeighborsClassifier
@register('ml.SVC')
class SupportVectorClassifier(BaseMlSk):
    '''
    Support vector classifier (from sklearn)
    '''
    init_kwargs = ('C', 'kernel',
        'degree', 'gamma', 'coef0', 'probability', 'shrinking',
        'tol', 'cache_size', 'class_weight', 'max_iter', 'random_state',
        'classifier', 'data', 'action')
    run_kwargs = ()
    classifier_class = SVC
@register('ml.LDA')
class LDA(BaseMlSk):
'''
LDA (from sklearn)
'''
init_kwargs = ('solver', 'shrinkage', 'priors', 'n_components',
'store_covariance', 'tol')
run_kwargs = ()
classifier_class = LDA
|
# coding: utf-8
# The Hazard Library
# Copyright (C) 2012-2014, GEM Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
:mod:`openquake.hazardlib.calc.hazard_curve` implements
:func:`hazard_curves`.
"""
import sys
import numpy
from openquake.hazardlib.calc import filters
from openquake.hazardlib.imt import from_string
from openquake.hazardlib.gsim.base import deprecated
@deprecated('Use calc_hazard_curves instead')
def hazard_curves(
sources, sites, imts, gsims, truncation_level,
source_site_filter=filters.source_site_noop_filter,
rupture_site_filter=filters.rupture_site_noop_filter):
"""
    Deprecated. It does the same job as
:func:`openquake.hazardlib.calc.hazard_curve.calc_hazard_curves`,
with the only difference that the intensity measure types in input
and output are hazardlib objects instead of simple strings.
"""
imtls = {str(imt): imls for imt, imls in imts.iteritems()}
curves_by_imt = calc_hazard_curves(
sources, sites, imtls, gsims, truncation_level,
source_site_filter=filters.source_site_noop_filter,
rupture_site_filter=filters.rupture_site_noop_filter)
return {from_string(imt): curves
for imt, curves in curves_by_imt.iteritems()}
def calc_hazard_curves(
sources, sites, imtls, gsims, truncation_level,
source_site_filter=filters.source_site_noop_filter,
rupture_site_filter=filters.rupture_site_noop_filter):
"""
Compute hazard curves on a list of sites, given a set of seismic sources
and a set of ground shaking intensity models (one per tectonic region type
considered in the seismic sources).
Probability of ground motion exceedance is computed using the following
formula ::
P(X≥x|T) = 1 - ∏ ∏ Prup_ij(X<x|T)
where ``P(X≥x|T)`` is the probability that the ground motion parameter
``X`` is exceeding level ``x`` one or more times in a time span ``T``, and
``Prup_ij(X<x|T)`` is the probability that the j-th rupture of the i-th
source is not producing any ground motion exceedance in time span ``T``.
The first product ``∏`` is done over sources, while the second one is done
over ruptures in a source.
The above formula computes the probability of having at least one ground
motion exceedance in a time span as 1 minus the probability that none of
the ruptures in none of the sources is causing a ground motion exceedance
in the same time span. The basic assumption is that seismic sources are
independent, and ruptures in a seismic source are also independent.
:param sources:
An iterator of seismic sources objects (instances of subclasses
of :class:`~openquake.hazardlib.source.base.BaseSeismicSource`).
:param sites:
Instance of :class:`~openquake.hazardlib.site.SiteCollection` object,
representing sites of interest.
:param imtls:
Dictionary mapping intensity measure type strings
to lists of intensity measure levels.
:param gsims:
Dictionary mapping tectonic region types (members
of :class:`openquake.hazardlib.const.TRT`) to
:class:`~openquake.hazardlib.gsim.base.GMPE` or
:class:`~openquake.hazardlib.gsim.base.IPE` objects.
:param truncation_level:
Float, number of standard deviations for truncation of the intensity
distribution.
:param source_site_filter:
Optional source-site filter function. See
:mod:`openquake.hazardlib.calc.filters`.
:param rupture_site_filter:
Optional rupture-site filter function. See
:mod:`openquake.hazardlib.calc.filters`.
:returns:
Dictionary mapping intensity measure type strings (same keys
as in parameter ``imtls``) to 2d numpy arrays of float, where
first dimension differentiates sites (the order and length
are the same as in ``sites`` parameter) and the second one
differentiates IMLs (the order and length are the same as
        corresponding value in ``imtls`` dict).
"""
imts = {from_string(imt): imls for imt, imls in imtls.iteritems()}
curves = dict((imt, numpy.ones([len(sites), len(imtls[imt])]))
for imt in imtls)
sources_sites = ((source, sites) for source in sources)
for source, s_sites in source_site_filter(sources_sites):
try:
ruptures_sites = ((rupture, s_sites)
for rupture in source.iter_ruptures())
for rupture, r_sites in rupture_site_filter(ruptures_sites):
gsim = gsims[rupture.tectonic_region_type]
sctx, rctx, dctx = gsim.make_contexts(r_sites, rupture)
for imt in imts:
poes = gsim.get_poes(sctx, rctx, dctx, imt, imts[imt],
truncation_level)
pno = rupture.get_probability_no_exceedance(poes)
curves[str(imt)] *= r_sites.expand(pno, placeholder=1)
except Exception, err:
etype, err, tb = sys.exc_info()
msg = 'An error occurred with source id=%s. Error: %s'
msg %= (source.source_id, err.message)
raise etype, msg, tb
for imt in imtls:
curves[imt] = 1 - curves[imt]
return curves
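# Sketch of the aggregation step performed above (illustrative numbers only):
# each rupture contributes a probability of *no* exceedance per IML, the
# product runs over ruptures (and sources), and the curve is one minus it.
#   >>> import numpy
#   >>> pno = numpy.array([[0.99, 0.95],    # rupture 1, two IMLs
#   ...                    [0.98, 0.90]])   # rupture 2, two IMLs
#   >>> curves = pno.prod(axis=0)           # [0.9702, 0.855]
#   >>> poes = 1 - curves                   # P(X >= x | T) per IML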
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Network Hosts are responsible for allocating ips and setting up network.
There are multiple backend drivers that handle specific types of networking
topologies. All of the network commands are issued to a subclass of
:class:`NetworkManager`.
**Related Flags**
:network_driver: Driver to use for network creation
:flat_network_bridge: Bridge device for simple network instances
:flat_interface: FlatDhcp will bridge into this interface if set
:flat_network_dns: Dns for simple network
:vlan_start: First VLAN for private networks
:vpn_ip: Public IP for the cloudpipe VPN servers
:vpn_start: First Vpn port for private networks
:cnt_vpn_clients: Number of addresses reserved for vpn clients
:network_size: Number of addresses in each private subnet
:floating_range: Floating IP address block
:fixed_range: Fixed IP address block
:update_dhcp_on_disassociate: Whether to update dhcp when fixed_ip
is disassociated
:fixed_ip_disassociate_timeout: Seconds after which a deallocated ip
is disassociated
:create_unique_mac_address_attempts: Number of times to attempt creating
a unique mac address
"""
import datetime
import itertools
import math
import netaddr
import socket
from eventlet import greenpool
from nova import context
from nova import db
from nova import exception
from nova import flags
from nova import ipv6
from nova import log as logging
from nova import manager
from nova import quota
from nova import utils
from nova import rpc
from nova.network import api as network_api
from nova.compute import api as compute_api
import random
LOG = logging.getLogger("nova.network.manager")
FLAGS = flags.FLAGS
flags.DEFINE_string('flat_network_bridge', None,
'Bridge for simple network instances')
flags.DEFINE_string('flat_network_dns', '8.8.4.4',
'Dns for simple network')
flags.DEFINE_bool('flat_injected', False,
'Whether to attempt to inject network setup into guest')
flags.DEFINE_string('flat_interface', None,
'FlatDhcp will bridge into this interface if set')
flags.DEFINE_integer('vlan_start', 100, 'First VLAN for private networks')
flags.DEFINE_string('vlan_interface', None,
'vlans will bridge into this interface if set')
flags.DEFINE_integer('num_networks', 1, 'Number of networks to support')
flags.DEFINE_string('vpn_ip', '$my_ip',
'Public IP for the cloudpipe VPN servers')
flags.DEFINE_integer('vpn_start', 1000, 'First Vpn port for private networks')
flags.DEFINE_bool('multi_host', False,
'Default value for multi_host in networks')
flags.DEFINE_integer('network_size', 256,
'Number of addresses in each private subnet')
flags.DEFINE_string('floating_range', '4.4.4.0/24',
'Floating IP address block')
flags.DEFINE_string('fixed_range', '10.0.0.0/8', 'Fixed IP address block')
flags.DEFINE_string('fixed_range_v6', 'fd00::/48', 'Fixed IPv6 address block')
flags.DEFINE_string('gateway_v6', None, 'Default IPv6 gateway')
flags.DEFINE_integer('cnt_vpn_clients', 0,
'Number of addresses reserved for vpn clients')
flags.DEFINE_string('network_driver', 'nova.network.linux_net',
'Driver to use for network creation')
flags.DEFINE_bool('update_dhcp_on_disassociate', False,
'Whether to update dhcp when fixed_ip is disassociated')
flags.DEFINE_integer('fixed_ip_disassociate_timeout', 600,
'Seconds after which a deallocated ip is disassociated')
flags.DEFINE_integer('create_unique_mac_address_attempts', 5,
'Number of attempts to create unique mac address')
flags.DEFINE_bool('auto_assign_floating_ip', False,
'Autoassigning floating ip to VM')
flags.DEFINE_string('network_host', socket.gethostname(),
'Network host to use for ip allocation in flat modes')
flags.DEFINE_bool('fake_call', False,
'If True, skip using the queue and make local calls')
flags.DEFINE_bool('force_dhcp_release', False,
'If True, send a dhcp release on instance termination')
class AddressAlreadyAllocated(exception.Error):
"""Address was already allocated."""
pass
class RPCAllocateFixedIP(object):
"""Mixin class originally for FlatDCHP and VLAN network managers.
used since they share code to RPC.call allocate_fixed_ip on the
correct network host to configure dnsmasq
"""
def _allocate_fixed_ips(self, context, instance_id, host, networks,
**kwargs):
"""Calls allocate_fixed_ip once for each network."""
green_pool = greenpool.GreenPool()
vpn = kwargs.get('vpn')
requested_networks = kwargs.get('requested_networks')
for network in networks:
address = None
if requested_networks is not None:
for address in (fixed_ip for (uuid, fixed_ip) in \
requested_networks if network['uuid'] == uuid):
break
# NOTE(vish): if we are not multi_host pass to the network host
if not network['multi_host']:
host = network['host']
# NOTE(vish): if there is no network host, set one
            if host is None:
host = rpc.call(context, FLAGS.network_topic,
{'method': 'set_network_host',
'args': {'network_ref': network}})
if host != self.host:
# need to call allocate_fixed_ip to correct network host
topic = self.db.queue_get_for(context,
FLAGS.network_topic,
host)
args = {}
args['instance_id'] = instance_id
args['network_id'] = network['id']
args['address'] = address
args['vpn'] = vpn
green_pool.spawn_n(rpc.call, context, topic,
{'method': '_rpc_allocate_fixed_ip',
'args': args})
else:
# i am the correct host, run here
self.allocate_fixed_ip(context, instance_id, network,
vpn=vpn, address=address)
# wait for all of the allocates (if any) to finish
green_pool.waitall()
def _rpc_allocate_fixed_ip(self, context, instance_id, network_id,
**kwargs):
"""Sits in between _allocate_fixed_ips and allocate_fixed_ip to
perform network lookup on the far side of rpc.
"""
network = self.db.network_get(context, network_id)
self.allocate_fixed_ip(context, instance_id, network, **kwargs)
class FloatingIP(object):
"""Mixin class for adding floating IP functionality to a manager."""
def init_host_floating_ips(self):
"""Configures floating ips owned by host."""
admin_context = context.get_admin_context()
try:
floating_ips = self.db.floating_ip_get_all_by_host(admin_context,
self.host)
except exception.NotFound:
return
for floating_ip in floating_ips:
if floating_ip.get('fixed_ip', None):
fixed_address = floating_ip['fixed_ip']['address']
# NOTE(vish): The False here is because we ignore the case
# that the ip is already bound.
self.driver.bind_floating_ip(floating_ip['address'], False)
self.driver.ensure_floating_forward(floating_ip['address'],
fixed_address)
def allocate_for_instance(self, context, **kwargs):
"""Handles allocating the floating IP resources for an instance.
calls super class allocate_for_instance() as well
rpc.called by network_api
"""
instance_id = kwargs.get('instance_id')
project_id = kwargs.get('project_id')
requested_networks = kwargs.get('requested_networks')
LOG.debug(_("floating IP allocation for instance |%s|"), instance_id,
context=context)
# call the next inherited class's allocate_for_instance()
# which is currently the NetworkManager version
# do this first so fixed ip is already allocated
ips = super(FloatingIP, self).allocate_for_instance(context, **kwargs)
if FLAGS.auto_assign_floating_ip:
# allocate a floating ip (public_ip is just the address string)
public_ip = self.allocate_floating_ip(context, project_id)
# set auto_assigned column to true for the floating ip
self.db.floating_ip_set_auto_assigned(context, public_ip)
# get the floating ip object from public_ip string
floating_ip = self.db.floating_ip_get_by_address(context,
public_ip)
# get the first fixed_ip belonging to the instance
fixed_ips = self.db.fixed_ip_get_by_instance(context, instance_id)
fixed_ip = fixed_ips[0] if fixed_ips else None
# call to correct network host to associate the floating ip
self.network_api.associate_floating_ip(context,
floating_ip,
fixed_ip,
affect_auto_assigned=True)
return ips
def deallocate_for_instance(self, context, **kwargs):
"""Handles deallocating floating IP resources for an instance.
calls super class deallocate_for_instance() as well.
rpc.called by network_api
"""
instance_id = kwargs.get('instance_id')
LOG.debug(_("floating IP deallocation for instance |%s|"), instance_id,
context=context)
fixed_ips = self.db.fixed_ip_get_by_instance(context, instance_id)
# add to kwargs so we can pass to super to save a db lookup there
kwargs['fixed_ips'] = fixed_ips
for fixed_ip in fixed_ips:
# disassociate floating ips related to fixed_ip
for floating_ip in fixed_ip.floating_ips:
address = floating_ip['address']
self.network_api.disassociate_floating_ip(context, address)
# deallocate if auto_assigned
if floating_ip['auto_assigned']:
self.network_api.release_floating_ip(context,
address,
True)
# call the next inherited class's deallocate_for_instance()
# which is currently the NetworkManager version
# call this after so floating IPs are handled first
super(FloatingIP, self).deallocate_for_instance(context, **kwargs)
def allocate_floating_ip(self, context, project_id):
"""Gets an floating ip from the pool."""
# NOTE(tr3buchet): all networks hosts in zone now use the same pool
LOG.debug("QUOTA: %s" % quota.allowed_floating_ips(context, 1))
if quota.allowed_floating_ips(context, 1) < 1:
LOG.warn(_('Quota exceeded for %s, tried to allocate '
'address'),
context.project_id)
raise quota.QuotaError(_('Address quota exceeded. You cannot '
'allocate any more addresses'))
# TODO(vish): add floating ips through manage command
return self.db.floating_ip_allocate_address(context,
project_id)
def associate_floating_ip(self, context, floating_address, fixed_address):
"""Associates an floating ip to a fixed ip."""
floating_ip = self.db.floating_ip_get_by_address(context,
floating_address)
if floating_ip['fixed_ip']:
raise exception.FloatingIpAlreadyInUse(
address=floating_ip['address'],
fixed_ip=floating_ip['fixed_ip']['address'])
self.db.floating_ip_fixed_ip_associate(context,
floating_address,
fixed_address,
self.host)
self.driver.bind_floating_ip(floating_address)
self.driver.ensure_floating_forward(floating_address, fixed_address)
def disassociate_floating_ip(self, context, floating_address):
"""Disassociates a floating ip."""
fixed_address = self.db.floating_ip_disassociate(context,
floating_address)
self.driver.unbind_floating_ip(floating_address)
self.driver.remove_floating_forward(floating_address, fixed_address)
def deallocate_floating_ip(self, context, floating_address):
"""Returns an floating ip to the pool."""
self.db.floating_ip_deallocate(context, floating_address)
class NetworkManager(manager.SchedulerDependentManager):
"""Implements common network manager functionality.
This class must be subclassed to support specific topologies.
host management:
hosts configure themselves for networks they are assigned to in the
table upon startup. If there are networks in the table which do not
have hosts, those will be filled in and have hosts configured
    as the hosts pick them up one at a time during their periodic task.
    The one-at-a-time part is to flatten the layout to help scale.
"""
# If True, this manager requires VIF to create a bridge.
SHOULD_CREATE_BRIDGE = False
# If True, this manager requires VIF to create VLAN tag.
SHOULD_CREATE_VLAN = False
timeout_fixed_ips = True
def __init__(self, network_driver=None, *args, **kwargs):
if not network_driver:
network_driver = FLAGS.network_driver
self.driver = utils.import_object(network_driver)
self.network_api = network_api.API()
self.compute_api = compute_api.API()
super(NetworkManager, self).__init__(service_name='network',
*args, **kwargs)
@utils.synchronized('get_dhcp')
def _get_dhcp_ip(self, context, network_ref, host=None):
"""Get the proper dhcp address to listen on."""
# NOTE(vish): this is for compatibility
if not network_ref['multi_host']:
return network_ref['gateway']
if not host:
host = self.host
network_id = network_ref['id']
try:
fip = self.db.fixed_ip_get_by_network_host(context,
network_id,
host)
return fip['address']
except exception.FixedIpNotFoundForNetworkHost:
elevated = context.elevated()
return self.db.fixed_ip_associate_pool(elevated,
network_id,
host=host)
def init_host(self):
"""Do any initialization that needs to be run if this is a
standalone service.
"""
# NOTE(vish): Set up networks for which this host already has
# an ip address.
ctxt = context.get_admin_context()
for network in self.db.network_get_all_by_host(ctxt, self.host):
self._setup_network(ctxt, network)
def periodic_tasks(self, context=None):
"""Tasks to be run at a periodic interval."""
super(NetworkManager, self).periodic_tasks(context)
if self.timeout_fixed_ips:
now = utils.utcnow()
timeout = FLAGS.fixed_ip_disassociate_timeout
time = now - datetime.timedelta(seconds=timeout)
num = self.db.fixed_ip_disassociate_all_by_timeout(context,
self.host,
time)
if num:
                LOG.debug(_('Disassociated %s stale fixed ip(s)'), num)
def set_network_host(self, context, network_ref):
"""Safely sets the host of the network."""
LOG.debug(_('setting network host'), context=context)
host = self.db.network_set_host(context,
network_ref['id'],
self.host)
return host
def _do_trigger_security_group_members_refresh_for_instance(self,
instance_id):
admin_context = context.get_admin_context()
instance_ref = self.db.instance_get(admin_context, instance_id)
groups = instance_ref['security_groups']
group_ids = [group['id'] for group in groups]
self.compute_api.trigger_security_group_members_refresh(admin_context,
group_ids)
def _get_networks_for_instance(self, context, instance_id, project_id,
requested_networks=None):
"""Determine & return which networks an instance should connect to."""
# TODO(tr3buchet) maybe this needs to be updated in the future if
# there is a better way to determine which networks
# a non-vlan instance should connect to
if requested_networks is not None and len(requested_networks) != 0:
network_uuids = [uuid for (uuid, fixed_ip) in requested_networks]
networks = self.db.network_get_all_by_uuids(context,
network_uuids)
else:
try:
networks = self.db.network_get_all(context)
except exception.NoNetworksFound:
return []
# return only networks which are not vlan networks
return [network for network in networks if
not network['vlan']]
def allocate_for_instance(self, context, **kwargs):
"""Handles allocating the various network resources for an instance.
rpc.called by network_api
"""
instance_id = kwargs.pop('instance_id')
host = kwargs.pop('host')
project_id = kwargs.pop('project_id')
type_id = kwargs.pop('instance_type_id')
requested_networks = kwargs.get('requested_networks')
vpn = kwargs.pop('vpn')
admin_context = context.elevated()
LOG.debug(_("network allocations for instance %s"), instance_id,
context=context)
networks = self._get_networks_for_instance(admin_context,
instance_id, project_id,
requested_networks=requested_networks)
self._allocate_mac_addresses(context, instance_id, networks)
self._allocate_fixed_ips(admin_context, instance_id,
host, networks, vpn=vpn,
requested_networks=requested_networks)
return self.get_instance_nw_info(context, instance_id, type_id, host)
def deallocate_for_instance(self, context, **kwargs):
"""Handles deallocating various network resources for an instance.
rpc.called by network_api
kwargs can contain fixed_ips to circumvent another db lookup
"""
instance_id = kwargs.pop('instance_id')
try:
fixed_ips = kwargs.get('fixed_ips') or \
self.db.fixed_ip_get_by_instance(context, instance_id)
except exception.FixedIpNotFoundForInstance:
fixed_ips = []
LOG.debug(_("network deallocation for instance |%s|"), instance_id,
context=context)
# deallocate fixed ips
for fixed_ip in fixed_ips:
self.deallocate_fixed_ip(context, fixed_ip['address'], **kwargs)
# deallocate vifs (mac addresses)
self.db.virtual_interface_delete_by_instance(context, instance_id)
def get_instance_nw_info(self, context, instance_id,
instance_type_id, host):
"""Creates network info list for instance.
        called by allocate_for_instance and network_api
context needs to be elevated
:returns: network info list [(network,info),(network,info)...]
where network = dict containing pertinent data from a network db object
and info = dict containing pertinent networking data
"""
# TODO(tr3buchet) should handle floating IPs as well?
try:
fixed_ips = self.db.fixed_ip_get_by_instance(context, instance_id)
except exception.FixedIpNotFoundForInstance:
LOG.warn(_('No fixed IPs for instance %s'), instance_id)
fixed_ips = []
vifs = self.db.virtual_interface_get_by_instance(context, instance_id)
flavor = self.db.instance_type_get(context, instance_type_id)
network_info = []
# a vif has an address, instance_id, and network_id
# it is also joined to the instance and network given by those IDs
for vif in vifs:
network = vif['network']
if network is None:
continue
# determine which of the instance's IPs belong to this network
network_IPs = [fixed_ip['address'] for fixed_ip in fixed_ips if
fixed_ip['network_id'] == network['id']]
# TODO(tr3buchet) eventually "enabled" should be determined
def ip_dict(ip):
return {
'ip': ip,
'netmask': network['netmask'],
'enabled': '1'}
def ip6_dict():
return {
'ip': ipv6.to_global(network['cidr_v6'],
vif['address'],
network['project_id']),
'netmask': network['netmask_v6'],
'enabled': '1'}
network_dict = {
'bridge': network['bridge'],
'id': network['id'],
'cidr': network['cidr'],
'cidr_v6': network['cidr_v6'],
'injected': network['injected'],
'vlan': network['vlan'],
'bridge_interface': network['bridge_interface'],
'multi_host': network['multi_host']}
if network['multi_host']:
dhcp_server = self._get_dhcp_ip(context, network, host)
else:
dhcp_server = self._get_dhcp_ip(context,
network,
network['host'])
info = {
'label': network['label'],
'gateway': network['gateway'],
'dhcp_server': dhcp_server,
'broadcast': network['broadcast'],
'mac': vif['address'],
'vif_uuid': vif['uuid'],
'rxtx_cap': flavor['rxtx_cap'],
'dns': [],
'ips': [ip_dict(ip) for ip in network_IPs],
'should_create_bridge': self.SHOULD_CREATE_BRIDGE,
'should_create_vlan': self.SHOULD_CREATE_VLAN}
if network['cidr_v6']:
info['ip6s'] = [ip6_dict()]
# TODO(tr3buchet): handle ip6 routes here as well
if network['gateway_v6']:
info['gateway6'] = network['gateway_v6']
if network['dns1']:
info['dns'].append(network['dns1'])
if network['dns2']:
info['dns'].append(network['dns2'])
network_info.append((network_dict, info))
return network_info
def _allocate_mac_addresses(self, context, instance_id, networks):
"""Generates mac addresses and creates vif rows in db for them."""
for network in networks:
self.add_virtual_interface(context, instance_id, network['id'])
def add_virtual_interface(self, context, instance_id, network_id):
vif = {'address': self.generate_mac_address(),
'instance_id': instance_id,
'network_id': network_id,
'uuid': str(utils.gen_uuid())}
# try FLAG times to create a vif record with a unique mac_address
for _ in xrange(FLAGS.create_unique_mac_address_attempts):
try:
return self.db.virtual_interface_create(context, vif)
except exception.VirtualInterfaceCreateException:
vif['address'] = self.generate_mac_address()
else:
self.db.virtual_interface_delete_by_instance(context,
instance_id)
raise exception.VirtualInterfaceMacAddressException()
def generate_mac_address(self):
"""Generate an Ethernet MAC address."""
mac = [0x02, 0x16, 0x3e,
random.randint(0x00, 0x7f),
random.randint(0x00, 0xff),
random.randint(0x00, 0xff)]
return ':'.join(map(lambda x: "%02x" % x, mac))
def add_fixed_ip_to_instance(self, context, instance_id, host, network_id):
"""Adds a fixed ip to an instance from specified network."""
networks = [self.db.network_get(context, network_id)]
self._allocate_fixed_ips(context, instance_id, host, networks)
def remove_fixed_ip_from_instance(self, context, instance_id, address):
"""Removes a fixed ip from an instance from specified network."""
fixed_ips = self.db.fixed_ip_get_by_instance(context, instance_id)
for fixed_ip in fixed_ips:
if fixed_ip['address'] == address:
self.deallocate_fixed_ip(context, address)
return
raise exception.FixedIpNotFoundForSpecificInstance(
instance_id=instance_id, ip=address)
def allocate_fixed_ip(self, context, instance_id, network, **kwargs):
"""Gets a fixed ip from the pool."""
# TODO(vish): when this is called by compute, we can associate compute
# with a network, or a cluster of computes with a network
# and use that network here with a method like
# network_get_by_compute_host
address = None
if network['cidr']:
address = kwargs.get('address', None)
if address:
address = self.db.fixed_ip_associate(context,
address, instance_id,
network['id'])
else:
address = self.db.fixed_ip_associate_pool(context.elevated(),
network['id'],
instance_id)
self._do_trigger_security_group_members_refresh_for_instance(
instance_id)
get_vif = self.db.virtual_interface_get_by_instance_and_network
vif = get_vif(context, instance_id, network['id'])
values = {'allocated': True,
'virtual_interface_id': vif['id']}
self.db.fixed_ip_update(context, address, values)
self._setup_network(context, network)
return address
def deallocate_fixed_ip(self, context, address, **kwargs):
"""Returns a fixed ip to the pool."""
self.db.fixed_ip_update(context, address,
{'allocated': False,
'virtual_interface_id': None})
fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address)
instance_ref = fixed_ip_ref['instance']
instance_id = instance_ref['id']
self._do_trigger_security_group_members_refresh_for_instance(
instance_id)
if FLAGS.force_dhcp_release:
dev = self.driver.get_dev(fixed_ip_ref['network'])
vif = self.db.virtual_interface_get_by_instance_and_network(
context, instance_ref['id'], fixed_ip_ref['network']['id'])
self.driver.release_dhcp(dev, address, vif['address'])
def lease_fixed_ip(self, context, address):
"""Called by dhcp-bridge when ip is leased."""
LOG.debug(_('Leased IP |%(address)s|'), locals(), context=context)
fixed_ip = self.db.fixed_ip_get_by_address(context, address)
instance = fixed_ip['instance']
if not instance:
raise exception.Error(_('IP %s leased that is not associated') %
address)
now = utils.utcnow()
self.db.fixed_ip_update(context,
fixed_ip['address'],
{'leased': True,
'updated_at': now})
if not fixed_ip['allocated']:
LOG.warn(_('IP |%s| leased that isn\'t allocated'), address,
context=context)
def release_fixed_ip(self, context, address):
"""Called by dhcp-bridge when ip is released."""
LOG.debug(_('Released IP |%(address)s|'), locals(), context=context)
fixed_ip = self.db.fixed_ip_get_by_address(context, address)
instance = fixed_ip['instance']
if not instance:
raise exception.Error(_('IP %s released that is not associated') %
address)
if not fixed_ip['leased']:
LOG.warn(_('IP %s released that was not leased'), address,
context=context)
self.db.fixed_ip_update(context,
fixed_ip['address'],
{'leased': False})
if not fixed_ip['allocated']:
self.db.fixed_ip_disassociate(context, address)
# NOTE(vish): dhcp server isn't updated until next setup, this
            #             means there will be stale entries in the conf file
# the code below will update the file if necessary
if FLAGS.update_dhcp_on_disassociate:
network_ref = self.db.fixed_ip_get_network(context, address)
self._setup_network(context, network_ref)
def create_networks(self, context, label, cidr, multi_host, num_networks,
network_size, cidr_v6, gateway_v6, bridge,
bridge_interface, dns1=None, dns2=None, **kwargs):
"""Create networks based on parameters."""
# NOTE(jkoelker): these are dummy values to make sure iter works
fixed_net_v4 = netaddr.IPNetwork('0/32')
fixed_net_v6 = netaddr.IPNetwork('::0/128')
subnets_v4 = []
subnets_v6 = []
subnet_bits = int(math.ceil(math.log(network_size, 2)))
if cidr_v6:
fixed_net_v6 = netaddr.IPNetwork(cidr_v6)
prefixlen_v6 = 128 - subnet_bits
subnets_v6 = fixed_net_v6.subnet(prefixlen_v6, count=num_networks)
if cidr:
fixed_net_v4 = netaddr.IPNetwork(cidr)
prefixlen_v4 = 32 - subnet_bits
subnets_v4 = list(fixed_net_v4.subnet(prefixlen_v4,
count=num_networks))
# NOTE(jkoelker): This replaces the _validate_cidrs call and
# prevents looping multiple times
try:
nets = self.db.network_get_all(context)
except exception.NoNetworksFound:
nets = []
used_subnets = [netaddr.IPNetwork(net['cidr']) for net in nets]
def find_next(subnet):
next_subnet = subnet.next()
while next_subnet in subnets_v4:
next_subnet = next_subnet.next()
if next_subnet in fixed_net_v4:
return next_subnet
for subnet in list(subnets_v4):
if subnet in used_subnets:
next_subnet = find_next(subnet)
if next_subnet:
subnets_v4.remove(subnet)
subnets_v4.append(next_subnet)
subnet = next_subnet
else:
raise ValueError(_('cidr already in use'))
for used_subnet in used_subnets:
if subnet in used_subnet:
msg = _('requested cidr (%(cidr)s) conflicts with '
'existing supernet (%(super)s)')
raise ValueError(msg % {'cidr': subnet,
'super': used_subnet})
if used_subnet in subnet:
next_subnet = find_next(subnet)
if next_subnet:
subnets_v4.remove(subnet)
subnets_v4.append(next_subnet)
subnet = next_subnet
else:
msg = _('requested cidr (%(cidr)s) conflicts '
'with existing smaller cidr '
'(%(smaller)s)')
raise ValueError(msg % {'cidr': subnet,
'smaller': used_subnet})
networks = []
subnets = itertools.izip_longest(subnets_v4, subnets_v6)
for index, (subnet_v4, subnet_v6) in enumerate(subnets):
net = {}
net['bridge'] = bridge
net['bridge_interface'] = bridge_interface
net['multi_host'] = multi_host
net['dns1'] = dns1
net['dns2'] = dns2
if num_networks > 1:
net['label'] = '%s_%d' % (label, index)
else:
net['label'] = label
if cidr and subnet_v4:
net['cidr'] = str(subnet_v4)
net['netmask'] = str(subnet_v4.netmask)
net['gateway'] = str(subnet_v4[1])
net['broadcast'] = str(subnet_v4.broadcast)
net['dhcp_start'] = str(subnet_v4[2])
if cidr_v6 and subnet_v6:
net['cidr_v6'] = str(subnet_v6)
if gateway_v6:
# use a pre-defined gateway if one is provided
net['gateway_v6'] = str(gateway_v6)
else:
net['gateway_v6'] = str(subnet_v6[1])
net['netmask_v6'] = str(subnet_v6._prefixlen)
if kwargs.get('vpn', False):
# this bit here is for vlan-manager
del net['dns1']
del net['dns2']
vlan = kwargs['vlan_start'] + index
net['vpn_private_address'] = str(subnet_v4[2])
net['dhcp_start'] = str(subnet_v4[3])
net['vlan'] = vlan
net['bridge'] = 'br%s' % vlan
                # NOTE(vish): This makes ports unique across the cloud, a more
                #             robust solution would be to make them unique per ip
net['vpn_public_port'] = kwargs['vpn_start'] + index
# None if network with cidr or cidr_v6 already exists
network = self.db.network_create_safe(context, net)
if not network:
raise ValueError(_('Network already exists!'))
else:
networks.append(network)
if network and cidr and subnet_v4:
self._create_fixed_ips(context, network['id'])
return networks
def delete_network(self, context, fixed_range, require_disassociated=True):
network = db.network_get_by_cidr(context, fixed_range)
if require_disassociated and network.project_id is not None:
raise ValueError(_('Network must be disassociated from project %s'
' before delete' % network.project_id))
db.network_delete_safe(context, network.id)
@property
def _bottom_reserved_ips(self): # pylint: disable=R0201
"""Number of reserved ips at the bottom of the range."""
return 2 # network, gateway
@property
def _top_reserved_ips(self): # pylint: disable=R0201
"""Number of reserved ips at the top of the range."""
return 1 # broadcast
def _create_fixed_ips(self, context, network_id):
"""Create all fixed ips for network."""
network = self.db.network_get(context, network_id)
# NOTE(vish): Should these be properties of the network as opposed
# to properties of the manager class?
bottom_reserved = self._bottom_reserved_ips
top_reserved = self._top_reserved_ips
project_net = netaddr.IPNetwork(network['cidr'])
num_ips = len(project_net)
for index in range(num_ips):
address = str(project_net[index])
if index < bottom_reserved or num_ips - index < top_reserved:
reserved = True
else:
reserved = False
self.db.fixed_ip_create(context, {'network_id': network_id,
'address': address,
'reserved': reserved})
def _allocate_fixed_ips(self, context, instance_id, host, networks,
**kwargs):
"""Calls allocate_fixed_ip once for each network."""
raise NotImplementedError()
def _setup_network(self, context, network_ref):
"""Sets up network on this host."""
raise NotImplementedError()
def validate_networks(self, context, networks):
"""check if the networks exists and host
is set to each network.
"""
if networks is None or len(networks) == 0:
return
network_uuids = [uuid for (uuid, fixed_ip) in networks]
self._get_networks_by_uuids(context, network_uuids)
for network_uuid, address in networks:
# check if the fixed IP address is valid and
# it actually belongs to the network
if address is not None:
if not utils.is_valid_ipv4(address):
raise exception.FixedIpInvalid(address=address)
fixed_ip_ref = self.db.fixed_ip_get_by_address(context,
address)
if fixed_ip_ref['network']['uuid'] != network_uuid:
raise exception.FixedIpNotFoundForNetwork(address=address,
network_uuid=network_uuid)
if fixed_ip_ref['instance'] is not None:
raise exception.FixedIpAlreadyInUse(address=address)
def _get_networks_by_uuids(self, context, network_uuids):
return self.db.network_get_all_by_uuids(context, network_uuids)
class FlatManager(NetworkManager):
"""Basic network where no vlans are used.
FlatManager does not do any bridge or vlan creation. The user is
responsible for setting up whatever bridges are specified when creating
networks through nova-manage. This bridge needs to be created on all
compute hosts.
The idea is to create a single network for the host with a command like:
nova-manage network create 192.168.0.0/24 1 256. Creating multiple
    networks for one manager is currently not supported, but could be
    added by modifying allocate_fixed_ip and get_network to get a network
with new logic instead of network_get_by_bridge. Arbitrary lists of
addresses in a single network can be accomplished with manual db editing.
If flat_injected is True, the compute host will attempt to inject network
config into the guest. It attempts to modify /etc/network/interfaces and
currently only works on debian based systems. To support a wider range of
OSes, some other method may need to be devised to let the guest know which
ip it should be using so that it can configure itself. Perhaps an attached
disk or serial device with configuration info.
Metadata forwarding must be handled by the gateway, and since nova does
not do any setup in this mode, it must be done manually. Requests to
169.254.169.254 port 80 will need to be forwarded to the api server.
"""
timeout_fixed_ips = False
def _allocate_fixed_ips(self, context, instance_id, host, networks,
**kwargs):
"""Calls allocate_fixed_ip once for each network."""
requested_networks = kwargs.get('requested_networks')
for network in networks:
address = None
if requested_networks is not None:
for address in (fixed_ip for (uuid, fixed_ip) in \
requested_networks if network['uuid'] == uuid):
break
self.allocate_fixed_ip(context, instance_id,
network, address=address)
def deallocate_fixed_ip(self, context, address, **kwargs):
"""Returns a fixed ip to the pool."""
super(FlatManager, self).deallocate_fixed_ip(context, address,
**kwargs)
self.db.fixed_ip_disassociate(context, address)
def _setup_network(self, context, network_ref):
"""Setup Network on this host."""
net = {}
net['injected'] = FLAGS.flat_injected
self.db.network_update(context, network_ref['id'], net)
class FlatDHCPManager(FloatingIP, RPCAllocateFixedIP, NetworkManager):
"""Flat networking with dhcp.
FlatDHCPManager will start up one dhcp server to give out addresses.
It never injects network settings into the guest. It also manages bridges.
Otherwise it behaves like FlatManager.
"""
SHOULD_CREATE_BRIDGE = True
def init_host(self):
"""Do any initialization that needs to be run if this is a
standalone service.
"""
self.driver.init_host()
self.driver.ensure_metadata_ip()
super(FlatDHCPManager, self).init_host()
self.init_host_floating_ips()
self.driver.metadata_forward()
def _setup_network(self, context, network_ref):
"""Sets up network on this host."""
network_ref['dhcp_server'] = self._get_dhcp_ip(context, network_ref)
mac_address = self.generate_mac_address()
dev = self.driver.plug(network_ref, mac_address)
self.driver.initialize_gateway_device(dev, network_ref)
if not FLAGS.fake_network:
self.driver.update_dhcp(context, dev, network_ref)
            if FLAGS.use_ipv6:
self.driver.update_ra(context, dev, network_ref)
gateway = utils.get_my_linklocal(dev)
self.db.network_update(context, network_ref['id'],
{'gateway_v6': gateway})
class VlanManager(RPCAllocateFixedIP, FloatingIP, NetworkManager):
"""Vlan network with dhcp.
VlanManager is the most complicated. It will create a host-managed
vlan for each project. Each project gets its own subnet. The networks
and associated subnets are created with nova-manage using a command like:
nova-manage network create 10.0.0.0/8 3 16. This will create 3 networks
of 16 addresses from the beginning of the 10.0.0.0 range.
A dhcp server is run for each subnet, so each project will have its own.
For this mode to be useful, each project will need a vpn to access the
instances in its subnet.
"""
SHOULD_CREATE_BRIDGE = True
SHOULD_CREATE_VLAN = True
def init_host(self):
"""Do any initialization that needs to be run if this is a
standalone service.
"""
self.driver.init_host()
self.driver.ensure_metadata_ip()
NetworkManager.init_host(self)
self.init_host_floating_ips()
self.driver.metadata_forward()
def allocate_fixed_ip(self, context, instance_id, network, **kwargs):
"""Gets a fixed ip from the pool."""
if kwargs.get('vpn', None):
address = network['vpn_private_address']
self.db.fixed_ip_associate(context,
address,
instance_id,
reserved=True)
else:
address = kwargs.get('address', None)
if address:
address = self.db.fixed_ip_associate(context, address,
instance_id,
network['id'])
else:
address = self.db.fixed_ip_associate_pool(context,
network['id'],
instance_id)
self._do_trigger_security_group_members_refresh_for_instance(
instance_id)
vif = self.db.virtual_interface_get_by_instance_and_network(context,
instance_id,
network['id'])
values = {'allocated': True,
'virtual_interface_id': vif['id']}
self.db.fixed_ip_update(context, address, values)
self._setup_network(context, network)
return address
def add_network_to_project(self, context, project_id):
"""Force adds another network to a project."""
self.db.network_associate(context, project_id, force=True)
def _get_networks_for_instance(self, context, instance_id, project_id,
requested_networks=None):
"""Determine which networks an instance should connect to."""
# get networks associated with project
if requested_networks is not None and len(requested_networks) != 0:
network_uuids = [uuid for (uuid, fixed_ip) in requested_networks]
networks = self.db.network_get_all_by_uuids(context,
network_uuids,
project_id)
else:
networks = self.db.project_get_networks(context, project_id)
return networks
def create_networks(self, context, **kwargs):
"""Create networks based on parameters."""
# Check that num_networks + vlan_start is not > 4094, fixes lp708025
if kwargs['num_networks'] + kwargs['vlan_start'] > 4094:
raise ValueError(_('The sum between the number of networks and'
' the vlan start cannot be greater'
' than 4094'))
# check that num networks and network size fits in fixed_net
fixed_net = netaddr.IPNetwork(kwargs['cidr'])
if len(fixed_net) < kwargs['num_networks'] * kwargs['network_size']:
raise ValueError(_('The network range is not big enough to fit '
'%(num_networks)s. Network size is %(network_size)s') %
kwargs)
NetworkManager.create_networks(self, context, vpn=True, **kwargs)
def _setup_network(self, context, network_ref):
"""Sets up network on this host."""
if not network_ref['vpn_public_address']:
net = {}
address = FLAGS.vpn_ip
net['vpn_public_address'] = address
network_ref = db.network_update(context, network_ref['id'], net)
else:
address = network_ref['vpn_public_address']
network_ref['dhcp_server'] = self._get_dhcp_ip(context, network_ref)
mac_address = self.generate_mac_address()
dev = self.driver.plug(network_ref, mac_address)
self.driver.initialize_gateway_device(dev, network_ref)
# NOTE(vish): only ensure this forward if the address hasn't been set
# manually.
if address == FLAGS.vpn_ip and hasattr(self.driver,
"ensure_vpn_forward"):
self.driver.ensure_vpn_forward(FLAGS.vpn_ip,
network_ref['vpn_public_port'],
network_ref['vpn_private_address'])
if not FLAGS.fake_network:
self.driver.update_dhcp(context, dev, network_ref)
            if FLAGS.use_ipv6:
self.driver.update_ra(context, dev, network_ref)
gateway = utils.get_my_linklocal(dev)
self.db.network_update(context, network_ref['id'],
{'gateway_v6': gateway})
def _get_networks_by_uuids(self, context, network_uuids):
return self.db.network_get_all_by_uuids(context, network_uuids,
context.project_id)
@property
def _bottom_reserved_ips(self):
"""Number of reserved ips at the bottom of the range."""
return super(VlanManager, self)._bottom_reserved_ips + 1 # vpn server
@property
def _top_reserved_ips(self):
"""Number of reserved ips at the top of the range."""
parent_reserved = super(VlanManager, self)._top_reserved_ips
return parent_reserved + FLAGS.cnt_vpn_clients
|
# -*- encoding: UTF-8 -*-
from controle import UC
from lib.memoria import Mem_instrucoes, Mem_dados
from lib.registradores import Banco
from lib.operacoes import ULA
from lib.instrucoes import Instrucao_R_I
class Sistema (object):
def __init__(self):
self.__PC = 0
self.__UC = UC()
self.__ULA = ULA()
self.__memoriaInstrucao = Mem_instrucoes()
self.__memoriaDados = Mem_dados()
        self.__bancoDeRegistradores = Banco()
        # Initialize the auxiliary registers returned by the getters below,
        # so getIR/getMDR/getA/getB do not raise AttributeError before use.
        self.__IR = None
        self.__MDR = None
        self.__A = None
        self.__B = None
def executaInstrucao (self):
instrucao = self.__memoriaInstrucao.getInstrucao(self.__PC)
valores = self.decodifica(instrucao)
if valores:
self.__ULA.opera(self.__UC, instrucao, valores)
self.incrementaPC()
def decodifica (self, instrucao):
'''
        Function: decodifica(instrucao)
        Description: Looks up and returns the values of registers and variables
'''
        # Check whether the PC points to a label
if type(instrucao) is str:
return None
self.__UC.decodifica(instrucao)
if type(instrucao) is Instrucao_R_I:
resultado = instrucao.getResultado()
valor2 = None
            # look up the destination register
resultado = self.__bancoDeRegistradores.getRegistrador(nome=resultado)
            # look up operand 1
valor1 = instrucao.getValor1()
print valor1
if self.__bancoDeRegistradores.getRegistrador(nome=valor1):
valor1 = self.__bancoDeRegistradores.getRegistrador(nome=valor1).getValor()
elif self.__memoriaDados.getDado(nome=valor1):
valor1 = self.__memoriaDados.getDado(valor1).getValor()
            # look up operand 2
if instrucao.getValor2():
valor2 = instrucao.getValor2()
if self.__bancoDeRegistradores.getRegistrador(nome=valor2):
valor2 = self.__bancoDeRegistradores.getRegistrador(nome=valor2).getValor()
elif self.__memoriaDados.getDado(nome=valor2):
valor2 = self.__memoriaDados.getDado(valor2).getValor()
return [resultado, valor1, valor2]
else:
endereco = instrucao.getEndereco()
fila_de_inst = self.__memoriaInstrucao.getDados()
for inst in fila_de_inst:
if inst == endereco:
self.__PC = fila_de_inst.index(inst)
return None
return None
def getPC(self):
return self.__PC
def getProximaInstrucao(self):
try:
return self.__memoriaInstrucao.getInstrucao(self.__PC)
except:
return "Fim do programa!"
def setPC (self, indice):
self.__PC = indice
def incrementaPC(self):
self.__PC += 1
def getIR (self):
return self.__IR
def getMDR (self):
return self.__MDR
def getA (self):
return self.__A
def getB (self):
return self.__B
def getULA (self):
return self.__ULA
def getUC(self):
return self.__UC
def getMemoriaInstrucao(self):
return self.__memoriaInstrucao
def getMemoriaDados(self):
return self.__memoriaDados
def getBanco (self):
return self.__bancoDeRegistradores
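# Illustrative usage (a sketch; assumes the instruction memory was loaded by a
# front-end not shown in this module):
#   >>> sistema = Sistema()
#   >>> while sistema.getProximaInstrucao() != "End of program!":
#   ...     sistema.executaInstrucao()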
|
import tempfile
import docker
from django.template import loader, Context
def get_docker_client():
"""
Get configured docker client.
    Configuration is read from the ``DOCKER`` dict in :mod:`abdallah.settings`.
:returns: Configured Docker client
:rtype: :class:`docker.Client`
"""
from abdallah import settings
client = docker.Client(base_url=settings.DOCKER['BASE_URL'],
version=settings.DOCKER['VERSION'],
timeout=settings.DOCKER['TIMEOUT'],
tls=settings.DOCKER['TLS'])
return client
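# Illustrative usage (a sketch; assumes the DOCKER settings above point at a
# reachable daemon):
#   >>> client = get_docker_client()
#   >>> client.containers(all=True)  # standard docker-py call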
def get_job_host_config(job, job_attr):
template = loader.get_template('abdallah/job.sh')
context_dict = job_attr.copy()
context_dict.update({'job': job})
context = Context(context_dict)
init_script_path = tempfile.mktemp('abdallah')
volumes = [init_script_path]
with open(init_script_path, 'w') as init_script:
init_script.write(template.render(context))
# host_config = docker.utils.create_host_config(binds=[
# '/init.sh:%s:ro' % init_script_path,
# ])
host_config = {
init_script_path: {'bind': '/job.sh', 'ro': True}
}
return volumes, host_config
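# --- Illustrative usage (not part of the original module) ---
# A minimal sketch, assuming abdallah's Django settings are importable and that a
# Docker daemon is reachable at settings.DOCKER['BASE_URL']. client.version() is a
# standard docker-py call, used here only as a round-trip sanity check.
if __name__ == '__main__':
    client = get_docker_client()
    print(client.version())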
|
#-*- coding: utf-8 -*-
import ujson
from django.http import HttpResponse, Http404
from django.core.cache import cache
from django.core.management import call_command
from django.db import connection
RACES_PER_VIEW = 25
def last_races(request):
cursor = connection.cursor()
query = "SELECT id, info, category, genre, link, location, discipline, raceId, date FROM rankings_races ORDER BY date DESC LIMIT " + \
str(RACES_PER_VIEW) + ";"
cursor.execute(query)
races = dictfetchall(cursor)
races = ujson.dumps(races, encode_html_chars=False, ensure_ascii=False)
res = HttpResponse(
races,
content_type="application/json"
)
return res
def race(request, pk):
pk = str(int(pk))
cursor = connection.cursor()
query = "SELECT id, info, category, genre, link, location, discipline, `table`, raceId, date FROM rankings_races WHERE id='" + \
pk + "';"
cursor.execute(query)
races = dictfetchall(cursor)[0]
races = ujson.dumps(races, encode_html_chars=False, ensure_ascii=False)
res = HttpResponse(
races,
content_type="application/json"
)
return res
def race_category(request, category):
    if category not in ['WC', 'EC', 'FIS']:
        raise Http404
page = request.GET.get('page')
page = 0 if page is None else (int(page) - 1)
nb_races = RACES_PER_VIEW * 2 if 'FIS' in category else RACES_PER_VIEW
offset = nb_races * page
cursor = connection.cursor()
query = "SELECT id, info, category, genre, link, location, discipline, raceId, date FROM rankings_races WHERE category='" + \
category + "' ORDER BY date DESC LIMIT " + \
str(offset) + ", " + str(nb_races) + ";"
cursor.execute(query)
races = dictfetchall(cursor)
races = ujson.dumps(races, encode_html_chars=False, ensure_ascii=False)
res = HttpResponse(
races,
content_type="application/json"
)
return res
def update(request):
call_command('updateraces', verbosity=3, interactive=False)
return HttpResponse('1')
def dictfetchall(cursor):
"Returns all rows from a cursor as a dict"
desc = cursor.description
return [
dict(zip([col[0] for col in desc], row))
for row in cursor.fetchall()
]
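# --- Illustrative example (not part of the original module) ---
# A minimal sketch of what dictfetchall() produces, using a stand-in cursor object
# instead of a real database connection; the column names here are made up.
if __name__ == '__main__':
    class _FakeCursor(object):
        description = [('id',), ('location',)]
        def fetchall(self):
            return [(1, 'Wengen'), (2, 'Kitzbuehel')]
    # -> [{'id': 1, 'location': 'Wengen'}, {'id': 2, 'location': 'Kitzbuehel'}]
    print(dictfetchall(_FakeCursor()))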
|
#!/usr/bin/env python
from sonyAPI2 import API2
import cv2
import urllib2
import numpy as np
import time
import struct
api = API2()
api.update_api_list()
try:
result = api.do('getAvailableCameraFunction')
    current = result['result'][0]
    available = result['result'][1]
    if current != "Remote Shooting":
        if "Remote Shooting" in available:
            api.do('setCameraFunction',["Remote Shooting"])
            api.update_api_list()
        else:
            print "Remote Shooting not available"
except KeyError:
print result
try:
result = api.do('getAvailableShootMode')
    current = result['result'][0]
    available = result['result'][1]
    if current != "still":
        if "still" in available:
            api.do('setShootMode',["still"])
            api.update_api_list()
        else:
            print "Still shooting not available"
except KeyError:
print result
try:
result = api.do('actTakePicture')
url = result['result'][0][0]
except KeyError:
print result
except TypeError:
print result
f = urllib2.urlopen(url)
d = np.asarray(bytearray(f.read()), dtype='uint8')
img = cv2.imdecode(d,cv2.IMREAD_COLOR)
cv2.imshow('postview', img)
cv2.waitKey(10000)  # the HighGUI window only refreshes inside waitKey, so use it instead of time.sleep
|
# -*- coding: utf-8 -*-
import traceback
import os
import random
# import json
import subprocess
import zipfile
import zlib
from datetime import datetime as dt
from tg import request, config
from rpac.model import *
__all__ = [
'gen_pdf',
'null_string_sizes',
'format_fibers',
'format_cares',
'format_coo',
'format_list']
CARES = [
"WASH",
"BLEACH",
"IRON",
"DRY",
"DRYCLEAN",
"SPECIALCARE"
]
def gen_pdf(header_no, details):
try:
public_dir = config.get( 'public_dir' )
download_dir = os.path.join( public_dir, 'layout_pdf' )
if not os.path.exists( download_dir ):
os.makedirs( download_dir )
phantomjs = os.path.join( public_dir, 'phantomjs', 'phantomjs.exe' )
labeljs = os.path.join( public_dir, 'phantomjs', 'pdf.js' )
pdfs = []
for detail_id, item_code in details:
http_url = 'http://%s/pdflayout/index?id=%s' % (request.headers.get( 'Host' ), detail_id)
_name = '%s_%s%d' % (trim(item_code), dt.now().strftime( "%Y%m%d%H%M%S" ), random.randint( 1, 1000 ) )
pdf_file = os.path.join( download_dir, '%s.pdf' % _name )
cmd = '%s %s %s %s' % (phantomjs, labeljs, http_url, pdf_file)
# print cmd
sp = subprocess.Popen(cmd, stdout = subprocess.PIPE, stderr = subprocess.STDOUT)
while 1:
if sp.poll() is not None:
#print 'exec command completed.'
break
# else:
# line = sp.stdout.readline().strip()
pdfs.append(pdf_file)
pd_zip_file = os.path.join( download_dir, "%s_pdf_%s%d.zip" % (trim(header_no), dt.now().strftime( "%Y%m%d%H%M%S" ), random.randint( 1, 1000 ) ) )
create_zip(pd_zip_file, pdfs)
remove_files(pdfs)
return pd_zip_file
except:
traceback.print_exc()
return None
def create_zip(zipf, files):
    _zip = zipfile.ZipFile(zipf, 'w', zipfile.ZIP_DEFLATED)
for f in files:
if os.path.exists(f):
_zip.write(os.path.abspath(f), os.path.basename(f))
_zip.close()
return zipf
def remove_files(files):
for f in files:
remove_file(f)
def remove_file(file):
try:
os.remove(file)
except:
pass
def trim(s):
return ''.join(s.split())
def null_string_sizes(data):
null_list = data.get('SIZE', {'values': []})['values']
if not null_list:
return ['']
return null_list
def format_fibers(data, capitalize=False):
fibers = {
'en': [],
'sp': []
}
for ff in data['FIBERS']['values']:
if ff:
if capitalize:
fibers['en'].append('%s%% %s' % (ff['percent'], ff['english'].lower().capitalize()))
fibers['sp'].append('%s%% %s' % (ff['percent'], ff['spanish'].lower().capitalize()))
else:
fibers['en'].append('%s%% %s' % (ff['percent'], ff['english']))
fibers['sp'].append('%s%% %s' % (ff['percent'], ff['spanish']))
# print fibers
return fibers
def format_cares(data):
cares = {
'en': [],
'sp': []
}
for cs in CARES:
cc = data.get(cs, {'values': []})
for c in cc['values']:
# print '****', c
cares['en'].append(c['english'])
cares['sp'].append(c['spanish'])
return cares
def format_coo(data):
coos = {
'en': [],
'sp': []
}
for coo in data['CO']['values']:
coos['en'].append(coo['english'])
coos['sp'].append(coo['spanish'])
return coos
def format_list(ll, method=None, s=''):
if method:
return s.join([getattr(l, method)() for l in ll if l])
return s.join(ll)
def format_list2(ll):
return [l.lower().capitalize() for l in ll if l]
def format_price(data):
try:
price = '$%.2f' % float(data['PRICE']['values'][0])
return price
except:
return '$0.00'
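# --- Illustrative example (not part of the original module) ---
# A minimal sketch of the data shape the formatting helpers above expect; the keys
# mirror those read by format_fibers()/format_cares()/format_price(), but the real
# structure is produced elsewhere in the rpac project, so treat this as assumed.
if __name__ == '__main__':
    sample = {
        'FIBERS': {'values': [
            {'percent': '60', 'english': 'COTTON', 'spanish': 'ALGODON'},
            {'percent': '40', 'english': 'POLYESTER', 'spanish': 'POLIESTER'},
        ]},
        'WASH': {'values': [{'english': 'Machine wash cold', 'spanish': 'Lavar a maquina en frio'}]},
        'PRICE': {'values': ['19.9']},
    }
    print format_fibers(sample, capitalize=True)  # {'en': ['60% Cotton', '40% Polyester'], ...}
    print format_cares(sample)                    # only the care keys present are collected
    print format_price(sample)                    # '$19.90'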
|
#!/usr/bin/python
"""Test to verify table message presentation."""
from macaroon.playback import *
import utils
sequence = MacroSequence()
sequence.append(PauseAction(3000))
sequence.append(TypeAction("Line 1"))
sequence.append(KeyComboAction("Return"))
sequence.append(KeyComboAction("<Control>F12"))
sequence.append(PauseAction(1000))
sequence.append(KeyComboAction("Return"))
sequence.append(PauseAction(3000))
sequence.append(KeyComboAction("<Control>Home"))
sequence.append(PauseAction(3000))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"1. Down arrow to enter the table",
["BRAILLE LINE: ' $l'",
" VISIBLE: ' $l', cursor=1",
"SPEECH OUTPUT: 'A1 B1.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"2. Down arrow to next row of the table",
["BRAILLE LINE: ' $l'",
" VISIBLE: ' $l', cursor=1",
"SPEECH OUTPUT: 'A2 B2.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"3. Down arrow to exit the table",
["BRAILLE LINE: ' $l'",
" VISIBLE: ' $l', cursor=1",
"SPEECH OUTPUT: 'blank'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"4. Up arrow to enter the table",
["BRAILLE LINE: ' $l'",
" VISIBLE: ' $l', cursor=1",
"SPEECH OUTPUT: 'A2 B2.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.AssertPresentationAction(
"5. Tab to move to last cell of the table",
["BRAILLE LINE: ' $l'",
" VISIBLE: ' $l', cursor=1",
"SPEECH OUTPUT: 'End of table.'",
"SPEECH OUTPUT: 'blank B2.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.AssertPresentationAction(
"6. Tab to insert a new row in the table",
["BRAILLE LINE: 'Row inserted at the end of the table.'",
" VISIBLE: 'Row inserted at the end of the t', cursor=0",
"BRAILLE LINE: ' $l'",
" VISIBLE: ' $l', cursor=1",
"SPEECH OUTPUT: 'Row inserted at the end of the table.' voice=system",
"SPEECH OUTPUT: 'blank A3.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("<Control>Z"))
sequence.append(utils.AssertPresentationAction(
"7. Ctrl+Z to undo that insertion",
["BRAILLE LINE: 'undo'",
" VISIBLE: 'undo', cursor=0",
"BRAILLE LINE: 'Last row deleted.'",
" VISIBLE: 'Last row deleted.', cursor=0",
"BRAILLE LINE: ' $l'",
" VISIBLE: ' $l', cursor=1",
"SPEECH OUTPUT: 'undo' voice=system",
"SPEECH OUTPUT: 'Last row deleted.' voice=system",
"SPEECH OUTPUT: 'End of table.'",
"SPEECH OUTPUT: 'A2 B2.'"]))
sequence.append(KeyComboAction("<Alt>F4"))
sequence.append(utils.AssertionSummaryAction())
sequence.start()
|
from multiprocessing import set_start_method, cpu_count
set_start_method('forkserver')
import os
os.environ["OMP_NUM_THREADS"] = str(cpu_count()) # or to whatever you want
from argparse import ArgumentParser
from datetime import datetime
time_now = datetime.utcnow().strftime("%Y%m%d%H%M%S")
ap = ArgumentParser()
ap.add_argument('-d', '--directory', type=str,
default='nalu_tf_save_dir/saves_{}'.format(time_now),
help='The tensorflow ckpt save file')
ap.add_argument('-nnl', '--n_nalu_layers', type=int, default=1,
help='Whether to use 1 (default), 2, or ... N NALU layers.')
ap.add_argument('-nnn', '--n_nalu_neurons', type=int, default=0,
help='How many features on the second NALU layer')
ap.add_argument('-ne', '--n_epochs', type=int, default=200,
help='Number of N_EPOCHS to train the network with.')
ap.add_argument('-nc', '--n_classes', type=int, default=1,
help='n_classes == 1 for Regression (default); > 1 for Classification.')
ap.add_argument('-bs', '--batch_size', type=int, default=32,
help='Batch size: number of samples per batch.')
ap.add_argument('-lr', '--learning_rate', type=float, default=1e-3,
help='Learning rate: how fast the optimizer moves up/down the gradient.')
ap.add_argument('-ts', '--test_size', type=float, default=0.75,
                help='Fraction of the data to hold out as the test set')
ap.add_argument('-rs', '--random_state', type=int, default=42,
help='Integer value to initialize train/test splitting randomization')
ap.add_argument('-v', '--verbose', action="store_true",
help='Whether to set verbosity = True or False (default)')
ap.add_argument('-ds', '--data_set', type=str, default='',
help='The csv file containing the data to predict with')
try:
args = vars(ap.parse_args())
except:
args = {}
args['directory'] = ap.get_default('directory')
args['n_nalu_layers'] = ap.get_default('n_nalu_layers')
args['n_nalu_neurons'] = ap.get_default('n_nalu_neurons')
args['n_epochs'] = ap.get_default('n_epochs')
args['n_classes'] = ap.get_default('n_classes')
args['batch_size'] = ap.get_default('batch_size')
args['learning_rate'] = ap.get_default('learning_rate')
args['test_size'] = ap.get_default('test_size')
    args['random_state'] = ap.get_default('random_state')
args['verbose'] = ap.get_default('verbose')
args['data_set'] = ap.get_default('data_set')
verbose = args['verbose']
data_set_fname = args['data_set']
import pandas as pd
import numpy as np
import pdb
import warnings
warnings.filterwarnings("ignore")
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, MinMaxScaler, minmax_scale
from sklearn.ensemble import RandomForestRegressor, ExtraTreesRegressor#, AdaBoostRegressor, GradientBoostingRegressor
from sklearn.decomposition import PCA, FastICA
from sklearn.externals import joblib
from sklearn.metrics import r2_score
import xgboost as xgb
from tqdm import tqdm
from glob import glob
from time import time
start0 = time()
print('BEGIN NEW HyperParameter Optimization.')
from sklearn.metrics import r2_score
''' NALU: Neural Arithmetic Logic Unit
NALU uses memory and logic gates to train a unique TF layer to modify the gradients of the weights.
This seems to be very similar to an LSTM layer, but for a non-RNN.
This code has been specifically implemented with tensorflow.
Code source: https://github.com/grananqvist/NALU-tf
Original paper: https://arxiv.org/abs/1808.00508 (Trask et al.)
'''
import numpy as np
import tensorflow as tf
def nalu(input_layer, num_outputs):
""" Neural Arithmetic Logic Unit tesnorflow layer
Arguments:
input_layer - A Tensor representing previous layer
num_outputs - number of ouput units
Returns:
A tensor representing the output of NALU
"""
shape = (int(input_layer.shape[-1]), num_outputs)
# define variables
W_hat = tf.Variable(tf.truncated_normal(shape, stddev=0.02))
M_hat = tf.Variable(tf.truncated_normal(shape, stddev=0.02))
G = tf.Variable(tf.truncated_normal(shape, stddev=0.02))
# operations according to paper
W = tf.tanh(W_hat) * tf.sigmoid(M_hat)
m = tf.exp(tf.matmul(tf.log(tf.abs(input_layer) + 1e-7), W))
g = tf.sigmoid(tf.matmul(input_layer, G))
a = tf.matmul(input_layer, W)
out = g * a + (1 - g) * m
return out
def generate_dataset(size=10000, op='sum', n_features=2):
""" Generate dataset for NALU toy problem
Arguments:
size - number of samples to generate
op - the operation that the generated data should represent. sum | prod
Returns:
X - the dataset
Y - the dataset labels
"""
X = np.random.randint(9, size=(size, n_features))
if op == 'prod':
Y = np.prod(X, axis=1, keepdims=True)
else:
Y = np.sum(X, axis=1, keepdims=True)
return X, Y
def chisq(y_true, y_pred, y_error): return np.sum(((y_true-y_pred)/y_error)**2.)
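# --- Illustrative sketch (not part of the original script) ---
# A minimal end-to-end use of nalu() on the toy dataset above, kept in its own
# function and never called here, so it cannot interfere with the real training
# run below. It relies only on the TF 1.x graph/session API already used in this
# file; the hyperparameter values are arbitrary.
def _nalu_toy_example(n_epochs=10, batch_size=64):
    X_toy, Y_toy = generate_dataset(size=1024, op='sum', n_features=2)
    x_ph = tf.placeholder(tf.float32, shape=[None, 2])
    y_ph = tf.placeholder(tf.float32, shape=[None, 1])
    y_hat = nalu(x_ph, 1)                      # single NALU layer, one output
    toy_loss = tf.nn.l2_loss(y_hat - y_ph)
    toy_train = tf.train.AdamOptimizer(1e-2).minimize(toy_loss)
    with tf.Session() as toy_sess:
        toy_sess.run(tf.global_variables_initializer())
        for _ in range(n_epochs):
            for i in range(0, len(X_toy), batch_size):
                xs, ys = X_toy[i:i + batch_size], Y_toy[i:i + batch_size]
                _, l = toy_sess.run([toy_train, toy_loss],
                                    feed_dict={x_ph: xs, y_ph: ys})
    return l  # final batch loss; should shrink towards zero on the 'sum' task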
if __name__ == "__main__":
    # NOTE: `features`, `labels` and `spitzerCalRawData` are assumed to be loaded
    # elsewhere (e.g. from the CSV passed via --data_set); they are not defined in
    # this file as written.
    N_FEATURES = features.shape[-1]
EXPORT_DIR = args['directory']
N_NALU_LAYERS = args['n_nalu_layers']
N_NALU_NEURONS = N_FEATURES
if args['n_nalu_neurons'] > 0: N_NALU_NEURONS = args['n_nalu_neurons']
N_CLASSES = args['n_classes'] # = 1 for regression
TEST_SIZE = args['test_size']
RANDOM_STATE = args['random_state']
N_EPOCHS = args['n_epochs']
LEARNING_RATE = args['learning_rate']
BATCH_SIZE = args['batch_size']
EXPORT_DIR = EXPORT_DIR + '_nnl{}_nnn{}_nc{}_bs{}_lr{}_ne{}_ts{}_rs{}/'.format(N_NALU_LAYERS, N_NALU_NEURONS, N_CLASSES, BATCH_SIZE, LEARNING_RATE, N_EPOCHS, TEST_SIZE, RANDOM_STATE)
print("Saving models to path: {}".format(EXPORT_DIR))
idx_train, idx_test = train_test_split(np.arange(labels.size), test_size=TEST_SIZE, random_state=RANDOM_STATE)
X_data, Y_data = features[idx_train], labels[idx_train][:,None]
    LAST_BIT = X_data.shape[0] - BATCH_SIZE*(X_data.shape[0]//BATCH_SIZE)
    # Force an integer number of batches by dropping the last "< BATCH_SIZE" samples
    # (guard against LAST_BIT == 0, where X_data[:-0] would be an empty slice)
    X_data_use = X_data[:-LAST_BIT].copy() if LAST_BIT else X_data.copy()
    Y_data_use = Y_data[:-LAST_BIT].copy() if LAST_BIT else Y_data.copy()
N_FEATURES = X_data.shape[-1]
output_dict = {}
output_dict['loss'] = np.zeros(N_EPOCHS)
output_dict['accuracy'] = np.zeros(N_EPOCHS)
output_dict['R2_train'] = np.zeros(N_EPOCHS)
output_dict['R2_test'] = np.zeros(N_EPOCHS)
output_dict['chisq_train'] = np.zeros(N_EPOCHS)
output_dict['chisq_test'] = np.zeros(N_EPOCHS)
with tf.device("/cpu:0"):
# tf.reset_default_graph()
# define placeholders and network
X = tf.placeholder(tf.float32, shape=[None, N_FEATURES])
Y_true = tf.placeholder(tf.float32, shape=[None, 1])
# Setup NALU Layers
nalu_layers = {'nalu0':nalu(X,N_NALU_NEURONS)}
for kn in range(1, N_NALU_LAYERS):
prev_layer = nalu_layers['nalu{}'.format(kn-1)]
nalu_layers['nalu{}'.format(kn)] = nalu(prev_layer, N_NALU_NEURONS)
Y_pred = nalu(nalu_layers['nalu{}'.format(N_NALU_LAYERS-1)], N_CLASSES) # N_CLASSES = 1 for regression
# loss and train operations
loss = tf.nn.l2_loss(Y_pred - Y_true) # NALU uses mse
optimizer = tf.train.AdamOptimizer(LEARNING_RATE)
train_op = optimizer.minimize(loss)
# Add an op to initialize the variables.
init_op = tf.global_variables_initializer()
# Add ops to save and restore all the variables.
saver = tf.train.Saver()#max_to_keep=N_EPOCHS)
sess_config = tf.ConfigProto(
device_count={"CPU": cpu_count()},
inter_op_parallelism_threads=cpu_count(),
intra_op_parallelism_threads=cpu_count())
with tf.Session(config=sess_config) as sess:
            ''' Tensorboard Readouts'''
''' Training R-Squared Score'''
total_error = tf.reduce_sum(tf.square(tf.subtract(Y_true, tf.reduce_mean(Y_true))))
unexplained_error = tf.reduce_sum(tf.square(tf.subtract(Y_true, Y_pred)))
R_squared = tf.subtract(1.0, tf.div(unexplained_error, total_error))
# ''' Testing R-Squared Score'''
# Y_pred_test = Y_pred.eval(feed_dict={X: features[idx_test]})
# total_error_test = tf.reduce_sum(tf.square(tf.subtract(Y_data_use, tf.reduce_mean(Y_data_use))))
# unexplained_error_test = tf.reduce_sum(tf.square(tf.subtract(Y_data_use, Y_pred_test)))
# R_squared_test = tf.subtract(1, tf.div(unexplained_error, total_error))
''' Loss and RMSE '''
squared_error = tf.square(tf.subtract(Y_true, Y_pred))
loss = tf.reduce_sum(tf.sqrt(tf.cast(squared_error, tf.float32)))
rmse = tf.sqrt(tf.reduce_mean(tf.cast(squared_error, tf.float32)))
''' Declare Scalar Tensorboard Terms'''
tf.summary.scalar('loss', loss)
tf.summary.scalar('RMSE', rmse)
tf.summary.scalar('R_sqrd', R_squared)
''' Declare Histogram Tensorboard Terms'''
# Squared Error Histogram
tf.summary.histogram('SqErr Hist', squared_error)
# NALU Layers Histogram
for kn in range(N_NALU_LAYERS):
tf.summary.histogram('NALU{}'.format(kn), nalu_layers['nalu{}'.format(kn)])
''' Merge all the summaries and write them out to `export_dir` + `/logs_train_`time_now`` '''
merged = tf.summary.merge_all()
''' Output all summaries to `export_dir` + `/logs_train_`time_now`` '''
train_writer = tf.summary.FileWriter(EXPORT_DIR + '/logs_train_{}'.format(time_now),sess.graph)
# test_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/test')
''' END Tensorboard Readout Step'''
sess.run(init_op)
best_test_r2 = 0
for ep in tqdm(range(N_EPOCHS)):
i = 0
gts = 0
# for k in range(N_EPOCHS):
# batch_now = range(k*N_EPOCHS, (k+1)*N_EPOCHS)
while i < len(X_data_use):
xs, ys = X_data_use[i:i+BATCH_SIZE], Y_data_use[i:i+BATCH_SIZE]
_, ys_pred, l = sess.run([train_op, Y_pred, loss],
feed_dict={X: xs, Y_true: ys})
# calculate number of correct predictions from batch
gts += np.sum(np.isclose(ys, ys_pred, atol=1e-4, rtol=1e-4))
i += BATCH_SIZE
ytest_pred = Y_pred.eval(feed_dict={X: features[idx_test]})
test_r2 = r2_score(labels[idx_test][:,None], ytest_pred)
# print("Test R2 Score: {}".format(test_r2_score))
acc = gts/len(Y_data_use)
train_r2 = r2_score(ys, ys_pred)
print('epoch {}, loss: {:.5}, accuracy: {:.5}, Batch R2: {:.5}, Test R2: {:.5}'.format(ep, l, acc, train_r2, test_r2))
output_dict['loss'][ep] = l
output_dict['accuracy'][ep] = acc
output_dict['R2_train'][ep] = train_r2
output_dict['R2_test'][ep] = test_r2
output_dict['chisq_train'][ep] = chisq(ys.flatten(), ys_pred.flatten(), spitzerCalRawData['fluxerr'][i:i+BATCH_SIZE])
output_dict['chisq_test'][ep] = chisq(labels[idx_test], ytest_pred.flatten(), spitzerCalRawData['fluxerr'][idx_test])
save_path = saver.save(sess, EXPORT_DIR + "model_epoch{}_l{:.5}_a{:.5}_BatchR2-{:.5}_TestR2-{:.5}.ckpt".format(ep, l, acc, train_r2, test_r2))
# print("Model saved in path: %s" % save_path)
if test_r2 >= best_test_r2:
best_test_r2 = test_r2
''' Store the Best Scored Test-R2 '''
save_path = saver.save(sess, EXPORT_DIR + "best_test_r2/model_epoch{}_l{:.5}_a{:.5}_BatchR2-{:.5}_TestR2-{:.5}.ckpt".format(ep, l, acc, train_r2, test_r2))
ep = '_FINAL'
save_path = saver.save(sess, EXPORT_DIR+ "model_epoch{}_l{:.5}_a{:.5}_BatchR2-{:.5}_TestR2-{:.5}.ckpt".format(ep, l, acc, train_r2, test_r2))
print("Model saved in path: %s" % save_path)
try:
pd.DataFrame(output_dict, index=range(N_EPOCHS)).to_csv(EXPORT_DIR+ "model_loss_acc_BatchR2_TestR2_DataFrame.csv")
except Exception as e:
print('DataFrame to CSV broke because', str(e))
'''
with tf.name_scope("loss"):
def tf_nll(labels, output, uncs, coeff=1):
error = output - labels
return tf.reduce_sum(tf.divide(tf.squared_difference(output, labels) , tf.square(uncs)))# + tf.log(tf.square(uncs))
#return tf.reduce_sum(1 * (coeff * np.log(2*np.pi) + coeff * tf.log(uncs) + (0.5/uncs) * tf.pow(error, 2)))
negloglike = tf_nll(labels=y, output=output, uncs=unc)
reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
loss = tf.add_n([negloglike] + reg_losses, name="chisq")
with tf.name_scope("eval"):
accuracy = tf.reduce_mean(tf.squared_difference(output, y, name="accuracy"))
SqErrRatio= tf.divide(accuracy, tf.reduce_mean(tf.squared_difference(y, tf.reduce_mean(y))))
r2_acc = 1.0 - SqErrRatio
chsiqMean = tf_nll(labels=y, output=tf.reduce_mean(y), uncs=unc)
chisqModel= tf_nll(labels=y, output=output, uncs=unc)
    rho2_acc = 1.0 - chisqModel / chsiqMean

mse_summary = tf.summary.scalar('train_acc' , accuracy )
loss_summary = tf.summary.scalar('loss' , loss )
nll_summary = tf.summary.scalar('negloglike', negloglike)
r2s_summary = tf.summary.scalar('r2_acc' , r2_acc )
p2s_summary = tf.summary.scalar('rho2_acc' , rho2_acc )
val_summary = tf.summary.scalar('val_acc' , accuracy )
# hid1_hist = tf.summary.histogram('hidden1', hidden1)
# hid2_hist = tf.summary.histogram('hidden1', hidden1)
# hid3_hist = tf.summary.histogram('hidden1', hidden1)
file_writer = tf.summary.FileWriter(logdir, tf.get_default_graph())
'''
|
#!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name="py-trello",
version="0.3.1",
description='Python wrapper around the Trello API',
long_description=open('README.rst').read(),
author='Richard Kolkovich',
author_email='richard@sigil.org',
url='https://trello.com/board/py-trello/4f145d87b2f9f15d6d027b53',
keywords='python',
license='BSD License',
classifiers=[
"Development Status :: 4 - Beta",
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
],
install_requires=["requests", "requests-oauthlib >= 0.4.1", "python-dateutil"],
packages=find_packages(),
include_package_data=True,
)
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
# System built-in modules
import time
from datetime import datetime
import sys
import os
from multiprocessing import Pool
# Project dependency modules
import pandas as pd
pd.set_option('mode.chained_assignment', None) # block warnings due to DataFrame value assignment
import lasagne
# Project modules
sys.path.append('../')
from sleep_control.traffic_emulator import TrafficEmulator
from sleep_control.traffic_server import TrafficServer
from sleep_control.controller import QController, DummyController, NController
from sleep_control.integration import Emulation
from sleep_control.env_models import SJTUModel
from rl.qtable import QAgent
from rl.qnn_theano import QAgentNN
from rl.mixin import PhiMixin, DynaMixin
sys_stdout = sys.stdout
log_prefix = '_'.join(['msg'] + os.path.basename(__file__).replace('.', '_').split('_')[1:5])
log_file_name = "{}_{}.log".format(log_prefix, sys.argv[1])
# Composite classes
class Dyna_QAgentNN(DynaMixin, QAgentNN):
def __init__(self, **kwargs):
super(Dyna_QAgentNN, self).__init__(**kwargs)
# Parameters
# |- Data
location = 'dmW'
# |- Agent
# |- QAgent
actions = [(True, None), (False, 'serve_all')]
gamma, alpha = 0.9, 0.9 # TD backup
explore_strategy, epsilon = 'epsilon', 0.02 # exploration
# |- QAgentNN
# | - Phi
# phi_length = 5
# dim_state = (1, phi_length, 3+2)
# range_state_slice = [(0, 10), (0, 10), (0, 10), (0, 1), (0, 1)]
# range_state = [[range_state_slice]*phi_length]
# | - No Phi
phi_length = 0
dim_state = (1, 1, 3)
range_state = ((((0, 10), (0, 10), (0, 10)),),)
# | - Other params
momentum, learning_rate = 0.9, 0.01 # SGD
num_buffer, memory_size, batch_size, update_period, freeze_period = 2, 200, 100, 4, 16
reward_scaling, reward_scaling_update, rs_period = 1, 'adaptive', 32 # reward scaling
# |- Env model
model_type, traffic_window_size = 'IPP', 50
stride, n_iter, adjust_offset = 2, 3, 1e-22
eval_period, eval_len = 4, 100
n_belief_bins, max_queue_len = 0, 20
Rs, Rw, Rf, Co, Cw = 1.0, -1.0, -10.0, -5.0, -0.5
traffic_params = (model_type, traffic_window_size,
stride, n_iter, adjust_offset,
eval_period, eval_len,
n_belief_bins)
queue_params = (max_queue_len,)
beta = 0.5 # R = (1-beta)*ServiceReward + beta*Cost
reward_params = (Rs, Rw, Rf, Co, Cw, beta)
# |- DynaQ
num_sim = 2
# |- Env
# |- Time
start_time = pd.to_datetime("2014-10-15 09:40:00")
total_time = pd.Timedelta(days=7)
time_step = pd.Timedelta(seconds=2)
backoff_epochs = num_buffer*memory_size+phi_length
head_datetime = start_time - time_step*backoff_epochs
tail_datetime = head_datetime + total_time
TOTAL_EPOCHS = int(total_time/time_step)
# |- Reward
rewarding = {'serve': Rs, 'wait': Rw, 'fail': Rf}
# load from processed data
session_df =pd.read_csv(
filepath_or_buffer='../data/trace_{}.dat'.format(location),
parse_dates=['startTime_datetime', 'endTime_datetime']
)
te = TrafficEmulator(
session_df=session_df, time_step=time_step,
head_datetime=head_datetime, tail_datetime=tail_datetime,
rewarding=rewarding,
verbose=2)
ts = TrafficServer(cost=(Co, Cw), verbose=2)
env_model = SJTUModel(traffic_params, queue_params, reward_params, 2)
agent = Dyna_QAgentNN(
env_model=env_model, num_sim=num_sim,
dim_state=dim_state, range_state=range_state,
f_build_net = None,
batch_size=batch_size, learning_rate=learning_rate, momentum=momentum,
reward_scaling=reward_scaling, reward_scaling_update=reward_scaling_update, rs_period=rs_period,
update_period=update_period, freeze_period=freeze_period,
memory_size=memory_size, num_buffer=num_buffer,
# Below is QAgent params
actions=actions, alpha=alpha, gamma=gamma,
explore_strategy=explore_strategy, epsilon=epsilon,
verbose=2)
c = QController(agent=agent)
emu = Emulation(te=te, ts=ts, c=c, beta=beta)
# Heavyliftings
t = time.time()
sys.stdout = sys_stdout
log_path = './log/'
if os.path.isfile(log_path+log_file_name):
print "Log file {} already exist. Experiment cancelled.".format(log_file_name)
else:
log_file = open(log_path+log_file_name,"w")
print datetime.now().strftime('[%Y-%m-%d %H:%M:%S]'),
print '{}%'.format(int(100.0*emu.epoch/TOTAL_EPOCHS)),
print log_file_name
time.sleep(1)
sys.stdout = log_file
while emu.epoch is not None and emu.epoch<TOTAL_EPOCHS:
# log time
print "Epoch {},".format(emu.epoch),
left = emu.te.head_datetime + emu.te.epoch*emu.te.time_step
right = left + emu.te.time_step
print "{} - {}".format(left.strftime("%Y-%m-%d %H:%M:%S"), right.strftime("%Y-%m-%d %H:%M:%S"))
emu.step()
print
if emu.epoch%(0.05*TOTAL_EPOCHS)==0:
sys.stdout = sys_stdout
print datetime.now().strftime('[%Y-%m-%d %H:%M:%S]'),
print '{}%'.format(int(100.0*emu.epoch/TOTAL_EPOCHS)),
print log_file_name
time.sleep(1)
sys.stdout = log_file
sys.stdout = sys_stdout
log_file.close()
print
print log_file_name,
print '{:.3f} sec,'.format(time.time()-t),
print '{:.3f} min'.format((time.time()-t)/60)
|
'''
Python classes for Leopard Imaging LI-USB30-M021 on Linux
Copyright (C) 2016 Simon D. Levy
This file is part of M021_V4L2.
M021_V4L2 is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
M021_V4L2 is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with M021_V4L2. If not, see <http://www.gnu.org/licenses/>.
'''
import numpy as np
import libm021v4l2 as lib
# XXX This is kind of a sleazy implementation, partly in Python, partly in C extension.
# XXX We probably should do the whole thing in C.
class _Capture(object):
'''
Abstract parent class; do not instantiate directly.
'''
def __init__(self, rows, cols, bcorrect, gcorrect, rcorrect):
self.frame = np.zeros((rows,cols,3), dtype='uint8')
lib.init(self.frame, bcorrect, gcorrect, rcorrect)
def read(self):
'''
Reads one frame of data from the camera. Returns a pair (success,frame), where success is True or
False, frame is an RxCx3 NumPy array of color image bytes.
'''
lib.acquire(self.frame)
return True, self.frame
def getCount(self):
'''
Returns the number of frames acquired since init.
'''
return lib.count()
class Capture1280x720(_Capture):
'''
A class for capturing 1280x720 color images at 60 frames per second.
Optional bcorrect, gcorrect, rcorrect values specify color-correction for
    blue, green, and red components, respectively.
'''
def __init__(self, bcorrect=50, gcorrect=0, rcorrect=50):
_Capture.__init__(self, 720, 1280, bcorrect, gcorrect, rcorrect)
class Capture800x460(_Capture):
'''
A class for capturing 800x460 color images at 90 frames per second
Optional bcorrect, gcorrect, rcorrect values specify color-correction for
    blue, green, and red components, respectively.
'''
def __init__(self, bcorrect=50, gcorrect=0, rcorrect=50):
_Capture.__init__(self, 460, 800, bcorrect, gcorrect, rcorrect)
class Capture640x480(_Capture):
'''
A class for capturing 640x480 color images at 30 frames per second
Optional bcorrect, gcorrect, rcorrect values specify color-correction for
    blue, green, and red components, respectively.
'''
def __init__(self, bcorrect=50, gcorrect=0, rcorrect=50):
_Capture.__init__(self, 480, 640, bcorrect, gcorrect, rcorrect)
class Capture1600x1200(_Capture):
'''
An experimental class for capturing 1600x1200 color images at 30 frames per
second with the LI-C570 camera. Optional bcorrect, gcorrect, rcorrect values
    specify color-correction for blue, green, and red components, respectively.
'''
def __init__(self, bcorrect=50, gcorrect=0, rcorrect=50):
_Capture.__init__(self, 1200, 1600, bcorrect, gcorrect, rcorrect)
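# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of grabbing and displaying frames with one of the capture
# classes above; it assumes the libm021v4l2 extension is built and a camera is
# connected, and uses OpenCV purely for display.
if __name__ == '__main__':
    import cv2
    cap = Capture640x480()
    for _ in range(100):
        ok, frame = cap.read()
        if ok:
            cv2.imshow('M021', frame)
        if cv2.waitKey(1) & 0xFF == 27:  # Esc quits early
            break
    print('frames acquired: %d' % cap.getCount())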
|
# This file was created automatically by SWIG.
# Don't modify this file, modify the SWIG interface instead.
# This file is compatible with both classic and new-style classes.
import _servomotor
def _swig_setattr(self,class_type,name,value):
if (name == "this"):
if isinstance(value, class_type):
self.__dict__[name] = value.this
if hasattr(value,"thisown"): self.__dict__["thisown"] = value.thisown
del value.thisown
return
method = class_type.__swig_setmethods__.get(name,None)
if method: return method(self,value)
self.__dict__[name] = value
def _swig_getattr(self,class_type,name):
method = class_type.__swig_getmethods__.get(name,None)
if method: return method(self)
raise AttributeError,name
import types
try:
_object = types.ObjectType
_newclass = 1
except AttributeError:
class _object : pass
_newclass = 0
del types
class PhidgetServoMotor(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, PhidgetServoMotor, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, PhidgetServoMotor, name)
def __repr__(self):
return "<C PhidgetServoMotor instance at %s>" % (self.this,)
__swig_setmethods__["min_pulse"] = _servomotor.PhidgetServoMotor_min_pulse_set
__swig_getmethods__["min_pulse"] = _servomotor.PhidgetServoMotor_min_pulse_get
if _newclass:min_pulse = property(_servomotor.PhidgetServoMotor_min_pulse_get, _servomotor.PhidgetServoMotor_min_pulse_set)
__swig_setmethods__["max_pulse"] = _servomotor.PhidgetServoMotor_max_pulse_set
__swig_getmethods__["max_pulse"] = _servomotor.PhidgetServoMotor_max_pulse_get
if _newclass:max_pulse = property(_servomotor.PhidgetServoMotor_max_pulse_get, _servomotor.PhidgetServoMotor_max_pulse_set)
__swig_setmethods__["factor"] = _servomotor.PhidgetServoMotor_factor_set
__swig_getmethods__["factor"] = _servomotor.PhidgetServoMotor_factor_get
if _newclass:factor = property(_servomotor.PhidgetServoMotor_factor_get, _servomotor.PhidgetServoMotor_factor_set)
__swig_setmethods__["position"] = _servomotor.PhidgetServoMotor_position_set
__swig_getmethods__["position"] = _servomotor.PhidgetServoMotor_position_get
if _newclass:position = property(_servomotor.PhidgetServoMotor_position_get, _servomotor.PhidgetServoMotor_position_set)
def __init__(self, *args):
_swig_setattr(self, PhidgetServoMotor, 'this', _servomotor.new_PhidgetServoMotor(*args))
_swig_setattr(self, PhidgetServoMotor, 'thisown', 1)
def __del__(self, destroy=_servomotor.delete_PhidgetServoMotor):
try:
if self.thisown: destroy(self)
except: pass
class PhidgetServoMotorPtr(PhidgetServoMotor):
def __init__(self, this):
_swig_setattr(self, PhidgetServoMotor, 'this', this)
if not hasattr(self,"thisown"): _swig_setattr(self, PhidgetServoMotor, 'thisown', 0)
_swig_setattr(self, PhidgetServoMotor,self.__class__,PhidgetServoMotor)
_servomotor.PhidgetServoMotor_swigregister(PhidgetServoMotorPtr)
phidget_reset_PhidgetServoMotor = _servomotor.phidget_reset_PhidgetServoMotor
phidget_servomotor_set_parameters = _servomotor.phidget_servomotor_set_parameters
|
#!/usr/bin/python
'''
Creates text file of Cocoa superclasses in given filename or in
./cocoa_indexes/classes.txt by default.
'''
import os, re
from cocoa_definitions import write_file, find
from commands import getoutput
def find_headers(frameworks):
'''Returns a dictionary of the headers for each given framework.'''
headers_and_frameworks = {}
for framework in frameworks:
headers_and_frameworks[framework] = \
' '.join(find('/System/Library/Frameworks/%s.framework'
% framework, '.h'))
return headers_and_frameworks
def get_classes(header_files_and_frameworks):
    '''Return a list of Cocoa classes and protocols with their framework.'''
classes = {}
for framework, files in header_files_and_frameworks:
for line in getoutput(r"grep -ho '@\(interface\|protocol\) [A-Z]\w\+' "
+ files).split("\n"):
cocoa_class = re.search(r'[A-Z]\w+', line)
if cocoa_class and not classes.has_key(cocoa_class.group(0)):
classes[cocoa_class.group(0)] = framework
classes = classes.items()
classes.sort()
return classes
def get_superclasses(classes_and_frameworks):
'''
Given a list of Cocoa classes & their frameworks, returns a list of their
superclasses in the form: "class\|superclass\|superclass\|...".
'''
args = ''
for classname, framework in classes_and_frameworks:
args += classname + ' ' + framework + ' '
return getoutput('./superclasses ' + args).split("\n")
def output_file(fname=None):
'''Output text file of Cocoa classes to given filename.'''
if fname is None:
fname = './cocoa_indexes/classes.txt'
if not os.path.isdir(os.path.dirname(fname)):
os.mkdir(os.path.dirname(fname))
frameworks = ('Foundation', 'AppKit', 'AddressBook', 'CoreData',
'PreferencePanes', 'QTKit', 'ScreenSaver', 'SyncServices',
'WebKit')
headers_and_frameworks = find_headers(frameworks).items()
superclasses = get_superclasses(get_classes(headers_and_frameworks))
write_file(fname, superclasses)
if __name__ == '__main__':
from sys import argv
output_file(argv[1] if len(argv) > 1 else None)
|
"""
Django settings for testproject project.
Generated by 'django-admin startproject' using Django 1.10.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "=t4f&0jd786fl_ri1$7z9)!iblzhv1r7f$9p&z4kol9zej*(q@"
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"stdnumfield",
"testapp",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "testproject.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
]
},
}
]
WSGI_APPLICATION = "testproject.wsgi.application"
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": os.path.join(BASE_DIR, "db.sqlite3"),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
# fmt: off
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": (
"django.contrib.auth.password_validation"
".UserAttributeSimilarityValidator"
),
},
{
"NAME": (
"django.contrib.auth.password_validation"
".MinimumLengthValidator"
),
},
{
"NAME": (
"django.contrib.auth.password_validation"
".CommonPasswordValidator"
),
},
{
"NAME": (
"django.contrib.auth.password_validation"
".NumericPasswordValidator"
),
},
]
# fmt: on
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = "/static/"
|
#!/usr/bin/env python3
# Copyright 2016 Donour Sizemore
#
# This file is part of RacePi
#
# RacePi is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 2.
#
# RacePi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with RacePi. If not, see <http://www.gnu.org/licenses/>.
import os
import gps3.gps3 as gps3
import time
from racepi.sensor.handler.sensor_handler import SensorHandler
GPS_REQUIRED_FIELDS = ['time', 'lat', 'lon', 'speed', 'track', 'epx', 'epy', 'epv', 'alt']
GPS_READ_TIMEOUT = 2.0
class GpsSensorHandler(SensorHandler):
def __init__(self):
SensorHandler.__init__(self, self.__record_from_gps)
def __record_from_gps(self):
# TODO auto retry and reinit on hotplug
if not self.pipe_out:
raise ValueError("Illegal argument, no queue specified")
os.system("taskset -p 0xfe %d" % os.getpid())
        os.nice(19)
print("Starting GPS reader")
gps_socket = gps3.GPSDSocket()
data_stream = gps3.DataStream()
gps_socket.connect()
gps_socket.watch()
while not self.doneEvent.is_set():
newdata = gps_socket.next(timeout=GPS_READ_TIMEOUT)
now = time.time()
if newdata:
data_stream.unpack(newdata)
sample = data_stream.TPV
t = sample.get('time')
if t is not None and set(GPS_REQUIRED_FIELDS).issubset(set(sample.keys())):
self.pipe_out.send((now, sample))
print("GPS reader shutdown")
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
import mock
import os_resource_classes as orc
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
from nova import context as nova_context
from nova import exception
from nova import objects
from nova.scheduler.client import report
from nova.scheduler import utils
from nova import test
from nova.tests.unit import fake_instance
from nova.tests.unit.scheduler import fakes
class FakeResourceRequest(object):
"""A fake of ``nova.scheduler.utils.ResourceRequest``.
Allows us to assert that various properties of a real ResourceRequest
object are set as we'd like them to be.
"""
def __init__(self):
self._rg_by_id = {}
self._group_policy = None
self._limit = 1000
class TestUtilsBase(test.NoDBTestCase):
def setUp(self):
super(TestUtilsBase, self).setUp()
self.context = nova_context.get_admin_context()
self.mock_host_manager = mock.Mock()
def assertResourceRequestsEqual(self, expected, observed):
self.assertEqual(expected._limit, observed._limit)
self.assertEqual(expected._group_policy, observed._group_policy)
ex_by_id = expected._rg_by_id
ob_by_id = observed._rg_by_id
self.assertEqual(set(ex_by_id), set(ob_by_id))
for ident in ex_by_id:
self.assertEqual(vars(ex_by_id[ident]), vars(ob_by_id[ident]))
@ddt.ddt
class TestUtils(TestUtilsBase):
def _test_resources_from_request_spec(self, expected, flavor, image=None):
if image is None:
image = objects.ImageMeta(properties=objects.ImageMetaProps())
fake_spec = objects.RequestSpec(flavor=flavor, image=image)
resources = utils.resources_from_request_spec(
self.context, fake_spec, self.mock_host_manager)
self.assertResourceRequestsEqual(expected, resources)
return resources
def test_resources_from_request_spec_flavor_only(self):
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=10,
ephemeral_gb=5,
swap=0)
expected_resources = FakeResourceRequest()
expected_resources._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 15,
}
)
self._test_resources_from_request_spec(expected_resources, flavor)
def test_resources_from_request_spec_flavor_req_traits(self):
flavor = objects.Flavor(
vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=0,
extra_specs={'trait:CUSTOM_FLAVOR_TRAIT': 'required'})
expected_resources = FakeResourceRequest()
expected_resources._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 15,
},
required_traits=set(['CUSTOM_FLAVOR_TRAIT'])
)
resources = self._test_resources_from_request_spec(
expected_resources, flavor)
expected_result = set(['CUSTOM_FLAVOR_TRAIT'])
self.assertEqual(expected_result, resources.all_required_traits)
def test_resources_from_request_spec_flavor_and_image_traits(self):
image = objects.ImageMeta.from_dict({
'properties': {
'trait:CUSTOM_IMAGE_TRAIT1': 'required',
'trait:CUSTOM_IMAGE_TRAIT2': 'required',
},
'id': 'c8b1790e-a07d-4971-b137-44f2432936cd',
})
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=10,
ephemeral_gb=5,
swap=0,
extra_specs={
'trait:CUSTOM_FLAVOR_TRAIT': 'required',
'trait:CUSTOM_IMAGE_TRAIT2': 'required'})
expected_resources = FakeResourceRequest()
expected_resources._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 15,
},
required_traits={
# trait:CUSTOM_IMAGE_TRAIT2 is defined in both extra_specs and
# image metadata. We get a union of both.
'CUSTOM_IMAGE_TRAIT1',
'CUSTOM_IMAGE_TRAIT2',
'CUSTOM_FLAVOR_TRAIT',
}
)
self._test_resources_from_request_spec(expected_resources, flavor,
image)
def test_resources_from_request_spec_flavor_forbidden_trait(self):
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=10,
ephemeral_gb=5,
swap=0,
extra_specs={
'trait:CUSTOM_FLAVOR_TRAIT': 'forbidden'})
expected_resources = FakeResourceRequest()
expected_resources._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 15,
},
forbidden_traits={
'CUSTOM_FLAVOR_TRAIT',
}
)
self._test_resources_from_request_spec(expected_resources, flavor)
def test_resources_from_request_spec_with_no_disk(self):
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=0,
ephemeral_gb=0,
swap=0)
expected_resources = FakeResourceRequest()
expected_resources._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 1,
'MEMORY_MB': 1024,
}
)
self._test_resources_from_request_spec(expected_resources, flavor)
def test_get_resources_from_request_spec_custom_resource_class(self):
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=10,
ephemeral_gb=5,
swap=0,
extra_specs={"resources:CUSTOM_TEST_CLASS": 1})
expected_resources = FakeResourceRequest()
expected_resources._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
"VCPU": 1,
"MEMORY_MB": 1024,
"DISK_GB": 15,
"CUSTOM_TEST_CLASS": 1,
}
)
self._test_resources_from_request_spec(expected_resources, flavor)
def test_get_resources_from_request_spec_override_flavor_amounts(self):
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=10,
ephemeral_gb=5,
swap=0,
extra_specs={
"resources:VCPU": 99,
"resources:MEMORY_MB": 99,
"resources:DISK_GB": 99})
expected_resources = FakeResourceRequest()
expected_resources._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
"VCPU": 99,
"MEMORY_MB": 99,
"DISK_GB": 99,
}
)
self._test_resources_from_request_spec(expected_resources, flavor)
def test_get_resources_from_request_spec_remove_flavor_amounts(self):
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=10,
ephemeral_gb=5,
swap=0,
extra_specs={
"resources:VCPU": 0,
"resources:DISK_GB": 0})
expected_resources = FakeResourceRequest()
expected_resources._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
"MEMORY_MB": 1024,
}
)
self._test_resources_from_request_spec(expected_resources, flavor)
def test_get_resources_from_request_spec_vgpu(self):
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=10,
ephemeral_gb=0,
swap=0,
extra_specs={
"resources:VGPU": 1,
"resources:VGPU_DISPLAY_HEAD": 1})
expected_resources = FakeResourceRequest()
expected_resources._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
"VCPU": 1,
"MEMORY_MB": 1024,
"DISK_GB": 10,
"VGPU": 1,
"VGPU_DISPLAY_HEAD": 1,
}
)
self._test_resources_from_request_spec(expected_resources, flavor)
def test_get_resources_from_request_spec_bad_std_resource_class(self):
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=10,
ephemeral_gb=5,
swap=0,
extra_specs={
"resources:DOESNT_EXIST": 0})
fake_spec = objects.RequestSpec(flavor=flavor)
with mock.patch("nova.objects.request_spec.LOG.warning") as mock_log:
utils.resources_from_request_spec(
self.context, fake_spec, self.mock_host_manager)
mock_log.assert_called_once()
args = mock_log.call_args[0]
self.assertEqual(args[0], "Received an invalid ResourceClass "
"'%(key)s' in extra_specs.")
self.assertEqual(args[1], {"key": "DOESNT_EXIST"})
def test_get_resources_from_request_spec_granular(self):
flavor = objects.Flavor(
vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=0, swap=0,
extra_specs={'resources1:VGPU': '1',
'resources1:VGPU_DISPLAY_HEAD': '2',
# Replace
'resources3:VCPU': '2',
# Stay separate (don't sum)
'resources42:SRIOV_NET_VF': '1',
'resources24:SRIOV_NET_VF': '2',
# Ignore
'some:bogus': 'value',
# Custom in the unnumbered group (merge with DISK_GB)
'resources:CUSTOM_THING': '123',
# Traits make it through
'trait3:CUSTOM_SILVER': 'required',
'trait3:CUSTOM_GOLD': 'required',
# Delete standard
'resources86:MEMORY_MB': '0',
# Standard and custom zeroes don't make it through
'resources:IPV4_ADDRESS': '0',
'resources:CUSTOM_FOO': '0',
# Bogus values don't make it through
'resources1:MEMORY_MB': 'bogus',
'group_policy': 'none'})
expected_resources = FakeResourceRequest()
expected_resources._group_policy = 'none'
expected_resources._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
'DISK_GB': 10,
'CUSTOM_THING': 123,
}
)
expected_resources._rg_by_id['1'] = objects.RequestGroup(
requester_id='1',
resources={
'VGPU': 1,
'VGPU_DISPLAY_HEAD': 2,
}
)
expected_resources._rg_by_id['3'] = objects.RequestGroup(
requester_id='3',
resources={
'VCPU': 2,
},
required_traits={
'CUSTOM_GOLD',
'CUSTOM_SILVER',
}
)
expected_resources._rg_by_id['24'] = objects.RequestGroup(
requester_id='24',
resources={
'SRIOV_NET_VF': 2,
},
)
expected_resources._rg_by_id['42'] = objects.RequestGroup(
requester_id='42',
resources={
'SRIOV_NET_VF': 1,
}
)
rr = self._test_resources_from_request_spec(expected_resources, flavor)
expected_querystring = (
'group_policy=none&'
'limit=1000&'
'required3=CUSTOM_GOLD%2CCUSTOM_SILVER&'
'resources=CUSTOM_THING%3A123%2CDISK_GB%3A10&'
'resources1=VGPU%3A1%2CVGPU_DISPLAY_HEAD%3A2&'
'resources24=SRIOV_NET_VF%3A2&'
'resources3=VCPU%3A2&'
'resources42=SRIOV_NET_VF%3A1'
)
self.assertEqual(expected_querystring, rr.to_querystring())
def test_all_required_traits(self):
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=10,
ephemeral_gb=5,
swap=0,
extra_specs={
'trait:HW_CPU_X86_SSE': 'required',
'trait:HW_CPU_X86_AVX': 'required',
'trait:HW_CPU_X86_AVX2': 'forbidden'})
expected_resources = FakeResourceRequest()
expected_resources._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 15,
},
required_traits={
'HW_CPU_X86_SSE',
'HW_CPU_X86_AVX'
},
forbidden_traits={
'HW_CPU_X86_AVX2'
}
)
resource = self._test_resources_from_request_spec(expected_resources,
flavor)
expected_result = {'HW_CPU_X86_SSE', 'HW_CPU_X86_AVX'}
self.assertEqual(expected_result,
resource.all_required_traits)
def test_resources_from_request_spec_aggregates(self):
destination = objects.Destination()
flavor = objects.Flavor(vcpus=1, memory_mb=1024,
root_gb=1, ephemeral_gb=0,
swap=0)
reqspec = objects.RequestSpec(flavor=flavor,
requested_destination=destination)
destination.require_aggregates(['foo', 'bar'])
req = utils.resources_from_request_spec(
self.context, reqspec, self.mock_host_manager)
self.assertEqual([['foo', 'bar']],
req.get_request_group(None).aggregates)
destination.require_aggregates(['baz'])
req = utils.resources_from_request_spec(
self.context, reqspec, self.mock_host_manager)
self.assertEqual([['foo', 'bar'], ['baz']],
req.get_request_group(None).aggregates)
def test_resources_from_request_spec_no_aggregates(self):
flavor = objects.Flavor(vcpus=1, memory_mb=1024,
root_gb=1, ephemeral_gb=0,
swap=0)
reqspec = objects.RequestSpec(flavor=flavor)
req = utils.resources_from_request_spec(
self.context, reqspec, self.mock_host_manager)
self.assertEqual([], req.get_request_group(None).aggregates)
reqspec.requested_destination = None
req = utils.resources_from_request_spec(
self.context, reqspec, self.mock_host_manager)
self.assertEqual([], req.get_request_group(None).aggregates)
reqspec.requested_destination = objects.Destination()
req = utils.resources_from_request_spec(
self.context, reqspec, self.mock_host_manager)
self.assertEqual([], req.get_request_group(None).aggregates)
reqspec.requested_destination.aggregates = None
req = utils.resources_from_request_spec(
self.context, reqspec, self.mock_host_manager)
self.assertEqual([], req.get_request_group(None).aggregates)
def test_resources_from_request_spec_forbidden_aggregates(self):
flavor = objects.Flavor(vcpus=1, memory_mb=1024,
root_gb=1, ephemeral_gb=0,
swap=0)
reqspec = objects.RequestSpec(
flavor=flavor,
requested_destination=objects.Destination(
forbidden_aggregates=set(['foo', 'bar'])))
req = utils.resources_from_request_spec(self.context, reqspec,
self.mock_host_manager)
self.assertEqual(set(['foo', 'bar']),
req.get_request_group(None).forbidden_aggregates)
def test_resources_from_request_spec_no_forbidden_aggregates(self):
flavor = objects.Flavor(vcpus=1, memory_mb=1024,
root_gb=1, ephemeral_gb=0,
swap=0)
reqspec = objects.RequestSpec(flavor=flavor)
req = utils.resources_from_request_spec(
self.context, reqspec, self.mock_host_manager)
self.assertEqual(set([]), req.get_request_group(None).
forbidden_aggregates)
reqspec.requested_destination = None
req = utils.resources_from_request_spec(
self.context, reqspec, self.mock_host_manager)
self.assertEqual(set([]), req.get_request_group(None).
forbidden_aggregates)
reqspec.requested_destination = objects.Destination()
req = utils.resources_from_request_spec(
self.context, reqspec, self.mock_host_manager)
self.assertEqual(set([]), req.get_request_group(None).
forbidden_aggregates)
reqspec.requested_destination.forbidden_aggregates = None
req = utils.resources_from_request_spec(
self.context, reqspec, self.mock_host_manager)
self.assertEqual(set([]), req.get_request_group(None).
forbidden_aggregates)
def test_process_extra_specs_granular_called(self):
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=10,
ephemeral_gb=5,
swap=0,
extra_specs={"resources:CUSTOM_TEST_CLASS": 1})
fake_spec = objects.RequestSpec(flavor=flavor)
# just call this to make sure things don't explode
utils.resources_from_request_spec(
self.context, fake_spec, self.mock_host_manager)
def test_process_extra_specs_granular_not_called(self):
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=10,
ephemeral_gb=5,
swap=0)
fake_spec = objects.RequestSpec(flavor=flavor)
# just call this to make sure things don't explode
utils.resources_from_request_spec(
self.context, fake_spec, self.mock_host_manager)
def test_process_missing_extra_specs_value(self):
flavor = objects.Flavor(
vcpus=1,
memory_mb=1024,
root_gb=10,
ephemeral_gb=5,
swap=0,
extra_specs={"resources:CUSTOM_TEST_CLASS": ""})
fake_spec = objects.RequestSpec(flavor=flavor)
# just call this to make sure things don't explode
utils.resources_from_request_spec(
self.context, fake_spec, self.mock_host_manager)
def test_process_no_force_hosts_or_force_nodes(self):
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=15,
ephemeral_gb=0,
swap=0)
expected = FakeResourceRequest()
expected._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 15,
},
)
rr = self._test_resources_from_request_spec(expected, flavor)
expected_querystring = (
'limit=1000&'
'resources=DISK_GB%3A15%2CMEMORY_MB%3A1024%2CVCPU%3A1'
)
self.assertEqual(expected_querystring, rr.to_querystring())
def test_process_use_force_nodes(self):
fake_nodes = objects.ComputeNodeList(objects=[
objects.ComputeNode(host='fake-host',
uuid='12345678-1234-1234-1234-123456789012',
hypervisor_hostname='test')])
self.mock_host_manager.get_compute_nodes_by_host_or_node.\
return_value = fake_nodes
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=15,
ephemeral_gb=0,
swap=0)
fake_spec = objects.RequestSpec(flavor=flavor, force_nodes=['test'])
expected = FakeResourceRequest()
expected._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 15,
},
in_tree='12345678-1234-1234-1234-123456789012',
)
resources = utils.resources_from_request_spec(
self.context, fake_spec, self.mock_host_manager)
self.assertResourceRequestsEqual(expected, resources)
expected_querystring = (
'in_tree=12345678-1234-1234-1234-123456789012&'
'limit=1000&resources=DISK_GB%3A15%2CMEMORY_MB%3A1024%2CVCPU%3A1')
self.assertEqual(expected_querystring, resources.to_querystring())
self.mock_host_manager.get_compute_nodes_by_host_or_node.\
assert_called_once_with(self.context, None, 'test', cell=None)
def test_process_use_force_hosts(self):
fake_nodes = objects.ComputeNodeList(objects=[
objects.ComputeNode(host='test',
uuid='12345678-1234-1234-1234-123456789012')
])
self.mock_host_manager.get_compute_nodes_by_host_or_node.\
return_value = fake_nodes
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=15,
ephemeral_gb=0,
swap=0)
fake_spec = objects.RequestSpec(flavor=flavor, force_hosts=['test'])
expected = FakeResourceRequest()
expected._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 15,
},
in_tree='12345678-1234-1234-1234-123456789012',
)
resources = utils.resources_from_request_spec(
self.context, fake_spec, self.mock_host_manager)
self.assertResourceRequestsEqual(expected, resources)
expected_querystring = (
'in_tree=12345678-1234-1234-1234-123456789012&'
'limit=1000&resources=DISK_GB%3A15%2CMEMORY_MB%3A1024%2CVCPU%3A1')
self.assertEqual(expected_querystring, resources.to_querystring())
self.mock_host_manager.get_compute_nodes_by_host_or_node.\
assert_called_once_with(self.context, 'test', None, cell=None)
def test_process_use_force_hosts_multinodes_found(self):
fake_nodes = objects.ComputeNodeList(objects=[
objects.ComputeNode(host='test',
uuid='12345678-1234-1234-1234-123456789012'),
objects.ComputeNode(host='test',
uuid='87654321-4321-4321-4321-210987654321'),
])
self.mock_host_manager.get_compute_nodes_by_host_or_node.\
return_value = fake_nodes
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=15,
ephemeral_gb=0,
swap=0)
fake_spec = objects.RequestSpec(flavor=flavor, force_hosts=['test'])
expected = FakeResourceRequest()
expected._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 15,
},
)
# Validate that the limit is unset
expected._limit = None
resources = utils.resources_from_request_spec(
self.context, fake_spec, self.mock_host_manager)
self.assertResourceRequestsEqual(expected, resources)
        # Validate that the limit does not appear in the query string
expected_querystring = (
'resources=DISK_GB%3A15%2CMEMORY_MB%3A1024%2CVCPU%3A1')
self.assertEqual(expected_querystring, resources.to_querystring())
self.mock_host_manager.get_compute_nodes_by_host_or_node.\
assert_called_once_with(self.context, 'test', None, cell=None)
def test_process_use_requested_destination(self):
fake_cell = objects.CellMapping(uuid=uuids.cell1, name='foo')
destination = objects.Destination(
host='fake-host', node='fake-node', cell=fake_cell)
fake_nodes = objects.ComputeNodeList(objects=[
objects.ComputeNode(host='fake-host',
uuid='12345678-1234-1234-1234-123456789012',
hypervisor_hostname='fake-node')
])
self.mock_host_manager.get_compute_nodes_by_host_or_node.\
return_value = fake_nodes
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=15,
ephemeral_gb=0,
swap=0)
fake_spec = objects.RequestSpec(
flavor=flavor, requested_destination=destination)
expected = FakeResourceRequest()
expected._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 15,
},
in_tree='12345678-1234-1234-1234-123456789012',
)
resources = utils.resources_from_request_spec(
self.context, fake_spec, self.mock_host_manager)
self.assertResourceRequestsEqual(expected, resources)
expected_querystring = (
'in_tree=12345678-1234-1234-1234-123456789012&'
'limit=1000&resources=DISK_GB%3A15%2CMEMORY_MB%3A1024%2CVCPU%3A1')
self.assertEqual(expected_querystring, resources.to_querystring())
self.mock_host_manager.get_compute_nodes_by_host_or_node.\
assert_called_once_with(
self.context, 'fake-host', 'fake-node', cell=fake_cell)
def test_resources_from_request_spec_having_requested_resources(self):
flavor = objects.Flavor(
vcpus=1,
memory_mb=1024,
root_gb=10,
ephemeral_gb=5,
swap=0)
rg1 = objects.RequestGroup(
resources={'CUSTOM_FOO': 1}, requester_id='The-first-group')
# Leave requester_id out to trigger ValueError
rg2 = objects.RequestGroup(required_traits={'CUSTOM_BAR'})
reqspec = objects.RequestSpec(flavor=flavor,
requested_resources=[rg1, rg2])
self.assertRaises(
ValueError,
utils.resources_from_request_spec,
self.context, reqspec, self.mock_host_manager)
# Set conflicting requester_id
rg2.requester_id = 'The-first-group'
self.assertRaises(
exception.RequestGroupSuffixConflict,
utils.resources_from_request_spec,
self.context, reqspec, self.mock_host_manager)
# Good path: nonempty non-conflicting requester_id
rg2.requester_id = 'The-second-group'
req = utils.resources_from_request_spec(
self.context, reqspec, self.mock_host_manager)
self.assertEqual({'MEMORY_MB': 1024, 'DISK_GB': 15, 'VCPU': 1},
req.get_request_group(None).resources)
self.assertIs(rg1, req.get_request_group('The-first-group'))
self.assertIs(rg2, req.get_request_group('The-second-group'))
# Make sure those ended up as suffixes correctly
qs = req.to_querystring()
self.assertIn('resourcesThe-first-group=CUSTOM_FOO%3A1', qs)
self.assertIn('requiredThe-second-group=CUSTOM_BAR', qs)
def test_resources_from_request_spec_requested_resources_unfilled(self):
flavor = objects.Flavor(
vcpus=1,
memory_mb=1024,
root_gb=10,
ephemeral_gb=5,
swap=0)
reqspec = objects.RequestSpec(flavor=flavor)
req = utils.resources_from_request_spec(
self.context, reqspec, self.mock_host_manager)
self.assertEqual({'MEMORY_MB': 1024, 'DISK_GB': 15, 'VCPU': 1},
req.get_request_group(None).resources)
self.assertEqual(1, len(list(req._rg_by_id)))
reqspec = objects.RequestSpec(flavor=flavor, requested_resources=[])
req = utils.resources_from_request_spec(
self.context, reqspec, self.mock_host_manager)
self.assertEqual({'MEMORY_MB': 1024, 'DISK_GB': 15, 'VCPU': 1},
req.get_request_group(None).resources)
self.assertEqual(1, len(list(req._rg_by_id)))
@ddt.data(
# Test single hint that we are checking for.
{'group': [uuids.fake]},
# Test hint we care about and some other random hint.
{'same_host': [uuids.fake], 'fake-hint': ['fake-value']},
# Test multiple hints we are checking for.
{'same_host': [uuids.server1], 'different_host': [uuids.server2]})
def test_resources_from_request_spec_no_limit_based_on_hint(self, hints):
"""Tests that there is no limit applied to the
GET /allocation_candidates query string if a given scheduler hint
is in the request spec.
"""
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=15,
ephemeral_gb=0,
swap=0)
fake_spec = objects.RequestSpec(
flavor=flavor, scheduler_hints=hints)
expected = FakeResourceRequest()
expected._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 15,
},
)
expected._limit = None
resources = utils.resources_from_request_spec(
self.context, fake_spec, self.mock_host_manager)
self.assertResourceRequestsEqual(expected, resources)
expected_querystring = (
'resources=DISK_GB%3A15%2CMEMORY_MB%3A1024%2CVCPU%3A1'
)
self.assertEqual(expected_querystring, resources.to_querystring())
@mock.patch('nova.compute.utils.is_volume_backed_instance',
return_value=False)
def test_resources_from_flavor_no_bfv(self, mock_is_bfv):
flavor = objects.Flavor(vcpus=1, memory_mb=1024, root_gb=10,
ephemeral_gb=5, swap=1024,
extra_specs={})
instance = objects.Instance()
expected = {
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 16,
}
actual = utils.resources_from_flavor(instance, flavor)
self.assertEqual(expected, actual)
@mock.patch('nova.compute.utils.is_volume_backed_instance',
return_value=True)
def test_resources_from_flavor_bfv(self, mock_is_bfv):
flavor = objects.Flavor(vcpus=1, memory_mb=1024, root_gb=10,
ephemeral_gb=5, swap=1024,
extra_specs={})
instance = objects.Instance()
expected = {
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 6, # No root disk...
}
actual = utils.resources_from_flavor(instance, flavor)
self.assertEqual(expected, actual)
@mock.patch('nova.compute.utils.is_volume_backed_instance',
new=mock.Mock(return_value=False))
def test_resources_from_flavor_with_override(self):
flavor = objects.Flavor(
vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=1024,
extra_specs={
# Replace
'resources:VCPU': '2',
# Sum up
'resources42:SRIOV_NET_VF': '1',
'resources24:SRIOV_NET_VF': '2',
# Ignore
'some:bogus': 'value',
# Custom
'resources:CUSTOM_THING': '123',
# Ignore
'trait:CUSTOM_GOLD': 'required',
# Delete standard
'resources86:MEMORY_MB': 0,
# Standard and custom zeroes don't make it through
'resources:IPV4_ADDRESS': 0,
'resources:CUSTOM_FOO': 0,
'group_policy': 'none'})
instance = objects.Instance()
expected = {
'VCPU': 2,
'DISK_GB': 16,
'CUSTOM_THING': 123,
'SRIOV_NET_VF': 3,
}
actual = utils.resources_from_flavor(instance, flavor)
self.assertEqual(expected, actual)
def test_resource_request_init(self):
flavor = objects.Flavor(
vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=0)
expected = FakeResourceRequest()
expected._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 15,
},
)
rs = objects.RequestSpec(flavor=flavor, is_bfv=False)
rr = utils.ResourceRequest(rs)
self.assertResourceRequestsEqual(expected, rr)
def test_resource_request_init_with_extra_specs(self):
flavor = objects.Flavor(
vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=0,
extra_specs={
'resources:VCPU': '2',
'resources:MEMORY_MB': '2048',
'trait:HW_CPU_X86_AVX': 'required',
# Key skipped because no colons
'nocolons': '42',
'trait:CUSTOM_MAGIC': 'required',
'trait:CUSTOM_BRONZE': 'forbidden',
# Resource skipped because invalid resource class name
'resources86:CUTSOM_MISSPELLED': '86',
'resources1:SRIOV_NET_VF': '1',
# Resource skipped because non-int-able value
'resources86:CUSTOM_FOO': 'seven',
# Resource skipped because negative value
'resources86:CUSTOM_NEGATIVE': '-7',
'resources1:IPV4_ADDRESS': '1',
# Trait skipped because unsupported value
'trait86:CUSTOM_GOLD': 'preferred',
'trait1:CUSTOM_PHYSNET_NET1': 'required',
'trait1:CUSTOM_PHYSNET_NET2': 'forbidden',
'resources2:SRIOV_NET_VF': '1',
'resources2:IPV4_ADDRESS': '2',
'trait2:CUSTOM_PHYSNET_NET2': 'required',
'trait2:HW_NIC_ACCEL_SSL': 'required',
# Groupings that don't quite match the patterns are ignored
'resources_*5:SRIOV_NET_VF': '7',
'traitFoo$:HW_NIC_ACCEL_SSL': 'required',
# Solo resource, no corresponding traits
'resources3:DISK_GB': '5',
'group_policy': 'isolate',
})
expected = FakeResourceRequest()
expected._group_policy = 'isolate'
expected._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 2,
'MEMORY_MB': 2048,
},
required_traits={
'HW_CPU_X86_AVX',
'CUSTOM_MAGIC',
},
forbidden_traits={
'CUSTOM_BRONZE',
},
)
expected._rg_by_id['1'] = objects.RequestGroup(
requester_id='1',
resources={
'SRIOV_NET_VF': 1,
'IPV4_ADDRESS': 1,
},
required_traits={
'CUSTOM_PHYSNET_NET1',
},
forbidden_traits={
'CUSTOM_PHYSNET_NET2',
},
)
expected._rg_by_id['2'] = objects.RequestGroup(
requester_id='2',
resources={
'SRIOV_NET_VF': 1,
'IPV4_ADDRESS': 2,
},
required_traits={
'CUSTOM_PHYSNET_NET2',
'HW_NIC_ACCEL_SSL',
}
)
expected._rg_by_id['3'] = objects.RequestGroup(
requester_id='3',
resources={
'DISK_GB': 5,
}
)
rs = objects.RequestSpec(flavor=flavor, is_bfv=False)
rr = utils.ResourceRequest(rs)
self.assertResourceRequestsEqual(expected, rr)
expected_querystring = (
'group_policy=isolate&'
'limit=1000&'
'required=CUSTOM_MAGIC%2CHW_CPU_X86_AVX%2C%21CUSTOM_BRONZE&'
'required1=CUSTOM_PHYSNET_NET1%2C%21CUSTOM_PHYSNET_NET2&'
'required2=CUSTOM_PHYSNET_NET2%2CHW_NIC_ACCEL_SSL&'
'resources=MEMORY_MB%3A2048%2CVCPU%3A2&'
'resources1=IPV4_ADDRESS%3A1%2CSRIOV_NET_VF%3A1&'
'resources2=IPV4_ADDRESS%3A2%2CSRIOV_NET_VF%3A1&'
'resources3=DISK_GB%3A5'
)
self.assertEqual(expected_querystring, rr.to_querystring())
def _test_resource_request_init_with_legacy_extra_specs(self):
flavor = objects.Flavor(
vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=0,
extra_specs={
'hw:cpu_policy': 'dedicated',
'hw:cpu_thread_policy': 'isolate',
'hw:emulator_threads_policy': 'isolate',
})
return objects.RequestSpec(flavor=flavor, is_bfv=False)
def test_resource_request_init_with_legacy_extra_specs(self):
expected = FakeResourceRequest()
expected._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
# we should have two PCPUs, one due to hw:cpu_policy and the
# other due to hw:cpu_thread_policy
'PCPU': 2,
'MEMORY_MB': 1024,
'DISK_GB': 15,
},
forbidden_traits={
# we should forbid hyperthreading due to hw:cpu_thread_policy
'HW_CPU_HYPERTHREADING',
},
)
rs = self._test_resource_request_init_with_legacy_extra_specs()
rr = utils.ResourceRequest(rs)
self.assertResourceRequestsEqual(expected, rr)
self.assertTrue(rr.cpu_pinning_requested)
def test_resource_request_init_with_legacy_extra_specs_no_translate(self):
expected = FakeResourceRequest()
expected._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
# we should have a VCPU despite hw:cpu_policy because
# enable_pinning_translate=False
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 15,
},
            # we should not forbid hyperthreading despite hw:cpu_thread_policy
            # because enable_pinning_translate=False
forbidden_traits=set(),
)
rs = self._test_resource_request_init_with_legacy_extra_specs()
rr = utils.ResourceRequest(rs, enable_pinning_translate=False)
self.assertResourceRequestsEqual(expected, rr)
self.assertFalse(rr.cpu_pinning_requested)
def test_resource_request_init_with_image_props(self):
flavor = objects.Flavor(
vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=0)
image = objects.ImageMeta.from_dict({
'properties': {
'trait:CUSTOM_TRUSTED': 'required',
},
'id': 'c8b1790e-a07d-4971-b137-44f2432936cd'
})
expected = FakeResourceRequest()
expected._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 15,
},
required_traits={
'CUSTOM_TRUSTED',
}
)
rs = objects.RequestSpec(flavor=flavor, image=image, is_bfv=False)
rr = utils.ResourceRequest(rs)
self.assertResourceRequestsEqual(expected, rr)
def _test_resource_request_init_with_legacy_image_props(self):
flavor = objects.Flavor(
vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=0)
image = objects.ImageMeta.from_dict({
'properties': {
'hw_cpu_policy': 'dedicated',
'hw_cpu_thread_policy': 'require',
},
'id': 'c8b1790e-a07d-4971-b137-44f2432936cd',
})
return objects.RequestSpec(flavor=flavor, image=image, is_bfv=False)
def test_resource_request_init_with_legacy_image_props(self):
expected = FakeResourceRequest()
expected._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
# we should have a PCPU due to hw_cpu_policy
'PCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 15,
},
required_traits={
# we should require hyperthreading due to hw_cpu_thread_policy
'HW_CPU_HYPERTHREADING',
},
)
rs = self._test_resource_request_init_with_legacy_image_props()
rr = utils.ResourceRequest(rs)
self.assertResourceRequestsEqual(expected, rr)
self.assertTrue(rr.cpu_pinning_requested)
def test_resource_request_init_with_legacy_image_props_no_translate(self):
expected = FakeResourceRequest()
expected._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
# we should have a VCPU despite hw_cpu_policy because
# enable_pinning_translate=False
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 15,
},
# we should not require hyperthreading despite hw_cpu_thread_policy
# because enable_pinning_translate=False
required_traits=set(),
)
rs = self._test_resource_request_init_with_legacy_image_props()
rr = utils.ResourceRequest(rs, enable_pinning_translate=False)
self.assertResourceRequestsEqual(expected, rr)
self.assertFalse(rr.cpu_pinning_requested)
def _test_resource_request_init_with_mixed_cpus(self, extra_specs):
flavor = objects.Flavor(
vcpus=4, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=0,
extra_specs=extra_specs)
rs = objects.RequestSpec(flavor=flavor)
expected = FakeResourceRequest()
expected._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
'PCPU': 2,
'VCPU': 2,
'MEMORY_MB': 1024,
'DISK_GB': 15,
},
required_traits=set(),
)
rr = utils.ResourceRequest(rs)
self.assertResourceRequestsEqual(expected, rr)
def test_resource_request_init_with_mixed_cpus_dedicated(self):
"""Ensure the mixed instance, which is generated through
'hw:cpu_dedicated_mask' extra spec, properly requests the PCPU, VCPU,
MEMORY_MB and DISK_GB resources.
"""
extra_specs = {
'hw:cpu_policy': 'mixed',
'hw:cpu_dedicated_mask': '2,3'
}
self._test_resource_request_init_with_mixed_cpus(extra_specs)
def test_resource_request_init_with_mixed_cpus_realtime(self):
"""Ensure the mixed instance, which is generated through real-time CPU
interface, properly requests the PCPU, VCPU, MEMORY_BM and DISK_GB
resources.
"""
extra_specs = {
'hw:cpu_policy': 'mixed',
"hw:cpu_realtime": "yes",
"hw:cpu_realtime_mask": '2,3'
}
self._test_resource_request_init_with_mixed_cpus(extra_specs)
def _test_resource_request_init_with_mixed_cpus_iso_emu(self, extra_specs):
flavor = objects.Flavor(
vcpus=4, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=0,
extra_specs=extra_specs)
rs = objects.RequestSpec(flavor=flavor)
expected = FakeResourceRequest()
expected._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
# An extra PCPU resource is requested due to 'ISOLATE' emulator
# thread policy.
'PCPU': 3,
'VCPU': 2,
'MEMORY_MB': 1024,
'DISK_GB': 15,
},
required_traits=set(),
)
rr = utils.ResourceRequest(rs)
self.assertResourceRequestsEqual(expected, rr)
    def test_resource_request_init_with_mixed_cpus_iso_emu_dedicated(self):
        """Ensure the mixed instance, which is generated through the
        'hw:cpu_dedicated_mask' extra spec, properly requests the PCPU, VCPU,
        MEMORY_MB and DISK_GB resources, and that an extra PCPU resource is
        requested due to the ISOLATE emulator thread policy.
"""
extra_specs = {
'hw:cpu_policy': 'mixed',
'hw:cpu_dedicated_mask': '2,3',
'hw:emulator_threads_policy': 'isolate',
}
self._test_resource_request_init_with_mixed_cpus_iso_emu(extra_specs)
    def test_resource_request_init_with_mixed_cpus_iso_emu_realtime(self):
        """Ensure the mixed instance, which is generated through the real-time
        CPU extra specs, properly requests the PCPU, VCPU, MEMORY_MB and
        DISK_GB resources, and that an extra PCPU resource is requested due to
        the ISOLATE emulator thread policy.
"""
extra_specs = {
'hw:cpu_policy': 'mixed',
"hw:cpu_realtime": "yes",
"hw:cpu_realtime_mask": '2,3',
'hw:emulator_threads_policy': 'isolate',
}
self._test_resource_request_init_with_mixed_cpus_iso_emu(extra_specs)
def test_resource_request_init_is_bfv(self):
flavor = objects.Flavor(
vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=1555)
expected = FakeResourceRequest()
expected._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 1,
'MEMORY_MB': 1024,
# this should only include the ephemeral and swap disk, and the
# latter should be converted from MB to GB and rounded up
'DISK_GB': 7,
},
)
rs = objects.RequestSpec(flavor=flavor, is_bfv=True)
rr = utils.ResourceRequest(rs)
self.assertResourceRequestsEqual(expected, rr)
def test_resource_request_with_vpmems(self):
flavor = objects.Flavor(
vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=0,
extra_specs={'hw:pmem': '4GB, 4GB,SMALL'})
expected = FakeResourceRequest()
expected._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 15,
'CUSTOM_PMEM_NAMESPACE_4GB': 2,
'CUSTOM_PMEM_NAMESPACE_SMALL': 1
},
)
rs = objects.RequestSpec(flavor=flavor, is_bfv=False)
rr = utils.ResourceRequest(rs)
self.assertResourceRequestsEqual(expected, rr)
def test_resource_request_with_vtpm_1_2(self):
flavor = objects.Flavor(
vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=0,
extra_specs={'hw:tpm_version': '1.2', 'hw:tpm_model': 'tpm-tis'},
)
image = objects.ImageMeta(
properties=objects.ImageMetaProps(
hw_tpm_version='1.2',
hw_tpm_model='tpm-tis',
)
)
expected = FakeResourceRequest()
expected._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
required_traits={'COMPUTE_SECURITY_TPM_1_2'},
resources={
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 15,
},
)
rs = objects.RequestSpec(flavor=flavor, image=image, is_bfv=False)
rr = utils.ResourceRequest(rs)
self.assertResourceRequestsEqual(expected, rr)
def test_resource_request_with_vtpm_2_0(self):
flavor = objects.Flavor(
vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=0,
extra_specs={'hw:tpm_version': '2.0', 'hw:tpm_model': 'tpm-crb'},
)
image = objects.ImageMeta(
properties=objects.ImageMetaProps(
hw_tpm_version='2.0',
hw_tpm_model='tpm-crb',
)
)
expected = FakeResourceRequest()
expected._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
required_traits={'COMPUTE_SECURITY_TPM_2_0'},
resources={
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 15,
},
)
rs = objects.RequestSpec(flavor=flavor, image=image, is_bfv=False)
rr = utils.ResourceRequest(rs)
self.assertResourceRequestsEqual(expected, rr)
def test_resource_request_add_group_inserts_the_group(self):
flavor = objects.Flavor(
vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=0)
rs = objects.RequestSpec(flavor=flavor, is_bfv=False)
req = utils.ResourceRequest(rs)
rg1 = objects.RequestGroup(requester_id='foo',
required_traits={'CUSTOM_FOO'})
req._add_request_group(rg1)
rg2 = objects.RequestGroup(requester_id='bar',
forbidden_traits={'CUSTOM_BAR'})
req._add_request_group(rg2)
self.assertIs(rg1, req.get_request_group('foo'))
self.assertIs(rg2, req.get_request_group('bar'))
def test_empty_groups_forbidden(self):
"""Not allowed to add premade RequestGroup without resources/traits/
aggregates.
"""
flavor = objects.Flavor(
vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=0)
rs = objects.RequestSpec(flavor=flavor, is_bfv=False)
req = utils.ResourceRequest(rs)
rg = objects.RequestGroup(requester_id='foo')
self.assertRaises(ValueError, req._add_request_group, rg)
def test_claim_resources_on_destination_no_source_allocations(self):
"""Tests the negative scenario where the instance does not have
allocations in Placement on the source compute node so no claim is
attempted on the destination compute node.
"""
reportclient = report.SchedulerReportClient()
instance = fake_instance.fake_instance_obj(self.context)
source_node = objects.ComputeNode(
uuid=uuids.source_node, host=instance.host)
dest_node = objects.ComputeNode(uuid=uuids.dest_node, host='dest-host')
@mock.patch.object(reportclient,
'get_allocs_for_consumer',
return_value={})
@mock.patch.object(reportclient,
'claim_resources',
new_callable=mock.NonCallableMock)
def test(mock_claim, mock_get_allocs):
ex = self.assertRaises(
exception.ConsumerAllocationRetrievalFailed,
utils.claim_resources_on_destination,
self.context, reportclient, instance, source_node, dest_node)
mock_get_allocs.assert_called_once_with(
self.context, instance.uuid)
self.assertIn(
'Expected to find allocations for source node resource '
'provider %s' % source_node.uuid, str(ex))
test()
def test_claim_resources_on_destination_claim_fails(self):
"""Tests the negative scenario where the resource allocation claim
on the destination compute node fails, resulting in an error.
"""
reportclient = report.SchedulerReportClient()
instance = fake_instance.fake_instance_obj(self.context)
source_node = objects.ComputeNode(
uuid=uuids.source_node, host=instance.host)
dest_node = objects.ComputeNode(uuid=uuids.dest_node, host='dest-host')
source_res_allocs = {
'allocations': {
uuids.source_node: {
'resources': {
'VCPU': instance.vcpus,
'MEMORY_MB': instance.memory_mb,
# This would really include ephemeral and swap too but
# we're lazy.
'DISK_GB': instance.root_gb
}
}
},
'consumer_generation': 1,
'project_id': uuids.project_id,
'user_id': uuids.user_id
}
dest_alloc_request = {
'allocations': {
uuids.dest_node: {
'resources': {
'VCPU': instance.vcpus,
'MEMORY_MB': instance.memory_mb,
'DISK_GB': instance.root_gb
}
}
},
}
@mock.patch.object(reportclient,
'get_allocs_for_consumer',
return_value=source_res_allocs)
@mock.patch.object(reportclient,
'claim_resources', return_value=False)
def test(mock_claim, mock_get_allocs):
# NOTE(danms): Don't pass source_node_allocations here to test
# that they are fetched if needed.
self.assertRaises(exception.NoValidHost,
utils.claim_resources_on_destination,
self.context, reportclient, instance,
source_node, dest_node)
mock_get_allocs.assert_called_once_with(
self.context, instance.uuid)
mock_claim.assert_called_once_with(
self.context, instance.uuid, dest_alloc_request,
instance.project_id, instance.user_id,
allocation_request_version='1.28', consumer_generation=1)
test()
def test_claim_resources_on_destination(self):
"""Happy path test where everything is successful."""
reportclient = report.SchedulerReportClient()
instance = fake_instance.fake_instance_obj(self.context)
source_node = objects.ComputeNode(
uuid=uuids.source_node, host=instance.host)
dest_node = objects.ComputeNode(uuid=uuids.dest_node, host='dest-host')
source_res_allocs = {
uuids.source_node: {
'resources': {
'VCPU': instance.vcpus,
'MEMORY_MB': instance.memory_mb,
# This would really include ephemeral and swap too but
# we're lazy.
'DISK_GB': instance.root_gb
}
}
}
dest_alloc_request = {
'allocations': {
uuids.dest_node: {
'resources': {
'VCPU': instance.vcpus,
'MEMORY_MB': instance.memory_mb,
'DISK_GB': instance.root_gb
}
}
},
}
@mock.patch.object(reportclient,
'get_allocs_for_consumer')
@mock.patch.object(reportclient,
'claim_resources', return_value=True)
def test(mock_claim, mock_get_allocs):
utils.claim_resources_on_destination(
self.context, reportclient, instance, source_node, dest_node,
source_res_allocs, consumer_generation=None)
self.assertFalse(mock_get_allocs.called)
mock_claim.assert_called_once_with(
self.context, instance.uuid, dest_alloc_request,
instance.project_id, instance.user_id,
allocation_request_version='1.28', consumer_generation=None)
test()
@mock.patch('nova.scheduler.client.report.SchedulerReportClient')
@mock.patch('nova.scheduler.utils.request_is_rebuild')
def test_claim_resources(self, mock_is_rebuild, mock_client):
"""Tests that when claim_resources() is called, that we appropriately
call the placement client to claim resources for the instance.
"""
mock_is_rebuild.return_value = False
ctx = nova_context.RequestContext(user_id=uuids.user_id)
spec_obj = objects.RequestSpec(project_id=uuids.project_id)
instance_uuid = uuids.instance
alloc_req = mock.sentinel.alloc_req
mock_client.claim_resources.return_value = True
res = utils.claim_resources(ctx, mock_client, spec_obj, instance_uuid,
alloc_req)
mock_client.claim_resources.assert_called_once_with(
ctx, uuids.instance, mock.sentinel.alloc_req, uuids.project_id,
uuids.user_id, allocation_request_version=None,
consumer_generation=None)
self.assertTrue(res)
# Now do it again but with RequestSpec.user_id set.
spec_obj.user_id = uuids.spec_user_id
mock_client.reset_mock()
utils.claim_resources(ctx, mock_client, spec_obj, instance_uuid,
alloc_req)
mock_client.claim_resources.assert_called_once_with(
ctx, uuids.instance, mock.sentinel.alloc_req, uuids.project_id,
uuids.spec_user_id, allocation_request_version=None,
consumer_generation=None)
@mock.patch('nova.scheduler.client.report.SchedulerReportClient')
@mock.patch('nova.scheduler.utils.request_is_rebuild')
def test_claim_resources_for_policy_check(self, mock_is_rebuild,
mock_client):
mock_is_rebuild.return_value = True
ctx = mock.Mock(user_id=uuids.user_id)
res = utils.claim_resources(ctx, None, mock.sentinel.spec_obj,
mock.sentinel.instance_uuid, [])
self.assertTrue(res)
mock_is_rebuild.assert_called_once_with(mock.sentinel.spec_obj)
self.assertFalse(mock_client.claim_resources.called)
def test_get_weight_multiplier(self):
host_attr = {'vcpus_total': 4, 'vcpus_used': 6,
'cpu_allocation_ratio': 1.0}
host1 = fakes.FakeHostState('fake-host', 'node', host_attr)
host1.aggregates = [
objects.Aggregate(
id=1,
name='foo',
hosts=['fake-host'],
metadata={'cpu_weight_multiplier': 'invalid'},
)]
        # Fall back to the given default value if the aggregate metadata is invalid.
self.assertEqual(
1.0,
utils.get_weight_multiplier(host1, 'cpu_weight_multiplier', 1.0)
)
host1.aggregates = [
objects.Aggregate(
id=1,
name='foo',
hosts=['fake-host'],
metadata={'cpu_weight_multiplier': '1.9'},
)]
# Get value from aggregate metadata
self.assertEqual(
1.9,
utils.get_weight_multiplier(host1, 'cpu_weight_multiplier', 1.0)
)
host1.aggregates = [
objects.Aggregate(
id=1,
name='foo',
hosts=['fake-host'],
metadata={'cpu_weight_multiplier': '1.9'}),
objects.Aggregate(
id=2,
name='foo',
hosts=['fake-host'],
metadata={'cpu_weight_multiplier': '1.8'}),
]
# Get min value from aggregate metadata
self.assertEqual(
1.8,
utils.get_weight_multiplier(host1, 'cpu_weight_multiplier', 1.0)
)
def _set_up_and_fill_provider_mapping(self, requested_resources):
request_spec = objects.RequestSpec()
request_spec.requested_resources = requested_resources
allocs = {
uuids.rp_uuid1: {
'resources': {
'NET_BW_EGR_KILOBIT_PER_SEC': 1,
}
},
uuids.rp_uuid2: {
'resources': {
'NET_BW_INGR_KILOBIT_PER_SEC': 1,
}
}
}
mappings = {
uuids.port_id1: [uuids.rp_uuid2],
uuids.port_id2: [uuids.rp_uuid1],
}
allocation_req = {'allocations': allocs, 'mappings': mappings}
selection = objects.Selection(
allocation_request=jsonutils.dumps(allocation_req))
# Unmapped initially
for rg in requested_resources:
self.assertEqual([], rg.provider_uuids)
utils.fill_provider_mapping(request_spec, selection)
def test_fill_provider_mapping(self):
rg1 = objects.RequestGroup(requester_id=uuids.port_id1)
rg2 = objects.RequestGroup(requester_id=uuids.port_id2)
self._set_up_and_fill_provider_mapping([rg1, rg2])
# Validate the mappings
self.assertEqual([uuids.rp_uuid2], rg1.provider_uuids)
self.assertEqual([uuids.rp_uuid1], rg2.provider_uuids)
def test_fill_provider_mapping_no_op(self):
# This just proves that having 'mappings' in the allocation request
# doesn't break anything.
self._set_up_and_fill_provider_mapping([])
@mock.patch.object(objects.RequestSpec,
'map_requested_resources_to_providers')
def test_fill_provider_mapping_based_on_allocation_returns_early(
self, mock_map):
context = nova_context.RequestContext()
request_spec = objects.RequestSpec()
        # set up the request so that there is nothing to do
request_spec.requested_resources = []
report_client = mock.sentinel.report_client
allocation = mock.sentinel.allocation
utils.fill_provider_mapping_based_on_allocation(
context, report_client, request_spec, allocation)
mock_map.assert_not_called()
@mock.patch('nova.scheduler.client.report.SchedulerReportClient')
@mock.patch.object(objects.RequestSpec,
'map_requested_resources_to_providers')
def test_fill_provider_mapping_based_on_allocation(
self, mock_map, mock_report_client):
context = nova_context.RequestContext()
request_spec = objects.RequestSpec()
        # set up the request with a single group that needs to be mapped
request_spec.requested_resources = [objects.RequestGroup()]
allocation = {
uuids.rp_uuid: {
'resources': {
'NET_BW_EGR_KILOBIT_PER_SEC': 1,
}
}
}
        traits = ['CUSTOM_PHYSNET1', 'CUSTOM_VNIC_TYPE_NORMAL']
        mock_report_client.get_provider_traits.return_value = report.TraitInfo(
            traits=traits, generation=0)
utils.fill_provider_mapping_based_on_allocation(
context, mock_report_client, request_spec, allocation)
mock_map.assert_called_once_with(allocation, {uuids.rp_uuid: traits})
class TestEncryptedMemoryTranslation(TestUtilsBase):
flavor_name = 'm1.test'
image_name = 'cirros'
def _get_request_spec(self, extra_specs, image):
flavor = objects.Flavor(name=self.flavor_name,
vcpus=1,
memory_mb=1024,
root_gb=10,
ephemeral_gb=5,
swap=0,
extra_specs=extra_specs)
# NOTE(aspiers): RequestSpec.flavor is not nullable, but
# RequestSpec.image is.
reqspec = objects.RequestSpec(flavor=flavor)
if image:
reqspec.image = image
return reqspec
def _get_resource_request(self, extra_specs, image):
reqspec = self._get_request_spec(extra_specs, image)
return utils.ResourceRequest(reqspec)
def _get_expected_resource_request(self, mem_encryption_context):
expected_resources = {
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 15,
}
if mem_encryption_context:
expected_resources[orc.MEM_ENCRYPTION_CONTEXT] = 1
expected = FakeResourceRequest()
expected._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources=expected_resources)
return expected
def _test_encrypted_memory_support_not_required(self, extra_specs,
image=None):
resreq = self._get_resource_request(extra_specs, image)
expected = self._get_expected_resource_request(False)
self.assertResourceRequestsEqual(expected, resreq)
def test_encrypted_memory_support_empty_extra_specs(self):
self._test_encrypted_memory_support_not_required(extra_specs={})
def test_encrypted_memory_support_false_extra_spec(self):
for extra_spec in ('0', 'false', 'False'):
self._test_encrypted_memory_support_not_required(
extra_specs={'hw:mem_encryption': extra_spec})
def test_encrypted_memory_support_empty_image_props(self):
self._test_encrypted_memory_support_not_required(
extra_specs={},
image=objects.ImageMeta(properties=objects.ImageMetaProps()))
def test_encrypted_memory_support_false_image_prop(self):
for image_prop in ('0', 'false', 'False'):
self._test_encrypted_memory_support_not_required(
extra_specs={},
image=objects.ImageMeta(
properties=objects.ImageMetaProps(
hw_mem_encryption=image_prop))
)
def test_encrypted_memory_support_both_false(self):
for extra_spec in ('0', 'false', 'False'):
for image_prop in ('0', 'false', 'False'):
self._test_encrypted_memory_support_not_required(
extra_specs={'hw:mem_encryption': extra_spec},
image=objects.ImageMeta(
properties=objects.ImageMetaProps(
hw_mem_encryption=image_prop))
)
def _test_encrypted_memory_support_conflict(self, extra_spec,
image_prop_in,
image_prop_out):
# NOTE(aspiers): hw_mem_encryption image property is a
# FlexibleBooleanField, so the result should always be coerced
# to a boolean.
self.assertIsInstance(image_prop_out, bool)
image = objects.ImageMeta(
name=self.image_name,
properties=objects.ImageMetaProps(
hw_mem_encryption=image_prop_in)
)
reqspec = self._get_request_spec(
extra_specs={'hw:mem_encryption': extra_spec},
image=image)
# Sanity check that our test request spec has an extra_specs
# dict, which is needed in order for there to be a conflict.
self.assertIn('flavor', reqspec)
self.assertIn('extra_specs', reqspec.flavor)
error = (
"Flavor %(flavor_name)s has hw:mem_encryption extra spec "
"explicitly set to %(flavor_val)s, conflicting with "
"image %(image_name)s which has hw_mem_encryption property "
"explicitly set to %(image_val)s"
)
exc = self.assertRaises(
exception.FlavorImageConflict,
utils.ResourceRequest, reqspec
)
error_data = {
'flavor_name': self.flavor_name,
'flavor_val': extra_spec,
'image_name': self.image_name,
'image_val': image_prop_out,
}
self.assertEqual(error % error_data, str(exc))
def test_encrypted_memory_support_conflict1(self):
for extra_spec in ('0', 'false', 'False'):
for image_prop_in in ('1', 'true', 'True'):
self._test_encrypted_memory_support_conflict(
extra_spec, image_prop_in, True
)
def test_encrypted_memory_support_conflict2(self):
for extra_spec in ('1', 'true', 'True'):
for image_prop_in in ('0', 'false', 'False'):
self._test_encrypted_memory_support_conflict(
extra_spec, image_prop_in, False
)
@mock.patch.object(utils, 'LOG')
def _test_encrypted_memory_support_required(self, requesters, extra_specs,
mock_log, image=None):
resreq = self._get_resource_request(extra_specs, image)
expected = self._get_expected_resource_request(True)
self.assertResourceRequestsEqual(expected, resreq)
mock_log.debug.assert_has_calls([
mock.call('Added %s=1 to requested resources',
orc.MEM_ENCRYPTION_CONTEXT)
])
def test_encrypted_memory_support_extra_spec(self):
for extra_spec in ('1', 'true', 'True'):
self._test_encrypted_memory_support_required(
'hw:mem_encryption extra spec',
{'hw:mem_encryption': extra_spec},
image=objects.ImageMeta(
id='005249be-3c2f-4351-9df7-29bb13c21b14',
properties=objects.ImageMetaProps(
hw_machine_type='q35',
hw_firmware_type='uefi'))
)
def test_encrypted_memory_support_image_prop(self):
for image_prop in ('1', 'true', 'True'):
self._test_encrypted_memory_support_required(
'hw_mem_encryption image property',
{},
image=objects.ImageMeta(
id='005249be-3c2f-4351-9df7-29bb13c21b14',
name=self.image_name,
properties=objects.ImageMetaProps(
hw_machine_type='q35',
hw_firmware_type='uefi',
hw_mem_encryption=image_prop))
)
def test_encrypted_memory_support_both_required(self):
for extra_spec in ('1', 'true', 'True'):
for image_prop in ('1', 'true', 'True'):
self._test_encrypted_memory_support_required(
'hw:mem_encryption extra spec and '
'hw_mem_encryption image property',
{'hw:mem_encryption': extra_spec},
image=objects.ImageMeta(
id='005249be-3c2f-4351-9df7-29bb13c21b14',
name=self.image_name,
properties=objects.ImageMetaProps(
hw_machine_type='q35',
hw_firmware_type='uefi',
hw_mem_encryption=image_prop))
)
class TestResourcesFromRequestGroupDefaultPolicy(test.NoDBTestCase):
"""These test cases assert what happens when the group policy is missing
from the flavor but more than one numbered request group is requested from
    various sources. Note that while the image can provide required traits for
    the resource request, those traits are always added to the unnumbered
    group, so the image cannot be a source of additional numbered groups.
"""
def setUp(self):
super(TestResourcesFromRequestGroupDefaultPolicy, self).setUp()
self.context = nova_context.get_admin_context()
self.port_group1 = objects.RequestGroup.from_port_request(
self.context, uuids.port1,
port_resource_request={
"resources": {
"NET_BW_IGR_KILOBIT_PER_SEC": 1000,
"NET_BW_EGR_KILOBIT_PER_SEC": 1000},
"required": ["CUSTOM_PHYSNET_2",
"CUSTOM_VNIC_TYPE_NORMAL"]
})
self.port_group2 = objects.RequestGroup.from_port_request(
self.context, uuids.port2,
port_resource_request={
"resources": {
"NET_BW_IGR_KILOBIT_PER_SEC": 2000,
"NET_BW_EGR_KILOBIT_PER_SEC": 2000},
"required": ["CUSTOM_PHYSNET_3",
"CUSTOM_VNIC_TYPE_DIRECT"]
})
self.image = objects.ImageMeta(properties=objects.ImageMetaProps())
def test_one_group_from_flavor_dont_warn(self):
flavor = objects.Flavor(
vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=0,
extra_specs={
'resources1:CUSTOM_BAR': '2',
})
request_spec = objects.RequestSpec(
flavor=flavor, image=self.image, requested_resources=[])
rr = utils.resources_from_request_spec(
self.context, request_spec, host_manager=mock.Mock())
log = self.stdlog.logger.output
self.assertNotIn(
"There is more than one numbered request group in the allocation "
"candidate query but the flavor did not specify any group policy.",
log)
self.assertNotIn(
"To avoid the placement failure nova defaults the group policy to "
"'none'.",
log)
self.assertIsNone(rr.group_policy)
self.assertNotIn('group_policy=none', rr.to_querystring())
def test_one_group_from_port_dont_warn(self):
flavor = objects.Flavor(
vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=0,
extra_specs={})
request_spec = objects.RequestSpec(
flavor=flavor, image=self.image,
requested_resources=[self.port_group1])
rr = utils.resources_from_request_spec(
self.context, request_spec, host_manager=mock.Mock())
log = self.stdlog.logger.output
self.assertNotIn(
"There is more than one numbered request group in the allocation "
"candidate query but the flavor did not specify any group policy.",
log)
self.assertNotIn(
"To avoid the placement failure nova defaults the group policy to "
"'none'.",
log)
self.assertIsNone(rr.group_policy)
self.assertNotIn('group_policy=none', rr.to_querystring())
def test_two_groups_from_flavor_only_warns(self):
flavor = objects.Flavor(
vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=0,
extra_specs={
'resources1:CUSTOM_BAR': '2',
'resources2:CUSTOM_FOO': '1'
})
request_spec = objects.RequestSpec(
flavor=flavor, image=self.image, requested_resources=[])
rr = utils.resources_from_request_spec(
self.context, request_spec, host_manager=mock.Mock())
log = self.stdlog.logger.output
self.assertIn(
"There is more than one numbered request group in the allocation "
"candidate query but the flavor did not specify any group policy.",
log)
self.assertNotIn(
"To avoid the placement failure nova defaults the group policy to "
"'none'.",
log)
self.assertIsNone(rr.group_policy)
self.assertNotIn('group_policy', rr.to_querystring())
def test_one_group_from_flavor_one_from_port_policy_defaulted(self):
flavor = objects.Flavor(
vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=0,
extra_specs={
'resources1:CUSTOM_BAR': '2',
})
request_spec = objects.RequestSpec(
flavor=flavor, image=self.image,
requested_resources=[self.port_group1])
rr = utils.resources_from_request_spec(
self.context, request_spec, host_manager=mock.Mock())
log = self.stdlog.logger.output
self.assertIn(
"There is more than one numbered request group in the allocation "
"candidate query but the flavor did not specify any group policy.",
log)
self.assertIn(
"To avoid the placement failure nova defaults the group policy to "
"'none'.",
log)
self.assertEqual('none', rr.group_policy)
self.assertIn('group_policy=none', rr.to_querystring())
def test_two_groups_from_ports_policy_defaulted(self):
flavor = objects.Flavor(
vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=0,
extra_specs={})
request_spec = objects.RequestSpec(
flavor=flavor, image=self.image,
requested_resources=[self.port_group1, self.port_group2])
rr = utils.resources_from_request_spec(
self.context, request_spec, host_manager=mock.Mock())
log = self.stdlog.logger.output
self.assertIn(
"There is more than one numbered request group in the allocation "
"candidate query but the flavor did not specify any group policy.",
log)
self.assertIn(
"To avoid the placement failure nova defaults the group policy to "
"'none'.",
log)
self.assertEqual('none', rr.group_policy)
self.assertIn('group_policy=none', rr.to_querystring())
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common utilities used across this package."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
# Skip all operations that are backprop related or export summaries.
SKIPPED_PREFIXES = (
'gradients/', 'RMSProp/', 'Adagrad/', 'Const_', 'HistogramSummary',
'ScalarSummary')
# Valid activation ops for quantization end points.
_ACTIVATION_OP_SUFFIXES = ['/Relu6', '/Relu', '/Identity']
# Regular expression for recognizing nodes that are part of a batch norm group.
_BATCHNORM_RE = re.compile(r'^(.*)/BatchNorm/batchnorm')
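# For example, an op named 'tower_0/conv1/BatchNorm/batchnorm/mul' matches this
# pattern, and BatchNormGroups() below would record the prefix 'tower_0/conv1'.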
def BatchNormGroups(graph):
"""Finds batch norm layers, returns their prefixes as a list of strings.
Args:
graph: Graph to inspect.
Returns:
List of strings, prefixes of batch norm group names found.
"""
bns = []
for op in graph.get_operations():
match = _BATCHNORM_RE.search(op.name)
if match:
bn = match.group(1)
if not bn.startswith(SKIPPED_PREFIXES):
bns.append(bn)
# Filter out duplicates.
return list(collections.OrderedDict.fromkeys(bns))
def GetEndpointActivationOp(graph, prefix):
"""Returns an Operation with the given prefix and a valid end point suffix.
Args:
graph: Graph where to look for the operation.
prefix: String, prefix of Operation to return.
Returns:
The Operation with the given prefix and a valid end point suffix or None if
there are no matching operations in the graph for any valid suffix
"""
for suffix in _ACTIVATION_OP_SUFFIXES:
activation = _GetOperationByNameDontThrow(graph, prefix + suffix)
if activation:
return activation
return None
def _GetOperationByNameDontThrow(graph, name):
"""Returns an Operation with the given name.
Args:
graph: Graph where to look for the operation.
name: String, name of Operation to return.
Returns:
The Operation with the given name. None if the name does not correspond to
any operation in the graph
"""
try:
return graph.get_operation_by_name(name)
except KeyError:
return None
def CreateOrGetQuantizationStep():
"""Returns a Tensor of the number of steps the quantized graph has run.
Returns:
Quantization step Tensor.
"""
quantization_step_name = 'fake_quantization_step'
quantization_step_tensor_name = quantization_step_name + '/AssignAdd:0'
g = ops.get_default_graph()
try:
return g.get_tensor_by_name(quantization_step_tensor_name)
except KeyError:
# Create in proper graph and base name_scope.
with g.name_scope(None):
quantization_step_tensor = variable_scope.get_variable(
quantization_step_name,
shape=[],
dtype=dtypes.int64,
initializer=init_ops.zeros_initializer(),
trainable=False,
collections=[ops.GraphKeys.GLOBAL_VARIABLES])
with g.name_scope(quantization_step_tensor.op.name + '/'):
# We return the incremented variable tensor. Since this is used in conds
# for quant_delay and freeze_bn_delay, it will run once per graph
# execution.
return state_ops.assign_add(quantization_step_tensor, 1)
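# Illustrative usage sketch (not part of the original module): it only walks the
# default graph, which is empty when this file is run directly, so the loop body
# never executes unless a model containing batch norm layers was built first.
if __name__ == '__main__':
  _g = ops.get_default_graph()
  for _prefix in BatchNormGroups(_g):
    _act = GetEndpointActivationOp(_g, _prefix)
    print(_prefix, _act.name if _act is not None else 'no activation endpoint')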
|
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from networkapiclient.ApiGenericClient import ApiGenericClient
from networkapiclient.utils import build_uri_with_ids
class ApiDCEnvironment(ApiGenericClient):
def __init__(self, networkapi_url, user, password, user_ldap=None):
"""Class constructor receives parameters to connect to the networkAPI.
:param networkapi_url: URL to access the network API.
:param user: User for authentication.
:param password: Password for authentication.
"""
super(ApiDCEnvironment, self).__init__(
networkapi_url,
user,
password,
user_ldap
)
def get(self, ids=None, **kwargs):
"""
Method to get dc environments by their ids or list all.
:param ids: List containing identifiers of environments
:param include: Array containing fields to include on response.
:param exclude: Array containing fields to exclude on response.
:param fields: Array containing fields to override default fields.
:param kind: Determine if result will be detailed ('detail') or basic ('basic').
:return: Dict containing environments
"""
url = build_uri_with_ids('api/v3/environment/dc/', ids)
return super(ApiDCEnvironment, self).get(self.prepare_url(url, kwargs))
def search(self, **kwargs):
"""
Method to search environments based on extends search.
:param search: Dict containing QuerySets to find environments.
:param include: Array containing fields to include on response.
:param exclude: Array containing fields to exclude on response.
:param fields: Array containing fields to override default fields.
:param kind: Determine if result will be detailed ('detail') or basic ('basic').
:return: Dict containing environments
"""
return super(ApiDCEnvironment, self).get(self.prepare_url('api/v3/environment/dc/', kwargs))
def create(self, environments):
"""
Method to create environments
        :param environments: Environments to be created in the database.
:return: None
"""
data = dict(dc=environments)
return super(ApiDCEnvironment, self).post('api/v3/environment/dc/', data)
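# Minimal usage sketch (illustrative only): the URL and credentials below are
# hypothetical placeholders, not values defined by this library.
if __name__ == '__main__':
    client = ApiDCEnvironment(
        'http://networkapi.example.com/', 'my_user', 'my_password')
    # Fetch two DC environments by id, returning only the selected fields.
    print(client.get(ids=[1, 2], fields=['id', 'name']))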
|
# -*- coding: utf-8 -*-
#
# Hermes documentation build configuration file
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
sys.path.append(os.path.abspath(os.path.join("..", "python")))
sys.path.append(os.path.abspath('exts'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.pngmath', 'math_dollar', 'youtube', 'popup', 'sourcecode', 'latexcode']
latex_preamble = r"""
\usepackage{dsfont}
\usepackage{braket}
\usepackage{slashed}
\usepackage{etoolbox}
\pagestyle{fancy}
\usepackage{color}
\usepackage{float}
\usepackage{bm}
\let\origfigure=\figure
\renewenvironment{figure}[6]{
\origfigure[H]}
{\endlist}
\def\degrees{^\circ}
\def\d{{\rm d}}
\pagenumbering{arabic}
\def\L{{\mathcal L}}
\def\H{{\mathcal H}}
\def\M{{\mathcal M}}
\def\matrix{}
\def\fslash#1{#1 \!\!\!/}
\def\F{{\bf F}}
\def\R{{\bf R}}
\def\J{{\bf J}}
\def\x{{\bf x}}
\def\y{{\bf y}}
\def\h{{\rm h}}
\def\a{{\rm a}}
\newcommand{\bfx}{\mbox{\boldmath $x$}}
\newcommand{\bfy}{\mbox{\boldmath $y$}}
\newcommand{\bfz}{\mbox{\boldmath $z$}}
\newcommand{\bfv}{\mbox{\boldmath $v$}}
\newcommand{\bfu}{\mbox{\boldmath $u$}}
\newcommand{\bfF}{\mbox{\boldmath $F$}}
\newcommand{\bfJ}{\mbox{\boldmath $J$}}
\newcommand{\bfU}{\mbox{\boldmath $U$}}
\newcommand{\bfY}{\mbox{\boldmath $Y$}}
\newcommand{\bfR}{\mbox{\boldmath $R$}}
\newcommand{\bfg}{\mbox{\boldmath $g$}}
\newcommand{\bfc}{\mbox{\boldmath $c$}}
\newcommand{\bfxi}{\mbox{\boldmath $\xi$}}
\newcommand{\bfw}{\mbox{\boldmath $w$}}
\newcommand{\bfE}{\mbox{\boldmath $E$}}
\newcommand{\bfS}{\mbox{\boldmath $S$}}
\newcommand{\bfb}{\mbox{\boldmath $b$}}
\newcommand{\bfH}{\mbox{\boldmath $H$}}
\def\Hcurl{{\bfH({\rm curl})}}
\def\Hdiv{{\bfH({\rm div})}}
\newcommand{\dd}[2]{\frac{\partial #1}{\partial #2}}
\newcommand{\dx}{\;\mbox{d}\bfx}
%\def\back{\!\!\!\!\!\!\!\!\!\!}
\def\PY{}
\def\PYZcb{}
\def\PYZob{}
\def\PYZus{}
\def\PYZbs{}
\def\PYZpc{}
\def\PYZti{}
\def\PYZsh{}
\def\PYZhy{-}
\def\back{}
\def\col#1#2{\left(\matrix{#1#2}\right)}
\def\row#1#2{\left(\matrix{#1#2}\right)}
\def\mat#1{\begin{pmatrix}#1\end{pmatrix}}
\def\matd#1#2{\left(\matrix{#1\back0\cr0\back#2}\right)}
\def\p#1#2{{\partial#1\over\partial#2}}
\def\cg#1#2#3#4#5#6{({#1},\,{#2},\,{#3},\,{#4}\,|\,{#5},\,{#6})}
\def\half{{\textstyle{1\over2}}}
\def\jsym#1#2#3#4#5#6{\left\{\matrix{
{#1}{#2}{#3}
{#4}{#5}{#6}
}\right\}}
\def\diag{\hbox{diag}}
\font\dsrom=dsrom10
\def\one{\hbox{\dsrom 1}}
\def\res{\mathop{\mathrm{Res}}}
\def\mathnot#1{\text{"$#1$"}}
%See Character Table for cmmib10:
%http://www.math.union.edu/~dpvc/jsmath/download/extra-fonts/cmmib10/cmmib10.html
\font\mib=cmmib10
\def\balpha{\hbox{\mib\char"0B}}
\def\bbeta{\hbox{\mib\char"0C}}
\def\bgamma{\hbox{\mib\char"0D}}
\def\bdelta{\hbox{\mib\char"0E}}
\def\bepsilon{\hbox{\mib\char"0F}}
\def\bzeta{\hbox{\mib\char"10}}
\def\boldeta{\hbox{\mib\char"11}}
\def\btheta{\hbox{\mib\char"12}}
\def\biota{\hbox{\mib\char"13}}
\def\bkappa{\hbox{\mib\char"14}}
\def\blambda{\hbox{\mib\char"15}}
\def\bmu{\hbox{\mib\char"16}}
\def\bnu{\hbox{\mib\char"17}}
\def\bxi{\hbox{\mib\char"18}}
\def\bpi{\hbox{\mib\char"19}}
\def\brho{\hbox{\mib\char"1A}}
\def\bsigma{\hbox{\mib\char"1B}}
\def\btau{\hbox{\mib\char"1C}}
\def\bupsilon{\hbox{\mib\char"1D}}
\def\bphi{\hbox{\mib\char"1E}}
\def\bchi{\hbox{\mib\char"1F}}
\def\bpsi{\hbox{\mib\char"20}}
\def\bomega{\hbox{\mib\char"21}}
\def\bvarepsilon{\hbox{\mib\char"22}}
\def\bvartheta{\hbox{\mib\char"23}}
\def\bvarpi{\hbox{\mib\char"24}}
\def\bvarrho{\hbox{\mib\char"25}}
\def\bvarphi{\hbox{\mib\char"27}}
%how to use:
%$$\alpha\balpha$$
%$$\beta\bbeta$$
%$$\gamma\bgamma$$
%$$\delta\bdelta$$
%$$\epsilon\bepsilon$$
%$$\zeta\bzeta$$
%$$\eta\boldeta$$
%$$\theta\btheta$$
%$$\iota\biota$$
%$$\kappa\bkappa$$
%$$\lambda\blambda$$
%$$\mu\bmu$$
%$$\nu\bnu$$
%$$\xi\bxi$$
%$$\pi\bpi$$
%$$\rho\brho$$
%$$\sigma\bsigma$$
%$$\tau\btau$$
%$$\upsilon\bupsilon$$
%$$\phi\bphi$$
%$$\chi\bchi$$
%$$\psi\bpsi$$
%$$\omega\bomega$$
%
%$$\varepsilon\bvarepsilon$$
%$$\vartheta\bvartheta$$
%$$\varpi\bvarpi$$
%$$\varrho\bvarrho$$
%$$\varphi\bvarphi$$
%small font
\font\mibsmall=cmmib7
\def\bsigmasmall{\hbox{\mibsmall\char"1B}}
\def\Tr{\hbox{Tr}\,}
\def\Arg{\hbox{Arg}}
\def\atan{\hbox{atan}}
"""
pngmath_latex_preamble = latex_preamble
latex_elements = {"preamble": latex_preamble}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Hermes'
copyright = u'2009-2013, hp-FEM group'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '3.1'
# The full version, including alpha/beta/rc tags.
release = '3.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
highlight_language = 'c++'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'bodyfont': 'verdana, helvetica, arial, sans-serif',
'bgcolor': '#FFFFFF',
'textcolor': '#000000',
'linkcolor': '#3D5C7A',
'rightsidebar': False,
'sidebarbgcolor': '#F8F8F8',
'sidebartextcolor': '#000000',
'sidebarlinkcolor': '#3D5C7A',
'headfont': '"trebuchet ms", verdana, helvetica, arial, sans-serif',
'headbgcolor': '#FFFFFF',
'headtextcolor': '#7590AE',
'headlinkcolor': '#3D5C7A',
'codebgcolor': '#F5F5F5',
'codetextcolor': '#000000',
'relbarbgcolor': '#1553ef',
'relbartextcolor': '#000000',
'relbarlinkcolor': '#FFFFFF',
'footerbgcolor': '#FFFFFF',
'footertextcolor': '#000000'
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = 'Hermes Documentation'
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'Content'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = '.html'
# Output file base name for HTML help builder.
htmlhelp_basename = 'HermesDoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Hermes.tex', u'Hermes Documentation',
u'hp-FEM group', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
|
from common import *
import re
def editableColourStr(array):
return '#%02X%02X%02X (%d)' % tuple(array)
NICE_STR_RE = re.compile(r'^#([0-9a-fA-F]{2})([0-9a-fA-F]{2})([0-9a-fA-F]{2})\s*(?:\((\d+)\))?$')
def colourFromNiceStr(thing):
match = NICE_STR_RE.match(thing)
    if match:
        r, g, b, a = match.groups()
        try:
            return (int(r, 16), int(g, 16), int(b, 16), int(a) if a is not None else 255)
        except ValueError:
            pass
return None
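# Illustrative round trip for the two helpers above (values chosen for the
# example, not taken from an actual map file):
#   editableColourStr((61, 92, 122, 255))  -> '#3D5C7A (255)'
#   colourFromNiceStr('#3D5C7A (128)')     -> (61, 92, 122, 128)
#   colourFromNiceStr('#3D5C7A')           -> (61, 92, 122, 255)   # alpha defaults to 255
#   colourFromNiceStr('not a colour')      -> None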
class KPWorldTableModel(QtCore.QAbstractTableModel):
FIELDS = ('Name', 'World ID', 'Track ID',
'FS Text 1', 'FS Text 2',
'FS Hint 1', 'FS Hint 2',
'HUD Text 1', 'HUD Text 2',
'HUD Hue', 'HUD Saturation', 'HUD Lightness',
'Title Level')
def __init__(self, kpmap, parent=None):
QtCore.QAbstractTableModel.__init__(self, parent)
self.currentMap = kpmap
self.worlds = kpmap.worlds
def columnCount(self, parent):
return len(self.FIELDS)
def headerData(self, section, orientation, role):
if orientation == Qt.Horizontal:
if role == Qt.DisplayRole:
return self.FIELDS[section]
else:
if role == Qt.DisplayRole:
return str(self.worlds[section].uniqueKey)
return QtCore.QVariant()
def rowCount(self, parent):
if parent.isValid():
return 0
else:
return len(self.worlds)
def data(self, index, role):
if index.isValid():
entry = self.worlds[index.row()]
col = index.column()
if role == Qt.DisplayRole or role == Qt.EditRole:
if col == 0:
return entry.name
elif col == 1:
return entry.worldID
elif col == 2:
return entry.musicTrackID
elif col == 3 or col == 4:
return editableColourStr(entry.fsTextColours[col - 3])
elif col == 5 or col == 6:
return editableColourStr(entry.fsHintColours[col - 5])
elif col == 7 or col == 8:
return editableColourStr(entry.hudTextColours[col - 7])
elif col >= 9 and col <= 11:
return entry.hudHintTransform[col - 9]
elif col == 12:
return entry.titleScreenID
if role == Qt.DecorationRole:
if col == 3 or col == 4:
return QtGui.QColor(*entry.fsTextColours[col - 3])
elif col == 5 or col == 6:
return QtGui.QColor(*entry.fsHintColours[col - 5])
elif col == 7 or col == 8:
return QtGui.QColor(*entry.hudTextColours[col - 7])
return QtCore.QVariant()
def flags(self, index):
return Qt.ItemIsEnabled | Qt.ItemIsSelectable | Qt.ItemIsEditable
def setData(self, index, value, role):
if index.isValid():
if role == Qt.EditRole:
success = False
entry = self.worlds[index.row()]
col = index.column()
if col == 0:
entry.name = str(value.toString())
success = True
elif col == 1:
entry.worldID = str(value.toString())
success = True
elif col == 2:
v,ok = value.toInt()
if ok:
entry.musicTrackID = v
success = True
elif col >= 3 and col <= 8:
newCol = colourFromNiceStr(str(value.toString()))
if newCol:
success = True
if col == 3:
entry.fsTextColours = (newCol, entry.fsTextColours[1])
elif col == 4:
entry.fsTextColours = (entry.fsTextColours[0], newCol)
elif col == 5:
entry.fsHintColours = (newCol, entry.fsHintColours[1])
elif col == 6:
entry.fsHintColours = (entry.fsHintColours[0], newCol)
elif col == 7:
entry.hudTextColours = (newCol, entry.hudTextColours[1])
elif col == 8:
entry.hudTextColours = (entry.hudTextColours[0], newCol)
elif col >= 9 and col <= 11:
v,ok = value.toInt()
if ok:
new = list(entry.hudHintTransform)
new[col - 9] = v
entry.hudHintTransform = new
success = True
elif col == 12:
entry.titleScreenID = str(value.toString())
success = True
if success:
self.dataChanged.emit(index, index)
return success
return False
def addEntryToEnd(self):
self.beginInsertRows(QtCore.QModelIndex(), len(self.worlds), len(self.worlds))
entry = KPWorldDef()
entry.uniqueKey = self.currentMap.allocateWorldDefKey()
self.worlds.append(entry)
self.endInsertRows()
def removeRows(self, row, count, parent):
if not parent.isValid():
            if row >= 0 and (row + count) <= len(self.worlds):
                self.beginRemoveRows(parent, row, row+count-1)
                for i in xrange(count):
                    del self.worlds[row]
                self.endRemoveRows()
                return True
        return False
class KPWorldEditor(QtGui.QWidget):
def __init__(self, kpmap, parent=None):
QtGui.QWidget.__init__(self, parent, Qt.Window)
self.setWindowTitle('World Editor')
self.dataView = QtGui.QTableView(self)
self.addButton = QtGui.QPushButton('Add', self)
self.removeButton = QtGui.QPushButton('Remove', self)
layout = QtGui.QGridLayout(self)
layout.addWidget(self.dataView, 0, 0, 1, 2)
layout.addWidget(self.addButton, 1, 0, 1, 1)
layout.addWidget(self.removeButton, 1, 1, 1, 1)
self.model = KPWorldTableModel(kpmap, self)
self.dataView.setModel(self.model)
self.addButton.clicked.connect(self.model.addEntryToEnd)
self.removeButton.clicked.connect(self.removeCurrentEntry)
def removeCurrentEntry(self):
what = self.dataView.selectionModel().currentIndex()
if what.isValid():
what = what.row()
key = self.model.worlds[what].uniqueKey
self.model.removeRows(what, 1, QtCore.QModelIndex())
|
# Copyright 2018-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict as OD
from itertools import chain
try:
# Python 3 abstract base classes.
import collections.abc as abc
except ImportError:
import collections as abc
from evergreen_config_generator.functions import (
bootstrap, func, run_tests, s3_put)
from evergreen_config_generator.tasks import (
both_or_neither, FuncTask, MatrixTask, NamedTask, prohibit, require, Task)
from evergreen_config_lib import shell_mongoc
class CompileTask(NamedTask):
def __init__(self, task_name, tags=None, config='debug',
compression='default', continue_on_err=False,
extra_commands=None, depends_on=None, **kwargs):
super(CompileTask, self).__init__(task_name=task_name,
depends_on=depends_on,
tags=tags,
**kwargs)
self.extra_commands = extra_commands or []
# Environment variables for .evergreen/compile.sh.
self.compile_sh_opt = kwargs
if config == 'debug':
self.compile_sh_opt['DEBUG'] = 'ON'
else:
assert config == 'release'
self.compile_sh_opt['RELEASE'] = 'ON'
if compression != 'default':
self.compile_sh_opt['SNAPPY'] = (
'ON' if compression in ('all', 'snappy') else 'OFF')
self.compile_sh_opt['ZLIB'] = (
'BUNDLED' if compression in ('all', 'zlib') else 'OFF')
self.continue_on_err = continue_on_err
def to_dict(self):
task = super(CompileTask, self).to_dict()
script = ''
for opt, value in sorted(self.compile_sh_opt.items()):
script += 'export %s="%s"\n' % (opt, value)
script += "CC='${CC}' MARCH='${MARCH}' sh .evergreen/compile.sh"
task['commands'].append(shell_mongoc(script))
task['commands'].append(func('upload build'))
task['commands'].extend(self.extra_commands)
return task
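    # For illustration only (not an extra task in the config): a task built as
    # CompileTask('debug-compile-compression-zlib', compression='zlib') would
    # emit roughly this shell script from to_dict():
    #   export DEBUG="ON"
    #   export SNAPPY="OFF"
    #   export ZLIB="BUNDLED"
    #   CC='${CC}' MARCH='${MARCH}' sh .evergreen/compile.sh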
class SpecialTask(CompileTask):
def __init__(self, *args, **kwargs):
super(SpecialTask, self).__init__(*args, **kwargs)
self.add_tags('special')
class LinkTask(NamedTask):
def __init__(self, task_name, extra_commands, orchestration=True, **kwargs):
if orchestration == 'ssl':
bootstrap_commands = [bootstrap(SSL=1)]
elif orchestration:
bootstrap_commands = [bootstrap()]
else:
bootstrap_commands = []
super(LinkTask, self).__init__(
task_name=task_name,
depends_on=OD([('name', 'make-release-archive'),
('variant', 'releng')]),
commands=bootstrap_commands + extra_commands,
**kwargs)
all_tasks = [
NamedTask('check-public-headers',
commands=[shell_mongoc('sh ./.evergreen/check-public-headers.sh')]),
FuncTask('make-release-archive',
'release archive', 'upload docs', 'upload man pages',
'upload release', 'upload build'),
CompileTask('hardened-compile',
tags=['hardened'],
compression=None,
CFLAGS='-fno-strict-overflow -D_FORTIFY_SOURCE=2 -fstack-protector-all -fPIE -O',
LDFLAGS='-pie -Wl,-z,relro -Wl,-z,now'),
FuncTask('abi-compliance-check', 'abi report'),
CompileTask('debug-compile-compression-zlib',
tags=['zlib', 'compression'],
compression='zlib'),
CompileTask('debug-compile-compression-snappy',
tags=['snappy', 'compression'],
compression='snappy'),
CompileTask('debug-compile-compression',
tags=['zlib', 'snappy', 'compression'],
compression='all'),
CompileTask('debug-compile-no-align',
tags=['debug-compile'],
compression='zlib',
EXTRA_CONFIGURE_FLAGS="-DENABLE_EXTRA_ALIGNMENT=OFF"),
CompileTask('debug-compile-nosasl-nossl',
tags=['debug-compile', 'nosasl', 'nossl']),
CompileTask('debug-compile-lto', CFLAGS='-flto'),
CompileTask('debug-compile-lto-thin', CFLAGS='-flto=thin'),
SpecialTask('debug-compile-c11',
tags=['debug-compile', 'c11', 'stdflags'],
CFLAGS='-std=c11 -D_XOPEN_SOURCE=600'),
SpecialTask('debug-compile-c99',
tags=['debug-compile', 'c99', 'stdflags'],
CFLAGS='-std=c99 -D_XOPEN_SOURCE=600'),
SpecialTask('debug-compile-c89',
tags=['debug-compile', 'c89', 'stdflags'],
CFLAGS='-std=c89 -D_POSIX_C_SOURCE=200112L -pedantic'),
SpecialTask('debug-compile-valgrind',
tags=['debug-compile', 'valgrind'],
SASL='OFF',
SSL='OPENSSL',
VALGRIND='ON',
CFLAGS='-DBSON_MEMCHECK'),
SpecialTask('debug-compile-coverage',
tags=['debug-compile', 'coverage'],
COVERAGE='ON',
extra_commands=[func('upload coverage')]),
CompileTask('debug-compile-no-counters',
tags=['debug-compile', 'no-counters'],
ENABLE_SHM_COUNTERS='OFF'),
SpecialTask('debug-compile-asan-clang',
tags=['debug-compile', 'asan-clang'],
compression='zlib',
CC='clang-3.8',
CFLAGS='-fsanitize=address -fno-omit-frame-pointer'
' -DBSON_MEMCHECK',
CHECK_LOG='ON',
EXTRA_CONFIGURE_FLAGS='-DENABLE_EXTRA_ALIGNMENT=OFF',
PATH='/usr/lib/llvm-3.8/bin:$PATH'),
# include -pthread in CFLAGS on gcc to address the issue explained here:
# https://groups.google.com/forum/#!topic/address-sanitizer/JxnwgrWOLuc
SpecialTask('debug-compile-asan-gcc',
compression='zlib',
CFLAGS='-fsanitize=address -pthread',
CHECK_LOG='ON',
EXTRA_CONFIGURE_FLAGS="-DENABLE_EXTRA_ALIGNMENT=OFF"),
SpecialTask('debug-compile-asan-clang-openssl',
tags=['debug-compile', 'asan-clang'],
compression='zlib',
CC='clang-3.8',
CFLAGS='-fsanitize=address -fno-omit-frame-pointer'
' -DBSON_MEMCHECK',
CHECK_LOG='ON',
EXTRA_CONFIGURE_FLAGS="-DENABLE_EXTRA_ALIGNMENT=OFF",
PATH='/usr/lib/llvm-3.8/bin:$PATH',
SSL='OPENSSL'),
SpecialTask('debug-compile-ubsan',
compression='zlib',
CC='clang-3.8',
CFLAGS='-fsanitize=undefined -fno-omit-frame-pointer'
' -DBSON_MEMCHECK',
CHECK_LOG='ON',
EXTRA_CONFIGURE_FLAGS="-DENABLE_EXTRA_ALIGNMENT=OFF",
PATH='/usr/lib/llvm-3.8/bin:$PATH'),
SpecialTask('debug-compile-scan-build',
tags=['clang', 'debug-compile', 'scan-build'],
continue_on_err=True,
ANALYZE='ON',
CC='clang',
extra_commands=[
func('upload scan artifacts'),
shell_mongoc('''
if find scan -name \*.html | grep -q html; then
exit 123
fi''')]),
CompileTask('compile-tracing',
TRACING='ON'),
CompileTask('release-compile',
config='release',
depends_on=OD([('name', 'make-release-archive'),
('variant', 'releng')])),
CompileTask('debug-compile-nosasl-openssl',
tags=['debug-compile', 'nosasl', 'openssl'],
SSL='OPENSSL'),
CompileTask('debug-compile-nosasl-darwinssl',
tags=['debug-compile', 'nosasl', 'darwinssl'],
SSL='DARWIN'),
CompileTask('debug-compile-nosasl-winssl',
tags=['debug-compile', 'nosasl', 'winssl'],
SSL='WINDOWS'),
CompileTask('debug-compile-sasl-nossl',
tags=['debug-compile', 'sasl', 'nossl'],
SASL='AUTO',
SSL='OFF'),
CompileTask('debug-compile-sasl-openssl',
tags=['debug-compile', 'sasl', 'openssl'],
SASL='AUTO',
SSL='OPENSSL'),
CompileTask('debug-compile-sasl-darwinssl',
tags=['debug-compile', 'sasl', 'darwinssl'],
SASL='AUTO',
SSL='DARWIN'),
CompileTask('debug-compile-sasl-winssl',
tags=['debug-compile', 'sasl', 'winssl'],
SASL='AUTO',
SSL='WINDOWS'),
CompileTask('debug-compile-sspi-nossl',
tags=['debug-compile', 'sspi', 'nossl'],
SASL='SSPI',
SSL='OFF'),
CompileTask('debug-compile-sspi-openssl',
tags=['debug-compile', 'sspi', 'openssl'],
SASL='SSPI',
SSL='OPENSSL'),
CompileTask('debug-compile-rdtscp',
ENABLE_RDTSCP='ON'),
CompileTask('debug-compile-sspi-winssl',
tags=['debug-compile', 'sspi', 'winssl'],
SASL='SSPI',
SSL='WINDOWS'),
CompileTask('debug-compile-nosrv',
tags=['debug-compile'],
SRV='OFF'),
LinkTask('link-with-cmake',
extra_commands=[
func('link sample program', BUILD_SAMPLE_WITH_CMAKE=1)]),
LinkTask('link-with-cmake-ssl',
extra_commands=[
func('link sample program',
BUILD_SAMPLE_WITH_CMAKE=1,
ENABLE_SSL=1)]),
LinkTask('link-with-cmake-snappy',
extra_commands=[
func('link sample program',
BUILD_SAMPLE_WITH_CMAKE=1,
ENABLE_SNAPPY=1)]),
LinkTask('link-with-cmake-mac',
extra_commands=[
func('link sample program', BUILD_SAMPLE_WITH_CMAKE=1)]),
LinkTask('link-with-cmake-windows',
extra_commands=[func('link sample program MSVC')]),
LinkTask('link-with-cmake-windows-ssl',
extra_commands=[func('link sample program MSVC', ENABLE_SSL=1)],
orchestration='ssl'),
LinkTask('link-with-cmake-windows-snappy',
extra_commands=[
func('link sample program MSVC', ENABLE_SNAPPY=1)]),
LinkTask('link-with-cmake-mingw',
extra_commands=[func('link sample program mingw')]),
LinkTask('link-with-pkg-config',
extra_commands=[func('link sample program')]),
LinkTask('link-with-pkg-config-mac',
extra_commands=[func('link sample program')]),
LinkTask('link-with-pkg-config-ssl',
extra_commands=[func('link sample program', ENABLE_SSL=1)]),
LinkTask('link-with-bson',
extra_commands=[func('link sample program bson')],
orchestration=False),
LinkTask('link-with-bson-mac',
extra_commands=[func('link sample program bson')],
orchestration=False),
LinkTask('link-with-bson-windows',
extra_commands=[func('link sample program MSVC bson')],
orchestration=False),
LinkTask('link-with-bson-mingw',
extra_commands=[func('link sample program mingw bson')],
orchestration=False),
NamedTask('debian-package-build',
commands=[
shell_mongoc('export IS_PATCH="${is_patch}"\n'
'sh .evergreen/debian_package_build.sh'),
s3_put(local_file='deb.tar.gz',
remote_file='${branch_name}/mongo-c-driver-debian-packages-${CURRENT_VERSION}.tar.gz',
content_type='${content_type|application/x-gzip}')]),
NamedTask('rpm-package-build',
commands=[
shell_mongoc('sh .evergreen/build_snapshot_rpm.sh'),
s3_put(local_file='rpm.tar.gz',
remote_file='${branch_name}/mongo-c-driver-rpm-packages-${CURRENT_VERSION}.tar.gz',
content_type='${content_type|application/x-gzip}')]),
NamedTask('install-uninstall-check-mingw',
depends_on=OD([('name', 'make-release-archive'),
('variant', 'releng')]),
commands=[shell_mongoc(r'''
export CC="C:/mingw-w64/x86_64-4.9.1-posix-seh-rt_v3-rev1/mingw64/bin/gcc.exe"
BSON_ONLY=1 cmd.exe /c .\\.evergreen\\install-uninstall-check-windows.cmd
cmd.exe /c .\\.evergreen\\install-uninstall-check-windows.cmd''')]),
NamedTask('install-uninstall-check-msvc',
depends_on=OD([('name', 'make-release-archive'),
('variant', 'releng')]),
commands=[shell_mongoc(r'''
export CC="Visual Studio 14 2015 Win64"
BSON_ONLY=1 cmd.exe /c .\\.evergreen\\install-uninstall-check-windows.cmd
cmd.exe /c .\\.evergreen\\install-uninstall-check-windows.cmd''')]),
NamedTask('install-uninstall-check',
depends_on=OD([('name', 'make-release-archive'),
('variant', 'releng')]),
commands=[shell_mongoc(r'''
DESTDIR="$(pwd)/dest" sh ./.evergreen/install-uninstall-check.sh
BSON_ONLY=1 sh ./.evergreen/install-uninstall-check.sh
sh ./.evergreen/install-uninstall-check.sh''')]),
]
class IntegrationTask(MatrixTask):
    name_prefix = 'test'
    axes = OD([('valgrind', ['valgrind', False]),
('asan', ['asan', False]),
('coverage', ['coverage', False]),
('version', ['latest', '4.0', '3.6', '3.4', '3.2', '3.0']),
('topology', ['server', 'replica_set', 'sharded_cluster']),
('auth', [True, False]),
('sasl', ['sasl', 'sspi', False]),
('ssl', ['openssl', 'darwinssl', 'winssl', False])])
def __init__(self, *args, **kwargs):
super(IntegrationTask, self).__init__(*args, **kwargs)
if self.valgrind:
self.add_tags('test-valgrind')
self.options['exec_timeout_secs'] = 7200
elif self.coverage:
self.add_tags('test-coverage')
self.options['exec_timeout_secs'] = 3600
elif self.asan:
self.add_tags('test-asan')
self.options['exec_timeout_secs'] = 3600
else:
self.add_tags(self.topology,
self.version,
self.display('ssl'),
self.display('sasl'),
self.display('auth'))
# E.g., test-latest-server-auth-sasl-ssl needs debug-compile-sasl-ssl.
# Coverage tasks use a build function instead of depending on a task.
if self.valgrind:
self.add_dependency('debug-compile-valgrind')
elif self.asan and self.ssl:
self.add_dependency('debug-compile-asan-clang-%s' % (
self.display('ssl'),))
elif self.asan:
self.add_dependency('debug-compile-asan-clang')
elif not self.coverage:
self.add_dependency('debug-compile-%s-%s' % (
self.display('sasl'), self.display('ssl')))
@property
def name(self):
def name_part(axis_name):
part = self.display(axis_name)
if part == 'replica_set':
return 'replica-set'
elif part == 'sharded_cluster':
return 'sharded'
return part
return self.name_prefix + '-' + '-'.join(
name_part(axis_name) for axis_name in self.axes
if getattr(self, axis_name) or axis_name in ('auth', 'sasl', 'ssl'))
def to_dict(self):
task = super(IntegrationTask, self).to_dict()
commands = task['commands']
if self.depends_on:
commands.append(
func('fetch build', BUILD_NAME=self.depends_on['name']))
if self.coverage:
commands.append(func('debug-compile-coverage-notest-%s-%s' % (
self.display('sasl'), self.display('ssl')
)))
commands.append(bootstrap(VERSION=self.version,
TOPOLOGY=self.topology,
AUTH='auth' if self.auth else 'noauth',
SSL=self.display('ssl')))
commands.append(run_tests(VALGRIND=self.on_off('valgrind'),
ASAN=self.on_off('asan'),
AUTH=self.display('auth'),
SSL=self.display('ssl')))
if self.coverage:
commands.append(func('update codecov.io'))
return task
def _check_allowed(self):
if self.valgrind:
prohibit(self.asan)
prohibit(self.sasl)
require(self.ssl in ('openssl', False))
prohibit(self.coverage)
# Valgrind only with auth+SSL or no auth + no SSL.
if self.auth:
require(self.ssl == 'openssl')
else:
prohibit(self.ssl)
if self.auth:
require(self.ssl)
if self.sasl == 'sspi':
            # SSPI is only tested in one configuration: auth against the
            # latest standalone server with WinSSL.
require(self.topology == 'server')
require(self.version == 'latest')
require(self.ssl == 'winssl')
require(self.auth)
if not self.ssl:
prohibit(self.sasl)
if self.coverage:
prohibit(self.sasl)
if self.auth:
require(self.ssl == 'openssl')
else:
prohibit(self.ssl)
if self.asan:
prohibit(self.sasl)
prohibit(self.coverage)
# Address sanitizer only with auth+SSL or no auth + no SSL.
if self.auth:
require(self.ssl == 'openssl')
else:
prohibit(self.ssl)
all_tasks = chain(all_tasks, IntegrationTask.matrix())
class DNSTask(MatrixTask):
axes = OD([('auth', [False, True]),
('ssl', ['openssl', 'winssl', 'darwinssl'])])
name_prefix = 'test-dns'
def __init__(self, *args, **kwargs):
super(DNSTask, self).__init__(*args, **kwargs)
sasl = 'sspi' if self.ssl == 'winssl' else 'sasl'
self.add_dependency('debug-compile-%s-%s' % (sasl, self.display('ssl')))
@property
def name(self):
return self.name_prefix + '-' + '-'.join(
self.display(axis_name) for axis_name in self.axes
if getattr(self, axis_name))
def to_dict(self):
task = super(MatrixTask, self).to_dict()
commands = task['commands']
commands.append(
func('fetch build', BUILD_NAME=self.depends_on['name']))
orchestration = bootstrap(TOPOLOGY='replica_set',
AUTH='auth' if self.auth else 'noauth',
SSL='ssl')
if self.auth:
orchestration['vars']['AUTHSOURCE'] = 'thisDB'
orchestration['vars']['ORCHESTRATION_FILE'] = 'auth-thisdb-ssl'
commands.append(orchestration)
commands.append(run_tests(SSL='ssl',
AUTH=self.display('auth'),
DNS='dns-auth' if self.auth else 'on'))
return task
all_tasks = chain(all_tasks, DNSTask.matrix())
class CompressionTask(MatrixTask):
axes = OD([('compression', ['zlib', 'snappy', 'compression'])])
name_prefix = 'test-latest-server'
def __init__(self, *args, **kwargs):
super(CompressionTask, self).__init__(*args, **kwargs)
self.add_dependency('debug-compile-' + self._compressor_suffix())
self.add_tags('compression', 'latest')
self.add_tags(*self._compressor_list())
@property
def name(self):
return self.name_prefix + '-' + self._compressor_suffix()
def to_dict(self):
task = super(CompressionTask, self).to_dict()
commands = task['commands']
commands.append(func('fetch build', BUILD_NAME=self.depends_on['name']))
if self.compression == 'compression':
orchestration_file = 'snappy-zlib'
else:
orchestration_file = self.compression
commands.append(bootstrap(
AUTH='noauth',
SSL='nossl',
ORCHESTRATION_FILE=orchestration_file))
commands.append(run_tests(
AUTH='noauth',
SSL='nossl',
COMPRESSORS=','.join(self._compressor_list())))
return task
def _compressor_suffix(self):
if self.compression == 'zlib':
return 'compression-zlib'
elif self.compression == 'snappy':
return 'compression-snappy'
else:
return 'compression'
def _compressor_list(self):
if self.compression == 'zlib':
return ['zlib']
elif self.compression == 'snappy':
return ['snappy']
else:
return ['snappy', 'zlib']
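# For example (illustrative only), the CompressionTask with compression='snappy'
# depends on 'debug-compile-compression-snappy', is named
# 'test-latest-server-compression-snappy', and runs the tests with
# COMPRESSORS='snappy'.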
all_tasks = chain(all_tasks, CompressionTask.matrix())
class SpecialIntegrationTask(NamedTask):
def __init__(self, task_name, depends_on='debug-compile-sasl-openssl',
extra_commands=None, uri=None,
tags=None, version='latest', topology='server'):
commands = [func('fetch build', BUILD_NAME=depends_on),
bootstrap(VERSION=version, TOPOLOGY=topology),
run_tests(uri)] + (extra_commands or [])
super(SpecialIntegrationTask, self).__init__(task_name,
commands=commands,
depends_on=depends_on,
tags=tags)
all_tasks = chain(all_tasks, [
# Verify that retryWrites=true is ignored with standalone.
SpecialIntegrationTask('retry-true-latest-server',
uri='mongodb://localhost/?retryWrites=true'),
# Verify that retryWrites=true is ignored with old server.
SpecialIntegrationTask('retry-true-3.4-replica-set',
version='3.4',
topology='replica_set'),
SpecialIntegrationTask('test-latest-server-hardened',
'hardened-compile',
tags=['hardened', 'latest']),
])
class AuthTask(MatrixTask):
axes = OD([('sasl', ['sasl', 'sspi', False]),
('ssl', ['openssl', 'darwinssl', 'winssl'])])
name_prefix = 'authentication-tests'
def __init__(self, *args, **kwargs):
super(AuthTask, self).__init__(*args, **kwargs)
self.add_tags('authentication-tests',
self.display('ssl'),
self.display('sasl'))
self.add_dependency('debug-compile-%s-%s' % (
self.display('sasl'), self.display('ssl')))
self.commands.extend([
func('fetch build', BUILD_NAME=self.depends_on['name']),
func('run auth tests')])
@property
def name(self):
rv = self.name_prefix + '-' + self.display('ssl')
if self.sasl:
return rv
else:
return rv + '-nosasl'
def _check_allowed(self):
both_or_neither(self.ssl == 'winssl', self.sasl == 'sspi')
if not self.sasl:
require(self.ssl == 'openssl')
all_tasks = chain(all_tasks, AuthTask.matrix())
class PostCompileTask(NamedTask):
def __init__(self, *args, **kwargs):
super(PostCompileTask, self).__init__(*args, **kwargs)
self.commands.insert(
0, func('fetch build', BUILD_NAME=self.depends_on['name']))
all_tasks = chain(all_tasks, [
PostCompileTask(
'test-valgrind-memcheck-mock-server',
tags=['test-valgrind'],
depends_on='debug-compile-valgrind',
commands=[func('run mock server tests', VALGRIND='on', SSL='ssl')]),
PostCompileTask(
'test-asan-memcheck-mock-server',
tags=['test-asan'],
depends_on='debug-compile-asan-clang',
commands=[func('run mock server tests', ASAN='on', SSL='ssl')]),
# Compile with a function, not a task: gcov files depend on the absolute
# path of the executable, so we can't compile as a separate task.
NamedTask(
'test-coverage-mock-server',
tags=['test-coverage'],
commands=[func('debug-compile-coverage-notest-nosasl-openssl'),
func('run mock server tests', SSL='ssl'),
func('update codecov.io')]),
NamedTask(
'test-coverage-latest-server-dns',
tags=['test-coverage'],
exec_timeout_secs=3600,
commands=[func('debug-compile-coverage-notest-nosasl-openssl'),
bootstrap(TOPOLOGY='replica_set', AUTH='auth', SSL='ssl'),
run_tests(AUTH='auth', SSL='ssl', DNS='on'),
func('update codecov.io')]),
NamedTask(
'authentication-tests-memcheck',
tags=['authentication-tests', 'valgrind'],
exec_timeout_seconds=3600,
commands=[
shell_mongoc("""
VALGRIND=ON DEBUG=ON CC='${CC}' MARCH='${MARCH}' SASL=AUTO \
SSL=OPENSSL CFLAGS='-DBSON_MEMCHECK' sh .evergreen/compile.sh
"""),
func('run auth tests', valgrind='true')]),
])
class SSLTask(Task):
def __init__(self, version, patch, cflags=None, fips=False, **kwargs):
full_version = version + patch + ('-fips' if fips else '')
script = ''
if cflags:
script += 'export CFLAGS=%s\n' % (cflags,)
script += "DEBUG=ON CC='${CC}' MARCH='${MARCH}' SASL=OFF"
if 'libressl' in version:
script += " SSL=LIBRESSL"
else:
script += " SSL=OPENSSL"
if fips:
script += " OPENSSL_FIPS=1"
script += " sh .evergreen/compile.sh"
super(SSLTask, self).__init__(commands=[
func('install ssl', SSL=full_version),
shell_mongoc(script),
func('run auth tests', **kwargs),
func('upload build')])
self.version = version
self.fips = fips
@property
def name(self):
s = 'build-and-run-authentication-tests-' + self.version
if self.fips:
return s + '-fips'
return s
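    # Illustrative example (not an additional task): SSLTask('openssl-1.0.1', 'u',
    # cflags='-Wno-redundant-decls', fips=True) is named
    # 'build-and-run-authentication-tests-openssl-1.0.1-fips', installs SSL
    # version 'openssl-1.0.1u-fips', and compiles with roughly:
    #   export CFLAGS=-Wno-redundant-decls
    #   DEBUG=ON CC='${CC}' MARCH='${MARCH}' SASL=OFF SSL=OPENSSL OPENSSL_FIPS=1 sh .evergreen/compile.sh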
all_tasks = chain(all_tasks, [
SSLTask('openssl-0.9.8', 'zh', obsolete_tls=True),
SSLTask('openssl-1.0.0', 't', obsolete_tls=True),
SSLTask('openssl-1.0.1', 'u', cflags='-Wno-redundant-decls'),
SSLTask('openssl-1.0.1', 'u', cflags='-Wno-redundant-decls', fips=True),
SSLTask('openssl-1.0.2', 'l'),
SSLTask('openssl-1.1.0', 'f'),
SSLTask('libressl-2.5', '.2', require_tls12=True),
NamedTask('compile-libmongocapi',
commands=[shell_mongoc(r'''
. ./.evergreen/find-cmake.sh
${setup_android_toolchain|}
export ${libmongocapi_compile_env|}
mkdir cmake-build-libmongocapi
$CMAKE \
-DCMAKE_INSTALL_PREFIX=cmake-build-libmongocapi \
-DENABLE_SNAPPY=OFF \
-DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF \
-DENABLE_ZLIB=OFF -DENABLE_SSL=OFF \
-DENABLE_SASL=OFF \
-DENABLE_TESTS=OFF \
-DENABLE_SRV=OFF \
-DENABLE_EXAMPLES=OFF \
-DENABLE_STATIC=OFF \
-DENABLE_SHM_COUNTERS=OFF \
${libmongocapi_cmake_flags}
make install VERBOSE=1''')]),
])
class IPTask(MatrixTask):
axes = OD([('client', ['ipv6', 'ipv4', 'localhost']),
('server', ['ipv6', 'ipv4'])])
name_prefix = 'test-latest'
def __init__(self, *args, **kwargs):
super(IPTask, self).__init__(*args, **kwargs)
self.add_tags('nossl', 'nosasl', 'server', 'ipv4-ipv6', 'latest')
self.add_dependency('debug-compile-nosasl-nossl')
self.commands.extend([
func('fetch build', BUILD_NAME=self.depends_on['name']),
bootstrap(IPV4_ONLY=self.on_off(server='ipv4')),
run_tests(IPV4_ONLY=self.on_off(server='ipv4'),
URI={'ipv6': 'mongodb://[::1]/',
'ipv4': 'mongodb://127.0.0.1/',
'localhost': 'mongodb://localhost/'}[self.client])])
def display(self, axis_name):
return axis_name + '-' + getattr(self, axis_name)
@property
def name(self):
return '-'.join([
self.name_prefix, self.display('server'), self.display('client'),
'noauth', 'nosasl', 'nossl'])
def _check_allowed(self):
# This would fail by design.
if self.server == 'ipv4':
prohibit(self.client == 'ipv6')
# Default configuration is tested in other variants.
if self.server == 'ipv6':
prohibit(self.client == 'localhost')
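# Illustrative example: the IPTask with server='ipv4' and client='localhost' is
# named 'test-latest-server-ipv4-client-localhost-noauth-nosasl-nossl' and
# points the tests at 'mongodb://localhost/'.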
all_tasks = chain(all_tasks, IPTask.matrix())
all_tasks = list(all_tasks)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
from datetime import date
# disable caching when changing settings
LOAD_CONTENT_CACHE = False
AUTHOR = u'Dragos Stanciu'
SITENAME = u'Dragos Stanciu'
SITEURL = ''
##########################
### Flex theme options ###
##########################
SITETITLE = AUTHOR
SITESUBTITLE = u'Software Developer'
SITEDESCRIPTION = u'Dragos Stanciu\'s website'
SITELOGO = u'https://secure.gravatar.com/avatar/9520bffa0515e54859f849fc8b6b99e1?size=400'
#MAIN_MENU = True
MAIN_MENU = False
# Times and dates
DEFAULT_DATE_FORMAT = '%d %b, %Y'
TIMEZONE = 'Europe/Paris'
DEFAULT_LANG = u'en'
PATH = 'content'
#######################
### Static homepage ###
#######################
# found in content/pages/home.md
# Uncomment the two lines below to use a static page as the home page
INDEX_SAVE_AS = 'blog/index.html'
LINKS = (('Dragon Programmer blog', 'https://dragonprogrammer.com/'),) # add link to blog
# Set article URL
ARTICLE_URL = 'blog/{date:%Y}/{date:%m}/{slug}/'
ARTICLE_SAVE_AS = 'blog/{date:%Y}/{date:%m}/{slug}/index.html'
# Set page URL
PAGE_URL = '{slug}/'
PAGE_SAVE_AS = '{slug}/index.html'
CATEGORY_URL = 'blog/category/{slug}/'
CATEGORY_SAVE_AS = 'blog/category/{slug}/index.html'
TAG_URL = 'blog/tag/{slug}/'
TAG_SAVE_AS = 'blog/tag/{slug}/index.html'
# don't need author pages, as I'm the only author
AUTHOR_URL = ''
AUTHOR_SAVE_AS = ''
# create per year and per month archives
YEAR_ARCHIVE_SAVE_AS = 'blog/{date:%Y}/index.html'
MONTH_ARCHIVE_SAVE_AS = 'blog/{date:%Y}/{date:%m}/index.html'
ARCHIVES_SAVE_AS = 'blog/archives.html'
CATEGORIES_SAVE_AS = 'blog/categories.html'
TAGS_SAVE_AS = 'blog/tags.html'
DEFAULT_PAGINATION = 10
STATIC_PATHS = ['images', 'figures', 'downloads', 'extra/CNAME', 'extra/robots.txt', 'extra/favicon.ico']
EXTRA_PATH_METADATA = {
'extra/CNAME': {'path': 'CNAME'},
'extra/robots.txt': {'path': 'robots.txt'},
'extra/favicon.ico': {'path': 'favicon.ico'}
}
THEME = '/home/dragos/src/pelican-themes/Flex'
PLUGIN_PATHS = ['/home/dragos/src/pelican-plugins']
PLUGINS = ['sitemap']
# Sitemap
SITEMAP = {
'format': 'xml',
'priorities': {
'articles': 0.6,
'indexes': 0.5,
'pages': 0.5
},
'changefreqs': {
'articles': 'monthly',
'indexes': 'daily',
'pages': 'monthly'
},
'exclude': ['tag/', 'category/'],
}
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None
# Social widget
SOCIAL = (('linkedin', 'https://www.linkedin.com/in/dragosstanciu'),
('github', 'https://github.com/dnstanciu'),
('twitter', 'https://twitter.com/dnstanciu'),
('facebook', 'https://www.facebook.com/DragonProgrammer/'),
('youtube', 'https://www.youtube.com/channel/UCvxBX7213FF2JCQC68Dx50A'))
COPYRIGHT_YEAR = date.today().year
# MENUITEMS = (('Archives', '/blog/archives.html'),
# ('Categories', '/blog/categories.html'))#,
#('Tags', '/blog/tags.html'),)
# Uncomment following line if you want document-relative URLs when developing
#RELATIVE_URLS = True
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# === This file is part of Calamares - <https://calamares.io> ===
#
# SPDX-FileCopyrightText: 2016-2017 Teo Mrnjavac <teo@kde.org>
# SPDX-FileCopyrightText: 2017 Alf Gaida <agaida@siduction.org>
# SPDX-License-Identifier: GPL-3.0-or-later
#
# Calamares is Free Software: see the License-Identifier above.
#
import platform
from PythonQt.QtGui import *
import PythonQt.calamares as calamares
# WARNING: the Calamares PythonQt API is considered EXPERIMENTAL as of
# Calamares 2.5. It comes with no promise or commitment to API stability.
# Set up translations.
# You may skip this if your Calamares module has no user visible strings.
# DO NOT install _ into the builtin namespace because each module loads
# its own catalog.
# DO use the gettext class-based API and manually alias _ as described in:
# https://docs.python.org/3.5/library/gettext.html#localizing-your-module
import gettext
import inspect
import os
_filename = inspect.getframeinfo(inspect.currentframe()).filename
_path = os.path.dirname(os.path.abspath(_filename))
_ = gettext.gettext
# Example Python ViewModule.
# A Python ViewModule is a Python program which defines a ViewStep class.
# One UI module ==> one ViewStep.
# This class must be marked with the @calamares_module decorator. A
# ViewModule may define other classes, but only one may be decorated with
# @calamares_module. Such a class must conform to the Calamares ViewStep
# interface and functions as the entry point of the module.
# A ViewStep manages one or more "wizard pages" through methods like
# back/next, and reports its status through isNextEnabled/isBackEnabled/
# isAtBeginning/isAtEnd. The whole UI, including all the pages, must be
# exposed as a single QWidget, returned by the widget function.
#
# For convenience, both C++ and PythonQt ViewSteps are considered to be
# implementations of ViewStep.h. Additionally, the Calamares PythonQt API
# allows Python developers to keep their identifiers more Pythonic on the
# Python side. Thus, all of the following are considered valid method
# identifiers in a ViewStep implementation: isNextEnabled, isnextenabled,
# is_next_enabled.
@calamares_module
class DummyPythonQtViewStep:
def __init__(self):
# Importing PythonQt.QtGui provides access to most Qt widget classes.
self.main_widget = QFrame()
self.main_widget.setLayout(QVBoxLayout())
label = QLabel()
self.main_widget.layout().addWidget(label)
accumulator = "\nCalamares+PythonQt running embedded Python " +\
platform.python_version()
label.text = accumulator
btn = QPushButton()
# Python strings can be used wherever a method wants a QString. Python
# gettext translations can be used seamlessly as well.
btn.setText(_("Click me!"))
self.main_widget.layout().addWidget(btn)
# The syntax for signal-slot connections is very simple, though
# slightly different from the C++ equivalent. There are no SIGNAL and
# SLOT macros, and a signal can be connected to any Python method
# (without a special "slot" designation).
btn.connect("clicked(bool)", self.on_btn_clicked)
def on_btn_clicked(self):
self.main_widget.layout().addWidget(QLabel(_("A new QLabel.")))
def prettyName(self):
return _("Dummy PythonQt ViewStep")
def isNextEnabled(self):
return True # The "Next" button should be clickable
def isBackEnabled(self):
return True # The "Back" button should be clickable
def isAtBeginning(self):
# True means the currently shown UI page is the first page of this
# module, thus a "Back" button click will not be handled by this
# module and will cause a skip to the previous ViewStep instead
# (if any). False means that the present ViewStep provides other UI
# pages placed logically "before" the current one, thus a "Back" button
# click will be handled by this module instead of skipping to another
# ViewStep. A module (ViewStep) with only one page will always return
# True here.
return True
def isAtEnd(self):
# True means the currently shown UI page is the last page of this
# module, thus a "Next" button click will not be handled by this
# module and will cause a skip to the next ViewStep instead (if any).
# False means that the present ViewStep provides other UI pages placed
# logically "after" the current one, thus a "Next" button click will
# be handled by this module instead of skipping to another ViewStep.
# A module (ViewStep) with only one page will always return True here.
return True
def jobs(self):
# Returns a list of objects that implement Calamares::Job.
return [DummyPQJob("Dummy PythonQt job reporting for duty")]
def widget(self):
# Returns the base QWidget of this module's UI.
return self.main_widget
def retranslate(self, locale_name):
# This is where it gets slightly weird. In most desktop applications we
# shouldn't need this kind of mechanism, because we could assume that
# the operating environment is configured to use a certain language.
# Usually the user would change the system-wide language in a settings
# UI, restart the application, done.
# Alas, Calamares runs on an unconfigured live system, and one of the
# core features of Calamares is to allow the user to pick a language.
# Unfortunately, strings in the UI do not automatically react to a
# runtime language change. To get UI strings in a new language, all
# user-visible strings must be retranslated (by calling tr() in C++ or
# _() in Python) and reapplied on the relevant widgets.
# When the user picks a new UI translation language, Qt raises a QEvent
# of type LanguageChange, which propagates through the QObject
# hierarchy. By catching and reacting to this event, we can show
# user-visible strings in the new language at the right time.
# The C++ side of the Calamares PythonQt API catches the LanguageChange
# event and calls the present method. It is then up to the module
# developer to add here all the needed code to load the module's
# translation catalog for the new language (which is separate from the
# main Calamares strings catalog) and reapply any user-visible strings.
calamares.utils.debug("PythonQt retranslation event "
"for locale name: {}".format(locale_name))
# First we load the catalog file for the new language...
try:
global _
_t = gettext.translation('dummypythonqt',
localedir=os.path.join(_path, 'lang'),
languages=[locale_name])
_ = _t.gettext
except OSError as e:
calamares.utils.debug(e)
pass
# ... and then we can call setText(_("foo")) and similar methods on
# the relevant widgets here to reapply the strings.
# An example Job class. Implements Calamares::Job. For method identifiers, the
# same rules apply as for ViewStep. No decorators are necessary here, because
# only the ViewStep implementation is the unique entry point, and a module can
# have any number of jobs.
class DummyPQJob:
def __init__(self, my_msg):
self.my_msg = my_msg
def pretty_name(self):
return _("The Dummy PythonQt Job")
def pretty_description(self):
return _("This is the Dummy PythonQt Job. "
"The dummy job says: {}").format(self.my_msg)
def pretty_status_message(self):
return _("A status message for Dummy PythonQt Job.")
def exec(self):
# As an example, we touch a file in the target root filesystem.
rmp = calamares.global_storage['rootMountPoint']
os.system("touch {}/calamares_dpqt_was_here".format(rmp))
calamares.utils.debug("the dummy job says {}".format(self.my_msg))
return {'ok': True}
|
# Copyright (C) 2014 Statoil ASA, Norway.
#
# The file 'load_results_tool.py' is part of ERT - Ensemble based Reservoir Tool.
#
# ERT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ERT is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License at <http://www.gnu.org/licenses/gpl.html>
# for more details.
from ert_gui.models.connectors.load_results import LoadResultsModel
from ert_gui.tools import Tool
from ert_gui.tools.load_results import LoadResultsPanel
from ert_gui.widgets import util
from ert_gui.widgets.closable_dialog import ClosableDialog
class LoadResultsTool(Tool):
def __init__(self):
super(LoadResultsTool, self).__init__("Load results manually", "tools/load_manually", util.resourceIcon("ide/table_import"))
self.__import_widget = None
self.__dialog = None
self.setVisible(False)
def trigger(self):
if self.__import_widget is None:
self.__import_widget = LoadResultsPanel()
self.__dialog = ClosableDialog("Load results manually", self.__import_widget, self.parent())
self.__import_widget.setCurrectCase()
self.__dialog.addButton("Load", self.load)
self.__dialog.exec_()
def load(self):
self.__import_widget.load()
self.__dialog.accept()
def toggleAdvancedMode(self, advanced_mode):
self.setVisible(advanced_mode)
if not LoadResultsModel().isValidRunPath():
self.setEnabled(False)
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""This file compares the difference between parameters of two models."""
from bert import modeling
from bert import tokenization
from bert_extraction.steal_bert_classifier.models import run_classifier
import tensorflow.compat.v1 as tf
flags = tf.flags
FLAGS = flags.FLAGS
## Other parameters
flags.DEFINE_string(
"init_checkpoint1", None,
"Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_string("bert_config_file1", None,
"BERT config file for the first model.")
flags.DEFINE_string(
"init_checkpoint2", None,
"Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_string("bert_config_file2", None,
"BERT config file for the second model.")
flags.DEFINE_string("diff_type", "euclidean",
"Type of difference function to be used.")
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
processors = {
"sst-2": run_classifier.SST2Processor,
"mnli": run_classifier.MnliProcessor
}
tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,
FLAGS.init_checkpoint1)
tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,
FLAGS.init_checkpoint2)
bert_config1 = modeling.BertConfig.from_json_file(FLAGS.bert_config_file1)
bert_config2 = modeling.BertConfig.from_json_file(FLAGS.bert_config_file2)
if FLAGS.max_seq_length > bert_config1.max_position_embeddings:
raise ValueError(
"Cannot use sequence length %d because the BERT model "
"was only trained up to sequence length %d" %
(FLAGS.max_seq_length, bert_config1.max_position_embeddings))
task_name = FLAGS.task_name.lower()
if task_name not in processors:
raise ValueError("Task not found: %s" % (task_name,))
processor = processors[task_name]()
input_ids = tf.placeholder(dtype=tf.int32, shape=(None, FLAGS.max_seq_length))
input_mask = tf.placeholder(
dtype=tf.int32, shape=(None, FLAGS.max_seq_length))
segment_ids = tf.placeholder(
dtype=tf.int32, shape=(None, FLAGS.max_seq_length))
label_ids = tf.placeholder(dtype=tf.int32, shape=(None,))
num_labels = len(processor.get_labels())
with tf.variable_scope("model1"):
run_classifier.create_model(
bert_config1,
False,
input_ids,
input_mask,
segment_ids,
label_ids,
num_labels,
use_one_hot_embeddings=False)
vars1 = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="model1")
with tf.variable_scope("model2"):
run_classifier.create_model(
bert_config2,
False,
input_ids,
input_mask,
segment_ids,
label_ids,
num_labels,
use_one_hot_embeddings=False)
vars2 = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="model2")
tf.train.init_from_checkpoint(
FLAGS.init_checkpoint1,
{"%s" % v.name[v.name.index("/") + 1:].split(":")[0]: v for v in vars1})
tf.train.init_from_checkpoint(
FLAGS.init_checkpoint2,
{"%s" % v.name[v.name.index("/") + 1:].split(":")[0]: v for v in vars2})
def abs_diff(var_name):
with tf.variable_scope("model1", reuse=True):
var1 = tf.get_variable(var_name)
with tf.variable_scope("model2", reuse=True):
var2 = tf.get_variable(var_name)
return tf.math.abs(tf.math.subtract(var1, var2))
def sq_diff(var_name):
with tf.variable_scope("model1", reuse=True):
var1 = tf.get_variable(var_name)
with tf.variable_scope("model2", reuse=True):
var2 = tf.get_variable(var_name)
return tf.math.subtract(var1, var2) * tf.math.subtract(var1, var2)
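  # The loop below accumulates per-variable differences. With diff_type
  # 'euclidean' it sums the squared element-wise differences and takes a square
  # root at the end (an L2 norm over all parameters); otherwise it sums
  # absolute differences and divides by the parameter count (a mean absolute
  # difference).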
total_diff = 0.0
total_params = 0
bert_diff = 0.0
bert_params = 0
classifier_diff = 0.0
classifier_params = 0
for var in vars1:
if FLAGS.diff_type == "euclidean":
var_diff = tf.reduce_sum(
sq_diff(var.name[var.name.index("/") + 1:var.name.index(":")]))
else:
var_diff = tf.reduce_sum(
abs_diff(var.name[var.name.index("/") + 1:var.name.index(":")]))
var_params = 1
shape = var.get_shape()
for dim in shape:
var_params *= dim
total_diff += var_diff
total_params += var_params
# Setup for BERT parameters
if "bert" in var.name:
bert_diff += var_diff
bert_params += var_params
else:
classifier_diff += var_diff
classifier_params += var_params
if FLAGS.diff_type == "euclidean":
total_diff = tf.sqrt(total_diff)
bert_diff = tf.sqrt(bert_diff)
classifier_diff = tf.sqrt(classifier_diff)
else:
total_diff = total_diff / tf.cast(total_params, tf.float32)
bert_diff = bert_diff / tf.cast(bert_params, tf.float32)
classifier_diff = classifier_diff / tf.cast(classifier_params, tf.float32)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
tf.logging.info("average diff in all params = %.8f", sess.run(total_diff))
tf.logging.info("average diff in bert params = %.8f", sess.run(bert_diff))
tf.logging.info("average diff in classifier params = %.8f",
sess.run(classifier_diff))
return
if __name__ == "__main__":
flags.mark_flag_as_required("task_name")
flags.mark_flag_as_required("vocab_file")
flags.mark_flag_as_required("bert_config_file1")
flags.mark_flag_as_required("bert_config_file2")
flags.mark_flag_as_required("init_checkpoint1")
flags.mark_flag_as_required("init_checkpoint2")
tf.app.run()
|
# -*- coding: utf-8 -*-
import json
import re
from datetime import timedelta
import pycurl
from pyload.core.network.http.exceptions import BadHeader
from pyload.core.utils import seconds
from ..anticaptchas.ReCaptcha import ReCaptcha
from ..anticaptchas.SolveMedia import SolveMedia
from ..base.simple_downloader import SimpleDownloader
class RapidgatorNet(SimpleDownloader):
__name__ = "RapidgatorNet"
__type__ = "downloader"
__version__ = "0.54"
__status__ = "testing"
__pyload_version__ = "0.5"
__pattern__ = r"https?://(?:www\.)?(?:rapidgator\.net|rg\.to)/file/\w+"
__config__ = [
("enabled", "bool", "Activated", True),
("use_premium", "bool", "Use premium account if available", True),
("fallback", "bool", "Fallback to free download if premium fails", True),
("chk_filesize", "bool", "Check file size", True),
("max_wait", "int", "Reconnect if waiting time is greater than minutes", 10),
]
__description__ = """Rapidgator.net downloader plugin"""
__license__ = "GPLv3"
__authors__ = [
("zoidberg", "zoidberg@mujmail.cz"),
("chrox", None),
("stickell", "l.stickell@yahoo.it"),
("Walter Purcaro", "vuolter@gmail.com"),
("GammaCode", "nitzo2001[AT]yahoo[DOT]com"),
]
COOKIES = [("rapidgator.net", "lang", "en")]
NAME_PATTERN = r"<title>Download file (?P<N>.*)</title>"
SIZE_PATTERN = r"File size:\s*<strong>(?P<S>[\d.,]+) (?P<U>[\w^_]+)</strong>"
OFFLINE_PATTERN = r">(File not found|Error 404)"
JSVARS_PATTERN = r"\s+var\s*(startTimerUrl|getDownloadUrl|captchaUrl|fid|secs)\s*=\s*\'?(.*?)\'?;"
PREMIUM_ONLY_PATTERN = (
r"You can download files up to|This file can be downloaded by premium only<"
)
DOWNLOAD_LIMIT_ERROR_PATTERN = (
r"You have reached your (daily|hourly) downloads limit"
)
IP_BLOCKED_ERROR_PATTERN = (
r"You can`t download more than 1 file at a time in free mode\." ""
)
WAIT_PATTERN = r"(?:Delay between downloads must be not less than|Try again in).+"
LINK_FREE_PATTERN = r"return \'(http://\w+.rapidgator.net/.*)\';"
RECAPTCHA_PATTERN = r'"http://api\.recaptcha\.net/challenge\?k=(.*?)"'
ADSCAPTCHA_PATTERN = r'(http://api\.adscaptcha\.com/Get\.aspx[^"\']+)'
SOLVEMEDIA_PATTERN = r'http://api\.solvemedia\.com/papi/challenge\.script\?k=(.*?)"'
URL_REPLACEMENTS = [
(r"//(?:www\.)?rg\.to/", "//rapidgator.net/"),
(r"(//rapidgator.net/file/[0-9A-z]+).*", r"\1"),
]
API_URL = "https://rapidgator.net/api/"
def api_response(self, method, **kwargs):
try:
html = self.load(self.API_URL + method, get=kwargs)
json_data = json.loads(html)
status = json_data["response_status"]
message = json_data["response_details"]
except BadHeader as exc:
status = exc.code
message = exc.message
if status == 200:
return json_data["response"]
elif status == 404:
self.offline()
elif status == 423:
self.restart(message, premium=False)
else:
self.account.relogin()
self.retry(wait=60)
def setup(self):
self.resume_download = self.multi_dl = self.premium
self.chunk_limit = -1 if self.premium else 1
def handle_premium(self, pyfile):
json_data = self.api_response(
"file/info", sid=self.account.info["data"]["sid"], url=pyfile.url
)
self.info["md5"] = json_data["hash"]
pyfile.name = json_data["filename"]
pyfile.size = json_data["size"]
json_data = self.api_response(
"file/download", sid=self.account.info["data"]["sid"], url=pyfile.url
)
self.link = json_data["url"]
def check_errors(self):
SimpleDownloader.check_errors(self)
m = re.search(self.DOWNLOAD_LIMIT_ERROR_PATTERN, self.data)
if m is not None:
self.log_warning(m.group(0))
if m.group(1) == "daily":
wait_time = seconds.to_midnight()
else:
wait_time = timedelta(hours=1).seconds
self.retry(wait=wait_time, msg=m.group(0))
m = re.search(self.IP_BLOCKED_ERROR_PATTERN, self.data)
if m is not None:
msg = self._(
"You can't download more than one file within a certain time period in free mode"
)
self.log_warning(msg)
self.retry(wait=timedelta(hours=24).seconds, msg=msg)
def handle_free(self, pyfile):
jsvars = dict(re.findall(self.JSVARS_PATTERN, self.data))
self.log_debug(jsvars)
url = "https://rapidgator.net{}?fid={}".format(
jsvars.get("startTimerUrl", "/download/AjaxStartTimer"), jsvars["fid"]
)
jsvars.update(self.get_json_response(url))
self.wait(jsvars.get("secs", 180), False)
url = "https://rapidgator.net{}?sid={}".format(
jsvars.get("getDownloadUrl", "/download/AjaxGetDownloadLink"), jsvars["sid"]
)
jsvars.update(self.get_json_response(url))
url = "https://rapidgator.net{}".format(
jsvars.get("captchaUrl", "/download/captcha")
)
self.data = self.load(url, ref=pyfile.url)
m = re.search(self.LINK_FREE_PATTERN, self.data)
if m is not None:
# self.link = m.group(1)
self.download(m.group(1), ref=url)
else:
captcha = self.handle_captcha()
if not captcha:
self.error(self._("Captcha pattern not found"))
response, challenge = captcha.challenge()
if isinstance(captcha, ReCaptcha):
post_params = {"g-recaptcha-response": response}
elif isinstance(captcha, SolveMedia):
post_params = {
"adcopy_challenge": challenge,
"adcopy_response": response,
}
post_params["DownloadCaptchaForm[verifyCode]"] = response
self.data = self.load(url, post=post_params, ref=url)
if "The verification code is incorrect" in self.data:
self.retry_captcha()
else:
m = re.search(self.LINK_FREE_PATTERN, self.data)
if m is not None:
# self.link = m.group(1)
self.download(m.group(1), ref=url)
def handle_captcha(self):
for klass in (ReCaptcha, SolveMedia):
captcha = klass(self.pyfile)
if captcha.detect_key():
self.captcha = captcha
return captcha
def get_json_response(self, url):
self.req.http.c.setopt(pycurl.HTTPHEADER, ["X-Requested-With: XMLHttpRequest"])
res = self.load(url, ref=self.pyfile.url)
self.req.http.c.setopt(pycurl.HTTPHEADER, ["X-Requested-With:"])
if not res.startswith("{"):
self.retry()
self.log_debug(url, res)
return json.loads(res)
|
import copy
from nose import with_setup
from pybbn.graph.variable import Variable
def setup():
"""
Setup.
:return: None.
"""
pass
def teardown():
"""
Teardown.
:return: None.
"""
pass
@with_setup(setup, teardown)
def test_copy():
"""
Tests variable copy.
:return: None.
"""
lhs = Variable(0, 'a', ['t', 'f'])
rhs = copy.copy(lhs)
assert lhs.id == rhs.id
assert lhs.name == rhs.name
assert len(lhs.values) == len(rhs.values)
for lhs_v, rhs_v in zip(lhs.values, rhs.values):
assert lhs_v == rhs_v
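    # copy.copy is shallow: lhs and rhs share the same values list, so a
    # mutation through lhs is visible through rhs (contrast with test_deep_copy).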
lhs.values[0] = 'true'
assert lhs.values[0] == rhs.values[0]
@with_setup(setup, teardown)
def test_deep_copy():
"""
Tests variable deepcopy.
:return: None.
"""
lhs = Variable(0, 'a', ['t', 'f'])
rhs = copy.deepcopy(lhs)
assert lhs.id == rhs.id
assert lhs.name == rhs.name
assert len(lhs.values) == len(rhs.values)
for lhs_v, rhs_v in zip(lhs.values, rhs.values):
assert lhs_v == rhs_v
lhs.values[0] = 'true'
assert lhs.values[0] != rhs.values[0]
|
import fnmatch
import os
import re
import shutil
import time
import traceback
from couchpotato import get_db
from couchpotato.api import addApiView
from couchpotato.core.event import addEvent, fireEvent, fireEventAsync
from couchpotato.core.helpers.encoding import toUnicode, ss, sp
from couchpotato.core.helpers.variable import getExt, mergeDicts, getTitle, \
getImdb, link, symlink, tryInt, splitString, fnEscape, isSubFolder, getIdentifier
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.environment import Env
from scandir import scandir
from unrar2 import RarFile
import six
from six.moves import filter
log = CPLog(__name__)
autoload = 'Renamer'
class Renamer(Plugin):
renaming_started = False
checking_snatched = False
def __init__(self):
addApiView('renamer.scan', self.scanView, docs = {
'desc': 'For the renamer to check for new files to rename in a folder',
'params': {
                'async': {'desc': 'Optional: Set to 1 to fire the renamer.scan asynchronously.'},
'media_folder': {'desc': 'Optional: The folder of the media to scan. Keep empty for default renamer folder.'},
'files': {'desc': 'Optional: Provide the release files if more releases are in the same media_folder, delimited with a \'|\'. Note that no dedicated release folder is expected for releases with one file.'},
'base_folder': {'desc': 'Optional: The folder to find releases in. Leave empty for default folder.'},
'downloader': {'desc': 'Optional: The downloader the release has been downloaded with. \'download_id\' is required with this option.'},
'download_id': {'desc': 'Optional: The nzb/torrent ID of the release in media_folder. \'downloader\' is required with this option.'},
'status': {'desc': 'Optional: The status of the release: \'completed\' (default) or \'seeding\''},
},
})
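        # Illustrative call (URL shape assumed from the usual CouchPotato API
        # routing, not defined in this file):
        #   GET .../api/<api_key>/renamer.scan/?async=1&media_folder=/downloads/Some.Movie&downloader=transmission&download_id=abc123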
addEvent('renamer.scan', self.scan)
addEvent('renamer.check_snatched', self.checkSnatched)
addEvent('app.load', self.scan)
addEvent('app.load', self.setCrons)
# Enable / disable interval
addEvent('setting.save.renamer.enabled.after', self.setCrons)
addEvent('setting.save.renamer.run_every.after', self.setCrons)
addEvent('setting.save.renamer.force_every.after', self.setCrons)
def setCrons(self):
fireEvent('schedule.remove', 'renamer.check_snatched')
if self.isEnabled() and self.conf('run_every') > 0:
fireEvent('schedule.interval', 'renamer.check_snatched', self.checkSnatched, minutes = self.conf('run_every'), single = True)
fireEvent('schedule.remove', 'renamer.check_snatched_forced')
if self.isEnabled() and self.conf('force_every') > 0:
fireEvent('schedule.interval', 'renamer.check_snatched_forced', self.scan, hours = self.conf('force_every'), single = True)
return True
def scanView(self, **kwargs):
async = tryInt(kwargs.get('async', 0))
base_folder = kwargs.get('base_folder')
media_folder = sp(kwargs.get('media_folder'))
# Backwards compatibility, to be removed after a few versions :)
if not media_folder:
media_folder = sp(kwargs.get('movie_folder'))
downloader = kwargs.get('downloader')
download_id = kwargs.get('download_id')
files = [sp(filename) for filename in splitString(kwargs.get('files'), '|')]
status = kwargs.get('status', 'completed')
release_download = None
if not base_folder and media_folder:
release_download = {'folder': media_folder}
if download_id:
release_download.update({
'id': download_id,
'downloader': downloader,
'status': status,
'files': files
})
fire_handle = fireEvent if not async else fireEventAsync
fire_handle('renamer.scan', base_folder = base_folder, release_download = release_download)
return {
'success': True
}
def scan(self, base_folder = None, release_download = None):
if not release_download: release_download = {}
if self.isDisabled():
return
if self.renaming_started is True:
log.info('Renamer is already running, if you see this often, check the logs above for errors.')
return
if not base_folder:
base_folder = sp(self.conf('from'))
from_folder = sp(self.conf('from'))
to_folder = sp(self.conf('to'))
# Get media folder to process
media_folder = release_download.get('folder')
# Quality order for calculation quality priority
quality_order = fireEvent('quality.order', single = True)
# Get all folders that should not be processed
no_process = [to_folder]
cat_list = fireEvent('category.all', single = True) or []
no_process.extend([item['destination'] for item in cat_list])
try:
if Env.setting('library', section = 'manage').strip():
no_process.extend([sp(manage_folder) for manage_folder in splitString(Env.setting('library', section = 'manage'), '::')])
except:
pass
# Check to see if the no_process folders are inside the "from" folder.
if not os.path.isdir(base_folder) or not os.path.isdir(to_folder):
log.error('Both the "To" and "From" folder have to exist.')
return
else:
for item in no_process:
if isSubFolder(item, base_folder):
log.error('To protect your data, the media libraries can\'t be inside of or the same as the "from" folder.')
return
# Check to see if the no_process folders are inside the provided media_folder
if media_folder and not os.path.isdir(media_folder):
log.debug('The provided media folder %s does not exist. Trying to find it in the \'from\' folder.', media_folder)
# Update to the from folder
if len(release_download.get('files', [])) == 1:
new_media_folder = from_folder
else:
new_media_folder = os.path.join(from_folder, os.path.basename(media_folder))
if not os.path.isdir(new_media_folder):
log.error('The provided media folder %s does not exist and could also not be found in the \'from\' folder.', media_folder)
return
# Update the files
new_files = [os.path.join(new_media_folder, os.path.relpath(filename, media_folder)) for filename in release_download.get('files', [])]
if new_files and not os.path.isfile(new_files[0]):
log.error('The provided media folder %s does not exist and its files could also not be found in the \'from\' folder.', media_folder)
return
# Update release_download info to the from folder
log.debug('Release %s found in the \'from\' folder.', media_folder)
release_download['folder'] = new_media_folder
release_download['files'] = new_files
media_folder = new_media_folder
if media_folder:
for item in no_process:
if isSubFolder(item, media_folder):
log.error('To protect your data, the media libraries can\'t be inside of or the same as the provided media folder.')
return
# Make sure a checkSnatched marked all downloads/seeds as such
if not release_download and self.conf('run_every') > 0:
self.checkSnatched(fire_scan = False)
self.renaming_started = True
# make sure the media folder name is included in the search
folder = None
files = []
if media_folder:
log.info('Scanning media folder %s...', media_folder)
folder = os.path.dirname(media_folder)
release_files = release_download.get('files', [])
if release_files:
files = release_files
# If there is only one file in the torrent, the downloader did not create a subfolder
if len(release_files) == 1:
folder = media_folder
else:
# Get all files from the specified folder
try:
for root, folders, names in scandir.walk(media_folder):
files.extend([sp(os.path.join(root, name)) for name in names])
except:
log.error('Failed getting files from %s: %s', (media_folder, traceback.format_exc()))
db = get_db()
# Extend the download info with info stored in the downloaded release
if release_download:
release_download = self.extendReleaseDownload(release_download)
# Unpack any archives
extr_files = None
if self.conf('unrar'):
folder, media_folder, files, extr_files = self.extractFiles(folder = folder, media_folder = media_folder, files = files,
cleanup = self.conf('cleanup') and not self.downloadIsTorrent(release_download))
groups = fireEvent('scanner.scan', folder = folder if folder else base_folder,
files = files, release_download = release_download, return_ignored = False, single = True) or []
folder_name = self.conf('folder_name')
file_name = self.conf('file_name')
trailer_name = self.conf('trailer_name')
nfo_name = self.conf('nfo_name')
separator = self.conf('separator')
# Tag release folder as failed_rename in case no groups were found. This prevents check_snatched from removing the release from the downloader.
if not groups and self.statusInfoComplete(release_download):
self.tagRelease(release_download = release_download, tag = 'failed_rename')
for group_identifier in groups:
group = groups[group_identifier]
rename_files = {}
remove_files = []
remove_releases = []
media_title = getTitle(group)
# Add _UNKNOWN_ if no library item is connected
if not group.get('media') or not media_title:
self.tagRelease(group = group, tag = 'unknown')
continue
# Rename the files using the library data
else:
# Media not in library, add it first
if not group['media'].get('_id'):
group['media'] = fireEvent('movie.add', params = {
'identifier': group['identifier'],
'profile_id': None
}, search_after = False, status = 'done', single = True)
else:
group['media'] = fireEvent('movie.update_info', media_id = group['media'].get('_id'), single = True)
if not group['media'] or not group['media'].get('_id'):
log.error('Could not rename, no library item to work with: %s', group_identifier)
continue
media = group['media']
media_title = getTitle(media)
# Overwrite destination when set in category
destination = to_folder
category_label = ''
if media.get('category_id') and media.get('category_id') != '-1':
try:
category = db.get('id', media['category_id'])
category_label = category['label']
if category['destination'] and len(category['destination']) > 0 and category['destination'] != 'None':
destination = category['destination']
log.debug('Setting category destination for "%s": %s' % (media_title, destination))
else:
log.debug('No category destination found for "%s"' % media_title)
except:
log.error('Failed getting category label: %s', traceback.format_exc())
# Overwrite destination when set in 3D
destination = to_folder
test3D = group['meta_data']['quality'].get('is_3d', 0)
if test3D:
if self.conf('to if 3d') and len(self.conf('to if 3d')) > 0 and self.conf('to if 3d') != 'None':
destination = self.conf('to if 3d')
log.debug('Setting 3D destination for "%s": %s' % (media_title, destination))
else:
log.debug('No 3D folder set')
if self.conf('folder_name_3d') and len(self.conf('folder_name_3d')) > 0 and self.conf('folder_name_3d') != 'None':
folder_name = self.conf('folder_name_3d')
log.debug('Setting 3D folder pattern for "%s": %s' % (media_title, folder_name))
else:
log.debug('No 3D folder pattern set')
if self.conf('file_name_3d') and len(self.conf('file_name_3d')) > 0 and self.conf('file_name_3d') != 'None':
file_name = self.conf('file_name_3d')
log.debug('Setting 3D file pattern for "%s": %s' % (media_title, file_name))
else:
log.debug('No 3D file pattern set')
# Find subtitle for renaming
group['before_rename'] = []
fireEvent('renamer.before', group)
# Add extracted files to the before_rename list
if extr_files:
group['before_rename'].extend(extr_files)
# Remove weird chars from movie name
movie_name = re.sub(r"[\x00\/\\:\*\?\"<>\|]", '', media_title)
# Put 'The' at the end
name_the = movie_name
if movie_name[:4].lower() == 'the ':
name_the = movie_name[4:] + ', The'
replacements = {
'ext': 'mkv',
'namethe': name_the.strip(),
'thename': movie_name.strip(),
'year': media['info']['year'],
'first': name_the[0].upper(),
'quality': group['meta_data']['quality']['label'],
'quality_type': group['meta_data']['quality_type'],
'video': group['meta_data'].get('video'),
'audio': group['meta_data'].get('audio'),
'group': group['meta_data']['group'],
'source': group['meta_data']['source'],
'resolution_width': group['meta_data'].get('resolution_width'),
'resolution_height': group['meta_data'].get('resolution_height'),
'audio_channels': group['meta_data'].get('audio_channels'),
'imdb_id': group['identifier'],
'cd': '',
'cd_nr': '',
'mpaa': media['info'].get('mpaa', ''),
'category': category_label,
'3d': '3D' if group['meta_data']['quality'].get('is_3d', 0) else '',
}
for file_type in group['files']:
# Move nfo depending on settings
if file_type == 'nfo' and not self.conf('rename_nfo'):
log.debug('Skipping, renaming of %s disabled', file_type)
for current_file in group['files'][file_type]:
if self.conf('cleanup') and (not self.downloadIsTorrent(release_download) or self.fileIsAdded(current_file, group)):
remove_files.append(current_file)
continue
# Subtitle extra
if file_type == 'subtitle_extra':
continue
# Move other files
multiple = len(group['files'][file_type]) > 1 and not group['is_dvd']
cd = 1 if multiple else 0
for current_file in sorted(list(group['files'][file_type])):
current_file = sp(current_file)
# Original filename
replacements['original'] = os.path.splitext(os.path.basename(current_file))[0]
replacements['original_folder'] = fireEvent('scanner.remove_cptag', group['dirname'], single = True)
# Extension
replacements['ext'] = getExt(current_file)
# cd #
replacements['cd'] = ' cd%d' % cd if multiple else ''
replacements['cd_nr'] = cd if multiple else ''
# Naming
final_folder_name = self.doReplace(folder_name, replacements, folder = True)
final_file_name = self.doReplace(file_name, replacements)
replacements['filename'] = final_file_name[:-(len(getExt(final_file_name)) + 1)]
# Meta naming
if file_type == 'trailer':
final_file_name = self.doReplace(trailer_name, replacements, remove_multiple = True)
elif file_type == 'nfo':
final_file_name = self.doReplace(nfo_name, replacements, remove_multiple = True)
# Separator replace
if separator:
final_file_name = final_file_name.replace(' ', separator)
# Move DVD files (no structure renaming)
if group['is_dvd'] and file_type == 'movie':
found = False
for top_dir in ['video_ts', 'audio_ts', 'bdmv', 'certificate']:
has_string = current_file.lower().find(os.path.sep + top_dir + os.path.sep)
if has_string >= 0:
structure_dir = current_file[has_string:].lstrip(os.path.sep)
rename_files[current_file] = os.path.join(destination, final_folder_name, structure_dir)
found = True
break
if not found:
log.error('Could not determine dvd structure for: %s', current_file)
# Do rename others
else:
if file_type == 'leftover':
if self.conf('move_leftover'):
rename_files[current_file] = os.path.join(destination, final_folder_name, os.path.basename(current_file))
elif file_type not in ['subtitle']:
rename_files[current_file] = os.path.join(destination, final_folder_name, final_file_name)
# Check for extra subtitle files
if file_type == 'subtitle':
remove_multiple = False
if len(group['files']['movie']) == 1:
remove_multiple = True
sub_langs = group['subtitle_language'].get(current_file, [])
# rename subtitles with or without language
sub_name = self.doReplace(file_name, replacements, remove_multiple = remove_multiple)
rename_files[current_file] = os.path.join(destination, final_folder_name, sub_name)
rename_extras = self.getRenameExtras(
extra_type = 'subtitle_extra',
replacements = replacements,
folder_name = folder_name,
file_name = file_name,
destination = destination,
group = group,
current_file = current_file,
remove_multiple = remove_multiple,
)
# Don't add language if multiple languages in 1 subtitle file
if len(sub_langs) == 1:
sub_name = sub_name.replace(replacements['ext'], '%s.%s' % (sub_langs[0], replacements['ext']))
rename_files[current_file] = os.path.join(destination, final_folder_name, sub_name)
rename_files = mergeDicts(rename_files, rename_extras)
# Filename without cd etc
elif file_type == 'movie':
rename_extras = self.getRenameExtras(
extra_type = 'movie_extra',
replacements = replacements,
folder_name = folder_name,
file_name = file_name,
destination = destination,
group = group,
current_file = current_file
)
rename_files = mergeDicts(rename_files, rename_extras)
group['filename'] = self.doReplace(file_name, replacements, remove_multiple = True)[:-(len(getExt(final_file_name)) + 1)]
group['destination_dir'] = os.path.join(destination, final_folder_name)
if multiple:
cd += 1
# Before renaming, remove the lower quality files
remove_leftovers = True
# Mark movie "done" once it's found the quality with the finish check
try:
if media.get('status') == 'active' and media.get('profile_id'):
profile = db.get('id', media['profile_id'])
if group['meta_data']['quality']['identifier'] in profile.get('qualities', []):
nr = profile['qualities'].index(group['meta_data']['quality']['identifier'])
finish = profile['finish'][nr]
if finish:
mdia = db.get('id', media['_id'])
mdia['status'] = 'done'
mdia['last_edit'] = int(time.time())
db.update(mdia)
except Exception as e:
log.error('Failed marking movie finished: %s', (traceback.format_exc()))
# Go over current movie releases
for release in fireEvent('release.for_media', media['_id'], single = True):
# When a release already exists
if release.get('status') == 'done':
release_order = quality_order.index(release['quality'])
group_quality_order = quality_order.index(group['meta_data']['quality']['identifier'])
# This is where CP removes older, lesser quality releases
if release_order > group_quality_order:
log.info('Removing lesser quality %s for %s.', (release.get('quality'), media_title))
for file_type in release.get('files', {}):
for release_file in release['files'][file_type]:
remove_files.append(release_file)
remove_releases.append(release)
# Same quality, but still downloaded, so maybe repack/proper/unrated/directors cut etc
elif release_order == group_quality_order:
log.info('Same quality release already exists for %s, with quality %s. Assuming repack.', (media_title, release.get('quality')))
for file_type in release.get('files', {}):
for release_file in release['files'][file_type]:
remove_files.append(release_file)
remove_releases.append(release)
# Downloaded a lower quality, rename the newly downloaded files/folder to exclude them from scan
else:
log.info('Better quality release already exists for %s, with quality %s', (media_title, release.get('quality')))
# Add exists tag to the .ignore file
self.tagRelease(group = group, tag = 'exists')
# Notify on rename fail
download_message = 'Renaming of %s (%s) cancelled, exists in %s already.' % (media_title, group['meta_data']['quality']['label'], release.get('identifier'))
fireEvent('movie.renaming.canceled', message = download_message, data = group)
remove_leftovers = False
break
elif release.get('status') in ['snatched', 'seeding']:
if release_download and release_download.get('release_id'):
if release_download['release_id'] == release['_id']:
if release_download['status'] == 'completed':
# Set the release to downloaded
fireEvent('release.update_status', release['_id'], status = 'downloaded', single = True)
elif release_download['status'] == 'seeding':
# Set the release to seeding
fireEvent('release.update_status', release['_id'], status = 'seeding', single = True)
elif release.get('identifier') == group['meta_data']['quality']['identifier']:
# Set the release to downloaded
fireEvent('release.update_status', release['_id'], status = 'downloaded', single = True)
# Remove leftover files
if not remove_leftovers: # Don't remove anything
break
log.debug('Removing leftover files')
for current_file in group['files']['leftover']:
if self.conf('cleanup') and not self.conf('move_leftover') and \
(not self.downloadIsTorrent(release_download) or self.fileIsAdded(current_file, group)):
remove_files.append(current_file)
# Remove files
delete_folders = []
for src in remove_files:
if rename_files.get(src):
log.debug('Not removing file that will be renamed: %s', src)
continue
log.info('Removing "%s"', src)
try:
src = sp(src)
if os.path.isfile(src):
os.remove(src)
parent_dir = os.path.dirname(src)
if delete_folders.count(parent_dir) == 0 and os.path.isdir(parent_dir) and \
not isSubFolder(destination, parent_dir) and not isSubFolder(media_folder, parent_dir) and \
not isSubFolder(parent_dir, base_folder):
delete_folders.append(parent_dir)
except:
log.error('Failed removing %s: %s', (src, traceback.format_exc()))
self.tagRelease(group = group, tag = 'failed_remove')
# Delete leftover folder from older releases
for delete_folder in delete_folders:
try:
self.deleteEmptyFolder(delete_folder, show_error = False)
except Exception as e:
log.error('Failed to delete folder: %s %s', (e, traceback.format_exc()))
# Rename all files marked
group['renamed_files'] = []
failed_rename = False
for src in rename_files:
if rename_files[src]:
dst = rename_files[src]
log.info('Renaming "%s" to "%s"', (src, dst))
# Create dir
self.makeDir(os.path.dirname(dst))
try:
self.moveFile(src, dst, forcemove = not self.downloadIsTorrent(release_download) or self.fileIsAdded(src, group))
group['renamed_files'].append(dst)
except:
log.error('Failed renaming the file "%s" : %s', (os.path.basename(src), traceback.format_exc()))
failed_rename = True
break
# If renaming failed tag the release folder as failed and continue with next group. Note that all old files have already been deleted.
if failed_rename:
self.tagRelease(group = group, tag = 'failed_rename')
continue
# If renaming succeeded, make sure it is not tagged as failed (scanner didn't return a group, but a download_ID was provided in an earlier attempt)
else:
self.untagRelease(group = group, tag = 'failed_rename')
# Tag folder if it is in the 'from' folder and it will not be removed because it is a torrent
if self.movieInFromFolder(media_folder) and self.downloadIsTorrent(release_download):
self.tagRelease(group = group, tag = 'renamed_already')
# Remove matching releases
for release in remove_releases:
log.debug('Removing release %s', release.get('identifier'))
try:
db.delete(release)
except:
log.error('Failed removing %s: %s', (release, traceback.format_exc()))
if group['dirname'] and group['parentdir'] and not self.downloadIsTorrent(release_download):
if media_folder:
# Delete the movie folder
group_folder = media_folder
else:
# Delete the first empty subfolder in the tree relative to the 'from' folder
group_folder = sp(os.path.join(base_folder, os.path.relpath(group['parentdir'], base_folder).split(os.path.sep)[0]))
try:
log.info('Deleting folder: %s', group_folder)
self.deleteEmptyFolder(group_folder)
except:
log.error('Failed removing %s: %s', (group_folder, traceback.format_exc()))
# Notify on download, search for trailers etc
download_message = 'Downloaded %s (%s)' % (media_title, replacements['quality'])
try:
fireEvent('renamer.after', message = download_message, group = group, in_order = True)
except:
log.error('Failed firing (some) of the renamer.after events: %s', traceback.format_exc())
# Break if CP wants to shut down
if self.shuttingDown():
break
self.renaming_started = False
def getRenameExtras(self, extra_type = '', replacements = None, folder_name = '', file_name = '', destination = '', group = None, current_file = '', remove_multiple = False):
if not group: group = {}
if not replacements: replacements = {}
replacements = replacements.copy()
rename_files = {}
def test(s):
return current_file[:-len(replacements['ext'])] in sp(s)
for extra in set(filter(test, group['files'][extra_type])):
replacements['ext'] = getExt(extra)
final_folder_name = self.doReplace(folder_name, replacements, remove_multiple = remove_multiple, folder = True)
final_file_name = self.doReplace(file_name, replacements, remove_multiple = remove_multiple)
rename_files[extra] = os.path.join(destination, final_folder_name, final_file_name)
return rename_files
# This adds a file to ignore / tag a release so it is ignored later
def tagRelease(self, tag, group = None, release_download = None):
if not tag:
return
text = """This file is from CouchPotato
It has marked this release as "%s"
This file hides the release from the renamer
Remove it if you want it to be renamed (again, or at least let it try again)
""" % tag
tag_files = []
# Tag movie files if they are known
if isinstance(group, dict):
tag_files = [sorted(list(group['files']['movie']))[0]]
elif isinstance(release_download, dict):
# Tag download_files if they are known
if release_download.get('files', []):
tag_files = release_download.get('files', [])
# Tag all files in release folder
elif release_download['folder']:
for root, folders, names in scandir.walk(release_download['folder']):
tag_files.extend([os.path.join(root, name) for name in names])
for filename in tag_files:
# Don't tag .ignore files
if os.path.splitext(filename)[1] == '.ignore':
continue
tag_filename = '%s.%s.ignore' % (os.path.splitext(filename)[0], tag)
if not os.path.isfile(tag_filename):
self.createFile(tag_filename, text)
def untagRelease(self, group = None, release_download = None, tag = ''):
if not release_download:
return
tag_files = []
folder = None
# Tag movie files if they are known
if isinstance(group, dict):
tag_files = [sorted(list(group['files']['movie']))[0]]
folder = group['parentdir']
if not group.get('dirname') or not os.path.isdir(folder):
return False
elif isinstance(release_download, dict):
folder = release_download['folder']
if not os.path.isdir(folder):
return False
# Untag download_files if they are known
if release_download.get('files'):
tag_files = release_download.get('files', [])
# Untag all files in release folder
else:
for root, folders, names in scandir.walk(folder):
tag_files.extend([sp(os.path.join(root, name)) for name in names if not os.path.splitext(name)[1] == '.ignore'])
if not folder:
return False
# Find all .ignore files in folder
ignore_files = []
for root, dirnames, filenames in scandir.walk(folder):
ignore_files.extend(fnmatch.filter([sp(os.path.join(root, filename)) for filename in filenames], '*%s.ignore' % tag))
# Match all found ignore files with the tag_files and delete if found
for tag_file in tag_files:
ignore_file = fnmatch.filter(ignore_files, fnEscape('%s.%s.ignore' % (os.path.splitext(tag_file)[0], tag if tag else '*')))
for filename in ignore_file:
try:
os.remove(filename)
except:
log.debug('Unable to remove ignore file: %s. Error: %s.' % (filename, traceback.format_exc()))
def hastagRelease(self, release_download, tag = ''):
if not release_download:
return False
folder = release_download['folder']
if not os.path.isdir(folder):
return False
tag_files = []
ignore_files = []
# Find tag on download_files if they are known
if release_download.get('files'):
tag_files = release_download.get('files', [])
# Find tag on all files in release folder
else:
for root, folders, names in scandir.walk(folder):
tag_files.extend([sp(os.path.join(root, name)) for name in names if not os.path.splitext(name)[1] == '.ignore'])
# Find all .ignore files in folder
for root, dirnames, filenames in scandir.walk(folder):
ignore_files.extend(fnmatch.filter([sp(os.path.join(root, filename)) for filename in filenames], '*%s.ignore' % tag))
# Match all found ignore files with the tag_files and return True found
for tag_file in tag_files:
ignore_file = fnmatch.filter(ignore_files, fnEscape('%s.%s.ignore' % (os.path.splitext(tag_file)[0], tag if tag else '*')))
if ignore_file:
return True
return False
def moveFile(self, old, dest, forcemove = False):
dest = ss(dest)
try:
if forcemove or self.conf('file_action') not in ['copy', 'link']:
try:
shutil.move(old, dest)
except:
if os.path.exists(dest):
log.error('Successfully moved file "%s", but something went wrong: %s', (dest, traceback.format_exc()))
os.unlink(old)
else:
raise
elif self.conf('file_action') == 'copy':
shutil.copy(old, dest)
elif self.conf('file_action') == 'link':
# First try to hardlink
try:
log.debug('Hardlinking file "%s" to "%s"...', (old, dest))
link(old, dest)
except:
# Try to symlink next
log.debug('Couldn\'t hardlink file "%s" to "%s". Symlinking instead. Error: %s.', (old, dest, traceback.format_exc()))
shutil.copy(old, dest)
try:
symlink(dest, old + '.link')
os.unlink(old)
os.rename(old + '.link', old)
except:
log.error('Couldn\'t symlink file "%s" to "%s". Copied instead. Error: %s. ', (old, dest, traceback.format_exc()))
try:
os.chmod(dest, Env.getPermission('file'))
if os.name == 'nt' and self.conf('ntfs_permission'):
os.popen('icacls "' + dest + '"* /reset /T')
except:
log.error('Failed setting permissions for file: %s, %s', (dest, traceback.format_exc(1)))
except:
log.error('Couldn\'t move file "%s" to "%s": %s', (old, dest, traceback.format_exc()))
raise
return True
def doReplace(self, string, replacements, remove_multiple = False, folder = False):
"""
replace confignames with the real thing
"""
replacements = replacements.copy()
if remove_multiple:
replacements['cd'] = ''
replacements['cd_nr'] = ''
replaced = toUnicode(string)
for x, r in replacements.items():
if x in ['thename', 'namethe']:
continue
if r is not None:
replaced = replaced.replace(six.u('<%s>') % toUnicode(x), toUnicode(r))
else:
#If information is not available, we don't want the tag in the filename
replaced = replaced.replace('<' + x + '>', '')
replaced = self.replaceDoubles(replaced.lstrip('. '))
for x, r in replacements.items():
if x in ['thename', 'namethe']:
replaced = replaced.replace(six.u('<%s>') % toUnicode(x), toUnicode(r))
replaced = re.sub(r"[\x00:\*\?\"<>\|]", '', replaced)
sep = self.conf('foldersep') if folder else self.conf('separator')
return replaced.replace(' ', ' ' if not sep else sep)
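# Illustrative only (hypothetical values): with replacements {'namethe': 'Matrix, The',
# 'year': 2003} the default folder pattern '<namethe> (<year>)' becomes 'Matrix, The (2003)';
# tags whose value is None are stripped and doubled separators are collapsed via replaceDoubles.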
def replaceDoubles(self, string):
replaces = [
('\.+', '.'), ('_+', '_'), ('-+', '-'), ('\s+', ' '), (' \\\\', '\\\\'), (' /', '/'),
('(\s\.)+', '.'), ('(-\.)+', '.'), ('(\s-)+', '-'),
]
for r in replaces:
reg, replace_with = r
string = re.sub(reg, replace_with, string)
return string
def checkSnatched(self, fire_scan = True):
if self.checking_snatched:
log.debug('Already checking snatched')
return False
self.checking_snatched = True
try:
db = get_db()
rels = list(fireEvent('release.with_status', ['snatched', 'seeding', 'missing'], single = True))
if not rels:
#No releases found that need status checking
self.checking_snatched = False
return True
# Collect all download information with the download IDs from the releases
download_ids = []
no_status_support = []
try:
for rel in rels:
if not rel.get('download_info'): continue
if rel['download_info'].get('id') and rel['download_info'].get('downloader'):
download_ids.append(rel['download_info'])
ds = rel['download_info'].get('status_support')
if ds is False or ds == 'False':
no_status_support.append(ss(rel['download_info'].get('downloader')))
except:
log.error('Error getting download IDs from database')
self.checking_snatched = False
return False
release_downloads = fireEvent('download.status', download_ids, merge = True) if download_ids else []
if len(no_status_support) > 0:
log.debug('Download status functionality is not implemented for one of the active downloaders: %s', list(set(no_status_support)))
if not release_downloads:
if fire_scan:
self.scan()
self.checking_snatched = False
return True
scan_releases = []
scan_required = False
log.debug('Checking status snatched releases...')
try:
for rel in rels:
movie_dict = db.get('id', rel.get('media_id'))
download_info = rel.get('download_info')
if not isinstance(download_info, dict):
log.error('Faulty release found without any info, ignoring.')
fireEvent('release.update_status', rel.get('_id'), status = 'ignored', single = True)
continue
# Check if download ID is available
if not download_info.get('id') or not download_info.get('downloader'):
log.debug('Download status functionality is not implemented for downloader (%s) of release %s.', (download_info.get('downloader', 'unknown'), rel['info']['name']))
scan_required = True
# Continue with next release
continue
# Find release in downloaders
nzbname = self.createNzbName(rel['info'], movie_dict)
found_release = False
for release_download in release_downloads:
found_release = False
if download_info.get('id'):
if release_download['id'] == download_info['id'] and release_download['downloader'] == download_info['downloader']:
log.debug('Found release by id: %s', release_download['id'])
found_release = True
break
else:
if release_download['name'] == nzbname or rel['info']['name'] in release_download['name'] or getImdb(release_download['name']) == getIdentifier(movie_dict):
log.debug('Found release by release name or imdb ID: %s', release_download['name'])
found_release = True
break
if not found_release:
log.info('%s not found in downloaders', nzbname)
#If the release was already missing, check how long: after more than a week set it to ignored, otherwise (re)mark it as missing
if rel.get('status') == 'missing':
if rel.get('last_edit') < int(time.time()) - 7 * 24 * 60 * 60:
fireEvent('release.update_status', rel.get('_id'), status = 'ignored', single = True)
else:
# Set the release to missing
fireEvent('release.update_status', rel.get('_id'), status = 'missing', single = True)
# Continue with next release
continue
# Log that we found the release
timeleft = 'N/A' if release_download['timeleft'] == -1 else release_download['timeleft']
log.debug('Found %s: %s, time to go: %s', (release_download['name'], release_download['status'].upper(), timeleft))
# Check status of release
if release_download['status'] == 'busy':
# Set the release to snatched if it was missing before
fireEvent('release.update_status', rel.get('_id'), status = 'snatched', single = True)
# Tag folder if it is in the 'from' folder and it will not be processed because it is still downloading
if self.movieInFromFolder(release_download['folder']):
self.tagRelease(release_download = release_download, tag = 'downloading')
elif release_download['status'] == 'seeding':
#If linking setting is enabled, process release
if self.conf('file_action') != 'move' and not rel.get('status') == 'seeding' and self.statusInfoComplete(release_download):
log.info('Download of %s completed! It is now being processed while leaving the original files alone for seeding. Current ratio: %s.', (release_download['name'], release_download['seed_ratio']))
# Remove the downloading tag
self.untagRelease(release_download = release_download, tag = 'downloading')
# Scan and set the torrent to paused if required
release_download.update({'pause': True, 'scan': True, 'process_complete': False})
scan_releases.append(release_download)
else:
#let it seed
log.debug('%s is seeding with ratio: %s', (release_download['name'], release_download['seed_ratio']))
# Set the release to seeding
fireEvent('release.update_status', rel.get('_id'), status = 'seeding', single = True)
elif release_download['status'] == 'failed':
# Set the release to failed
fireEvent('release.update_status', rel.get('_id'), status = 'failed', single = True)
fireEvent('download.remove_failed', release_download, single = True)
if self.conf('next_on_failed'):
fireEvent('movie.searcher.try_next_release', media_id = rel.get('media_id'))
elif release_download['status'] == 'completed':
log.info('Download of %s completed!', release_download['name'])
#Make sure the downloader sent over a path to look in
if self.statusInfoComplete(release_download):
# If the release has been seeding, process now the seeding is done
if rel.get('status') == 'seeding':
if self.conf('file_action') != 'move':
# Set the release to done as the movie has already been renamed
fireEvent('release.update_status', rel.get('_id'), status = 'downloaded', single = True)
# Allow the downloader to clean-up
release_download.update({'pause': False, 'scan': False, 'process_complete': True})
scan_releases.append(release_download)
else:
# Scan and Allow the downloader to clean-up
release_download.update({'pause': False, 'scan': True, 'process_complete': True})
scan_releases.append(release_download)
else:
# Set the release to snatched if it was missing before
fireEvent('release.update_status', rel.get('_id'), status = 'snatched', single = True)
# Remove the downloading tag
self.untagRelease(release_download = release_download, tag = 'downloading')
# Scan and Allow the downloader to clean-up
release_download.update({'pause': False, 'scan': True, 'process_complete': True})
scan_releases.append(release_download)
else:
scan_required = True
except:
log.error('Failed checking for release in downloader: %s', traceback.format_exc())
# The following can either be done here, or inside the scanner if we pass it scan_items in one go
for release_download in scan_releases:
# Ask the renamer to scan the item
if release_download['scan']:
if release_download['pause'] and self.conf('file_action') == 'link':
fireEvent('download.pause', release_download = release_download, pause = True, single = True)
self.scan(release_download = release_download)
if release_download['pause'] and self.conf('file_action') == 'link':
fireEvent('download.pause', release_download = release_download, pause = False, single = True)
if release_download['process_complete']:
# First make sure the files were successfully processed
if not self.hastagRelease(release_download = release_download, tag = 'failed_rename'):
# Remove the seeding tag if it exists
self.untagRelease(release_download = release_download, tag = 'renamed_already')
# Ask the downloader to process the item
fireEvent('download.process_complete', release_download = release_download, single = True)
if fire_scan and (scan_required or len(no_status_support) > 0):
self.scan()
self.checking_snatched = False
return True
except:
log.error('Failed checking snatched: %s', traceback.format_exc())
self.checking_snatched = False
return False
def extendReleaseDownload(self, release_download):
rls = None
db = get_db()
if release_download and release_download.get('id'):
try:
rls = db.get('release_download', '%s-%s' % (release_download.get('downloader'), release_download.get('id')), with_doc = True)['doc']
except:
log.error('Download ID %s from downloader %s not found in releases', (release_download.get('id'), release_download.get('downloader')))
if rls:
media = db.get('id', rls['media_id'])
release_download.update({
'imdb_id': getIdentifier(media),
'quality': rls['quality'],
'is_3d': rls['is_3d'],
'protocol': rls.get('info', {}).get('protocol') or rls.get('info', {}).get('type'),
'release_id': rls['_id'],
})
return release_download
def downloadIsTorrent(self, release_download):
return release_download and release_download.get('protocol') in ['torrent', 'torrent_magnet']
def fileIsAdded(self, src, group):
if not group or not group.get('before_rename'):
return False
return src in group['before_rename']
def statusInfoComplete(self, release_download):
return release_download.get('id') and release_download.get('downloader') and release_download.get('folder')
def movieInFromFolder(self, media_folder):
return media_folder and isSubFolder(media_folder, sp(self.conf('from'))) or not media_folder
def extractFiles(self, folder = None, media_folder = None, files = None, cleanup = False):
if not files: files = []
# RegEx for finding rar files
archive_regex = '(?P<file>^(?P<base>(?:(?!\.part\d+\.rar$).)*)\.(?:(?:part0*1\.)?rar)$)'
restfile_regex = '(^%s\.(?:part(?!0*1\.rar$)\d+\.rar$|[rstuvw]\d+$))'
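# Note: archive_regex matches the first volume of a rar set (e.g. 'movie.rar' or
# 'movie.part01.rar'); restfile_regex, with the base name substituted in, matches the
# remaining volumes (e.g. 'movie.part02.rar', 'movie.r00', 'movie.s01', ...).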
extr_files = []
from_folder = sp(self.conf('from'))
# Check input variables
if not folder:
folder = from_folder
check_file_date = True
if media_folder:
check_file_date = False
if not files:
for root, folders, names in scandir.walk(folder):
files.extend([sp(os.path.join(root, name)) for name in names])
# Find all archive files
archives = [re.search(archive_regex, name).groupdict() for name in files if re.search(archive_regex, name)]
#Extract all found archives
for archive in archives:
# Check if it has already been processed by CPS
if self.hastagRelease(release_download = {'folder': os.path.dirname(archive['file']), 'files': [archive['file']]}):
continue
# Find all related archive files
archive['files'] = [name for name in files if re.search(restfile_regex % re.escape(archive['base']), name)]
archive['files'].append(archive['file'])
# Check if archive is fresh and maybe still copying/moving/downloading, ignore files newer than 1 minute
if check_file_date:
files_too_new, time_string = self.checkFilesChanged(archive['files'])
if files_too_new:
log.info('Archive seems to be still copying/moving/downloading or just copied/moved/downloaded (created on %s), ignoring for now: %s', (time_string, os.path.basename(archive['file'])))
continue
log.info('Archive %s found. Extracting...', os.path.basename(archive['file']))
try:
rar_handle = RarFile(archive['file'])
extr_path = os.path.join(from_folder, os.path.relpath(os.path.dirname(archive['file']), folder))
self.makeDir(extr_path)
for packedinfo in rar_handle.infolist():
if not packedinfo.isdir and not os.path.isfile(sp(os.path.join(extr_path, os.path.basename(packedinfo.filename)))):
log.debug('Extracting %s...', packedinfo.filename)
rar_handle.extract(condition = [packedinfo.index], path = extr_path, withSubpath = False, overwrite = False)
extr_files.append(sp(os.path.join(extr_path, os.path.basename(packedinfo.filename))))
del rar_handle
except Exception as e:
log.error('Failed to extract %s: %s %s', (archive['file'], e, traceback.format_exc()))
continue
# Delete the archive files
for filename in archive['files']:
if cleanup:
try:
os.remove(filename)
except Exception as e:
log.error('Failed to remove %s: %s %s', (filename, e, traceback.format_exc()))
continue
files.remove(filename)
# Move the rest of the files and folders if any files are extracted to the from folder (only if folder was provided)
if extr_files and folder != from_folder:
for leftoverfile in list(files):
move_to = os.path.join(from_folder, os.path.relpath(leftoverfile, folder))
try:
self.makeDir(os.path.dirname(move_to))
self.moveFile(leftoverfile, move_to, cleanup)
except Exception as e:
log.error('Failed moving left over file %s to %s: %s %s', (leftoverfile, move_to, e, traceback.format_exc()))
# As we probably tried to overwrite the nfo file, check if it exists and then remove the original
if os.path.isfile(move_to):
if cleanup:
log.info('Deleting left over file %s instead...', leftoverfile)
os.unlink(leftoverfile)
else:
continue
files.remove(leftoverfile)
extr_files.append(move_to)
if cleanup:
# Remove all left over folders
log.debug('Removing old movie folder %s...', media_folder)
self.deleteEmptyFolder(media_folder)
media_folder = os.path.join(from_folder, os.path.relpath(media_folder, folder))
folder = from_folder
if extr_files:
files.extend(extr_files)
# Cleanup files and folder if media_folder was not provided
if not media_folder:
files = []
folder = None
return folder, media_folder, files, extr_files
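# extractFiles returns (folder, media_folder, files, extr_files): the (possibly updated)
# scan folder and media folder, the updated file list, and the list of files that were
# extracted or moved into the 'from' folder.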
rename_options = {
'pre': '<',
'post': '>',
'choices': {
'ext': 'Extension (mkv)',
'namethe': 'Moviename, The',
'thename': 'The Moviename',
'year': 'Year (2011)',
'first': 'First letter (M)',
'quality': 'Quality (720p)',
'quality_type': '(HD) or (SD)',
'3d': '3D',
'video': 'Video (x264)',
'audio': 'Audio (DTS)',
'group': 'Releasegroup name',
'source': 'Source media (Bluray)',
'resolution_width': 'resolution width (1280)',
'resolution_height': 'resolution height (720)',
'audio_channels': 'audio channels (7.1)',
'original': 'Original filename',
'original_folder': 'Original foldername',
'imdb_id': 'IMDB id (tt0123456)',
'cd': 'CD number (cd1)',
'cd_nr': 'Just the cd nr. (1)',
'mpaa': 'MPAA Rating',
'category': 'Category label',
},
}
config = [{
'name': 'renamer',
'order': 40,
'description': 'Move and rename your downloaded movies to your movie directory.',
'groups': [
{
'tab': 'renamer',
'name': 'renamer',
'label': 'Rename downloaded movies',
'wizard': True,
'options': [
{
'name': 'enabled',
'default': False,
'type': 'enabler',
},
{
'name': 'from',
'type': 'directory',
'description': 'Folder where CP searches for movies.',
},
{
'name': 'to',
'type': 'directory',
'description': 'Default folder where the movies are moved to.',
},
{
'name': 'to if 3d',
'type': 'directory',
'description': 'Default folder where the movies are moved to if 3D.',
},
{
'name': 'folder_name',
'label': 'Folder naming',
'description': 'Name of the folder. Keep empty for no folder.',
'default': '<namethe> (<year>)',
'type': 'choice',
'options': rename_options
},
{
'name': 'folder_name_3d',
'label': 'Folder naming if 3D',
'description': 'Name of the folder for 3D. Keep empty for no folder.',
'default': '<namethe> <3d> (<year>)',
'type': 'choice',
'options': rename_options
},
{
'name': 'file_name',
'label': 'File naming',
'description': 'Name of the file',
'default': '<thename><cd>.<ext>',
'type': 'choice',
'options': rename_options
},
{
'name': 'file_name_3d',
'label': 'File naming if 3D',
'description': 'Name of the file for 3D',
'default': '<thename><cd> <3d>.<ext>',
'type': 'choice',
'options': rename_options
},
{
'name': 'unrar',
'type': 'bool',
'description': 'Extract rar files if found.',
'default': False,
},
{
'name': 'cleanup',
'type': 'bool',
'description': 'Cleanup leftover files after successful rename.',
'default': False,
},
{
'advanced': True,
'name': 'run_every',
'label': 'Run every',
'default': 1,
'type': 'int',
'unit': 'min(s)',
'description': ('Detect movie status every X minutes.', 'Will start the renamer if the movie is <strong>completed</strong>, or handle a <strong>failed</strong> download if those options are enabled.'),
},
{
'advanced': True,
'name': 'force_every',
'label': 'Force every',
'default': 2,
'type': 'int',
'unit': 'hour(s)',
'description': 'Forces a full renamer scan every X hours.',
},
{
'advanced': True,
'name': 'next_on_failed',
'default': True,
'type': 'bool',
'description': 'Try the next best release for a movie after a download failed.',
},
{
'name': 'move_leftover',
'type': 'bool',
'description': 'Move all leftover files to the movie folder after renaming.',
'default': False,
'advanced': True,
},
{
'advanced': True,
'name': 'separator',
'label': 'File-Separator',
'description': ('Replace all the spaces with a character.', 'Example: ".", "-" (without quotes). Leave empty to use spaces.'),
},
{
'advanced': True,
'name': 'foldersep',
'label': 'Folder-Separator',
'description': ('Replace all the spaces with a character.', 'Example: ".", "-" (without quotes). Leave empty to use spaces.'),
},
{
'name': 'file_action',
'label': 'Torrent File Action',
'default': 'link',
'type': 'dropdown',
'values': [('Link', 'link'), ('Copy', 'copy'), ('Move', 'move')],
'description': ('<strong>Link</strong>, <strong>Copy</strong> or <strong>Move</strong> after download completed.',
'Link first tries <a href="http://en.wikipedia.org/wiki/Hard_link">hard link</a>, then <a href="http://en.wikipedia.org/wiki/Sym_link">sym link</a> and falls back to Copy. It is preferred to use link when downloading torrents as it will save you space, while still being able to seed.'),
'advanced': True,
},
{
'advanced': True,
'name': 'ntfs_permission',
'label': 'NTFS Permission',
'type': 'bool',
'hidden': os.name != 'nt',
'description': 'Set permission of moved files to that of destination folder (Windows NTFS only).',
'default': False,
},
],
}, {
'tab': 'renamer',
'name': 'meta_renamer',
'label': 'Advanced renaming',
'description': 'Meta data file renaming. Use <filename> to use the above "File naming" settings, without the file extension.',
'advanced': True,
'options': [
{
'name': 'rename_nfo',
'label': 'Rename .NFO',
'description': 'Rename original .nfo file',
'type': 'bool',
'default': True,
},
{
'name': 'nfo_name',
'label': 'NFO naming',
'default': '<filename>.orig.<ext>',
'type': 'choice',
'options': rename_options
},
],
},
],
}]
|
# [SublimeLinter flake8-max-line-length:120]
from datetime import datetime, timedelta
import random
import os
from os.path import join, isdir
import secrets
from matplotlib import pyplot as plt
from flask_security.utils import encrypt_password
import lipsum
from .talky import app, mail
from .login import user_datastore
from .schema import db, Role, Experiment, Conference, Comment, Submission, Category, Talk, Contact
__all__ = [
'build_sample_db',
'build_production_db',
]
def get_delta(days=2):
return timedelta(
days=random.randrange(days),
hours=random.randrange(24),
minutes=random.randrange(60),
seconds=random.randrange(60)
)
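# e.g. get_delta(7) returns a random timedelta of just under 7 days at most (random days
# plus random hours, minutes and seconds); it is used to spread submissions and comments
# out over time.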
def make_example_submission(talk, version):
plt.title(talk.title)
plt.text(0.1, 0.5, talk.experiment.name)
submission_dir = join(app.config['FILE_PATH'], str(talk.id), str(version))
assert not isdir(submission_dir)
os.makedirs(submission_dir)
plt.savefig(join(submission_dir, 'my_example_file.pdf'))
plt.close()
def make_submissions(first_names, conference, talk):
submissions = []
current_time = conference.start_date
for n_submission in range(random.randrange(5)):
talk.n_submissions += 1
version = talk.n_submissions
make_example_submission(talk, version)
current_time = current_time + get_delta()
submission = Submission(talk=talk, time=current_time, version=version, filename='my_example_file.pdf')
db.session.add(submission)
submissions.append(submission)
current_time = conference.start_date
for n_comment in range(random.randrange(1, 6)):
make_comment(first_names, current_time, talk, submissions, parent=None)
def make_comment(first_names, current_time, talk, submissions, parent=None, child_prob=0.75):
current_time = current_time + get_delta(3)
name = random.sample(first_names, 2)
s = [s for s in submissions if s.time < current_time]
comment = Comment(
name=' '.join(name),
email=f'chrisburr73+{name[0]}.{name[1]}@gmail.com',
comment=lipsum.generate_sentences(random.randrange(1, 5)),
time=current_time,
talk=talk,
submission=s[-1] if s else None,
parent_comment_id=parent
)
db.session.add(comment)
db.session.commit()
if random.random() > 1-child_prob:
for n_comment in range(random.randrange(1, 4)):
make_comment(first_names, current_time, talk, submissions, comment.id, child_prob*0.5)
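# Note: child comments are generated recursively with child_prob halved at each level
# (default 0.75), so comment threads stay shallow and the recursion terminates quickly.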
def build_production_db():
db.drop_all()
db.create_all()
with app.app_context():
lhcb = Experiment(name='LHCb')
db.session.add(lhcb)
db.session.commit()
user_role = Role(name='user')
super_user_role = Role(name='superuser')
db.session.add(user_role)
db.session.add(super_user_role)
db.session.commit()
admin_password = secrets.token_urlsafe()
print(f'Admin password is "{admin_password}"')
user_datastore.create_user(
name='Admin',
email='christopher.burr@cern.ch',
password=encrypt_password(admin_password),
roles=[user_role, super_user_role],
experiment=lhcb
)
db.session.commit()
def build_sample_db(fast=False):
"""Populate a db with some example entries."""
# Set a seed to avoid flakiness
random.seed(42)
# Prevent sending email
_send = mail.send
mail.send = lambda msg: None
db.drop_all()
db.create_all()
with app.app_context():
lhcb = Experiment(name='LHCb')
belle = Experiment(name='Belle')
belle_2 = Experiment(name='Belle 2')
db.session.add(lhcb)
db.session.add(belle)
db.session.add(belle_2)
db.session.commit()
user_role = Role(name='user')
super_user_role = Role(name='superuser')
db.session.add(user_role)
db.session.add(super_user_role)
db.session.commit()
test_admin = user_datastore.create_user(
name='Admin',
email='admin',
password=encrypt_password('admin'),
roles=[user_role, super_user_role],
experiment=lhcb
)
test_user_lhcb = user_datastore.create_user(
name='User',
email='userlhcb',
password=encrypt_password('user'),
roles=[user_role],
experiment=lhcb
)
test_user_belle = user_datastore.create_user(
name='User',
email='userbelle',
password=encrypt_password('user'),
roles=[user_role],
experiment=belle
)
test_user_belle2 = user_datastore.create_user(
name='User',
email='userbelle2',
password=encrypt_password('user'),
roles=[user_role],
experiment=belle_2
)
first_names = [
'Harry', 'Amelia', 'Oliver', 'Jack', 'Isabella', 'Charlie', 'Sophie', 'Mia',
'Jacob', 'Thomas', 'Emily', 'Lily', 'Ava', 'Isla', 'Alfie', 'Olivia', 'Jessica',
'Riley', 'William', 'James', 'Geoffrey', 'Lisa', 'Benjamin', 'Stacey', 'Lucy'
]
contacts = []
for i in range(9 if fast else len(first_names)):
tmp_experiment = [lhcb, belle, belle_2][i % 3]
tmp_email = f'chrisburr73+{first_names[i].lower()}.{tmp_experiment.name}@gmail.com'
contacts.append(Contact(email=tmp_email, experiment=tmp_experiment))
db.session.add(contacts[-1])
db.session.commit()
lhcb_charm = Category(name='Charm', experiment=lhcb, contacts=contacts[:1])
belle_charm = Category(name='Charm', experiment=belle, contacts=contacts[1:2])
db.session.add(lhcb_charm)
db.session.add(belle_charm)
db.session.commit()
conferences = []
for year in range(2018 if fast else 2000, 2020):
conf_time = datetime.now() - timedelta(days=random.randrange(50, 500))
llwi = Conference(name='LLWI '+str(year), venue='Canada', start_date=conf_time)
db.session.add(llwi)
conf_time = datetime.now() - timedelta(days=random.randrange(50, 500))
moriond = Conference(name='Moriond '+str(year), venue='La Thuile', start_date=conf_time, url=f'http://moriond.in2p3.fr/QCD/{year}/')
db.session.add(moriond)
conferences.extend([llwi, moriond])
db.session.commit()
for conference in conferences:
charm_prod = Talk(
title='Charm hadron production cross-sections at √s = 13 TeV using 300pb⁻¹',
duration=f'{random.randrange(10, 90)}" (+ questions)',
speaker=f'chrisburr73+{".".join(random.sample(first_names, 2))}@gmail.com',
experiment=lhcb, interesting_to=[belle, belle_2], conference=conference,
abstract=lipsum.generate_sentences(10)
)
db.session.add(charm_prod)
db.session.commit()
make_submissions(first_names, conference, charm_prod)
talk = Talk(
title=lipsum.generate_words(10), duration=f'{random.randrange(10, 90)}"',
speaker=f'chrisburr73+{".".join(random.sample(first_names, 2))}@gmail.com',
experiment=belle, interesting_to=[lhcb], conference=conference,
abstract=lipsum.generate_paragraphs(2)
)
db.session.add(talk)
db.session.commit()
make_submissions(first_names, conference, talk)
talk = Talk(
title=lipsum.generate_words(10), duration=f'{random.randrange(10, 90)}"',
speaker=f'chrisburr73+{".".join(random.sample(first_names, 2))}@gmail.com',
experiment=belle_2, interesting_to=[belle], conference=conference,
abstract=lipsum.generate_paragraphs(2)
)
db.session.add(talk)
db.session.commit()
make_submissions(first_names, conference, talk)
db.session.commit()
# Restore mail sending functionality
mail.send = _send
|
import os
import subprocess
from typing import Sequence
from valohai_cli.exceptions import NoCommit, NoGitRepo
def check_git_output(args: Sequence[str], directory: str) -> bytes:
try:
return subprocess.check_output(
args=args,
cwd=directory,
shell=False,
stderr=subprocess.STDOUT,
env=dict(os.environ, LC_ALL='C'),
)
except subprocess.CalledProcessError as cpe:
if cpe.returncode == 128:
output_text = cpe.output.decode().lower()
if 'not a git repository' in output_text:
raise NoGitRepo(directory)
if 'bad revision' in output_text:
raise NoCommit(directory)
raise
def get_current_commit(directory: str) -> str:
"""
(Try to) get the current commit of the Git working copy in `directory`.
:param directory: Directory path.
:return: Commit SHA
"""
return check_git_output(['git', 'rev-parse', 'HEAD'], directory).strip().decode()
def describe_current_commit(directory: str) -> str:
"""
(Try to) describe the lineage and status of the Git working copy in `directory`.
:param directory: Directory path.
:return: Git description string
"""
return check_git_output(['git', 'describe', '--always', '--long', '--dirty', '--all'], directory).strip().decode()
def get_file_at_commit(directory: str, commit: str, path: str) -> bytes:
"""
Get the contents of repository `path` at commit `commit` given the
Git working directory `directory`.
:param directory: Git working directory.
:param commit: Commit ID
:param path: In-repository path
:return: File contents as bytes
"""
args = ['git', 'show', f'{commit}:{path}']
return check_git_output(args, directory)
def expand_commit_id(directory: str, commit: str) -> str:
"""
Expand the possibly abbreviated (or otherwise referred to, i.e. "HEAD")
commit ID, and verify it exists.
:param directory: Git working directory
:param commit: Commit ID
:return: Expanded commit ID.
"""
return check_git_output(['git', 'rev-parse', '--verify', commit], directory).decode().strip()
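# A minimal usage sketch of the helpers above (paths and SHAs are hypothetical):
#   sha = get_current_commit('.')                       # e.g. 'a1b2c3...'
#   full = expand_commit_id('.', 'HEAD~1')              # expand/verify a commit reference
#   data = get_file_at_commit('.', full, 'README.md')   # bytes of README.md at that commit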
|
from django.http import (
HttpResponse, Http404, HttpResponseForbidden,
HttpResponseRedirect)
from django.shortcuts import render_to_response, get_object_or_404, redirect
from django.contrib.auth.decorators import login_required
from django.template import RequestContext
from django.template.loader import render_to_string
from django.contrib.syndication.views import Feed
from django.contrib.auth.models import User, Group
from django.conf import settings
from django.contrib.auth.views import (
login as auth_login, logout as auth_logout)
from django.views.generic.list import ListView
from datetime import datetime, timedelta
import logging
import json
from dinette.models import Ftopics, Category, Reply, DinetteUserProfile
from dinette.forms import FtopicForm, ReplyForm
#Create the module logger; the logging handlers themselves are configured in the models module
mlogger = logging.getLogger(__name__)
json_mimetype = 'application/javascript'
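# Note: the post/reply views below return JSON; when the request contains files, the JSON
# is wrapped in a <textarea> element because such requests are submitted through a hidden
# iframe (a pre-XHR2 ajax file upload technique) and the client reads the response from
# the iframe body.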
def index_page(request):
#groups this user has access to
if request.user.is_authenticated():
groups = [group for group in request.user.groups.all()] + \
[group for group in Group.objects.filter(name="general")]
else:
#users who are not logged in are treated as members of the "general" group
groups = Group.objects.filter(name="general")
#decide which forums this user has access to
forums = []
for group in groups:
forums.extend([each for each in group.can_access_forums.all()])
forums = set(forums)
forums = sorted(forums, cmp=lambda x, y: int(y.ordering) - int(x.ordering))
totaltopics = Ftopics.objects.count()
totalposts = totaltopics + Reply.objects.count()
totalusers = User.objects.count()
now = datetime.now()
users_online = DinetteUserProfile.objects.filter(
last_activity__gte=now - timedelta(seconds=900)).count()
last_registered_user = User.objects.order_by('-date_joined')[0]
payload = {
'users_online': users_online, 'forums_list': forums,
'totaltopics': totaltopics, 'totalposts': totalposts,
'totalusers': totalusers, 'last_registered_user': last_registered_user
}
return render_to_response(
"dinette/mainindex.html", payload, RequestContext(request))
def category_details(request, categoryslug, pageno=1):
#build a form for posting topics
topicform = FtopicForm()
category = get_object_or_404(Category, slug=categoryslug)
queryset = Ftopics.objects.filter(category__id__exact=category.id)
topic_page_size = getattr(settings, "TOPIC_PAGE_SIZE", 10)
payload = {
'topicform': topicform, 'category': category,
'authenticated': request.user.is_authenticated(),
'topic_list': queryset, "topic_page_size": topic_page_size
}
return render_to_response(
"dinette/category_details.html", payload, RequestContext(request))
topic_list = ListView.as_view(
template_name='dinette/topiclist.html',
model=Ftopics, context_object_name='topic', paginate_by=2)
def topic_detail(request, categoryslug, topic_slug, pageno=1):
topic = get_object_or_404(Ftopics, slug=topic_slug)
show_moderation_items = False
if request.user in topic.category.moderated_by.all():
show_moderation_items = True
#somebody has viewed this topic; bump the view count
topic.viewcount = topic.viewcount + 1
topic.save()
#we also need to display the reply form
replylist = topic.reply_set.all()
reply_page_size = getattr(settings, "REPLY_PAGE_SIZE", 10)
replyform = ReplyForm()
payload = {
'topic': topic, 'replyform': replyform, 'reply_list': replylist,
'show_moderation_items': show_moderation_items,
"reply_page_size": reply_page_size}
return render_to_response(
"dinette/topic_detail.html", payload, RequestContext(request))
@login_required
def postTopic(request):
mlogger.info("In post Topic page.....................")
mlogger.debug("Type of request.user %s" % type(request.user))
topic = FtopicForm(request.POST, request.FILES)
if not topic.is_valid():
d = {"is_valid": "false", "response_html": topic.as_table()}
if request.FILES:
#the <textarea> wrapper is required for iframe-based ajax file uploads
response_json = "<textarea>"+json.dumps(d)+"</textarea>"
else:
response_json = json.dumps(d)
return HttpResponse(response_json, mimetype=json_mimetype)
#code which checks for flood control
if (datetime.now()-request.user.dinetteuserprofile.last_posttime).seconds < settings.FLOOD_TIME:
#the user is posting too quickly; reject the post to prevent flooding
d2 = {"is_valid": "flood", "errormessage": "You have posted a message too recently. Please wait a while before trying again."}
if request.FILES:
response_json = "<textarea>"+json.dumps(d2)+"</textarea>"
else:
response_json = json.dumps(d2)
return HttpResponse(response_json, mimetype=json_mimetype)
ftopic = topic.save(commit=False)
#only if there is any file
if request.FILES :
if(request.FILES['file'].content_type.find("image") >= 0 ) :
ftopic.attachment_type = "image"
else :
ftopic.attachment_type = "text"
ftopic.filename = request.FILES['file'].name
ftopic.posted_by = request.user
mlogger.debug("categoryid= %s" %request.POST['categoryid'])
ftopic.category = Category.objects.get(pk = request.POST['categoryid'])
#Assigning user rank
mlogger.debug("Assigning an user rank and last posted datetime")
assignUserElements(request.user)
ftopic.save()
#auto-subscribe the poster to their own topic
ftopic.subscribers.add(request.user)
mlogger.debug("what is the message (%s %s) " % (ftopic.message,ftopic.subject))
payload = {'topic':ftopic}
response_html = render_to_string('dinette/topic_detail_frag.html', payload,RequestContext(request))
mlogger.debug("what is the response = %s " % response_html)
d2 = {"is_valid":"true","response_html":response_html}
#this the required for ajax file uploads
if request.FILES :
json = "<textarea>"+json.dumps(d2)+"</textarea>"
else :
json = json.dumps(d2)
return HttpResponse(json, mimetype = json_mimetype)
@login_required
def postReply(request):
mlogger.info("in post reply.................")
freply = ReplyForm(request.POST,request.FILES)
if not freply.is_valid():
d = {"is_valid":"false","response_html":freply.as_table()}
if request.FILES:
response_json = "<textarea>"+json.dumps(d)+"</textarea>"
else:
response_json = json.dumps(d)
return HttpResponse(response_json, mimetype=json_mimetype)
#code which checks for flood control
if (datetime.now() -(request.user.dinetteuserprofile.last_posttime)).seconds <= settings.FLOOD_TIME:
#oh....... user trying to flood us Stop him
d2 = {"is_valid":"flood","errormessage":"You have posted message too recently. Please wait a while before trying again."}
if request.FILES :
json = "<textarea>"+json.dumps(d2)+"</textarea>"
else :
json = json.dumps(d2)
return HttpResponse(json, mimetype = json_mimetype)
reply = freply.save(commit=False)
#only if there is any file
if len(request.FILES.keys()) == 1 :
if(request.FILES['file'].content_type.find("image") >= 0 ) :
reply.attachment_type = "image"
else :
reply.attachment_type = "text"
reply.filename = request.FILES['file'].name
reply.posted_by = request.user
mlogger.debug("toipcid= %s" %request.POST['topicid'])
reply.topic = Ftopics.objects.get(pk = request.POST['topicid'])
#Assigning user rank
mlogger.debug("Assigning an user rank, and last posted datetime")
assignUserElements(request.user)
reply.save()
payload = {'reply':reply}
mlogger.debug("what is the replymesage = %s" %reply.message)
response_html = render_to_string('dinette/replydetail_frag.html', payload ,RequestContext(request))
mlogger.debug("what is the response = %s " % response_html)
d2 = {"is_valid":"true","response_html":response_html}
if request.FILES :
#this the required for ajax file uploads
json = "<textarea>"+json.dumps(d2)+"</textarea>"
else:
json = json.dumps(d2)
return HttpResponse(json, mimetype = json_mimetype)
@login_required
def deleteReply(request, reply_id):
resp= {"status": "1", "message": "Successfully deleted the reply"}
try:
reply = Reply.objects.get(pk=reply_id)
if not (reply.posted_by == request.user or request.user in reply.topic.category.moderated_by.all()):
return HttpResponseForbidden()
reply.delete()
except Exception:
    resp["status"] = "0"
    resp["message"] = "Error deleting message"
return HttpResponse(json.dumps(resp), mimetype=json_mimetype)
@login_required
def editReply(request, reply_id):
reply = get_object_or_404(Reply, pk=reply_id)
if not (reply.posted_by == request.user or request.user in reply.topic.category.moderated_by.all()):
return HttpResponseForbidden()
if request.POST:
form = ReplyForm(request.POST, request.FILES, instance=reply)
if form.is_valid():
form.save()
#redirect to prev page
return HttpResponseRedirect(reply.get_url_with_fragment())
else:
# message should be original input, not the rendered one
form = ReplyForm(instance=reply, initial={'message': reply.message.raw})
return render_to_response('dinette/edit_reply.html', {'replyform': form, 'reply_id': reply_id}, context_instance=RequestContext(request))
class LatestTopicsByCategory(Feed):
title_template = 'dinette/feeds/title.html'
description_template = 'dinette/feeds/description.html'
def get_object(self, request, whichcategory):
mlogger.debug("Feed for category %s " % whichcategory)
return get_object_or_404(Category, slug=whichcategory)
def title(self, obj):
return "Latest topics in category %s" % obj.name
def link(self, obj):
return settings.SITE_URL
def items(self, obj):
return obj.ftopics_set.all()[:10]
#construct these links by reverse lookup using the permalink decorator
def item_link(self,obj):
return obj.get_absolute_url()
def item_pubdate(self,obj):
return obj.created_on
class LatestRepliesOfTopic(Feed):
title_template = 'dinette/feeds/title.html'
description_template = 'dinette/feeds/description.html'
def get_object(self, request, whichtopic):
mlogger.debug("Feed for category %s " % whichtopic)
return get_object_or_404(Ftopics, slug=whichtopic)
def title(self, obj):
return "Latest replies in topic %s" % obj.subject
def link(self, obj):
return settings.SITE_URL
def items(self, obj):
items = [obj]
items.extend(obj.reply_set.all()[:10])
return items
#construct these links by reverse lookup using the permalink decorator
def item_link(self,obj):
return obj.get_absolute_url()
def item_pubdate(self,obj):
return obj.created_on
def assignUserElements(user):
ranks = getattr(settings, 'RANKS_NAMES_DATA')
rank = ''
if ranks:
totalposts = user.ftopics_set.count() + user.reply_set.count()
for el in ranks:
if totalposts == el[0]:
rank = el[1]
if rank:
userprofile = user.dinetteuserprofile
userprofile.userrank = rank
#this is the time when user posted his last post
userprofile.last_posttime = datetime.now()
userprofile.save()
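# Illustrative sketch (an assumption, not part of the project's settings): assignUserElements()
# above reads settings.RANKS_NAMES_DATA as an iterable of (post_count, rank_name) pairs, so a
# matching settings entry might look roughly like this. The thresholds and names are hypothetical.
#
# RANKS_NAMES_DATA = (
#     (1, 'New Member'),
#     (10, 'Member'),
#     (100, 'Veteran'),
# )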
###Moderation views###
@login_required
def moderate_topic(request, topic_id, action):
topic = get_object_or_404(Ftopics, pk = topic_id)
if request.user not in topic.category.moderated_by.all():
raise Http404
if request.method == 'POST':
if action == 'close':
if topic.is_closed:
message = 'You have reopened topic %s'%topic.subject
else:
message = 'You have closed topic %s'%topic.subject
topic.is_closed = not topic.is_closed
elif action == 'announce':
if topic.announcement_flag:
message = '%s is no longer an announcement.' % topic.subject
else:
message = '%s is now an announcement.' % topic.subject
topic.announcement_flag = not topic.announcement_flag
elif action == 'sticky':
if topic.is_sticky:
message = '%s has been unstickied.' % topic.subject
else:
message = '%s has been stickied.' % topic.subject
topic.is_sticky = not topic.is_sticky
elif action == 'hide':
if topic.is_hidden:
message = '%s has been unhidden.' % topic.subject
else:
message = "%s has been hidden and won't show up any further." % topic.subject
topic.is_hidden = not topic.is_hidden
topic.save()
payload = {'topic_id':topic.pk, 'message':message}
resp = json.dumps(payload)
return HttpResponse(resp, mimetype = json_mimetype)
else:
return HttpResponse('This view must be called via post')
def login(request):
return auth_login(request)
def logout(request):
return auth_logout(request)
def user_profile(request, slug):
user_profile = get_object_or_404(User, dinetteuserprofile__slug=slug)
return render_to_response('dinette/user_profile.html', {}, RequestContext(request, {'user_profile': user_profile}))
@login_required
def new_topics(request):
userprofile = request.user.dinetteuserprofile
new_topic_list = userprofile.get_since_last_visit()
return topic_list(request, new_topic_list, page_message = "Topics since your last visit")
def active(request):
#Time filter = 48 hours
days_ago_2 = datetime.now() - timedelta(days = 2)
topics = Ftopics.objects.filter(last_reply_on__gt = days_ago_2)
active_topics = topics.extra(select= {"activity":"viewcount+100*num_replies"}).order_by("-activity")
return topic_list(request, active_topics, page_message = "Most active Topics")
def unanswered(request):
unanswered_topics = Ftopics.objects.filter(replies = 0)
return topic_list(request, unanswered_topics, page_message = "Unanswered Topics")
def topic_list(request, queryset, page_message):
payload = {"new_topic_list": queryset, "page_message": page_message}
return render_to_response("dinette/new_topics.html", payload, RequestContext(request))
def search(request):
from haystack.views import SearchView
search_view = SearchView(template = "dinette/search.html")
return search_view(request)
@login_required
def subscribeTopic(request, topic_id):
topic = get_object_or_404(Ftopics, pk=topic_id)
topic.subscribers.add(request.user)
next = request.GET.get('next', topic.get_absolute_url())
return redirect(next)
@login_required
def unsubscribeTopic(request, topic_id):
topic = get_object_or_404(Ftopics, pk=topic_id)
topic.subscribers.remove(request.user)
next = request.GET.get('next', topic.get_absolute_url())
return redirect(next)
@login_required
def subscribeDigest(request):
user = get_object_or_404(User, pk=request.user.id)
profile = user.dinetteuserprofile
profile.is_subscribed_to_digest = True
profile.save()
next = request.GET.get('next', user.dinetteuserprofile.get_absolute_url())
return redirect(next)
@login_required
def unsubscribeDigest(request):
user = get_object_or_404(User, pk=request.user.id)
profile = user.dinetteuserprofile
profile.is_subscribed_to_digest = False
profile.save()
next = request.GET.get('next', user.dinetteuserprofile.get_absolute_url())
return redirect(next)
|
"""
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import urlparse
import urllib
import kodi
import log_utils
import dom_parser
from salts_lib import scraper_utils
from salts_lib.constants import FORCE_NO_MATCH
from salts_lib.constants import VIDEO_TYPES
from salts_lib.constants import QUALITIES
import scraper
BASE_URL = 'http://moviewatcher.to'
class Scraper(scraper.Scraper):
base_url = BASE_URL
def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
self.timeout = timeout
self.base_url = kodi.get_setting('%s-base_url' % (self.get_name()))
@classmethod
def provides(cls):
return frozenset([VIDEO_TYPES.MOVIE, VIDEO_TYPES.TVSHOW, VIDEO_TYPES.EPISODE])
@classmethod
def get_name(cls):
return 'MovieWatcher'
def resolve_link(self, link):
url = urlparse.urljoin(self.base_url, link)
html = self._http_get(url, allow_redirect=False, cache_limit=0)
if html.startswith('http'):
return html
else:
return link
def get_sources(self, video):
source_url = self.get_url(video)
hosters = []
if source_url and source_url != FORCE_NO_MATCH:
page_url = urlparse.urljoin(self.base_url, source_url)
html = self._http_get(page_url, cache_limit=1)
for item in dom_parser.parse_dom(html, 'a', {'class': 'full-torrent1'}):
stream_url = dom_parser.parse_dom(item, 'span', ret='onclick')
host = dom_parser.parse_dom(item, 'div', {'class': 'small_server'})
match = re.search('Views:\s*(?:</[^>]*>)?\s*(\d+)', item, re.I)
views = match.group(1) if match else None
match = re.search('Size:\s*(?:</[^>]*>)?\s*(\d+)', item, re.I)
size = int(match.group(1)) * 1024 * 1024 if match else None
if stream_url and host:
stream_url = stream_url[0]
host = host[0].lower()
host = host.replace('stream server: ', '')
match = re.search("'(/redirect/[^']+)", stream_url)
if match:
stream_url = match.group(1)
quality = scraper_utils.get_quality(video, host, QUALITIES.HIGH)
hoster = {'multi-part': False, 'host': host, 'class': self, 'quality': quality, 'views': views, 'rating': None, 'url': stream_url, 'direct': False}
if size is not None: hoster['size'] = scraper_utils.format_size(size, 'B')
hosters.append(hoster)
return hosters
def _get_episode_url(self, show_url, video):
episode_pattern = 'href="([^"]*/s0*%se0*%s(?!\d)[^"]*)' % (video.season, video.episode)
return self._default_get_episode_url(show_url, video, episode_pattern)
def search(self, video_type, title, year, season=''):
results = []
if video_type == VIDEO_TYPES.MOVIE:
vid_type = 'movies'
else:
vid_type = 'series'
search_url = urlparse.urljoin(self.base_url, '/search?query=%s&type=%s')
search_url = search_url % (urllib.quote_plus(title), vid_type)
html = self._http_get(search_url, allow_redirect=False, cache_limit=8)
if html.startswith('http'):
results = [{'url': scraper_utils.pathify_url(html), 'title': scraper_utils.cleanse_title(title), 'year': ''}]
else:
for item in dom_parser.parse_dom(html, 'div', {'class': 'one_movie-item'}):
match_url = dom_parser.parse_dom(item, 'a', ret='href')
match_title = dom_parser.parse_dom(item, 'img', ret='alt')
match_year = ''
if match_url and match_title:
match_url = match_url[0]
match_title = match_title[0]
if match_year:
match_year = match_year[0]
else:
match_year = ''
if not year or not match_year or year == match_year:
result = {'url': scraper_utils.pathify_url(match_url), 'title': scraper_utils.cleanse_title(match_title), 'year': match_year}
results.append(result)
return results
|
#!flask/bin/python
from flask import Flask, jsonify, abort, make_response, request
from resources import posts, todos, comments, albums, photos, users
app = Flask(__name__)
@app.errorhandler(404)
def not_found(error):
return make_response(jsonify({'error': 'Not found'}), 404)
@app.errorhandler(400)
def bad_request(error):
return make_response(jsonify({'error': 'Bad request'}), 400)
#METHODS = 'GET'
@app.route('/posts', methods=['GET'])
def get_posts():
return jsonify({'posts': posts})
@app.route('/todos', methods=['GET'])
def get_todos():
return jsonify({'todos': todos})
@app.route('/comments', methods=['GET'])
def get_comments():
return jsonify({'comments': comments})
@app.route('/albums', methods=['GET'])
def get_albums():
return jsonify({'albums': albums})
@app.route('/photos', methods=['GET'])
def get_photos():
return jsonify({'photos': photos})
@app.route('/users', methods=['GET'])
def get_users():
return jsonify({'users': users})
#METHODS = "GET ID"
@app.route('/posts/<int:post_id>', methods=['GET'])
def get_post(post_id):
post = [post for post in posts if post['id'] == post_id]
if len(post) == 0:
abort(404)
return jsonify({'post': post[0]})
@app.route('/todos/<int:todo_id>', methods=['GET'])
# view function names must be unique, otherwise Flask raises a duplicate-endpoint error
def get_todo(todo_id):
todo = [todo for todo in todos if todo['id'] == todo_id]
if len(todo) == 0:
abort(404)
return jsonify({'todo': todo[0]})
@app.route('/comments/<int:comment_id>', methods=['GET'])
def get_comment(comment_id):
comment = [comment for comment in comments if comment['id'] == comment_id]
if len(comment) == 0:
abort(404)
return jsonify({'comment': comment[0]})
@app.route('/albums/<int:album_id>', methods=['GET'])
def get_album(album_id):
album = [album for album in albums if album['id'] == album_id]
if len(album) == 0:
abort(404)
return jsonify({'album': album[0]})
@app.route('/photos/<int:photo_id>', methods=['GET'])
def get_photo(photo_id):
photo = [photo for photo in photos if photo['id'] == photo_id]
if len(photo) == 0:
abort(404)
return jsonify({'photo': photo[0]})
@app.route('/users/<int:user_id>', methods=['GET'])
def get_user(user_id):
user = [user for user in users if user['id'] == user_id]
if len(user) == 0:
abort(404)
return jsonify({'user': user[0]})
#METHODS = 'POST'
@app.route('/posts', methods=['POST'])
def create_post():
if not request.json or not 'title' in request.json or not 'userId' in request.json or not 'body' in request.json:
abort(400)
post = {
'id': posts[-1]['id'] + 1,
'userId': request.json['userId'],
'title': request.json['title'],
'body': request.json['body'],
}
posts.append(post)
return jsonify({'post': post}), 201
@app.route('/posts/<int:post_id>', methods=['PUT'])
def update_post(post_id):
post = [post for post in posts if post['id'] == post_id]
if len(post) == 0:
abort(404)
if not request.json:
abort(400)
post[0]['title'] = request.json.get('title', post[0]['title'])
post[0]['body'] = request.json.get('body', post[0]['body'])
return jsonify({'post': post[0]})
@app.route('/posts/<int:post_id>', methods=['DELETE'])
def delete_post(post_id):
post = [post for post in posts if post['id'] == post_id]
if len(post) == 0:
abort(404)
posts.remove(post[0])
return jsonify({'result': True})
if __name__ == '__main__':
app.debug = True
app.run("0.0.0.0")
|
import unittest
import pytest
from geographiclib.geodesic import Geodesic
from geographiclib_cython import Geodesic as CythonGeodesic
from geopy.distance import great_circle
# Run with: python -m pytest tests.py
class TestGeodesic(unittest.TestCase):
def test_inverse(self):
actual = CythonGeodesic.WGS84.Inverse(10, 20, 30, 40)
expected = Geodesic.WGS84.Inverse(10, 20, 30, 40)
assert actual['s12'] == pytest.approx(expected['s12'], 1e-10)
assert actual['azi1'] == pytest.approx(expected['azi1'], 1e-10)
assert actual['azi2'] == pytest.approx(expected['azi2'], 1e-10)
def test_direct(self):
actual = CythonGeodesic.WGS84.Direct(10, 20, 30, 4000)
expected = Geodesic.WGS84.Direct(10, 20, 30, 4000)
assert actual['lat2'] == pytest.approx(expected['lat2'], 1e-10)
assert actual['lon2'] == pytest.approx(expected['lon2'], 1e-10)
assert actual['azi2'] == pytest.approx(expected['azi2'], 1e-10)
def test_inverse_line(self):
actual_line = CythonGeodesic.WGS84.InverseLine(10, 20, 30, 40)
expected_line = Geodesic.WGS84.InverseLine(10, 20, 30, 40)
assert actual_line.s13 == pytest.approx(expected_line.s13, 1e-10)
actual_pos = actual_line.Position(100000)
expected_pos = expected_line.Position(100000)
assert actual_pos['lat2'] == pytest.approx(expected_pos['lat2'], 1e-10)
assert actual_pos['lon2'] == pytest.approx(expected_pos['lon2'], 1e-10)
def test_sphere_distance(self):
actual = CythonGeodesic.Sphere().Inverse(10, 20, 30, 40)
expected = great_circle((10, 20), (30, 40))
assert actual['s12'] == pytest.approx(expected.meters, 1e-10)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from setuptools import setup, find_packages
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
def parse_requirements(filename):
return [line.strip()
for line in read(filename).strip().split('\n')
if line.strip()]
pkg = {}
exec(read('alchy/__pkg__.py'), pkg)
readme = read('README.rst')
changelog = read('CHANGELOG.rst')
requirements = parse_requirements('requirements.txt')
setup(
name=pkg['__package_name__'],
version=pkg['__version__'],
url=pkg['__url__'],
license=pkg['__license__'],
author=pkg['__author__'],
author_email=pkg['__email__'],
description=pkg['__description__'],
long_description=readme + '\n\n' + changelog,
packages=find_packages(exclude=['tests', 'tasks']),
install_requires=requirements,
keywords='sqlalchemy databases orm declarative',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'License :: OSI Approved :: MIT License',
'Topic :: Database :: Front-Ends',
'Topic :: Software Development :: Libraries :: Python Modules',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5'
]
)
|
#
# This file is protected by Copyright. Please refer to the COPYRIGHT file
# distributed with this source distribution.
#
# This file is part of REDHAWK server.
#
# REDHAWK server is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# REDHAWK server is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
import struct
import numpy
def wav_hdr(num_channels, sample_rate, sample_width):
'''
Returns a string that is a valid WAVE header. Useful for web streams/sockets.
:param num_channels: 1 = Mono, 2 = Stereo
:param sample_rate:
:param sample_width: bytes per sample, ie 16bit PCM = 2
:return:
'''
chunk_hdr = struct.pack('4si4s',
'RIFF',
0, # Chunk Size
'WAVE')
# fmt chunk
byte_rate = sample_rate * sample_width * num_channels
block_align = num_channels * sample_width
bits_per_sample = sample_width * 8
format_chunk = struct.pack('4sihHIIHH',
'fmt ',
16, # Fmt Sub Chunk Size
1, # AudioFormat (1 = PCM)
num_channels,
sample_rate,
byte_rate,
block_align,
bits_per_sample)
output = chunk_hdr + format_chunk + 'data'
return output
def pcm2wav(data, num_channels, sample_rate):
'''
Converts PCM to Wave format. Converts PCM to 16-bit
:param data:
:param num_channels: 1 = Mono, 2 = Stereo
:param sample_rate:
:return:
'''
# TODO: Handle different data formats. Current implementation just
# casts. Need to handle the more standard normalized floats
sample_width = 2
pcm_data = numpy.array(data).astype('int16')
chunk_hdr = struct.pack('4si4s',
'RIFF',
36 + pcm_data.nbytes, # Chunk Size
'WAVE')
# fmt chunk
byte_rate = sample_rate * sample_width * num_channels
block_align = num_channels * sample_width
bits_per_sample = sample_width * 8
format_chunk = struct.pack('4sihHIIHH',
'fmt ',
16, # Fmt Sub Chunk Size
1, # AudioFormat (1 = PCM)
num_channels,
sample_rate,
byte_rate,
block_align,
bits_per_sample)
output = chunk_hdr + format_chunk + 'data' + pcm_data.tostring()
return output
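# Minimal usage sketch (an assumption about the caller, not part of this module): pcm2wav()
# returns a complete in-memory WAV byte string, so a few samples of mono PCM could be wrapped
# and written to disk roughly like this. The sample values are hypothetical.
#
# if __name__ == '__main__':
#     samples = [0, 8000, 16000, 8000, 0, -8000, -16000, -8000]  # hypothetical PCM data
#     wav_bytes = pcm2wav(samples, num_channels=1, sample_rate=8000)
#     with open('example.wav', 'wb') as out:
#         out.write(wav_bytes)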
|
"""
Tests for the BSE main API
"""
import pytest
import basis_set_exchange as bse
from .common_testvars import *
@pytest.mark.slow
@pytest.mark.parametrize('basis_name', bs_names)
@pytest.mark.parametrize('fmt', bs_write_formats)
@pytest.mark.parametrize('unc_gen', true_false)
@pytest.mark.parametrize('unc_seg', true_false)
@pytest.mark.parametrize('unc_spdf', true_false)
@pytest.mark.parametrize('make_gen', true_false)
@pytest.mark.parametrize('opt_gen', true_false)
def test_slow_get_basis_1(basis_name, fmt, unc_gen, unc_seg, unc_spdf, make_gen, opt_gen):
"""Tests getting all basis sets in all formats
and with every combination of option
Also tests memoization
"""
this_metadata = bs_metadata[basis_name]
for ver in this_metadata['versions'].keys():
bs1 = bse.get_basis(basis_name,
fmt=fmt,
version=ver,
uncontract_general=unc_gen,
uncontract_segmented=unc_seg,
uncontract_spdf=unc_spdf,
make_general=make_gen,
optimize_general=opt_gen,
header=False)
bs2 = bse.get_basis(basis_name,
fmt=fmt,
version=ver,
uncontract_general=unc_gen,
uncontract_segmented=unc_seg,
uncontract_spdf=unc_spdf,
make_general=make_gen,
optimize_general=opt_gen,
header=False)
assert bs1 == bs2
|
from datetime import timedelta
import numpy as np
import pytest
import pandas as pd
from pandas import (
Float64Index,
Index,
Int64Index,
NaT,
Timedelta,
TimedeltaIndex,
timedelta_range,
)
import pandas._testing as tm
class TestTimedeltaIndex:
def test_astype_object(self):
idx = timedelta_range(start="1 days", periods=4, freq="D", name="idx")
expected_list = [
Timedelta("1 days"),
Timedelta("2 days"),
Timedelta("3 days"),
Timedelta("4 days"),
]
result = idx.astype(object)
expected = Index(expected_list, dtype=object, name="idx")
tm.assert_index_equal(result, expected)
assert idx.tolist() == expected_list
def test_astype_object_with_nat(self):
idx = TimedeltaIndex(
[timedelta(days=1), timedelta(days=2), NaT, timedelta(days=4)], name="idx"
)
expected_list = [
Timedelta("1 days"),
Timedelta("2 days"),
NaT,
Timedelta("4 days"),
]
result = idx.astype(object)
expected = Index(expected_list, dtype=object, name="idx")
tm.assert_index_equal(result, expected)
assert idx.tolist() == expected_list
def test_astype(self):
# GH 13149, GH 13209
idx = TimedeltaIndex([1e14, "NaT", NaT, np.NaN], name="idx")
result = idx.astype(object)
expected = Index(
[Timedelta("1 days 03:46:40")] + [NaT] * 3, dtype=object, name="idx"
)
tm.assert_index_equal(result, expected)
with tm.assert_produces_warning(FutureWarning):
result = idx.astype(int)
expected = Int64Index(
[100000000000000] + [-9223372036854775808] * 3, dtype=np.int64, name="idx"
)
tm.assert_index_equal(result, expected)
result = idx.astype(str)
expected = Index([str(x) for x in idx], name="idx")
tm.assert_index_equal(result, expected)
rng = timedelta_range("1 days", periods=10)
with tm.assert_produces_warning(FutureWarning):
result = rng.astype("i8")
tm.assert_index_equal(result, Index(rng.asi8))
tm.assert_numpy_array_equal(rng.asi8, result.values)
def test_astype_uint(self):
arr = timedelta_range("1H", periods=2)
expected = pd.UInt64Index(
np.array([3600000000000, 90000000000000], dtype="uint64")
)
with tm.assert_produces_warning(FutureWarning):
tm.assert_index_equal(arr.astype("uint64"), expected)
tm.assert_index_equal(arr.astype("uint32"), expected)
def test_astype_timedelta64(self):
# GH 13149, GH 13209
idx = TimedeltaIndex([1e14, "NaT", NaT, np.NaN])
result = idx.astype("timedelta64")
expected = Float64Index([1e14] + [np.NaN] * 3, dtype="float64")
tm.assert_index_equal(result, expected)
result = idx.astype("timedelta64[ns]")
tm.assert_index_equal(result, idx)
assert result is not idx
result = idx.astype("timedelta64[ns]", copy=False)
tm.assert_index_equal(result, idx)
assert result is idx
@pytest.mark.parametrize("dtype", [float, "datetime64", "datetime64[ns]"])
def test_astype_raises(self, dtype):
# GH 13149, GH 13209
idx = TimedeltaIndex([1e14, "NaT", NaT, np.NaN])
msg = "Cannot cast TimedeltaArray to dtype"
with pytest.raises(TypeError, match=msg):
idx.astype(dtype)
def test_astype_category(self):
obj = timedelta_range("1H", periods=2, freq="H")
result = obj.astype("category")
expected = pd.CategoricalIndex([Timedelta("1H"), Timedelta("2H")])
tm.assert_index_equal(result, expected)
result = obj._data.astype("category")
expected = expected.values
tm.assert_categorical_equal(result, expected)
def test_astype_array_fallback(self):
obj = timedelta_range("1H", periods=2)
result = obj.astype(bool)
expected = Index(np.array([True, True]))
tm.assert_index_equal(result, expected)
result = obj._data.astype(bool)
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
#
# Keep in sync with master/buildbot/__init__.py
#
# We can't put this method in utility modules, because they import dependency packages
#
from __future__ import division
from __future__ import print_function
import datetime
import os
import re
from subprocess import PIPE
from subprocess import STDOUT
from subprocess import Popen
def gitDescribeToPep440(version):
# git describe produces versions of the form: v0.9.8-20-gf0f45ca
# where 20 is the number of commits since the last release, and gf0f45ca is the short commit id preceded by 'g'
# we parse this and transform it into a PEP 440-style release version 0.9.9.dev20 (increment the last digit and add dev before 20)
VERSION_MATCH = re.compile(r'(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)(\.post(?P<post>\d+))?(-(?P<dev>\d+))?(-g(?P<commit>.+))?')
v = VERSION_MATCH.search(version)
if v:
major = int(v.group('major'))
minor = int(v.group('minor'))
patch = int(v.group('patch'))
if v.group('dev'):
patch += 1
dev = int(v.group('dev'))
return "{0}.{1}.{2}-dev{3}".format(major, minor, patch, dev)
if v.group('post'):
return "{0}.{1}.{2}.post{3}".format(major, minor, patch, v.group('post'))
return "{0}.{1}.{2}".format(major, minor, patch)
return v
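# A few hedged examples of the transformation above, derived from the regex and format strings
# in this function (illustrative only):
#
#   gitDescribeToPep440('v0.9.8-20-gf0f45ca')  -> '0.9.9-dev20'   (dev build after a tag)
#   gitDescribeToPep440('v0.9.8')              -> '0.9.8'         (exact tag)
#   gitDescribeToPep440('v0.9.8.post1')        -> '0.9.8.post1'   (post release)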
def mTimeVersion(init_file):
cwd = os.path.dirname(os.path.abspath(init_file))
m = 0
for root, dirs, files in os.walk(cwd):
for f in files:
m = max(os.path.getmtime(os.path.join(root, f)), m)
d = datetime.datetime.utcfromtimestamp(m)
return d.strftime("%Y.%m.%d")
def getVersionFromArchiveId(git_archive_id='$Format:%ct %d$'):
""" Extract the tag if a source is from git archive.
When source is exported via `git archive`, the git_archive_id init value is modified
and placeholders are expanded to the "archived" revision:
%ct: committer date, UNIX timestamp
%d: ref names, like the --decorate option of git-log
See man gitattributes(5) and git-log(1) (PRETTY FORMATS) for more details.
"""
# mangle the magic string to make sure it is not replaced by git archive
if not git_archive_id.startswith('$For''mat:'):
# source was modified by git archive, try to parse the version from
# the value of git_archive_id
match = re.search(r'tag:\s*v([^,)]+)', git_archive_id)
if match:
# archived revision is tagged, use the tag
return gitDescribeToPep440(match.group(1))
# archived revision is not tagged, use the commit date
tstamp = git_archive_id.strip().split()[0]
d = datetime.datetime.utcfromtimestamp(int(tstamp))
return d.strftime('%Y.%m.%d')
return None
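# Hedged illustration of the two cases handled above (the timestamp and refs are made up):
# an expanded archive id such as '1514772000  (HEAD -> master, tag: v1.0.0)' yields the tagged
# version '1.0.0', while '1514772000  (HEAD -> master)' falls back to the commit date '2018.01.01'.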
def getVersion(init_file):
"""
Return BUILDBOT_VERSION environment variable, content of VERSION file, git
tag or 'latest'
"""
try:
return os.environ['BUILDBOT_VERSION']
except KeyError:
pass
try:
cwd = os.path.dirname(os.path.abspath(init_file))
fn = os.path.join(cwd, 'VERSION')
with open(fn) as f:
return f.read().strip()
except IOError:
pass
version = getVersionFromArchiveId()
if version is not None:
return version
try:
p = Popen(['git', 'describe', '--tags', '--always'], stdout=PIPE, stderr=STDOUT, cwd=cwd)
out = p.communicate()[0]
if (not p.returncode) and out:
v = gitDescribeToPep440(str(out))
if v:
return v
except OSError:
pass
try:
# if we really can't find the version, we use the date of modification of the most recent file
# docker hub builds cannot use git describe
return mTimeVersion(init_file)
except Exception:
# bummer. lets report something
return "latest"
version = getVersion(__file__)
__version__ = version
|
from __future__ import unicode_literals
from .subtitles import SubtitlesInfoExtractor
from .common import InfoExtractor
from ..utils import (
fix_xml_ampersands,
parse_duration,
qualities,
strip_jsonp,
unified_strdate,
url_basename,
)
class NPOBaseIE(SubtitlesInfoExtractor):
def _get_token(self, video_id):
token_page = self._download_webpage(
'http://ida.omroep.nl/npoplayer/i.js',
video_id, note='Downloading token')
return self._search_regex(
r'npoplayer\.token = "(.+?)"', token_page, 'token')
class NPOIE(NPOBaseIE):
IE_NAME = 'npo.nl'
_VALID_URL = r'https?://(?:www\.)?npo\.nl/(?!live|radio)[^/]+/[^/]+/(?P<id>[^/?]+)'
_TESTS = [
{
'url': 'http://www.npo.nl/nieuwsuur/22-06-2014/VPWON_1220719',
'md5': '4b3f9c429157ec4775f2c9cb7b911016',
'info_dict': {
'id': 'VPWON_1220719',
'ext': 'm4v',
'title': 'Nieuwsuur',
'description': 'Dagelijks tussen tien en elf: nieuws, sport en achtergronden.',
'upload_date': '20140622',
},
},
{
'url': 'http://www.npo.nl/de-mega-mike-mega-thomas-show/27-02-2009/VARA_101191800',
'md5': 'da50a5787dbfc1603c4ad80f31c5120b',
'info_dict': {
'id': 'VARA_101191800',
'ext': 'm4v',
'title': 'De Mega Mike & Mega Thomas show',
'description': 'md5:3b74c97fc9d6901d5a665aac0e5400f4',
'upload_date': '20090227',
'duration': 2400,
},
},
{
'url': 'http://www.npo.nl/tegenlicht/25-02-2013/VPWON_1169289',
'md5': 'f8065e4e5a7824068ed3c7e783178f2c',
'info_dict': {
'id': 'VPWON_1169289',
'ext': 'm4v',
'title': 'Tegenlicht',
'description': 'md5:d6476bceb17a8c103c76c3b708f05dd1',
'upload_date': '20130225',
'duration': 3000,
},
},
{
'url': 'http://www.npo.nl/de-nieuwe-mens-deel-1/21-07-2010/WO_VPRO_043706',
'info_dict': {
'id': 'WO_VPRO_043706',
'ext': 'wmv',
'title': 'De nieuwe mens - Deel 1',
'description': 'md5:518ae51ba1293ffb80d8d8ce90b74e4b',
'duration': 4680,
},
'params': {
# mplayer mms download
'skip_download': True,
}
},
# non asf in streams
{
'url': 'http://www.npo.nl/hoe-gaat-europa-verder-na-parijs/10-01-2015/WO_NOS_762771',
'md5': 'b3da13de374cbe2d5332a7e910bef97f',
'info_dict': {
'id': 'WO_NOS_762771',
'ext': 'mp4',
'title': 'Hoe gaat Europa verder na Parijs?',
},
},
]
def _real_extract(self, url):
video_id = self._match_id(url)
return self._get_info(video_id)
def _get_info(self, video_id):
metadata = self._download_json(
'http://e.omroep.nl/metadata/aflevering/%s' % video_id,
video_id,
# We have to remove the javascript callback
transform_source=strip_jsonp,
)
token = self._get_token(video_id)
formats = []
pubopties = metadata.get('pubopties')
if pubopties:
quality = qualities(['adaptive', 'wmv_sb', 'h264_sb', 'wmv_bb', 'h264_bb', 'wvc1_std', 'h264_std'])
for format_id in pubopties:
format_info = self._download_json(
'http://ida.omroep.nl/odi/?prid=%s&puboptions=%s&adaptive=yes&token=%s'
% (video_id, format_id, token),
video_id, 'Downloading %s JSON' % format_id)
if format_info.get('error_code', 0) or format_info.get('errorcode', 0):
continue
streams = format_info.get('streams')
if streams:
video_info = self._download_json(
streams[0] + '&type=json',
video_id, 'Downloading %s stream JSON' % format_id)
else:
video_info = format_info
video_url = video_info.get('url')
if not video_url:
continue
if format_id == 'adaptive':
formats.extend(self._extract_m3u8_formats(video_url, video_id))
else:
formats.append({
'url': video_url,
'format_id': format_id,
'quality': quality(format_id),
})
streams = metadata.get('streams')
if streams:
for i, stream in enumerate(streams):
stream_url = stream.get('url')
if not stream_url:
continue
if '.asf' not in stream_url:
formats.append({
'url': stream_url,
'quality': stream.get('kwaliteit'),
})
continue
asx = self._download_xml(
stream_url, video_id,
'Downloading stream %d ASX playlist' % i,
transform_source=fix_xml_ampersands)
ref = asx.find('./ENTRY/Ref')
if ref is None:
continue
video_url = ref.get('href')
if not video_url:
continue
formats.append({
'url': video_url,
'ext': stream.get('formaat', 'asf'),
'quality': stream.get('kwaliteit'),
})
self._sort_formats(formats)
subtitles = {}
if metadata.get('tt888') == 'ja':
subtitles['nl'] = 'http://e.omroep.nl/tt888/%s' % video_id
if self._downloader.params.get('listsubtitles', False):
self._list_available_subtitles(video_id, subtitles)
return
subtitles = self.extract_subtitles(video_id, subtitles)
return {
'id': video_id,
'title': metadata['titel'],
'description': metadata['info'],
'thumbnail': metadata.get('images', [{'url': None}])[-1]['url'],
'upload_date': unified_strdate(metadata.get('gidsdatum')),
'duration': parse_duration(metadata.get('tijdsduur')),
'formats': formats,
'subtitles': subtitles,
}
class NPOLiveIE(NPOBaseIE):
IE_NAME = 'npo.nl:live'
_VALID_URL = r'https?://(?:www\.)?npo\.nl/live/(?P<id>.+)'
_TEST = {
'url': 'http://www.npo.nl/live/npo-1',
'info_dict': {
'id': 'LI_NEDERLAND1_136692',
'display_id': 'npo-1',
'ext': 'mp4',
'title': 're:^Nederland 1 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
'description': 'Livestream',
'is_live': True,
},
'params': {
'skip_download': True,
}
}
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
live_id = self._search_regex(
r'data-prid="([^"]+)"', webpage, 'live id')
metadata = self._download_json(
'http://e.omroep.nl/metadata/%s' % live_id,
display_id, transform_source=strip_jsonp)
token = self._get_token(display_id)
formats = []
streams = metadata.get('streams')
if streams:
for stream in streams:
stream_type = stream.get('type').lower()
if stream_type == 'ss':
continue
stream_info = self._download_json(
'http://ida.omroep.nl/aapi/?stream=%s&token=%s&type=jsonp'
% (stream.get('url'), token),
display_id, 'Downloading %s JSON' % stream_type)
if stream_info.get('error_code', 0) or stream_info.get('errorcode', 0):
continue
stream_url = self._download_json(
stream_info['stream'], display_id,
'Downloading %s URL' % stream_type,
transform_source=strip_jsonp)
if stream_type == 'hds':
f4m_formats = self._extract_f4m_formats(stream_url, display_id)
# f4m downloader downloads only piece of live stream
for f4m_format in f4m_formats:
f4m_format['preference'] = -1
formats.extend(f4m_formats)
elif stream_type == 'hls':
formats.extend(self._extract_m3u8_formats(stream_url, display_id, 'mp4'))
else:
formats.append({
'url': stream_url,
})
self._sort_formats(formats)
return {
'id': live_id,
'display_id': display_id,
'title': self._live_title(metadata['titel']),
'description': metadata['info'],
'thumbnail': metadata.get('images', [{'url': None}])[-1]['url'],
'formats': formats,
'is_live': True,
}
class NPORadioIE(InfoExtractor):
IE_NAME = 'npo.nl:radio'
_VALID_URL = r'https?://(?:www\.)?npo\.nl/radio/(?P<id>[^/]+)/?$'
_TEST = {
'url': 'http://www.npo.nl/radio/radio-1',
'info_dict': {
'id': 'radio-1',
'ext': 'mp3',
'title': 're:^NPO Radio 1 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
'is_live': True,
},
'params': {
'skip_download': True,
}
}
@staticmethod
def _html_get_attribute_regex(attribute):
return r'{0}\s*=\s*\'([^\']+)\''.format(attribute)
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
title = self._html_search_regex(
self._html_get_attribute_regex('data-channel'), webpage, 'title')
stream = self._parse_json(
self._html_search_regex(self._html_get_attribute_regex('data-streams'), webpage, 'data-streams'),
video_id)
codec = stream.get('codec')
return {
'id': video_id,
'url': stream['url'],
'title': self._live_title(title),
'acodec': codec,
'ext': codec,
'is_live': True,
}
class NPORadioFragmentIE(InfoExtractor):
IE_NAME = 'npo.nl:radio:fragment'
_VALID_URL = r'https?://(?:www\.)?npo\.nl/radio/[^/]+/fragment/(?P<id>\d+)'
_TEST = {
'url': 'http://www.npo.nl/radio/radio-5/fragment/174356',
'md5': 'dd8cc470dad764d0fdc70a9a1e2d18c2',
'info_dict': {
'id': '174356',
'ext': 'mp3',
'title': 'Jubileumconcert Willeke Alberti',
},
}
def _real_extract(self, url):
audio_id = self._match_id(url)
webpage = self._download_webpage(url, audio_id)
title = self._html_search_regex(
r'href="/radio/[^/]+/fragment/%s" title="([^"]+)"' % audio_id,
webpage, 'title')
audio_url = self._search_regex(
r"data-streams='([^']+)'", webpage, 'audio url')
return {
'id': audio_id,
'url': audio_url,
'title': title,
}
class TegenlichtVproIE(NPOIE):
IE_NAME = 'tegenlicht.vpro.nl'
_VALID_URL = r'https?://tegenlicht\.vpro\.nl/afleveringen/.*?'
_TESTS = [
{
'url': 'http://tegenlicht.vpro.nl/afleveringen/2012-2013/de-toekomst-komt-uit-afrika.html',
'md5': 'f8065e4e5a7824068ed3c7e783178f2c',
'info_dict': {
'id': 'VPWON_1169289',
'ext': 'm4v',
'title': 'Tegenlicht',
'description': 'md5:d6476bceb17a8c103c76c3b708f05dd1',
'upload_date': '20130225',
},
},
]
def _real_extract(self, url):
name = url_basename(url)
webpage = self._download_webpage(url, name)
urn = self._html_search_meta('mediaurn', webpage)
info_page = self._download_json(
'http://rs.vpro.nl/v2/api/media/%s.json' % urn, name)
return self._get_info(info_page['mid'])
|
"""
Copyright 2020 Kat Holt
Copyright 2020 Ryan Wick (rrwick@gmail.com)
https://github.com/katholt/Kleborate/
This file is part of Kleborate. Kleborate is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by the Free Software Foundation,
either version 3 of the License, or (at your option) any later version. Kleborate is distributed in
the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
details. You should have received a copy of the GNU General Public License along with Kleborate. If
not, see <http://www.gnu.org/licenses/>.
"""
import os
import subprocess
import sys
import tempfile
def get_kaptive_paths():
this_file = os.path.realpath(__file__)
kaptive_dir = os.path.join(os.path.dirname(os.path.dirname(this_file)), 'kaptive')
if not os.path.isdir(kaptive_dir):
sys.exit('Error: could not find Kaptive directory. Did you git clone with --recursive?')
kaptive_py = os.path.join(kaptive_dir, 'kaptive.py')
if not os.path.isfile(kaptive_py):
sys.exit('Error: could not find kaptive.py')
db_dir = os.path.join(kaptive_dir, 'reference_database')
kaptive_k_db = os.path.join(db_dir, 'Klebsiella_k_locus_primary_reference.gbk')
if not os.path.isfile(kaptive_k_db):
sys.exit('Error: could not find Klebsiella_k_locus_primary_reference.gbk')
kaptive_o_db = os.path.join(db_dir, 'Klebsiella_o_locus_primary_reference.gbk')
if not os.path.isfile(kaptive_o_db):
sys.exit('Error: could not find Klebsiella_o_locus_primary_reference.gbk')
return kaptive_py, kaptive_k_db, kaptive_o_db
def get_kaptive_results(locus_type, kaptive_py, kaptive_db, contigs, args):
assert locus_type == 'K' or locus_type == 'O'
headers = ['K_locus', 'K_locus_confidence', 'K_locus_problems', 'K_locus_identity',
'K_locus_missing_genes']
if locus_type == 'O':
headers = [x.replace('K_locus', 'O_locus') for x in headers]
headers.append('O_type')
if (args.kaptive_k and locus_type == 'K') or (args.kaptive_o and locus_type == 'O'):
if locus_type == 'K':
outfile = args.kaptive_k_outfile
else: # locus_type == 'O':
outfile = args.kaptive_o_outfile
kaptive_results = run_kaptive(kaptive_py, kaptive_db, contigs, outfile,
args.min_kaptive_confidence, one_thread=False)
if kaptive_results is None:
kaptive_results = run_kaptive(kaptive_py, kaptive_db, contigs, outfile,
args.min_kaptive_confidence, one_thread=True)
if locus_type == 'O':
o_locus = kaptive_results[0]
kaptive_results.append(get_o_type(o_locus))
assert len(headers) == len(kaptive_results)
return dict(zip(headers, kaptive_results))
else:
return {}
def run_kaptive(kaptive_py, kaptive_db, contigs, output_file, min_confidence, one_thread):
thread_option = ' --threads 1' if one_thread else ''
with tempfile.TemporaryDirectory() as tmp_dir:
kaptive_prefix = tmp_dir + '/kaptive'
kaptive_table = kaptive_prefix + '_table.txt'
p = subprocess.Popen(sys.executable + ' ' + kaptive_py +
' -a ' + contigs +
' -k ' + kaptive_db +
' -o ' + kaptive_prefix +
' --verbose --no_seq_out --no_json' +
thread_option,
shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
# Make sure the output is a string, whether we are in Python 2 or 3.
if not isinstance(stdout, str):
stdout = stdout.decode()
if not isinstance(stderr, str):
stderr = stderr.decode()
# If we hit the BLAST threading problem, return None and we'll try again with one thread.
if 'tblastn crashed!' in stderr and not one_thread:
return None
if p.returncode != 0:
if stderr:
sys.exit('Error: Kaptive failed to run with the following error:\n' +
stderr.strip())
else:
sys.exit('Error: Kaptive failed to run')
locus, confidence, problems, identity = None, None, None, None
missing = []
# Parse the required information from the Kaptive verbose output.
output_lines = stdout.splitlines()
missing_gene_lines = False
for line in output_lines:
if 'Best match locus:' in line:
locus = line.split('Best match locus:')[1].strip()
if 'Match confidence:' in line:
confidence = line.split('Match confidence:')[1].strip()
if 'Problems:' in line:
problems = line.split('Problems:')[1].strip()
if problems == 'None':
problems = problems.lower()
if 'Identity:' in line:
identity = line.split('Identity:')[1].strip()
if 'Other genes in locus:' in line:
missing_gene_lines = False
if missing_gene_lines:
missing_gene = line.strip()
if missing_gene:
missing.append(missing_gene)
if 'Missing expected genes:' in line:
missing_gene_lines = True
if output_file: # if we are saving Kaptive results to file...
with open(kaptive_table, 'rt') as f:
kaptive_table_lines = f.readlines()
assert len(kaptive_table_lines) == 2
if not os.path.isfile(output_file):
with open(output_file, 'wt') as f:
f.write(kaptive_table_lines[0]) # write header line
with open(output_file, 'at') as f:
f.write(kaptive_table_lines[1]) # write data line
if locus is None or confidence is None or problems is None or identity is None:
sys.exit('Error: Kaptive failed to produce the expected output')
if not confidence_meets_threshold(confidence, min_confidence):
locus = f'unknown (best match = {locus})'
return [locus, confidence, problems, identity, ','.join(missing)]
def confidence_meets_threshold(confidence, min_confidence):
"""
Returns True if the confidence level meets or exceeds the minimum confidence level.
"""
min_confidence = min_confidence.replace('_', ' ')
scores = {'None': 0, 'Low': 1, 'Good': 2, 'High': 3, 'Very high': 4, 'Perfect': 5}
return scores[confidence] >= scores[min_confidence]
def get_o_type(o_locus):
"""
This function returns an O type using the O locus. In many cases, they are the same, except for:
* loci O1v1 and O1v2 = type O1
* loci O2v1 and O2v2 = type O2
* loci O1/O2v1 and O1/O2v2 = 'unknown'
"""
if 'unknown' in o_locus.lower():
return 'unknown'
if 'O1/O2' in o_locus:
return 'unknown'
if 'v1' in o_locus:
return o_locus.replace('v1', '')
if 'v2' in o_locus:
return o_locus.replace('v2', '')
return o_locus
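# Small illustrative checks for the two helpers above (hedged; the inputs are hypothetical but
# follow the confidence levels and locus naming described in the docstrings):
#
# assert confidence_meets_threshold('Very high', 'Good')      # 4 >= 2
# assert not confidence_meets_threshold('Low', 'Very_high')   # 1 < 4; underscores map to spaces
# assert get_o_type('O1v2') == 'O1'
# assert get_o_type('O1/O2v1') == 'unknown'
# assert get_o_type('O3') == 'O3'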
|
from django.conf import settings
from django.contrib import messages
from auth.forms.settings.avatar import AvatarForm
from django.contrib.auth import get_user_model
from django.contrib.auth.mixins import LoginRequiredMixin
from django.core.exceptions import PermissionDenied
from django.shortcuts import get_object_or_404, redirect
from django.urls import reverse
from django.utils.translation import ugettext as _
from django.views.generic.base import TemplateView
from PIL import Image
from io import BytesIO
size = 32, 32
class AvatarView(LoginRequiredMixin, TemplateView):
template_name = 'accounts/settings/avatar.html'
def get_context_data(self, **kwargs):
return {
'form': self.form,
'user': self.user,
}
def dispatch(self, request, pk, *args, **kwargs):
self.user = get_object_or_404(get_user_model().objects.all(), pk=pk)
if not request.user.is_superuser and request.user != self.user:
raise PermissionDenied(_('You do not have permission to edit this user.'))
self.form = AvatarForm(request.POST or None, request.FILES or None)
return super(AvatarView, self).dispatch(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
if self.form.is_valid():
try:
avatar_content = request.FILES['avatar'].read()
avatar = Image.open(BytesIO(avatar_content))
avatar = avatar.resize(size)
avatar_out = BytesIO()
avatar.save(avatar_out, format='PNG')
avatar_out.seek(0)
manager = settings.PLATFORM_MANAGER()
manager.upload_avatar(avatar_out, self.user)
except Exception as exception:
print(exception)
messages.success(request, _('The avatar has been updated.'))
return redirect(reverse('users:avatar', args=[self.user.pk]))
return self.render_to_response(self.get_context_data())
|
__all__ = ('Queue', 'PriorityQueue', 'LifoQueue', 'QueueFull', 'QueueEmpty')
import collections
import heapq
from . import events
from . import locks
class QueueEmpty(Exception):
"""Raised when Queue.get_nowait() is called on an empty Queue."""
pass
class QueueFull(Exception):
"""Raised when the Queue.put_nowait() method is called on a full Queue."""
pass
class Queue:
"""A queue, useful for coordinating producer and consumer coroutines.
If maxsize is less than or equal to zero, the queue size is infinite. If it
is an integer greater than 0, then "await put()" will block when the
queue reaches maxsize, until an item is removed by get().
Unlike the standard library Queue, you can reliably know this Queue's size
with qsize(), since your single-threaded asyncio application won't be
interrupted between calling qsize() and doing an operation on the Queue.
"""
def __init__(self, maxsize=0, *, loop=None):
if loop is None:
self._loop = events.get_event_loop()
else:
self._loop = loop
self._maxsize = maxsize
# Futures.
self._getters = collections.deque()
# Futures.
self._putters = collections.deque()
self._unfinished_tasks = 0
self._finished = locks.Event(loop=self._loop)
self._finished.set()
self._init(maxsize)
# These three are overridable in subclasses.
def _init(self, maxsize):
self._queue = collections.deque()
def _get(self):
return self._queue.popleft()
def _put(self, item):
self._queue.append(item)
# End of the overridable methods.
def _wakeup_next(self, waiters):
# Wake up the next waiter (if any) that isn't cancelled.
while waiters:
waiter = waiters.popleft()
if not waiter.done():
waiter.set_result(None)
break
def __repr__(self):
return f'<{type(self).__name__} at {id(self):#x} {self._format()}>'
def __str__(self):
return f'<{type(self).__name__} {self._format()}>'
def _format(self):
result = f'maxsize={self._maxsize!r}'
if getattr(self, '_queue', None):
result += f' _queue={list(self._queue)!r}'
if self._getters:
result += f' _getters[{len(self._getters)}]'
if self._putters:
result += f' _putters[{len(self._putters)}]'
if self._unfinished_tasks:
result += f' tasks={self._unfinished_tasks}'
return result
def qsize(self):
"""Number of items in the queue."""
return len(self._queue)
@property
def maxsize(self):
"""Number of items allowed in the queue."""
return self._maxsize
def empty(self):
"""Return True if the queue is empty, False otherwise."""
return not self._queue
def full(self):
"""Return True if there are maxsize items in the queue.
Note: if the Queue was initialized with maxsize=0 (the default),
then full() is never True.
"""
if self._maxsize <= 0:
return False
else:
return self.qsize() >= self._maxsize
async def put(self, item):
"""Put an item into the queue.
Put an item into the queue. If the queue is full, wait until a free
slot is available before adding item.
"""
while self.full():
putter = self._loop.create_future()
self._putters.append(putter)
try:
await putter
except:
putter.cancel() # Just in case putter is not done yet.
try:
# Clean self._putters from canceled putters.
self._putters.remove(putter)
except ValueError:
# The putter could be removed from self._putters by a
# previous get_nowait call.
pass
if not self.full() and not putter.cancelled():
# We were woken up by get_nowait(), but can't take
# the call. Wake up the next in line.
self._wakeup_next(self._putters)
raise
return self.put_nowait(item)
def put_nowait(self, item):
"""Put an item into the queue without blocking.
If no free slot is immediately available, raise QueueFull.
"""
if self.full():
raise QueueFull
self._put(item)
self._unfinished_tasks += 1
self._finished.clear()
self._wakeup_next(self._getters)
async def get(self):
"""Remove and return an item from the queue.
If queue is empty, wait until an item is available.
"""
while self.empty():
getter = self._loop.create_future()
self._getters.append(getter)
try:
await getter
except:
getter.cancel() # Just in case getter is not done yet.
try:
# Clean self._getters from canceled getters.
self._getters.remove(getter)
except ValueError:
# The getter could be removed from self._getters by a
# previous put_nowait call.
pass
if not self.empty() and not getter.cancelled():
# We were woken up by put_nowait(), but can't take
# the call. Wake up the next in line.
self._wakeup_next(self._getters)
raise
return self.get_nowait()
def get_nowait(self):
"""Remove and return an item from the queue.
Return an item if one is immediately available, else raise QueueEmpty.
"""
if self.empty():
raise QueueEmpty
item = self._get()
self._wakeup_next(self._putters)
return item
def task_done(self):
"""Indicate that a formerly enqueued task is complete.
Used by queue consumers. For each get() used to fetch a task,
a subsequent call to task_done() tells the queue that the processing
on the task is complete.
If a join() is currently blocking, it will resume when all items have
been processed (meaning that a task_done() call was received for every
item that had been put() into the queue).
Raises ValueError if called more times than there were items placed in
the queue.
"""
if self._unfinished_tasks <= 0:
raise ValueError('task_done() called too many times')
self._unfinished_tasks -= 1
if self._unfinished_tasks == 0:
self._finished.set()
async def join(self):
"""Block until all items in the queue have been gotten and processed.
The count of unfinished tasks goes up whenever an item is added to the
queue. The count goes down whenever a consumer calls task_done() to
indicate that the item was retrieved and all work on it is complete.
When the count of unfinished tasks drops to zero, join() unblocks.
"""
if self._unfinished_tasks > 0:
await self._finished.wait()
class PriorityQueue(Queue):
"""A subclass of Queue; retrieves entries in priority order (lowest first).
Entries are typically tuples of the form: (priority number, data).
"""
def _init(self, maxsize):
self._queue = []
def _put(self, item, heappush=heapq.heappush):
heappush(self._queue, item)
def _get(self, heappop=heapq.heappop):
return heappop(self._queue)
class LifoQueue(Queue):
"""A subclass of Queue that retrieves most recently added entries first."""
def _init(self, maxsize):
self._queue = []
def _put(self, item):
self._queue.append(item)
def _get(self):
return self._queue.pop()
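# Hedged usage sketch (not part of this module): the docstrings above describe the
# producer/consumer pattern these classes support; with the asyncio package available
# it would look roughly like this.
#
# import asyncio
#
# async def producer(queue):
#     for i in range(3):
#         await queue.put(i)      # blocks while the queue is full
#     await queue.join()          # wait until every item has been task_done()
#
# async def consumer(queue):
#     while True:
#         item = await queue.get()
#         print('got', item)
#         queue.task_done()
#
# async def main():
#     queue = asyncio.Queue(maxsize=2)
#     consumer_task = asyncio.ensure_future(consumer(queue))
#     await producer(queue)
#     consumer_task.cancel()
#
# asyncio.get_event_loop().run_until_complete(main())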
|
# Copyright 2018 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Indicates document sanity-check validation failure pre- or post-rendering.
DOCUMENT_SANITY_CHECK_FAILURE = 'D001'
# Indicates document post-rendering validation failure.
DOCUMENT_POST_RENDERING_FAILURE = 'D002'
class ValidationMessage(object):
"""ValidationMessage per UCP convention:
https://github.com/att-comdev/ucp-integration/blob/master/docs/source/api-conventions.rst#output-structure # noqa
Construction of ``ValidationMessage`` message:
:param string message: Validation failure message.
:param boolean error: True or False, if this is an error message.
:param string name: Identifying name of the validation.
:param string level: The severity of validation result, as "Error",
"Warning", or "Info"
:param string schema: The schema of the document being validated.
:param string doc_name: The name of the document being validated.
:param string diagnostic: Information about what lead to the message,
or details for resolution.
"""
def __init__(self,
message='Document validation error.',
error=True,
name='Deckhand validation error',
level='Error',
doc_schema='',
doc_name='',
doc_layer='',
diagnostic=''):
level = 'Error' if error else 'Info'
self._output = {
'message': message,
'error': error,
'name': name,
'documents': [],
'level': level,
'kind': self.__class__.__name__
}
self._output['documents'].append(
dict(schema=doc_schema, name=doc_name, layer=doc_layer))
if diagnostic:
self._output.update(diagnostic=diagnostic)
def format_message(self):
"""Return ``ValidationMessage`` message.
:returns: The ``ValidationMessage`` for the Validation API response.
:rtype: dict
"""
return self._output
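# Hedged usage sketch (illustrative values only): constructing a ValidationMessage and
# retrieving the dict that would be embedded in a Validation API response. The document
# name, schema, and layer below are hypothetical.
#
# msg = ValidationMessage(
#     message='Schema validation failed for document.',
#     error=True,
#     name=DOCUMENT_SANITY_CHECK_FAILURE,
#     doc_schema='deckhand/DataSchema/v1',
#     doc_name='example-doc',
#     doc_layer='site',
#     diagnostic='Check the document against its schema.')
# result = msg.format_message()
# # result['level'] == 'Error'; result['documents'][0]['name'] == 'example-doc'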
|