prefix stringlengths 0 918k | middle stringlengths 0 812k | suffix stringlengths 0 962k |
|---|---|---|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .job_information_basic import JobInformationBasic
class JobInformation(JobInformationBasic):
    """The extended Data Lake Analytics job information properties returned when
    retrieving a specific job.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar job_id: the job's unique identifier (a GUID).
    :vartype job_id: str
    :param name: the friendly name of the job.
    :type name: str
    :param type: the job type of the current job (Hive, USql, or Scope (for
     internal use only)). Possible values include: 'USql', 'Hive', 'Scope'
    :type type: str or ~azure.mgmt.datalake.analytics.job.models.JobType
    :ivar submitter: the user or account that submitted the job.
    :vartype submitter: str
    :param degree_of_parallelism: the degree of parallelism used for this job.
     This must be greater than 0, if set to less than 0 it will default to 1.
     Default value: 1 .
    :type degree_of_parallelism: int
    :param priority: the priority value for the current job. Lower numbers
     have a higher priority. By default, a job has a priority of 1000. This
     must be greater than 0.
    :type priority: int
    :ivar submit_time: the time the job was submitted to the service.
    :vartype submit_time: datetime
    :ivar start_time: the start time of the job.
    :vartype start_time: datetime
    :ivar end_time: the completion time of the job.
    :vartype end_time: datetime
    :ivar state: the job state. When the job is in the Ended state, refer to
     Result and ErrorMessage for details. Possible values include: 'Accepted',
     'Compiling', 'Ended', 'New', 'Queued', 'Running', 'Scheduling',
     'Starting', 'Paused', 'WaitingForCapacity'
    :vartype state: str or ~azure.mgmt.datalake.analytics.job.models.JobState
    :ivar result: the result of job execution or the current result of the
     running job. Possible values include: 'None', 'Succeeded', 'Cancelled',
     'Failed'
    :vartype result: str or
     ~azure.mgmt.datalake.analytics.job.models.JobResult
    :ivar log_folder: the log folder path to use in the following format:
     adl://<accountName>.azuredatalakestore.net/system/jobservice/jobs/Usql/2016/03/13/17/18/5fe51957-93bc-4de0-8ddc-c5a4753b068b/logs/.
    :vartype log_folder: str
    :param log_file_patterns: the list of log file name patterns to find in
     the logFolder. '*' is the only matching character allowed. Example format:
     jobExecution*.log or *mylog*.txt
    :type log_file_patterns: list[str]
    :param related: the recurring job relationship information properties.
    :type related:
     ~azure.mgmt.datalake.analytics.job.models.JobRelationshipProperties
    :param tags: the key-value pairs used to add additional metadata to the
     job information. (Only for use internally with Scope job type.)
    :type tags: dict[str, str]
    :ivar error_message: the error message details for the job, if the job
     failed.
    :vartype error_message:
     list[~azure.mgmt.datalake.analytics.job.models.JobErrorDetails]
    :ivar state_audit_records: the job state audit records, indicating when
     various operations have been performed on this job.
    :vartype state_audit_records:
     list[~azure.mgmt.datalake.analytics.job.models.JobStateAuditRecord]
    :param properties: the job specific properties.
    :type properties: ~azure.mgmt.datalake.analytics.job.models.JobProperties
    """

    # Server-populated (readonly) fields are rejected on requests;
    # name/type/properties are mandatory when submitting a job.
    _validation = {
        'job_id': {'readonly': True},
        'name': {'required': True},
        'type': {'required': True},
        'submitter': {'readonly': True},
        'submit_time': {'readonly': True},
        'start_time': {'readonly': True},
        'end_time': {'readonly': True},
        'state': {'readonly': True},
        'result': {'readonly': True},
        'log_folder': {'readonly': True},
        'error_message': {'readonly': True},
        'state_audit_records': {'readonly': True},
        'properties': {'required': True},
    }

    # Maps Python attribute names to wire (JSON) keys and msrest types.
    _attribute_map = {
        'job_id': {'key': 'jobId', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'JobType'},
        'submitter': {'key': 'submitter', 'type': 'str'},
        'degree_of_parallelism': {'key': 'degreeOfParallelism', 'type': 'int'},
        'priority': {'key': 'priority', 'type': 'int'},
        'submit_time': {'key': 'submitTime', 'type': 'iso-8601'},
        'start_time': {'key': 'startTime', 'type': 'iso-8601'},
        'end_time': {'key': 'endTime', 'type': 'iso-8601'},
        'state': {'key': 'state', 'type': 'JobState'},
        'result': {'key': 'result', 'type': 'JobResult'},
        'log_folder': {'key': 'logFolder', 'type': 'str'},
        'log_file_patterns': {'key': 'logFilePatterns', 'type': '[str]'},
        'related': {'key': 'related', 'type': 'JobRelationshipProperties'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'error_message': {'key': 'errorMessage', 'type': '[JobErrorDetails]'},
        'state_audit_records': {'key': 'stateAuditRecords', 'type': '[JobStateAuditRecord]'},
        'properties': {'key': 'properties', 'type': 'JobProperties'},
    }

    def __init__(self, name, type, properties, degree_of_parallelism=1, priority=None, log_file_patterns=None, related=None, tags=None):
        super(JobInformation, self).__init__(name=name, type=type, degree_of_parallelism=degree_of_parallelism, priority=priority, log_file_patterns=log_file_patterns, related=related, tags=tags)
        # Readonly fields start as None; the server fills them on responses.
        self.error_message = None
        self.state_audit_records = None
        self.properties = properties
|
def neighbors(node, all_nodes):
    """Return the set of valid orthogonal and diagonal neighbours of *node*.

    A diagonal cell is only included when both of the orthogonal cells
    flanking it are themselves in bounds, which prevents paths from
    cutting corners through blocked/out-of-grid cells.

    :param node: (x, y) grid coordinate.
    :param all_nodes: 2D grid (sequence of sequences) defining the bounds.
    :return: set of (x, y) tuples.
    """
    def _in_bounds(x, y):
        # Explicit bound check. The original try/except IndexError probe
        # silently accepted negative indices (Python wraps them around),
        # yielding bogus neighbours such as (-1, 0) for edge nodes.
        return 0 <= x < len(all_nodes) and 0 <= y < len(all_nodes[x])

    result = set()
    # Orthogonal neighbours first: they gate the diagonal ones below.
    for dx, dy in ((0, 1), (1, 0), (-1, 0), (0, -1)):
        nx, ny = node[0] + dx, node[1] + dy
        if _in_bounds(nx, ny):
            result.add((nx, ny))
    for dx, dy in ((1, 1), (1, -1), (-1, 1), (-1, -1)):
        nx, ny = node[0] + dx, node[1] + dy
        if not _in_bounds(nx, ny):
            continue
        # Allow the diagonal only if an adjacent cell in the same row AND
        # one in the same column are already accepted (same rule as the
        # original elif-chain, expressed as membership tests).
        has_x = (nx - 1, ny) in result or (nx + 1, ny) in result
        has_y = (nx, ny - 1) in result or (nx, ny + 1) in result
        if has_x and has_y:
            result.add((nx, ny))
    return result
def get_score(c, node, goal, heightmap):
    """Score candidate *c* for a step taken from *node* toward *goal*.

    Diagonal moves cost 14 and orthogonal moves 10 (classic grid
    weights), a Manhattan-distance heuristic toward the goal is added
    scaled by 5, and the terrain height at the candidate's cell is
    subtracted as a penalty.
    """
    is_diagonal = c.node[0] != node[0] and c.node[1] != node[1]
    step_cost = 14 if is_diagonal else 10
    heuristic = (abs(goal[0] - c.node[0]) + abs(goal[1] - c.node[1])) * 5
    penalty = heightmap[c.node[0]][c.node[1]]
    # print(score, "penalty:", penalty)
    return c.score + step_cost + heuristic - penalty
class Candidate:
    """A node under consideration during pathfinding.

    Keeps a reference to the candidate it was reached from so the final
    path can be rebuilt by walking the ``lastnode`` chain backwards.
    """

    def __init__(self, node, lastnode=None):
        self.node = node          # (x, y) grid position
        self.lastnode = lastnode  # parent Candidate; None for the start
        self.score = 0            # filled in later via get_score()
        self.visited = False      # bookkeeping flag (unused in visible code)
def get_path(all_nodes, node, goal, heightmap):
    # Grid pathfinding from `node` to `goal` over `all_nodes`.
    # Returns (path, True) on success, or ([start], False) if the goal
    # is unreachable.
    open_list = []
    closed_list = []
    path_list = []
    final_list = []
    start = Candidate(node, None)
    current = Candidate(node, start)
    count, current.count = 0, 0
    while current.node != goal:
        candidates = []
        for n in neighbors(current.node, all_nodes):
            c = Candidate(n, current)
            candidates.append(c)
        for c in candidates:
            # Skip cells that are already expanded or already queued.
            closed = False
            for cc in closed_list:
                if c.node == cc.node:
                    closed = True
            for co in open_list:
                if co.node == c.node:
                    closed = True
            if not closed:
                # Tag with an insertion counter; this is what ordering
                # below actually uses.
                c.count = count
                count += 1
                c.score = get_score(c, current.node, goal, heightmap)
                open_list.append(c)
        # NOTE(review): the open list is sorted by insertion count, not by
        # score, so expansion is FIFO (breadth-first-like) even though a
        # score is computed above — confirm whether sorting by score was
        # intended for true best-first/A* behaviour.
        open_list = sorted(
            open_list,
            key=lambda x: x.count,
            reverse=False
        )
        if len(open_list) > 0:
            # count += 1
            next_c = open_list[0]
            closed_list.append(next_c)
            current = next_c
            open_list.remove(next_c)
        else:
            print("Goal not found. Node {0} broke it.".format(node))
            break
    # Rebuild the path by walking parent links backwards from the goal
    # (or from wherever the search stopped), then reverse it.
    nextnode = current  # goal
    path_list = [nextnode.node]
    while nextnode.node != start.node:
        nextnode = nextnode.lastnode
        path_list.append(nextnode.node)
    for c in reversed(path_list):
        final_list.append(c)
    if len(final_list) > 0:
        print("Pathfinding successful!")
        print("Steps: {0}".format(len(final_list)))
        return final_list, True
    else:
        print("ERROR: Pathfinding went wrong, returning to start.")
        final_list = [start]
        return final_list, False
|
import demistomock as demisto
from CommonServerPython import BaseClient
import BitSightForSecurityPerformanceManagement as bitsight
from datetime import datetime
def test_get_companies_guid_command(mocker):
    """Each company returned by the API should surface its guid in outputs."""
    api_response = {
        "my_company": {"guid": "123"},
        "companies": [{"name": "abc", "shortname": "abc", "guid": "123"}],
    }
    mocker.patch.object(BaseClient, '_http_request', return_value=api_response)
    client = bitsight.Client(base_url='https://test.com')
    _, outputs, _ = bitsight.get_companies_guid_command(client)
    assert outputs[0].get('guid') == '123'
def test_get_company_details_command(mocker):
    """The command should pass the guid through and return company details."""
    mocker.patch.object(BaseClient, '_http_request', return_value={"name": "abc"})
    client = bitsight.Client(base_url='https://test.com')
    _, outputs, _ = bitsight.get_company_details_command(client, {'guid': '123'})
    assert outputs.get('name') == 'abc'
def test_get_company_findings_command(mocker):
    """Findings returned by the API should appear in the command outputs."""
    query_args = {'guid': '123', 'first_seen': '2021-01-01', 'last_seen': '2021-01-02'}
    api_response = {"results": [{"severity": "severe"}]}
    mocker.patch.object(BaseClient, '_http_request', return_value=api_response)
    client = bitsight.Client(base_url='https://test.com')
    _, outputs, _ = bitsight.get_company_findings_command(client, query_args)
    assert outputs[0].get('severity') == 'severe'
def test_fetch_incidents(mocker):
    """fetch_incidents should convert API findings into incident dicts.

    (Reconstructed: the final assertion's expected literal was corrupted
    in the source.)
    """
    inp_args = {'guid': '123', 'findings_min_severity': 'severe', 'findings_grade': 'WARN',
                'findings_asset_category': 'high', 'risk_vector': 'breaches,dkim'}
    client = bitsight.Client(base_url='https://test.com')
    mocker.patch.object(demisto, 'params', return_value=inp_args)
    res = {"results": [{"severity": "severe", "first_seen": "2021-02-01", "temporary_id": "temp1"}]}
    mocker.patch.object(BaseClient, '_http_request', return_value=res)
    last_run, events = bitsight.fetch_incidents(client=client,
                                                last_run={'time': '2020-12-01T01:01:01Z'},
                                                params=inp_args)
    # The new last_run timestamp is set to "now" by fetch_incidents.
    curr_date = datetime.now().strftime('%Y-%m-%d')
    assert curr_date in last_run['time']
    assert events == [{'name': 'BitSight Finding - temp1', 'occurred': '2021-02-01T00:00:00Z',
                       'rawJSON': '{"severity": "severe", "first_seen": "2021-02-01", "temporary_id": "temp1"}'}]
|
# Copyright (c) 2012-2021, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
from typing import Optional

from .aws import Action as BaseAction
from .aws import BaseARN
# IAM service name and action/ARN prefix for Amazon Mechanical Turk.
service_name = "Amazon Mechanical Turk"
prefix = "mechanicalturk"
class Action(BaseAction):
    """An IAM policy action in the ``mechanicalturk`` namespace."""

    # PEP 484: a default of None requires an explicit Optional annotation;
    # the bare `action: str = None` form is deprecated implicit-Optional.
    def __init__(self, action: Optional[str] = None) -> None:
        super().__init__(prefix, action)
class ARN(BaseARN):
    """An ARN for a Mechanical Turk resource (service fixed to ``mechanicalturk``)."""

    def __init__(self, resource: str = "", region: str = "", account: str = "") -> None:
        super().__init__(
            service=prefix, resource=resource, region=region, account=account
        )
# One module-level Action constant per Mechanical Turk IAM action.
# (Reconstructed: the AssociateQualificationWithWorker and
# CreateHITWithHITType lines were corrupted in the source.)
AcceptQualificationRequest = Action("AcceptQualificationRequest")
ApproveAssignment = Action("ApproveAssignment")
ApproveRejectedAssignment = Action("ApproveRejectedAssignment")
AssignQualification = Action("AssignQualification")
AssociateQualificationWithWorker = Action("AssociateQualificationWithWorker")
BlockWorker = Action("BlockWorker")
ChangeHITTypeOfHIT = Action("ChangeHITTypeOfHIT")
CreateAdditionalAssignmentsForHIT = Action("CreateAdditionalAssignmentsForHIT")
CreateHIT = Action("CreateHIT")
CreateHITType = Action("CreateHITType")
CreateHITWithHITType = Action("CreateHITWithHITType")
CreateQualificationType = Action("CreateQualificationType")
CreateWorkerBlock = Action("CreateWorkerBlock")
DeleteHIT = Action("DeleteHIT")
DeleteQualificationType = Action("DeleteQualificationType")
DeleteWorkerBlock = Action("DeleteWorkerBlock")
DisableHIT = Action("DisableHIT")
DisassociateQualificationFromWorker = Action("DisassociateQualificationFromWorker")
DisposeHIT = Action("DisposeHIT")
DisposeQualificationType = Action("DisposeQualificationType")
ExtendHIT = Action("ExtendHIT")
ForceExpireHIT = Action("ForceExpireHIT")
GetAccountBalance = Action("GetAccountBalance")
GetAssignment = Action("GetAssignment")
GetAssignmentsForHIT = Action("GetAssignmentsForHIT")
GetBlockedWorkers = Action("GetBlockedWorkers")
GetBonusPayments = Action("GetBonusPayments")
GetFileUploadURL = Action("GetFileUploadURL")
GetHIT = Action("GetHIT")
GetHITsForQualificationType = Action("GetHITsForQualificationType")
GetQualificationRequests = Action("GetQualificationRequests")
GetQualificationScore = Action("GetQualificationScore")
GetQualificationType = Action("GetQualificationType")
GetQualificationsForQualificationType = Action("GetQualificationsForQualificationType")
GetRequesterStatistic = Action("GetRequesterStatistic")
GetRequesterWorkerStatistic = Action("GetRequesterWorkerStatistic")
GetReviewResultsForHIT = Action("GetReviewResultsForHIT")
GetReviewableHITs = Action("GetReviewableHITs")
GrantBonus = Action("GrantBonus")
GrantQualification = Action("GrantQualification")
ListAssignmentsForHIT = Action("ListAssignmentsForHIT")
ListBonusPayments = Action("ListBonusPayments")
ListHITs = Action("ListHITs")
ListHITsForQualificationType = Action("ListHITsForQualificationType")
ListQualificationRequests = Action("ListQualificationRequests")
ListQualificationTypes = Action("ListQualificationTypes")
ListReviewPolicyResultsForHIT = Action("ListReviewPolicyResultsForHIT")
ListReviewableHITs = Action("ListReviewableHITs")
ListWorkerBlocks = Action("ListWorkerBlocks")
ListWorkersWithQualificationType = Action("ListWorkersWithQualificationType")
NotifyWorkers = Action("NotifyWorkers")
RegisterHITType = Action("RegisterHITType")
RejectAssignment = Action("RejectAssignment")
RejectQualificationRequest = Action("RejectQualificationRequest")
RevokeQualification = Action("RevokeQualification")
SearchHITs = Action("SearchHITs")
SearchQualificationTypes = Action("SearchQualificationTypes")
SendBonus = Action("SendBonus")
SendTestEventNotification = Action("SendTestEventNotification")
SetHITAsReviewing = Action("SetHITAsReviewing")
SetHITTypeNotification = Action("SetHITTypeNotification")
UnblockWorker = Action("UnblockWorker")
UpdateExpirationForHIT = Action("UpdateExpirationForHIT")
UpdateHITReviewStatus = Action("UpdateHITReviewStatus")
UpdateHITTypeOfHIT = Action("UpdateHITTypeOfHIT")
UpdateNotificationSettings = Action("UpdateNotificationSettings")
UpdateQualificationScore = Action("UpdateQualificationScore")
UpdateQualificationType = Action("UpdateQualificationType")
|
# Copyright 2015 Cisco Systems, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Import every model module (for side effects) so that all tables are
# registered on the shared declarative metadata before it is returned.
# (Reconstructed: the first and n1kv import lines were corrupted in the
# source.)
from networking_cisco import backwards_compatibility as bc
from networking_cisco.plugins.cisco.db.device_manager import (  # noqa
    hd_models)
from networking_cisco.plugins.cisco.db.l3 import (  # noqa
    ha_db)
from networking_cisco.plugins.cisco.db.l3 import (  # noqa
    l3_models)
from networking_cisco.plugins.ml2.drivers.cisco.n1kv import (  # noqa
    n1kv_models)
from networking_cisco.plugins.ml2.drivers.cisco.nexus import (  # noqa
    nexus_models_v2)
from networking_cisco.plugins.ml2.drivers.cisco.ucsm import (  # noqa
    ucsm_model)


def get_metadata():
    """Return the SQLAlchemy metadata with all model tables registered."""
    return bc.model_base.BASEV2.metadata
|
subject_id = node._id
attribute_type_node = None
if attr_key in attr_type_dict:
attribute_type_node = attr_type_dict[attr_key]
else:
attribute_type_node = node_collection.one({
'_type': "AttributeType",
'$or': [{
'name': {'$regex': "^" + attr_key + "$", '$options': 'i'}
}, {
'altnames': {'$regex': "^" + attr_key + "$", '$options': 'i'}
}]
})
attr_type_dict[attr_key] = attribute_type_node
object_value = json_document[key]
ga_node = None
info_message = "\n Creating GAttribute (" + node.name + " -- " + attribute_type_node.name + " -- " + str(json_document[key]) + ") ...\n"
log_list.append(info_message)
ga_node = create_gattribute(subject_id, attribute_type_node, object_value)
except Exception as e:
error_message = "\n While creating GAttribute (" + attr_key + ") for "+gsystem_type_name+"'s GSystem ("+json_document['name']+") got following error...\n " + str(e) + "\n"
log_list.append(error_message)
print error_message # Keep it!
# To break outer for loop as key found
break
else:
error_message = "\n DataNotFound: No data found for field ("+attr_key+") while creating GSystem (" + gsystem_type_name + " -- " + node.name + ") !!!\n"
log_list.append(error_message)
if is_relation:
relation_list.append(key)
if not relation_list:
# No possible relations defined for this node
info_message = "\n "+gsystem_type_name+" ("+node.name+"): No possible relations defined for this node !!!\n"
log_list.append(info_message)
else:
gst_possible_relations_dict = node.get_possible_relations(gsystem_type_id)
# Write code for setting relations
for key in relation_list:
is_relation = True
for rel_key, rel_value in gst_possible_relations_dict.iteritems():
if key == rel_value['altnames'].lower() or key == rel_key.lower():
is_relation = False
if json_document[key]:
# Here semi-colon(';') is used instead of comma(',')
# Beacuse one of the value may contain comma(',') which causes problem in finding required value in database
try:
if ";" not in json_document[key]:
# Necessary to inform perform_eval_type() that handle this value as list
json_document[key] = "\""+json_document[key]+"\", "
else:
formatted_value = ""
for v in json_document[key].split(";"):
formatted_value += "\""+v.strip(" ")+"\", "
json_document[key] = formatted_value
info_message = "\n For GRelation parsing content | key: " + rel_key + " -- " + json_document[key]
log_list.append(info_message)
perform_eval_type(key, json_document, "GSystem", "GSystem")
# for right_subject_id in json_document[key]:
subject_id = node._id
# Here we are appending list of ObjectIds of GSystemType's type_of field
# along with the ObjectId of GSystemType's itself (whose GSystem is getting created)
# This is because some of the RelationType's are holding Base class's ObjectId
# and not that of the Derived one's
# Delibrately keeping GSystemType's ObjectId first in the list
# And hence, used $in operator in the query!
rel_subject_type = []
rel_subject_type.append(gsystem_type_id)
if gsystem_type_node.type_of:
rel_subject_type.ex | tend(gsystem_type_node.type_of)
relation_type_no | de = None
if rel_key in rel_type_dict:
relation_type_node = rel_type_dict[rel_key]
else:
relation_type_node = node_collection.one({
'_type': "RelationType",
'$or': [{
'name': {'$regex': "^" + rel_key + "$", '$options': 'i'}
}, {
'altnames': {'$regex': "^" + rel_key + "$", '$options': 'i'}
}],
'subject_type': {'$in': rel_subject_type}
})
rel_type_dict[rel_key] = relation_type_node
info_message = "\n Creating GRelation ("+node.name+" -- "+rel_key+" -- "+str(json_document[key])+") ...\n"
log_list.append(info_message)
gr_node = create_grelation(subject_id, relation_type_node, json_document[key])
except Exception as e:
error_message = "\n While creating GRelation (" + rel_key + ") for "+gsystem_type_name+"'s GSystem ("+json_document['name']+") got following error...\n" + str(e) + "\n"
log_list.append(error_message)
pass
if college_gst._id in relation_type_node.object_type:
# Fetch college node's group id
# Append it to node's group_set
node_group_set = node.group_set
is_group_set_changed = False
# Iterate through each college
# Find it's corresponding group's ObjectId
# Append it to node's group_set
for each in json_document[key]:
each = ObjectId(each)
each_str = str(each)
if each_str in college_dict:
college_group_id = college_dict[each_str]
|
from flask import Flask
from flask import make_response
from flask import request
from flask import render_template
from flask import redirect
from flask import url_for
import logging
from logging.handlers import RotatingFileHandler
# WSGI application object; the route handlers below register on it.
app = Flask(__name__)
@app.route('/')
def index():
    """Show the landing page, or bounce anonymous visitors to /login."""
    app.logger.info('index')
    username = request.cookies.get('username')
    # `is None` instead of `== None` (PEP 8); cookies.get returns None
    # when the cookie is absent.
    if username is None:
        return redirect(url_for('login'))
    return render_template('index.html', username=username)
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Render the login form (GET) and process submitted credentials (POST).

    (Reconstructed: two lines of this handler were corrupted in the source.)
    """
    app.logger.info('login')
    if request.method == 'POST':
        if validate_credentials(request.form['username'], request.form['password']):
            resp = make_response(redirect(url_for('index')))
            # NOTE(review): the cookie is neither signed nor HttpOnly;
            # consider Flask's session for real authentication state.
            resp.set_cookie('username', request.form['username'])
            return resp
        else:
            return render_template('login.html', error='Invalid username or password')
    else:
        return render_template('login.html')
@app.route('/logout')
def logout():
    """Clear the login cookie and send the user back to the index page."""
    app.logger.info('logout')
    response = make_response(redirect(url_for('index')))
    # Expiring the cookie immediately effectively deletes it client-side.
    response.set_cookie('username', '', expires=0)
    return response
def validate_credentials(username, password):
    """Accept the login when the password equals the username.

    NOTE(review): placeholder logic for this demo app only — this is not
    real authentication and must never ship as-is.
    """
    return password == username
if __name__ == '__main__':
    # Log INFO and above to a small rotating file next to the app,
    # then start the development server.
    handler = RotatingFileHandler('todo.log', maxBytes=10000, backupCount=1)
    handler.setLevel(logging.INFO)
    app.logger.addHandler(handler)
    app.run()
|
class EmptyResult(object):
    '''
    Null Object pattern to prevent Null reference errors
    when there is no result
    '''

    def __init__(self):
        # Neutral defaults matching the attributes HapiError reads.
        self.status = 0
        self.body = ''
        self.msg = ''
        self.reason = ''

    def __nonzero__(self):
        # Always falsy so `if result:` treats a missing result like None.
        return False

    # Python 3 calls __bool__ for truth testing; alias it so the null
    # object is falsy on both Python majors.
    __bool__ = __nonzero__
class HapiError(ValueError):
    """Any problems get thrown as HapiError exceptions with the relevant info inside"""

    # Template used by __unicode__ to render the full request/response
    # context of a failed API call for debugging.
    as_str_template = u'''
---- request ----
{method} {host}{url}, [timeout={timeout}]
---- body ----
{body}
---- headers ----
{headers}
---- result ----
{result_status}
---- body -----
{result_body}
---- headers -----
{result_headers}
---- reason ----
{result_reason}
---- trigger error ----
{error}
'''

    def __init__(self, result, request, err=None):
        # The HTTP reason phrase becomes the exception message when present.
        super(HapiError,self).__init__(result and result.reason or "Unknown Reason")
        if result == None:
            # Substitute the null object so attribute access never explodes.
            self.result = EmptyResult()
        else:
            self.result = result
        if request == None:
            request = {}
        self.request = request
        self.err = err

    def __str__(self):
        # Python 2: str() must return bytes; non-ASCII degrades to '?'.
        return self.__unicode__().encode('ascii', 'replace')

    def __unicode__(self):
        # Collect request/result details into one dict and render the
        # template above with them.
        params = {}
        request_keys = ('method', 'host', 'url', 'data', 'headers', 'timeout', 'body')
        result_attrs = ('status', 'reason', 'msg', 'body', 'headers')
        params['error'] = self.err
        for key in request_keys:
            params[key] = self.request.get(key)
        for attr in result_attrs:
            params['result_%s' % attr] = getattr(self.result, attr, '')
        params = self._dict_vals_to_unicode(params)
        return self.as_str_template.format(**params)

    def _dict_vals_to_unicode(self, data):
        # Coerce every value to unicode (Python 2) so .format() cannot
        # raise UnicodeDecodeError on mixed str/unicode input.
        unicode_data = {}
        for key, val in data.items():
            if not isinstance(val, basestring):
                unicode_data[key] = unicode(val)
            elif not isinstance(val, unicode):
                unicode_data[key] = unicode(val, 'utf8', 'ignore')
            else:
                unicode_data[key] = val
        return unicode_data
# Create more specific error cases, to make filtering errors easier.
# Each subclass carries no extra behaviour; the type alone conveys the
# HTTP failure category.
class HapiBadRequest(HapiError):
    '''Error wrapper for most 40X results and 501 results'''

class HapiNotFound(HapiError):
    '''Error wrapper for 404 and 410 results'''

class HapiTimeout(HapiError):
    '''Wrapper for socket timeouts, sslerror, and 504'''

class HapiUnauthorized(HapiError):
    '''Wrapper for 401 Unauthorized errors'''

class HapiServerError(HapiError):
    '''Wrapper for most 500 errors'''
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys

from bs4 import BeautifulSoup as bs

# (Reconstructed: the base_processor and spider_request import lines were
# corrupted in the source.)
from sasila.system_normal.spider.spider_core import SpiderCore
from sasila.system_normal.pipeline.console_pipeline import ConsolePipeline
from sasila.system_normal.processor.base_processor import BaseProcessor
from sasila.system_normal.downloader.http.spider_request import Request

if sys.version_info < (3, 0):
    # Python 2 only: restore setdefaultencoding and force UTF-8 so that
    # non-ASCII page content does not raise UnicodeDecodeError.
    reload(sys)
    sys.setdefaultencoding('utf-8')
class FirstProcessor(BaseProcessor):
    """Fetches the mzitu.com front page and emits one item per linked URL."""

    spider_id = 'test'
    spider_name = 'test'
    allowed_domains = ['mzitu.com']
    start_requests = [Request(url="http://www.mzitu.com/")]

    def process(self, response):
        # Parse the fetched HTML and yield an item for every anchor that
        # actually carries an href attribute.
        soup = bs(response.m_response.content, 'lxml')
        for anchor in soup.select("a"):
            if "href" in anchor.attrs:
                yield {'url': response.nice_join(anchor["href"])}
# if __name__ == '__main__':
# spider = SpiderCore(FirstProcessor()).set_pipeline(ConsolePipeline()).start()
|
= [x * 1.0 for x in members] + [0.0, 0.0, 1.0]
return tuple.__new__(Affi | ne, mat3x3)
else:
raise TypeError(
| "Expected 6 number args, got %s" % len(members))
@classmethod
def identity(cls):
    """Return the identity transform.

    :rtype: Affine
    """
    # Returns the shared module-level `identity` instance instead of
    # allocating a new transform.
    return identity
@classmethod
def translation(cls, offset):
    """Create a translation transform from an offset vector.

    :param offset: Translation offset.
    :type offset: :class:`~planar.Vec2`
    :rtype: Affine
    """
    tx, ty = offset
    matrix = (1.0, 0.0, tx,
              0.0, 1.0, ty,
              0.0, 0.0, 1.0)
    return tuple.__new__(cls, matrix)
@classmethod
def scale(cls, scaling):
    """Create a scaling transform from a scalar or vector.

    :param scaling: The scaling factor. A scalar value will
        scale in both dimensions equally. A vector scaling
        value scales the dimensions independently.
    :type scaling: float or :class:`~planar.Vec2`
    :rtype: Affine
    """
    try:
        # Scalar argument: uniform scale on both axes.
        sx = sy = float(scaling)
    except TypeError:
        # Vector-like argument: independent per-axis factors.
        sx, sy = scaling
    return tuple.__new__(cls,
        (sx, 0.0, 0.0,
         0.0, sy, 0.0,
         0.0, 0.0, 1.0))
@classmethod
def shear(cls, x_angle=0, y_angle=0):
    """Create a shear transform along one or both axes.

    :param x_angle: Angle in degrees to shear along the x-axis.
    :type x_angle: float
    :param y_angle: Angle in degrees to shear along the y-axis.
    :type y_angle: float
    :rtype: Affine
    """
    # The shear factors are the tangents of the given angles.
    shear_x = math.tan(math.radians(x_angle))
    shear_y = math.tan(math.radians(y_angle))
    matrix = (1.0, shear_y, 0.0,
              shear_x, 1.0, 0.0,
              0.0, 0.0, 1.0)
    return tuple.__new__(cls, matrix)
@classmethod
def rotation(cls, angle, pivot=None):
    """Create a rotation transform at the specified angle,
    optionally about the specified pivot point.

    :param angle: Rotation angle in degrees
    :type angle: float
    :param pivot: Point to rotate about, if omitted the
        rotation is about the origin.
    :type pivot: :class:`~planar.Vec2`
    :rtype: Affine
    """
    # cos_sin_deg is a module-level helper returning (cos, sin) of the
    # angle given in degrees.
    ca, sa = cos_sin_deg(angle)
    if pivot is None:
        return tuple.__new__(cls,
            (ca, sa, 0.0,
             -sa, ca, 0.0,
             0.0, 0.0, 1.0))
    else:
        # Rotation about a pivot = translate(-pivot), rotate,
        # translate(+pivot), pre-composed into the third column.
        px, py = pivot
        return tuple.__new__(cls,
            (ca, sa, px - px*ca + py*sa,
             -sa, ca, py - px*sa - py*ca,
             0.0, 0.0, 1.0))
def __str__(self):
    """Concise string representation."""
    # Renders all nine matrix entries, one row per line.
    return ("|% .2f,% .2f,% .2f|\n"
            "|% .2f,% .2f,% .2f|\n"
            "|% .2f,% .2f,% .2f|") % self
def __repr__(self):
    """Precise string representation."""
    # Only the first six entries are shown; the bottom row is implied.
    return ("Affine(%r, %r, %r,\n"
            "       %r, %r, %r)") % self[:6]
@cached_property
def determinant(self):
    """The determinant of the transform matrix. This value
    is equal to the area scaling factor when the transform
    is applied to a shape.
    """
    a, b, c, d, e, f, g, h, i = self
    # The bottom row is always (0, 0, 1), so the 3x3 determinant reduces
    # to the 2x2 minor a*e - b*d.
    return a*e - b*d
@cached_property
def is_identity(self):
    """True if this transform equals the identity matrix,
    within rounding limits.
    """
    # Identity check by object identity first (cheap), then by
    # element-wise comparison within EPSILON.
    return self is identity or self.almost_equals(identity)
@cached_property
def is_rectilinear(self):
    """True if the transform is rectilinear, i.e., whether a shape would
    remain axis-aligned, within rounding limits, after applying the
    transform.
    """
    a, b, c, d, e, f, g, h, i = self
    # Either the off-diagonal pair or the diagonal pair must vanish
    # (pure scale/translate, or scale combined with a 90-degree rotation).
    return ((abs(a) < planar.EPSILON and abs(e) < planar.EPSILON)
        or (abs(d) < planar.EPSILON and abs(b) < planar.EPSILON))
@cached_property
def is_conformal(self):
    """True if the transform is conformal, i.e., if angles between points
    are preserved after applying the transform, within rounding limits.
    This implies that the transform has no effective shear.
    """
    a, b, c, d, e, f, g, h, i = self
    # The two axis vectors (a, d) and (b, e) must be orthogonal:
    # their dot product is (approximately) zero.
    return abs(a*b + d*e) < planar.EPSILON
@cached_property
def is_orthonormal(self):
    """True if the transform is orthonormal, which means that the
    transform represents a rigid motion, which has no effective scaling or
    shear. Mathematically, this means that the axis vectors of the
    transform matrix are perpendicular and unit-length. Applying an
    orthonormal transform to a shape always results in a congruent shape.
    """
    a, b, c, d, e, f, g, h, i = self
    # Conformal (perpendicular axes) plus both axis vectors having
    # squared length 1 within EPSILON.
    return (self.is_conformal
        and abs(1.0 - (a*a + d*d)) < planar.EPSILON
        and abs(1.0 - (b*b + e*e)) < planar.EPSILON)
@cached_property
def is_degenerate(self):
    """True if this transform is degenerate, which means that it will
    collapse a shape to an effective area of zero. Degenerate transforms
    cannot be inverted.
    """
    # A (near-)zero determinant means zero effective area.
    return abs(self.determinant) < planar.EPSILON
@property
def column_vectors(self):
    """The values of the transform as three 2D column vectors"""
    # Columns of the 3x3 matrix, dropping the constant bottom row.
    a, b, c, d, e, f, _, _, _ = self
    return planar.Vec2(a, d), planar.Vec2(b, e), planar.Vec2(c, f)
def almost_equals(self, other):
    """Compare transforms for approximate equality.

    :param other: Transform being compared.
    :type other: Affine
    :return: True if absolute difference between each element
        of each respective tranform matrix < ``EPSILON``.
    """
    # Only the first six entries matter; the bottom row is fixed.
    return all(
        abs(self[i] - other[i]) < planar.EPSILON
        for i in range(6))
def __gt__(self, other):
    # Affine transforms have no meaningful ordering; delegate to the
    # module-level helper that raises consistently.
    return assert_unorderable(self, other)

__ge__ = __lt__ = __le__ = __gt__

# Override from base class. We do not support entrywise
# addition, subtraction or scalar multiplication because
# the result is not an affine transform
def __add__(self, other):
    raise TypeError("Operation not supported")

__iadd__ = __add__
def __mul__(self, other):
    """Apply the transform using matrix multiplication, creating a
    resulting object of the same type. A transform may be applied to
    another transform, a vector, vector array, or shape.

    :param other: The object to transform.
    :type other: Affine, :class:`~planar.Vec2`,
        :class:`~planar.Vec2Array`, :class:`~planar.Shape`
    :rtype: Same as ``other``
    """
    sa, sb, sc, sd, se, sf, _, _, _ = self
    if isinstance(other, Affine):
        # Transform composition: 3x3 matrix product, with the constant
        # bottom row (0, 0, 1) written out directly.
        oa, ob, oc, od, oe, of, _, _, _ = other
        return tuple.__new__(Affine,
            (sa*oa + sb*od, sa*ob + sb*oe, sa*oc + sb*of + sc,
             sd*oa + se*od, sd*ob + se*oe, sd*oc + se*of + sf,
             0.0, 0.0, 1.0))
    elif hasattr(other, 'from_points'):
        # Point/vector array: transform every point and rebuild an
        # object of the same type via its from_points factory.
        Point = planar.Point
        points = getattr(other, 'points', other)
        try:
            return other.from_points(
                Point(px*sa + py*sd + sc, px*sb + py*se + sf)
                for px, py in points)
        except TypeError:
            return NotImplemented
    else:
        # Single vector-like: must unpack to exactly two coordinates.
        try:
            vx, vy = other
        except Exception:
            return NotImplemented
        return planar.Vec2(vx*sa + vy*sd + sc, vx*sb + vy*se + sf)
def __rmul__(self, other):
    # We should not be called if other is an affine instance
    # This is just a guarantee, since we would potentially
    # return the wrong answer in that case
    assert not isinstance(other, Affine)
    # Vector * matrix is treated the same as matrix * vector here.
    return self.__mul__(other)
def __imul__(self, other):
if isinstance(other, Affine) or isinstance(other, planar.Vec2):
return self.__mul__(ot |
'''
# print type(self)
if type(i)==int:
return self.table[i]
elif type(i)==str:
#assume that they are searching by column, i.e.
#table['col_name']
#this allows access by column and then row
ind=self.attributes.index(i)
col=[]
for row_no in range(0, len(self.table)-1):
col.append(self.table[row_no][ind])
return tuple(col)
def build_where_clause(where_params_list, where_values_list):
    """Build a ``WHERE a='x'  AND b='y'`` clause from parallel lists.

    Returns the empty string when either list is None, matching the
    optional-filter behaviour expected by the query builders below.

    WARNING: values are interpolated directly into the SQL text; never
    use with untrusted input (SQL injection risk) — parameterized
    queries are the safe alternative.
    """
    # `is None` instead of `!= None` (PEP 8 identity comparison).
    if where_params_list is None or where_values_list is None:
        return ""
    where_clause = " WHERE "
    where_clause += " %s='%s' " % (str(where_params_list[0]), str(where_values_list[0]))
    for i in range(1, len(where_values_list)):
        where_clause += " AND %s='%s' " % (str(where_params_list[i]), str(where_values_list[i]))
    return where_clause
def build_select_query(tablename, select_params_list, where_params_list=None, where_values_list=None):
    """Build a SELECT statement over *tablename* for the given columns.

    The optional WHERE clause is delegated to build_where_clause; values
    are interpolated directly into the SQL text (trusted input only).
    """
    column_sql = " %s" % select_params_list[0]
    column_sql += "".join(", %s" % col for col in select_params_list[1:])
    query = "SELECT " + column_sql
    query += " FROM %s " % tablename
    query += build_where_clause(where_params_list=where_params_list, where_values_list=where_values_list)
    return query + ";"
def build_update_query(tablename, update_params_list, update_values_list, where_params_list=None, where_values_list=None):
    """Compose "UPDATE <table> SET col='val', ... [WHERE ...];".

    Columns and values are paired positionally; iteration is driven by
    update_values_list.  Values are spliced in as quoted strings (no
    parameterization) -- trusted input only.
    """
    assignments = " %s='%s' " % (str(update_params_list[0]), str(update_values_list[0]))
    for position in range(1, len(update_values_list)):
        assignments += ", %s='%s' " % (str(update_params_list[position]), str(update_values_list[position]))
    update_query = "UPDATE " + tablename + " SET " + assignments
    update_query += build_where_clause(where_params_list=where_params_list,
                                       where_values_list=where_values_list)
    return update_query + ";"
def build_insert_query(tablename, insert_params_list, tuple_values_list):
    """Compose "INSERT INTO <table>(cols) VALUES (...), (...);".

    Bug fixes:
    - every value tuple after the first was emitted without its closing
      parenthesis, producing invalid SQL for multi-row inserts;
    - the column loop skipped any column equal to the first one, so
      duplicate column names were silently dropped.

    WARNING: values are spliced in as quoted strings (no
    parameterization) -- trusted input only.
    """
    insert_query = "INSERT INTO %s(" % tablename + "%s" % insert_params_list[0]
    for param in insert_params_list[1:]:
        insert_query += ", %s" % param
    insert_query += ") VALUES "
    insert_query += "\n('%s'" % tuple_values_list[0][0]
    for j in range(1, len(tuple_values_list[0])):
        insert_query += " ,'%s'" % tuple_values_list[0][j]
    insert_query += ")"
    for i in range(1, len(tuple_values_list)):
        insert_query += ",\n('%s'" % tuple_values_list[i][0]
        for j in range(1, len(tuple_values_list[i])):
            insert_query += " ,'%s'" % tuple_values_list[i][j]
        insert_query += ")"  # close EVERY tuple, not only the first one
    insert_query += ";"
    return insert_query
def build_date(d, m, y):
    """Return a datetime.date built from day *d*, month *m*, year *y*."""
    return datetime.date(year=y, month=m, day=d)
def build_date2(day, month, year):
    """Return a datetime.date from keyword-friendly day/month/year arguments."""
    return datetime.date(year=year, month=month, day=day)
""" <---------------THE CORRECT WAY TO HANDLE DATES IN SQLITE3 with sqliteDefaults------------------>
#Create a random table
conn.execute('''Create table if not exists person(
ID INTEGER PRIMARY KEY,
Name TEXT,
DOB DATE
);
''')
conn.commit()
#Insert values into the table in one of the accepted formats
sqliteDefaults.insert_table_sqlite(conn,
'person',
('ID', 'Name', 'DOB'),
[
                                (1, 'Bob', sqliteDefaults.build_date(7,10,1999) ),
(2, 'John', sqliteDefaults.build_date(y=2005,m=8,d=21) ),
(3, 'Stacy', sqliteDefaults.build_date2(month=6,day=25,year=2003)),
(4, 'Emma', datetime.date(2001, 10, 27) )
]
)
#Source: http://stackoverflow.com/questions/4272908/sqlite-date-storage-and-conversion
table=sqliteDefaults.verified_select_sqlite(conn,"select * from person order by DOB desc;")
for row in table:
print row
#OUTPUT:
#(2, u'John', datetime.date(2005, 8, 21))
#(3, u'Stacy', datetime.date(2003, 6, 25))
#(4, u'Emma', datetime.date(2001, 10, 27))
#(1, u'Bob', datetime.date(1999, 10, 7))
print table[2][2].day
#OUTPUT:
# 27
#We can now compare the values as we do normal datetime objects: with > and <, etc
i=1; j=2;
if table[i][2]<table[j][2]:
print "%s is older than %s"%(table[i][1], table[j][1])
elif table[j][2]<table[i][2]:
print "%s is older than %s"%(table[j][1], table[i][1])
#OUTPUT:
# Emma is older than Stacy
"""
def insert_table_sqlite(conn, tablename, insert_params_list, tuple_values_list, commit=True):
    """Insert *tuple_values_list* rows into *tablename*.

    The SQL text comes from build_insert_query(); the transaction is
    committed immediately unless commit=False (useful for batching).
    """
    query = build_insert_query(
        tablename=tablename,
        insert_params_list=insert_params_list,
        tuple_values_list=tuple_values_list,
    )
    cur = conn.cursor()
    cur.execute(query)
    if commit:
        conn.commit()
def insert_table_sqlite2(conn, tablename, parameters_tuple=(), tuple_values_list=None, commit=True, print_query=False):
    """Insert rows with parameterized "?" placeholders via executemany().

    Bug fixes:
    - with an empty parameters_tuple the placeholder count was computed
      from the (empty) tuple, yielding "(?)" regardless of row width; it
      is now taken from the first value tuple;
    - the column list was rendered with str(tuple), which produced quoted
      names and a trailing comma for 1-tuples; columns are now joined
      properly;
    - the mutable default argument [] was replaced with a None sentinel.
    """
    if not tuple_values_list:
        print("\n\tSQLiteDefaults: insert_table_sqlite() ERROR: tuple_value_list cannot be empty")
        return
    if parameters_tuple == ():
        # No explicit column list: placeholder count must match row width.
        query = "INSERT INTO %s VALUES " % (tablename)
        placeholder_count = len(tuple_values_list[0])
    else:
        query = "INSERT INTO %s (%s) VALUES " % (tablename, ", ".join(parameters_tuple))
        placeholder_count = len(parameters_tuple)
    # qmark-style placeholders; see https://docs.python.org/2/library/sqlite3.html
    query = query + "(?" + (",?" * (placeholder_count - 1)) + ")"
    if print_query:
        print(query)
    conn.executemany(query, tuple_values_list)
    if commit:
        conn.commit()
def verified_select_sqlite(conn, select_query, fetch="all", printing=True):
    '''Execute a single SELECT statement and wrap the result in a Table.

    Verifies that the query looks like a lone select statement (a crude
    guard against statement injection), executes it, and returns a Table
    of the rows plus column names.  Returns None (printing an ERROR
    unless printing=False) when the query is rejected, `fetch` is
    invalid, or the result set is empty.

    Bug fix: the empty-result test used `result_table is ()`, which can
    never match the list returned by fetchall() (and relies on tuple
    interning anyway); it is now a plain truthiness check, which also
    catches fetchone() returning None.
    '''
    # Guard clauses replace the original deeply nested if/else pyramid.
    if 'select' not in select_query.lower():
        if printing:
            print('verified_select() ERROR: Only select queries can be executed')
        return None
    temp = select_query.strip()
    if ';' not in temp:
        temp += ';'
    if temp.index(';') != (len(temp) - 1):
        # A ';' before the end means a second statement follows.
        if printing:
            print('verified_select() ERROR: Only one query can be fired at a time')
        return None
    cursor = conn.cursor()
    cursor.execute(temp)
    # cursor.description yields one 7-tuple per column; item 0 is the name.
    attributes = [column[0] for column in cursor.description]
    if fetch.lower() == "all":
        result_table = cursor.fetchall()
    elif fetch.lower() == "one":
        result_table = cursor.fetchone()
    else:
        if printing:
            print("verified_select() ERROR: Improper value '%s' passed to argument 'fetch'" % fetch)
        return None
    if not result_table:
        if printing:
            print('verified_select() ERROR: Empty table')
        return None
    return Table(input_table=result_table, input_attributes=attributes)
def print_table(conn, select_query):
    """Pretty-print the rows of *select_query* between horizontal rules.

    Validation and execution are delegated to verified_select_sqlite();
    nothing at all is printed when the query is rejected or the table is
    empty (printing=False suppresses its error messages too).
    """
    table = verified_select_sqlite(conn, select_query, printing=False)
    if table is not None:
        print '\n\n----------------------------------------------------------------'
        for row in table:
            print '\n'
            for i in range(0,len(row)):
                # Python 2 trailing-comma print: the row's cells stay on
                # one output line, separated by the tab padding.
                print row[i],"\t\t",
        print '\n\n----------------------------------------------------------------\n'
def list_all_tables(db_file_name):
    """Print the name of every table in the given SQLite database file."""
    connection = get_conn(db_file_name)
    print_table(connection, "select name from sqlite_master where type = 'table';")
'''
print("\n\n<------------TEST CODE----------->\n")
def select_table_sqlite(conn, tablename, parameters_tuple=(), where_string="", order_by_string=""):
query=""
if parameters_tuple==():
query="SELECT * FROM %s"%(tablename)
elif type(parameters_tuple)=="tuple":
query="SELECT %s FROM %s"%(parameters_tuple, tablename)
else:
print("\n\tSQLiteDefaults: select_table_sqlite() ERROR: parameters_tuple must be a tuple")
if where_string!="":
query=query+" WHERE "+where_string
elif where_string.find(";") != -1:
print("\n\tSQLiteDefaults: select_table_sqlite() ERROR: where_string cannot have a semicolon in it (this is to prevent SQL injection)")
return
if order_by_string!="":
query=query+" ORDER BY "+order_by_string
elif order_by_string.find(";") != -1:
print("\n\tSQLiteDefaults: select_table_sqlite() ERROR: order_by_string cannot have a semicolon in it (this is to prevent SQL injection)")
return
query=query+";"
table=conn.execute(query) |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root | for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if t | he code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class BusinessIdentity(Model):
    """The integration account partner's business identity.

    :param qualifier: The business identity qualifier e.g. as2identity, ZZ,
     ZZZ, 31, 32
    :type qualifier: str
    :param value: The user defined business identity value.
    :type value: str
    """

    _validation = {
        'qualifier': {'required': True},
        'value': {'required': True},
    }

    _attribute_map = {
        'qualifier': {'key': 'qualifier', 'type': 'str'},
        'value': {'key': 'value', 'type': 'str'},
    }

    def __init__(self, qualifier, value):
        # Chain to msrest.serialization.Model so base-class state is
        # initialized, matching the convention of the other generated
        # models in this package (e.g. JobInformation above).
        super(BusinessIdentity, self).__init__()
        self.qualifier = qualifier
        self.value = value
|
# -*- coding: utf-8 -*-
from peewee import *
import urllib
import tempfile
import os
from contextlib import contextmanager
from sshtunnel import SSHTunnelForwarder
import traceback
# peewee handle to the COD MySQL database, reached through a local
# port-forward on 127.0.0.1:3308 (codtunnel() below binds exactly that
# address).  The 'cod_reader' account presumably grants read-only
# access -- TODO confirm.
db = MySQLDatabase(
    "cod", host="127.0.0.1", user="cod_reader", port=3308, connect_timeout=10000
)
# Get
# ssh wout@axil1.ua.ac.be -L 3307:www.crystallography.net:3306 -N &
# python -m pwiz cod -e mysql -u cod_reader -H 127.0.0.1 -p 3307
#
# mysql -ucod_reader -h 127.0.0.1 -P 3307
# SELECT DATABASE();
# USE cod;
# SHOW TABLES;
# DESCRIBE data;
class BaseModel(Model):
    """Abstract base: binds every model in this module to the COD database."""
    class Meta:
        database = db
class Data(BaseModel):
    """One entry of the COD (crystallography.net) ``data`` table.

    The column fields mirror the server-side schema (generated with pwiz,
    see the notes above).  Helper methods add human-readable printing,
    near-ambient-conditions filtering (satp) and CIF file download.
    """

    rfsqd = FloatField(db_column="RFsqd", null=True)
    ri = FloatField(db_column="RI", null=True)
    rall = FloatField(db_column="Rall", null=True)
    robs = FloatField(db_column="Robs", null=True)
    rref = FloatField(db_column="Rref", null=True)
    z = IntegerField(db_column="Z", index=True, null=True)
    zprime = FloatField(db_column="Zprime", index=True, null=True)
    a = FloatField(index=True, null=True)
    acce_code = CharField(index=True, null=True)
    alpha = FloatField(index=True, null=True)
    authors = TextField(null=True)
    b = FloatField(index=True, null=True)
    beta = FloatField(index=True, null=True)
    c = FloatField(index=True, null=True)
    calcformula = CharField(index=True, null=True)
    cellformula = CharField(null=True)
    cellpressure = FloatField(null=True)
    celltemp = FloatField(null=True)
    chemname = CharField(index=True, null=True)
    commonname = CharField(index=True, null=True)
    compoundsource = CharField(null=True)
    date = DateField(index=True, null=True)
    diffrpressure = FloatField(null=True)
    diffrtemp = FloatField(null=True)
    doi = CharField(index=True, null=True)
    duplicateof = IntegerField(null=True)
    file = PrimaryKeyField()
    firstpage = CharField(null=True)
    flags = CharField(null=True)
    formula = CharField(index=True, null=True)
    gamma = FloatField(index=True, null=True)
    gofall = FloatField(null=True)
    gofgt = FloatField(null=True)
    gofobs = FloatField(null=True)
    issue = CharField(null=True)
    journal = CharField(index=True, null=True)
    lastpage = CharField(null=True)
    method = CharField(index=True, null=True)
    mineral = CharField(index=True, null=True)
    nel = CharField(index=True, null=True)
    onhold = DateField(null=True)
    optimal = IntegerField(null=True)
    pressurehist = CharField(null=True)
    radsymbol = CharField(db_column="radSymbol", null=True)
    radtype = CharField(db_column="radType", null=True)
    radiation = CharField(null=True)
    sg = CharField(index=True, null=True)
    sghall = CharField(db_column="sgHall", index=True, null=True)
    siga = FloatField(null=True)
    sigalpha = FloatField(null=True)
    sigb = FloatField(null=True)
    sigbeta = FloatField(null=True)
    sigc = FloatField(null=True)
    sigcellpressure = FloatField(null=True)
    sigcelltemp = FloatField(null=True)
    sigdiffrpressure = FloatField(null=True)
    sigdiffrtemp = FloatField(null=True)
    siggamma = FloatField(null=True)
    sigvol = FloatField(null=True)
    status = CharField(null=True)
    svnrevision = IntegerField(index=True, null=True)
    text = TextField(index=True)
    thermalhist = CharField(null=True)
    time = TimeField(index=True, null=True)
    title = TextField(null=True)
    vol = FloatField(index=True, null=True)
    volume = IntegerField(null=True)
    wrall = FloatField(db_column="wRall", null=True)
    wrobs = FloatField(db_column="wRobs", null=True)
    wrref = FloatField(db_column="wRref", null=True)
    wavelength = FloatField(null=True)
    year = IntegerField(null=True)

    class Meta:
        db_table = "data"
        indexes = ((("mineral", "chemname", "commonname"), False),)

    def __str__(self):
        # Human-readable summary: names, formula, cell, conditions, citation.
        ret = "{} ({})\n".format(self.mineral, self.commonname)
        ret += "{} ({})\n".format(self.formula, self.chemname)
        ret += "{} ({} {} {} {} {} {})\n".format(
            self.sg, self.a, self.b, self.c, self.alpha, self.beta, self.gamma
        )
        ret += "P = {} kPa, T = {} K\n".format(self.diffrpressure, self.diffrtemp)
        ret += "P = {} kPa, T = {} K\n".format(self.cellpressure, self.celltemp)
        ret += "{} ({})\n".format(self.authors, self.year)
        ret += "https://doi.org/{}\n".format(self.doi)
        return ret

    @staticmethod
    def sap(p):
        """True when pressure *p* (kPa) lies within 0.9-1.1 atm; None passes."""
        if p is None:
            return True
        a = 0.9  # atm
        b = 1.1
        a *= 101.325  # kPa
        b *= 101.325
        return p >= a and p <= b

    @staticmethod
    def sat(t):
        """True when temperature *t* (K) lies within 15-30 Celsius; None passes."""
        if t is None:
            return True
        a = 15  # Celsius
        b = 30
        a += 273.15  # Kelvin
        b += 273.15
        return t >= a and t <= b

    def satp(self):
        """True when both diffraction and cell conditions are near-ambient."""
        return (
            self.sap(self.diffrpressure)
            and self.sap(self.cellpressure)
            and self.sat(self.diffrtemp)
            and self.sat(self.celltemp)
        )

    @property
    def filename(self):
        # "<COD file number>.cif"
        return os.path.join("{}.cif".format(self.file))

    @property
    def path(self):
        # Local cache directory for downloaded CIF files.
        return os.path.join(tempfile.gettempdir(), "spectrocrunch", "cif")

    @property
    def resourcename(self):
        return os.path.join(self.path, self.filename)

    @property
    def url(self):
        return "http://www.crystallography.net/cod/{}.cif".format(self.file)

    def download(self):
        """Download this entry's CIF into the local cache (no-op if cached)."""
        filename = self.resourcename
        if not os.path.isfile(filename):
            path = self.path
            if not os.path.exists(path):
                os.makedirs(path)
            # NOTE(review): urllib.URLopener is Python-2 only; on Python 3
            # this would be urllib.request.urlretrieve -- confirm the
            # targeted interpreter before changing.
            ciffile = urllib.URLopener()
            ciffile.retrieve(self.url, filename)

    @classmethod
    def namequery(cls, name):
        """Entries whose mineral, common or chemical name equals *name*, newest first."""
        # Bug fix: peewee expressions must be combined with the bitwise
        # `|` operator.  Python's `or` short-circuits on the first
        # (truthy) Expression object, so only `mineral` was ever matched.
        return (
            cls.select()
            .where(
                (cls.mineral == name)
                | (cls.commonname == name)
                | (cls.chemname == name)
            )
            .order_by(cls.year.desc())
        )
@contextmanager
def codtunnel():
    """Context manager: SSH-tunnel the COD MySQL server to 127.0.0.1:3308.

    The local bind matches the `db` connection settings above.  The tunnel
    is now torn down on every exit path, not only after an exception.
    """
    server = SSHTunnelForwarder(
        ssh_address_or_host=("axil1.ua.ac.be", 22),
        ssh_username="wout",
        ssh_pkey="/users/denolf/.ssh/id_rsa",
        remote_bind_address=("www.crystallography.net", 3306),
        local_bind_address=("127.0.0.1", 3308),
    )
    try:
        server.start()
        yield
    except Exception:
        # Bug fix: previously server.stop() ran only on this error path
        # (leaking the tunnel on normal exit) and a bare `except:` also
        # swallowed KeyboardInterrupt/SystemExit.
        print(traceback.format_exc())
    finally:
        server.stop()
if __name__ == "__main__":
with codtunnel():
query = Data.namequery("copper acetate")
# for entry in query:
# print entry
for entry in query:
if entry.satp():
print entry
entry.download()
break
|
from tests.base import TestBase
from pascal.program import Program
class TestVariables(TestBase):
    """Exercise symbol-table handling for Pascal variable declarations."""

    def test_pass_valid_var(self):
        """The all-variables program yields 7 symbols ending at address 23."""
        source = "tests/mock_pas/all_var.pas"
        program = Program(source)
        program.run()
        self.assertEqual(len(program.symbol_table), 7)
        self.assertEqual(program.symbol_address, 23)

    def test_pass_assign(self):
        """The assignment program runs to completion without raising."""
        source = "tests/mock_pas/variables.pas"
        program = Program(source)
        program.run()
|
import _plotly_utils.basevalidators
class ShowlegendValidator(_plotly_utils.basevalidators.BooleanValidator):
    """Boolean validator for the `cone.showlegend` plotly attribute."""

    def __init__(self, plotly_name="showlegend", parent_name="cone", **kwargs):
        # edit_type defaults to "style" unless the caller overrides it
        # via kwargs; remaining kwargs pass through to the base validator.
        super(ShowlegendValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "style"),
            **kwargs
        )
|
.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Lucterios courier'
copyright = '2016, sd-libre'
author = 'sd-libre'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.0'
# The full version, including alpha/beta/rc tags.
release = '2.0.2.15122316'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'fr'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'haiku'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = True
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
# html_search_language = 'fr'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Lucteriosmailingdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Latex figure (float) alignment
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Lucteriosmailing.tex', 'Lucterios mailing Documentation',
'sd-libre', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'lucterioscore', 'Documentation Lucterios mailing',
[author], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Lucteriosmailing', 'Documentation Lucterios mailing',
author, 'Lucteriosmailing', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailm |
# Copyright (c) 2013 Hesky Fisher
# See LICENSE.txt for details.
from orthography import add_suffix
import unittest
class OrthographyTestCase(unittest.TestCase):
    """Regression tests for orthography.add_suffix.

    Each case is (word, suffix, expected).  Failures are collected and
    reported in one batch so a single run surfaces every broken rule.
    Fix: the failure report now uses print() so the file also runs on
    Python 3 (the original py2 print statement is a syntax error there).
    """

    def test_add_suffix(self):
        cases = (
            ('artistic', 'ly', 'artistically'),
            ('cosmetic', 'ly', 'cosmetically'),
            ('establish', 's', 'establishes'),
            ('speech', 's', 'speeches'),
            ('approach', 's', 'approaches'),
            ('beach', 's', 'beaches'),
            ('arch', 's', 'arches'),
            ('larch', 's', 'larches'),
            ('march', 's', 'marches'),
            ('search', 's', 'searches'),
            ('starch', 's', 'starches'),
            ('stomach', 's', 'stomachs'),
            ('monarch', 's', 'monarchs'),
            ('patriarch', 's', 'patriarchs'),
            ('oligarch', 's', 'oligarchs'),
            ('cherry', 's', 'cherries'),
            ('day', 's', 'days'),
            ('penny', 's', 'pennies'),
            ('pharmacy', 'ist', 'pharmacist'),
            ('melody', 'ist', 'melodist'),
            ('pacify', 'ist', 'pacifist'),
            ('geology', 'ist', 'geologist'),
            ('metallurgy', 'ist', 'metallurgist'),
            ('anarchy', 'ist', 'anarchist'),
            ('monopoly', 'ist', 'monopolist'),
            ('alchemy', 'ist', 'alchemist'),
            ('botany', 'ist', 'botanist'),
            ('therapy', 'ist', 'therapist'),
            ('theory', 'ist', 'theorist'),
            ('psychiatry', 'ist', 'psychiatrist'),
            ('lobby', 'ist', 'lobbyist'),
            ('hobby', 'ist', 'hobbyist'),
            ('copy', 'ist', 'copyist'),
            ('beauty', 'ful', 'beautiful'),
            ('weary', 'ness', 'weariness'),
            ('weary', 'some', 'wearisome'),
            ('lonely', 'ness', 'loneliness'),
            ('narrate', 'ing', 'narrating'),
            ('narrate', 'or', 'narrator'),
            ('generalize', 'ability', 'generalizability'),
            ('reproduce', 'able', 'reproducible'),
            ('grade', 'ations', 'gradations'),
            ('urine', 'ary', 'urinary'),
            ('achieve', 'able', 'achievable'),
            ('polarize', 'ation', 'polarization'),
            ('done', 'or', 'donor'),
            ('analyze', 'ed', 'analyzed'),
            ('narrate', 'ing', 'narrating'),
            ('believe', 'able', 'believable'),
            ('animate', 'ors', 'animators'),
            ('discontinue', 'ation', 'discontinuation'),
            ('innovate', 'ive', 'innovative'),
            ('future', 'ists', 'futurists'),
            ('illustrate', 'or', 'illustrator'),
            ('emerge', 'ent', 'emergent'),
            ('equip', 'ed', 'equipped'),
            ('defer', 'ed', 'deferred'),
            ('defer', 'er', 'deferrer'),
            ('defer', 'ing', 'deferring'),
            ('pigment', 'ed', 'pigmented'),
            ('refer', 'ed', 'referred'),
            ('fix', 'ed', 'fixed'),
            ('alter', 'ed', 'altered'),
            ('interpret', 'ing', 'interpreting'),
            ('wonder', 'ing', 'wondering'),
            ('target', 'ing', 'targeting'),
            ('limit', 'er', 'limiter'),
            ('maneuver', 'ing', 'maneuvering'),
            ('monitor', 'ing', 'monitoring'),
            ('color', 'ing', 'coloring'),
            ('inhibit', 'ing', 'inhibiting'),
            ('master', 'ed', 'mastered'),
            ('target', 'ing', 'targeting'),
            ('fix', 'ed', 'fixed'),
            ('scrap', 'y', 'scrappy'),
            ('trip', 's', 'trips'),
            ('equip', 's', 'equips'),
            ('bat', 'en', 'batten'),
            ('smite', 'en', 'smitten'),
            ('got', 'en', 'gotten'),
            ('bite', 'en', 'bitten'),
            ('write', 'en', 'written'),
            ('flax', 'en', 'flaxen'),
            ('wax', 'en', 'waxen'),
            ('fast', 'est', 'fastest'),
            ('white', 'er', 'whiter'),
            ('crap', 'y', 'crappy'),
            ('lad', 'er', 'ladder'),
        )
        failed = []
        for word, suffix, expected in cases:
            if add_suffix(word, suffix) != expected:
                failed.append((word, suffix, expected))
        for word, suffix, expected in failed:
            # print() is valid on both Python 2 and 3 with a single argument.
            print('add_suffix(%s, %s) is %s not %s' % (word, suffix, add_suffix(word, suffix), expected))
        self.assertEqual(len(failed), 0)
# Allow running this test module directly: `python <this file>`.
if __name__ == '__main__':
    unittest.main()
params size: %.2fM' % (sum(para.numel() for para in model.parameters())/1000000.0))
# define criterion and optimizer
criterion = torch.nn.MSELoss(size_average=True).cuda()
optimizer = torch.optim.RMSprop(model.parameters(),
lr = args.lr,
momentum = args.momentum,
weight_decay = args.weight_decay)
# optionally resume from a checkpoint
# --------
title = 'mpii-' + args.arch
if args.resume:
if isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
best_acc = checkpoint['best_acc']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
logger = Logger(join(args.checkpoint, 'log.txt'), title=title, resume=True)
else:
print("=> no checkpoint found at '{}'".format(args.resume))
# --------
else:
# open the log file
logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title)
# set names of log file
logger.set_names(['train-loss', 'val-loss', 'val-acc'])
# using the fastest algorithm
cudnn.benchmark = True
# Data loading code
train_loader = torch.utils.data.DataLoader(
dataset = datasets.Mpii('data/mpii/mpii_annotations.json', args.dataPath),
batch_size = args.train_batch,
shuffle = True,
num_workers = args.workers,
pin_memory=True)
val_loader = torch.utils.data.DataLoader(
dataset = datasets.Mpii('data/mpii/mpii_annotations.json', args.dataPath, train=False),
batch_size = args.test_batch,
shuffle = False,
num_workers = args.workers,
pin_memory=True)
if args.evaluate:
print('\nEvaluation only')
loss, acc, predictions = validate(val_loader, model, criterion, args.debug, args.flip)
save_pred(predictions, checkpoint=args.checkpoint)
return
for epoch in range(args.start_epoch, args.Epochs):
# lr decay
lr = LRDecay(optimizer, epoch, args.lr)
print('\nEpoch: %d | lr: %.8f' % (epoch, lr))
# train for one epoch
train_loss = train(train_loader, model, criterion, optimizer, epoch - 1, args.debug)
# evaluate on validation set
valid_loss, valid_acc, predictions = validate(val_loader, model, criterion, args.debug, args.flip)
# append logger file
logger.append([train_loss, valid_loss, valid_acc])
# remember best acc and save checkpoint
is_best = valid_acc > best_acc
best_acc = max(valid_acc, best_acc)
save_checkpoint({
'epoch': epoch,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_acc': best_acc,
'optimizer' : optimizer.state_dict(),
}, predictions, is_best, checkpoint = args.checkpoint)
logger.close()
logger.plot()
plt.savefig(os.path.join(args.checkpoint, 'log.eps'))
def train(train_loader, model, criterion, optimizer, epoch, debug=False):
    """Run one training epoch over *train_loader* and return the average loss.

    NOTE(review): uses pre-0.4 PyTorch idioms (Variable, loss.data[0],
    cuda(async=True)); on modern torch these would be plain tensors,
    loss.item() and non_blocking=True -- confirm the targeted version.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    # switch to train mode
    model.train()
    end = time.time()
    gt_win, pred_win = None, None
    bar = Bar('Processing', max=len(train_loader))
    print("the length of train_loader: {}".format(len(train_loader)))
    for i, (inputs, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        inputs = inputs.cuda()
        # async=True requests a non-blocking host-to-device copy
        # (keyword renamed to non_blocking in later torch versions).
        target = target.cuda(async=True)
        input_var = torch.autograd.Variable(inputs)
        target_var = torch.autograd.Variable(target)
        # compute output
        output = model(input_var)
        # Calculate intermediate loss: the model returns a sequence of
        # outputs and every one is penalized against the same target.
        loss = criterion(output[0], target_var)
        for j in range(1, len(output)):
            loss += criterion(output[j], target_var)
        if debug: # visualize groundtruth and predictions
            gt_batch_img = batch_with_heatmap(inputs, target)
            pred_batch_img = batch_with_heatmap(inputs, output[-1].data)
            if not gt_win or not pred_win:
                # First debug batch: create the side-by-side figure once...
                ax1 = plt.subplot(121)
                ax1.title.set_text('Groundtruth')
                gt_win = plt.imshow(gt_batch_img)
                ax2 = plt.subplot(122)
                ax2.title.set_text('Prediction')
                pred_win = plt.imshow(pred_batch_img)
            else:
                # ...then only refresh the image data on later batches.
                gt_win.set_data(gt_batch_img)
                pred_win.set_data(pred_batch_img)
            plt.pause(.05)
            plt.draw()
        # measure accuracy and record loss
        losses.update(loss.data[0], inputs.size(0))
        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        # plot progress
        bar.suffix = '({batch}/{size}) Data: {data:.6f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.8f}'.format(
            batch=i + 1,
            size=len(train_loader),
            data=data_time.val,
            bt=batch_time.val,
            total=bar.elapsed_td,
            eta=bar.eta_td,
            loss=losses.avg,
        )
        bar.next()
    bar.finish()
    return losses.avg
def validate(val_loader, model, criterion, debug=False, flip=True):
batch_time = AverageMeter()
losses = AverageMeter()
acces = AverageMeter()
# predictions
predictions = torch.Tensor(val_loader.dataset.__len__(), 16, 2)
# switch to evaluate mode
model.eval()
gt_win, pred_win = None, None
end = time.time()
bar = Bar('Processing', max=len(val_loader))
print("length of output:{}".format(len(val_loader)))
for i, (inputs, target, meta) in enumerate(val_loader):
target = target.cuda(async=True)
input_var = torch.autograd.Variable(inputs.cuda(), volatile=True)
target_var = torch.autograd.Variable(target, volatile=True)
# compute output
output = model(input_var)
# score_map: 16*64*64
score_map = output[-1].data.cpu()
if flip:
flip_input_var = torch.autograd.Variable(
torch.from_numpy(fliplr(inputs.clone().numpy())).float().cuda(),
volatile=True
)
flip_output_var = model(flip_input_var)
flip_output = flip_back(flip_output_var[-1].data.cpu())
score_map += flip_output
#print("scor")
loss = 0
for o in output:
loss += criterion(o, target_var)
# target : 16*64*64
acc = accuracy(score_map.cuda(), target, idx)
# generate predictions
preds = final_preds(score_map, meta['center'], meta['scale'], [64, 64])
for n in range(score_map.size(0)):
predictions[meta['index'][n], :, :] = preds[n, :, :]
if debug:
gt_batch_img = batch_with_heatmap(inputs, target)
pred_batch_img = batch_with_heatmap(inputs, score_map)
if not gt_win or not pred_win:
plt.subplot(121)
gt_ | win = plt.imshow(gt_batch_img)
plt.subplot(122)
| pred_win = plt.imshow(pred_batch_img)
else:
gt_win.set_data(gt_batch_img)
pred_win.set_data(pred_batch_img)
plt.pause(.05)
plt.draw()
# measure accuracy and record loss
losses.update(loss.data[0], inputs.size(0))
acces.update(acc[0], inputs.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
# plot progress
bar.suffix |
#!/usr/bin/env python
from ConfigParser import ConfigParser
from ordereddict import OrderedDict
import sys
def make_parser():
    """Build a ConfigParser that keeps section order and option-name case."""
    cfg = ConfigParser(dict_type=OrderedDict)
    # ConfigParser lower-cases option names by default; keep them verbatim.
    cfg.optionxform = str
    return cfg
def transform(sectionName):
    """Rewrite an INI section name into the new "Type=..." layout.

    - Names not starting with "View=" are returned unchanged (apart from
      normalising ",Dialog=" to ", Dialog=").
    - "View=...Viewer" names become "Type=Viewer, View=...".
    - Other "View=" names have their comma-separated parts reversed and,
      when there is a single part, get "Type=View" prepended.
    """
    # Normalise a missing space after the comma before the Dialog key.
    sectionName = sectionName.replace(",Dialog=", ", Dialog=")
    if sectionName.startswith("View="):
        if sectionName.endswith("Viewer"):
            return "Type=Viewer, " + sectionName.split(", ")[0]
        else:
            parts = sectionName.split(",")
            parts.reverse()
            if len(parts) == 1:
                parts.insert(0, "Type=View")
            return ", ".join(parts)
    else:
        return sectionName
if __name__ == "__main__":
    # Usage: script.py <ini-file>
    # Reads the INI file, rewrites each section name via transform() and
    # writes the result to "<ini-file>.tmp"; the original is untouched.
    fileName = sys.argv[1]
    parser = make_parser()
    parser.read([fileName])
    newParser = make_parser()
    for section in parser.sections():
        newSection = transform(section)
        newParser.add_section(newSection)
        for option, value in parser.items(section):
            newParser.set(newSection, option, value)
    # Close the output handle deterministically instead of leaking it
    # to the garbage collector.
    with open(fileName + ".tmp", "w") as out:
        newParser.write(out)
|
# -*- coding: utf-8 -*-
"""
"Sandbox" module for exploring API useful for digital labbooks.
Examples
--------
>>> from chempy.units import to_unitless, default_units as u
>>> s1 = Solution(0.1*u.dm3, {'CH3OH': 0.1 * u.molar})
>>> s2 = Solution(0.3*u.dm3, {'CH3OH': 0.4 * u.molar, 'Na+': 2e-3*u.molar, 'Cl-': 2e-3*u.molar})
>>> s3 = s1 + s2
>>> abs(to_unitless(s3.volume - 4e-4 * u.m**3, u.dm3)) < 1e-15
True
>>> s3.concentrations.isclose({'CH3OH': 0.325*u.molar, 'Na+': 1.5e-3*u.molar, 'Cl-': 1.5e-3*u.molar})
True
>>> s4 = s3.dissolve({'CH3OH': 1*u.gram})
>>> abs(s4.concentrations['CH3OH'] - (0.325 + 1/(12.011 + 4*1.008 + 15.999)/.4)*u.molar) < 1e-4
True
"""
import copy
from .chemistry import Substance
from .units import (
get_derived_unit,
html_of_unit,
is_unitless,
SI_base_registry,
to_unitless,
rescale,
default_units as u,
)
from .util.arithmeticdict import ArithmeticDict, _imul, _itruediv
from .printing import as_per_substance_html_table
class QuantityDict(ArithmeticDict):
    """An ArithmeticDict whose values all carry the same physical unit.

    Values are validated for unit compatibility on construction and on
    item assignment; missing keys default to ``0 * units``.
    """
    def __init__(self, units, *args, **kwargs):
        self.units = units
        # Default factory yields a zero quantity in these units.
        super(QuantityDict, self).__init__(lambda: 0 * self.units, *args, **kwargs)
        self._check()
    @classmethod
    def of_quantity(cls, quantity_name, *args, **kwargs):
        """Alternate constructor deriving units from a named physical quantity."""
        instance = cls(
            get_derived_unit(SI_base_registry, quantity_name), *args, **kwargs
        )
        instance.quantity_name = quantity_name
        return instance
    def rescale(self, new_units):
        """Return a new instance with every value expressed in ``new_units``."""
        return self.__class__(
            new_units, {k: rescale(v, new_units) for k, v in self.items()}
        )
    def _repr_html_(self):
        # Jupyter display hook: render as a per-substance HTML table.
        if hasattr(self, "quantity_name"):
            header = self.quantity_name.capitalize() + " / "
        else:
            header = ""
        header += html_of_unit(self.units)
        tab = as_per_substance_html_table(to_unitless(self, self.units), header=header)
        return tab._repr_html_()
    def _check(self):
        # Every stored value must be dimensionally compatible with self.units.
        for k, v in self.items():
            if not is_unitless(v / self.units):
                raise ValueError(
                    "entry for %s (%s) is not compatible with %s" % (k, v, self.units)
                )
    def __setitem__(self, key, value):
        # Validate on write, mirroring _check().
        if not is_unitless(value / self.units):
            raise ValueError(
                "entry for %s (%s) is not compatible with %s" % (key, value, self.units)
            )
        super(QuantityDict, self).__setitem__(key, value)
    def copy(self):
        # Deep-copy entries so mutating the copy never aliases our values.
        return self.__class__(self.units, copy.deepcopy(list(self.items())))
    def __repr__(self):
        return "{}({}, {})".format(
            self.__class__.__name__, repr(self.units), dict(self)
        )
    def __mul__(self, other):
        # Units multiply; a plain number leaves units unchanged (getattr default 1).
        # _imul mutates d in place, hence the deep copy first.
        d = dict(copy.deepcopy(list(self.items())))
        _imul(d, other)
        return self.__class__(self.units * getattr(other, "units", 1), d)
    def __truediv__(self, other):
        # Same pattern as __mul__, with units dividing.
        d = dict(copy.deepcopy(list(self.items())))
        _itruediv(d, other)
        return self.__class__(self.units / getattr(other, "units", 1), d)
    def __floordiv__(self, other):
        # Floor division is only defined by a unitless divisor.
        a = self.copy()
        if getattr(other, "units", 1) != 1:
            raise ValueError("Floor division with quantities not defined")
        a //= other
        return a
    def __rtruediv__(self, other):
        """ other / self """
        return self.__class__(
            getattr(other, "units", 1) / self.units,
            {k: other / v for k, v in self.items()},
        )
    def __rfloordiv__(self, other):
        """ other // self """
        return self.__class__(
            getattr(other, "units", 1) / self.units,
            {k: other // v for k, v in self.items()},
        )
class AutoRegisteringSubstanceDict(object):
    """Mapping that lazily creates and caches one Substance per key.

    Looking up an unseen key calls ``factory(key)`` (by default
    ``Substance.from_formula``) and memoizes the result, so repeated
    lookups return the same object.
    """

    def __init__(self, factory=Substance.from_formula):
        self.factory = factory
        self._store = {}

    def __getitem__(self, key):
        if key not in self._store:
            self._store[key] = self.factory(key)
        return self._store[key]
class Solution(object):
    """A liquid solution: a volume together with per-substance concentrations."""

    def __init__(self, volume, concentrations, substances=None, solvent=None):
        # Fail fast on unitless volumes: all arithmetic below assumes units.
        if not is_unitless(volume / u.dm3):
            raise ValueError("volume need to have a unit (e.g. dm3)")
        self.volume = volume
        self.concentrations = QuantityDict(u.molar, concentrations)
        self.substances = (AutoRegisteringSubstanceDict()
                           if substances is None else substances)
        self.solvent = solvent

    def __eq__(self, other):
        if not isinstance(other, Solution):
            return NotImplemented
        attrs = ("volume", "concentrations", "substances", "solvent")
        return all(getattr(self, a) == getattr(other, a) for a in attrs)

    def __add__(self, other):
        """Mix two solutions sharing the same solvent."""
        if self.solvent != other.solvent:
            raise NotImplementedError(
                "Mixed solvent should be represented as concentrations"
            )
        # Total amount of each substance, then re-derive concentrations.
        amount = (self.concentrations * self.volume
                  + other.concentrations * other.volume)
        mixed_volume = self.volume + other.volume
        return Solution(mixed_volume, amount / mixed_volume,
                        self.substances, self.solvent)

    def dissolve(self, masses):
        """Return a new Solution with the given masses dissolved into it."""
        added = {
            formula: mass / self.substances[formula].molar_mass() / self.volume
            for formula, mass in masses.items()
        }
        contrib = QuantityDict(u.molar, added)
        return Solution(self.volume, self.concentrations + contrib,
                        self.substances, self.solvent)

    def withdraw(self, volume):
        """Remove ``volume`` from this solution and return it as a new Solution."""
        if volume > self.volume:
            raise ValueError(
                "Cannot withdraw a volume greater than the solution volume"
            )
        if volume < volume * 0:
            raise ValueError("Cannot withdraw a negative volume")
        self.volume -= volume
        return Solution(volume, self.concentrations, self.substances, self.solvent)
|
from django import forms
from django.contrib.auth.models import User
from django.forms.models import ModelForm
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit
from .models import UserProfile
class UserForm(ModelForm):
    """Sign-up form for Django's built-in User model.

    Collects only username and password; the password input is rendered
    masked. Email is deliberately not collected.
    """
    password = forms.CharField(widget=forms.PasswordInput())

    class Meta:
        model = User
        # Deliberately only these two fields -- no email is requested.
        fields = ('username', 'password')

    # crispy-forms rendering: POST the form with a primary submit button.
    helper = FormHelper()
    helper.form_method = 'POST'
    helper.add_input(Submit('post', 'post', css_class='btn-primary'))
class LoginForm(forms.ModelForm):
    """Login form for the built-in User model with a masked password field."""
    class Meta:
        model = User
        fields = ('username', 'password')
        # Without an explicit widget Django renders the password as a
        # plain text input, exposing it on screen while typing.
        widgets = {'password': forms.PasswordInput()}
class UserProfileForm(ModelForm):
    """Form for the extra per-user profile data (website URL and picture)."""
    class Meta:
        model = UserProfile
        fields = ('website', 'picture')
# This file is part of Rubber and thus covered by the GPL
# (c) Sebastian Reichel, 2012
"""
Dependency analysis for package 'ltxtable' in Rubber.
"""
def setup (document, context):
    """Register the \\LTXtable macro hook with Rubber's dependency scanner."""
    # Keep the document in a module-level global because the hook callback
    # below receives no document argument from Rubber.
    global doc
    doc = document
    doc.hook_macro('LTXtable', 'aa', hook_ltxtable)
def hook_ltxtable (loc, width, file):
    """Record `file` as a source dependency of the current document.

    The file name is ignored when it appears to contain a control
    sequence ('\\') or a macro argument ('#'), since it cannot then be
    resolved statically.
    """
    if '\\' not in file and '#' not in file:
        doc.add_source(file)
|
# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
class Dimension(dict):
    """SAX handler collecting CloudWatch dimension Name/Value pairs.

    Alternating 'Name'/'Value' elements are folded into a mapping of
    name -> list of values; any other element becomes an attribute.
    """

    def startElement(self, name, attrs, connection):
        # Dimension elements carry no data on the opening tag.
        pass

    def endElement(self, name, value, connection):
        if name == 'Name':
            # Remember the name; subsequent 'Value' elements attach to it.
            self._name = value
        elif name == 'Value':
            self.setdefault(self._name, []).append(value)
        else:
            setattr(self, name, value)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
###
# Copyright (2016-2017) Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: oneview_sas_logical_jbod_attachment_facts
short_description: Retrieve facts about one or more of the OneView SAS Logical JBOD Attachments.
version_added: "2.3"
description:
- Retrieve facts about one or more of the SAS Logical JBOD Attachments from OneView.
requirements:
- "python >= 2.7.9"
- "hpeOneView >= 3.0"
author: "Abilio Parada (@abiliogp)"
options:
name:
description:
- Name of SAS Logical JBOD Attachment.
      required: false
notes:
- This resource is only available on HPE Synergy
extends_documentation_fragment:
- oneview
- oneview.factsparams
'''
EXAMPLES = '''
- name: Gather facts about all SAS Logical JBOD Attachment
oneview_sas_logical_jbod_attachment_facts:
config: "{{ config_path }}"
- debug: var=sas_logical_jbod_attachments
- name: Gather paginated, filtered and sorted facts about SAS Logical JBOD Attachment
oneview_sas_logical_jbod_attachment_facts:
config: "{{ config }}"
params:
start: 0
count: 2
sort: 'name:descending'
filter: "state=Deployed"
- debug: var=sas_logical_jbod_attachments
- name: Gather facts about a SAS Logical JBOD Attachment by name
oneview_sas_logical_jbod_attachment_facts:
config: "{{ config_path }}"
name: "logical-enclosure-SAS-Logical-Interconnect-Group-BDD-1-SLJA-1"
- debug: var=sas_logical_jbod_attachments
'''
RETURN = '''
sas_logical_jbod_attachments:
description: Has all the OneView facts about the SAS Logical JBOD Attachment.
returned: Always, but can be null.
type: dict
'''
from ansible.module_utils.oneview import OneViewModuleBase
class SasLogicalJbodAttachmentFactsModule(OneViewModuleBase):
    """Collects facts about OneView SAS Logical JBOD Attachments."""

    def __init__(self):
        super(SasLogicalJbodAttachmentFactsModule, self).__init__(
            additional_arg_spec=dict(
                name=dict(required=False, type='str'),
                params=dict(required=False, type='dict'),
            ))

    def execute_module(self):
        """Fetch attachments by name, or all of them when no name is given."""
        client = self.oneview_client.sas_logical_jbod_attachments
        name = self.module.params['name']
        if name:
            resources = client.get_by('name', name)
        else:
            resources = client.get_all(**self.facts_params)
        return dict(changed=False,
                    ansible_facts=dict(sas_logical_jbod_attachments=resources))
def main():
    # Ansible entry point: build the facts module and execute it.
    SasLogicalJbodAttachmentFactsModule().run()
if __name__ == '__main__':
    main()
|
lass:`Pool`, :class:`Prefix`) or a list of
instances. The :func:`search` and :func:`smart_search` functions also
embeds the lists in dicts which contain search meta data.
The easiest way to get data out of NIPAP is to use the :func:`get`-method,
given that you know the ID of the object you want to fetch::
# Fetch VRF with ID 1 and print its name
vrf = VRF.get(1)
print(vrf.name)
To list all objects each object has a :func:`list`-function. ::
# list all pools
pools = Pool.list()
# print the name of the pools
for p in pools:
print(p.name)
Each of the list functions can also take a `spec`-dict as a second
argument. With the spec you can perform a simple search operation by
specifying object attribute values. ::
# List pools with a default type of 'assignment'
pools = Pool.list({ 'default_type': 'assignment' })
Performing searches
^^^^^^^^^^^^^^^^^^^
Commin' up, commin' up.
Saving changes
^^^^^^^^^^^^^^
Changes made to objects are not automatically saved. To save the changes,
simply run the object's :func:`save`-method::
vrf.name = "Spam spam spam"
vrf.save()
Error handling
--------------
As is customary in Python applications, an error results in an exception
being thrown. All pynipap exceptions extend the main exception
:class:`NipapError`. A goal with the pynipap library has been to make the
XML-RPC-channel to the backend as transparent as possible, so the XML-RPC
Faults which the NIPAP server returns in case of errors are converted and
re-thrown as new exceptions which also they extend :class:`NipapError`,
for example the NipapDuplicateError which is thrown when a duplicate key
error occurs in NIPAP.
Classes
-------
"""
import sys
import logging
if sys.version_info[0] < 3:
    import xmlrpclib
    # Python 2: alias `int` to the arbitrary-precision `long` so large
    # database IDs behave the same under Python 2 and 3.
    int = long
else:
    # Python 3: xmlrpclib was renamed; keep the old name for this module.
    import xmlrpc.client as xmlrpclib
__version__ = "0.28.4"
__author__ = "Kristian Larsson, Lukas Garberg"
__author_email__= "kll@tele2.net, lukas@spritelink.net"
__copyright__ = "Copyright 2011, Kristian Larsson, Lukas Garberg"
__license__ = "MIT"
__status__ = "Development"
__url__ = "http://SpriteLink.github.com/NIPAP"
# This variable holds the URI to the nipap XML-RPC service which will be used.
# It must be set before the Pynipap can be used!
xmlrpc_uri = None
# Caching of objects is enabled per default but can be disabled for certain
# scenarios. Since we don't have any cache expiration time it can be useful to
# disable for long running applications.
CACHE = True
class AuthOptions:
    """Borg-style container for XML-RPC authentication options.

    All instances share one state dict, so setting options on any
    instance makes them visible to every other. When handling requests
    for multiple users, make sure the shared options always reflect the
    current user.
    """

    __shared_state = {}
    options = None

    def __init__(self, options = None):
        """Create a shared option container.

        `options`, when given, must be a dict of authentication options.
        """
        # Borg pattern: every instance aliases the class-level state dict.
        self.__dict__ = self.__shared_state
        if options is None and not self.__shared_state:
            raise NipapMissingInputError("authentication options not set")
        if options is not None:
            self.options = options
class XMLRPCConnection:
    """ Handles a shared XML-RPC connection.
    """
    # NOTE(review): __shared_state is declared but self.__dict__ is never
    # rebound to it, so the Borg-style sharing appears unwired -- each
    # instance gets its own ServerProxy. Confirm whether that is intended.
    __shared_state = {}
    connection = None
    _logger = None
    def __init__(self):
        """ Create XML-RPC connection.

            The connection will be created to the URL set in the module
            variable `xmlrpc_uri`. The instantiation will fail unless this
            variable is set.
        """
        if xmlrpc_uri is None:
            raise NipapError('XML-RPC URI not specified')
        # creating new instance
        self.connection = xmlrpclib.ServerProxy(xmlrpc_uri, allow_none=True,
                use_datetime=True)
        self._logger = logging.getLogger(self.__class__.__name__)
class Pynipap:
    """Common base for the pynipap model classes (VRF, Pool, Prefix).

    Provides per-class logging, shared authentication options and
    ID-based equality.
    """

    # Logging instance for this object.
    _logger = None
    # Internal database ID of the object.
    id = None

    def __init__(self, id=None):
        """Set up logger, shared auth options and the optional database ID."""
        self._logger = logging.getLogger(self.__class__.__name__)
        self._auth_opts = AuthOptions()
        self.id = id

    def __eq__(self, other):
        """Objects are equal iff both carry database IDs and the IDs match."""
        if self.id is None or other.id is None:
            return False
        return self.id == other.id
class Tag(Pynipap):
    """ A Tag.
    """

    # The tag name.
    name = None

    @classmethod
    def from_dict(cls, tag=None):
        """Build a Tag from an XML-RPC result dict (requires a 'name' key)."""
        if tag is None:
            tag = {}
        instance = Tag()
        instance.name = tag['name']
        return instance

    @classmethod
    def search(cls, query, search_opts=None):
        """ Search tags.
        """
        opts = {} if search_opts is None else search_opts
        conn = XMLRPCConnection()
        try:
            raw = conn.connection.search_tag(
                {
                    'query': query,
                    'search_options': opts,
                    'auth': AuthOptions().options
                })
        except xmlrpclib.Fault as xml_fault:
            # Translate XML-RPC faults into pynipap exceptions.
            raise _fault_to_exception(xml_fault)
        return {
            'search_options': raw['search_options'],
            'result': [Tag.from_dict(t) for t in raw['result']],
        }
class VRF(Pynipap):
""" A VRF.
"""
rt = None
""" The VRF RT, as a string (x:y or x.x.x.x:y).
"""
name = None
""" The name of the VRF, as a string.
"""
description = None
""" VRF description, as a string.
"""
num_prefixes_v4 = None
""" Number of IPv4 prefixes in this VRF
"""
num_prefixes_v6 = None
""" Number of IPv6 prefixes in this VRF
"""
total_addresses_v4 = None
""" Total number of IPv4 addresses in this VRF
"""
total_addresses_v6 = None
""" Total number of IPv6 addresses in this VRF
"""
used_addresses_v4 = None
""" Number of used IPv4 addresses in this VRF
"""
used_addresses_v6 = None
""" Number of used IPv6 addresses in this VRF
"""
free_addresses_v4 = None
""" Number of free IPv4 addresses in this VRF
"""
free_addresses_v6 = None
""" Number of free IPv6 addresses in this VRF
"""
def __init__(self):
Pynipap.__init__(self)
self.tags = {}
self.avps = {}
@classmethod
def list(cls, vrf=None):
""" List VRFs.
"""
if vrf is None:
vrf = {}
xmlrpc = XMLRPCConnection()
try:
vrf_list = xmlrpc.connection.list_vrf(
{
'vrf': vrf,
'auth': AuthOptions().options
})
except xmlrpclib.Fault as xml_fault:
raise _fault_to_exception(xml_fault)
res = list()
for v in vrf_list:
res.append(VRF.from_dict(v))
return res
@classmethod
def from_dict(cls, parm, vrf = None):
""" Create new VRF-object from dict.
Suitable for creating objects from XML-RPC data.
All available keys must exist.
"""
if vrf is None:
vrf = VRF()
vrf.id = parm['id']
vrf.rt = parm['rt']
vrf.name = parm['name']
vrf.description = parm['description']
|
# -*- coding: utf-8 -*-
"""
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License,
or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, see <http://www.gnu.org/licenses/>.
changelog:
0.27 - 2012-08-12 - hgg
fix "global name 'js_answer' is not defined" bug
fix captcha bug #1 (failed on non-english "captcha wrong" errors)
"""
import re
from time import time
from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
from module.plugins.internal.CaptchaService import ReCaptcha
from module.common.json_layer import json_loads
class FilepostCom(SimpleHoster):
    __name__ = "FilepostCom"
    __type__ = "hoster"
    __pattern__ = r'https?://(?:www\.)?(?:filepost\.com/files|fp.io)/([^/]+).*'
    __version__ = "0.28"
    __description__ = """Filepost.com hoster plugin"""
    __author_name__ = "zoidberg"
    __author_mail__ = "zoidberg@mujmail.cz"

    FILE_INFO_PATTERN = r'<input type="text" id="url" value=\'<a href[^>]*>(?P<N>[^>]+?) - (?P<S>[0-9\.]+ [kKMG]i?B)</a>\' class="inp_text"/>'
    OFFLINE_PATTERN = r'class="error_msg_title"> Invalid or Deleted File. </div>|<div class="file_info file_info_deleted">'
    PREMIUM_ONLY_PATTERN = r'members only. Please upgrade to premium|a premium membership is required to download this file'
    RECAPTCHA_KEY_PATTERN = r"Captcha.init\({\s*key:\s*'([^']+)'"
    FLP_TOKEN_PATTERN = r"set_store_options\({token: '([^']+)'"

    def handleFree(self):
        """Free-user download: resolve token/captcha, wait, then download."""
        # Find token and captcha key
        file_id = re.match(self.__pattern__, self.pyfile.url).group(1)
        m = re.search(self.FLP_TOKEN_PATTERN, self.html)
        if m is None:
            self.parseError("Token")
        flp_token = m.group(1)
        m = re.search(self.RECAPTCHA_KEY_PATTERN, self.html)
        if m is None:
            self.parseError("Captcha key")
        captcha_key = m.group(1)
        # Ask the server for the mandatory free-user wait time.
        get_dict = {'SID': self.req.cj.getCookie('SID'), 'JsHttpRequest': str(int(time() * 10000)) + '-xml'}
        post_dict = {'action': 'set_download', 'token': flp_token, 'code': file_id}
        wait_time = int(self.getJsonResponse(get_dict, post_dict, 'wait_time'))
        if wait_time > 0:
            self.wait(wait_time)
        post_dict = {"token": flp_token, "code": file_id, "file_pass": ''}
        if 'var is_pass_exists = true;' in self.html:
            # Password-protected link: try each stored password in turn.
            for file_pass in self.getPassword().splitlines():
                get_dict['JsHttpRequest'] = str(int(time() * 10000)) + '-xml'
                post_dict['file_pass'] = file_pass
                self.logInfo("Password protected link, trying " + file_pass)
                download_url = self.getJsonResponse(get_dict, post_dict, 'link')
                if download_url:
                    break
            else:
                self.fail("No or incorrect password")
        else:
            # Solve recaptcha; first attempt is without a challenge answer.
            recaptcha = ReCaptcha(self)
            for i in xrange(5):
                get_dict['JsHttpRequest'] = str(int(time() * 10000)) + '-xml'
                if i:
                    post_dict['recaptcha_challenge_field'], post_dict['recaptcha_response_field'] = recaptcha.challenge(
                        captcha_key)
                    self.logDebug(u"RECAPTCHA: %s : %s : %s" % (
                        captcha_key, post_dict['recaptcha_challenge_field'], post_dict['recaptcha_response_field']))
                download_url = self.getJsonResponse(get_dict, post_dict, 'link')
                if download_url:
                    if i:
                        self.correctCaptcha()
                    break
                elif i:
                    self.invalidCaptcha()
            else:
                self.fail("Invalid captcha")
        # Download
        self.download(download_url)

    def getJsonResponse(self, get_dict, post_dict, field):
        """POST to filepost's JSON endpoint and return `field` from the answer.

        Returns None on recoverable errors (wrong password / wrong
        captcha) so the caller can retry; unrecoverable errors fail the
        download. A 'download_delay' error triggers a delayed retry.
        """
        json_response = json_loads(self.load('https://filepost.com/files/get/', get=get_dict, post=post_dict))
        self.logDebug(json_response)
        if 'js' not in json_response:
            self.parseError('JSON %s 1' % field)
        # Recoverable problems are reported in json_response['js']['error'].
        if 'error' in json_response['js']:
            if json_response['js']['error'] == 'download_delay':
                self.retry(wait_time=json_response['js']['params']['next_download'])
            elif 'Wrong file password' in json_response['js']['error']:
                return None
            elif 'You entered a wrong CAPTCHA code' in json_response['js']['error']:
                return None
            elif 'CAPTCHA Code nicht korrekt' in json_response['js']['error']:
                return None
            elif 'CAPTCHA' in json_response['js']['error']:
                self.logDebug('error response is unknown, but mentions CAPTCHA -> return None')
                return None
            else:
                self.fail(json_response['js']['error'])
        if 'answer' not in json_response['js'] or field not in json_response['js']['answer']:
            self.parseError('JSON %s 2' % field)
        return json_response['js']['answer'][field]


getInfo = create_getInfo(FilepostCom)
|
rate=baudINDI,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_TWO,
bytesize=serial.EIGHTBITS
)
    def open_connection(self):
        """ Open serial connection to INDI"""
        # Close first so a stale handle doesn't block reopening the port.
        self.indi.close()
        self.indi.open()
        indi_open = self.indi.isOpen()
        if indi_open is False:
            raise RuntimeError('unable to connect to INDI')
    def close_connection(self):
        """Close connection to INDI"""
        self.indi.close()
    def get_id(self):
        """Get ID"""
        # *IDN? is the standard SCPI identification query.
        self.indi.write('*IDN?\r'.encode())
        return self.indi.readline().decode()
    def help(self):
        """Prints serial command options (operational commands)"""
        self.indi.write('HELP\r'.encode())
        # The HELP reply spans five lines; echo each one.
        for _ in range(1, 6):
            print(self.indi.readline().decode())
    def turn_on(self):
        """Turns Quanta-Ray INDI on"""
        self.indi.write('ON\r'.encode())
    def turn_off(self):
        """Turns Quanta-Ray INDI off"""
        self.indi.write('OFF\r'.encode())
def set_lamp(self, lamp_set='FIX', lamp_pulse=''):
"""Select lamp trigger source
lamp_set:
FIX = set lamp trigger to Fixed
EXT = set lamp trigger to External Source
VAR = set lamp trigger to Variable
INH = inhibit lamp trigger
lamp_pul | se = set rate of lamp (pulses/second)
"""
if lamp_pulse != '':
| self.indi.write(('LAMP '+ str(lamp_set) + ' ' + str(lamp_pulse) + '\r').encode())
else:
self.indi.write(('LAMP '+ str(lamp_set) + '\r').encode())
    def get_lamp(self):
        """ Returns the lamp Variable Rate trigger setting """
        self.indi.write('LAMP VAR?\r'.encode())
        return self.indi.readline().decode()
    def set(self, cmd='NORM'):
        """Set mode, type, or timing of Q-switch
        cmd:
            LONG = long pulse mode
            EXT = external mode
            NORM = normal mode
            SING = single shot
            FIR = fire Q-switch once
            REP = repetitive shots
        """
        self.indi.write(('QSW ' + str(cmd) + '\r').encode())
    def single_shot(self):
        """Set single shot"""
        # Convenience wrapper around set('SING').
        self.set('SING')
    def normal_mode(self):
        """Set normal mode"""
        # Convenience wrapper around set('NORM').
        self.set('NORM')
    def repeat_mode(self, watchdog_timeout):
        """Set repetitive shots and ensure the watchdog is configured.

        :param watchdog_timeout: seconds before laser safety shutoff
        :type watchdog_timeout: int
        :raises ValueError: if a 0 s (disabled) watchdog is requested and
            the operator declines to continue
        """
        if watchdog_timeout == 0:
            # A 0 s watchdog disables the safety shutoff, so require an
            # explicit interactive acknowledgement from the operator.
            dummy = input('QuantaRay INDI Laser watchdog is 0 s. This will ' +
                          'disable watchdog and the laser will continue to run ' +
                          'after the experiment has finished. Continue? [ y / n ]:')
            if dummy == 'n':
                raise ValueError('Disabling watchdog when using repeat mode is not advised')
        self.set_watchdog(watchdog_timeout)
        self.set('REP')
    def get(self):
        """Queries and returns the Q-switch settings."""
        self.indi.write('QSW?\r'.encode())
        return self.indi.readline().decode()
    def set_adv(self, delay):
        """Set advanced sync delay"""
        self.indi.write(('ADV ' + str(delay) + '\r').encode())
    def get_adv(self):
        """Queries and returns the Q-switch Advanced Sync settings"""
        self.indi.write('QSW ADV? \r'.encode())
        return self.indi.readline().decode()
    def set_delay(self, delay):
        """Sets delay for Q-switch delay"""
        self.indi.write(('QSW DEL ' + str(delay) + '\r').encode())
    def get_delay(self):
        """Queries and returns the Q-switch delay setting"""
        self.indi.write('QSW DEL? \r'.encode())
        return self.indi.readline().decode()
    def set_echo(self, mode=0):
        """Set echo mode of INDI.
        mode:
            0 = show prompts
            1 = laser echoes characters as received
            2 = shows error messages
            3 = output line feed for every command (even those that don't normally generate a response)
            4 = terminate responses with <cr><lf>, rather than just <lf>
            5 = use XON/XOFF handshaking for data sent to laser (not for data sent from the laser)
        """
        self.indi.write(('ECH ' + str(mode) + '\r').encode())
    def set_watchdog(self, time=10):
        """Set range of watchdog. If the laser does not receive communication
        from the control computer within the specified time, it turns off. If
        disabled, the default time is zero. Time must be between 0 and 110
        seconds.
        """
        # NOTE(review): the parameter name `time` shadows the stdlib module;
        # kept as-is since callers may pass it by keyword.
        if time < 0 or time > 110:
            raise ValueError('Invalid watchdog time. Choose value between 0 and 110 seconds.')
        self.indi.write(('WATC ' + str(time) + '\r').encode())
    def set_baud(self, baud_indi=9600):
        """Sets baudrate of laser. At power-up, baudrate is always 9600."""
        self.indi.write(('BAUD ' + str(baud_indi) + '\r').encode())
    def get_amp_setting(self):
        """Queries amplifier PFN command setting in percent"""
        self.indi.write('READ:APFN?\r'.encode())
        return self.indi.readline().decode()
    def get_amp_power(self):
        """Queries amplifier PFN monitor in percent (what PFN power supply is actually doing)"""
        self.indi.write('READ:AMON?\r'.encode())
        return self.indi.readline().decode()
    def get_osc_setting(self):
        """Queries oscillator PFN command setting in percent"""
        self.indi.write('READ:OPFN?\r'.encode())
        return self.indi.readline().decode()
    def get_osc_power(self):
        """Queries oscillator PFN monitor in percent (what PFN power supply is actually doing)"""
        self.indi.write('READ:OMON?\r'.encode())
        return self.indi.readline().decode()
    def get_qsw_adv(self):
        """Queries and returns the current Q-Switch Advanced Sync setting"""
        self.indi.write('READ:QSWADV?\r'.encode())
        return self.indi.readline().decode()
    def get_shots(self):
        """Queries and returns the number of shots"""
        self.indi.write('SHOT?\r'.encode())
        return self.indi.readline().decode()
    def get_trig_rate(self):
        """Queries and returns the lamp trigger rate (unless lamp trigger source is external"""
        self.indi.write('READ:VAR?\r'.encode())
        return self.indi.readline().decode()
    def set_osc_power(self, percent=0):
        """set the Oscillator PFN voltage as a percentage of factory full scale"""
        self.indi.write(('OPFN ' + str(percent) + '\r').encode())
    def set_amp_power(self, percent=0):
        """set the PFN Amplifier voltage as a percentage of factory full scale"""
        self.indi.write(('APFN ' + str(percent) + '\r').encode())
def get_status(self):
"""Returns the laser status.
Result is a list with entries of the form: [bit, error], where "bit" is
the bit of the status byte, and "error" is a text description of the
error.
"""
self.indi.write('*STB?\r'.encode())
stb_value = bin(int(self.indi.readline().decode()))
stb_value = stb_value[2:] # remove 0b at beginning
#print 'stb_value: ', stb_value # prints binary status byte value
error_list = list()
if stb_value[len(stb_value)-1] == '1':
bit = '0'
error = 'Laser emission can occur'
stat = [bit, error]
error_list.append(stat)
if stb_value[len(stb_value)-2] == '1':
bit = '1'
error = 'Reserved error'
stat = [bit, error]
error_list.append(stat)
if stb_value[len(stb_value)-3] == '1':
bit = '2'
error = ('Data is in the error log.\n'
+ '(use QRstatus().getHist() for details on the error.)')
stat = [bit, error]
error_list.append(stat)
if stb_value[len(stb_value)-4] == '1':
bit = '3'
error = 'Check QRstatus().getQuest() for error'
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from marionette_driver.by import By
from marionette_harness import MarionetteTestCase
class TestPosition(MarionetteTestCase):
    """Checks that an element's rect coordinates are reported correctly."""

    def test_should_get_element_position_back(self):
        test_url = self.marionette.absolute_url('rectangles.html')
        self.marionette.navigate(test_url)
        r2 = self.marionette.find_element(By.ID, "r2")
        location = r2.rect
        # rectangles.html places #r2 at (11, 10).
        self.assertEqual(11, location['x'])
        self.assertEqual(10, location['y'])
|
import unittest2
from zounds.util import simple_in_memory_settings
from .preprocess import MeanStdNormalization, PreprocessingPipeline
import featureflow as ff
import numpy as np
class MeanStdTests(unittest2.TestCase):
    """Round-trip tests for MeanStdNormalization across input ranks."""

    def _forward_backward(self, shape):
        """Train a normalization pipeline on random data of `shape`, then
        check that transform followed by inverse_transform reproduces the
        input exactly (up to floating-point tolerance)."""
        @simple_in_memory_settings
        class Model(ff.BaseModel):
            meanstd = ff.PickleFeature(
                MeanStdNormalization,
                store=False)
            pipeline = ff.PickleFeature(
                PreprocessingPipeline,
                needs=(meanstd,),
                store=True)

        training = np.random.random_sample((100,) + shape)
        _id = Model.process(meanstd=training)
        model = Model(_id)
        data_shape = (10,) + shape
        data = np.random.random_sample(data_shape)
        result = model.pipeline.transform(data)
        self.assertEqual(data_shape, result.data.shape)
        inverted = result.inverse_transform()
        self.assertEqual(inverted.shape, data.shape)
        np.testing.assert_allclose(inverted, data)

    def test_can_process_1d(self):
        self._forward_backward((9,))

    def test_can_process_2d(self):
        self._forward_backward((3, 4))

    def test_can_process_3d(self):
        self._forward_backward((5, 4, 7))
| |
#!/usr/bin/env python
#-*-coding:utf-8-*-
from evaluator import Evaluator
from loader import Loader
import matplotlib.pyplot as plt
from confidence_weighted import ConfidenceWeighted
def graph_plot(plt_obj, show=False):
    """Apply limits, labels and a legend to the accuracy plot, then either
    display it (show=True) or start a fresh figure for further plotting."""
    plt_obj.ylim(0, 1)
    plt_obj.xlabel("Number of trials")
    plt_obj.ylabel("Accuracy")
    plt_obj.legend(["CW", "CW1", "CW2"], loc="lower right")
    if show is True:
        plt_obj.show()
        return
    plt_obj.figure()
if __name__ == '__main__':
    # Construct confidence-weighted models with different confidence
    # parameters (the defaults plus 0.30 and 0.50).
    cw = [
        ConfidenceWeighted(123),
        ConfidenceWeighted(123, 0.30),
        ConfidenceWeighted(123, 0.50),
    ]
    # training phase
    loader = Loader('a1a', 123, 30956, 1605)
    y_vec, feats_vec = loader.load_train()
    for model in cw:
        evaluator = Evaluator(model, y_vec, feats_vec)
        evaluator.update()
        plt.plot(evaluator.accuracy)
    graph_plot(plt)
    # test phase
    y_vec, feats_vec = loader.load_test()
    for model in cw:
        evaluator = Evaluator(model, y_vec, feats_vec)
        evaluator.predict()
        plt.plot(evaluator.accuracy)
    graph_plot(plt, show=True)
|
#!/usr/bin/env python
# coding: utf-8
import os
import csv
from schedule_entry import EntryStatus
from machine import MachineStatus
def dump_stat(path, data, headers):
    """Write ``headers`` followed by each row of ``data`` as CSV to ``path``."""
    with open(path, 'w') as out:
        writer = csv.writer(out)
        writer.writerow(headers)
        writer.writerows(data)
class Statistics():
    """Collects scheduler/provisioner events and dumps them as CSV files."""

    def __init__(self):
        # Each list accumulates tuples mapping 1:1 onto CSV rows (see dump()).
        self.numbers = []    # (timestamp, n_jobs_s, n_jobs_e, n_machines_a, n_machines_r)
        self.scheds = []     # (timestamp, budget, cost_prediction, wf_end)
        self.entries = []    # (host, slot, workflow, dag_job_id, condor_id, event, timestamp)
        self.durations = []  # (job_type, avg_execute, avg_queue, avg_total, count)

    def snapshot(self, timestamp, entries, machines):
        """Record the current counts of jobs and machines by status."""
        # Number of jobs in scheduled/execution
        njs = len([e for e in entries if e.status == EntryStatus.scheduled])
        nje = len([e for e in entries if e.status == EntryStatus.executing])
        # Number of machines allocating/running
        nma = len([m for m in machines if m.status == MachineStatus.allocating])
        nmr = len([m for m in machines if m.status == MachineStatus.running])
        self.numbers.append((timestamp, njs, nje, nma, nmr))

    def schedshot(self, provisioner):
        """Record one row describing the provisioner's current budget state."""
        self.scheds.append((provisioner.timestamp, provisioner.budget, provisioner.cost_pred, provisioner.wf_end))

    def jobs(self, entries):
        """Record per-event rows for each entry and aggregate per-job-type durations."""
        d = {}
        for e in entries:
            if e.host != None:
                host_id = e.host.id
                condor_slot = e.host.condor_slot
            else:
                host_id = condor_slot = None
            if e.job != None:
                wf_id = e.job.wf_id
                dag_job_id = e.job.dag_job_id
            else:
                wf_id = dag_job_id = None
            # One row per logged event that has a timestamp.
            for event in e.log.keys():
                if e.log[event]:
                    self.entries.append((host_id, condor_slot, wf_id, dag_job_id, e.condor_id, event, e.log[event]))
            # Aggregate durations only when the full lifecycle was logged.
            if dag_job_id and 'EXECUTE' in e.log.keys() and 'JOB_TERMINATED' in e.log.keys() and 'SUBMIT' in e.log.keys():
                # Derive a job "type" key from the dag_job_id prefix
                # (e.g. 'merge_0' -> 'merge'; longer ids keep two parts).
                parts = dag_job_id.split('_')
                if len(parts) == 2:
                    jt = parts[0]
                else:
                    jt = '_'.join(parts[:2])
                # Running sums per job type: [count, execute, queue, total].
                d[jt] = [
                    (d[jt][0] if jt in d.keys() else 0) +1,
                    (d[jt][1] if jt in d.keys() else 0) +(e.log['JOB_TERMINATED'] - e.log['EXECUTE']).total_seconds(),
                    (d[jt][2] if jt in d.keys() else 0) +(e.log['EXECUTE'] - e.log['SUBMIT']).total_seconds(),
                    (d[jt][3] if jt in d.keys() else 0) +(e.log['JOB_TERMINATED'] - e.log['SUBMIT']).total_seconds(),
                ]
        for jt in d.keys():
            # Convert the sums into averages; keep the sample count.
            self.durations.append((jt, d[jt][1]*1.0 / d[jt][0], d[jt][2]*1.0 / d[jt][0], d[jt][3]*1.0 / d[jt][0], d[jt][0]))

    def dump(self):
        """Write all collected statistics to ~/.dynamic_provisioning/*.csv."""
        home = os.path.expanduser('~')
        directory = os.path.join(home, '.dynamic_provisioning')
        if not os.path.exists(directory):
            os.makedirs(directory)
        print 'Writing statistics in ' + str(directory)
        path = os.path.join(directory, 'numbers.csv')
        headers = ['timestamp','n_jobs_s','n_jobs_e','n_machines_a','n_machines_r']
        dump_stat(path, self.numbers, headers)
        path = os.path.join(directory, 'budget.csv')
        headers = ['timestamp', 'budget', 'cost_prediction', 'wf_end']
        dump_stat(path, self.scheds, headers)
        path = os.path.join(directory, 'jobs.csv')
        headers = ['host', 'slot', 'workflow', 'dag_job_id','condor_id', 'event', 'timestamp']
        dump_stat(path, self.entries, headers)
        path = os.path.join(directory, 'durations.csv')
        headers = ['job', 'execute_time', 'queue_time', 'total_time', 'n']
        dump_stat(path, self.durations, headers)
|
# coding=utf-8
# Licensed Materials - Property of IBM
# Copyright IBM Corp. 2019
# Separated test code with Python 3.6 syntax.
import typing
import decimal
from streamsx.spl.types import int64
class NTS(typing.NamedTuple):
    # Minimal schema: one int and one string attribute.
    x: int
    msg: str
class NamedTupleBytesSchema(typing.NamedTuple):
    # str/bytes/bool attributes plus optional variants of each.
    idx: str
    msg: bytes
    flag: bool
    oidx: typing.Optional[str] = None
    omsg: typing.Optional[bytes] = None
    oflag: typing.Optional[bool] = None
class NamedTupleNumbersSchema2(typing.NamedTuple):
    # Numeric schema; like NamedTupleNumbersSchema but adds a set attribute.
    i64: int
    f64: float
    d128: decimal.Decimal
    c64: complex
    si64: typing.Set[int]
    oi64: typing.Optional[int] = None
    of64: typing.Optional[float] = None
    od128: typing.Optional[decimal.Decimal] = None
    oc64: typing.Optional[complex] = None
    omi64li64: typing.Optional[typing.Mapping[int,typing.List[int]]] = None
class NamedTupleNumbersSchema(typing.NamedTuple):
    # Numeric schema with optional variants for every numeric type.
    i64: int
    f64: float
    d128: decimal.Decimal
    c64: complex
    oi64: typing.Optional[int] = None
    of64: typing.Optional[float] = None
    od128: typing.Optional[decimal.Decimal] = None
    oc64: typing.Optional[complex] = None
    omi64li64: typing.Optional[typing.Mapping[int,typing.List[int]]] = None
#tuple<float64 start_time, float64 end_time, float64 confidence>
class SpottedSchema(typing.NamedTuple):
    start_time: float
    end_time: float
    confidence: float
class NamedTupleSetOfListofTupleSchema(typing.NamedTuple):
    # Set of lists of nested tuples.
    slt: typing.Set[typing.List[SpottedSchema]]
#tuple<map<rstring, tuple<float64 start_time, float64 end_time, float64 confidence>> keywords_spotted>
class NamedTupleMapWithTupleSchema(typing.NamedTuple):
    keywords_spotted: typing.Mapping[str,SpottedSchema]
class NamedTupleMapWithListTupleSchema(typing.NamedTuple):
    keywords_spotted: typing.Mapping[str,typing.List[SpottedSchema]]
class NamedTupleListOfTupleSchema(typing.NamedTuple):
    spotted: typing.List[SpottedSchema]
#tuple<rstring str, tuple<float64 start_time, float64 end_time, float64 confidence> spotted>
class NamedTupleNestedTupleSchema(typing.NamedTuple):
    key: str
    spotted: SpottedSchema
#tuple<int64 i64, list<tuple<rstring str, tuple<float64 start_time, float64 end_time, float64 confidence> spotted>> spottedList>
class NamedTupleListOfNestedTupleSchema(typing.NamedTuple):
    i64: int
    spottedList: typing.List[NamedTupleNestedTupleSchema]
#tuple<rstring s1, tuple<int64 i64, list<tuple<rstring key, tuple<float64 start_time, float64 end_time, float64 confidence> spotted>> spottedList> tupleWList>
class NamedTupleNestedList2Schema(typing.NamedTuple):
    s1: str
    tupleWList: NamedTupleListOfNestedTupleSchema
#tuple<rstring s2, tuple<rstring s1, tuple<int64 i64, list<tuple<rstring key, tuple<float64 start_time, float64 end_time, float64 confidence> spotted>> spottedList> tupleWList> tupleWList2>
class NamedTupleNestedList3Schema(typing.NamedTuple):
    s2: str
    tupleWList2: NamedTupleNestedList2Schema
#tuple<int64 i64, map<rstring, tuple<rstring str, tuple<float64 start_time, float64 end_time, float64 confidence> spotted>> spotted>
class NamedTupleMapOfNestedTupleSchema(typing.NamedTuple):
    i64: int
    spottedMap: typing.Mapping[str,NamedTupleNestedTupleSchema]
#tuple<rstring s1, tuple<int64 i64, map<rstring, tuple<rstring key, tuple<float64 start_time, float64 end_time, float64 confidence> spotted>> spottedMap> tupleWMap>
class NamedTupleNestedMap2Schema(typing.NamedTuple):
    s1: str
    tupleWMap: NamedTupleMapOfNestedTupleSchema
#tuple<rstring s2, tuple<rstring s1, tuple<int64 i64, map<rstring, tuple<rstring key, tuple<float64 start_time, float64 end_time, float64 confidence> spotted>> spottedMap> tupleWMap> tupleWMap2>
class NamedTupleNestedMap3Schema(typing.NamedTuple):
    s2: str
    tupleWMap2: NamedTupleNestedMap2Schema
# The following four schemas form a three-level nesting chain
# (Person -> Address -> Contacts -> TestSchema).
class TestSchema(typing.NamedTuple):
    flag: bool
    i64: int
class ContactsSchema(typing.NamedTuple):
    mail: str
    phone: str
    nested_tuple: TestSchema
class AddressSchema(typing.NamedTuple):
    street: str
    city: str
    contacts: ContactsSchema
class PersonSchema(typing.NamedTuple):
    name: str
    age: int
    address: AddressSchema
#tuple<int64 x_coord, int64 y_coord>
class Point2DSchema(typing.NamedTuple):
    x_coord: int
    y_coord: int
#tuple<int64 x_coord, int64 y_coord, int64 z_coord>
class Point3DSchema(typing.NamedTuple):
    x_coord: int
    y_coord: int
    z_coord: int
#tuple<tuple<int64 x_coord, int64 y_coord> center, int64 radius>
class CircleSchema(typing.NamedTuple):
    center: Point2DSchema
    radius: float
#tuple<float64 radius, boolean has_rings>
class CircleRadiusSchema(typing.NamedTuple):
    radius: float
    has_rings: bool
#tuple<tuple<int64 x_coord, int64 y_coord, int64 z_coord> center, int64 radius , int64 radius2>
class DonutSchema(typing.NamedTuple):
    center: Point3DSchema
    radius: int
    radius2: int
    rings: typing.List[CircleRadiusSchema]
#tuple<tuple<tuple<int64 x_coord, int64 y_coord> center, int64 radius> circle,
#      tuple<tuple<int64 x_coord, int64 y_coord, int64 z_coord> center, int64 radius , int64 radius2> torus>
class TripleNestedTupleAmbiguousAttrName(typing.NamedTuple):
    circle: CircleSchema # contains 'center' as tuple attribute
    torus: DonutSchema # contains also 'center' as a different tuple type attribute, contains 'rings' attribute
    rings: typing.List[CircleSchema] # rings with nested (anonymous C++ type)
#tuple<int64 int1, map<string, tuple<int64 x_coord, int64 y_coord>> map1>
class TupleWithMapToTupleAttr1(typing.NamedTuple):
    int1: int
    map1: typing.Mapping[str,Point2DSchema]
#tuple<int64 int2, map<string, tuple<int64 int1, map<rstring, tuple<int64 x_coord, int64 y_coord>> map1>> map2>
# This schema contains map attributes at different nesting levels with different attribute names and different Value types
class TupleWithMapToTupleWithMap(typing.NamedTuple):
    int2: int
    map2: typing.Mapping[str,TupleWithMapToTupleAttr1]
#tuple<int64 int1, map<string, tuple<int64 int1, map<rstring, tuple<int64 x_coord, int64 y_coord>> map1>> map1>
# This schema contains map attributes at different nesting levels with equal map attribute name (map1), but different Value types
class TupleWithMapToTupleWithMapAmbigousMapNames(typing.NamedTuple):
    int1: int
    map1: typing.Mapping[str,TupleWithMapToTupleAttr1]
#tuple<int64 int1, map<string, tuple<int64 x_coord, int64 y_coord, int64 z_coord>> map1>
#class TupleWithMapToTupleAttr2(typing.NamedTuple):
#    int1: int
#    map1: typing.Mapping[str,Point3DSchema]
|
# Copyright 2010 Michael Murr
#
# This file is part of LibForensics.
#
# LibForensics is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# LibForensics is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the | implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with LibForensics. If not, see <http://www.gnu.org/licenses/>.
"""Windows shell link files"""
from lf.win.shell.link.objects import (
ShellLink, FileAttributes, LinkFlags, ShellLinkHeader, StringData,
LinkInfo, VolumeID, CNRL, Ext | raDataBlock, ConsoleProps, ConsoleFEProps,
DarwinProps, ExpandableStringsDataBlock, EnvironmentProps,
IconEnvironmentProps, KnownFolderProps, PropertyStoreProps, ShimProps,
SpecialFolderProps, DomainRelativeObjId, TrackerProps,
VistaAndAboveIDListProps, TerminalBlock, ExtraDataBlockFactory,
StringDataSet
)
__docformat__ = "restructuredtext en"

# Public API: re-exports everything imported above from
# lf.win.shell.link.objects.
__all__ = [
    "ShellLink", "FileAttributes", "LinkFlags", "ShellLinkHeader",
    "StringData", "LinkInfo", "VolumeID", "CNRL", "ExtraDataBlock",
    "ConsoleProps", "ConsoleFEProps", "DarwinProps",
    "ExpandableStringsDataBlock", "EnvironmentProps", "IconEnvironmentProps",
    "KnownFolderProps", "PropertyStoreProps", "ShimProps",
    "SpecialFolderProps", "DomainRelativeObjId", "TrackerProps",
    "VistaAndAboveIDListProps", "TerminalBlock", "ExtraDataBlockFactory",
    "StringDataSet"
]
|
# -*- encoding: utf-8 -*-
###############################################################################
# #
# Copyright (C) 2015 Trustcode - www.trustcode.com.br #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version.                                         #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the               #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
###############################################################################
from openerp import models, api, fields
class mrp_bom(models.Model):
    """Extends BOM explosion so the custom dimension fields
    (largura/comprimento/unidades) on BOM lines are copied onto the
    exploded product lines."""
    _inherit = 'mrp.bom'

    def _bom_explode(self, cr, uid, bom, product, factor, properties=None,
                     level=0, routing_id=False, previous_products=None,
                     master_bom=None, context=None):
        # Delegate the actual explosion to the standard implementation.
        res = super(mrp_bom, self)._bom_explode(
            cr, uid, bom, product, factor,
            properties=properties, level=level,
            routing_id=routing_id,
            previous_products=previous_products,
            master_bom=master_bom, context=context
        )
        results = res[0]  # product_lines
        results2 = res[1]  # workcenter_lines
        # Copy the dimension fields from each BOM line to its product line.
        # NOTE(review): assumes `results` is parallel to bom.bom_line_ids
        # (same order and length) — TODO confirm against the base
        # _bom_explode implementation.
        indice = 0
        for bom_line_id in bom.bom_line_ids:
            line = results[indice]
            line['largura'] = bom_line_id.largura
            line['comprimento'] = bom_line_id.comprimento
            line['unidades'] = bom_line_id.unidades
            indice += 1
        return results, results2
class mrp_bom_line(models.Model):
    """BOM line carrying physical dimensions used to derive the quantity."""
    _inherit = 'mrp.bom.line'

    largura = fields.Float(string="Largura", digits=(16, 6))
    comprimento = fields.Float(string="Comprimento", digits=(16, 6))
    unidades = fields.Float(string="Unidades", digits=(16, 6))

    @api.onchange('largura', 'comprimento', 'unidades')
    def compute_quantity(self):
        # Quantity = largura * comprimento * unidades; an unset (falsy)
        # dimension counts as 1 so it does not zero out the product.
        self.product_qty = (self.largura or 1) * \
            (self.comprimento or 1) * (self.unidades or 1)
class mrp_production_product_line(models.Model):
    """Mirror the BOM-line dimension fields on production product lines."""
    _inherit = 'mrp.production.product.line'

    largura = fields.Float(string="Largura", digits=(16, 6))
    comprimento = fields.Float(string="Comprimento", digits=(16, 6))
    unidades = fields.Float(string="Unidades", digits=(16, 6))
class stock_move(models.Model):
    """Mirror the BOM-line dimension fields on stock moves."""
    _inherit = 'stock.move'

    largura = fields.Float(string="Largura", digits=(16, 6))
    comprimento = fields.Float(string="Comprimento", digits=(16, 6))
    unidades = fields.Float(string="Unidades", digits=(16, 6))
class mrp_production(models.Model):
    """Propagates the dimension fields onto the consume stock move."""
    _inherit = 'mrp.production'

    def _make_production_consume_line(self, cr, uid, line, context=None):
        # Let the standard implementation create the move, then copy the
        # custom dimension fields from the product line onto it.
        move_id = super(mrp_production, self)\
            ._make_production_consume_line(
            cr, uid, line, context=context)
        self.pool['stock.move'].write(cr, uid, move_id,
                                      {'unidades': line.unidades,
                                       'comprimento': line.comprimento,
                                       'largura': line.largura})
        return move_id
|
# -*- coding: utf-8 -*
import uuid
import random
import string
from test import DjangoTestCase
class Account(object):
    """Value object holding signup credentials, plus factories for random ones."""

    def __init__(self, email=None, password=None):
        self.email = email
        self.password = password

    @staticmethod
    def create_email():
        """Return a unique throwaway address under example.com."""
        return u"some.one+%s@example.com" % uuid.uuid4().hex.__str__()

    @staticmethod
    def create_password(length=20):
        """Return a random digits-only password of ``length`` characters."""
        digits = (random.choice(string.digits) for _ in range(length))
        return u"".join(digits)
class AccountTestCase(DjangoTestCase):
    """Helpers for exercising the signup/login/logout HTTP endpoints.

    Fields left as None are simply omitted from the POSTed form data, so
    tests can exercise partially-filled submissions.
    """

    def signup(self, email=None, password=None, password_confirmation=None):
        """POST to /signup; return the (Account, response) pair."""
        data = {}
        for key, value in ((u"email", email),
                           (u"password", password),
                           (u"password_confirmation", password_confirmation)):
            if value is not None:
                data[key] = value
        response = self.http_post(u"/signup", data)
        return Account(email=email, password=password), response

    def login(self, email=None, password=None):
        """POST whichever credentials were supplied to /login."""
        data = {}
        for key, value in ((u"email", email), (u"password", password)):
            if value is not None:
                data[key] = value
        return self.http_post(u"/login", data)

    def logout(self, email=None, password=None):
        """POST whichever credentials were supplied to /logout."""
        data = {}
        for key, value in ((u"email", email), (u"password", password)):
            if value is not None:
                data[key] = value
        return self.http_post(u"/logout", data)
# Copyright (c) 2008, 2012 testtools developers. See LICENSE for details.
from testtools import TestCase
from testtools.matchers import Equals, MatchesException, Raises
from testtools.content_type import (
ContentType,
JSON,
UTF8_TEXT,
)
class TestContentType(TestCase):
    """Unit tests for the ContentType value object."""

    def test___init___None_errors(self):
        # Both the primary type and the subtype are mandatory.
        raises_value_error = Raises(MatchesException(ValueError))
        self.assertThat(lambda:ContentType(None, None), raises_value_error)
        self.assertThat(lambda:ContentType(None, "traceback"),
            raises_value_error)
        self.assertThat(lambda:ContentType("text", None), raises_value_error)

    def test___init___sets_ivars(self):
        # type/subtype are stored verbatim; parameters default to empty.
        content_type = ContentType("foo", "bar")
        self.assertEqual("foo", content_type.type)
        self.assertEqual("bar", content_type.subtype)
        self.assertEqual({}, content_type.parameters)

    def test___init___with_parameters(self):
        content_type = ContentType("foo", "bar", {"quux": "thing"})
        self.assertEqual({"quux": "thing"}, content_type.parameters)

    def test___eq__(self):
        # Equality covers type, subtype, and the parameters dict.
        content_type1 = ContentType("foo", "bar", {"quux": "thing"})
        content_type2 = ContentType("foo", "bar", {"quux": "thing"})
        content_type3 = ContentType("foo", "bar", {"quux": "thing2"})
        self.assertTrue(content_type1.__eq__(content_type2))
        self.assertFalse(content_type1.__eq__(content_type3))

    def test_basic_repr(self):
        content_type = ContentType('text', 'plain')
        self.assertThat(repr(content_type), Equals('text/plain'))

    def test_extended_repr(self):
        # Parameters are rendered sorted by name, quoted, semicolon-separated.
        content_type = ContentType(
            'text', 'plain', {'foo': 'bar', 'baz': 'qux'})
        self.assertThat(
            repr(content_type), Equals('text/plain; baz="qux"; foo="bar"'))
class TestBuiltinContentTypes(TestCase):
    """Checks for the module-level UTF8_TEXT and JSON content-type constants."""

    def test_plain_text(self):
        # The UTF8_TEXT content type represents UTF-8 encoded text/plain.
        for actual, expected in ((UTF8_TEXT.type, 'text'),
                                 (UTF8_TEXT.subtype, 'plain'),
                                 (UTF8_TEXT.parameters, {'charset': 'utf8'})):
            self.assertThat(actual, Equals(expected))

    def test_json_content(self):
        # The JSON content type represents implicitly UTF-8 application/json.
        for actual, expected in ((JSON.type, 'application'),
                                 (JSON.subtype, 'json'),
                                 (JSON.parameters, {})):
            self.assertThat(actual, Equals(expected))
def test_suite():
    """Return this module's tests via the legacy unittest loader protocol."""
    from unittest import TestLoader
    loader = TestLoader()
    return loader.loadTestsFromName(__name__)
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import contextlib
import imp
import os
import sys
import inspect
# Python 3.3's importlib caches filesystem reads for faster imports in the
# general case. But sometimes it's necessary to manually invalidate those
# caches so that the import system can pick up new generated files. See
# https://github.com/astropy/astropy/issues/820
if sys.version_info[:2] >= (3, 3):
from importlib import invalidate_caches
else:
invalidate_caches = lambda: None
class _DummyFile(object):
"""A noop writeable object."""
def write(self, s):
pass
def flush(self):
pass
@contextlib.contextmanager
def silence():
    """A context manager that silences sys.stdout and sys.stderr."""
    saved_stdout, saved_stderr = sys.stdout, sys.stderr
    sys.stdout = _DummyFile()
    sys.stderr = _DummyFile()
    try:
        yield
    finally:
        # Restore the real streams on both the success and the exception
        # path, so a propagating traceback can still be printed.
        sys.stdout = saved_stdout
        sys.stderr = saved_stderr
# On Windows the filesystem "hidden" flag is queried through the Win32 API;
# on every other platform there is no such attribute, so report False.
if sys.platform == 'win32':
    import ctypes

    def _has_hidden_attribute(filepath):
        """
        Returns True if the given filepath has the hidden attribute on
        MS-Windows. Based on a post here:
        http://stackoverflow.com/questions/284115/cross-platform-hidden-file-detection
        """
        if isinstance(filepath, bytes):
            # GetFileAttributesW requires a unicode path.
            filepath = filepath.decode(sys.getfilesystemencoding())
        try:
            attrs = ctypes.windll.kernel32.GetFileAttributesW(filepath)
            # -1 (INVALID_FILE_ATTRIBUTES) means the call failed.
            assert attrs != -1
            result = bool(attrs & 2)  # FILE_ATTRIBUTE_HIDDEN == 0x2
        except (AttributeError, AssertionError):
            result = False
        return result
else:
    def _has_hidden_attribute(filepath):
        # No hidden attribute outside of Windows.
        return False
def is_path_hidden(filepath):
    """
    Determines if a given file or directory is hidden.

    Parameters
    ----------
    filepath : str
        The path to a file or directory

    Returns
    -------
    hidden : bool
        Returns `True` if the file is hidden
    """
    name = os.path.basename(os.path.abspath(filepath))
    # A leading dot marks a hidden entry on POSIX; compare in the same
    # type (bytes vs str) that basename returned.
    dot = b'.' if isinstance(name, bytes) else '.'
    return name.startswith(dot) or _has_hidden_attribute(filepath)
def walk_skip_hidden(top, onerror=None, followlinks=False):
    """
    A wrapper for `os.walk` that skips hidden files and directories.

    This function does not have the parameter `topdown` from
    `os.walk`: the directories must always be recursed top-down when
    using this function.

    See also
    --------
    os.walk : For a description of the parameters
    """
    walker = os.walk(top, topdown=True, onerror=onerror,
                     followlinks=followlinks)
    for root, dirs, files in walker:
        # Prune in place: os.walk only honors removals from `dirs` when the
        # very same list object is mutated.
        dirs[:] = [d for d in dirs if not is_path_hidden(d)]
        files[:] = [f for f in files if not is_path_hidden(f)]
        yield root, dirs, files
def write_if_different(filename, data):
    """Write `data` to `filename`, if the content of the file is different.

    Parameters
    ----------
    filename : str
        The file name to be written to.
    data : bytes
        The data to be written to `filename`.
    """
    assert isinstance(data, bytes)
    existing = None
    if os.path.exists(filename):
        with open(filename, 'rb') as fd:
            existing = fd.read()
    # Skip the write when the file already holds exactly these bytes
    # (avoids touching the mtime and triggering rebuilds).
    if existing != data:
        with open(filename, 'wb') as fd:
            fd.write(data)
def import_file(filename):
    """
    Imports a module from a single file as if it doesn't belong to a
    particular package.

    NOTE(review): relies on the deprecated `imp` module and the 'U'
    (universal-newlines) open mode, which was removed in Python 3.11 —
    confirm the supported interpreter range before touching this.
    """
    # Specifying a traditional dot-separated fully qualified name here
    # results in a number of "Parent module 'astropy' not found while
    # handling absolute import" warnings. Using the same name, the
    # namespaces of the modules get merged together. So, this
    # generates an underscore-separated name which is more likely to
    # be unique, and it doesn't really matter because the name isn't
    # used directly here anyway.
    with open(filename, 'U') as fd:
        name = '_'.join(
            os.path.relpath(os.path.splitext(filename)[0]).split(os.sep)[1:])
        return imp.load_module(name, fd, filename, ('.py', 'U', 1))
def find_mod_objs(modname, onlylocals=False):
    """Returns all the public attributes of a module referenced by name.

    .. note::
        The returned lists do *not* include subpackages or modules of
        `modname`, nor do they include private attributes (those that
        begin with '_' or are not in `__all__`).

    Parameters
    ----------
    modname : str
        The name of the module to search.
    onlylocals : bool
        If True, only attributes that are either members of `modname` OR one of
        its modules or subpackages will be included.

    Returns
    -------
    localnames : list of str
        A list of the names of the attributes as they are named in the
        module `modname` .
    fqnames : list of str
        A list of the full qualified names of the attributes (e.g.,
        ``astropy.utils.misc.find_mod_objs``). For attributes that are
        simple variables, this is based on the local name, but for
        functions or classes it can be different if they are actually
        defined elsewhere and just referenced in `modname`.
    objs : list of objects
        A list of the actual attributes themselves (in the same order as
        the other arguments)
    """
    __import__(modname)
    mod = sys.modules[modname]

    # Honor the module's declared public API when present.
    if hasattr(mod, '__all__'):
        public = mod.__all__
    else:
        public = [k for k in dir(mod) if k[0] != '_']

    localnames = []
    fqnames = []
    objs = []
    for name in public:
        obj = mod.__dict__[name]
        # Submodules/subpackages are deliberately excluded.
        if inspect.ismodule(obj):
            continue
        localnames.append(name)
        objs.append(obj)
        # Prefer the object's own identity for the fully qualified name;
        # fall back to the local name for plain variables.
        if hasattr(obj, '__module__') and hasattr(obj, '__name__'):
            fqnames.append(obj.__module__ + '.' + obj.__name__)
        else:
            fqnames.append(modname + '.' + name)

    if onlylocals:
        keep = [fqn.startswith(modname) for fqn in fqnames]
        localnames = [n for n, ok in zip(localnames, keep) if ok]
        fqnames = [n for n, ok in zip(fqnames, keep) if ok]
        objs = [o for o, ok in zip(objs, keep) if ok]

    return localnames, fqnames, objs
"""
Django settings for dfiid project.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
import os

# Project root: three directory levels above this settings module.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
from django.core.exceptions import ImproperlyConfigured
def get_env(setting):
    """Get the environment setting or raise ImproperlyConfigured."""
    value = os.environ.get(setting)
    if value is None:
        # Fail fast at startup with an actionable message.
        error_msg = 'Set the %s env variable' % setting
        raise ImproperlyConfigured(error_msg)
    return value
# All secrets and deployment-specific values come from the environment.
SECRET_KEY = get_env('SECRET_KEY')
# NOTE(review): DEBUG is enabled and ALLOWED_HOSTS accepts any Host header —
# both must be tightened for production deployments.
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = ['*']
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.sitemaps',
    'nocaptcha_recaptcha',
    'core',
    'user',
    'content',
    'notify',
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'dfiid.urls'
WSGI_APPLICATION = 'dfiid.wsgi.application'
# PostgreSQL connection, fully parameterized via environment variables.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': get_env('DB_NAME'),
        'USER': get_env('DB_USER'),
        'PASSWORD': get_env('DB_PASSWORD'),
        'HOST': get_env('DB_HOST'),
        'PORT': get_env('DB_PORT'),
    }
}
LANGUAGE_CODE = get_env('LANGUAGE')
TIME_ZONE = 'Atlantic/Canary'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static and media files are served from short '/s/' and '/m/' prefixes.
STATIC_URL = '/s/'
STATICFILES_DIRS = ( os.path.join(BASE_DIR, 'static'), )
STATIC_ROOT = os.path.join(BASE_DIR, 's')
MEDIA_URL = '/m/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'm')
TEMPLATE_DIRS = [os.path.join(BASE_DIR, 'templates')]
# Custom user model lives in the 'user' app.
AUTH_USER_MODEL = 'user.User'
LOGIN_URL = '/login'
LOGIN_REDIRECT_URL = '/'
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
NORECAPTCHA_SITE_KEY = get_env('NORECAPTCHA_SITE_KEY')
NORECAPTCHA_SECRET_KEY = get_env('NORECAPTCHA_SECRET_KEY')
|
#Evaluate semantic space against MEN dataset
import sys
import utils
from scipy import stats
import numpy as np
from math import sqrt
#Note: this is scipy's spearman, without tie adjustment
def spearman(x, y):
    """Spearman rank correlation (scipy's implementation, no tie adjustment)."""
    rho, _pvalue = stats.spearmanr(x, y)
    return rho
def readMEN(annotation_file):
    """Parse a MEN annotation file.

    Each line has the form ``word1 word2 score``. Returns ``(pairs, humans)``
    where ``pairs`` is a list of (word1, word2) tuples and ``humans`` the
    corresponding human similarity scores as floats.
    """
    pairs = []
    humans = []
    # Use a context manager so the file is closed even if a line is malformed.
    with open(annotation_file, 'r') as f:
        for l in f:
            items = l.rstrip('\n').split()
            pairs.append((items[0], items[1]))
            humans.append(float(items[2]))
    return pairs, humans
def compute_men_spearman(dm_dict, annotation_file):
    """Spearman correlation between model similarities and MEN human scores.

    Returns ``(rho, count)`` where ``count`` is the number of MEN pairs
    whose both words appear in ``dm_dict``.
    """
    pairs, humans = readMEN(annotation_file)
    system_scores = []
    human_scores = []
    for (word_a, word_b), human in zip(pairs, humans):
        # Only score pairs fully covered by the semantic space.
        if word_a in dm_dict and word_b in dm_dict:
            similarity = utils.cosine_similarity(dm_dict[word_a], dm_dict[word_b])
            system_scores.append(similarity)
            human_scores.append(human)
    sp = spearman(human_scores, system_scores)
    return sp, len(system_scores)
|
t Gdk
import cPickle
import xapian
import json
import tempfile
import shutil
from sugar3.graphics.radiotoolbutton import RadioToolButton
from sugar3.graphics.palette import Palette
from sugar3.graphics import style
from sugar3 import env
from sugar3 import profile
from jarabe.journal import model
from jarabe.journal.misc import get_mount_icon_name
from jarabe.journal.misc import get_mount_color
from jarabe.view.palettes import VolumePalette
_JOURNAL_0_METADATA_DIR = '.olpc.store'
def _get_id(document):
"""Get the ID for the document in the xapian database."""
tl = document.termlist()
try:
term = tl.skip_to('Q').term
if len(term) == 0 or term[0] != 'Q':
return None
return term[1:]
except StopIteration:
return None
def _convert_entries(root):
    """Convert entries written by the datastore version 0.

    The metadata and the preview will be written using the new
    scheme for writing Journal entries to removable storage
    devices.

    - entries that do not have an associated file are not
      converted.
    - if an entry has no title we set it to Untitled and rename
      the file accordingly, taking care of creating a unique
      filename
    """
    try:
        database = xapian.Database(os.path.join(root, _JOURNAL_0_METADATA_DIR,
                                                'index'))
    except xapian.DatabaseError:
        logging.exception('Convert DS-0 Journal entries: error reading db: %s',
                          os.path.join(root, _JOURNAL_0_METADATA_DIR, 'index'))
        return
    # Ensure the destination metadata directory exists before converting.
    metadata_dir_path = os.path.join(root, model.JOURNAL_METADATA_DIR)
    if not os.path.exists(metadata_dir_path):
        try:
            os.mkdir(metadata_dir_path)
        except EnvironmentError:
            logging.error('Convert DS-0 Journal entries: '
                          'error creating the Journal metadata directory.')
            return
    # Convert every document in the old index; skip ones that vanished.
    for posting_item in database.postlist(''):
        try:
            document = database.get_document(posting_item.docid)
        except xapian.DocNotFoundError, e:
            logging.debug('Convert DS-0 Journal entries: error getting '
                          'document %s: %s', posting_item.docid, e)
            continue
        _convert_entry(root, document)
def _convert_entry(root, document):
    """Convert a single DS-0 xapian document into the new metadata layout."""
    try:
        metadata_loaded = cPickle.loads(document.get_data())
    except cPickle.PickleError, e:
        logging.debug('Convert DS-0 Journal entries: '
                      'error converting metadata: %s', e)
        return
    # Minimum fields an entry must carry to be convertible.
    if not ('activity_id' in metadata_loaded and
            'mime_type' in metadata_loaded and
            'title' in metadata_loaded):
        return
    metadata = {}
    uid = _get_id(document)
    if uid is None:
        return
    # DS-0 stored each value as a list; keep only the first element.
    for key, value in metadata_loaded.items():
        metadata[str(key)] = str(value[0])
    if 'uid' not in metadata:
        metadata['uid'] = uid
    # Entries without an associated payload file are not converted.
    filename = metadata.pop('filename', None)
    if not filename:
        return
    if not os.path.exists(os.path.join(root, filename)):
        return
    # Untitled entries get a generated title and a matching unique filename.
    if not metadata.get('title'):
        metadata['title'] = _('Untitled')
        fn = model.get_file_name(metadata['title'],
                                 metadata['mime_type'])
        new_filename = model.get_unique_file_name(root, fn)
        os.rename(os.path.join(root, filename),
                  os.path.join(root, new_filename))
        filename = new_filename
    # Copy the preview image, if the old store has one for this entry.
    preview_path = os.path.join(root, _JOURNAL_0_METADATA_DIR,
                                'preview', uid)
    if os.path.exists(preview_path):
        preview_fname = filename + '.preview'
        new_preview_path = os.path.join(root,
                                        model.JOURNAL_METADATA_DIR,
                                        preview_fname)
        if not os.path.exists(new_preview_path):
            shutil.copy(preview_path, new_preview_path)
    metadata_fname = filename + '.metadata'
    metadata_path = os.path.join(root, model.JOURNAL_METADATA_DIR,
                                 metadata_fname)
    # Write metadata atomically: temp file in the same tree, then rename.
    if not os.path.exists(metadata_path):
        (fh, fn) = tempfile.mkstemp(dir=root)
        os.write(fh, json.dumps(metadata))
        os.close(fh)
        os.rename(fn, metadata_path)
    logging.debug('Convert DS-0 Journal entries: entry converted: '
                  'file=%s metadata=%s',
                  os.path.join(root, filename), metadata)
class VolumesToolbar(Gtk.Toolbar):
__gtype_name__ = 'VolumesToolbar'
__gsignals__ = {
'volume-changed': (GObject.SignalFlags.RUN_FIRST, None,
([str])),
'volume-error': (GObject.SignalFlags.RUN_FIRST, None,
([str, str])),
}
    def __init__(self):
        """Build the toolbar with the Journal button; defer volume discovery."""
        Gtk.Toolbar.__init__(self)
        self._mount_added_hid = None
        self._mount_removed_hid = None
        button = JournalButton()
        button.connect('toggled', self._button_toggled_cb)
        self.insert(button, 0)
        button.show()
        # The Journal button is always first; volume buttons are appended.
        self._volume_buttons = [button]
        self.connect('destroy', self.__destroy_cb)
        # Enumerating mounts can be slow; do it once the main loop is idle.
        GLib.idle_add(self._set_up_volumes)
    def __destroy_cb(self, widget):
        """Disconnect mount signal handlers when the toolbar is destroyed."""
        volume_monitor = Gio.VolumeMonitor.get()
        volume_monitor.disconnect(self._mount_added_hid)
        volume_monitor.disconnect(self._mount_removed_hid)
    def _set_up_volumes(self):
        """Add the Documents button, then one button per mounted volume."""
        self._set_up_documents_button()
        volume_monitor = Gio.VolumeMonitor.get()
        # Track future mount/unmount events.
        self._mount_added_hid = volume_monitor.connect('mount-added',
                                                       self.__mount_added_cb)
        self._mount_removed_hid = volume_monitor.connect(
            'mount-removed',
            self.__mount_removed_cb)
        # And add buttons for everything already mounted.
        for mount in volume_monitor.get_mounts():
            self._add_button(mount)
    def _set_up_documents_button(self):
        """Add a button for the Documents folder, if one is configured."""
        documents_path = model.get_documents_path()
        if documents_path is not None:
            button = DocumentsButton(documents_path)
            # Same radio group as the Journal button, so only one is active.
            button.props.group = self._volume_buttons[0]
            button.set_palette(Palette(_('Documents')))
            button.connect('toggled', self._button_toggled_cb)
            button.show()
            # Insert right after the last existing volume button.
            position = self.get_item_index(self._volume_buttons[-1]) + 1
            self.insert(button, position)
            self._volume_buttons.append(button)
            self.show()
    def __mount_added_cb(self, volume_monitor, mount):
        """Signal handler: a volume was mounted — add its button."""
        self._add_button(mount)
    def __mount_removed_cb(self, volume_monitor, mount):
        """Signal handler: a volume was unmounted — remove its button."""
        self._remove_button(mount)
    def _add_button(self, mount):
        """Create a toolbar button for ``mount``, converting DS-0 data first."""
        logging.debug('VolumeToolbar._add_button: %r', mount.get_name())
        # Old-style (datastore 0) volumes are converted in the background
        # before the user can browse them.
        if os.path.exists(os.path.join(mount.get_root().get_path(),
                                       _JOURNAL_0_METADATA_DIR)):
            logging.debug('Convert DS-0 Journal entries: starting conversion')
            GLib.idle_add(_convert_entries, mount.get_root().get_path())
        button = VolumeButton(mount)
        button.props.group = self._volume_buttons[0]
        button.connect('toggled', self._button_toggled_cb)
        button.connect('volume-error', self.__volume_error_cb)
        position = self.get_item_index(self._volume_buttons[-1]) + 1
        self.insert(button, position)
        button.show()
        self._volume_buttons.append(button)
        # Only show the toolbar once there is more than the Journal button.
        if len(self.get_children()) > 1:
            self.show()
def __volume_error_cb(self, button, strerror, severity):
    """Re-emit a button's 'volume-error' on the toolbar itself."""
    self.emit('volume-error', strerror, severity)
def _button_toggled_cb(self, button):
    """Announce the newly selected mount point when a button activates."""
    if button.props.active:
        self.emit('volume-changed', button.mount_point)
def _get_button_for_mount(self, mount):
    """Return the child button whose mount point matches *mount*, or None."""
    target = mount.get_root().get_path()
    match = next(
        (child for child in self.get_children()
         if child.mount_point == target),
        None)
    if match is None:
        logging.error('Couldnt find button with mount_point %r', target)
    return match
def _remove_button(self, mount):
button = self._get_button_for_mount(mount)
|
rpreter
raise SyntaxError('encoding problem: utf-8')
encoding += '-sig'
return encoding
first = read_or_stop()
if first.startswith(BOM_UTF8):
bom_found = True
first = first[3:]
default = 'utf-8-sig'
if not first:
return default, []
encoding = find_cookie(first)
if encoding:
return encoding, [first]
if not blank_re.match(first):
return default, [first]
second = read_or_stop()
if not second:
return default, [first]
encoding = find_cookie(second)
if encoding:
return encoding, [first, second]
return default, [first, second]
def untokenize(iterable):
    """Transform tokens back into Python source code.

    Each element returned by the iterable must be a token sequence
    with at least two elements, a token number and token value.  If
    only two tokens are passed, the resulting output is poor.

    Round-trip invariant for full input:
        Untokenized source will match input source exactly

    Round-trip invariant for limited input:
        # Output text will tokenize back to the input
        t1 = [tok[:2] for tok in generate_tokens(f.readline)]
        newcode = untokenize(t1)
        readline = iter(newcode.splitlines(1)).next
        t2 = [tok[:2] for tok in generate_tokens(readline)]
        assert t1 == t2
    """
    ut = Untokenizer()
    return ut.untokenize(iterable)
def generate_tokens(readline):
"""
The generate_tokens() generator requires one argument, readline, which
must be a callable object which provides the same interface as the
readline() method of built-in file objects. Each call to the function
should return one line of input as a string. Alternately, readline
can be a callable function terminating with StopIteration:
readline = open(myfile).next # Example of alternate readline
The generator produces 5-tuples with these members: the token type; the
token string; a 2-tuple (srow, scol) of ints specifying the row and
column where the token begins in the source; a 2-tuple (erow, ecol) of
ints specifying the row and column where the token ends in the source;
and the line on which the token was found. The line passed is the
logical line; continuation lines are included.
"""
lnum = parenlev = continued = 0
namechars, numchars = string.ascii_letters + '_', '0123456789'
contstr, needcont = '', 0
contline = None
indents = [0]
while 1: # loop over lines in stream
try:
line = readline()
except StopIteration:
line = ''
lnum = lnum + 1
pos, max = 0, len(line)
if contstr: # continued string
if not line:
raise TokenError("EOF in multi-line string", strstart)
endmatch = endprog.match(line)
if endmatch:
pos = end = endmatch.end(0)
yield (STRING, contstr + line[:end],
strstart, (lnum, end), contline + line)
contstr, needcont = '', 0
contline = None
elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
yield (ERRORTOKEN, contstr + line,
strstart, (lnum, len(line)), contline)
contstr = ''
contline = None
continue
else:
contstr = contstr + line
contline = contline + line
continue
elif parenlev == 0 and not continued: # new statement
if not line: break
column = 0
while pos < max: # measure leading whitespace
if line[pos] == ' ': column = column + 1
elif line[pos] == '\t': column = (column//tabsize + 1)*tabsize
elif line[pos] == '\f': column = 0
else: break
pos = pos + 1
if pos == max: break
if line[pos] in '#\r\n': # skip comments or blank lines
if line[pos] == '#':
comment_token = line[pos:].rstrip('\r\n')
nl_pos = pos + len(comment_token)
yield (COMMENT, comment_token,
(lnum, pos), (lnum, pos + len(comment_token)), line)
yield (NL, line[nl_pos:],
(lnum, nl_pos), (lnum, len(line)), line)
else:
yield ((NL, COMMENT)[line[pos] == '#'], line[pos:],
(lnum, pos), (lnum, len(line)), line)
continue
if column > indents[-1]: # count indents or dedents
indents.append(column)
yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
while column < indents[-1]:
if column not in indents:
raise IndentationError(
"unindent does not match any outer indentation level",
("<tokenize>", lnum, pos, line))
indents = indents[:-1]
yield (DEDENT, '', (lnum, pos), (lnum, pos), line)
else: # continued statement
if not line:
raise TokenError("EOF in multi-line statement", (lnum, 0))
continued = 0
while pos < max:
pseudomatch = pseudoprog.match(line, pos)
if pseudomatch: # scan for tokens
start, end = pseudomatch.span(1)
spos, epos, pos = (lnum, start), (lnum, end), end
token, initial = line[start:end], line[start]
if initial in numchars or \
(initial == '.' and token != '.'): # ordinary number
yield (NUMBER, token, spos, epos, line)
elif initial in '\r\n':
newline = NEWLINE
if parenlev > 0:
newline = NL
yield (newline, token, spos, epos, line)
| elif initial == '#':
assert not token.endswith("\n")
yield (COMMENT, token, spos, epos, line)
elif token in triple_quoted:
| endprog = endprogs[token]
endmatch = endprog.match(line, pos)
if endmatch: # all on one line
pos = endmatch.end(0)
token = line[start:pos]
yield (STRING, token, spos, (lnum, pos), line)
else:
strstart = (lnum, start) # multiple lines
contstr = line[start:]
contline = line
break
elif initial in single_quoted or \
token[:2] in single_quoted or \
token[:3] in single_quoted:
if token[-1] == '\n': # continued string
strstart = (lnum, start)
endprog = (endprogs[initial] or endprogs[token[1]] or
endprogs[token[2]])
contstr, needcont = line[start:], 1
contline = line
break
else: # ordinary string
yield (STRING, token, spos, epos, line)
elif initial in namechars: # ordinary name
yield (NAME, token, spos, epos, line)
elif initial == '\\': # continued stmt
# This yield is new; needed for better idempotency:
yield (NL, token, spos, (lnum, pos), line)
continued = 1
else:
if initial in '([{': parenlev = parenlev + 1
elif initial in ')]}': |
# -*- coding: utf-8 -*-
fro | m __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add a boolean 'located' field (default False) to the sshhackip model."""

    dependencies = [
        ('hack_plot', '0005_auto_20150505_1940'),
    ]

    operations = [
        migrations.AddField(
            model_name='sshhackip',
            name='located',
            field=models.BooleanField(default=False),
            preserve_default=True,
        ),
    ]
|
from trackers.fitbit_tracker import FitbitTracker

__author__ = 'doughyde'

# FitBit connection: authenticate against the Fitbit API, then list
# the devices registered to the account.
f = FitbitTracker()
f.authenticate()
f.get_devices()
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language g | overning permissions and
# limitations under the License.
import os
from st2common.content.loader import ContentPackLoader
from st2common.exceptions.content import ParseException
from st2common.bootstrap.aliasesregistrar import AliasesRegistrar
from st2common.models.utils.action_alias_utils import extract_parameters_for_action_alias_db
from st2common.models.utils.action_alias_utils import extract_parameters
from st2tests.pack_resource import BasePackResourceTestCase
__all__ = [
'BaseActionAliasT | estCase'
]
class BaseActionAliasTestCase(BasePackResourceTestCase):
    """
    Base class for testing action aliases.
    """

    # Name of the alias under test; subclasses must set this.
    action_alias_name = None
    # ActionAlias DB object resolved from action_alias_name in setUp().
    action_alias_db = None

    def setUp(self):
        super(BaseActionAliasTestCase, self).setUp()

        if not self.action_alias_name:
            raise ValueError('"action_alias_name" class attribute needs to be provided')

        self.action_alias_db = self._get_action_alias_db_by_name(name=self.action_alias_name)

    def assertCommandMatchesExactlyOneFormatString(self, format_strings, command):
        """
        Assert that the provided command matches exactly one format string from the provided list.

        :param format_strings: Candidate alias format strings.
        :param command: User command string to match against them.
        :raises AssertionError: If the command matches zero or more than one format string.
        """
        matched_format_strings = []

        for format_string in format_strings:
            try:
                extract_parameters(format_str=format_string,
                                   param_stream=command)
            except ParseException:
                # Not a match for this format string; try the next one.
                continue

            matched_format_strings.append(format_string)

        if len(matched_format_strings) == 0:
            msg = ('Command "%s" didn\'t match any of the provided format strings' % (command))
            raise AssertionError(msg)
        elif len(matched_format_strings) > 1:
            msg = ('Command "%s" matched multiple format strings: %s' %
                   (command, ', '.join(matched_format_strings)))
            raise AssertionError(msg)

    def assertExtractedParametersMatch(self, format_string, command, parameters):
        """
        Assert that the provided command matches the format string.

        In addition to that, also assert that the parameters which have been extracted from the
        user input (command) also match the provided parameters.
        """
        extracted_params = extract_parameters_for_action_alias_db(
            action_alias_db=self.action_alias_db,
            format_str=format_string,
            param_stream=command)

        if extracted_params != parameters:
            msg = ('Extracted parameters from command string "%s" against format string "%s"'
                   ' didn\'t match the provided parameters: ' % (command, format_string))

            # Note: We intercept the exception so we can include a diff for the dictionaries
            try:
                self.assertEqual(extracted_params, parameters)
            except AssertionError as e:
                msg += str(e)

            raise AssertionError(msg)

    def _get_action_alias_db_by_name(self, name):
        """
        Retrieve ActionAlias DB object for the provided alias name.

        :raises ValueError: If no alias with that name exists in this pack.
        """
        base_pack_path = self._get_base_pack_path()
        _, pack = os.path.split(base_pack_path)

        pack_loader = ContentPackLoader()
        registrar = AliasesRegistrar(use_pack_cache=False)

        aliases_path = pack_loader.get_content_from_pack(pack_dir=base_pack_path,
                                                         content_type='aliases')
        aliases = registrar._get_aliases_from_pack(aliases_dir=aliases_path)
        for alias_path in aliases:
            action_alias_db = registrar._get_action_alias_db(pack=pack,
                                                             action_alias=alias_path)

            if action_alias_db.name == name:
                return action_alias_db

        raise ValueError('Alias with name "%s" not found' % (name))
|
from sft.runner.Trainer import Trainer
import sft.config.exp

if __name__ == "__main__":
    # Run the training experiment described by the sft.config.exp module.
    Trainer().run(sft.config.exp)
import time
import sqlite3
from base_model import BaseModel
from datetime import datetime
from contextlib import contextmanager
class SSIDTrafficHistory(BaseModel):
    """Time-bucketed per-(adapter, ssid) rx/tx traffic counters.

    Rows are keyed by (timestamp, adapter, ssid), where the timestamp is
    quantized by truncate_time(); rows older than ``time_limit`` seconds
    are purged on every add().
    """

    def __init__(self, dbfile, table_name, time_limit):
        super(SSIDTrafficHistory, self).__init__(dbfile, table_name)
        # Maximum age (in seconds) of rows kept by clear().
        self.time_limit = time_limit

    def init_db(self):
        """Create the history table if it does not exist yet."""
        with self.db_cursor() as c:
            c.execute('''
                CREATE TABLE IF NOT EXISTS {} (
                    timestamp integer,
                    adapter text,
                    ssid text,
                    rx integer,
                    tx integer,
                    PRIMARY KEY (timestamp, adapter, ssid)
                )
            '''.format(self.table_name))

    def truncate_time(self, timestamp):
        """Quantize *timestamp* to this history's bucket size.

        Subclasses must override this.  Fix: the original definition was
        missing ``self``, so calling ``self.truncate_time(ts)`` raised
        TypeError instead of NotImplementedError.
        """
        raise NotImplementedError

    def query(self, adapter, ssid, timestamp=None):
        """Return the counters for one (adapter, ssid) time bucket.

        Missing rows are reported as zero traffic. *timestamp* defaults
        to the current time.
        """
        if not timestamp:
            timestamp = time.time()
        # Compute the bucket once instead of three times.
        bucket = self.truncate_time(timestamp)
        with self.db_cursor(commit=False) as c:
            query = '''
                SELECT timestamp, adapter, ssid, rx, tx
                FROM {}
                WHERE adapter=? AND ssid=? AND timestamp=?;
            '''.format(self.table_name)
            c.execute(query, (adapter, ssid, bucket))
            result = c.fetchone()
            if result is None:
                result = (bucket, adapter, ssid, 0, 0)
            return {
                'timestamp': bucket,
                'adapter': adapter,
                'ssid': ssid,
                'rx': result[3],
                'tx': result[4]
            }

    def query_all(self, start_time=None, end_time=None, timestamp=None):
        """Return summed rx/tx per (adapter, ssid) in [start_time, end_time].

        Results are grouped into a dict keyed by adapter name.
        """
        if not timestamp:
            timestamp = time.time()
        if not end_time:
            end_time = timestamp
        if not start_time:
            # Default to the bucket containing end_time.
            start_time = self.truncate_time(end_time)
        with self.db_cursor(commit=False) as c:
            query = '''
                SELECT timestamp, adapter, ssid, sum(rx), sum(tx)
                FROM {}
                WHERE timestamp >= ? AND timestamp <= ?
                GROUP BY adapter, ssid
                ORDER BY adapter, ssid;
            '''.format(self.table_name)
            c.execute(query, (start_time, end_time))
            results = c.fetchall()
            query_result = {}
            for ts, adapter, ssid, rx, tx in results:
                if adapter not in query_result:
                    query_result[adapter] = []
                query_result[adapter].append({
                    'timestamp': ts,
                    'adapter': adapter,
                    'ssid': ssid,
                    'rx': rx,
                    'tx': tx
                })
            return query_result

    def update(self, adapter, ssid, rx, tx, timestamp=None):
        """Set the counters for the bucket containing *timestamp*."""
        if not timestamp:
            timestamp = time.time()
        with self.db_cursor() as c:
            query = '''
                INSERT OR REPLACE INTO {} (timestamp, adapter, ssid, rx, tx)
                VALUES ( ?, ?, ?, ?, ? );
            '''.format(self.table_name)
            c.execute(query, (self.truncate_time(timestamp), adapter, ssid, rx, tx))

    def add(self, adapter, ssid, delta_rx, delta_tx, timestamp=None):
        """Accumulate traffic deltas into the current bucket, then purge old rows."""
        if not timestamp:
            timestamp = time.time()
        prev = self.query(adapter, ssid, timestamp=timestamp)
        self.update(
            adapter, ssid,
            prev['rx'] + delta_rx, prev['tx'] + delta_tx,
            timestamp=timestamp
        )
        self.clear(timestamp=timestamp)

    def clear(self, timestamp=None):
        """Delete rows older than time_limit seconds before *timestamp*."""
        if not timestamp:
            timestamp = time.time()
        with self.db_cursor() as c:
            query = '''
                DELETE FROM {}
                WHERE timestamp < ?;
            '''.format(self.table_name)
            c.execute(query, (timestamp - self.time_limit, ))
|
'''
Created on 28/set/2014
@author: Vincenzo Pirrone <pirrone.v@gmail.com>
'''
import serial, time
class Connector:
    """Minimal line-oriented connection interface; subclasses override all methods."""
    def readline(self):
        # Return the next line from the connection.
        pass
    def writeline(self, line):
        # Send one line over the connection.
        pass
    def close(self):
        # Release the underlying connection.
        pass
class FakeSerial(Connector):
    """Stand-in serial port for testing without hardware (Python 2)."""
    def __init__(self, port):
        print 'opening fake serial on %s' % port
    def readline(self):
        # Simulate a slow device: one 'TIME:<epoch>' line every 2 seconds.
        time.sleep(2)
        return 'TIME:%d' % int(time.time())
    def writeline(self, line):
        # Echo outgoing lines to stdout instead of a device.
        print 'FAKE SERIAL: ' + line
    def close(self):
        print 'closing fake serial'
class Serial(Connector, serial.Serial):
    """Connector backed by a real pyserial port.

    Bug fix: readline() and close() previously called Serial.readline(self)
    and Serial.close(self), i.e. themselves, causing infinite recursion
    (RecursionError) on first use.  They must delegate to serial.Serial.
    """
    def __init__(self,
                 port=None,
                 baudrate=9600,
                 bytesize=serial.EIGHTBITS,
                 parity=serial.PARITY_NONE,
                 stopbits=serial.STOPBITS_ONE,
                 timeout=None,
                 xonxoff=False,
                 rtscts=False,
                 writeTimeout=None,
                 dsrdtr=False,
                 interCharTimeout=None):
        serial.Serial.__init__(self, port=port, baudrate=baudrate, bytesize=bytesize, parity=parity, stopbits=stopbits, timeout=timeout, xonxoff=xonxoff, rtscts=rtscts, writeTimeout=writeTimeout, dsrdtr=dsrdtr, interCharTimeout=interCharTimeout)

    def readline(self):
        # Delegate to the pyserial implementation, not to this method.
        return serial.Serial.readline(self)

    def writeline(self, line):
        # Terminate every outgoing line with a newline.
        serial.Serial.write(self, line + '\n')

    def close(self):
        serial.Serial.close(self)
# Generated by Django 1.11.15 on 2018-08-08 18:28
import django.db.models.deletion
import django_extensions.db.fields
import stdimage.models
from course_discovery.apps.course_metadata.utils import UploadToFieldNamePath
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an optional lead_capture_image StdImageField to the degree model."""

    dependencies = [
        ('course_metadata', '0096_degree_lead_capture_list_name'),
    ]

    operations = [
        migrations.AddField(
            model_name='degree',
            name='lead_capture_image',
            # Uploads land under media/degree_marketing/lead_capture_images/,
            # named by the degree's uuid field.
            field=stdimage.models.StdImageField(blank=True, help_text='Please provide an image file for the lead capture banner.', null=True, upload_to=UploadToFieldNamePath('uuid', path='media/degree_marketing/lead_capture_images/')),
        ),
    ]
|
import os.path
from subprocess import call
class InstallerTools(object):
    """File-patching and environment-setup helpers used by the installer.

    Improvements over the original: files are opened via ``with`` so they
    are closed even on error, the rewrite loops use ``writelines``, and the
    py2-only ``print`` statements use the py2/py3-compatible call form.
    """

    @staticmethod
    def update_environment(file_path, environment_path):
        """Replace the first line of *file_path* with *environment_path*.

        Used to point a script's interpreter/environment line at the
        virtualenv created by the installer.
        """
        with open(file_path, 'r') as update_file:
            original_lines = update_file.readlines()
        original_lines[0] = environment_path + '\n'
        with open(file_path, 'w') as update_file:
            update_file.writelines(original_lines)

    @staticmethod
    def fix_migrate(base_directory):
        """Patch line 10 of sqlalchemy-migrate's schema.py to fix its import bug."""
        print("\nFixing the migrate bug \n")
        buggy_path = os.path.join(base_directory,
                                  'env/lib/python2.7/site-packages/migrate/versioning/schema.py')
        with open(buggy_path, 'r') as buggy_file:
            original_lines = buggy_file.readlines()
        original_lines[9] = "from sqlalchemy import exc as sa_exceptions\n"
        with open(buggy_path, 'w') as update_file:
            update_file.writelines(original_lines)

    @staticmethod
    def refresh_environment(framework_config):
        """Point the yard/blow/try scripts at the configured environment."""
        InstallerTools.update_environment(framework_config.yard_path, framework_config.environment_path)
        InstallerTools.update_environment(framework_config.blow_path, framework_config.environment_path)
        InstallerTools.update_environment(framework_config.try_path, framework_config.environment_path)

    @staticmethod
    def change_permissions(framework_config):
        """Make the yard/blow/try scripts executable for all users."""
        call(['chmod', 'a+x', framework_config.yard_path])
        call(['chmod', 'a+x', framework_config.blow_path])
        call(['chmod', 'a+x', framework_config.try_path])

    @staticmethod
    def create_db_directory(base_directory):
        """Create the storage/ directory for the database if missing."""
        if not os.path.exists(os.path.join(base_directory, 'storage/')):
            os.makedirs(os.path.join(base_directory, 'storage/'))

    @staticmethod
    def create_virtual_environment(framework_config):
        """Create the virtualenv, then wire up and chmod the helper scripts."""
        call(['python', framework_config.v_path, framework_config.environment_name])
        InstallerTools.refresh_environment(framework_config)
        InstallerTools.change_permissions(framework_config)
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RProtgenerics(RPackage):
    """S4 generic functions needed by Bioconductor proteomics packages."""

    homepage = "https://bioconductor.org/packages/ProtGenerics/"
    url      = "https://git.bioconductor.org/packages/ProtGenerics"
    list_url = homepage

    # Pinned to the Bioconductor git commit for the 1.8.0 release.
    version('1.8.0', git='https://git.bioconductor.org/packages/ProtGenerics', commit='b2b3bb0938e20f58fca905f6870de7dbc9dfd7a3')

    depends_on('r@3.4.0:3.4.9', when='@1.8.0')
|
# Input:
# 2
| # 5
# 1 2 3 4 5
# 6
# 2 4 6 7 5 1
#
# Output:
# 3
# 7
def findMid(head):
    """Return the middle node of a singly linked list (or -1 for an empty list).

    Uses the fast/slow pointer technique: ``fast`` advances two nodes per
    step, ``slow`` one.  For an even-length list the second of the two
    middle nodes is returned (e.g. node 7 for 2-4-6-7-5-1).

    Fix: replaced the non-idiomatic ``== None`` / ``!= None`` comparisons
    with ``is None`` / ``is not None`` identity checks.
    """
    if head is None:
        return -1
    fast, slow = head, head
    while fast.next is not None and fast.next.next is not None:
        fast = fast.next.next
        slow = slow.next
    if fast.next is not None:
        # Even number of nodes: take the later of the two middles.
        return slow.next
    return slow
|
from tests.util.base import event
def test_invite_generation(event, default_account):
    """Check that generate_icalendar_invite emits a REQUEST with correct VEVENT fields."""
    from inbox.events.ical import generate_icalendar_invite

    event.sequence_number = 1
    event.participants = [{'email': 'helena@nylas.com'},
                          {'email': 'myles@nylas.com'}]
    cal = generate_icalendar_invite(event)
    assert cal['method'] == 'REQUEST'

    for component in cal.walk():
        if component.name == "VEVENT":
            assert component.get('summary') == event.title
            assert int(component.get('sequence')) == event.sequence_number
            assert component.get('location') == event.location

            attendees = component.get('attendee', [])

            # the iCalendar python module doesn't return a list when
            # there's only one attendee. Go figure.
            if not isinstance(attendees, list):
                attendees = [attendees]

            for attendee in attendees:
                email = unicode(attendee)
                # strip mailto: if it exists
                if email.lower().startswith('mailto:'):
                    email = email[7:]
                assert email in ['helena@nylas.com', 'myles@nylas.com']
def test_message_generation(event, default_account):
    """Check that generate_invite_message builds a MIME message with the expected parts."""
    from inbox.events.ical import generate_invite_message
    event.title = 'A long walk on the beach'
    event.participants = [{'email': 'helena@nylas.com'}]
    msg = generate_invite_message('empty', event, default_account)

    # Check that we have an email with an HTML part, a plain text part, a
    # text/calendar with METHOD=REQUEST and an attachment.
    count = 0
    for mimepart in msg.walk(with_self=msg.content_type.is_singlepart()):
        format_type = mimepart.content_type.format_type
        subtype = mimepart.content_type.subtype

        if (format_type, subtype) in [('text', 'plain'), ('text', 'html'),
                                      ('text', 'calendar; method=request'),
                                      ('application', 'ics')]:
            count += 1
    # Exactly three of the four candidate part types should be present.
    assert count == 3
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agre | ed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either ex | press or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.network import network_service
from openstack import resource2 as resource
class ServiceProfile(resource.Resource):
    """OpenStack Networking service flavor profile resource."""

    resource_key = 'service_profile'
    resources_key = 'service_profiles'
    base_path = '/service_profiles'
    service = network_service.NetworkService()

    # capabilities: full CRUD plus listing is supported.
    allow_create = True
    allow_get = True
    allow_update = True
    allow_delete = True
    allow_list = True

    # Query parameters; note is_enabled/project_id map to the server-side
    # 'enabled'/'tenant_id' names.
    _query_mapping = resource.QueryParameters(
        'description', 'driver',
        is_enabled='enabled',
        project_id='tenant_id'
    )

    # Properties
    #: Description of the service flavor profile.
    description = resource.Body('description')
    #: Provider driver for the service flavor profile
    driver = resource.Body('driver')
    #: Sets enabled flag
    is_enabled = resource.Body('enabled', type=bool)
    #: Metainformation of the service flavor profile
    meta_info = resource.Body('metainfo')
    #: The owner project ID
    project_id = resource.Body('tenant_id')
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2014 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Formatter for ASL securityd log file."""
from plaso.formatters import interface
class MacSecuritydLogFormatter(interface.ConditionalEventFormatter):
    """Formatter for ASL Securityd file."""

    # Event data type this formatter handles.
    DATA_TYPE = 'mac:asl:securityd:line'

    # Long message: each piece is rendered only when its attribute is set.
    FORMAT_STRING_PIECES = [
        u'Sender: {sender}',
        u'({sender_pid})',
        u'Level: {level}',
        u'Facility: {facility}',
        u'Text: {message}']

    # Short message shows only the log text.
    FORMAT_STRING_SHORT_PIECES = [u'Text: {message}']

    SOURCE_LONG = 'Mac ASL Securityd Log'
    SOURCE_SHORT = 'LOG'
|
# -*- coding: utf-8 -*-
# Copyright (c) 2021, Frappe Te | chnologies and contributors
# License: MIT. See LICENSE
# import frappe
from frappe.model.document import Document
class UserDocumentType(Document):
    """Frappe controller for the 'User Document Type' DocType; no custom behavior."""
    pass
|
from protorpc import messages
from protorpc import message_types
from protorpc import remote
from google.appengine.api import taskqueue
import json
import endpoints
import urllib2
import logging, os
import settings
import inspect
from seqid import SeqidIssuer, seqid2str
from endpointshelper import EndpointsHelper
from logger import Logger
from geofeed import GeoFeed
package = 'GeoFeedAPI'
"""GeoFeed API
"""
class SeqidResponse(messages.Message):
    """Response message for geofeed.seqid method"""
    # 'OK' on success, otherwise the error text.
    status = messages.StringField(1)
    series = messages.StringField(2)
    seqid_datetime = messages.StringField(3)
    seqid_int = messages.IntegerField(4)
class TestRequest(messages.Message):
    """request message for taskqueue.test"""
    # Arbitrary text; echoed back by the test method.
    message = messages.StringField(1)
class TestResponse(messages.Message):
    """response message for taskqueue.test"""
    # 'OK' on success, otherwise the error text.
    status = messages.StringField(1)
    # Echo of the request's message field.
    message = messages.StringField(2)
    # Debug information (e.g. the authenticated user).
    info = messages.StringField(3)
class FeedItem(messages.Message):
    """A single geo-tagged feed entry; topic and key identify it uniquely."""
    topic = messages.StringField(1, required=True)
    key = messages.StringField(2, required=True)
    url = messages.StringField(3)
    latitude = messages.FloatField(4)
    longitude = messages.FloatField(5)
    content = messages.StringField(6)
    published = messages.StringField(7)
class PublishResponse(messages.Message):
    """response message for geofeed.publish"""
    # 'OK' on success, otherwise the error text.
    status = messages.StringField(1)
class ListRequest(messages.Message):
    """message for retrieving a list of feed items"""
    topic = messages.StringField(1, required=True)
class ListResponse(messages.Message):
    """response message for geofeed.list"""
    # 'OK' on success, otherwise the error text.
    status = messages.StringField(1)
    items = messages.MessageField(FeedItem, 2, repeated=True)
class GetRequest(messages.Message):
    """message for retrieving a single feed items"""
    topic = messages.StringField(1, required=True)
    key = messages.StringField(2, required=True)
class GetResponse(messages.Message):
    """response message for geofeed.get"""
    # 'OK' on success, 'NOT FOUND' if the item does not exist,
    # otherwise the error text.
    status = messages.StringField(1)
    item = messages.MessageField(FeedItem, 2)
@endpoints.api(name='geofeed', version='v1.0', allowed_client_ids=['314157906781-5k944tnd2e4hvcf0nrc4dl93kgdaqnam.apps.googleusercontent.com'])
#@hub_api.api_class(resource_name='geofeed')
class GeoFeedApi(remote.Service):
    """GeoFeed API

    Cloud Endpoints service exposing seqid issuing plus feed
    publish/list/get.  Every method authenticates the caller via
    EndpointsHelper.authenticate() and reports failures through the
    response's 'status' field rather than raising.
    """

    # Resource container for the {series} path parameter of seqid.
    SEQUENCE_RESOURCE = endpoints.ResourceContainer(
        message_types.VoidMessage,
        series=messages.StringField(1))

    @endpoints.method(SEQUENCE_RESOURCE, SeqidResponse,
                      path='seqid/{series}', http_method='GET',
                      name='seqid')
    def seqid(self, request):
        """Get a new seqid from the specified series
        """
        response = SeqidResponse(status='OK')
        try:
            EndpointsHelper.authenticate()
            issuer = SeqidIssuer(series=request.series)
            # issueSeqids() returns a batch; we only need the first id.
            seqid = issuer.issueSeqids()[0]
            response.series = issuer.series
            response.seqid_int = seqid
            response.seqid_datetime = seqid2str (seqid)
        except Exception, err:
            # Surface the failure in the status field instead of a 500.
            response.status=str(err)
        return response

    @endpoints.method(FeedItem, PublishResponse,
                      path='publish', http_method='POST',
                      name='publish')
    def publish(self, request):
        """Publish a new item to a feed.
        """
        response = PublishResponse(status='OK')
        try:
            EndpointsHelper.authenticate()
            # Forward the request message's fields to GeoFeed as kwargs.
            GeoFeed.publish(**EndpointsHelper.message2dict(request))
        except Exception, err:
            response.status=str(err)
        return response

    @endpoints.method(ListRequest, ListResponse,
                      path='list', http_method='POST',
                      name='list')
    def list(self, request):
        """Retrieve a list of recent items in a feed
        """
        response = ListResponse(status='OK')
        try:
            EndpointsHelper.authenticate()
            response.items = [FeedItem(**item) for item in GeoFeed.list(topic=request.topic)]
        except Exception, err:
            response.status=str(err)
        return response

    @endpoints.method(GetRequest, GetResponse,
                      path='get', http_method='POST',
                      name='get')
    def get(self, request):
        """Retrieve a specified feed item
        """
        response = GetResponse(status='OK')
        try:
            EndpointsHelper.authenticate()
            item = GeoFeed.get(request.topic, request.key)
            if item:
                response.item = FeedItem(**item)
            else:
                # Distinguish "missing item" from errors.
                response.status='NOT FOUND'
        except Exception, err:
            response.status=str(err)
        return response

    @endpoints.method(TestRequest, TestResponse,
                      path='test', http_method='POST',
                      name='test')
    def test(self, request):
        """Test method for debugging conncection and auth issues
        This method will return to the caller whatever string is supplied in the 'message' field
        The info field in the response contains some debug information
        """
        response = TestResponse(message=request.message, status='OK')
        response.info = "USER: %s" % endpoints.get_current_user()
        try:
            EndpointsHelper.authenticate()
            Logger.log (op='test')
        except Exception, err:
            response.status=str(err)
        return response
#app = endpoints.api_server([hub_api])
#app = endpoints.api_server([GeoFeedApi]) |
have a backport of ``surrogateescape`` for
Python2, be sure to register the error handler prior to importing this
module.
The last error handler is:
:surrogate_then_replace: Will use ``surrogateescape`` if it is a valid
handler. If encoding with ``surrogateescape`` would traceback,
surrogates are first replaced with a replacement characters
and then the string is encoded using ``replace`` (which replaces
the rest of the nonencodable bytes). If ``surrogateescape`` is
not present it will simply use ``replace``. (Added in Ansible 2.3)
This strategy is designed to never traceback when it attempts
to encode a string.
The default until Ansible-2.2 was ``surrogate_or_replace``
From Ansible-2.3 onwards, the default is ``surrogate_then_replace``.
:kwarg nonstring: The strategy to use if a nonstring is specified in
``obj``. Default is 'simplerepr'. Valid values are:
:simplerepr: The default. This takes the ``str`` of the object and
then returns the bytes version of that string.
:empty: Return an empty byte string
:passthru: Return the object passed in
:strict: Raise a :exc:`TypeError`
:returns: Typically this returns a byte string. If a nonstring object is
passed in this may be a different type depending on the strategy
specified by nonstring. This will never return a text string.
.. note:: If passed a byte string, this function does not check that the
string is valid in the specified encoding. If it's important that the
byte string is in the specified encoding do::
encoded_string = to_bytes(to_text(input_string, 'latin-1'), 'utf-8')
.. version_changed:: 2.3
Added the ``surrogate_then_replace`` error handler and made it the default error handler.
"""
if isinstance(obj, binary_type):
return obj
# We're given a text string
# If it has surrogates, we know because it will decode
original_errors = errors
if errors in _COMPOSED_ERROR_HANDLERS:
if HAS_SURROGATEESCAPE:
errors = 'surrogateescape'
elif errors == 'surrogate_or_strict':
errors = 'strict'
else:
errors = 'replace'
if isinstance(obj, text_type):
try:
# Try this first as it's the fastest
return obj.encode(encoding, errors)
except UnicodeEncodeError:
if original_errors in (None, 'surrogate_then_replace'):
# Slow but works
return_string = obj.encode('utf-8', 'surrogateescape')
return_string = return_string.decode('utf-8', 'replace')
return return_string.encode(encoding, 'replace')
raise
# Note: We do these last even though we have to call to_bytes again on the
# value because we're optimizing the common case
if nonstring == 'simplerepr':
try:
value = str(obj)
except UnicodeError:
try:
value = repr(obj)
except UnicodeError:
# Giving up
return to_bytes('')
elif nonstring == 'passthru':
return obj
elif nonstring == 'empty':
# python2.4 doesn't have b''
return to_bytes('')
elif nonstring == 'strict':
raise TypeError('obj must be a string type')
else:
raise TypeError('Invalid value %s for to_bytes\' nonstring parameter' % nonstring)
return to_bytes(value, encoding, errors)
def to_text(obj, encoding='utf-8', errors=None, nonstring='simplerepr'):
    """Make sure that a string is a text string.

    :arg obj: An object to make sure is a text string.  In most cases this
        will be either a text string or a byte string.  However, with
        ``nonstring='simplerepr'``, this can be used as a traceback-free
        version of ``str(obj)``.
    :kwarg encoding: The encoding to use to transform from a byte string to
        a text string.  Defaults to using 'utf-8'.
    :kwarg errors: The error handler to use if the byte string is not
        decodable using the specified encoding.  Any valid `codecs error
        handler <https://docs.python.org/2/library/codecs.html#codec-base-classes>`_
        may be specified.  We support three additional error strategies
        specifically aimed at helping people to port code:

            :surrogate_or_strict: Will use ``surrogateescape`` if it is a
                valid handler, otherwise it will use ``strict``.
            :surrogate_or_replace: Will use ``surrogateescape`` if it is a
                valid handler, otherwise it will use ``replace``.
            :surrogate_then_replace: Does the same as ``surrogate_or_replace``;
                it was added for symmetry with the error handlers in
                :func:`ansible.module_utils._text.to_bytes` (Added in Ansible 2.3)

        Because ``surrogateescape`` was added in Python3 this usually means
        that Python3 will use ``surrogateescape`` and Python2 will use the
        fallback error handler.  Note that the code checks for
        ``surrogateescape`` when the module is imported.  If you have a
        backport of ``surrogateescape`` for python2, be sure to register the
        error handler prior to importing this module.

        The default until Ansible-2.2 was ``surrogate_or_replace``.
        In Ansible-2.3 this defaults to ``surrogate_then_replace`` for
        symmetry with :func:`ansible.module_utils._text.to_bytes`.
    :kwarg nonstring: The strategy to use if a nonstring is specified in
        ``obj``.  Default is 'simplerepr'.  Valid values are:

            :simplerepr: The default.  This takes the ``str`` of the object
                and then returns the text version of that string.
            :empty: Return an empty text string.
            :passthru: Return the object passed in.
            :strict: Raise a :exc:`TypeError`.

    :returns: Typically this returns a text string.  If a nonstring object is
        passed in this may be a different type depending on the strategy
        specified by nonstring.  This will never return a byte string.

    .. versionchanged:: 2.3
        Added the surrogate_then_replace error handler and made it the
        default error handler.
    """
    # Fast path: already a text string.
    if isinstance(obj, text_type):
        return obj
    # Resolve the composed strategies to a concrete codecs error handler,
    # depending on whether surrogateescape was available at import time.
    if errors in _COMPOSED_ERROR_HANDLERS:
        if HAS_SURROGATEESCAPE:
            errors = 'surrogateescape'
        elif errors == 'surrogate_or_strict':
            errors = 'strict'
        else:
            errors = 'replace'
    if isinstance(obj, binary_type):
        # Note: We don't need special handling for surrogate_then_replace
        # because all bytes will either be made into surrogates or are valid
        # to decode.
        return obj.decode(encoding, errors)
    # Note: We do these last even though we have to call to_text again on the
    # value because we're optimizing the common case
    if nonstring == 'simplerepr':
        try:
            value = str(obj)
        except UnicodeError:
            try:
                value = repr(obj)
            except UnicodeError:
                # Giving up
                return u''
    elif nonstring == 'passthru':
        return obj
    elif nonstring == 'empty':
        return u''
    elif nonstring == 'strict':
        raise TypeError('obj must be a string type')
    else:
        raise TypeError('Invalid value %s for to_text\'s nonstring parameter' % nonstring)
    return to_text(value, encoding, errors)
#: :py:func:`to_native`
#: Transform a variable into the native str type for the python version
#:
#: On Python2, this is an alias for
#: :func:`~ansible.module_utils.to_bytes`. On Python3 it is an alias for
#: :func:`~ansible.module_utils.to_text`. It makes it easier to
#: transform a variable into the native str type for the python version
#: the code is running on. Use this when constructing the message to
#: send to e |
# -*- coding: utf-8 -*-
# Aualé oware graphic user interface.
# Copyright (C) 2014-2020 Joan Sala Soler <contact@joansala.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import math
import random
import struct
from game import Match |
from game import Oware
from uci import Strength
from .constants import COEFFICIENTS
class OpeningBook(object):
    """Opening book implementation.

    Loads a binary opening book file and answers queries for the best
    known moves of a match position.  The configured playing strength
    widens or narrows the score margin below the best move that is
    still considered acceptable.
    """

    # Base score margin used to derive the minimum acceptable move score.
    __MARGIN = 42

    def __init__(self, path):
        """Initialize the book from the file at ``path``.

        :param path: filesystem path of the binary opening book file
        """
        # Maps position hash code -> list of six move scores.
        # Bug fix: this was initialised as a list, which does not match
        # the dict operations (`.get`) used in `_get_move_scores`.
        self._scores = dict()
        self._header = dict()
        self._min_score = self.__MARGIN
        self._load_opening_book(path)

    def set_strength(self, strength):
        """Sets the playing strength of the book.

        :param strength: object exposing a ``strength_factor`` attribute;
            a factor of 1.0 keeps the margin at its base value, smaller
            factors widen it (allowing weaker book moves)
        """
        margin = self.__MARGIN
        factor = 1 - strength.strength_factor
        self._min_score = margin + (.25 * margin * factor) ** 2

    def pick_best_move(self, match):
        """Choose a best move from the book (random among candidates)."""
        moves = self.find_best_moves(match)
        choice = random.choice(moves) if moves else None
        return choice

    def find_best_moves(self, match):
        """Obtain the best moves from the book.

        Returns every move whose stored score lies within the configured
        margin of the best score for the current position.
        """
        moves = list()
        game = match.get_game()
        turn = match.get_turn()
        scores = self._get_move_scores(match)
        max_score = max(scores) if scores else -math.inf
        min_score = max(max_score - self._min_score, -self._min_score)
        # Moves are numbered 0-5 for south and 6-11 for north.
        offset = 0 if turn == game.SOUTH else 6
        for move, score in enumerate(scores, offset):
            if score >= min_score or score >= max_score:
                moves.append(move)
        return moves

    def _get_move_scores(self, match):
        """Scores for the given match position (empty list if unknown)."""
        code = self._compute_hash_code(match)
        scores = self._scores.get(code, [])
        return scores

    def _load_opening_book(self, path):
        """Loads an opening book from a file."""
        with open(path, 'rb') as file:
            self._header = self._read_header(file)
            self._scores = self._read_scores(file)

    def _read_header(self, file):
        """Reads the header fields from an open file.

        The first line (signature) is consumed and discarded; subsequent
        ``key:value`` lines are collected until a ``b'\\x00\\n'`` sentinel
        (or EOF) is found.
        """
        header = dict()
        signature = file.readline()
        while True:
            field = file.readline()
            if not field or field == b'\x00\n': break
            values = field.decode('utf-8').split(':', 1)
            header.setdefault(*values)
        return header

    def _read_scores(self, file):
        """Reads position scores from an open file.

        Each 20-byte record is a big-endian signed 64-bit position hash
        followed by six signed 16-bit move scores ('>q6h').
        """
        scores = dict()
        while True:
            entry = file.read(20)
            if not entry: break
            code, *values = struct.unpack('>q6h', entry)
            scores.setdefault(code, values)
        return scores

    def _compute_hash_code(self, match):
        """Hash code for the current match position.

        The high bit 0x80000000000 flags south to move.  NOTE(review):
        board[13] appears to hold a running seed count that, together
        with the COEFFICIENTS table, encodes the house contents --
        confirm against the board layout used by the book generator.
        """
        game = match.get_game()
        turn = match.get_turn()
        board = match.get_board()
        code = 0x80000000000 if turn == game.SOUTH else 0x00
        seeds = board[13]
        for house in range(12, -1, -1):
            if seeds >= 48: break
            code += COEFFICIENTS[seeds][house]
            seeds += board[house]
        return code
|
d server
state as the first component of the output. If there is no need for server
state, the input/output state should be modeled as an empty tuple.
In addition to updating state, `next` additionally takes client-side data as
input, and can produce results on server side in addition to state intended to
be passed to the next round. As is the case for the server state, if this
is undesired it should be modeled as an empty tuple.
The type signature of `next`, in the concise TFF type notation (as defined in
TFF's `computation.proto`), is as follows:
```python
(<S@SERVER,{D}@CLIENTS> -> <S@SERVER,X@SERVER>)
```
The above type signature involves the following abstract types:
* `S` is the type of the state that is passed at the server between rounds of
processing. For example, in the context of federated training, the server
state would typically include the weights of the model being trained. The
weights would be updated in each round as the model is trained on more and
more of the clients' data, and hence the server state would evolve as well.
Note: This is also the type of the output of the `initialize` that produces
the server state to feed into the first round.
* `D` represents the type of per-client units of data that serve as the input
to the computation. Often, this would be a sequence type, i.e., a dataset
in TensorFlow's parlance, although strictly speaking this does not have to
always be the case.
* `X` represents the type of server-side outputs generated by the server after
each round.
One can think of the process based on this representation as being equivalent
to the following pseudocode loop:
```python
client_data = ...
server_state = initialize()
while True:
server_state, server_outputs = next(server_state, client_data)
```
The logic of `next` in `MapReduceForm` is factored into seven
variable components `prepare`, `work`, `zero`, `accumulate`, `merge`,
`report`, and `update` (in addition to `initialize` that produces the server
state component for the initial round and `bitwidth` that specifies runtime
parameters for `federated_secure_sum_bitwidth`). The pseudocode below uses
common syntactic shortcuts (such as implicit zipping) for brevity.
For a concise representation of the logic embedded in the discussion below,
specifying the manner in which an instance `mrf` of `MapReduceForm` maps to
a single federated round, see the definitions of `init_computation` and
`next_computation` in
`form_utils.get_iterative_process_for_map_reduce_form`.
```python
@tff.federated_computation
def next(server_state, client_data):
# The server prepares an input to be broadcast to all clients that controls
# what will happen in this round.
client_input = (
tff.federated_broadcast(tff.federated_map(prepare, server_state)))
# The clients all independently do local work and produce updates, plus the
# optional client-side outputs.
client_updates = tff.federated_map(work, [client_data, client_input])
# `client_updates` is a two-tuple, whose first index should be aggregated
# with TFF's `federated_aggregate` and whose second index should be passed
# to TFF's `federated_secure_sum_bitwidth`. The updates are aggregated
# across the system into a single global update at the server.
simple_agg = (
tff.federated_aggregate(client_updates[0], zero(), accumulate, merge,
report))
  secure_agg = tff.federated_secure_sum_bitwidth(client_updates[1], bitwidth())
global_update = [simple_agg, secure_agg]
# Finally, the server produces a new state as well as server-side output to
# emit from this round.
new_server_state, server_output = (
tff.federated_map(update, [server_state, global_update]))
# The updated server state, server- and client-side outputs are returned as
# results of this round.
return new_server_state, server_output
```
The above characterization of `next` forms the relationship between
`MapReduceForm` and `tff.templates.IterativeProcess`. It depends on the seven
pieces of pure TensorFlow logic defined as follows. Please also consult the
documentation for related federated operators for more detail (particularly
the `tff.federated_aggregate()`, as several of the components below correspond
directly to the parameters of that operator).
* `prepare` represents the preparatory steps taken by the server to generate
inputs that will be broadcast to the clients and that, together with the
client data, will drive the client-side work in this round. It takes the
initial state of the server, and produces the input for use by the clients.
Its type signature is `(S -> C)`.
* `work` represents the totality of client-side processing, again all as a
single section of TensorFlow code. It takes a tuple of client data and
client input that was broadcasted by the server, and returns a two-tuple
containing the client update to be aggregated (across all the clients). The
first index of this two-tuple will be passed to an aggregation parameterized
by the blocks of TensorFlow below (`zero`, `accumulate`, `merge`, and
`report`), and the second index will be passed to
`federated_secure_sum_bitwidth`. Its type signature is `(<D,C> -> <U,V>)`.
* `bitwidth` is the TensorFlow computation that produces an integer specifying
the bitwidth for inputs to secure sum. `bitwidth` will be used by the system
to compute appropriate parameters for the secure sum protocol. Exactly how
this computation is performed is left to the runtime implementation of
`federated_secure_sum_bitwidth`.
* `zero` is the TensorFlow computation that produces the initial state of
accumulators that are used to combine updates collected from subsets of the
client population. In some systems, all accumulation may happen at the
server, but for scalability reasons, it is often desirable to structure
aggregation in multiple tiers. Its type signature is `A`, or when
represented as a `tff.Computation` in Python, `( -> A)`.
* | `accumulate` is the TensorFlow computation that updates the state of an
update accumulator (initialized with `zero` above) with a single client's
    update. Its type signature is `(<A,U> -> A)`. Typically, a single accumulator
would be used to combine the updates from multiple clients, but this does
not have to be the case (it's up to the target deployment platform to choose
how to use this logic in a particular deployment scenario).
* `merge` is the TensorFlow computation that merges two accumulators holding
the results of aggregation over two disjoint subsets of clients. Its type
signature is `(<A,A> -> A)`.
* `report` is the TensorFlow computation that transforms the state of the
top-most accumulator (after accumulating updates from all clients and
merging all the resulting accumulators into a single one at the top level
of the system hierarchy) into the final result of aggregation. Its type
signature is `(A -> R)`.
* `update` is the TensorFlow computation that applies the aggregate of all
clients' updates (the output of `report`), also referred to above as the
global update, to the server state, to produce a new server state to feed
into the next round, and that additionally outputs a server-side output,
to be reported externally as one of the results of this round. In federated
learning scenarios, the server-side outputs might include things like loss
and accuracy metrics, and the server state to be carried over, as noted
above, may include the model weights to be trained further in a subsequent
round. The type signature of this computation is `(<S,R> -> <S,X>)`.
  The above TensorFlow computations' type signatures involve the following
abstract types in addition to those defined earlier:
* `C` is the type of the inputs for the clients, to be supplied by the server
at the beginning of each round (or an empty tuple if not needed).
* `U` is the type of the per-client update to be produced in each round and
fed into the cross-client federated aggregation protocol.
* `V` is the type of the per-client update to be produced in each round and
fed into the cross-client secure aggregation protocol.
* `A` is the type |
ergy - (-2.855160426155)) < 1.e-5
def test_load_wfn_low_h2o():
    """Check the raw arrays load_wfn_low reads from the H2O STO-3G wfn file."""
    fn_wfn = context.get_fn('test/h2o_sto3g.wfn')
    title, numbers, coordinates, centers, type_assignment, exponents, \
        mo_count, occ_num, mo_energy, coefficients, energy = load_wfn_low(fn_wfn)
    assert title == 'H2O Optimization'
    # Atomic numbers and Cartesian coordinates of the three atoms (O, H, H)
    assert numbers.shape == (3,)
    assert (numbers == np.array([8, 1, 1])).all()
    assert coordinates.shape == (3, 3)
    assert (coordinates[0] == [-4.44734101, 3.39697999, 0.00000000]).all()
    assert (coordinates[1] == [-2.58401495, 3.55136194, 0.00000000]).all()
    assert (coordinates[2] == [-4.92380519, 5.20496220, 0.00000000]).all()
    # 21 primitives: the first 15 are centered on atom 0 (O), 3 on each H
    assert centers.shape == (21,)
    assert (centers[:15] == np.zeros(15, int)).all()
    assert (centers[15:] == np.array([1, 1, 1, 2, 2, 2])).all()
    # Per-primitive WFN type assignment codes
    assert type_assignment.shape == (21,)
    assert (type_assignment[:6] == np.ones(6)).all()
    assert (type_assignment[6:15] == np.array([2, 2, 2, 3, 3, 3, 4, 4, 4])).all()
    assert (type_assignment[15:] == np.ones(6)).all()
    # Spot-check primitive exponents at several offsets
    assert exponents.shape == (21,)
    assert (exponents[:3] == [0.1307093E+03, 0.2380887E+02, 0.6443608E+01]).all()
    assert (exponents[5:8] == [0.3803890E+00, 0.5033151E+01, 0.1169596E+01]).all()
    assert (exponents[13:16] == [0.1169596E+01, 0.3803890E+00, 0.3425251E+01]).all()
    assert exponents[-1] == 0.1688554E+00
    # Five doubly-occupied molecular orbitals; energies sorted ascending
    assert mo_count.shape == (5,)
    assert (mo_count == [1, 2, 3, 4, 5]).all()
    assert occ_num.shape == (5,)
    assert np.sum(occ_num) == 10.0
    assert (occ_num == [2.0, 2.0, 2.0, 2.0, 2.0]).all()
    assert mo_energy.shape == (5,)
    assert (mo_energy == np.sort(mo_energy)).all()
    assert (mo_energy[:3] == [-20.251576, -1.257549, -0.593857]).all()
    assert (mo_energy[3:] == [-0.459729, -0.392617]).all()
    # MO coefficient matrix (primitives x orbitals) and total SCF energy
    assert coefficients.shape == (21, 5)
    expected = [0.42273517E+01, -0.99395832E+00, 0.19183487E-11, 0.44235381E+00, -0.57941668E-14]
    assert (coefficients[0] == expected).all()
    assert coefficients[6, 2] == 0.83831599E+00
    assert coefficients[10, 3] == 0.65034846E+00
    assert coefficients[17, 1] == 0.12988055E-01
    assert coefficients[-1, 0] == -0.46610858E-03
    assert coefficients[-1, -1] == -0.33277355E-15
    assert abs(energy - (-74.965901217080)) < 1.e-6
def test_get_permutation_orbital():
    """Spot-check get_permutation_orbital on hand-built type assignments."""
    # Assignments already grouped per orbital map to the identity permutation
    assert (get_permutation_orbital(np.array([1, 1, 1])) == [0, 1, 2]).all()
    assert (get_permutation_orbital(np.array([1, 1, 2, 3, 4])) == [0, 1, 2, 3, 4]).all()
    assert (get_permutation_orbital(np.array([2, 3, 4])) == [0, 1, 2]).all()
    # Interleaved p components are regrouped per orbital
    assert (get_permutation_orbital(np.array([2, 2, 3, 3, 4, 4])) == [0, 2, 4, 1, 3, 5]).all()
    assign = np.array([1, 1, 2, 2, 3, 3, 4, 4, 1])
    expect = [0, 1, 2, 4, 6, 3, 5, 7, 8]
    assert (get_permutation_orbital(assign) == expect).all()
    assign = np.array([1, 5, 6, 7, 8, 9, 10, 1])
    expect = [0, 1, 2, 3, 4, 5, 6, 7]
    assert (get_permutation_orbital(assign) == expect).all()
    # Interleaved d components (codes 5-10)
    assign = np.array([5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10])
    expect = [0, 2, 4, 6, 8, 10, 1, 3, 5, 7, 9, 11]
    assert (get_permutation_orbital(assign) == expect).all()
    assign = np.array([1, 2, 2, 3, 3, 4, 4, 5, 6, 7, 8, 9, 10])
    expect = [0, 1, 3, 5, 2, 4, 6, 7, 8, 9, 10, 11, 12]
    assert (get_permutation_orbital(assign) == expect).all()
    # f orbitals
    assign = np.array([11, 12, 13, 17, 14, 15, 18, 19, 16, 20])
    assert (get_permutation_orbital(assign) == range(10)).all()
    # g orbitals
    assign = np.array([23, 29, 32, 27, 22, 28, 35, 34, 26, 31, 33, 30, 25, 24, 21])
    assert (get_permutation_orbital(assign) == range(15)).all()
    # g orbitals (repeat of the previous check)
    assign = np.array([23, 29, 32, 27, 22, 28, 35, 34, 26, 31, 33, 30, 25, 24, 21])
    assert (get_permutation_orbital(assign) == range(15)).all()
    # h orbitals
    assert (get_permutation_orbital(np.arange(36, 57)) == range(21)).all()
    assign = np.array([1, 1, 11, 12, 13, 17, 14, 15, 18, 19, 16, 20])
    assert (get_permutation_orbital(assign) == range(12)).all()
    assign = np.array([2, 3, 4, 11, 12, 13, 17, 14, 15, 18, 19, 16, 20, 1, 1])
    assert (get_permutation_orbital(assign) == range(15)).all()
def test_get_permutation_basis():
    """Spot-check get_permutation_basis against hand-computed permutations."""
    assert (get_permutation_basis(np.array([1, 1, 1])) == [0, 1, 2]).all()
    assert (get_permutation_basis(np.array([2, 2, 3, 3, 4, 4])) == [0, 2, 4, 1, 3, 5]).all()
    assert (get_permutation_basis(np.array([1, 2, 3, 4, 1])) == [0, 1, 2, 3, 4]).all()
    # d shells (codes 5-10) are additionally reordered within each orbital
    assert (get_permutation_basis(np.array([5, 6, 7, 8, 9, 10])) == [0, 3, 4, 1, 5, 2]).all()
    assign = np.repeat([5, 6, 7, 8, 9, 10], 2)
    expect = [0, 6, 8, 2, 10, 4, 1, 7, 9, 3, 11, 5]
    assert (get_permutation_basis(assign) == expect).all()
    assert (get_permutation_basis(np.arange(1, 11)) == [0, 1, 2, 3, 4, 7, 8, 5, 9, 6]).all()
    assign = np.array([1, 5, 6, 7, 8, 9, 10, 1])
    expect = [0, 1, 4, 5, 2, 6, 3, 7]
    assert (get_permutation_basis(assign) == expect).all()
    # f shells (codes 11-20), alone and mixed with s/p primitives
    assign = np.array([11, 12, 13, 17, 14, 15, 18, 19, 16, 20])
    expect = [0, 4, 5, 3, 9, 6, 1, 8, 7, 2]
    assert (get_permutation_basis(assign) == expect).all()
    assign = np.array([1, 11, 12, 13, 17, 14, 15, 18, 19, 16, 20, 1])
    expect = [0, 1, 5, 6, 4, 10, 7, 2, 9, 8, 3, 11]
    assert (get_permutation_basis(assign) == expect).all()
    assign = np.array([1, 11, 12, 13, 17, 14, 15, 18, 19, 16, 20, 2, 2, 3, 3, 4, 4])
    expect = [0, 1, 5, 6, 4, 10, 7, 2, 9, 8, 3, 11, 13, 15, 12, 14, 16]
    assert (get_permutation_basis(assign) == expect).all()
    assign = [1, 11, 12, 13, 17, 14, 15, 18, 19, 16, 20, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    expect = np.array([0, 1, 5, 6, 4, 10, 7, 2, 9, 8, 3, 11, 12, 13, 14, 17, 18, 15, 19, 16])
    assert (get_permutation_basis(np.array(assign)) == expect).all()
    # An ordered h shell (codes 36-56) maps to a full reversal
    assert (get_permutation_basis(np.arange(36, 57)) == np.arange(21)[::-1]).all()
    # This scrambled g shell (codes 21-35) also maps to a full reversal
    assign = [23, 29, 32, 27, 22, 28, 35, 34, 26, 31, 33, 30, 25, 24, 21]
    expect = [14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
    assert (get_permutation_basis(np.array(assign)) == expect).all()
    assert (get_permutation_basis(np.arange(36, 57)) == range(21)[::-1]).all()
def test_get_mask():
    """Check that get_mask flags only the leading component of each shell."""
    assert (get_mask(np.array([2, 3, 4])) == [True, False, False]).all()
    expected = [True, True, False, False, True, True, False, False]
    assert (get_mask(np.array([1, 2, 3, 4, 1, 2, 3, 4])) == expected).all()
    expected = [True, False, False, False, False, False]
    assert (get_mask(np.array([5, 6, 7, 8, 9, 10])) == expected).all()
    expected = [True, False, False, True, True, False, False, False, False, False]
    assert (get_mask(np.array([2, 3, 4, 1, 5, 6, 7, 8, 9, 10])) == expected).all()
    expected = [True, False, False, False, False, False, False, False, False, False]
    assert (get_mask(np.arange(11, 21)) == expected).all()
    assert (get_mask(np.array([21, 24, 25])) == [True, False, False]).all()
    # s primitives and the first member of f/g/h shells are all leading
    assert (get_mask(np.array([11, 21, 36, 1])) == [True, True, True, True]).all()
def check_load_wfn(name):
# system out of *.wfn file
mol1 = IOData.from_file(context.get_fn('test/%s.wfn' % name))
# system out of *.fchk file
mol2 = IOData.from_file(context.get_fn('test/%s.fchk' % name))
# Coordinates check:
assert (abs(mol1.coordinates - mol2.coordinates) < 1e-6).all()
# Numbers check
numbers1 = mol1.numbers
numbers2 = mol2.numbers
assert (numbers1 == numbers2).all()
# Basis Set check:
obasis1 = mol1.obasis
obasis2 = mol2.obasis
assert obasis1.nbasis == obasis2.nbasis
assert (obasis1.shell_map == obasis2.shell_map).all()
assert (obasis1.shell_types == obasis2.shell_types).all()
assert (obasis1.nprims == obasis2.nprims).all()
assert (abs(obasis1.alphas - obasis2.alphas) < 1.e-4).all()
# Comparing MOs (*.wfn might not contain virtual orbitals):
n_mo = mol1.orb_alpha.nfn
assert (abs(mol1.orb_alpha.energies - mol2.orb_alpha.energies[:n_mo]) < 1.e-5).all()
assert (mol1.orb_alpha.occupations == mol2.orb_alpha.occupations[:n_mo]).all()
assert (abs(mol1.orb_alpha.coeffs - mol2.orb_alpha.coeffs[:, :n_mo]) < 1.e-7).all()
# Check overlap
olp1 = obasis1.compute_overlap()
olp2 = obasis2.compute_overlap |
# -*- coding: utf-8 -*-
#
# clopath_synapse_spike_pairing.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Clopath Rule: Spike pairing experiment
--------------------------------------
This script simulates one ``aeif_psc_delta_clopath`` neuron that is connected with
a Clopath connection [1]_. The synapse receives pairs of a pre- and a postsynaptic
spikes that are separated by either 10 ms (pre before post) or -10 ms (post
before pre). The change of the synaptic weight is measured after five of such
pairs. This experiment is repeated five times with different rates of the
sequence of the spike pairs: 10Hz, 20Hz, 30Hz, 40Hz, and 50Hz.
References
~~~~~~~~~~
.. [1] Clopath C, Büsing L, Vasilaki E, Gerstner W (2010). Connectivity reflects coding:
a model of voltage-based STDP with homeostasis.
Nature Neuroscience 13:3, 344--352
"""
import numpy as np
import matplotlib.pyplot as plt
import nest
##############################################################################
# First we specify the neuron parameters. To enable voltage dependent
# prefactor ``A_LTD(u_bar_bar)`` add ``A_LTD_const: False`` t | o the dictionary.
# Values reproduce the spike-pairing experiment of [1]; A_LTD / A_LTP are the
# depression / potentiation amplitudes, theta_minus / theta_plus the matching
# voltage thresholds, and tau_u_bar_* the time constants of the low-pass
# filtered membrane potentials used by the Clopath rule.
# NOTE(review): remaining entries are aeif/clamp settings passed straight to
# the model -- confirm units against the aeif_psc_delta_clopath documentation.
nrn_params = {'V_m': -70.6,
              'E_L': -70.6,
              'C_m': 281.0,
              'theta_minus': -70.6,
              'theta_plus': -45.3,
              'A_LTD': 14.0e-5,
              'A_LTP': 8.0e-5,
              'tau_u_bar_minus': 10.0,
              'tau_u_bar_plus': 7.0,
              'delay_u_bars': 4.0,
              'a': 4.0,
              'b': 0.0805,
              'V_reset': -70.6 + 21.0,
              'V_clamp': 33.0,
              't_clamp': 2.0,
              't_ref': 0.0,
              }
##############################################################################
# Hardcoded spike times of presynaptic spike generator
spike_times_pre = [
    # Protocols 0-4: the matching rows of spike_times_post fire 10 ms
    # *before* these presynaptic times (post-pre pairing; compare the
    # "post-pre pairing" plot label below).  NOTE(review): the previous
    # comment here claimed the opposite ordering, contradicting the data.
    [ 20.0, 120.0, 220.0, 320.0, 420.0], # noqa
    [ 20.0, 70.0, 120.0, 170.0, 220.0], # noqa
    [ 20.0, 53.3, 86.7, 120.0, 153.3], # noqa
    [ 20.0, 45.0, 70.0, 95.0, 120.0], # noqa
    [ 20.0, 40.0, 60.0, 80.0, 100.0], # noqa
    # Protocols 5-9: the postsynaptic partner fires 10 ms *after* these
    # presynaptic times (pre-post pairing).
    [120.0, 220.0, 320.0, 420.0, 520.0, 620.0], # noqa
    [ 70.0, 120.0, 170.0, 220.0, 270.0, 320.0], # noqa
    [ 53.3, 86.6, 120.0, 153.3, 186.6, 220.0], # noqa
    [ 45.0, 70.0, 95.0, 120.0, 145.0, 170.0], # noqa
    [ 40.0, 60.0, 80.0, 100.0, 120.0, 140.0]] # noqa
##############################################################################
# Hardcoded spike times of postsynaptic spike generator
# Rows pair one-to-one with spike_times_pre: in the first five protocols the
# postsynaptic generator fires 10 ms before each presynaptic spike, in the
# last five protocols 10 ms after.
spike_times_post = [
    [ 10.0, 110.0, 210.0, 310.0, 410.0], # noqa
    [ 10.0, 60.0, 110.0, 160.0, 210.0], # noqa
    [ 10.0, 43.3, 76.7, 110.0, 143.3], # noqa
    [ 10.0, 35.0, 60.0, 85.0, 110.0], # noqa
    [ 10.0, 30.0, 50.0, 70.0, 90.0], # noqa
    [130.0, 230.0, 330.0, 430.0, 530.0, 630.0], # noqa
    [ 80.0, 130.0, 180.0, 230.0, 280.0, 330.0], # noqa
    [ 63.3, 96.6, 130.0, 163.3, 196.6, 230.0], # noqa
    [ 55.0, 80.0, 105.0, 130.0, 155.0, 180.0], # noqa
    [ 50.0, 70.0, 90.0, 110.0, 130.0, 150.0]] # noqa
init_w = 0.5        # initial weight of the Clopath synapse
syn_weights = []    # final weight recorded after each pairing protocol
resolution = 0.1    # simulation resolution and connection delay (NEST uses ms)
##############################################################################
# Loop over pairs of spike trains
# Run one independent simulation per pairing protocol.
for s_t_pre, s_t_post in zip(spike_times_pre, spike_times_post):
    nest.ResetKernel()
    nest.resolution = resolution
    # Create one neuron
    nrn = nest.Create("aeif_psc_delta_clopath", 1, nrn_params)
    # We need a parrot neuron since spike generators can only
    # be connected with static connections
    prrt_nrn = nest.Create("parrot_neuron", 1)
    # Create and connect spike generators
    spike_gen_pre = nest.Create("spike_generator", {"spike_times": s_t_pre})
    nest.Connect(spike_gen_pre, prrt_nrn,
                 syn_spec={"delay": resolution})
    spike_gen_post = nest.Create("spike_generator", {"spike_times": s_t_post})
    # NOTE(review): weight 80.0 appears chosen so each generator spike makes
    # the postsynaptic neuron fire immediately -- confirm against the model.
    nest.Connect(spike_gen_post, nrn, syn_spec={"delay": resolution, "weight": 80.0})
    # Create weight recorder
    wr = nest.Create('weight_recorder')
    # Create Clopath connection with weight recorder
    nest.CopyModel("clopath_synapse", "clopath_synapse_rec",
                   {"weight_recorder": wr})
    syn_dict = {"synapse_model": "clopath_synapse_rec",
                "weight": init_w, "delay": resolution}
    nest.Connect(prrt_nrn, nrn, syn_spec=syn_dict)
    # Simulate until 10 ms after the last scheduled spike
    simulation_time = (10.0 + max(s_t_pre[-1], s_t_post[-1]))
    nest.Simulate(simulation_time)
    # Extract and save the final recorded synaptic weight of this protocol
    weights = wr.get("events", "weights")
    syn_weights.append(weights[-1])
syn_weights = np.array(syn_weights)
# scaling of the weights so that they are comparable to [1]; an unchanged
# weight (syn_weights == init_w) maps to 100
syn_weights = 100.0 * 15.0 * (syn_weights - init_w) / init_w + 100.0
# Plot results: protocols 5-9 of the spike-time tables are the pre-post
# pairings, protocols 0-4 the post-pre pairings (see the tables above);
# the x axis is the pairing repetition frequency.
fig, ax = plt.subplots(1, sharex=False)
ax.plot([10., 20., 30., 40., 50.], syn_weights[5:], color='b', lw=2.5, ls='-',
        label="pre-post pairing")
ax.plot([10., 20., 30., 40., 50.], syn_weights[:5], color='g', lw=2.5, ls='-',
        label="post-pre pairing")
ax.set_ylabel("normalized weight change")
ax.set_xlabel("rho (Hz)")
ax.legend()
ax.set_title("synaptic weight")
plt.show()
|
# This code is so you can run the samples without installing the package
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
import cocos
from cocos.director import director
from cocos.sprite import Sprite
import pyglet
import random
class TestLayer(cocos.layer.Layer):
    """Layer holding one centered sprite whose position is randomized:
    x every frame, y once per second."""

    def __init__(self):
        super(TestLayer, self).__init__()
        width, height = director.get_window_size()
        self.sprite = Sprite('grossini.png')
        self.sprite.position = width / 2, height / 2
        self.add(self.sprite)
        # change_x runs every frame; change_y once per second.
        self.schedule(self.change_x)
        self.schedule_interval(self.change_y, 1)

    def change_x(self, dt):
        window_width = director.get_window_size()[0]
        self.sprite.x = random.random() * window_width

    def change_y(self, dt):
        window_height = director.get_window_size()[1]
        self.sprite.y = random.random() * window_height
if __name__ == "__main__":
    # Boot the director, wrap the test layer in a scene and run it.
    director.init()
    scene = cocos.scene.Scene(TestLayer())
    director.run(scene)
|
import tornado.web
import json
from tornado_cors import CorsMixin
from common import ParameterFormat, EnumEncoder
class DefaultRequestHandler(CorsMixin, tornado.web.RequestHandler):
    """Base handler that renders PostgreSQL configuration output.

    The response format is selected with the ``format`` query argument:
    ``json`` (JSON-API document), ``bash`` (psql script), ``conf``
    (postgresql.conf snippet), ``alter_system`` (ALTER SYSTEM statements),
    or anything else for plain text.
    """

    CORS_ORIGIN = '*'

    def initialize(self):
        """Read the query arguments shared by all endpoints."""
        self.default_format = self.get_argument("format", "json", True)
        # NOTE(review): get_argument returns a *string* whenever the client
        # supplies the parameter, so any explicit ?show_about=... value is
        # truthy but never the boolean True checked below; only the default
        # reliably enables the banner.  Confirm intended semantics.
        self.show_about = self.get_argument("show_about", True, True)
        self.pg_version = self.get_argument("pg_version", 9.6, True)
        self.version = "2.0 beta"

    def write_about_stuff(self, format_type="alter_system"):
        """Write the "generated by" banner in the proper comment style.

        :param format_type: "conf" uses '#' comments, anything else '--'.
        """
        default_comment = "--"
        if format_type == "conf":
            default_comment = "#"
        self.write("{} Generated by PGConfig {}\n".format(default_comment,
                                                          self.version))
        self.write("{} http://pgconfig.org\n\n".format(default_comment * 2))

    def write_comment(self, format_type, comment):
        """Write a single comment line unless it is the sentinel "NONE"."""
        default_comment = "--"
        if format_type == "conf":
            default_comment = "#"
        if comment != "NONE":
            self.write("\n{} {}\n".format(default_comment, comment))

    def write_config(self, output_data):
        """Render categories and parameters as a postgresql.conf snippet."""
        if self.show_about is True:
            self.write_about_stuff("conf")
        for category in output_data:
            self.write("# {}\n".format(category["description"]))
            for parameter in category["parameters"]:
                config_value = parameter.get("config_value", "NI")
                value_format = parameter.get("format", ParameterFormat.NONE)
                # String and time values must be single-quoted in conf files.
                if value_format in (ParameterFormat.String,
                                    ParameterFormat.Time):
                    config_value = "'{}'".format(config_value)
                parameter_comment = parameter.get("comment", "NONE")
                if parameter_comment != "NONE":
                    self.write_comment("conf", parameter_comment)
                self.write("{} = {}\n".format(parameter["name"], config_value))
            self.write("\n")

    def write_alter_system(self, output_data):
        """Render categories/parameters as ALTER SYSTEM statements (9.4+)."""
        if float(self.pg_version) <= 9.3:
            self.write("-- ALTER SYSTEM format it's only supported on version 9.4 and higher. Use 'conf' format instead.")
        else:
            if self.show_about is True:
                self.write_about_stuff()
            for category in output_data:
                self.write("-- {}\n".format(category["description"]))
                for parameter in category["parameters"]:
                    config_value = parameter.get("config_value", "NI")
                    parameter_comment = parameter.get("comment", "NONE")
                    self.write_comment("alter_system", parameter_comment)
                    self.write("ALTER SYSTEM SET {} TO '{}';\n".format(
                        parameter["name"], config_value))
                self.write("\n")

    def write_plain(self, message=None):
        """Write the lines of *message* as plain text.

        The mutable default argument (``message=list()``) was replaced by
        ``None``; behaviour for all callers is unchanged.
        """
        message = [] if message is None else message
        if len(message) == 1:
            self.write(message[0])
        else:
            for line in message:
                self.write(line + '\n')

    def write_bash(self, message=None):
        """Wrap each line of *message* in a psql invocation."""
        message = [] if message is None else message
        bash_script = """
#!/bin/bash
"""
        self.write(bash_script)
        if len(message) == 1:
            self.write('SQL_QUERY="{}"\n'.format(message[0]))
            self.write('psql -c "${SQL_QUERY}"\n')
        else:
            for line in message:
                self.write('SQL_QUERY="{}"\n'.format(line))
                self.write('psql -c "${SQL_QUERY}"\n\n')

    def write_json_api(self, message):
        """Wrap *message* in a JSON-API document and write it."""
        self.set_header('Content-Type', 'application/vnd.api+json')
        _document = {}
        _document["data"] = message
        _meta = {}
        _meta["copyright"] = "PGConfig API"
        _meta["version"] = self.version
        _meta["arguments"] = self.request.arguments
        _document["meta"] = _meta
        _document["jsonapi"] = {"version": "1.0"}
        full_url = self.request.protocol + "://" + self.request.host + self.request.uri
        _document["links"] = {"self": full_url}
        self.write(
            json.dumps(
                _document,
                sort_keys=True,
                separators=(',', ': '),
                cls=EnumEncoder))

    def write_json(self, message=None):
        """Write *message* as a minimal {"output": ...} JSON object.

        NOTE(review): the JSON is assembled by string concatenation, so a
        line containing quotes or backslashes produces invalid JSON;
        consider json.dumps if this is ever fed untrusted text.
        """
        message = [] if message is None else message
        self.set_header('Content-Type', 'application/json')
        if len(message) == 1:
            self.write("{ \"output\": \"" + message[0] + "\"}")
        else:
            new_output = "{ \"output\": ["
            first_line = True
            for line in message:
                if not first_line:
                    new_output += ","
                else:
                    first_line = False
                new_output += "\"{}\"".format(line)
            new_output += "] } "
            self.write(new_output)

    def return_output(self, message=None):
        """Dispatch *message* to the writer selected by ?format=...

        Bug fix: the normalized ``process_data`` list was built but never
        used, so a bare (multiline) string reached the line-oriented
        writers unwrapped.  The plain and bash writers now receive the
        normalized list; the structured writers keep the original object
        so dict payloads are serialized unchanged.
        """
        message = [] if message is None else message
        # Convert a bare string into a single-item list so line-oriented
        # writers do not iterate over individual characters.
        process_data = []
        if not isinstance(message, list):
            process_data.insert(0, message)
        else:
            process_data = message
        if self.default_format == "json":
            self.write_json_api(message)
        elif self.default_format == "bash":
            self.write_bash(process_data)
        elif self.default_format == "conf":
            self.write_config(message)
        elif self.default_format == "alter_system":
            self.write_alter_system(message)
        else:
            self.write_plain(process_data)
class GeneratorRequestHandler(DefaultRequestHandler):
    """Handler for the generator endpoint.

    Inherits all output/formatting behavior from DefaultRequestHandler
    unchanged; exists so this endpoint can be routed separately.
    """
    pass
|
(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.formLayout.setItem(5, QtGui.QFormLayout.LabelRole, spacerItem1)
spacerItem2 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.formLayout.setItem(8, QtGui.QFormLayout.LabelRole, spacerItem2)
spacerItem3 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.formLayout.setItem(11, QtGui.QFormLayout.LabelRole, spacerItem3)
spacerItem4 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.formLayout.setItem(2, QtGui.QFormLayout.LabelRole, spacerItem4)
spacerItem5 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.formLayout.setItem(4, QtGui.QFormLayout.LabelRole, spacerItem5)
spacerItem6 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.formLayout.setItem(7, QtGui.QFormLayout.LabelRole, spacerItem6)
spacerItem7 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.formLayout.setItem(10, QtGui.QFormLayout.LabelRole, spacerItem7)
self.label_6 = QtGui.QLabel(self.formLayoutWidget)
self.label_6.setObjectName("label_6")
self.formLayout.setWidget(0, QtGui.QFormLayout.FieldRole, self.label_6)
self.label_7 = QtGui.QLabel(self.formLayoutWidget)
self.label_7.setObjectName("label_7")
self.formLayout.setWidget(3, QtGui.QFormLayout.FieldRole, self.label_7)
self.label_8 = QtGui.QLabel(self.formLayoutWidget)
self.label_8.setObjectName("label_8")
self.formLayout.setWidget(6, QtGui.QFormLayout.FieldRole, self.label_8)
self.label_9 = QtGui.QLabel(self.formLayoutWidget)
self.label_9.setObjectName("label_9")
self.formLayout.setWidget(9, QtGui.QFormLayout.FieldRole, self.label_9)
self.label_10 = QtGui.QLabel(self.formLayoutWidget)
self.label_10.setObjectName("label_10")
self.formLayout.setWidget(12, QtGui.QFormLayout.FieldRole, self.label_10)
self.gridLayoutWidget_2 = QtGui.QWidget(self.centralwidget)
self.gridLayoutWidget_2.setGeometry(QtCore.QRect(240, 390, 561, 161))
self.gridLayoutWidget_2.setObjectName("gridLayoutWidget_2")
self.gridLayout_2 = QtGui.QGridLayout(self.gridLayoutWidget_2)
self.gridLayout_2.setContentsMargins(0, 0, 0, 0)
self.gridLayout_2.setOb | jectName("gridLayout_2")
spacerItem8 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout_2.addItem(spacerItem8, 1, 2, 1, 1)
self.pushButton_24 = QtGui.QPushButton(self.gridLayoutWidget_2)
self.pushButton_24.setObjectName("pushButton_24")
self.gridLayout_2.addWidget(self.pushButton_24, 1, 1, 1, 1)
self.pushButton_25 = QtGui.QPushButton(self.gr | idLayoutWidget_2)
self.pushButton_25.setObjectName("pushButton_25")
self.gridLayout_2.addWidget(self.pushButton_25, 1, 3, 1, 1)
spacerItem9 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout_2.addItem(spacerItem9, 1, 4, 1, 1)
spacerItem10 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout_2.addItem(spacerItem10, 1, 0, 1, 1)
self.label_11 = QtGui.QLabel(self.centralwidget)
self.label_11.setGeometry(QtCore.QRect(240, 0, 561, 111))
self.label_11.setObjectName("label_11")
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtGui.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 808, 21))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtGui.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QObject.connect(self.pushButton_25, QtCore.SIGNAL("clicked()"), MainWindow.close)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(QtGui.QApplication.translate("MainWindow", "MainWindow", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_5.setText(QtGui.QApplication.translate("MainWindow", "PushButton", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_1.setText(QtGui.QApplication.translate("MainWindow", "PushButton", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_9.setText(QtGui.QApplication.translate("MainWindow", "PushButton", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_6.setText(QtGui.QApplication.translate("MainWindow", "PushButton", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_10.setText(QtGui.QApplication.translate("MainWindow", "PushButton", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_15.setText(QtGui.QApplication.translate("MainWindow", "PushButton", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_4.setText(QtGui.QApplication.translate("MainWindow", "PushButton", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_11.setText(QtGui.QApplication.translate("MainWindow", "PushButton", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_12.setText(QtGui.QApplication.translate("MainWindow", "PushButton", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_7.setText(QtGui.QApplication.translate("MainWindow", "PushButton", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_3.setText(QtGui.QApplication.translate("MainWindow", "PushButton", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_13.setText(QtGui.QApplication.translate("MainWindow", "PushButton", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_8.setText(QtGui.QApplication.translate("MainWindow", "PushButton", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_14.setText(QtGui.QApplication.translate("MainWindow", "PushButton", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_2.setText(QtGui.QApplication.translate("MainWindow", "PushButton", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("MainWindow", "offen:", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setText(QtGui.QApplication.translate("MainWindow", "korrket:", None, QtGui.QApplication.UnicodeUTF8))
self.label_3.setText(QtGui.QApplication.translate("MainWindow", "falsch:", None, QtGui.QApplication.UnicodeUTF8))
self.label_4.setText(QtGui.QApplication.translate("MainWindow", "gesamt:", None, QtGui.QApplication.UnicodeUTF8))
self.label_5.setText(QtGui.QApplication.translate("MainWindow", "Spiele:", None, QtGui.QApplication.UnicodeUTF8))
self.label_6.setText(QtGui.QApplication.translate("MainWindow", "offenAnzahl", None, QtGui.QApplication.UnicodeUTF8))
self.label_7.setText(QtGui.QApplication.translate("MainWindow", "korrektAnzahl", None, QtGui.QApplication.UnicodeUTF8))
self.label_8.setText(QtGui.QApplication.translate("MainWindow", "falschAnzahl", None, QtGui.QApplication.UnicodeUTF8))
self.label_9.setText(QtGui.QApplication.translate("MainWindow", "gesamtAnzahl", None, QtGui.QApplication.UnicodeUTF8))
self.label_10.setText(QtGui.QApplication.translate("MainWindow", "spieleAnzahl", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_24.setText(QtGui.QApplication.translate("MainWindow", "Neu", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton_25.setText(QtGui.QApplication.translate("MainWindow", "Ende", None, QtGui.QApplication.UnicodeUTF8))
self.label_11.setText(QtGui.QApplication.translate("MainWindow", "<html><head/><body><p align=\"center\"><span style=\" font-size:14pt; font-weight:600;\">Drücken Sie die Buttons in aufsteigender Reihenfolge</span></p></body></html>", Non |
# ----------------------------------------------------------------------------------------------------------------------
# Introdução a Programação de Computadores - IPC
# Universidade do Estado do Amazonas - UEA
# Prof. Jucimar Jr
# Edson de Lima Barros 1715310043
# Tiago Ferreira Aranha | 1715310047
# Vitor Simôes Azevedo 1715310025
# Roberta de Oliveira da Cruz 0825070169
# Uriel Brito Barros 151512 | 0558
#
# 16. Faça um procedimento que recebe, por parâmetro,
# 2 vetores de 10 elementos inteiros e que calcule e retorne,
# também por parâmetro, o vetor intersecção dos dois primeiros.
# Exercise 16: build two 10-element integer vectors and print their
# intersection (elements present in both).
from lista08.ipc import vetor

# Read/build the two input vectors (10 integer elements each).
vetor1 = vetor.cria_vetor(10)
vetor2 = vetor.cria_vetor(10)
# Compute the intersection of the two vectors and show the result.
vetor_interseccao = vetor.vetor_interseccao(vetor1, vetor2)
print(vetor_interseccao)
|
import mcpi.minecraft as minecraft
import mcpi.block as Block
import serial
import time
# The location where redstone torch needs to spawn.
a0 = (-112, 0, 62) # <- YOU MUST SET THIS VALUE (x,y,z)
"""
Helper method: get_pin(pin)
Returns whether the minecraft pin is turned on or off (based on redstone torch type)
Block(76, 1) -> Redstone Toch ON
Block(75, 1) -> Redstone Toch OFF
"""
def get_pin(pin):
    """Return 1/0 for a redstone torch at *pin* being ON/OFF, else -1.

    Block id 76 is a lit redstone torch, id 75 an unlit one.
    """
    block = mc.getBlockWithData(pin)
    print(block)
    torch_states = {76: 1, 75: 0}
    return torch_states.get(block.id, -1)
if __name__ == "__main__":
    # Serial port index for the Espruino (COM23 on Windows -> index 22).
    # FIX: dropped the stray trailing semicolon.
    port = 22
    old_val = 0
    ser = serial.Serial(port, timeout=1)  # open first serial port
    # FIX: parenthesized print works under both Python 2 and 3.
    print(ser.portstr)  # check which port was really used
    # Create mc object.
    mc = minecraft.Minecraft.create()
    # Main loop: poll the in-game pin and mirror its state onto LED1.
    try:
        while True:
            # Read the minecraft pin
            cur_val = get_pin(a0)
            if cur_val != old_val:
                # write the result to the LED1 on Espruino
                if int(cur_val):
                    # turn LED on
                    ser.write("digitalWrite(LED1, 1)\n")
                else:
                    # turn LED off
                    ser.write("digitalWrite(LED1, 0)\n")
                old_val = cur_val
            time.sleep(.5)  # small sleep
    except KeyboardInterrupt:
        print("stopped")
        ser.close()
|
#!/usr/bin/env python
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""gcloud command line tool."""
import time
START_TIME = time.time()
# pylint:disable=g-bad-import-order
# pylint:disable=g-import-not-at-top, We want to get the start time first.
import os
import signal
import sys
from googlecloudsdk.calliope import backend
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import cli
from googlecloudsdk.core import config
from googlecloudsdk.core import log
from googlecloudsdk.core import metrics
from googlecloudsdk.core import properties
from googlecloudsdk.core.updater import local_state
from googlecloudsdk.core.updater import update_manager
from googlecloudsdk.core.util import platforms
import surface
# Disable stack traces when people kill a command.
def CTRLCHandler(unused_signal, unused_frame):
    """Custom SIGINT handler.

    Signal handler that doesn't print the stack trace when a command is
    killed by keyboard interrupt.

    Args:
      unused_signal: int, the signal number (always SIGINT here).
      unused_frame: frame object, the interrupted stack frame.
    """
    try:
        log.err.Print('\n\nCommand killed by keyboard interrupt\n')
    except NameError:
        # log may not be importable if we were interrupted during startup.
        sys.stderr.write('\n\nCommand killed by keyboard interrupt\n')
    # Kill ourselves with SIGINT so our parent can detect that we exited because
    # of a signal. SIG_DFL disables further KeyboardInterrupt exceptions.
    signal.signal(signal.SIGINT, signal.SIG_DFL)
    os.kill(os.getpid(), signal.SIGINT)
    # Just in case the kill failed ...
    sys.exit(1)
signal.signal(signal.SIGINT, CTRLCHandler)
# Enable normal UNIX handling of SIGPIPE to play nice with grep -q, head, etc.
# See https://mail.python.org/pipermail/python-list/2004-June/273297.html and
# http://utcc.utoronto.ca/~cks/space/blog/python/SignalExceptionSurprise
# for more details.
if hasattr(signal, 'SIGPIPE'):
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
def _DoStartupChecks():
    """Exit immediately if the running Python version is unsupported."""
    if platforms.PythonVersion().IsCompatible():
        return
    sys.exit(1)
_DoStartupChecks( | )
if not config.Paths().sdk_root:
# Don't do update checks if there is no install root.
properties.VALUES.component_manager.disable_update_check.Set(True)
def UpdateCheck(command_path, **unused_kwargs):
    """Best-effort check for component updates; never lets errors escape."""
    try:
        update_manager.UpdateManager.PerformUpdateCheck(
            command_path=command_path)
    # pylint:disable=broad-except, We never want this to escape, ever. Only
    # messages printed should reach the user.
    except Exception:
        log.debug('Failed to perform update check.', exc_info=True)
def CreateCLI(surfaces):
    """Generates the gcloud CLI from 'surface' folder with extra surfaces.

    Args:
      surfaces: list(tuple(dot_path, dir_path)), extra commands or
          subsurfaces to add, where dot_path is the calliope command path
          and dir_path the path to the command group or command.

    Returns:
      calliope cli object.
    """
    def VersionFunc():
        generated_cli.Execute(['version'])

    sdk_root = os.path.dirname(os.path.dirname(surface.__file__))
    loader = cli.CLILoader(
        name='gcloud',
        command_root_directory=os.path.join(sdk_root, 'surface'),
        allow_non_existing_modules=True,
        version_func=VersionFunc)
    loader.AddReleaseTrack(
        base.ReleaseTrack.ALPHA,
        os.path.join(sdk_root, 'surface', 'alpha'),
        component='alpha')
    loader.AddReleaseTrack(
        base.ReleaseTrack.BETA,
        os.path.join(sdk_root, 'surface', 'beta'),
        component='beta')
    for dot_path, dir_path in surfaces:
        loader.AddModule(dot_path, dir_path, component=None)
    # Check for updates on shutdown but not for any of the updater commands.
    loader.RegisterPostRunHook(
        UpdateCheck, exclude_commands=r'gcloud\.components\..*')
    generated_cli = loader.Generate()
    return generated_cli
def _PrintSuggestedAction(err, err_string):
    """Print the best action for the user to take, given the error.

    Args:
      err: Exception, the exception the CLI crashed with.
      err_string: str, the display-safe string form of the error.
    """
    if (isinstance(err, backend.CommandLoadFailure) and
        type(err.root_exception) is ImportError):
        # This usually indicates installation corruption.
        # We do want to suggest `gcloud components reinstall` here (ex. as opposed
        # to the similar message in gcloud.py), because there's a good chance it'll
        # work (rather than a manual reinstall).
        # Don't suggest `gcloud feedback`, because this is probably an
        # installation problem.
        log.error(
            ('gcloud failed to load ({0}): {1}\n\n'
             'This usually indicates corruption in your gcloud installation or '
             'problems with your Python interpreter.\n\n'
             'Please verify that the following is the path to a working Python 2.7 '
             'executable:\n'
             '  {2}\n'
             'If it is not, please set the CLOUDSDK_PYTHON environment variable to '
             'point to a working Python 2.7 executable.\n\n'
             'If you are still experiencing problems, please run the following '
             'command to reinstall:\n'
             '  $ gcloud components reinstall\n\n'
             'If that command fails, please reinstall the Cloud SDK using the '
             'instructions here:\n'
             '  https://cloud.google.com/sdk/'
            ).format(err.command, err_string, sys.executable))
    else:
        log.error('gcloud crashed ({0}): {1}'.format(
            getattr(err, 'error_name', type(err).__name__),
            err_string))
    # In either case, point the user at `gcloud feedback` for reporting.
    log.err.Print('\nIf you would like to report this issue, please run the '
                  'following command:')
    log.err.Print('  gcloud feedback')
def main(gcloud_cli=None):
    """Run the gcloud CLI, recording metrics and reporting crashes."""
    metrics.Started(START_TIME)
    # TODO(user): Put a real version number here
    metrics.Executions(
        'gcloud',
        local_state.InstallationState.VersionForInstalledComponent('core'))
    if gcloud_cli is None:
        gcloud_cli = CreateCLI([])
    try:
        gcloud_cli.Execute()
    except Exception as err:  # pylint:disable=broad-except
        # Log the traceback in a form `gcloud feedback` can locate.
        log.file_only_logger.exception('BEGIN CRASH STACKTRACE')
        _PrintSuggestedAction(err, gcloud_cli.SafeExceptionToString(err))
        if properties.VALUES.core.print_unhandled_tracebacks.GetBool():
            # Cloud SDK developers want the traceback as Python shows it.
            raise
        # Most non-Cloud SDK developers shouldn't see the full stack trace,
        # just the nice "gcloud crashed" message.
        sys.exit(1)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
CTRLCHandler(None, None)
|
# Toggle for local development; changes the move cadence below.
DEBUG = False
# Identity used for posting: user name and channel.
USERNAME = 'hikaru'
CHANNEL = 'random'
# Keyword lists used to match user intent in incoming messages.
VOCAB = {
    'RANDOM': ['random', ':troll:', ':trollface:'],
    'PASS': ['pass', 'skip'],
    'RESIGN': ['resign', 'give up'],
    'VOTE': ['vote', 'move', 'play'],
    'VOTES': ['votes', 'moves', 'voted', 'chance'],
    'CAPTURES': ['captures'],
    'SHOW': ['show', 'board'],
    'YES': ['yes', 'yeah', 'ya', 'y', 'ja', 'please', 'ok', 'yep'],
    'NO': ['no', 'nope', 'n', 'nee', "don't", 'cancel'],
}
# Canned reply texts, one list per situation; presumably one entry is
# picked by the caller -- TODO confirm against usage.
RESPONSES = {
    'RESIGN_CONFIRMATION': [
        'Are you sure you want to resign?',
        'Sure?',
    ],
    'RESIGN_CANCELLED': [
        'Ok.',
        'Resignation cancelled.',
    ],
    'UNKNOWN': [
        "I don't know.",
        'What do you mean?',
        "That doesn't make any sense.",
        "I'm just a bot.",
    ],
}
# How often to play moves. See `man crontab` for format information.
if DEBUG:
    CRON = '*/2 * * * *'  # Every two minutes.
else:
    CRON = '0 9-18 * * 1-5'  # Hourly between 9:00 and 18:00 on weekdays.
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licen | ses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or | implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test Transformer model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from official.nlp.transformer import model_params
from official.transformer.v2 import transformer
class TransformerV2Test(tf.test.TestCase):
    """Shape/dtype checks for the Keras Transformer model builders."""

    def setUp(self):
        # Start from TINY_PARAMS and shrink everything further so the model
        # builds fast.  Note: this mutates the shared TINY_PARAMS mapping,
        # exactly as the original test did.
        params = model_params.TINY_PARAMS
        self.params = params
        overrides = {
            "batch_size": 16,
            "default_batch_size": 16,
            "use_synthetic_data": True,
            "hidden_size": 12,
            "num_hidden_layers": 2,
            "filter_size": 14,
            "num_heads": 2,
            "vocab_size": 41,
            "extra_decode_length": 2,
            "beam_size": 3,
            "dtype": tf.float32,
        }
        for key, value in overrides.items():
            params[key] = value

    def test_create_model_train(self):
        """Training model: two int64 inputs in, one float32 logits output."""
        model = transformer.create_model(self.params, True)
        inputs, outputs = model.inputs, model.outputs
        self.assertEqual(len(inputs), 2)
        self.assertEqual(len(outputs), 1)
        source, targets = inputs
        self.assertEqual(source.shape.as_list(), [None, None])
        self.assertEqual(source.dtype, tf.int64)
        self.assertEqual(targets.shape.as_list(), [None, None])
        self.assertEqual(targets.dtype, tf.int64)
        logits = outputs[0]
        self.assertEqual(logits.shape.as_list(), [None, None, 41])
        self.assertEqual(logits.dtype, tf.float32)

    def test_create_model_not_train(self):
        """Inference model: one input, two outputs (int32 ids, float32)."""
        model = transformer.create_model(self.params, False)
        inputs, outputs = model.inputs, model.outputs
        self.assertEqual(len(inputs), 1)
        self.assertEqual(len(outputs), 2)
        source = inputs[0]
        self.assertEqual(source.shape.as_list(), [None, None])
        self.assertEqual(source.dtype, tf.int64)
        first_out, second_out = outputs
        self.assertEqual(first_out.shape.as_list(), [None, None])
        self.assertEqual(first_out.dtype, tf.int32)
        self.assertEqual(second_out.shape.as_list(), [None])
        self.assertEqual(second_out.dtype, tf.float32)


if __name__ == "__main__":
    tf.compat.v1.enable_v2_behavior()
    tf.test.main()
|
from . import mne # noqa
from .m | ne.spect | ral import TFRmorlet # noqa
|
# Copyright (C) 2010-2017 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from snf_django.management.commands import SynnefoCommand
from synnefo.db.models import Backend
from synnefo.logic import backend as backend_mod
from synnefo.d | b import transaction
HELP_MSG = """Query Ganeti backends and update the status of backend in DB.
This command updates:
* the list of the enabled disk-templates
* the available resources (disk, memory, CPUs)
"""
class Command(SynnefoCommand):
    help = HELP_MSG

    @transaction.atomic
    def handle(self, **options):
        """Refresh disk templates and resources for every online backend."""
        # Lock the rows for the duration of the transaction.
        online_backends = Backend.objects.select_for_update().filter(
            offline=False)
        for backend in online_backends:
            backend_mod.update_backend_disk_templates(backend)
            backend_mod.update_backend_resources(backend)
            self.stdout.write("Successfully updated backend '%s'\n" % backend)
|
"""Defines all search scopes used in this project."""
from os import path
ROOT_PATH = path.abspath('/')


class TreeSearchScope:
    """Search scope that walks from a folder upward toward an ancestor."""

    def __init__(self,
                 from_folder=ROOT_PATH,
                 to_folder=ROOT_PATH):
        """Initialize the search scope."""
        self.from_folder = from_folder
        self.to_folder = to_folder

    @property
    def from_folder(self):
        """Folder where the upward search starts."""
        return self._from_folder

    @from_folder.setter
    def from_folder(self, folder):
        """Set the start folder and reset the iteration cursor."""
        self._from_folder = folder
        self._current_folder = folder

    @property
    def to_folder(self):
        """Folder where the upward search stops (inclusive)."""
        return self._to_folder

    @to_folder.setter
    def to_folder(self, folder):
        """Set the stop folder; remember its parent as one-past-the-end."""
        self._to_folder = folder
        self._one_past_last = path.dirname(folder)

    def __bool__(self):
        """The scope is empty only when it starts at the filesystem root."""
        return self._from_folder != ROOT_PATH

    def __iter__(self):
        """Restart iteration at the start folder."""
        self._current_folder = self._from_folder
        return self

    def __next__(self):
        """Yield the current folder, then step one level up the tree."""
        folder = self._current_folder
        parent = path.dirname(folder)
        self._current_folder = parent
        # Stop once we walk past to_folder or can't go any higher.
        if folder == self._one_past_last or folder == parent:
            raise StopIteration
        return folder

    def __repr__(self):
        """Return search scope as a printable string."""
        return 'SearchScope: from_folder: {}, to_folder: {}'.format(
            self._from_folder, self._to_folder)
class ListSearchScope:
    """Encapsulation of a search scope over an explicit list of folders."""

    def __init__(self, paths=None):
        """Initialize the search scope.

        Args:
            paths: iterable of candidate paths; entries that are not
                existing directories are dropped.  Defaults to an empty
                scope.  (FIX: the original used a mutable default ``[]``,
                which is shared between all calls.)
        """
        self.folders = [] if paths is None else paths

    @property
    def folders(self):
        """Get the directories in this scope."""
        return self._folders

    @folders.setter
    def folders(self, paths):
        """Set the folders, keeping only existing directories."""
        self._folders = [f for f in paths if path.isdir(f)]
        self._iter = iter(self._folders)

    def __bool__(self):
        """Check if the search scope is not empty."""
        return len(self._folders) > 0

    def __iter__(self):
        """Make this an iterator."""
        self._iter = iter(self._folders)
        return self._iter

    def __next__(self):
        """Get next folder to search in."""
        return next(self._iter)

    def __repr__(self):
        """Return search scope as a printable string."""
        return 'SearchScope: folders: {}'.format(self._folders)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_stack
----------------------------------
Tests for `python_algorithms.stack` module.
"""
import unittest
from python_algorithms.basic.stack import Stack
class TestStack(unittest.TestCase):
    """Unit tests for python_algorithms.basic.stack.Stack."""

    def setUp(self):
        self.empty_stack = Stack()
        self.stack = Stack()
        self.seq = [0, 2, 4, 6, 8]
        for x in self.seq:
            self.stack.push(x)

    def test_push_to_empty_stack(self):
        self.empty_stack.push(0)
        self.assertEqual(self.empty_stack.peek(), 0)

    def test_push_to_stack(self):
        self.stack.push(10)
        self.assertEqual(self.stack.peek(), 10)

    def test_pop_from_empty_stack(self):
        self.assertRaises(IndexError, self.empty_stack.pop)

    def test_pop_from_stack(self):
        self.assertEqual(self.stack.pop(), self.seq[-1])

    def test_size_of_empty_stack(self):
        self.assertEqual(self.empty_stack.size, 0)

    def test_size_of_stack(self):
        self.assertEqual(self.stack.size, len(self.seq))

    def test_peek_at_empty_stack(self):
        self.assertRaises(IndexError, self.empty_stack.peek)

    def test_peek_at_stack(self):
        self.assertEqual(self.stack.peek(), self.seq[-1])

    def test_iterate_empty_stack(self):
        for curr in self.empty_stack:
            # FIX: was assertEqual(False, True); fail() states the intent.
            self.fail("empty stack yielded %r during iteration" % (curr,))

    def test_iterate_stack(self):
        # The stack iterates top-down, so reversing the iteration order
        # must reproduce the push order.
        iter_seq = list(self.stack)
        iter_seq.reverse()
        self.assertEqual(iter_seq, self.seq)

    def test_str_empty_stack(self):
        self.assertEqual(str(self.empty_stack), "")

    def test_str_stack(self):
        self.assertEqual(str(self.stack), " ".join(str(x) for x in self.seq))

    def tearDown(self):
        pass


if __name__ == '__main__':
    unittest.main()
|
checks")
for item in result:
assert item in qs
assert result.count() == result.distinct().count()
if check_type == "checks":
for item in result:
assert any(
qc in item.qualitycheck_set.values_list("name", flat=True)
for qc
in check_data)
assert(
list(result)
== list(
qs.filter(
qualitycheck__false_positive=False,
qualitycheck__name__in=check_data).distinct()))
else:
for item in result:
item.qualitycheck_set.values_list("category", flat=True)
assert(
list(result)
== list(
qs.filter(
qualitycheck__false_positive=False,
qualitycheck__category=check_data).distinct()))
def _test_units_contribution_filter(qs, user, unit_filter):
    """Check UnitContributionFilter(qs, user) against hand-built querysets."""
    result = UnitContributionFilter(qs, user=user).filter(unit_filter)
    # Every filtered unit must come from the original queryset, no dupes.
    for item in result:
        assert item in qs
    assert result.count() == result.distinct().count()
    user_subs_overwritten = [
        "my_submissions_overwritten",
        "user_submissions_overwritten"]
    if unit_filter == "suggestions":
        # "suggestions" ignores the user entirely: all pending suggestions.
        assert (
            result.count()
            == qs.filter(
                suggestion__state__name="pending").distinct().count())
        return
    elif not user:
        # The remaining filters are user-scoped; no user means no results.
        assert result.count() == 0
        return
    elif unit_filter in ["my_suggestions", "user_suggestions"]:
        expected = qs.filter(
            suggestion__state__name="pending",
            suggestion__user=user).distinct()
    elif unit_filter == "user_suggestions_accepted":
        expected = qs.filter(
            suggestion__state__name="accepted",
            suggestion__user=user).distinct()
    elif unit_filter == "user_suggestions_rejected":
        expected = qs.filter(
            suggestion__state__name="rejected",
            suggestion__user=user).distinct()
    elif unit_filter in ["my_submissions", "user_submissions"]:
        expected = qs.filter(submitted_by=user)
    elif unit_filter in user_subs_overwritten:
        # lets calc this long hand
        # first submissions that have been added with no suggestion
        user_edit_subs = Submission.objects.filter(
            type__in=SubmissionTypes.EDIT_TYPES).filter(
            suggestion__isnull=True).filter(
            submitter=user).values_list("unit_id", flat=True)
        # next the suggestions that are accepted and the user is this user
        user_suggestions = Suggestion.objects.filter(
            state__name="accepted",
            user=user).values_list("unit_id", flat=True)
        # "overwritten" means someone else later became the submitter.
        expected = qs.filter(
            id__in=(
                set(user_edit_subs)
                | set(user_suggestions))).exclude(submitted_by=user)
    assert (
        list(expected.order_by("pk"))
        == list(result.order_by("pk")))
def _test_unit_text_search(qs, text, sfields, exact, empty=True):
    """Verify UnitTextSearch results against a brute-force expectation."""
    unit_search = UnitTextSearch(qs)
    result = unit_search.search(text, sfields, exact).order_by("pk")
    words = unit_search.get_words(text, exact)
    fields = unit_search.get_search_fields(sfields)
    # The result must match the independently computed expectation.
    assert (
        list(result)
        == _expected_text_search_results(qs, words, fields))
    # No duplicates in the result queryset.
    assert list(result) == list(result.distinct())
    if not empty:
        assert result.count()
    for item in result:
        # Each hit comes from the original queryset ...
        assert item in qs
        # ... and contains every search word in at least one search field.
        for word in words:
            assert any(
                word.lower() in getattr(item, field).lower()
                for field in fields)
def _test_units_state_filter(qs, unit_filter):
    """Verify UnitStateFilter against a direct state__in query."""
    result = UnitStateFilter(qs).filter(unit_filter)
    for item in result:
        assert item in qs
    assert result.count() == result.distinct().count()
    if unit_filter == "all":
        assert list(result) == list(qs)
        return
    # Map the filter name to the unit states it should select.
    elif unit_filter == "translated":
        states = [TRANSLATED]
    elif unit_filter == "untranslated":
        states = [UNTRANSLATED]
    elif unit_filter == "fuzzy":
        states = [FUZZY]
    elif unit_filter == "incomplete":
        states = [UNTRANSLATED, FUZZY]
    result_states = result.values_list("state", flat=True)
    assert all(state in states for state in result_states)
    assert qs.filter(state__in=states).count() == result.count()
@pytest.mark.django_db
def test_get_units_text_search(units_text_searches):
    """End-to-end check of UnitTextSearch parsing and searching."""
    search = units_text_searches
    sfields = search["sfields"]
    text = search["text"]
    exact = search["exact"]
    fields = _expected_text_search_fields(sfields)
    words = _expected_text_search_words(text, exact)
    searcher = UnitTextSearch(Unit.objects.all())
    # The fields parser works correctly.
    assert searcher.get_search_fields(sfields) == fields
    # The text tokeniser works correctly and yields a list.
    assert searcher.get_words(text, exact) == words
    assert isinstance(words, list)
    # Run the all-units test first and check it's not empty when it
    # shouldn't be.
    _test_unit_text_search(
        Unit.objects.all(), text, sfields, exact, search["empty"])
    # Then run the tests against different querysets.
    for qs in [Unit.objects.none(), Unit.objects.live()]:
        _test_unit_text_search(qs, text, sfields, exact)
@pytest.mark.django_db
def test_units_contribution_filter_none(units_contributor_searches):
    """Contribution filters with no user; unknown names raise."""
    unit_filter = units_contributor_searches
    user = None
    qs = Unit.objects.all()
    if not hasattr(UnitContributionFilter, "filter_%s" % unit_filter):
        with pytest.raises(FilterNotFound):
            UnitContributionFilter(qs, user=user).filter(unit_filter)
        return
    # Exercise full, empty and project-restricted querysets.
    project_qs = qs.filter(
        store__translation_project__project=Project.objects.first())
    for _qs in (qs, qs.none(), project_qs):
        _test_units_contribution_filter(_qs, user, unit_filter)
@pytest.mark.django_db
def test_units_contribution_filter(units_contributor_searches, site_users):
    """Contribution filters for each site user; unknown names raise."""
    unit_filter = units_contributor_searches
    user = site_users["user"]
    qs = Unit.objects.all()
    if not hasattr(UnitContributionFilter, "filter_%s" % unit_filter):
        with pytest.raises(FilterNotFound):
            UnitContributionFilter(qs, user=user).filter(unit_filter)
        return
    # Exercise full, empty and project-restricted querysets.
    project_qs = qs.filter(
        store__translation_project__project=Project.objects.first())
    for _qs in (qs, qs.none(), project_qs):
        _test_units_contribution_filter(_qs, user, unit_filter)
@pytest.mark.django_db
def test_units_state_filter(units_state_searches):
    """State filters across querysets; unknown names raise."""
    unit_filter = units_state_searches
    qs = Unit.objects.all()
    if not hasattr(UnitStateFilter, "filter_%s" % unit_filter):
        with pytest.raises(FilterNotFound):
            UnitStateFilter(qs).filter(unit_filter)
        return
    # Exercise full, empty and project-restricted querysets.
    project_qs = qs.filter(
        store__translation_project__project=Project.objects.first())
    for _qs in (qs, qs.none(), project_qs):
        _test_units_state_filter(_qs, unit_filter)
@pytest.mark.django_db
def test_units_checks_filter(units_checks_searches):
    """Quality-check filters across full, empty and restricted querysets."""
    check_type, check_data = units_checks_searches
    qs = Unit.objects.all()
    project_qs = qs.filter(
        store__translation_project__project=Project.objects.first())
    for _qs in (qs, qs.none(), project_qs):
        _test_units_checks_filter(_qs, check_type, check_data)
@pytest.mark.django_db
def test_units_checks_filter_bad():
qs = Unit.objects.all()
with pytest.raises(FilterNotFound):
UnitChecksFilter(qs).filter("BAD")
# if you dont supply check/category you get empty qs
assert not UnitChecksFilter(qs). |
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase
from django.conf import settings
from polycommon.options.exceptions import OptionException
from polycommon.options.feature import Feature
from polycommon.options.option import NAMESPACE_DB_OPTION_MARKER, OptionStores
class DummyFeature(Feature):
    """Minimal concrete Feature subclass used as a test fixture."""
    pass
class TestFeature(TestCase):
    """Tests for Feature key parsing and store configuration."""

    def test_feature_default_store(self):
        assert DummyFeature.store == OptionStores(settings.STORE_OPTION)

    def test_feature_marker(self):
        assert DummyFeature.get_marker() == NAMESPACE_DB_OPTION_MARKER

    def test_parse_key_wrong_namespace(self):
        # FIX: method name typo corrected ("wtong" -> "wrong").
        # Keys outside the FEATURES namespace must be rejected.
        DummyFeature.key = "FOO"
        with self.assertRaises(OptionException):
            DummyFeature.parse_key()
        DummyFeature.key = "FOO:BAR"
        with self.assertRaises(OptionException):
            DummyFeature.parse_key()

    def test_parse_key_without_namespace(self):
        DummyFeature.key = "FEATURES:FOO"
        assert DummyFeature.parse_key() == (None, "FOO")

    def test_parse_key_with_namespace(self):
        DummyFeature.key = "FEATURES:FOO:BAR"
        assert DummyFeature.parse_key() == ("FOO", "BAR")
|
import sqlalchemy as sa
from sqlalchemy_utils.functions.sort_query import make_order_by_deterministic
from tests import assert_contains, TestCase
class TestMakeOrderByDeterministic(TestCase):
    """Tests that make_order_by_deterministic appends the primary key (or
    nothing, for already-unique columns) to a query's ORDER BY clause."""

    def create_models(self):
        class User(self.Base):
            __tablename__ = 'user'
            id = sa.Column(sa.Integer, primary_key=True)
            name = sa.Column(sa.Unicode)
            email = sa.Column(sa.Unicode, unique=True)
            # NOTE(review): despite its name this lowercases `name`, not
            # `email`; test_column_property asserts the same SQL, so the
            # suite is self-consistent — confirm whether it was intended.
            email_lower = sa.orm.column_property(
                sa.func.lower(name)
            )

        class Article(self.Base):
            __tablename__ = 'article'
            id = sa.Column(sa.Integer, primary_key=True)
            author_id = sa.Column(sa.Integer, sa.ForeignKey('user.id'))
            author = sa.orm.relationship(User)

        # Correlated subquery counting each user's articles.
        User.article_count = sa.orm.column_property(
            sa.select([sa.func.count()], from_obj=Article)
            .where(Article.author_id == User.id)
            .label('article_count')
        )
        self.User = User
        self.Article = Article

    def test_column_property(self):
        query = self.session.query(self.User).order_by(self.User.email_lower)
        query = make_order_by_deterministic(query)
        assert_contains('lower("user".name), "user".id ASC', query)

    def test_unique_column(self):
        # A unique column is already deterministic; no tie-breaker is added.
        query = self.session.query(self.User).order_by(self.User.email)
        query = make_order_by_deterministic(query)
        assert str(query).endswith('ORDER BY "user".email')

    def test_non_unique_column(self):
        query = self.session.query(self.User).order_by(self.User.name)
        query = make_order_by_deterministic(query)
        assert_contains('ORDER BY "user".name, "user".id ASC', query)

    def test_descending_order_by(self):
        # The tie-breaker inherits the DESC direction.
        query = self.session.query(self.User).order_by(
            sa.desc(self.User.name)
        )
        query = make_order_by_deterministic(query)
        assert_contains('ORDER BY "user".name DESC, "user".id DESC', query)

    def test_ascending_order_by(self):
        query = self.session.query(self.User).order_by(
            sa.asc(self.User.name)
        )
        query = make_order_by_deterministic(query)
        assert_contains('ORDER BY "user".name ASC, "user".id ASC', query)

    def test_string_order_by(self):
        # Plain-string order_by criteria are also handled.
        query = self.session.query(self.User).order_by('name')
        query = make_order_by_deterministic(query)
        assert_contains('ORDER BY name, "user".id ASC', query)

    def test_annotated_label(self):
        query = self.session.query(self.User).order_by(self.User.article_count)
        query = make_order_by_deterministic(query)
        assert_contains('article_count, "user".id ASC', query)

    def test_annotated_label_with_descending_order(self):
        query = self.session.query(self.User).order_by(
            sa.desc(self.User.article_count)
        )
        query = make_order_by_deterministic(query)
        assert_contains('ORDER BY article_count DESC, "user".id DESC', query)

    def test_query_without_order_by(self):
        # Even with no ORDER BY at all, one is added on the primary key.
        query = self.session.query(self.User)
        query = make_order_by_deterministic(query)
        assert 'ORDER BY "user".id' in str(query)

    def test_alias(self):
        alias = sa.orm.aliased(self.User.__table__)
        query = self.session.query(alias).order_by(alias.c.name)
        query = make_order_by_deterministic(query)
        assert str(query).endswith('ORDER BY user_1.name, "user".id ASC')
|
from gooddataclient.dataset import Dataset
from gooddataclient.columns import ConnectionPoint, Label, Reference
class Employee(Dataset):
    # Declarative GoodData dataset fixture: one connection point, two labels
    # referencing it, and a reference into the Department dataset.
    employee = ConnectionPoint(title='Employee', folder='Employee')
    firstname = Label(title='First Name', reference='employee', folder='Employee')
    lastname = Label(title='Last Name', reference='employee', folder='Employee')
    department = Reference(title='Department', reference='department', schemaReference='Department', folder='Employee')

    class Meta(Dataset.Meta):
        # Column order used for the generated CSV upload and schema.
        column_order = ('employee', 'firstname', 'lastname', 'department')

    def data(self):
        # Sample rows; dict keys match the column names declared above.
        return [{'employee': 'e1', 'lastname': 'Nowmer', 'department': 'd1', 'firstname': 'Sheri'},
                {'employee': 'e2', 'lastname': 'Whelply', 'department': 'd1', 'firstname': 'Derrick'},
                {'employee': 'e6', 'lastname': 'Damstra', 'department': 'd2', 'firstname': 'Roberta'},
                {'employee': 'e7', 'lastname': 'Kanagaki', 'department': 'd3', 'firstname': 'Rebecca'},
                {'employee': 'e8', 'lastname': 'Brunner', 'department': 'd11', 'firstname': 'Kim'},
                {'employee': 'e9', 'lastname': 'Blumberg', 'department': 'd11', 'firstname': 'Brenda'},
                {'employee': 'e10', 'lastname': 'Stanz', 'department': 'd5', 'firstname': 'Darren'},
                {'employee': 'e11', 'lastname': 'Murraiin', 'department': 'd11', 'firstname': 'Jonathan'},
                {'employee': 'e12', 'lastname': 'Creek', 'department': 'd11', 'firstname': 'Jewel'},
                {'employee': 'e13', 'lastname': 'Medina', 'department': 'd11', 'firstname': 'Peggy'},
                {'employee': 'e14', 'lastname': 'Rutledge', 'department': 'd11', 'firstname': 'Bryan'},
                {'employee': 'e15', 'lastname': 'Cavestany', 'department': 'd11', 'firstname': 'Walter'},
                {'employee': 'e16', 'lastname': 'Planck', 'department': 'd11', 'firstname': 'Peggy'},
                {'employee': 'e17', 'lastname': 'Marshall', 'department': 'd11', 'firstname': 'Brenda'},
                {'employee': 'e18', 'lastname': 'Wolter', 'department': 'd11', 'firstname': 'Daniel'},
                {'employee': 'e19', 'lastname': 'Collins', 'department': 'd11', 'firstname': 'Dianne'}
                ]
maql = """
# THIS IS MAQL SCRIPT THAT GENERATES PROJECT LOGICAL MODEL.
# SEE THE MAQL DOCUMENTATION AT http://developer.gooddata.com/api/maql-ddl.html FOR MORE DETAILS
# CREATE DATASET. DATASET GROUPS ALL FOLLOWING LOGICAL MODEL ELEMENTS TOGETHER.
CREATE DATASET {dataset.employee} VISUAL(TITLE "Employee");
# CREATE THE FOLDERS THAT GROUP ATTRIBUTES AND FACTS
CREATE FOLDER {dim.employee} VISUAL(TITLE "Employee") TYPE ATTRIBUTE;
# CREATE ATTRIBUTES.
# ATTRIBUTES ARE CATEGORIES THAT ARE USED FOR SLICING AND DICING THE NUMBERS (FACTS)
CREATE ATTRIBUTE {attr.employee.employee} VISUAL(TITLE "Employee", FOLDER {dim.employee}) AS KEYS {f_employee.id} FULLSET;
ALTER DATASET {dataset.employee} ADD {attr.employee.employee};
# CREATE FACTS
# FACTS ARE NUMBERS THAT ARE AGGREGATED BY ATTRIBUTES.
# CREATE DATE FACTS
# DATES ARE REPRESENTED AS FACTS
# DATES ARE ALSO CONNECTED TO THE DATE DIMENSIONS
# CREATE REFERENCES
# REFERENCES CONNECT THE DATASET TO OTHER DATASETS
# CONNECT THE REFERENCE TO THE APPROPRIATE DIMENSION
ALTER ATTRIBUTE {attr.department.department} ADD KEYS {f_employee.department_id};
# ADD LABELS TO ATTRIBUTES
ALTER ATTRIBUTE {attr.employee.employee} ADD LABELS {label.employee.employee.firstname} VISUAL(TITLE "First Name") AS {f_employee.nm_firstname};
ALTER ATTRIBUTE {attr.employee.employee} DEFAULT LABEL {label.employee.employee.firstname};
# ADD LABELS TO ATTRIBUTES
ALTER ATTRIBUTE {attr.employee.employee} ADD LABELS {label.employee.employee.lastname} VISUAL(TITLE "Last Name") AS {f_employee.nm_lastname};
ALTER ATTRIBUTE {attr.employee.employee} ADD LABELS {label.employee.employee} VISUAL(TITLE "Employee") AS {f_employee.nm_employee};
# SYNCHRONIZE THE STORAGE AND DATA LOADING INTERFACES WITH THE NEW LOGICAL MODEL
SYNCHRONIZE {dataset.employee};
"""
schema_xml = '''
<schema>
<name>Employee</name>
<columns>
<column>
<name>employee</name>
<title>Employee</title>
<ldmType>CONNECTION_POINT</ldmType>
<folder>Employee</folder>
</column>
<column>
<name>firstname</name>
<title>First Name</title>
<ldmType>LABEL</ldmType>
<reference>employee</reference>
<folder>Employee</folder>
</column>
<column>
<name>lastname</name>
<title>Last Name</title>
<ldmType>LABEL</ldmType>
<reference>employee</reference>
<folder>Employee</folder>
</column>
<column>
<name>department</name>
<title>Department</title>
<ldmType>REFERENCE</ldmType>
<reference>department</reference>
<schemaReference>Department</schemaReference>
<folde | r>Employee</folder>
</column>
</columns>
</schema>
'''
data_csv = '''"employee","firstname","lastname","department"
"e1","Sheri","Nowmer","d1"
"e2","Derrick","Whelply", | "d1"
"e6","Roberta","Damstra","d2"
"e7","Rebecca","Kanagaki","d3"
"e8","Kim","Brunner","d11"
"e9","Brenda","Blumberg","d11"
"e10","Darren","Stanz","d5"
"e11","Jonathan","Murraiin","d11"
"e12","Jewel","Creek","d11"
"e13","Peggy","Medina","d11"
"e14","Bryan","Rutledge","d11"
"e15","Walter","Cavestany","d11"
"e16","Peggy","Planck","d11"
"e17","Brenda","Marshall","d11"
"e18","Daniel","Wolter","d11"
"e19","Dianne","Collins","d11"
'''
sli_manifest = {"dataSetSLIManifest": {
"parts": [
{
"columnName": "employee",
"mode": "FULL",
"populates": ["label.employee.employee"],
"referenceKey": 1
},
{
"columnName": "firstname",
"mode": "FULL",
"populates": ["label.employee.employee.firstname"]
},
{
"columnName": "lastname",
"mode": "FULL",
"populates": ["label.employee.employee.lastname"]
},
{
"columnName": "department",
"mode": "FULL",
"populates": ["label.department.department"],
"referenceKey": 1
}
],
"file": "data.csv",
"dataSet": "dataset.employee",
"csvParams": {
"quoteChar": "\"",
"escapeChar": "\"",
"separatorChar": ",",
"endOfLine": "\n"
}
}}
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A layer that produces a dense `Tensor` based on given `feature_columns`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from keras.feature_column import base_feature_layer as kfc
from keras.feature_column import dense_features
from keras.utils import tf_contextlib
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.layers.DenseFeatures', v1=[])
class DenseFeatures(dense_features.DenseFeatures):
  """A layer that produces a dense `Tensor` based on given `feature_columns`.
  Generally a single example in training data is described with FeatureColumns.
  At the first layer of the model, this column oriented data should be converted
  to a single `Tensor`.
  This layer can be called multiple times with different features.
  This is the V2 version of this layer that uses name_scopes to create
  variables instead of variable_scopes. But this approach currently lacks
  support for partitioned variables. In that case, use the V1 version instead.
  Example:
  ```python
  price = tf.feature_column.numeric_column('price')
  keywords_embedded = tf.feature_column.embedding_column(
      tf.feature_column.categorical_column_with_hash_bucket("keywords", 10000),
      dimensions=16)
  columns = [price, keywords_embedded, ...]
  feature_layer = tf.keras.layers.DenseFeatures(columns)
  features = tf.io.parse_example(
      ..., features=tf.feature_column.make_parse_example_spec(columns))
  dense_tensor = feature_layer(features)
  for units in [128, 64, 32]:
    dense_tensor = tf.keras.layers.Dense(units, activation='relu')(dense_tensor)
  prediction = tf.keras.layers.Dense(1)(dense_tensor)
  ```
  """

  def __init__(self,
               feature_columns,
               trainable=True,
               name=None,
               **kwargs):
    """Creates a DenseFeatures object.
    Args:
      feature_columns: An iterable containing the FeatureColumns to use as
        inputs to your model. All items should be instances of classes derived
        from `DenseColumn` such as `numeric_column`, `embedding_column`,
        `bucketized_column`, `indicator_column`. If you have categorical
        features, you can wrap them with an `embedding_column` or
        `indicator_column`.
      trainable: Boolean, whether the layer's variables will be updated via
        gradient descent during training.
      name: Name to give to the DenseFeatures.
      **kwargs: Keyword arguments to construct a layer.
    Raises:
      ValueError: if an item in `feature_columns` is not a `DenseColumn`.
    """
    super(DenseFeatures, self).__init__(
        feature_columns=feature_columns,
        trainable=trainable,
        name=name,
        **kwargs)
    # V2 state manager: creates variables under name_scopes (see class doc).
    self._state_manager = _StateManagerImplV2(self, self.trainable)

  def build(self, _):
    # Let every feature column create its state (e.g. embedding weights)
    # inside a name_scope named after the column.
    for column in self._feature_columns:
      with tf.name_scope(column.name):
        column.create_state(self._state_manager)
    # We would like to call Layer.build and not _DenseFeaturesHelper.build.
    # pylint: disable=protected-access
    super(kfc._BaseFeaturesLayer, self).build(None)  # pylint: disable=bad-super-call
class _StateManagerImplV2(tf.__internal__.feature_column.StateManager):  # pylint: disable=protected-access
  """Manages the state of DenseFeatures."""

  def create_variable(self,
                      feature_column,
                      name,
                      shape,
                      dtype=None,
                      trainable=True,
                      use_resource=True,
                      initializer=None):
    """Creates a weight for `feature_column` and tracks it explicitly.

    Returns the created variable. Raises ValueError if this column already
    owns a variable called `name`.
    """
    if name in self._cols_to_vars_map[feature_column]:
      raise ValueError('Variable already exists.')
    # We explicitly track these variables since `name` is not guaranteed to be
    # unique and disable manual tracking that the add_weight call does.
    with no_manual_dependency_tracking_scope(self._layer):
      var = self._layer.add_weight(
          name=name,
          shape=shape,
          dtype=dtype,
          initializer=initializer,
          trainable=self._trainable and trainable,
          use_resource=use_resource)
    if isinstance(var, tf.__internal__.tracking.Trackable):
      self._layer._track_trackable(var, feature_column.name + '/' + name)  # pylint: disable=protected-access
    self._cols_to_vars_map[feature_column][name] = var
    return var
@tf_contextlib.contextmanager
def no_manual_dependency_tracking_scope(obj):
  """Context manager that switches off manual dependency tracking on `obj`.

  Within the scope `obj._manual_tracking` is forced to False, so library
  calls (e.g. `add_weight`) do not register trackables on their own and the
  caller can do its own `_track_trackable` bookkeeping instead. The previous
  flag value is restored on exit, even when an exception escapes the scope.

  For example:

  class TestLayer(tf.keras.Layer):
    def build():
      with no_manual_dependency_tracking_scope(self):
        var = self.add_weight("name1")  # Creates a var and doesn't track it
      self._track_trackable("name2", var)  # We track variable with name `name2`

  Args:
    obj: A trackable object.

  Yields:
    a scope in which the object doesn't track dependencies manually.
  """
  # pylint: disable=protected-access
  saved_flag = getattr(obj, '_manual_tracking', True)
  obj._manual_tracking = False
  try:
    yield
  finally:
    obj._manual_tracking = saved_flag
|
o insert concentration is
given -> returns mass of insert needed
Example Usage:
.. code-block:: python
from autoprotocol_utilities import ligation_insert_ng
from autoprotocol.unit import Unit
plasmid_size = 3000
plasmid_mass = Unit(100, 'ng')
insert_size = 48
ligation_insert_ng(plasmid_size, plasmid_mass, insert_size)
Returns:
.. code-block:: python
Unit(1.6, 'nanogram')
Parameters
----------
plasmid_size : int
Length of plasmid in bp.
insert_size: int
Length of insert in bp
plasmid_mass : str, Unit
Mass of plasmid in prefix-g
molar_ratio : int, float, string, optional
Ligation molar ratio of insert : vector. By default it is 1 : 1.
Generally ligations are tested at 1:3, 1:1, and 3:1
Returns
-------
insert_amount: Unit
Amount of insert solution needed in ng
Raises
------
ValueError
If wells are not of type list, WellGroup or Container
"""
# Check input types
if not isinstance(plasmid_size, int):
raise ValueError("Plasmid_size: must be an integer")
if not isinstance(insert_size, int):
raise ValueError("insert_size: must be an integer")
if type(molar_ratio) == str:
molar_ratio = float(
molar_ratio.split(":")[0]) / float(molar_ratio.split(":")[1])
if type(molar_ratio) not in (int, float):
raise ValueError(
"molar_ratio: must be an int, float, or string in the form "
"of int:int")
if isinstance(plasmid_mass, str):
plasmid_mass = Unit.fromstring(plasmid_mass)
if not (isinstance(plasmid_mass, Unit) and
str(plasmid_mass.dimensionality) == "[mass]"):
raise ValueError(
"Plasmid amount must be of type str or Unit in prefix-g")
length_ratio = float(insert_size) / float(plasmid_size)
plasmid_ng = plasmid_mass.to("ng")
insert_ng = plasmid_ng * length_ratio * molar_ratio
return insert_ng
def ligation_insert_volume(plasmid_size, plasmid_mass, insert_size,
                           insert_conc, ds=True, molar_ratio=1):
    """
    For the plasmid size, plasmid amount, insert size, insert concentration,
    and molar ratio given, return the volume of insert solution needed for
    ligation

    Different from ligation_insert_ng: insert concentration is given -> returns
    volume of insert solution needed

    Example Usage:

    .. code-block:: python

        from autoprotocol_utilities import ligation_insert_volume
        from autoprotocol_utilities import molar_to_mass_conc
        from autoprotocol.unit import Unit

        plasmid_size = 3000
        plasmid_mass = Unit(100, 'ng')
        insert_size = 48
        insert_conc = Unit(25, 'ng/uL')

        ligation_insert_volume(plasmid_size, plasmid_mass, insert_size,
                               insert_conc)

    Returns:

    .. code-block:: python

        Unit(0.064, 'microliter')

    Parameters
    ----------
    plasmid_size : int
        Length of plasmid in bp.
    plasmid_mass : str, Unit
        Mass of plasmid in prefix-g
    insert_size: int
        Length of insert in bp
    insert_conc: str, Unit
        Molar or mass concentration of insert
    ds: bool, optional
        True for dsDNA, False for ssDNA
    molar_ratio : int, float, string, optional
        Ligation molar ratio of insert : vector.
        Common ratios are 1:3, 1:1, and 3:1. 1:1 by default

    Returns
    -------
    insert_amount: Unit
        Volume of insert solution needed in uL

    Raises
    ------
    ValueError
        If any parameter is of an unsupported type or dimensionality
    """
    conc_dimension = ["[substance] / [length] ** 3", '[mass] / [length] ** 3']
    # Check input types
    if not isinstance(plasmid_size, int):
        raise ValueError("Plasmid_size: must be an integer")
    if isinstance(plasmid_mass, str):
        plasmid_mass = Unit.fromstring(plasmid_mass)
    # Bugfix: the whole conjunction must be negated (as in ligation_insert_ng).
    # Previously `not isinstance(...) and dim == "[mass]"` meant a non-Unit
    # mass raised AttributeError on `.dimensionality` and a Unit with a
    # non-mass dimensionality slipped through unvalidated.
    if not (isinstance(plasmid_mass, Unit) and
            str(plasmid_mass.dimensionality) == "[mass]"):
        raise ValueError(
            "Plasmid mass must be of type str or Unit in prefix-g")
    if not isinstance(insert_size, int):
        raise ValueError("insert_size: must be an integer")
    if isinstance(insert_conc, str):
        insert_conc = Unit.fromstring(insert_conc)
    if not (isinstance(insert_conc, Unit) and
            str(insert_conc.dimensionality) in conc_dimension):
        raise ValueError(
            "Plasmid concentration must be of type Unit in prefix-M or "
            "prefix-g / prefix-L ")
    if not isinstance(ds, bool):
        raise ValueError(
            "ds is of type %s, must be of type bool: True for dsDNA, "
            "False for ssDNA" % type(ds))
    if type(molar_ratio) == str:
        molar_ratio = float(
            molar_ratio.split(":")[0]) / float(molar_ratio.split(":")[1])
    if type(molar_ratio) not in (int, float):
        raise ValueError(
            "molar_ratio: must be an int, float, or string in the "
            "form of int:int")
    # Required insert mass scales with the length ratio and the molar ratio.
    len_ratio = float(insert_size) / float(plasmid_size)
    plasmid_ng = plasmid_mass.to("ng")
    insert_ng = plasmid_ng * len_ratio * molar_ratio
    # Convert concentration to ng/uL (molar concentrations go through
    # molar_to_mass_conc, which needs the insert length and strandedness).
    if str(insert_conc.dimensionality) == conc_dimension[0]:
        insert_conc = molar_to_mass_conc(insert_size, insert_conc, ds)
    else:
        insert_conc = insert_conc.to("ng/uL")
    insert_vol = insert_ng / insert_conc
    return insert_vol
def ligation_insert_amount(plasmid_size, plasmid_conc, plasmid_volume,
insert_size, insert_conc, ds=True, molar_ratio=1):
"""
For the plasmid size, plasmid concentration, insert size, insert
concentration, and molar ratio given,
return the volume of insert solution needed for ligation
Different from ligation_insert_volume: plasmid concentration and volume
are given instead of plasmid mass
Example Usage:
.. code-block:: python
from autoprotocol_utilities import ligation_insert_amount
from autoprotocol_utilities import molar_to_mass_conc
from autoprotocol.unit import Unit
plasmid_size = 2000
plasmid_conc = '1.5:uM'
plasmid_volume = Unit(10, 'uL')
insert_size = 25
insert_conc = Unit(10, 'ng/uL')
ligation_insert_amount(plasmid_size, plasmid_conc, plasmid_volume,
insert_size, insert_conc)
Returns:
.. code-block:: python
Unit(24.75, 'microliter')
Parameters
----------
plasmid_size : int
Length of plasmid in bp.
plasmid_conc : str, Unit
Molar or mass concentration of plasmid solution
plasmid_volume: str, Unit
Volume of plasmid solution in prefix-L
insert_size: int
Length of insert in bp
insert_conc : str, Unit
Molar or mass concentration of insert solution
ds: bool, optional
True for dsDNA, False for ssDNA
molar_ratio : int, float, string, optional
Ligation molar ratio of insert : vector.
Common ratios are 1:3, 1:1, and 3:1. 1:1 by default
Returns
-------
insert_amount: Unit
Volume of insert solution in uL
Raises
------
ValueError
If wells are not of type list, WellGroup or Container
"""
# Check input types
if not isinstance(plasmid_size, int):
raise ValueError("Plasmid_size: must be an integer")
if not isinstance(insert_size, int):
raise ValueError("insert_size: must be an integer")
if isinstance(plasmid_volume, str):
plasmid_volume = Unit.fromstring(plasmid_volume)
if not isinstance(plasmid_volume, Unit) or \
str(plasmid_volume.dimensionality) != "[length] ** 3":
raise ValueError(
"Volume of plasmid solution must be of type str or Unit")
conc_dimension = ["[substance] / [length] ** 3", '[mass] / [length] ** 3']
conc = [plasmid_conc, insert_conc]
size = [plasmid_size, insert_size]
for i in range(0, 2):
if isinstance |
"""
Revision ID: 0356_add_webautn_auth_type
Revises: 0355_add_webauthn_table
Create Date: 2021-05-13 12:42:45.190269
"""
from alembic import op |
# Alembic revision identifiers (note: 'webautn' typo is part of the recorded
# revision id and must not be corrected).
revision = '0356_add_webautn_auth_type'
down_revision = '0355_add_webauthn_table'
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Register the new auth type, then widen the users check constraint so
    # 'webauthn_auth' also satisfies "has email-style auth or a mobile number".
    op.execute("INSERT INTO auth_type VALUES ('webauthn_auth')")
    op.drop_constraint('ck_users_mobile_or_email_auth', 'users', type_=None, schema=None)
    # NOT VALID skips re-checking existing rows, keeping the migration fast.
    op.execute("""
        ALTER TABLE users ADD CONSTRAINT "ck_user_has_mobile_or_other_auth"
        CHECK (auth_type in ('email_auth', 'webauthn_auth') or mobile_number is not null)
        NOT VALID
    """)
    # ### end Alembic commands ###
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Move webauthn users back to sms auth before the type and the widened
    # constraint are removed, so the old constraint can hold again.
    op.execute("UPDATE users SET auth_type = 'sms_auth' WHERE auth_type = 'webauthn_auth'")
    op.execute("UPDATE invited_users SET auth_type = 'sms_auth' WHERE auth_type = 'webauthn_auth'")
    op.drop_constraint('ck_user_has_mobile_or_other_auth', 'users', type_=None, schema=None)
    op.execute("""
        ALTER TABLE users ADD CONSTRAINT "ck_users_mobile_or_email_auth"
        CHECK (auth_type = 'email_auth' or mobile_number is not null)
        NOT VALID
    """)
    op.execute("DELETE FROM auth_type WHERE name = 'webauthn_auth'")
    # ### end Alembic commands ###
|
import fileinput
import argparse
from astexport import __version__, __prog_name__
from astexport.parse import parse
from astexport.export import export_json
def create_parser():
    """Build the command line interface for the AST exporter."""
    description = "Python source code in, JSON AST out. (v{})".format(
        __version__
    )
    parser = argparse.ArgumentParser(prog=__prog_name__,
                                     description=description)
    parser.add_argument(
        "-i", "--input",
        default="-",
        help="file to read from or '-' to use standard input (default)")
    parser.add_argument(
        "-p", "--pretty",
        action="store_true",
        help="print indented JSON")
    parser.add_argument(
        "-v", "--version",
        action="store_true",
        help="print version and exit")
    return parser
def main():
    """Read source from stdin, parse and export the AST as JSON"""
    args = create_parser().parse_args()
    if args.version:
        print("{} version {}".format(__prog_name__, __version__))
        return
    # fileinput treats "-" as standard input, matching the -i default.
    source = "".join(fileinput.input(args.input))
    print(export_json(parse(source), args.pretty))
|
from copy import deepcopy
from distutils.spawn import find_executable
class Settings(object):
    """Holds the trickle bandwidth settings (upload limit in KBytes/s)."""

    # 0 means "no limit": the -u flag is omitted entirely.
    _upload_limit = 0

    def __init__(self, settings=None):
        """Optionally seed the limit from an object exposing `up_kbytes_sec`."""
        if settings:
            self._upload_limit = settings.up_kbytes_sec

    @property
    def upload_limit(self):
        """ Returns the value as required by the trickle command (i.e. in KBytes) """
        return self._upload_limit

    def upload_limit_in_kbytes(self, upload_limit):
        # None resets to "unlimited" (0), which disables the -u flag.
        self._upload_limit = upload_limit if upload_limit is not None else 0

    def to_argument_list(self):
        """
        converts the setting in a list as required by the trickle command

        The limit is rendered as a string so the list can be passed straight
        to subprocess.call, which rejects non-string arguments.
        """
        return ["-u", str(self._upload_limit)] if self._upload_limit != 0 else []
class TrickleBwShaper(object):
    """
    Helper class to handle trickle (http://linux.die.net/man/1/trickle) usage
    """

    _trickle_cmd = "trickle"

    def __init__(self, settings):
        """Locate the trickle binary and keep a private copy of `settings`."""
        self._settings = deepcopy(settings)
        self._trickle_cmd = find_executable("trickle")
        if self._trickle_cmd is None:
            raise RuntimeError("Couldn't find 'trickle' program")

    def wrap_call(self, call_cmd):
        """
        "wraps" the call_cmd so it can be executed by subprocess.call (and related flavors) as "args" argument
        :param call_cmd: original args like argument (string or sequence)
        :return: a sequence with the original command "executed" under trickle
        """
        # Bugfix: the old `isinstance(call_cmd, basestring)` raised NameError
        # on Python 3 (basestring no longer exists); `str` covers text input.
        if isinstance(call_cmd, str):
            call_cmd = [call_cmd]
        return [self._trickle_cmd, "-s"] + self._settings.to_argument_list() + list(call_cmd)
|
#!/usr/bin/env python
"""
simple example script for running notebooks and reporting exceptions.
Usage: `checkipnb.py foo.ipynb [bar.ipynb [...]]`
Each cell is submitted to the kernel, and checked for error | s.
"""
import os
import glob
from runipy.notebook_runner import NotebookRunner
from pyfolio.utils import pyfolio_root
from pyfolio.ipycompat import read as read_notebook
def test_nbs():
    """Execute every example notebook, failing on the first cell error."""
    pattern = os.path.join(pyfolio_root(), 'examples', '*.ipynb')
    for notebook_path in glob.glob(pattern):
        with open(notebook_path) as handle:
            notebook = read_notebook(handle, 'json')
        NotebookRunner(notebook).run_notebook(skip_exceptions=False)
|
ertEqual(value, context.exception.value)
    def test_value_unmatched_by_all_match_statements(self):
        # A length (2) that satisfies none of the predicates must raise
        # UnmatchedValueError carrying the original value.
        value = unittest.mock.Mock()
        value.__len__ = unittest.mock.Mock(return_value=2)
        with self.assertRaises(rail.UnmatchedValueError) as context:
            match = rail.match_length(
                (rail.eq(8), lambda _: unittest.mock.Mock()),
                (rail.gt(3), lambda _: unittest.mock.Mock())
            )
            match(value)
        self.assertEqual(value, context.exception.value)
    def test_value_matches_single_match_statement(self):
        # Exactly one predicate (eq(0)) matches length 0; its handler's
        # return value is the match result.
        expected_value = unittest.mock.Mock()
        match = rail.match_length(
            (rail.lt(0), lambda _: unittest.mock.Mock()),
            (rail.eq(0), lambda _: expected_value),
            (rail.gt(0), lambda _: unittest.mock.Mock())
        )
        value = unittest.mock.Mock()
        value.__len__ = unittest.mock.Mock(return_value=0)
        self.assertEqual(expected_value, match(value))
    def test_value_matches_multiple_match_statements(self):
        # When several predicates match (ge(0) and eq(0) both hold for
        # length 0), the first matching statement wins.
        expected_value = unittest.mock.Mock()
        match = rail.match_length(
            (rail.lt(0), lambda _: unittest.mock.Mock()),
            (rail.ge(0), lambda _: expected_value),
            (rail.eq(0), lambda _: unittest.mock.Mock())
        )
        value = unittest.mock.Mock()
        value.__len__ = unittest.mock.Mock(return_value=0)
        self.assertEqual(expected_value, match(value))
class TestPartial(unittest.TestCase):
    """Tests for rail.partial: curried application of positional, keyword,
    default, *args and **kwargs parameters."""

    def test_func_with_no_args(self):
        @rail.partial
        def func():
            return 'value'
        self.assertEqual('value', func())

    def test_func_with_single_arg(self):
        @rail.partial
        def func(arg):
            return arg
        value = unittest.mock.Mock()
        self.assertEqual(value, func(value))

    def test_func_with_multiple_args(self):
        @rail.partial
        def func(arg1, arg2, arg3):
            return arg1, arg2, arg3
        val1 = unittest.mock.Mock()
        val2 = unittest.mock.Mock()
        val3 = unittest.mock.Mock()
        # Every split of the argument list yields the same result.
        self.assertEqual((val1, val2, val3), func(val1, val2, val3))
        self.assertEqual((val1, val2, val3), func(val1)(val2, val3))
        self.assertEqual((val1, val2, val3), func(val1, val2)(val3))
        self.assertEqual((val1, val2, val3), func(val1)(val2)(val3))

    def test_func_with_arguments_applied_out_of_order(self):
        @rail.partial
        def func(arg1, arg2, arg3):
            return arg1, arg2, arg3
        val1 = unittest.mock.Mock()
        val2 = unittest.mock.Mock()
        val3 = unittest.mock.Mock()
        # Keyword arguments may be supplied before earlier positional ones.
        self.assertEqual((val1, val2, val3), func(arg2=val2)(val1, val3))
        self.assertEqual((val1, val2, val3), func(arg3=val3)(val1, val2))
        self.assertEqual(
            (val1, val2, val3), func(arg2=val2, arg3=val3)(val1)
        )
        self.assertEqual(
            (val1, val2, val3), func(arg3=val3)(arg2=val2)(val1)
        )
        self.assertEqual((val1, val2, val3), func(val1, arg3=val3)(val2))

    def test_func_with_default_arguments(self):
        @rail.partial
        def func(arg1, arg2, arg3='val3', arg4='val4'):
            return arg1, arg2, arg3, arg4
        val1 = unittest.mock.Mock()
        val2 = unittest.mock.Mock()
        val3 = unittest.mock.Mock()
        val4 = unittest.mock.Mock()
        self.assertEqual((val1, val2, 'val3', 'val4'), func(val1, val2))
        self.assertEqual((val1, val2, 'val3', 'val4'), func(val1)(val2))
        self.assertEqual(
            (val1, val2, val3, val4), func(val1, val2, val3, val4)
        )
        self.assertEqual(
            (val1, val2, val3, val4), func(val1)(val2, val3, val4)
        )
        self.assertEqual(
            (val1, val2, val3, val4), func(val1, arg3=val3)(val2, val4)
        )

    def test_func_with_default_arguments_only(self):
        @rail.partial
        def func(arg1='val1', arg2='val2'):
            return arg1, arg2
        val1 = unittest.mock.Mock()
        val2 = unittest.mock.Mock()
        self.assertEqual(('val1', 'val2'), func())
        self.assertEqual((val1, 'val2'), func(val1))
        self.assertEqual(('val1', val2), func(arg2=val2))
        self.assertEqual((val1, val2), func(val1, val2))

    def test_func_with_argument_list(self):
        @rail.partial
        def func(arg1, arg2, *args):
            return (arg1, arg2) + args
        val1 = unittest.mock.Mock()
        val2 = unittest.mock.Mock()
        val3 = unittest.mock.Mock()
        val4 = unittest.mock.Mock()
        self.assertEqual((val1, val2), func(val1, val2))
        self.assertEqual((val1, val2), func(val1)(val2))
        self.assertEqual(
            (val1, val2, val3, val4), func(val1, val2, val3, val4)
        )
        self.assertEqual(
            (val1, val2, val3, val4), func(val1)(val2, val3, val4)
        )

    def test_func_with_argument_list_only(self):
        @rail.partial
        def func(*args):
            return args
        val1 = unittest.mock.Mock()
        val2 = unittest.mock.Mock()
        self.assertEqual((), func())
        self.assertEqual((val1,), func(val1))
        self.assertEqual((val1, val2), func(val1, val2))

    def test_func_with_keyword_arguments(self):
        @rail.partial
        def func(arg1, arg2, **kwargs):
            return (arg1, arg2) + ((kwargs,) if kwargs else ())
        val1 = unittest.mock.Mock()
        val2 = unittest.mock.Mock()
        val3 = unittest.mock.Mock()
        val4 = unittest.mock.Mock()
        self.assertEqual((val1, val2), func(val1, val2))
        self.assertEqual((val1, val2), func(val1)(val2))
        self.assertEqual(
            (val1, val2, {'val3': val3, 'val4': val4}),
            func(val1, val2, val3=val3, val4=val4)
        )
        self.assertEqual(
            (val1, val2, {'val3': val3, 'val4': val4}),
            func(val1, val3=val3)(val2, val4=val4)
        )

    def test_func_with_keyword_arguments_only(self):
        @rail.partial
        def func(**kwargs):
            return kwargs
        val1 = unittest.mock.Mock()
        val2 = unittest.mock.Mock()
        self.assertEqual({}, func())
        self.assertEqual({'arg1': val1}, func(arg1=val1))
        self.assertEqual(
            {'arg1': val1, 'arg2': val2}, func(arg1=val1, arg2=val2)
        )

    def test_docstring_preserved(self):
        @rail.partial
        def func1(arg1, arg2):
            """Docstring for func"""
            return arg1, arg2
        self.assertEqual('Docstring for func', func1.__doc__)
        # Partial application must carry the docstring through.
        func2 = func1(unittest.mock.Mock())
        self.assertEqual('Docstring for func', func2.__doc__)
class TestCompose(unittest.TestCase):
    def test_compose_with_no_funcs(self):
        # compose() with no functions behaves as the identity function.
        func = rail.compose()
        value = unittest.mock.Mock()
        self.assertEqual(value, func(value))
    def test_compose_with_no_exception(self):
        # A single composed function's return value is passed straight through.
        expected_value = unittest.mock.Mock()
        func = rail.compose(
            lambda value: expected_value
        )
        self.assertEqual(expected_value, func(unittest.mock.Mock()))
    def test_compose_with_exception(self):
        # Exceptions raised inside a composed function propagate unchanged.
        with self.assertRaises(ValueError) as context:
            func = rail.compose(
                lambda value: rail.raise_(ValueError('exception'))
            )
            func(unittest.mock.Mock())
        self.assertEqual('exception', str(context.exception))
    def test_compose_with_multiple_funcs(self):
        # Functions apply left-to-right, each fed the previous return value.
        return_value1 = unittest.mock.Mock()
        return_value2 = unittest.mock.Mock()
        return_value3 = unittest.mock.Mock()
        func1 = unittest.mock.Mock(return_value=return_value1)
        func2 = unittest.mock.Mock(return_value=return_value2)
        func3 = unittest.mock.Mock(return_value=return_value3)
        func = rail.compose(
            func1,
            func2,
            func3
        )
        value = unittest.mock.Mock()
        self.assertEqual(return_value3, func(value))
        func1.assert_called_once_with(value)
        func2.assert_called_once_with(return_value1)
        func3.assert_called_once_with(return_value2)
class TestPipe(unittest.TestCase):
def test_pipe(self):
|
ad = 1.
# th | is statement will be i | gnored at the codegen
x = ad is None
|
import json
from django.utils import unittest
from django.test.client import RequestFactory
from formalizr.tests.views import SimpleFormView, SimpleCreateView, SimpleUpdateView
from formalizr.tests.models import SimpleModel
class AjaxFormViewTest(unittest.TestCase):
    """Exercises SimpleFormView with both plain and AJAX form posts.

    AJAX posts (flagged via the X-Requested-With header) are expected to
    receive JSON envelopes with a 'status' of 'redirect', 'error' or
    'success' instead of HTTP redirects or re-rendered HTML.
    """
    # View under test; subclasses substitute create/update views.
    view_class = SimpleFormView
    # Value posted in the form's 'value' field by every valid-form test.
    VALUE = 1
    def setUp(self):
        self.factory = RequestFactory()
        # Each test starts from an empty SimpleModel table.
        SimpleModel.objects.all().delete()
    def testRequest(self):
        """
        Posts valid form in normal way
        """
        data = {"value": AjaxFormViewTest.VALUE}
        request = self.factory.post('/', data)
        response = self.view_class.as_view()(request)
        # Non-AJAX success is a plain redirect to the view's success_url.
        self.assertEqual(302, response.status_code)
        self.assertEqual(self.view_class.success_url, response["location"])
    def testNotValid(self):
        """
        Posts not valid form in normal way
        """
        data = {}
        request = self.factory.post('/', data)
        response = self.view_class.as_view()(request)
        # Invalid non-AJAX post re-renders the form with field errors.
        self.assertEqual(200, response.status_code)
        self.assertIn("value", response.context_data["form"].errors)
    def testAjaxRequest(self):
        """
        Posts valid form through AJAX request.
        Response with redirect must be in JSON.
        """
        data = {"value": AjaxFormViewTest.VALUE}
        request = self.factory.post('/', data, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        response = self.view_class.as_view()(request)
        self.assertEqual(200, response.status_code)
        self.assertEqual('application/json', response['content-type'].split(';')[0])
        resp = json.loads(response.content)
        self.assertEqual("redirect", resp["status"])
        self.assertEqual(self.view_class.success_url, resp["location"])
        # Returned so subclasses can assert on the payload further.
        return resp
    def testAjaxNotValid(self):
        """
        Posts not valid form through AJAX request.
        Response with errors must be in JSON.
        """
        data = {}
        request = self.factory.post('/', data, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        response = self.view_class.as_view()(request)
        self.assertEqual(400, response.status_code)
        self.assertEqual('application/json', response['content-type'].split(';')[0])
        resp = json.loads(response.content)
        self.assertEqual("error", resp["status"])
        self.assertIn("value", resp["errors"])
        return resp
    def testAjaxResultRequest(self):
        """
        Posts valid form through AJAX request.
        Response with result must be in JSON.
        """
        # '_return': 'result' appears to ask the view for the result payload
        # instead of a redirect -- NOTE(review): semantics inferred from the
        # assertions below; confirm against the view implementation.
        data = {"value": AjaxFormViewTest.VALUE, "_return": "result"}
        request = self.factory.post('/', data, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        response = self.view_class.as_view()(request)
        self.assertEqual(200, response.status_code)
        self.assertEqual('application/json', response['content-type'].split(';')[0])
        resp = json.loads(response.content)
        self.assertEqual("success", resp["status"])
        return resp
class AjaxCreateViewTest(AjaxFormViewTest):
    """Repeats the form-view tests against SimpleCreateView, additionally
    verifying that each valid POST creates exactly one SimpleModel row."""
    view_class = SimpleCreateView

    def _rows_with_test_value(self):
        """Count SimpleModel rows holding the shared test VALUE."""
        return SimpleModel.objects.filter(value=AjaxFormViewTest.VALUE).count()

    def testRequest(self):
        self.assertEqual(0, self._rows_with_test_value())
        super(AjaxCreateViewTest, self).testRequest()
        self.assertEqual(1, self._rows_with_test_value())

    def testAjaxRequest(self):
        self.assertEqual(0, self._rows_with_test_value())
        super(AjaxCreateViewTest, self).testAjaxRequest()
        self.assertEqual(1, self._rows_with_test_value())

    def testAjaxResultRequest(self):
        self.assertEqual(0, self._rows_with_test_value())
        resp = super(AjaxCreateViewTest, self).testAjaxResultRequest()
        self.assertEqual(1, self._rows_with_test_value())
        # The JSON payload must expose the new object's primary key ...
        self.assertIn("pk", resp["object"])
        created = SimpleModel.objects.get(pk=resp["object"]["pk"])
        # ... and that object must carry the posted value.
        self.assertEqual(AjaxFormViewTest.VALUE, created.value)
class AjaxUpdateViewTest(AjaxCreateViewTest):
    """Runs the create-view checks against SimpleUpdateView.

    setUp seeds exactly one SimpleModel row for the update view to operate on.
    """
    view_class = SimpleUpdateView
    def setUp(self):
        super(AjaxUpdateViewTest, self).setUp()
        # NOTE(review): relies on a SimpleUpdateView.VALUE attribute (distinct
        # from the test classes' VALUE) -- confirm it exists in tests.views.
        SimpleModel.objects.filter(value=SimpleUpdateView.VALUE).delete()
        SimpleModel(value=SimpleUpdateView.VALUE).save()
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Created on 2016. 10. 11.
@author: "comfact"
'''
import yaml
from ansible.module_utils.basic import *
from acidi | py import deployACI
DOCUMENTATION = '''
---
module: acibuilder
version_added: historical
short_description: acidipy ansible module.
description:
- This is Acidipy Ansible Module named AciBuilder
options: {}
author: hyjang@cisco.com
'''
EXAMPLES = '''
# Test 'webservers' status
ansible webservers -m ping
'''
def main():
    """Ansible entry point: parse module parameters and deploy ACI config.

    Expects three required parameters, each a YAML document:
      Controller -- controller/APIC connection settings
      Option     -- deployment options
      Tenant     -- tenant definition(s)
    The result dict returned by deployACI() is handed to exit_json().
    """
    module = AnsibleModule(
        argument_spec=dict(
            Controller=dict(required=True),
            Option=dict(required=True),
            Tenant=dict(required=True)
        ),
        supports_check_mode=True
    )
    # safe_load: the parameters are plain data. yaml.load without an explicit
    # Loader can construct arbitrary Python objects from the input and is
    # deprecated (CVE-2017-18342 class of issue).
    ctrl = yaml.safe_load(module.params['Controller'])
    opts = yaml.safe_load(module.params['Option'])
    tnts = yaml.safe_load(module.params['Tenant'])
    desc = {'Controller': ctrl, 'Option': opts, 'Tenant': tnts}
    result = deployACI(desc)
    module.exit_json(**result)
# Only run when executed as a module/script, not on import.
if __name__ == '__main__':
    main()
|
lLayout_5.addLayout(self.horizontalLayout_21)
self.horizontalLayout_22 = QtGui.QHBoxLayout()
self.horizontalLayout_22.setObjectName(_fromUtf8("horizontalLayout_22"))
self.itemLabelTextBrowser_21 = QtGui.QTextBrowser(self.scrollAreaWidgetContents)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.itemLabelTextBrowser_21.sizePolicy().hasHeightForWidth())
self.itemLabelTextBrowser_21.setSizePolicy(sizePolicy)
self.itemLabelTextBrowser_21.setMinimumSize(QtCore.QSize(391, 31))
self.itemLabelTextBrowser_21.setMaximumSize(QtCore.QSize(1234, 31))
self.itemLabelTextBrowser_21.setObjectName(_fromUtf8("itemLabelTextBrowser_21"))
self.horizontalLayout_22.addWidget(self.itemLabelTextBrowser_21)
self.SendButton_21 = QtGui.QPushButton(self.scrollAreaWidgetContents)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.SendButton_21.sizePolicy().hasHeightForWidth())
self.SendButton_21.setSizePolicy(sizePolicy)
self.SendButton_21.setMinimumSize(QtCore.QSize(73, 32))
self.SendButton_21.setMaximumSize(QtCore.QSize(73, 32))
self.SendButton_21.setObjectName(_fromUtf8("SendButton_21"))
self.horizontalLayout_22.addWidget(self.SendButton_21)
self.verticalLayout_5.addLayout(self.horizontalLayout_22)
self.horizontalLayout_23 = QtGui.QHBoxLayout()
self.horizontalLayout_23.setObjectName(_fromUtf8("horizontalLayout_23"))
self.itemLabelTextBrowser_22 = QtGui.QTextBrowser(self.scrollAreaWidgetContents)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.itemLabelTextBrowser_22.sizePolicy().hasHeightForWidth())
self.itemLabelTextBrowser_22.setSizePolicy(sizePolicy)
self.itemLabelTextBrowser_22.setMinimumSize(QtCore.QSize(391, 31))
self.itemLabelTextBrowser_22.setMaximumSize(QtCore.QSize(1234, 31))
self.itemLabelTextBrowser_22.setObjectName(_fromUtf8("itemLabelTextBrowser_22"))
self.horizontalLayout_23.addWidget(self.itemLabelTextBrowser_22)
self.SendButton_22 = QtGui.QPushButton(self.scrollAreaWidgetContents)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.SendButton_22.sizePolicy().hasHeightForWidth())
self.SendButton_22.setSizePolicy(sizePolicy)
self.SendButton_22.setMinimumSize(QtCore.QSize(73, 32))
self.SendButton_22.setMaximumSize(QtCore.QSize(73, 32))
self.SendButton_22.setObjectName(_fromUtf8("SendButton_22"))
self.horizontalLayout_23.addWidget(self.SendButton_22)
self.verticalLayout_5.addLayout(self.horizontalLayout_23)
self.horizontalLayout_24 = QtGui.QHBoxLayout()
self.horizontalLayout_24.setObjectName(_fromUtf8("horizontalLayout_24"))
self.itemLabelTextBrowser_23 = QtGui.QTextBrowser(self.scrollAreaWidgetContents)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.itemLabelTextBrowser_23.sizePolicy().hasHeightForWidth())
self.itemLabelTextBrowser_23.setSizePolicy(sizePolicy)
self.itemLabelTextBrowser_23.setMinimumSize(QtCore.QSize(391, 31))
self.itemLabelTextBrowser_23.setMaximumSize(QtCore.QSize(1234, 31))
self.itemLabelTextBrowser_23.setObjectName(_fromUtf8("itemLabelTextBrowser_23"))
self.horizontalLayout_24.addWidget(self.itemLabelTextBrowser_23)
self.SendButton_23 = QtGui.QPushButton(self.scrollAreaWidgetContents)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.SendButton_23.sizePolicy().hasHeightForWidth())
self.SendButton_23.setSizePolicy(sizePolicy)
self.SendButton_23.setMinimumSize(QtCore.QSize(73, 32))
self.SendButton_23.setMaximumSize(QtCore.QSize(73, 32))
self.SendButton_23.setObjectName(_fromUtf8("SendButton_23"))
self.horizontalLayout_24.addWidget(self.SendButton_23)
self.verticalLayout_5.addLayout(self.horizontalLayout_24)
self.horizontalLayout_25 = QtGui.QHBoxLayout()
self.horizontalLayout_25.setObjectName(_fromUtf8("horizontalLayout_25"))
self.itemLabelTextBrowser_24 | = QtGui.QTextBrowser(self.scrollAreaWidgetContents)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.itemLabelTextBrowser_24.sizePolicy().hasHeightForWidth())
self.itemLabelTextBrowser_24.setSizePolicy(sizePolicy)
self.itemLabelTextBrowser_24.setMinimumSize(QtCore.QSize(391, 31))
| self.itemLabelTextBrowser_24.setMaximumSize(QtCore.QSize(1234, 31))
self.itemLabelTextBrowser_24.setObjectName(_fromUtf8("itemLabelTextBrowser_24"))
self.horizontalLayout_25.addWidget(self.itemLabelTextBrowser_24)
self.SendButton_24 = QtGui.QPushButton(self.scrollAreaWidgetContents)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.SendButton_24.sizePolicy().hasHeightForWidth())
self.SendButton_24.setSizePolicy(sizePolicy)
self.SendButton_24.setMinimumSize(QtCore.QSize(73, 32))
self.SendButton_24.setMaximumSize(QtCore.QSize(73, 32))
self.SendButton_24.setObjectName(_fromUtf8("SendButton_24"))
self.horizontalLayout_25.addWidget(self.SendButton_24)
self.verticalLayout_5.addLayout(self.horizontalLayout_25)
self.horizontalLayout_26 = QtGui.QHBoxLayout()
self.horizontalLayout_26.setObjectName(_fromUtf8("horizontalLayout_26"))
self.itemLabelTextBrowser_25 = QtGui.QTextBrowser(self.scrollAreaWidgetContents)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.itemLabelTextBrowser_25.sizePolicy().hasHeightForWidth())
self.itemLabelTextBrowser_25.setSizePolicy(sizePolicy)
self.itemLabelTextBrowser_25.setMinimumSize(QtCore.QSize(391, 31))
self.itemLabelTextBrowser_25.setMaximumSize(QtCore.QSize(1234, 31))
self.itemLabelTextBrowser_25.setObjectName(_fromUtf8("itemLabelTextBrowser_25"))
self.horizontalLayout_26.addWidget(self.itemLabelTextBrowser_25)
self.SendButton_25 = QtGui.QPushButton(self.scrollAreaWidgetContents)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.SendButton_25.sizePolicy().hasHeightForWidth())
self.SendButton_25.setSizePolicy(sizePolicy)
self.SendButton_25.setMinimumSize(QtCore.QSize(73, 32))
self.SendButton_25.setMaximumSize(QtCore.QSize(73, 32))
self.SendButton_25.setObjectName(_fromUtf8("SendButton_25"))
self.horizontalLayout_26.addWidget(self.SendButton_25)
self.verticalLayout_5.addLayout(self.horizontalLayout_26)
self.scrollArea.setWidget(self.scrollAreaWidgetContents)
self.verticalLayout_6.addWidget(self.scrollArea)
self.retranslateUi(GenericCommand |
# coding=utf-8
# Copyright 2017 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from contextlib import contextmanager
from textwrap import dedent
from pants.util.dirutil import safe_file_dump, safe_rmtree
from pants_test.pants_run_integration_test import PantsRunIntegrationTest, ensure_engine
SUBPROJ_SPEC = 'testprojects/src/python/subproject_test/'
SUBPROJ_ROOT = 'testprojects/src/python/subproject_test/subproject'
BUILD_FILES = {
'testprojects/src/python/subproject_test/BUILD':
"""
python_library(
dependencies = ['//testprojects/src/python/subproject_test/subproject/src/python:helpers'],
)
""",
'testprojects/src/python/subproject_test/subproject/BUILD':
"""
target(
name = 'local',
dependencies = [
':relative',
'//:absolute',
],
)
target(
name = 'relative',
)
target(
name = 'absolute',
)
""",
'testprojects/src/python/subproject_test/subproject/src/python/BUILD':
"""
python_library(
name = 'helpers',
dependencies = ['//src/python/helpershelpers'],
)
""",
'testprojects/src/python/subproject_test/subproject/src/python/helpershelpers/BUILD':
"""
python_library(
name = 'helpershelpers',
)
"""
}
"""
Test layout
-----------
testprojects/
src/
python/
subproject_test/
BUILD
subproject/
src/
python/
BUILD/
helpershelpers/
BUILD/
"""
@contextmanager
def harness():
    """Materialise the BUILD_FILES fixture on disk for the duration of a test."""
    try:
        for build_path in BUILD_FILES:
            safe_file_dump(build_path, dedent(BUILD_FILES[build_path]))
        yield
    finally:
        # Always remove the generated subproject, even if the test failed.
        safe_rmtree(SUBPROJ_SPEC)
class SubprojectIntegrationTest(PantsRunIntegrationTest):
    """Integration checks for the --subproject-roots option."""

    @ensure_engine
    def test_subproject_without_flag(self):
        """Without --subproject-roots, resolving dependencies that cross the
        subproject boundary must fail."""
        with harness():
            self.assert_failure(
                self.run_pants(['dependencies', SUBPROJ_SPEC]))

    @ensure_engine
    def test_subproject_with_flag(self):
        """With the subproject declared, dependency resolution succeeds both
        for the enclosing project and for addresses inside the subproject."""
        subproject_flag = '--subproject-roots={}'.format(SUBPROJ_ROOT)
        with harness():
            # Has dependencies below the subproject.
            self.assert_success(self.run_pants(
                [subproject_flag, 'dependencies', SUBPROJ_SPEC]))
            # A relative path at the root of the subproject.
            self.assert_success(self.run_pants(
                [subproject_flag, 'dependencies',
                 '{}:local'.format(SUBPROJ_ROOT)]))
|
#!/usr/bin/env python
##################################################
# Parallel MLMC: Config class #
# #
# Jun Nie #
# Last modification: 19-09-2017 #
############################## | ####################
import sys, os
import numpy as np
class Config:
    """Configuration for the FVM solver, the MLMC driver and parallelisation.

    TODO: read configuration parameters from ``config_file`` (the argument
    is currently accepted but ignored by :meth:`update`).
    """

    def __init__(self, config_file):
        # --- FVM solver ---
        self.DIM = 2
        self.ORDER = 1
        self.case = 'vayu_burgers'  # alternatives: 'vayu_ls89', 'su2_ls89'
        self.mesh_ncoarsest = 8 + 1
        self.mesh_nfinest = 128 + 1
        self.mesh_filename = (
            '/home/jun/vayu/TestMatrix/Burgers.Test/mesh/'
            'cartesian_tube_0009x0009x2.BlockMesh'
        )
        # --- MLMC ---
        self.eps = 0.
        self.alpha = 0.
        self.beta = 0.
        self.gamma = 0.
        self.L = 2                # highest level
        self.ML = 8               # number of samples on the finest level
        self.M = 2                # refinement factor
        self.SAMPLES_FACTOR = 1
        self.mlmc_convergence_test = True
        self.READ_NUMBER_OF_SAMPLES_FROM_FILE = False
        self.USE_OPTIMAL_NUMBER_OF_SAMPLES = False
        self.USE_EQUIDISTRIBUTED_NUMBER_OF_SAMPLES = True
        self.COMPUTE_IN_DIFFERENCE = True
        # --- quantity of interest ---
        self.STATS = 'MEAN_VAR'
        # --- parallelisation ---
        self.multi = 'mpi'    # 'mpi' for parallel, 'single' for serial
        self.MULTIN = 1       # processes per fvm solve, 1 or multiples of 2
        self.MULTIM = 4       # number of samplers (processor group)
        self.MULTI_CORES = 0
        # apply overrides from the config file (currently a no-op)
        self.update(config_file)

    def update(self, config_file):
        """Read *config_file* and override defaults (not implemented yet)."""
        pass
if __name__ == '__main__':
pass
|
########################################################################
# amara/xslt/expressions/avt.py
"""
Implementation of XSLT attribute value templates
"""
from amara.xpath import datatypes
from amara.xpath.expressions import expression
from amara.xslt import XsltError
from amara.xslt.expressions import _avt
_parse_avt = _avt.parser().parse
class avt_expression(expression):
    """XSLT attribute value template (AVT) expression (Python 2 code).

    Parses a string such as ``chapter{position()}`` into a %-style format
    string (``self._format``) plus the list of embedded XPath expressions
    (``self._args``) evaluated at runtime.
    """
    __slots__ = ('_format', '_args')
    def __init__(self, value):
        try:
            # parts is a list of unicode and/or parsed XPath
            parts = _parse_avt(value)
        except _avt.error, error:
            # NOTE(review): the parse error detail is discarded; only the
            # generic AVT_SYNTAX code is reported upward.
            raise XsltError(XsltError.AVT_SYNTAX)
        self._args = args = []
        for pos, part in enumerate(parts):
            if isinstance(part, unicode):
                # Literal text: escape '%' so later %-formatting is safe.
                if '%' in part:
                    parts[pos] = part.replace('%', '%%')
            else:
                # XPath part: becomes a %s placeholder, filled at evaluation.
                parts[pos] = u'%s'
                args.append(part)
        self._format = u''.join(parts)
        if not self._args:
            # use empty format args to force '%%' replacement
            self._format = datatypes.string(self._format % ())
        return
    def evaluate_as_string(self, context):
        """Evaluate each embedded XPath against *context* and splice the
        string results into the literal text."""
        if not self._args:
            # Pure-literal AVT was fully resolved in __init__.
            return self._format
        result = self._format % tuple(arg.evaluate_as_string(context)
                                      for arg in self._args)
        return datatypes.string(result)
    evaluate = evaluate_as_string
    def __str__(self):
        return '{' + self._format % tuple(self._args) + '}'
|
#!/usr/bin/env python
"""Setup script for pgoapi.

Reads install requirements directly from requirements.txt. The previous
implementation imported ``pip.req.parse_requirements``, a private pip API
that was removed in pip 10, which breaks installation on any modern pip.
"""
from distutils.core import setup


def _read_requirements(path="requirements.txt"):
    """Return requirement strings from *path*, skipping blanks and comments."""
    with open(path) as handle:
        lines = (line.strip() for line in handle)
        return [line for line in lines if line and not line.startswith("#")]


setup(name='pgoapi',
      version='1.0',
      url='https://github.com/tejado/pgoapi',
      packages=['pgoapi'],
      install_requires=_read_requirements())
|
from oracleplsqlsource import OraclePLSQLSourc | e
class OracleJavaSource(OraclePLSQLSource):
    """An Oracle Java source object, handled like PL/SQL source."""

    def __init__(self, name, source):
        """Remember the object *name*; delegate source handling to the base."""
        self.name = name
        OraclePLSQLSource.__init__(self, source)
|
from cupy import elementwise
# Identity kernel body, reused for type slots where the input passes
# through unchanged (e.g. absolute() on bool/unsigned types below).
_id = 'out0 = in0'
# TODO(okuta): Implement convolve
# Elementwise kernel behind clip(): out = min(a_max, max(a_min, a)).
_clip = elementwise.create_ufunc(
    'cupy_clip',
    ('???->?', 'bbb->b', 'BBB->B', 'hhh->h', 'HHH->H', 'iii->i', 'III->I',
     'lll->l', 'LLL->L', 'qqq->q', 'QQQ->Q', 'eee->e', 'fff->f', 'ddd->d'),
    'out0 = min(in2, max(in1, in0))')
def clip(a, a_min, a_max, out=None):
    '''Clips the values of an array to a given interval.

    This is equivalent to ``maximum(minimum(a, a_max), a_min)``, while this
    function is more efficient.

    Args:
        a (cupy.ndarray): The source array.
        a_min (scalar or cupy.ndarray): The left side of the interval.
        a_max (scalar or cupy.ndarray): The right side of the interval.
        out (cupy.ndarray): Output array.

    Returns:
        cupy.ndarray: Clipped array.

    .. seealso:: :func:`numpy.clip`

    '''
    # Single fused elementwise kernel instead of two passes (min then max).
    return _clip(a, a_min, a_max, out=out)
sqrt = elementwise.create_ufunc(
'cupy_sqrt',
# I think this order is a bug of NumPy, | though we select this "buggy"
# behavior for compatibility with NumPy.
('f->f', 'd->d', 'e->e'),
'out0 = sqrt(in0)',
doc='''Elementwise positive square-root function.
.. note::
This ufunc outputs float32 arrays for float16 arrays input by default as
well as NumPy 1.9. If you want to override this behavior, specify the
dtype argument explicitly, or use ``cupy.math.misc.sqrt_fixed`` instead.
.. seealso:: :data:`numpy.sqrt`
''')
# Fixed version of sqrt
sqrt_fixed = elementwise.create_ufunc(
'cupy_sqrt',
('e->e', 'f->f', 'd->d'),
'out0 = sqrt(in0)')
square = elementwise.create_ufunc(
'cupy_square',
('b->b', 'B->B', 'h->h', 'H->H', 'i->i', 'I->I', 'l->l', 'L->L', 'q->q',
'Q->Q', 'e->e', 'f->f', 'd->d'),
'out0 = in0 * in0',
doc='''Elementwise square function.
.. seealso:: :data:`numpy.square`
''')
absolute = elementwise.create_ufunc(
'cupy_absolute',
(('?->?', _id), 'b->b', ('B->B', _id), 'h->h', ('H->H', _id), 'i->i',
('I->I', _id), 'l->l', ('L->L', _id), 'q->q', ('Q->Q', _id),
('e->e', 'out0 = fabsf(in0)'),
('f->f', 'out0 = fabsf(in0)'),
('d->d', 'out0 = fabs(in0)')),
'out0 = in0 > 0 ? in0 : -in0',
doc='''Elementwise absolute value function.
.. seealso:: :data:`numpy.absolute`
''')
# TODO(beam2d): Implement it
# fabs
_unsigned_sign = 'out0 = in0 > 0'
sign = elementwise.create_ufunc(
'cupy_sign',
('b->b', ('B->B', _unsigned_sign), 'h->h', ('H->H', _unsigned_sign),
'i->i', ('I->I', _unsigned_sign), 'l->l', ('L->L', _unsigned_sign),
'q->q', ('Q->Q', _unsigned_sign), 'e->e', 'f->f', 'd->d'),
'out0 = (in0 > 0) - (in0 < 0)',
doc='''Elementwise sign function.
It returns -1, 0, or 1 depending on the sign of the input.
.. seealso:: :data:`numpy.sign`
''')
_float_maximum = \
'out0 = isnan(in0) ? in0 : isnan(in1) ? in1 : max(in0, in1)'
maximum = elementwise.create_ufunc(
'cupy_maximum',
('??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
'LL->L', 'qq->q', 'QQ->Q',
('ee->e', _float_maximum),
('ff->f', _float_maximum),
('dd->d', _float_maximum)),
'out0 = max(in0, in1)',
doc='''Takes the maximum of two arrays elementwise.
If NaN appears, it returns the NaN.
.. seealso:: :data:`numpy.maximum`
''')
_float_minimum = \
'out0 = isnan(in0) ? in0 : isnan(in1) ? in1 : min(in0, in1)'
minimum = elementwise.create_ufunc(
'cupy_minimum',
('??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
'LL->L', 'qq->q', 'QQ->Q',
('ee->e', _float_minimum),
('ff->f', _float_minimum),
('dd->d', _float_minimum)),
'out0 = min(in0, in1)',
doc='''Takes the minimum of two arrays elementwise.
If NaN appears, it returns the NaN.
.. seealso:: :data:`numpy.minimum`
''')
fmax = elementwise.create_ufunc(
'cupy_fmax',
('??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
'LL->L', 'qq->q', 'QQ->Q', 'ee->e', 'ff->f', 'dd->d'),
'out0 = max(in0, in1)',
doc='''Takes the maximum of two arrays elementwise.
If NaN appears, it returns the other operand.
.. seealso:: :data:`numpy.fmax`
''')
# NaN-ignoring minimum: unlike minimum(), min() in CUDA returns the other
# operand when one input is NaN, matching numpy.fmin semantics.
fmin = elementwise.create_ufunc(
    'cupy_fmin',
    ('??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
     'LL->L', 'qq->q', 'QQ->Q', 'ee->e', 'ff->f', 'dd->d'),
    'out0 = min(in0, in1)',
    doc='''Takes the minimum of two arrays elementwise.

    If NaN appears, it returns the other operand.

    .. seealso:: :data:`numpy.fmin`

    ''')
# TODO(okuta): Implement nan_to_num
# TODO(okuta): Implement real_if_close
# TODO(okuta): Implement interp
|
""" | Configuration values.
"""
# Command paths (you can change these to e.g. absolute paths in calling code)
CMD_FLAC = "flac"
CMD_FFMPEG = "ffmpeg"
CMD_IM_MONTAGE = "montag | e"
CMD_IM_MOGRIFY = "mogrify"
CMD_IM_CONVERT = "convert"
|
# python3
class HeapBuilder:
    """Turns an integer array into a binary min-heap, recording the swaps.

    Reads ``n`` and the array from stdin, converts the array into a min-heap
    in place, and prints the sequence of index swaps that was applied.
    """

    def __init__(self):
        self._swaps = []  # (i, j) index pairs, in application order
        self._data = []

    def ReadData(self):
        """Read the array size and its elements from standard input."""
        n = int(input())
        self._data = [int(s) for s in input().split()]
        assert n == len(self._data)

    def WriteResponse(self):
        """Print the number of swaps, then one 'i j' pair per line."""
        print(len(self._swaps))
        for swap in self._swaps:
            print(swap[0], swap[1])

    def _sift_down(self, i):
        """Restore the min-heap property below index *i*, logging each swap."""
        data = self._data
        n = len(data)
        while True:
            smallest = i
            left, right = 2 * i + 1, 2 * i + 2
            if left < n and data[left] < data[smallest]:
                smallest = left
            if right < n and data[right] < data[smallest]:
                smallest = right
            if smallest == i:
                return
            self._swaps.append((i, smallest))
            data[i], data[smallest] = data[smallest], data[i]
            i = smallest

    def GenerateSwaps(self):
        """Build a min-heap with O(n) bottom-up sift-downs.

        Replaces the previous selection-sort placeholder, which produced a
        quadratic number of swaps (see the original TODO); this yields O(n)
        swaps in total.
        """
        # Leaves are trivially heaps; sift down each internal node, bottom-up.
        for i in range(len(self._data) // 2 - 1, -1, -1):
            self._sift_down(i)

    def Solve(self):
        self.ReadData()
        self.GenerateSwaps()
        self.WriteResponse()
if __name__ == '__main__':
    # Solve a single instance read from stdin.
    HeapBuilder().Solve()
|
# 2015-04-01 csv|xml
# cities.csv obtained from "Gestió agrupada impost 1.5%"
class MunicipalTaxesInvoicingReport:
def __init__(self, cursor, start_date, end_date, tax, aggregated):
self.cursor = cursor
self.start_date = start_date
self.end_date = end_date
self.tax = tax
self.aggregated = aggregated
pass
def by_city(self, ids, file_type):
    """Invoicing totals per municipality and quarter, rendered as a report.

    Sums provider lines (tipus 'subtotal_xml' on in_* invoices) and client
    lines ('energia'/'reactiva'/'potencia' on out_* invoices) grouped by
    (municipality, year, quarter), then delegates to build_report().

    NOTE(review): ids and the period dates are spliced into the SQL with
    str.format rather than bound as parameters. The ids pass through str()
    and the dates come from this object, but confirm callers never feed
    user-controlled values here (SQL-injection surface otherwise).
    NOTE(review): the params dict passed to execute() has no matching
    placeholders in the query; it appears to exist so the DB driver collapses
    the '%%' escapes in the LIKE patterns to '%'. Verify against the driver
    before removing it.
    """
    sql = '''
        SELECT
            municipi.name AS name,
            municipi.ine AS ine,
            EXTRACT(YEAR FROM invoice.date_invoice) AS invoice_year,
            EXTRACT(QUARTER FROM invoice.date_invoice) AS invoice_quarter,
            COALESCE(SUM(invoice_line.price_subtotal::float*(
                CASE
                    WHEN factura_line.tipus IN ('subtotal_xml') AND invoice.type='in_invoice' THEN 1
                    WHEN factura_line.tipus IN ('subtotal_xml') AND invoice.type='in_refund' THEN -1
                    ELSE 0
                END
            )),0.0) AS provider_amount,
            COALESCE(SUM(invoice_line.price_subtotal::float*(
                CASE
                    WHEN factura_line.tipus IN ('energia','reactiva','potencia') AND invoice.type='out_invoice' THEN 1
                    WHEN factura_line.tipus IN ('energia','reactiva','potencia') AND invoice.type='out_refund' THEN -1
                    ELSE 0
                END
            )),0.0) AS client_amount
        FROM giscedata_facturacio_factura_linia AS factura_line
        LEFT JOIN account_invoice_line AS invoice_line ON invoice_line.id = factura_line.invoice_line_id
        LEFT JOIN giscedata_facturacio_factura AS factura ON factura.id = factura_line.factura_id
        LEFT JOIN account_invoice AS invoice ON invoice.id = factura.invoice_id
        LEFT JOIN giscedata_polissa AS polissa ON polissa.id = factura.polissa_id
        LEFT JOIN giscedata_cups_ps AS cups ON cups.id = polissa.cups
        LEFT JOIN res_municipi as municipi on municipi.id = cups.id_municipi
        WHERE municipi.ID IN ({0})
        AND ((invoice.date_invoice >= '{1}') AND (invoice.date_invoice < '{2}'))
        AND (((invoice.type LIKE 'out_%%')
        AND ((invoice.state = 'open') OR (invoice.state = 'paid')))
        OR (invoice.type LIKE 'in_%%'))
        GROUP BY 1,2,3,4
        ORDER BY 1,2,3,4
    '''.format(','.join(map(str, ids)), self.start_date, self.end_date)
    self.cursor.execute(sql, {'start_date': self.start_date,
                              'end_date': self.end_date,
                              'ids': ids})
    return self.build_report(self.cursor.fetchall(), file_type)
def build_report(self, records, file_type):
invoicing_by_name = {}
invoicing_by_date = {}
ines = {}
for record in records:
name = record[0]
ine = record[1]
year = record[2]
quart | er = record[3]
invoicing_by_name.setdefault(name, {'total_provider_amount': 0, 'total_client_amount': 0, 'quarters': []})
invoicing_by_name[name]['total_provider_amount'] += record[4]
invoicing_by_name[name]['total_client_amount'] += record[5]
invoicing_by_name[name]['quarters'].append({
'year': record[2],
'quarter': record[3],
'provider_am | ount': record[4],
'client_amount': record[5]
})
invoicing_by_date.setdefault(year, {})
invoicing_by_date[year].setdefault(quarter, {'total_provider_amount': 0, 'total_client_amount': 0})
invoicing_by_date[year][quarter]['total_provider_amount'] += record[4]
invoicing_by_date[year][quarter]['total_client_amount'] += record[5]
ines.setdefault(name, ine)
if file_type=='csv':
## CSV
csv_doc=StringIO.StringIO()
writer_report = csv.writer(csv_doc)
for name,v in sorted(invoicing_by_name.items()):
writer_report.writerow([name])
writer_report.writerow(['Año', 'Trimestre', 'Pagos a distribuidora', 'Facturas a clientes'])
for quarter in v['quarters']:
writer_report.writerow([
quarter['year'],
quarter['quarter'],
round(quarter['provider_amount'], 2),
round(quarter['client_amount'], 2)
])
writer_report.writerow([])
writer_report.writerow(['', '', '', '', 'Ingresos brutos', 'Tasa', 'Total'])
diff = v['total_client_amount'] - v['total_provider_amount']
writer_report.writerow(['Total',
'',
round(v['total_provider_amount'], 2),
round(v['total_client_amount'], 2),
round(diff, 2),
self.tax,
round(diff*(self.tax/100.0), 2)
])
writer_report.writerow([])
writer_report.writerow([])
writer_report.writerow(['Año', 'Trimestre', 'Pagos a distribuidora', 'Factuas a clientes', 'Ingresos',
'Tasta', 'Total'])
for year, v in sorted(invoicing_by_date.items()):
for quarter, v in sorted(invoicing_by_date[year].items()):
diff = v['total_client_amount'] - v['total_provider_amount']
writer_report.writerow([
year,
quarter,
round(v['total_provider_amount'], 2),
round(v['total_client_amount'], 2),
round(diff, 2),
self.tax,
round(diff*(self.tax/100.0), 2)
])
doc = csv_doc.getvalue()
if file_type == 'xml':
## XML
_empresa = Element("EMPRESA")
_datos = SubElement(_empresa, 'DATOS')
_nombre = SubElement(_datos, 'NOMBRE')
_nombre.text = "Som Energia SCCL"
_nif = SubElement(_datos, 'NIF')
_nif.text = "F55091367"
_municipios = SubElement(_empresa, 'MUNICIPIOS')
for name,v in sorted(invoicing_by_name.items()):
for quarter in v['quarters']:
_municipio = SubElement(_municipios, 'MUNICIPIO')
_ine = SubElement(_municipio, 'INEMUNICIPIO')
_ine.text = ines[name]
_ejercicio = SubElement(_municipio, 'EJERCICIO')
_ejercicio.text = str(int(quarter['year']))
_periodo = SubElement(_municipio, 'PERIODO')
_periodo.text = str(int(quarter['quarter']))
_fechaalta = SubElement(_municipio, 'FECHAALTA')
_fechabaja = SubElement(_municipio, 'FECHABAJA')
_tiposumin = SubElement(_municipio, 'TIPOSUMIN')
_tiposumin.text = '2'
_descsum = SubElement(_municipio, 'DESCSUM')
_descsum.text = 'Electricidad'
_descperi = SubElement(_municipio, 'DESCPERI')
_facturacion = SubElement(_municipio, 'FACTURACION')
_facturacion.text = '%0.2f' % quarter['client_amount']
_derechosacceso = SubElement(_municipio, 'DERECHOSACCESO')
_derechosacceso.text = '%0.2f' % quarter['provider_amount']
_compensacion = SubElement(_municipio, 'COMPENSACION')
_compensacion.text = '0.00'
_baseimponible = SubElement(_municipio, 'BASEIMPONIBLE')
diff = (quarter['client_amount'] - quarter['provider_amount'])
_baseimponible.text = '%0.2f' % diff
|
import android
import android.activity
from os import unlink
from jnius import autoclass, cast
from plyer_lach.facades import Camera
from plyer_lach.platforms.android import activity
Intent = autoclass('android.content.Intent')
PythonActivity = autoclass('org.renpy.android.PythonActivity')
MediaStore = autoclass('android.provider.MediaStore')
Uri = autoclass('android.net.Uri')
class AndroidCamera(Camera):
    """Camera facade backed by the stock Android camera application.

    A capture fires an ACTION_IMAGE_CAPTURE / ACTION_VIDEO_CAPTURE intent
    pointed at ``filename``; the bound activity-result callback then hands
    the file to ``on_complete`` and deletes it if that callback returns true.
    """

    # Arbitrary request code used to recognise our own activity results.
    _REQUEST_CODE = 0x123

    def _take_picture(self, on_complete, filename=None):
        """Launch the system camera app to capture a still picture."""
        self._start_capture(MediaStore.ACTION_IMAGE_CAPTURE, on_complete,
                            filename)

    def _take_video(self, on_complete, filename=None):
        """Launch the system camera app to record a video."""
        # EXTRA_VIDEO_QUALITY: 0 = low (MMS-sized), 1 = high quality.
        self._start_capture(MediaStore.ACTION_VIDEO_CAPTURE, on_complete,
                            filename,
                            extras={MediaStore.EXTRA_VIDEO_QUALITY: 1})

    def _start_capture(self, action, on_complete, filename, extras=None):
        """Shared launcher for picture/video capture intents.

        Factors out the duplicated bind/intent/startActivity sequence the
        two public entry points previously repeated verbatim.
        """
        assert on_complete is not None
        self.on_complete = on_complete
        self.filename = filename
        # Unbind first so repeated captures never register the callback twice.
        android.activity.unbind(on_activity_result=self._on_activity_result)
        android.activity.bind(on_activity_result=self._on_activity_result)
        intent = Intent(action)
        uri = Uri.parse('file://' + filename)
        intent.putExtra(MediaStore.EXTRA_OUTPUT,
                        cast('android.os.Parcelable', uri))
        for key, value in (extras or {}).items():
            intent.putExtra(key, value)
        activity.startActivityForResult(intent, self._REQUEST_CODE)

    def _on_activity_result(self, requestCode, resultCode, intent):
        """Dispatch the camera app's result back to the stored callback."""
        if requestCode != self._REQUEST_CODE:
            return
        android.activity.unbind(on_activity_result=self._on_activity_result)
        # A truthy return from on_complete means the file is no longer needed.
        if self.on_complete(self.filename):
            self._unlink(self.filename)

    def _unlink(self, fn):
        """Best-effort delete: capture must never fail on cleanup errors."""
        try:
            unlink(fn)
        except Exception:
            # Deliberately broad (was a bare except): missing file, bad path
            # or a None filename are all non-fatal here.
            pass
def instance():
    """Return the camera implementation for this platform."""
    return AndroidCamera()
|
#!/usr/bin/env python
from __future__ | import absolute_import
from __future__ import division
from | __future__ import print_function
from __future__ import unicode_literals
'fbcode_builder steps to build Facebook Thrift'
import specs.fbthrift as fbthrift
def fbcode_builder_spec(builder):
    """Build spec for fbthrift: nothing beyond its dependency spec."""
    spec = {'depends_on': [fbthrift]}
    return spec
# Consumed by fbcode_builder to locate the GitHub project and its build spec.
config = {
    'github_project': 'facebook/fbthrift',
    'fbcode_builder_spec': fbcode_builder_spec,
}
|
from setuptools import setup
import pybvc
# Package metadata; requires README.rst to exist in the build directory.
setup(
    name='pybvc',
    version=pybvc.__version__,
    description='A python library for programming your network via the Brocade Vyatta Controller (BVC)',
    long_description=open('README.rst').read(),
    author='Elbrys Networks',
    author_email='jeb@elbrys.com',
    url='https://github.com/brcdcomm/pybvc',
    packages=['pybvc',
              'pybvc.common',
              'pybvc.controller',
              'pybvc.netconfdev',
              'pybvc.netconfdev.vrouter',
              'pybvc.netconfdev.vdx',
              'pybvc.openflowdev'
              ],
    install_requires=['requests>=1.0.0',
                      'PyYAML',
                      'xmltodict'],
    zip_safe=False,
    include_package_data=True,
    platforms='any',
    license='BSD',
    keywords='sdn nfv bvc brocade vyatta controller network vrouter',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: System Administrators',
        'Topic :: System :: Networking',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
    ]
)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.