code
stringlengths
1
199k
import sys

import click

from solar.core import testing
from solar.core import resource
from solar.system_log import change
from solar.system_log import operations
from solar.system_log import data
from solar.cli.uids_history import get_uid, remember_uid, SOLARUID


@click.group()
def changes():
    """Inspect, stage, process and commit resource changes."""
    pass


@changes.command()
def validate():
    """Validate all resources; exit with status 1 when any are invalid."""
    errors = resource.validate_resources()
    if errors:
        for r, error in errors:
            # click.echo instead of the original Python 2 print statement,
            # for consistency with every other command in this group.
            click.echo('ERROR: %s: %s' % (r.name, error))
        sys.exit(1)


@changes.command()
@click.option('-d', default=False, is_flag=True)
def stage(d):
    """List staged changes; with -d also print per-item details."""
    log = list(change.stage_changes().reverse())
    for item in log:
        click.echo(item)
        if d:
            for line in item.details:
                click.echo(' ' * 4 + line)
    if not log:
        click.echo('No changes')


@changes.command(name='staged-item')
@click.argument('log_action')
def staged_item(log_action):
    """Show a single staged change, with details, by its log action."""
    item = data.SL().get(log_action)
    if not item:
        click.echo('No staged changes for {}'.format(log_action))
    else:
        click.echo(item)
        for line in item.details:
            click.echo(' ' * 4 + line)


@changes.command()
def process():
    """Send staged changes to orchestration and remember the run uid."""
    uid = change.send_to_orchestration()
    remember_uid(uid)
    click.echo(uid)


@changes.command()
@click.argument('uid', type=SOLARUID)
def commit(uid):
    """Commit the orchestration run identified by uid."""
    operations.commit(uid)


@changes.command()
@click.option('-n', default=5)
def history(n):
    """Show the last n committed changes, oldest first."""
    committed = list(data.CL().collection(n))
    if not committed:
        click.echo('No history.')
        return
    committed.reverse()
    click.echo(committed)


@changes.command()
def test():
    """Run all resource tests; print a coloured OK/ERROR line per test."""
    results = testing.test_all()
    for name, result in results.items():
        msg = '[{status}] {name} {message}'
        kwargs = {
            'name': name,
            'message': '',
            'status': 'OK',
        }
        if result['status'] == 'ok':
            kwargs['status'] = click.style('OK', fg='green')
        else:
            kwargs['status'] = click.style('ERROR', fg='red')
            kwargs['message'] = result['message']
        click.echo(msg.format(**kwargs))


@changes.command(name='clean-history')
def clean_history():
    """Drop both the commit log and the commit data store."""
    data.CL().clean()
    data.CD().clean()
from __future__ import with_statement from os.path import basename, splitext import codecs from robot.htmldata import HtmlFileWriter, ModelWriter, LOG, REPORT from robot.utils import utf8open from .jswriter import JsResultWriter, SplitLogWriter class _LogReportWriter(object): def __init__(self, js_model): self._js_model = js_model def _write_file(self, path, config, template): outfile = codecs.open(path, 'wb', encoding='UTF-8')\ if isinstance(path, basestring) else path # unit test hook with outfile: model_writer = RobotModelWriter(outfile, self._js_model, config) writer = HtmlFileWriter(outfile, model_writer) writer.write(template) class RobotModelWriter(ModelWriter): def __init__(self, output, model, config): self._output = output self._model = model self._config = config def write(self, line): JsResultWriter(self._output).write(self._model, self._config) class LogWriter(_LogReportWriter): def write(self, path, config): self._write_file(path, config, LOG) if self._js_model.split_results: self._write_split_logs(splitext(path)[0]) def _write_split_logs(self, base): for index, (keywords, strings) in enumerate(self._js_model.split_results): index += 1 # enumerate accepts start index only in Py 2.6+ self._write_split_log(index, keywords, strings, '%s-%d.js' % (base, index)) def _write_split_log(self, index, keywords, strings, path): with utf8open(path, 'wb') as outfile: writer = SplitLogWriter(outfile) writer.write(keywords, strings, index, basename(path)) class ReportWriter(_LogReportWriter): def write(self, path, config): self._write_file(path, config, REPORT)
""" Contains the logic for `aq del cluster systemlist --hostname`. """ from aquilon.aqdb.model import SystemList from aquilon.worker.broker import BrokerCommand # pylint: disable=W0611 from aquilon.worker.commands.del_cluster_member_priority import \ CommandDelClusterMemberPriority class CommandDelClusterSystemList(CommandDelClusterMemberPriority): required_parameters = ["cluster", "hostname"] resource_class = SystemList def render(self, hostname, **kwargs): super(CommandDelClusterSystemList, self).render(hostname=None, metacluster=None, comments=None, member=hostname, **kwargs)
from __future__ import with_statement import argparse import sys import logging import urllib, urllib2 import json from fabric.operations import local from fabric.api import hide import yaml VERSION = "0.0.1" SERVER_FILE = ".server" logging.basicConfig(stream=sys.stdout, level=logging.DEBUG) def get_repo_info(): with hide('commands'): f_out = local('git remote -v|grep push|grep origin', capture = True) remote_git = "" start = f_out.find("http") end = f_out.find(".git") remote_git = f_out[start:end] repo_name = remote_git[remote_git.rfind('/')+1:] return repo_name def get_current_branch(): with hide('commands'): f_out = local('git branch', capture = True) start = f_out.find('* ') end = f_out.find('\n') branch = f_out[start+2:end] return branch def get_last_hash(): with hide('commands'): f_out = local('git rev-parse HEAD', capture = True) start = 0 end = f_out.find('\n') branch = f_out[start:end] return branch class Server(object): def __init__(self): try: with open(".server") as f: self.address = f.readlines()[0] self.repo = get_repo_info() self.current_branch = get_current_branch() ok = self.post_to_server('info') logging.debug("endpoint: %s" % (ok)) except IOError: self.address = None def parse_yaml(self,yaml_file): try: data = yaml.load(yaml_file.read()) if data is not None: return data return False except Exception as e: logging.error(e) return False """ Run a normal client deployment """ def deploy(self, git_hash = None): if git_hash is None: git_hash = get_last_hash() deploy = {'hash': git_hash, 'branch': get_current_branch()} req = self.post_to_server("deploy", deploy) result = json.loads(req) self.parse_server_response(result) def parse_server_response(self,result): if result['status'] == "ok": print result['msg'] else: logging.error(result) print ("Error occured: %s" % (result['msg'])) sys.exit() """" Sends a new init configuration for deployment on a branch and current repo """ def init_config(self, config_file): conf = 
{'conf':self.parse_yaml(config_file)} if not conf['conf']: print "Your config file could not be parsed" sys.exit() req = self.post_to_server("init.config", conf) result = json.loads(req) self.parse_server_response(result) """ Creates the base url for the api """ def get_base_url(self, command = None): return { 'info': 'http://%s' % (self.address), 'init.config': 'http://%s/api/%s/init/' % (self.address, self.repo), 'deploy': 'http://%s/api/%s/deploy/' % (self.address, self.repo), }.get(command, 'http://%s/api/%s' % (self.address, self.repo)) """ Post requests to deploy server """ def post_to_server(self, command = None, data_dict = None): if self.address is not None: url_2 = self.get_base_url(command) if data_dict is not None: logging.debug("sending post data: %s to: %s" % (data_dict, url_2)) data = urllib.urlencode(data_dict) req = urllib2.Request(url_2, data) try: rsp = urllib2.urlopen(req) except urllib2.URLError, e: logging.error("Error 2: couldn't communicate with the server on: %s" % (url_2)) sys.exit() else: req = urllib2.Request(url_2) try: logging.debug("executing get on: %s" % (url_2)) rsp = urllib2.urlopen(req) except urllib2.URLError, e: logging.error("Error 3: couldn't communicate with the server on: %s" % (url_2)) sys.exit() return rsp.read() else: logging.error("Error 4: Can't comunicate with the server") sys.exit() class DeployAction(argparse.Action): def __call__(self, parser, namespace, values, option_string=None): logging.debug('DeployAction %r %r %r' % (namespace, values, option_string)) setattr(namespace, self.dest, values) if values is None: server.deploy() else: server.deploy(values) """ This will read a local config yaml which will be sent to the server If the server will have this repo and branch already configured an error will be trigered. 
This method can't be used to overwrite config data """ class InitAction(argparse.Action): def __call__(self, parser, namespace, values, option_string=None): logging.debug('%r %r %r' % (namespace, values, option_string)) setattr(namespace, self.dest, values) server.init_config(values) # TODO verify with the server if exists already an initiated config for this repo # if exists an error will be displayed class SetupAction(argparse.Action): def __call__(self, parser, namespace, values, option_string=None): logging.debug('%r %r %r' % (namespace, values, option_string)) setattr(namespace, self.dest, values) server = values # write hidden file with the server address f = open(SERVER_FILE,'w') f.write('%s' %(server)) # python will convert \n to os.linesep f.close() server = Server() parser = argparse.ArgumentParser(description = 'Nursery deplkoy system') parser.add_argument('-v','--version', action = 'version', version = '%(prog)s '+VERSION) parser.add_argument('-s','--setup', nargs='?', metavar='Server', action = SetupAction,help = 'setup a nursery deploy system, you need to specify the nursery server endpoint like: http://www.my-nursery-server.com') parser.add_argument('-c','--config', metavar='config.yaml', action = InitAction, type = file,help = 'init a new repo deployment with config file you specify') parser.add_argument('-d','--deploy',nargs='?', metavar='hash', action = DeployAction, type = file,help = 'create a new async deploy') parser.add_argument('-i','--info', action='store_true', help = 'some info Nursery Client knows about') if not len(sys.argv) > 1: parser.print_help() else: args = parser.parse_args() logging.debug(args) if args.info: if server.address is not None: print ("remote deploy server: %s" % server.address) print ("repo: %s" % server.repo) print ("branch: %s" % server.current_branch) # read config yaml and send it to the server - file sent - ok # read the response and show it - ok # read the file on the server - ok # on the server store the git 
deploy command so it can be processed assync # 3 way to deploy git, client, forced # - client # client -> git deploy (last hash) -> ok # store in db the command if allow_multiple_deploy & stuff # parse the command assync # build file list # get instances # get scripts # make the deployment # on the server we need to modelate this yaml file to the db # find a good way to insert instances in db # filter a deployment based on touced files # make a deployment
from typing import Dict, List, Optional

from ray.tune.suggest.suggestion import Searcher, ConcurrencyLimiter
from ray.tune.suggest.search_generator import SearchGenerator
from ray.tune.trial import Trial


class _MockSearcher(Searcher):
    """Searcher stub that records every callback it receives."""

    def __init__(self, **kwargs):
        self.live_trials = {}
        self.counter = {"result": 0, "complete": 0}
        self.final_results = []
        self.stall = False
        self.results = []
        super(_MockSearcher, self).__init__(**kwargs)

    def suggest(self, trial_id: str):
        # While stalled, pretend no configuration is available yet.
        if self.stall:
            return None
        self.live_trials[trial_id] = 1
        return {"test_variable": 2}

    def on_trial_result(self, trial_id: str, result: Dict):
        self.counter["result"] += 1
        self.results.append(result)

    def on_trial_complete(
        self, trial_id: str, result: Optional[Dict] = None, error: bool = False
    ):
        self.counter["complete"] += 1
        if result:
            self._process_result(result)
        # Forget the trial if we were still tracking it.
        self.live_trials.pop(trial_id, None)

    def _process_result(self, result: Dict):
        self.final_results.append(result)


class _MockSuggestionAlgorithm(SearchGenerator):
    """SearchGenerator wired to a _MockSearcher, optionally rate-limited."""

    def __init__(self, max_concurrent: Optional[int] = None, **kwargs):
        searcher = _MockSearcher(**kwargs)
        if max_concurrent:
            searcher = ConcurrencyLimiter(searcher,
                                          max_concurrent=max_concurrent)
        self.searcher = searcher
        super(_MockSuggestionAlgorithm, self).__init__(self.searcher)

    @property
    def live_trials(self) -> List[Trial]:
        return self.searcher.live_trials

    @property
    def results(self) -> List[Dict]:
        return self.searcher.results
import logging
import threading
import time

from oslo_messaging._drivers.protocols.amqp import controller
from oslo_messaging._i18n import _LW
from oslo_messaging import exceptions
from six import moves

LOG = logging.getLogger(__name__)


class SendTask(controller.Task):
    """A task that sends a message to a target, and optionally waits for a
    reply message.  The caller may block until the remote confirms receipt
    or the reply message has arrived.
    """

    def __init__(self, target, request, wait_for_reply, deadline):
        super(SendTask, self).__init__()
        self._target = target
        self._request = request
        self._deadline = deadline
        self._wait_for_reply = wait_for_reply
        self._results_queue = moves.queue.Queue()

    def wait(self, timeout):
        """Block until the send (and optional reply) completes.

        Raises MessagingTimeout if nothing arrives within timeout seconds,
        and MessagingException if the request itself failed.
        """
        try:
            outcome = self._results_queue.get(timeout=timeout)
        except moves.queue.Empty:
            if self._wait_for_reply:
                reason = "Timed out waiting for a reply."
            else:
                reason = "Timed out waiting for send to complete."
            raise exceptions.MessagingTimeout(reason)
        if outcome["status"] != "OK":
            raise outcome["error"]
        return outcome.get("response", None)

    def execute(self, controller):
        """Runs on eventloop thread - sends request."""
        expired = self._deadline and self._deadline <= time.time()
        if expired:
            LOG.warning(_LW("Send request to %s aborted: TTL expired."),
                        self._target)
        else:
            controller.request(self._target, self._request,
                               self._results_queue, self._wait_for_reply)


class ListenTask(controller.Task):
    """A task that creates a subscription to the given target.  Messages
    arriving from the target are given to the listener.
    """

    def __init__(self, target, listener, notifications=False):
        """Create a subscription to the target."""
        super(ListenTask, self).__init__()
        self._target = target
        self._listener = listener
        self._notifications = notifications

    def execute(self, controller):
        """Run on the eventloop thread - subscribes to target.

        Inbound messages are queued to the listener's incoming queue.
        """
        if self._notifications:
            subscribe = controller.subscribe_notifications
        else:
            subscribe = controller.subscribe
        subscribe(self._target, self._listener.incoming)


class ReplyTask(controller.Task):
    """A task that sends 'response' message to 'address'."""

    def __init__(self, address, response, log_failure):
        super(ReplyTask, self).__init__()
        self._address = address
        self._response = response
        self._log_failure = log_failure
        self._wakeup = threading.Event()

    def wait(self):
        """Wait for the controller to send the message."""
        self._wakeup.wait()

    def execute(self, controller):
        """Run on the eventloop thread - send the response message."""
        controller.response(self._address, self._response)
        self._wakeup.set()
import sys
sys.path.insert(1, "../../../")
import h2o


def binop_plus(ip,port):
    """Exercise `+` across scalar / H2OVec / H2OFrame operand combinations."""
    # Connect to h2o
    h2o.init(ip,port)

    iris = h2o.import_frame(path=h2o.locate("smalldata/iris/iris_wheader_65_rows.csv"))
    rows, cols = iris.dim()
    iris.show()

    ###################################################################
    # LHS: scalar, RHS: H2OFrame
    res = 2 + iris
    res_rows, res_cols = res.dim()
    assert res_rows == rows and res_cols == cols, "dimension mismatch"
    # column sums shift by 2 * rows; compare against precomputed expectations
    for x, y in zip([res[c].sum() for c in range(cols-1)], [469.9, 342.6, 266.9, 162.2]):
        assert abs(x - y) < 1e-1, "expected same values"

    # LHS: scalar, RHS: scalar (a single-cell lookup yields a plain number)
    res = 2 + iris[0]
    res2 = 1.1 + res[21,:]
    assert abs(res2 - 8.2) < 1e-1, "expected same values"

    ###################################################################
    # LHS: scalar, RHS: H2OFrame
    res = 1.2 + iris[2]
    res2 = res[21,:] + iris
    res2.show()

    # LHS: scalar, RHS: H2OVec
    res = 1.2 + iris[2]
    res2 = res[21,:] + iris[1]
    res2.show()

    # LHS: scalar, RHS: scalar
    res = 1.1 + iris[2]
    res2 = res[21,:] + res[10,:]
    assert abs(res2 - 5.2) < 1e-1, "expected same values"

    # LHS: scalar, RHS: scalar
    res = 2 + iris[0]
    res2 = res[21,:] + 3
    assert abs(res2 - 10.1) < 1e-1, "expected same values"

    ###################################################################
    # LHS: H2OVec, RHS: H2OFrame -- unsupported (different dimensions)
    #try:
    #    res = iris[2] + iris
    #    res.show()
    #    assert False, "expected error. objects with different dimensions not supported."
    #except EnvironmentError:
    #    pass

    # LHS: H2OVec, RHS: scalar
    res = 1.2 + iris[2]
    res2 = iris[1] + res[21,:]
    res2.show()

    ###################################################################
    # LHS: H2OFrame, RHS: H2OFrame
    res = iris + iris
    res_rows, res_cols = res.dim()
    assert res_rows == rows and res_cols == cols, "dimension mismatch"

    res = iris[0:2] + iris[1:3]
    res_rows, res_cols = res.dim()
    assert res_rows == rows and res_cols == 2, "dimension mismatch"

    # frames of different dimensions -- unsupported
    #try:
    #    res = iris + iris[0:3]
    #    res.show()
    #    assert False, "expected error. frames are different dimensions."
    #except EnvironmentError:
    #    pass

    # LHS: H2OFrame, RHS: H2OVec -- unsupported (different dimensions)
    #try:
    #    res = iris + iris[0]
    #    res.show()
    #    assert False, "expected error. objects of different dimensions not supported."
    #except EnvironmentError:
    #    pass

    # LHS: H2OFrame, RHS: scalar
    res = 1.2 + iris[2]
    res2 = iris + res[21,:]
    res2.show()

    # LHS: H2OFrame, RHS: scalar
    res = iris + 2
    res_rows, res_cols = res.dim()
    assert res_rows == rows and res_cols == cols, "dimension mismatch"
    for x, y in zip([res[c].sum() for c in range(cols-1)], [469.9, 342.6, 266.9, 162.2]):
        assert abs(x - y) < 1e-1, "expected same values"
    ###################################################################


if __name__ == "__main__":
    h2o.run_test(sys.argv, binop_plus)
from __future__ import print_function from guild.actor import Actor, actor_method, process_method, late_bind class Dog(Actor): @actor_method # Input - triggered by data coming in def woof(self): print("Woof", self) @process_method # Process - triggered each time it's run def process(self): #print(" ", end="") pass @late_bind # Output def produce(self): pass class Shitzu(Dog): def __init__(self): self.count = 0 super(Dog, self).__init__() @process_method def process(self): self.count += 1 print("I don't go meow", self.count) if self.count >= 20: self.stop() return False if __name__ == "__main__": import time dog = Dog() shitzu = Shitzu() dog.start() shitzu.start() dog.woof() shitzu.woof() time.sleep(0.1) shitzu.join() time.sleep(0.1) dog.stop() dog.join()
import os from django.conf import settings from django.contrib.auth import get_user_model from django.contrib.sites.models import Site from django.core.urlresolvers import reverse from django.test import TestCase from mock import patch, Mock import re import rdflib from rdflib import RDF from urllib import urlencode, unquote from eulxml.xmlmap import load_xmlobject_from_file, XmlObject from eulfedora.server import Repository from piffle import iiif from readux.annotations.models import Annotation from readux.books import abbyyocr from readux.books.models import SolrVolume, Volume, VolumeV1_0, Book, BIBO, \ DC, Page, PageV1_1 FIXTURE_DIR = os.path.join(settings.BASE_DIR, 'readux', 'books', 'fixtures') class SolrVolumeTest(TestCase): # primarily testing BaseVolume logic here def test_properties(self): ocm = 'ocn460678076' vol = 'V.1' noid = '1234' volume = SolrVolume(label='%s_%s' % (ocm, vol), pid='testpid:%s' % noid) self.assertEqual(ocm, volume.control_key) self.assertEqual(vol, volume.volume) self.assertEqual(noid, volume.noid) # don't display volume zero vol = 'V.0' volume.data['label'] = '%s_%s' % (ocm, vol) self.assertEqual('', volume.volume) # should also work without volume info volume.data['label'] = ocm self.assertEqual(ocm, volume.control_key) self.assertEqual('', volume.volume) def test_fulltext_absolute_url(self): volume = SolrVolume(label='ocn460678076_V.1', pid='testpid:1234') url = volume.fulltext_absolute_url() self.assert_(url.startswith('https://')) self.assert_(url.endswith(reverse('books:text', kwargs={'pid': volume.pid}))) current_site = Site.objects.get_current() self.assert_(current_site.domain in url) def test_voyant_url(self): # Volume with English Lang volume1 = SolrVolume(label='ocn460678076_V.1', pid='testpid:1234', language='eng') url = volume1.voyant_url() self.assert_(urlencode({'corpus': volume1.pid}) in url, 'voyant url should include volume pid as corpus identifier') self.assert_(urlencode({'archive': 
volume1.fulltext_absolute_url()}) in url, 'voyant url should include volume fulltext url as archive') self.assert_(urlencode({'stopList': 'stop.en.taporware.txt'}) in url, 'voyant url should not include english stopword list when volume is in english') # volume language is French volume2 = SolrVolume(label='ocn460678076_V.1', pid='testpid:1235', language='fra') url_fra = volume2.voyant_url() self.assert_(urlencode({'stopList': 'stop.en.taporware.txt'}) not in url_fra, 'voyant url should not include english stopword list when language is not english') def test_pdf_url(self): # no start page set vol = SolrVolume(pid='vol:123') pdf_url = vol.pdf_url() self.assertEqual(unquote(reverse('books:pdf', kwargs={'pid': vol.pid})), pdf_url) # start page vol = SolrVolume(pid='vol:123', start_page=6) pdf_url = vol.pdf_url() self.assert_(pdf_url.startswith(unquote(reverse('books:pdf', kwargs={'pid': vol.pid})))) self.assert_('#page=6' in pdf_url) class VolumeTest(TestCase): # borrowing fixture & test accounts from readux.annotations.tests fixtures = ['test_annotation_data.json'] user_credentials = { 'user': {'username': 'testuser', 'password': 'testing'}, 'superuser': {'username': 'testsuper', 'password': 'superme'} } def test_annotations(self): # find annotations associated with a volume, optionally filtered # by user User = get_user_model() testuser = User.objects.create(username='tester') testadmin = User.objects.create(username='super', is_superuser=True) mockapi = Mock() vol = Volume(mockapi, 'vol:1') # create annotations to test finding p1 = Annotation.objects.create(user=testuser, text='testuser p1', uri=reverse('books:page', kwargs={'vol_pid': vol.pid, 'pid': 'p:1'}), volume_uri=vol.absolute_url) p2 = Annotation.objects.create(user=testuser, text='testuser p2', uri=reverse('books:page', kwargs={'vol_pid': vol.pid, 'pid': 'p:2'}), volume_uri=vol.absolute_url) p3 = Annotation.objects.create(user=testuser, text='testuser p3', uri=reverse('books:page', kwargs={'vol_pid': 
vol.pid, 'pid': 'p:3'}), volume_uri=vol.absolute_url) v2p1 = Annotation.objects.create(user=testuser, text='testuser vol2 p1', uri=reverse('books:page', kwargs={'vol_pid': 'vol:2', 'pid': 'p:1'}), volume_uri='http://example.com/books/vol:2/') sup2 = Annotation.objects.create(user=testadmin, text='testsuper p2', uri=reverse('books:page', kwargs={'vol_pid': vol.pid, 'pid': 'p:2'}), volume_uri=vol.absolute_url) annotations = vol.annotations() self.assertEqual(4, annotations.count()) self.assert_(v2p1 not in annotations) # filter by user annotations = vol.annotations().visible_to(testuser) self.assertEqual(3, annotations.count()) self.assert_(sup2 not in annotations) annotations = vol.annotations().visible_to(testadmin) self.assertEqual(4, annotations.count()) self.assert_(sup2 in annotations) # annotation counts per page annotation_count = vol.page_annotation_count() self.assertEqual(1, annotation_count[p1.uri]) self.assertEqual(2, annotation_count[p2.uri]) self.assertEqual(1, annotation_count[p3.uri]) # by user annotation_count = vol.page_annotation_count(testuser) self.assertEqual(1, annotation_count[p2.uri]) annotation_count = vol.page_annotation_count(testadmin) self.assertEqual(2, annotation_count[p2.uri]) # total for a volume self.assertEqual(4, vol.annotation_count()) self.assertEqual(3, vol.annotation_count(testuser)) self.assertEqual(4, vol.annotation_count(testadmin)) # total for all volumes totals = Volume.volume_annotation_count() self.assertEqual(1, totals['http://example.com/books/vol:2/']) self.assertEqual(4, totals[vol.absolute_url]) totals = Volume.volume_annotation_count(testuser) self.assertEqual(3, totals[vol.absolute_url]) def test_has_pages(self): mockapi = Mock() vol = Volume(mockapi, 'vol:1') vol.pages = [] self.assertFalse(vol.has_pages) # one page (i.e. 
cover image) is not enough to count as having pages vol.pages = [Mock(spec=Page)] self.assertFalse(vol.has_pages) vol.pages = [Mock(spec=Page), Mock(spec=Page)] self.assertTrue(vol.has_pages) def test_has_tei(self): mockapi = Mock() vol = Volume(mockapi, 'vol:1') p1 = Mock(spec=Page) p1.tei.exists = False p2 = Mock(spec=Page) p2.tei.exists = False vol.pages = [p1, p2] self.assertFalse(vol.has_tei) p2.tei.exists = True self.assertTrue(vol.has_tei) class VolumeV1_0Test(TestCase): def setUp(self): # use uningested objects for testing purposes repo = Repository() self.vol = repo.get_object(type=VolumeV1_0) self.vol.label = 'ocn460678076_V.1' self.vol.pid = 'rdxtest:4606' def test_ark_uri(self): ark_uri = 'http://pid.co/ark:/12345/ba45' self.vol.dc.content.identifier_list.extend([ark_uri, 'pid:ba45', 'otherid']) self.assertEqual(ark_uri, self.vol.ark_uri) def test_rdf_dc(self): # add metadata to test rdf generated ark_uri = 'http://pid.co/ark:/12345/ba45' self.vol.dc.content.identifier_list.append(ark_uri) self.vol.dc.content.title = 'Sunset, a novel' self.vol.dc.content.format = 'application/pdf' self.vol.dc.content.language = 'eng' self.vol.dc.content.rights = 'public domain' # NOTE: patching on class instead of instance because related object is a descriptor with patch.object(Volume, 'book', new=Mock(spec=Book)) as mockbook: mockbook.dc.content.creator_list = ['Author, Joe'] mockbook.dc.content.date_list = ['1801', '2010'] mockbook.dc.content.description_list = ['digitized edition', 'mystery novel'] mockbook.dc.content.publisher = 'Nashville, Tenn. 
: Barbee &amp; Smith' mockbook.dc.content.relation_list = [ 'http://pid.co/ark:/12345/book', 'http://pid.co/ark:/12345/volpdf' ] graph = self.vol.rdf_dc_graph() lit = rdflib.Literal uri = rdflib.URIRef(self.vol.ark_uri) self.assert_((uri, RDF.type, BIBO.book) in graph, 'rdf graph type should be bibo:book') self.assert_((uri, DC.title, lit(self.vol.dc.content.title)) in graph, 'title should be set as dc:title') self.assert_((uri, BIBO.volume, lit(self.vol.volume)) in graph, 'volume label should be set as bibo:volume') self.assert_((uri, DC['format'], lit(self.vol.dc.content.format)) in graph, 'format should be set as dc:format') self.assert_((uri, DC.language, lit(self.vol.dc.content.language)) in graph, 'language should be set as dc:language') self.assert_((uri, DC.rights, lit(self.vol.dc.content.rights)) in graph, 'rights should be set as dc:rights') for rel in self.vol.dc.content.relation_list: self.assert_((uri, DC.relation, lit(rel)) in graph, 'related item %s should be set as dc:relation' % rel) # metadata pulled from book obj because not present in volume self.assert_((uri, DC.creator, lit(mockbook.dc.content.creator_list[0])) in graph, 'creator from book metadata should be set as dc:creator when not present in volume metadata') self.assert_((uri, DC.publisher, lit(mockbook.dc.content.publisher)) in graph, 'publisher from book metadata should be set as dc:publisher when not present in volume metadata') # earliest date only self.assert_((uri, DC.date, lit('1801')) in graph, 'earliest date 1801 from book metadata should be set as dc:date when not present in volume metadata') for d in mockbook.dc.content.description_list: self.assert_((uri, DC.description, lit(d)) in graph, 'description from book metadata should be set as dc:description when not present in volume metadata') # volume-level metadata should be used when present instead of book self.vol.dc.content.creator_list = ['Writer, Jane'] self.vol.dc.content.date_list = ['1832', '2012'] 
self.vol.dc.content.description_list = ['digital edition'] self.vol.dc.content.publisher = 'So &amp; So Publishers' graph = self.vol.rdf_dc_graph() self.assert_((uri, DC.creator, lit(self.vol.dc.content.creator_list[0])) in graph, 'creator from volume metadata should be set as dc:creator when present') self.assert_((uri, DC.publisher, lit(self.vol.dc.content.publisher)) in graph, 'publisher from volume metadata should be set as dc:publisher when present') # earliest date *only* should be present self.assert_((uri, DC.date, lit('1832')) in graph, 'earliest date 1832 from volume metadata should be set as dc:date when present') for d in self.vol.dc.content.description_list: self.assert_((uri, DC.description, lit(d)) in graph, 'description from volume metadata should be set as dc:description when present') def test_index_data(self): self.vol.owner = '' self.vol.dc.content.date = 1842 # NOTE: patching on class instead of instance because related object is a descriptor with patch.object(Volume, 'book', new=Mock(spec=Book)) as mockbook: mockbook.pid = 'book:123' mockbook.collection.pid = 'coll:123', mockbook.collection.short_label = 'Pile O\' Books' mockbook.dc.content.creator_list = ['Author, Joe'] mockbook.dc.content.date_list = ['1801', '2010'] mockbook.dc.content.description_list = ['digitized edition', 'mystery novel'] mockbook.dc.content.publisher = 'Nashville, Tenn. 
: Barbee &amp; Smith' mockbook.dc.content.relation_list = [ 'http://pid.co/ark:/12345/book', 'http://pid.co/ark:/12345/volpdf' ] mockbook.dc.content.subject_list = [] data = self.vol.index_data() self.assert_('fulltext' not in data, 'fulltext should not be set in index data when volume has no ocr') self.assert_('hasPrimaryImage' not in data, 'hasPrimaryImage should not be set in index data when volume has no cover') self.assertEqual(mockbook.pid, data['book_id'], 'associated book pid should be set as book id') self.assertEqual(mockbook.collection.pid, data['collection_id'], 'associated collection pid should be set as collection id') self.assertEqual(mockbook.collection.short_label, data['collection_label'], 'associated collection label short label should be set as collection label') self.assertEqual(mockbook.dc.content.creator_list, data['creator'], 'creator should be set from book DC creator') self.assertEqual(self.vol.dc.content.date_list, data['date'], 'date should be set from earliest volume DC date') self.assert_('subject' not in data, 'subject should not be set in index data when book has no subjects') self.assertEqual(0, data['page_count'], 'page count should be set to zero when volume has no pages loaded') # test hasPrimaryImage mockpage = Mock(spec=Page) mockpage.pid = 'page:1234' mockpage.uriref = rdflib.URIRef('info:fedora/%s' % mockpage.pid) self.vol.primary_image = mockpage data = self.vol.index_data() self.assertEqual(mockpage.pid, data['hasPrimaryImage'], 'hasPrimaryImage should be set to cover page pid, when present') # test subjects mockbook.dc.content.subject_list = ['subj1', 'subj2'] data = self.vol.index_data() self.assertEqual(mockbook.dc.content.subject_list, data['subject'], 'subject should be set when present in book DC') # test full-text with patch.object(self.vol, 'ocr') as mockocr: mockocr.exists = True ocr_xml = load_xmlobject_from_file(os.path.join(FIXTURE_DIR, 'abbyyocr_fr8v2.xml')) mockocr.content = ocr_xml data = 
self.vol.index_data() self.assert_('fulltext' in data, 'fulltext should be set in index data when OCR is available') # use mock to test pdf size indexing with patch.object(self.vol, 'pdf') as mockpdf: mockpdf.size = 1234567 data = self.vol.index_data() self.assertEqual(mockpdf.size, data['pdf_size'], 'pdf_size should be set from pdf size, when available') def test_voyant_url(self): # NOTE: this test is semi-redundant with the same test for the SolrVolume, # but since the method is implemented in BaseVolume and depends on # properties set on the subclasses, testing here to ensure it works # in both cases # no language self.vol.pid = 'vol:1234' url = self.vol.voyant_url() self.assert_(urlencode({'corpus': self.vol.pid}) in url, 'voyant url should include volume pid as corpus identifier') self.assert_(urlencode({'archive': self.vol.fulltext_absolute_url()}) in url, 'voyant url should include volume fulltext url as archive') self.assert_(urlencode({'stopList': 'stop.en.taporware.txt'}) not in url, 'voyant url should not include english stopword list when volume is not in english') # english self.vol.dc.content.language = 'eng' url = self.vol.voyant_url() self.assert_(urlencode({'stopList': 'stop.en.taporware.txt'}) in url, 'voyant url should include english stopword list when volume is in english') def test_get_fulltext(self): with patch.object(self.vol, 'ocr') as mockocr: mockocr.exists = True # abbyy finereader v8 ocr_xml = load_xmlobject_from_file(os.path.join(FIXTURE_DIR, 'abbyyocr_fr8v2.xml')) mockocr.content = ocr_xml text = self.vol.get_fulltext() # check for arbitrary text content self.assert_('In presenting this, the initial volume of the' in text, 'ocr text content should be present in plain text') self.assert_('Now, kind reader, we ask that you do not crit' in text, 'ocr text content should be present in plain text') self.assert_(re.search(r'Baldwin\s+Dellinger\s+Brice', text), 'table row content should be displayed on a single line') # abbyy finereader v6 
ocr_xml = load_xmlobject_from_file(os.path.join(FIXTURE_DIR, 'abbyyocr_fr6v1.xml')) mockocr.content = ocr_xml text = self.vol.get_fulltext() # check for arbitrary text content self.assert_('was late in the autumn, the vines yet kept their leaves,' in text, 'ocr text content should be present in plain text') self.assert_('walked up the steps. The lady had not moved, and made' in text, 'ocr text content should be present in plain text') self.assert_(re.search(r'Modern\.\s+New Standard\.\s+Popular\.', text), 'table row content should be displayed on a single line') def test_ocr_ids(self): # pach in fixture ocr content with patch.object(self.vol, 'ocr') as mockocr: mockocr.exists = True ocr_xml = load_xmlobject_from_file(os.path.join(FIXTURE_DIR, 'abbyyocr_fr8v2.xml')) mockocr.content = ocr_xml self.assertFalse(self.vol.ocr_has_ids) self.vol.add_ocr_ids() self.assertTrue(self.vol.ocr_has_ids) class PageV1_1Test(TestCase): metsalto_doc = os.path.join(FIXTURE_DIR, 'mets_alto.xml') def setUp(self): self.mets_alto = load_xmlobject_from_file(self.metsalto_doc, XmlObject) def test_ocr_ids(self): page = PageV1_1(Mock()) # use mock for fedora api, since we won't make any calls page.pid = 'rdxtest:4607' with patch.object(page, 'ocr') as mockocr: mockocr.exists = True mockocr.content = self.mets_alto self.assertFalse(page.ocr_has_ids) page.add_ocr_ids() self.assertTrue(page.ocr_has_ids) class AbbyyOCRTestCase(TestCase): fr6v1_doc = os.path.join(FIXTURE_DIR, 'abbyyocr_fr6v1.xml') fr8v2_doc = os.path.join(FIXTURE_DIR, 'abbyyocr_fr8v2.xml') # language code eng = 'EnglishUnitedStates' def setUp(self): self.fr6v1 = load_xmlobject_from_file(self.fr6v1_doc, abbyyocr.Document) self.fr8v2 = load_xmlobject_from_file(self.fr8v2_doc, abbyyocr.Document) def test_document(self): # top-level document properties # finereader 6 v1 self.assertEqual(132, self.fr6v1.page_count) self.assertEqual(self.eng, self.fr6v1.language) self.assertEqual(self.eng, self.fr6v1.languages) 
self.assert_(self.fr6v1.pages, 'page list should be non-empty') self.assertEqual(132, len(self.fr6v1.pages), 'number of pages should match page count') self.assert_(isinstance(self.fr6v1.pages[0], abbyyocr.Page)) # finereader 8 v2 self.assertEqual(186, self.fr8v2.page_count) self.assertEqual(self.eng, self.fr8v2.language) self.assertEqual(self.eng, self.fr8v2.languages) self.assert_(self.fr8v2.pages, 'page list should be non-empty') self.assertEqual(186, len(self.fr8v2.pages), 'number of pages should match page count') self.assert_(isinstance(self.fr8v2.pages[0], abbyyocr.Page)) def test_page(self): # finereader 6 v1 self.assertEqual(1500, self.fr6v1.pages[0].width) self.assertEqual(2174, self.fr6v1.pages[0].height) self.assertEqual(300, self.fr6v1.pages[0].resolution) # second page has picture block, no text self.assertEqual(1, len(self.fr6v1.pages[1].blocks)) self.assertEqual(1, len(self.fr6v1.pages[1].picture_blocks)) self.assertEqual(0, len(self.fr6v1.pages[1].text_blocks)) self.assert_(isinstance(self.fr6v1.pages[1].blocks[0], abbyyocr.Block)) # fourth page has paragraph text self.assert_(self.fr6v1.pages[3].paragraphs) self.assert_(isinstance(self.fr6v1.pages[3].paragraphs[0], abbyyocr.Paragraph)) # finereader 8 v2 self.assertEqual(2182, self.fr8v2.pages[0].width) self.assertEqual(3093, self.fr8v2.pages[0].height) self.assertEqual(300, self.fr8v2.pages[0].resolution) # first page has multiple text/pic blocks self.assert_(self.fr8v2.pages[0].blocks) self.assert_(self.fr8v2.pages[0].picture_blocks) self.assert_(self.fr8v2.pages[0].text_blocks) self.assert_(isinstance(self.fr8v2.pages[0].blocks[0], abbyyocr.Block)) # first page has paragraph text self.assert_(self.fr8v2.pages[0].paragraphs) self.assert_(isinstance(self.fr8v2.pages[0].paragraphs[0], abbyyocr.Paragraph)) def test_block(self): # finereader 6 v1 # - basic block attributes b = self.fr6v1.pages[1].blocks[0] self.assertEqual('Picture', b.type) self.assertEqual(144, b.left) self.assertEqual(62, b.top) 
self.assertEqual(1358, b.right) self.assertEqual(2114, b.bottom) # - block with text b = self.fr6v1.pages[3].blocks[0] self.assert_(b.paragraphs) self.assert_(isinstance(b.paragraphs[0], abbyyocr.Paragraph)) # finereader 8 v2 b = self.fr8v2.pages[0].blocks[0] self.assertEqual('Text', b.type) self.assertEqual(282, b.left) self.assertEqual(156, b.top) self.assertEqual(384, b.right) self.assertEqual(228, b.bottom) self.assert_(b.paragraphs) self.assert_(isinstance(b.paragraphs[0], abbyyocr.Paragraph)) def test_paragraph_line(self): # finereader 6 v1 para = self.fr6v1.pages[3].paragraphs[0] # untested: align, left/right/start indent self.assert_(para.lines) self.assert_(isinstance(para.lines[0], abbyyocr.Line)) line = para.lines[0] self.assertEqual(283, line.baseline) self.assertEqual(262, line.left) self.assertEqual(220, line.top) self.assertEqual(1220, line.right) self.assertEqual(294, line.bottom) # line text available via unicode self.assertEqual(u'MABEL MEREDITH;', unicode(line)) # also mapped as formatted text (could repeat/segment) self.assert_(line.formatted_text) # should be non-empty self.assert_(isinstance(line.formatted_text[0], abbyyocr.Formatting)) self.assertEqual(self.eng, line.formatted_text[0].language) self.assertEqual(u'MABEL MEREDITH;', line.formatted_text[0].text) # not normalized # finereader 8 v2 para = self.fr8v2.pages[1].paragraphs[0] self.assert_(para.lines) self.assert_(isinstance(para.lines[0], abbyyocr.Line)) line = para.lines[0] self.assertEqual(1211, line.baseline) self.assertEqual(845, line.left) self.assertEqual(1160, line.top) self.assertEqual(1382, line.right) self.assertEqual(1213, line.bottom) self.assertEqual(u'EMORY UNIVERSITY', unicode(line)) self.assert_(line.formatted_text) # should be non-empty self.assert_(isinstance(line.formatted_text[0], abbyyocr.Formatting)) self.assertEqual(self.eng, line.formatted_text[0].language) self.assertEqual(u'EMORY UNIVERSITY', line.formatted_text[0].text) def test_frns(self): 
self.assertEqual('fr6v1:par|fr8v2:par', abbyyocr.frns('par')) self.assertEqual('fr6v1:text/fr6v1:par|fr8v2:text/fr8v2:par', abbyyocr.frns('text/par'))
# Descriptor constants for an API extension definition.
# NOTE(review): the constant names match the Neutron api-def convention
# (ALIAS / IS_SHIM_EXTENSION / RESOURCE_ATTRIBUTE_MAP ...) — presumably this
# module is consumed by the Neutron extension framework; confirm against the
# package it lives in.

# Unique machine-readable name of the extension.
ALIAS = 'tag-ports-during-bulk-creation'
# Shim extension: advertises behavior only; adds no new resources/attributes
# (consistent with the empty maps below).
IS_SHIM_EXTENSION = True
IS_STANDARD_ATTR_EXTENSION = False
# Human-readable name and description.
NAME = 'Tag Ports During Bulk Creation'
DESCRIPTION = 'Allow to tag ports during bulk creation'
# Timestamp of the last update to this definition (ISO 8601).
UPDATED_TIMESTAMP = '2019-12-29T19:00:00-00:00'
# No new resources, sub-resources, or actions are introduced.
RESOURCE_ATTRIBUTE_MAP = {}
SUB_RESOURCE_ATTRIBUTE_MAP = {}
ACTION_MAP = {}
# No dependencies on other extensions.
REQUIRED_EXTENSIONS = []
OPTIONAL_EXTENSIONS = []
ACTION_STATUS = {}
from __future__ import (absolute_import, division, generators, nested_scopes, print_function, unicode_literals, with_statement) import unittest from textwrap import dedent from pants.backend.jvm.register import build_file_aliases as register_jvm from pants.backend.jvm.targets.exclude import Exclude from pants.backend.jvm.targets.jvm_binary import (Duplicate, JarRules, JvmBinary, ManifestEntries, Skip) from pants.base.address import BuildFileAddress from pants.base.exceptions import TargetDefinitionException from pants.base.payload_field import FingerprintedField from pants.base.target import Target from pants_test.base_test import BaseTest class JarRulesTest(unittest.TestCase): def test_jar_rule(self): dup_rule = Duplicate('foo', Duplicate.REPLACE) self.assertEquals('Duplicate(apply_pattern=foo, action=REPLACE)', repr(dup_rule)) skip_rule = Skip('foo') self.assertEquals('Skip(apply_pattern=foo)', repr(skip_rule)) def test_invalid_apply_pattern(self): with self.assertRaisesRegexp(ValueError, r'The supplied apply_pattern is not a string'): Skip(None) with self.assertRaisesRegexp(ValueError, r'The supplied apply_pattern is not a string'): Duplicate(None, Duplicate.SKIP) with self.assertRaisesRegexp(ValueError, r'The supplied apply_pattern: \) is not a valid'): Skip(r')') with self.assertRaisesRegexp(ValueError, r'The supplied apply_pattern: \) is not a valid'): Duplicate(r')', Duplicate.SKIP) def test_bad_action(self): with self.assertRaisesRegexp(ValueError, r'The supplied action must be one of'): Duplicate('foo', None) def test_duplicate_error(self): with self.assertRaisesRegexp(Duplicate.Error, r'Duplicate entry encountered for path foo'): raise Duplicate.Error('foo') def test_default(self): jar_rules = JarRules.default() self.assertTrue(4, len(jar_rules.rules)) for rule in jar_rules.rules: self.assertTrue(rule.apply_pattern.pattern.startswith(r'^META-INF')) def test_set_bad_default(self): with self.assertRaisesRegexp(ValueError, r'The default rules must be a 
JarRules'): JarRules.set_default(None) class JvmBinaryTest(BaseTest): @property def alias_groups(self): return register_jvm() def test_simple(self): self.add_to_build_file('BUILD', dedent(''' jvm_binary(name='foo', main='com.example.Foo', basename='foo-base', ) ''')) target = self.target('//:foo') self.assertEquals('com.example.Foo', target.main) self.assertEquals('com.example.Foo', target.payload.main) self.assertEquals('foo-base', target.basename) self.assertEquals('foo-base', target.payload.basename) self.assertEquals([], target.deploy_excludes) self.assertEquals([], target.payload.deploy_excludes) self.assertEquals(JarRules.default(), target.deploy_jar_rules) self.assertEquals(JarRules.default(), target.payload.deploy_jar_rules) self.assertEquals({}, target.payload.manifest_entries.entries); def test_default_base(self): self.add_to_build_file('BUILD', dedent(''' jvm_binary(name='foo', main='com.example.Foo', ) ''')) target = self.target('//:foo') self.assertEquals('foo', target.basename) def test_deploy_jar_excludes(self): self.add_to_build_file('BUILD', dedent(''' jvm_binary(name='foo', main='com.example.Foo', deploy_excludes=[exclude(org='example.com', name='foo-lib')], ) ''')) target = self.target('//:foo') self.assertEquals([Exclude(org='example.com', name='foo-lib')], target.deploy_excludes) def test_deploy_jar_rules(self): self.add_to_build_file('BUILD', dedent(''' jvm_binary(name='foo', main='com.example.Foo', deploy_jar_rules=jar_rules([Duplicate('foo', Duplicate.SKIP)], default_dup_action=Duplicate.FAIL) ) ''')) target = self.target('//:foo') jar_rules = target.deploy_jar_rules self.assertEquals(1, len(jar_rules.rules)) self.assertEquals('foo', jar_rules.rules[0].apply_pattern.pattern) self.assertEquals(repr(Duplicate.SKIP), repr(jar_rules.rules[0].action)) # <object object at 0x...> self.assertEquals(Duplicate.FAIL, jar_rules.default_dup_action) def test_bad_source_declaration(self): build_file = self.add_to_build_file('BUILD', dedent(''' 
jvm_binary(name='foo', main='com.example.Foo', source=['foo.py'], ) ''')) with self.assertRaisesRegexp(TargetDefinitionException, r'Invalid target JvmBinary.*foo.*source must be a single'): self.build_graph.inject_address_closure(BuildFileAddress(build_file, 'foo')) def test_bad_sources_declaration(self): with self.assertRaisesRegexp(Target.IllegalArgument, r'jvm_binary only supports a single "source" argument'): self.make_target('foo:foo', target_type=JvmBinary, main='com.example.Foo', sources=['foo.py']) def test_bad_main_declaration(self): build_file = self.add_to_build_file('BUILD', dedent(''' jvm_binary(name='bar', main=['com.example.Bar'], ) ''')) with self.assertRaisesRegexp(TargetDefinitionException, r'Invalid target JvmBinary.*bar.*main must be a fully'): self.build_graph.inject_address_closure(BuildFileAddress(build_file, 'bar')) def test_bad_jar_rules(self): build_file = self.add_to_build_file('BUILD', dedent(''' jvm_binary(name='foo', main='com.example.Foo', deploy_jar_rules='invalid', ) ''')) with self.assertRaisesRegexp(TargetDefinitionException, r'Invalid target JvmBinary.*foo.*' r'deploy_jar_rules must be a JarRules specification. 
got str'): self.build_graph.inject_address_closure(BuildFileAddress(build_file, 'foo')) def _assert_fingerprints_not_equal(self, fields): for field in fields: for other_field in fields: if field == other_field: continue self.assertNotEquals(field.fingerprint(), other_field.fingerprint()) def test_jar_rules_field(self): field1 = FingerprintedField(JarRules(rules=[Duplicate('foo', Duplicate.SKIP)])) field1_same = FingerprintedField(JarRules(rules=[Duplicate('foo', Duplicate.SKIP)])) field2 = FingerprintedField(JarRules(rules=[Duplicate('foo', Duplicate.CONCAT)])) field3 = FingerprintedField(JarRules(rules=[Duplicate('bar', Duplicate.SKIP)])) field4 = FingerprintedField(JarRules(rules=[Duplicate('foo', Duplicate.SKIP), Duplicate('bar', Duplicate.SKIP)])) field5 = FingerprintedField(JarRules(rules=[Duplicate('foo', Duplicate.SKIP), Skip('foo')])) field6 = FingerprintedField(JarRules(rules=[Duplicate('foo', Duplicate.SKIP)], default_dup_action=Duplicate.FAIL)) field6_same = FingerprintedField(JarRules(rules=[Duplicate('foo', Duplicate.SKIP)], default_dup_action=Duplicate.FAIL)) field7 = FingerprintedField(JarRules(rules=[Skip('foo')])) field8 = FingerprintedField(JarRules(rules=[Skip('bar')])) field8_same = FingerprintedField(JarRules(rules=[Skip('bar')])) self.assertEquals(field1.fingerprint(), field1_same.fingerprint()) self.assertEquals(field6.fingerprint(), field6_same.fingerprint()) self.assertEquals(field8.fingerprint(), field8_same.fingerprint()) self._assert_fingerprints_not_equal([field1, field2, field3, field4, field5, field6, field7]) def test_manifest_entries(self): self.add_to_build_file('BUILD', dedent(''' jvm_binary(name='foo', main='com.example.Foo', manifest_entries= { 'Foo-Field' : 'foo', } ) ''')) target = self.target('//:foo') self.assertTrue(isinstance(target.payload.manifest_entries, ManifestEntries)) entries = target.payload.manifest_entries.entries self.assertEquals({ 'Foo-Field' : 'foo'}, entries) def test_manifest_not_dict(self): 
self.add_to_build_file('BUILD', dedent(''' jvm_binary(name='foo', main='com.example.Foo', manifest_entries= 'foo', ) ''')) with self.assertRaisesRegexp(TargetDefinitionException, r'Invalid target JvmBinary\(BuildFileAddress\(.*BUILD\), foo\)\): ' r'manifest_entries must be a dict. got str'): self.target('//:foo') def test_manifest_bad_key(self): self.add_to_build_file('BUILD', dedent(''' jvm_binary(name='foo', main='com.example.Foo', manifest_entries= { jar(org='bad', name='bad', rev='bad') : 'foo', } ) ''')) with self.assertRaisesRegexp(ManifestEntries.ExpectedDictionaryError, r'entries must be dictionary of strings, got key bad-bad-bad type JarDependency'): self.target('//:foo') def test_manifest_entries_fingerprint(self): field1 = ManifestEntries() field2 = ManifestEntries({'Foo-Field' : 'foo'}) field2_same = ManifestEntries({'Foo-Field' : 'foo'}) field3 = ManifestEntries({'Foo-Field' : 'foo', 'Bar-Field' : 'bar'}) self.assertEquals(field2.fingerprint(), field2_same.fingerprint()) self._assert_fingerprints_not_equal([field1, field2, field3])
""" Watch a running build job and output changes to the screen. """ import fcntl import os import select import socket import sys import tempfile import termios import time import traceback from rmake import errors from rmake.build import buildjob, buildtrove from rmake.cmdline import query def _getUri(client): if not isinstance(client.uri, str) or client.uri.startswith('unix://'): fd, tmpPath = tempfile.mkstemp() os.close(fd) uri = 'unix://' + tmpPath else: host = socket.gethostname() uri = 'http://%s' % host tmpPath = None return uri, tmpPath def monitorJob(client, jobId, showTroveDetails=False, showBuildLogs=False, exitOnFinish=None, uri=None, serve=True, out=None, displayClass=None): if not uri: uri, tmpPath = _getUri(client) else: tmpPath = None if not displayClass: displayClass = JobLogDisplay try: display = displayClass(client, showBuildLogs=showBuildLogs, out=out, exitOnFinish=exitOnFinish) client = client.listenToEvents(uri, jobId, display, showTroveDetails=showTroveDetails, serve=serve) return client finally: if serve and tmpPath: os.remove(tmpPath) def waitForJob(client, jobId, uri=None, serve=True): if not uri: uri, tmpPath = _getUri(client) else: tmpPath = None try: display = SilentDisplay(client) display._primeOutput(jobId) return client.listenToEvents(uri, jobId, display, serve=serve) finally: if tmpPath: os.remove(tmpPath) class _AbstractDisplay(object):#xmlrpc.BasicXMLRPCStatusSubscriber): def __init__(self, client, showBuildLogs=True, out=None, exitOnFinish=True): self.client = client self.finished = False self.exitOnFinish = True # override exitOnFinish setting self.showBuildLogs = showBuildLogs if not out: out = sys.stdout self.out = out def close(self): pass def _serveLoopHook(self): pass def _msg(self, msg, *args): self.out.write('[%s] %s\n' % (time.strftime('%X'), msg)) self.out.flush() def _jobStateUpdated(self, jobId, state, status): isFinished = (state in (buildjob.JOB_STATE_FAILED, buildjob.JOB_STATE_BUILT)) if isFinished: 
self._setFinished() def _setFinished(self): self.finished = True def _isFinished(self): return self.finished def _shouldExit(self): return self._isFinished() and self.exitOnFinish def _primeOutput(self, jobId): job = self.client.getJob(jobId, withTroves=False) if job.isFinished(): self._setFinished() class SilentDisplay(_AbstractDisplay): pass class JobLogDisplay(_AbstractDisplay): def __init__(self, client, showBuildLogs=True, out=None, exitOnFinish=None): _AbstractDisplay.__init__(self, client, out=out, showBuildLogs=showBuildLogs, exitOnFinish=exitOnFinish) self.buildingTroves = {} def _tailBuildLog(self, jobId, troveTuple): mark = self.buildingTroves.get((jobId, troveTuple), [0])[0] self.buildingTroves[jobId, troveTuple] = [mark, True] self.out.write('Tailing %s build log:\n\n' % troveTuple[0]) def _stopTailing(self, jobId, troveTuple): mark = self.buildingTroves.get((jobId, troveTuple), [0])[0] self.buildingTroves[jobId, troveTuple] = [ mark, False ] def _serveLoopHook(self): if not self.buildingTroves: return for (jobId, troveTuple), (mark, tail) in self.buildingTroves.items(): if not tail: continue try: moreData, data, mark = self.client.getTroveBuildLog(jobId, troveTuple, mark) except: moreData = True data = '' self.out.write(data) if not moreData: del self.buildingTroves[jobId, troveTuple] else: self.buildingTroves[jobId, troveTuple][0] = mark def _jobTrovesSet(self, jobId, troveData): self._msg('[%d] - job troves set' % jobId) def _jobStateUpdated(self, jobId, state, status): _AbstractDisplay._jobStateUpdated(self, jobId, state, status) state = buildjob.stateNames[state] if self._isFinished(): self._serveLoopHook() self._msg('[%d] - State: %s' % (jobId, state)) if status: self._msg('[%d] - %s' % (jobId, status)) def _jobLogUpdated(self, jobId, state, status): self._msg('[%d] %s' % (jobId, status)) def _troveStateUpdated(self, (jobId, troveTuple), state, status): isBuilding = (state in (buildtrove.TroveState.BUILDING, buildtrove.TroveState.RESOLVING)) 
state = buildtrove.stateNames[state] self._msg('[%d] - %s - State: %s' % (jobId, troveTuple[0], state)) if status: self._msg('[%d] - %s - %s' % (jobId, troveTuple[0], status)) if isBuilding and self.showBuildLogs: self._tailBuildLog(jobId, troveTuple) else: self._stopTailing(jobId, troveTuple) def _troveLogUpdated(self, (jobId, troveTuple), state, status): state = buildtrove.stateNames[state] self._msg('[%d] - %s - %s' % (jobId, troveTuple[0], status)) def _trovePreparingChroot(self, (jobId, troveTuple), host, path): if host == '_local_': msg = 'Chroot at %s' % path else: msg = 'Chroot at Node %s:%s' % (host, path) self._msg('[%d] - %s - %s' % (jobId, troveTuple[0], msg)) def _primeOutput(self, jobId): logMark = 0 while True: newLogs = self.client.getJobLogs(jobId, logMark) if not newLogs: break logMark += len(newLogs) for (timeStamp, message, args) in newLogs: print '[%s] [%s] - %s' % (timeStamp, jobId, message) BUILDING = buildtrove.TroveState.BUILDING troveTups = self.client.listTrovesByState(jobId, BUILDING).get(BUILDING, []) for troveTuple in troveTups: self._tailBuildLog(jobId, troveTuple) _AbstractDisplay._primeOutput(self, jobId) def set_raw_mode(): fd = sys.stdin.fileno() oldTerm = termios.tcgetattr(fd) newattr = termios.tcgetattr(fd) newattr[3] = newattr[3] & ~termios.ICANON & ~termios.ECHO termios.tcsetattr(fd, termios.TCSANOW, newattr) oldFlags = fcntl.fcntl(fd, fcntl.F_GETFL) fcntl.fcntl(fd, fcntl.F_SETFL, oldFlags | os.O_NONBLOCK) return oldTerm, oldFlags def restore_terminal(oldTerm, oldFlags): fd = sys.stdin.fileno() if oldTerm: termios.tcsetattr(fd, termios.TCSAFLUSH, oldTerm) if oldFlags: fcntl.fcntl(fd, fcntl.F_SETFL, oldFlags) class _AbstractDisplay(object):#xmlrpc.BasicXMLRPCStatusSubscriber): def __init__(self, client, showBuildLogs=True, out=None): self.client = client self.finished = False self.showBuildLogs = showBuildLogs self.troveStates = {} self.troveIndex = None self.troveDislay = False self.out = OutBuffer(out) def close(self): pass 
def _msg(self, msg, *args): self.out.write('\r[%s] %s\n' % (time.strftime('%X'), msg)) self.out.write('(h for help)>') self.out.flush() def _jobStateUpdated(self, jobId, state, status): isFinished = (state in (buildjob.JOB_STATE_FAILED, buildjob.JOB_STATE_BUILT)) if isFinished: self._setFinished() def _setFinished(self): self.finished = True def _isFinished(self): return self.finished def _shouldExit(self): return self._isFinished() and self.exitOnFinish def _primeOutput(self, jobId): job = self.client.getJob(jobId, withTroves=False) if job.isFinished(): self._setFinished() def _dispatch(self, methodname, (callData, responseHandler, args)): if methodname.startswith('_'): raise NoSuchMethodError(methodname) else: responseHandler.sendResponse('') getattr(self, methodname)(*args) class SilentDisplay(_AbstractDisplay): def _updateBuildLog(self): pass class JobLogDisplay(_AbstractDisplay): def __init__(self, client, state, out=None): _AbstractDisplay.__init__(self, client, out) self.troveToWatch = None self.watchTroves = False self.buildingTroves = {} self.state = state self.lastLen = 0 self.promptFormat = '%(jobId)s %(name)s%(context)s - %(state)s - (%(tailing)s) ([h]elp)>' self.updatePrompt() def close(self): self.out.write('\n') self.out.flush() def _msg(self, msg, *args): self.erasePrompt() self.out.write('[%s] %s\n' % (time.strftime('%X'), msg)) self.writePrompt() def updatePrompt(self): if self.troveToWatch: if self.troveToWatch not in self.state.troves: self.troveToWatch = self.state.troves[0] state = self.state.getTroveState(*self.troveToWatch) state = buildtrove.stateNames[state] name = self.troveToWatch[1][0].split(':', 1)[0] # remove :source context = self.troveToWatch[1][3] d = dict(jobId=self.troveToWatch[0], name=name, state=state, context=(context and '{%s}' % context or '')) else: d = dict(jobId='(None)', name='(None)', state='', context='') if not self.state.jobActive(): tailing = 'Job %s' % self.state.getJobStateName() elif self.watchTroves: tailing = 
'Details on' else: tailing = 'Details off' d['tailing'] = tailing self.prompt = self.promptFormat % d self.erasePrompt() self.writePrompt() def erasePrompt(self): self.out.write('\r%s\r' % (' '*self.lastLen)) def writePrompt(self): self.out.write(self.prompt) self.lastLen = len(self.prompt) self.out.flush() def setWatchTroves(self, watchTroves=True): self.watchTroves = watchTroves self.updatePrompt() def getWatchTroves(self): return self.watchTroves def setTroveToWatch(self, jobId, troveTuple): self.troveToWatch = jobId, troveTuple self.updatePrompt() def _watchTrove(self, jobId, troveTuple): if not self.watchTroves: return False return self.troveToWatch == (jobId, troveTuple) def displayTroveStates(self): if not self.troveToWatch: return self.erasePrompt() job = self.client.getJob(self.troveToWatch[0]) query.displayTrovesByState(job, out=self.out) self.writePrompt() def setPrompt(self, promptFormat): self.promptFormat = promptFormat self.updatePrompt() def updateBuildLog(self, jobId, troveTuple): if not self._watchTrove(jobId, troveTuple): return mark = self.getMark(jobId, troveTuple) if mark is None: return try: moreData, data, mark = self.client.getTroveBuildLog(jobId, troveTuple, mark) except: return if data and data != '\n': self.erasePrompt() if data[0] == '\n': # we've already got a \n because we've cleared # the prompt. 
data = data[1:] self.out.write(data) if data[-1] != '\n': self.out.write('\n') self.writePrompt() if not moreData: mark = None self.setMark(jobId, troveTuple, mark) def getMark(self, jobId, troveTuple): if (jobId, troveTuple) not in self.buildingTroves: # display max 80 lines of back log self.buildingTroves[jobId, troveTuple] = -80 return self.buildingTroves[jobId, troveTuple] def setMark(self, jobId, troveTuple, mark): self.buildingTroves[jobId, troveTuple] = mark def _jobTrovesSet(self, jobId, troveList): self._msg('[%d] - job troves set' % jobId) self.troveToWatch = jobId, troveList[0] self.updatePrompt() def _jobStateUpdated(self, jobId, state, status): _AbstractDisplay._jobStateUpdated(self, jobId, state, status) state = buildjob.stateNames[state] if self._isFinished() and self.troveToWatch: self.updateBuildLog(*self.troveToWatch) self._msg('[%d] - State: %s' % (jobId, state)) if status: self._msg('[%d] - %s' % (jobId, status)) self.updatePrompt() def _jobLogUpdated(self, jobId, state, status): self._msg('[%d] %s' % (jobId, status)) def _troveStateUpdated(self, (jobId, troveTuple), state, status): isBuilding = (state == buildtrove.TroveState.BUILDING) state = buildtrove.stateNames[state] if troveTuple[3]: name = '%s{%s}' % (troveTuple[0], troveTuple[3]) else: name = troveTuple[0] self._msg('[%d] - %s - State: %s' % (jobId, name, state)) if status and self._watchTrove(jobId, troveTuple): self._msg('[%d] - %s - %s' % (jobId, name, status)) self.updatePrompt() def _troveLogUpdated(self, (jobId, troveTuple), state, status): if self._watchTrove(jobId, troveTuple): state = buildtrove.stateNames[state] self._msg('[%d] - %s - %s' % (jobId, troveTuple[0], status)) def _trovePreparingChroot(self, (jobId, troveTuple), host, path): if not self._watchTrove(jobId, troveTuple): return if host == '_local_': msg = 'Chroot at %s' % path else: msg = 'Chroot at Node %s:%s' % (host, path) self._msg('[%d] - %s - %s' % (jobId, troveTuple[0], msg)) class OutBuffer(object): def 
__init__(self, fd): if fd is None: fd = sys.stdout.fileno() elif not isinstance(out, int): fd = out.fileno() self.fd = fd self.data = [] def write(self, data): self.data.append(data) def fileno(self): return self.fd def flush(self): while self.data: self.check() def check(self): while self.data: ready = select.select([], [self.fd], [], 0.1)[1] if not ready: return rc = os.write(self.fd, self.data[0]) if rc < len(self.data[0]): self.data[0] = self.data[0][rc:] else: self.data.pop(0) class DisplayState(object):#xmlrpc.BasicXMLRPCStatusSubscriber): def __init__(self, client): self.troves = [] self.states = {} self.buildingTroves = {} self.jobId = None self.client = client self.jobState = None def _primeOutput(self, jobId): #assert(not self.jobId) self.jobId = jobId job = self.client.getJob(jobId, withTroves=False) self.jobState = job.state if job.isBuilding() or job.isFinished() or job.isFailed(): self.updateTrovesForJob(jobId) def jobActive(self): return self.jobState in ( buildjob.JOB_STATE_STARTED, buildjob.JOB_STATE_LOADING, buildjob.JOB_STATE_LOADED, buildjob.JOB_STATE_BUILD, ) def getJobStateName(self): if self.jobState is None: return 'None' return buildjob.stateNames[self.jobState] def isFailed(self, jobId, troveTuple): return (self.getTroveState(jobId, troveTuple) == buildtrove.TroveState.FAILED) def isBuilding(self, jobId, troveTuple): return self.getTroveState(jobId, troveTuple) in ( buildtrove.TroveState.BUILDING, buildtrove.TroveState.PREPARING, buildtrove.TroveState.RESOLVING) def isFailed(self, jobId, troveTuple): # don't iterate through unbuildable - they are failures due to # secondary causes. 
return self.getTroveState(jobId, troveTuple) in ( buildtrove.TroveState.FAILED,) def findTroveByName(self, troveName): startsWith = None for jobId, troveTuple in sorted(self.states): if troveTuple[0].split(':', 1)[0] == troveName: # exact matches take priority return (jobId, troveTuple) elif troveTuple[0].startswith(troveName) and startsWith is None: startsWith = (jobId, troveTuple) return startsWith def getTroveState(self, jobId, troveTuple): return self.states[jobId, troveTuple] def getBuildingTroves(self): return [ x[0] for x in self.states.iteritems() if x[1] in (buildtrove.TroveState.BUILDING, buildtrove.TroveState.RESOLVING) ] def updateTrovesForJob(self, jobId): self.troves = [] self.states = {} for state, troveTupleList in self.client.listTrovesByState(jobId).items(): for troveTuple in troveTupleList: self.troves.append((jobId, troveTuple)) self.states[jobId, troveTuple] = state self.troves.sort() def _troveStateUpdated(self, (jobId, troveTuple), state, status): if (jobId, troveTuple) not in self.states: self.updateTrovesForJob(jobId) else: self.states[jobId, troveTuple] = state def _jobStateUpdated(self, jobId, state, status): self.jobState = state if self._isBuilding(): self.updateTrovesForJob(jobId) def _jobTrovesSet(self, jobId, troveList): self.updateTrovesForJob(jobId) def _isBuilding(self): return self.jobState in (buildjob.JOB_STATE_BUILD, buildjob.JOB_STATE_STARTED) def _isFinished(self): return self.jobState in ( buildjob.JOB_STATE_FAILED, buildjob.JOB_STATE_BUILT) class DisplayManager(object):#xmlrpc.BasicXMLRPCStatusSubscriber): displayClass = JobLogDisplay stateClass = DisplayState def __init__(self, client, showBuildLogs, out=None, exitOnFinish=None): self.termInfo = set_raw_mode() if out is None: out = open('/dev/tty', 'w') self.state = self.stateClass(client) self.display = self.displayClass(client, self.state, out) self.client = client self.troveToWatch = None self.troveIndex = 0 self.showBuildLogs = showBuildLogs if exitOnFinish is None: 
exitOnFinish = False self.exitOnFinish = exitOnFinish def _receiveEvents(self, *args, **kw): methodname = '_receiveEvents' method = getattr(self.state, methodname, None) if method: try: method(*args) except errors.uncatchableExceptions: raise except Exception, err: print 'Error in handler: %s\n%s' % (err, traceback.format_exc()) method = getattr(self.display, methodname, None) if method: try: method(*args) except errors.uncatchableExceptions: raise except Exception, err: print 'Error in handler: %s\n%s' % (err, traceback.format_exc()) return '' def getCurrentTrove(self): if self.state.troves: return self.state.troves[self.troveIndex] else: return None def _primeOutput(self, jobId): self.state._primeOutput(jobId) self.display._msg('Watching job %s' % jobId) if self.getCurrentTrove(): self.displayTrove(*self.getCurrentTrove()) def displayTrove(self, jobId, troveTuple): self.display.setTroveToWatch(jobId, troveTuple) state = self.state.getTroveState(jobId, troveTuple) state = buildtrove.stateNames[state] def _serveLoopHook(self): ready = select.select([sys.stdin], [], [], 0.1)[0] if ready: cmd = sys.stdin.read(1) if cmd == '\x1b': cmd += sys.stdin.read(2) if cmd == ' ': self.do_switch_log() elif cmd == 'n' or cmd == '\x1b[C': self.do_next() elif cmd == 'p' or cmd == '\x1b[D': self.do_prev() elif cmd == 'q': sys.exit(0) elif cmd == 'h': self.do_help() elif cmd == 'b': self.do_next_building() elif cmd == 'f': self.do_next_failed() elif cmd == 'i': self.do_info() elif cmd == 'l': self.do_log() elif cmd == 's': self.do_status() elif cmd == 'g': self.do_goto() if self.showBuildLogs: for jobId, troveTuple in self.state.getBuildingTroves(): self.display.updateBuildLog(jobId, troveTuple) def do_next(self): if not self.state.troves: return self.troveIndex = (self.troveIndex + 1) % len(self.state.troves) if self.getCurrentTrove(): self.displayTrove(*self.getCurrentTrove()) def do_next_building(self): if not self.state.troves: return startIndex = self.troveIndex self.troveIndex 
= (self.troveIndex + 1) % len(self.state.troves) while (not self.state.isBuilding(*self.getCurrentTrove()) and self.troveIndex != startIndex): self.troveIndex = (self.troveIndex + 1) % len(self.state.troves) if self.troveIndex != startIndex: self.displayTrove(*self.getCurrentTrove()) def do_goto(self): if not self.state.troves: print 'No troves loaded yet' return self.display.erasePrompt() restore_terminal(*self.termInfo) try: troveName = raw_input("\nName or part of name of trove: ") troveInfo = self.state.findTroveByName(troveName) if not troveInfo: print 'No trove starting with "%s"' % troveName self.display.writePrompt() return while not self.getCurrentTrove() == troveInfo: self.troveIndex = (self.troveIndex + 1) % len(self.state.troves) self.displayTrove(*self.getCurrentTrove()) finally: self.termInfo = set_raw_mode() def do_next_failed(self): if not self.state.troves: return startIndex = self.troveIndex self.troveIndex = (self.troveIndex + 1) % len(self.state.troves) while (not self.state.isFailed(*self.getCurrentTrove()) and self.troveIndex != startIndex): self.troveIndex = (self.troveIndex + 1) % len(self.state.troves) if self.troveIndex != startIndex: self.displayTrove(*self.getCurrentTrove()) def do_prev(self): if not self.state.troves: return self.troveIndex = (self.troveIndex - 1) % len(self.state.troves) if self.getCurrentTrove(): self.displayTrove(*self.getCurrentTrove()) def do_info(self): if not self.getCurrentTrove(): return jobId, troveTuple = self.getCurrentTrove() job = self.client.getJob(jobId) trove = job.getTrove(*troveTuple) dcfg = query.DisplayConfig(self.client, showTracebacks=True) self.display.setWatchTroves(False) self.display.erasePrompt() query.displayTroveDetail(dcfg, job, trove, out=self.display.out) self.display.writePrompt() def do_log(self): if not self.getCurrentTrove(): return jobId, troveTuple = self.getCurrentTrove() job = self.client.getJob(jobId) trove = job.getTrove(*troveTuple) moreData, data, mark = 
self.client.getTroveBuildLog(jobId, troveTuple, 0) if not data: self.display._msg('No log yet.') return fd, path = tempfile.mkstemp() os.fdopen(fd, 'w').write(data) try: os.system('less %s' % path) finally: os.remove(path) def do_help(self): print print "<space>: Turn on/off tailing of log" print "<left>/<right>: move to next/prev trove in list" print "b: move to next building trove" print "f: move to next failed trove" print "g: go to a particular trove" print "h: print help" print "i: display info for this trove" print "l: display log for this trove in less" print "q: quit" print "s: display status on all troves" def do_status(self): self.display.setWatchTroves(False) self.display.displayTroveStates() def do_switch_log(self): self.display.setWatchTroves(not self.display.getWatchTroves()) def _isFinished(self): return self.display._isFinished() def _shouldExit(self): return self._isFinished() and self.exitOnFinish def close(self): self.display.close() restore_terminal(*self.termInfo)
import socket

PORT = 8090
MAX_OPEN_REQUESTS = 5


def process_client(clientsocket):
    """Serve a single HTTP client: read its request, reply with myhtml.html.

    If the file cannot be read, a minimal placeholder page is returned
    instead, so a missing file no longer kills the connection with an
    unhandled exception (and the file handle is always closed).
    """
    print(clientsocket)
    data = clientsocket.recv(1024)  # request is read but not parsed
    print(data)
    try:
        with open("myhtml.html", "r") as f:
            web_contents = f.read()
    except OSError:
        # Fallback body when the file is missing/unreadable.
        web_contents = "<h1>Received</h1>"
    body = web_contents.encode()
    # HTTP/1.1 requires CRLF line endings and a reason phrase on the
    # status line; Content-Length must count the encoded bytes.
    web_headers = "HTTP/1.1 200 OK"
    web_headers += "\r\n" + "Content-Type: text/html"
    web_headers += "\r\n" + "Content-Length: %i" % len(body)
    clientsocket.send(web_headers.encode() + b"\r\n\r\n" + body)
    clientsocket.close()


if __name__ == "__main__":
    # Guarded so importing this module does not start the server.
    serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    ip = socket.gethostbyname(socket.gethostname())
    try:
        serversocket.bind((ip, PORT))
        # Queue up to MAX_OPEN_REQUESTS connections before refusing new ones.
        serversocket.listen(MAX_OPEN_REQUESTS)
        while True:
            # Log the address we actually bound to (the original printed a
            # stale hard-coded hostname).
            print("Waiting for connections at %s %i" % (ip, PORT))
            (clientsocket, address) = serversocket.accept()
            # Single-threaded: handle one client at a time.
            process_client(clientsocket)
    except socket.error:
        print("Problems using port %i. Do you have permission?" % PORT)
from .inventory import ( GetInventoryRequest, Inventory, ListInventoriesRequest, ListInventoriesResponse, InventoryView, ) from .os_policy import OSPolicy from .os_policy_assignment_reports import ( GetOSPolicyAssignmentReportRequest, ListOSPolicyAssignmentReportsRequest, ListOSPolicyAssignmentReportsResponse, OSPolicyAssignmentReport, ) from .os_policy_assignments import ( CreateOSPolicyAssignmentRequest, DeleteOSPolicyAssignmentRequest, GetOSPolicyAssignmentRequest, ListOSPolicyAssignmentRevisionsRequest, ListOSPolicyAssignmentRevisionsResponse, ListOSPolicyAssignmentsRequest, ListOSPolicyAssignmentsResponse, OSPolicyAssignment, OSPolicyAssignmentOperationMetadata, UpdateOSPolicyAssignmentRequest, ) from .osconfig_common import FixedOrPercent from .patch_deployments import ( CreatePatchDeploymentRequest, DeletePatchDeploymentRequest, GetPatchDeploymentRequest, ListPatchDeploymentsRequest, ListPatchDeploymentsResponse, MonthlySchedule, OneTimeSchedule, PatchDeployment, PausePatchDeploymentRequest, RecurringSchedule, ResumePatchDeploymentRequest, UpdatePatchDeploymentRequest, WeekDayOfMonth, WeeklySchedule, ) from .patch_jobs import ( AptSettings, CancelPatchJobRequest, ExecStep, ExecStepConfig, ExecutePatchJobRequest, GcsObject, GetPatchJobRequest, GooSettings, Instance, ListPatchJobInstanceDetailsRequest, ListPatchJobInstanceDetailsResponse, ListPatchJobsRequest, ListPatchJobsResponse, PatchConfig, PatchInstanceFilter, PatchJob, PatchJobInstanceDetails, PatchRollout, WindowsUpdateSettings, YumSettings, ZypperSettings, ) from .vulnerability import ( CVSSv3, GetVulnerabilityReportRequest, ListVulnerabilityReportsRequest, ListVulnerabilityReportsResponse, VulnerabilityReport, ) __all__ = ( "GetInventoryRequest", "Inventory", "ListInventoriesRequest", "ListInventoriesResponse", "InventoryView", "OSPolicy", "GetOSPolicyAssignmentReportRequest", "ListOSPolicyAssignmentReportsRequest", "ListOSPolicyAssignmentReportsResponse", "OSPolicyAssignmentReport", 
"CreateOSPolicyAssignmentRequest", "DeleteOSPolicyAssignmentRequest", "GetOSPolicyAssignmentRequest", "ListOSPolicyAssignmentRevisionsRequest", "ListOSPolicyAssignmentRevisionsResponse", "ListOSPolicyAssignmentsRequest", "ListOSPolicyAssignmentsResponse", "OSPolicyAssignment", "OSPolicyAssignmentOperationMetadata", "UpdateOSPolicyAssignmentRequest", "FixedOrPercent", "CreatePatchDeploymentRequest", "DeletePatchDeploymentRequest", "GetPatchDeploymentRequest", "ListPatchDeploymentsRequest", "ListPatchDeploymentsResponse", "MonthlySchedule", "OneTimeSchedule", "PatchDeployment", "PausePatchDeploymentRequest", "RecurringSchedule", "ResumePatchDeploymentRequest", "UpdatePatchDeploymentRequest", "WeekDayOfMonth", "WeeklySchedule", "AptSettings", "CancelPatchJobRequest", "ExecStep", "ExecStepConfig", "ExecutePatchJobRequest", "GcsObject", "GetPatchJobRequest", "GooSettings", "Instance", "ListPatchJobInstanceDetailsRequest", "ListPatchJobInstanceDetailsResponse", "ListPatchJobsRequest", "ListPatchJobsResponse", "PatchConfig", "PatchInstanceFilter", "PatchJob", "PatchJobInstanceDetails", "PatchRollout", "WindowsUpdateSettings", "YumSettings", "ZypperSettings", "CVSSv3", "GetVulnerabilityReportRequest", "ListVulnerabilityReportsRequest", "ListVulnerabilityReportsResponse", "VulnerabilityReport", )
import pytest
import numpy as np

from test.zoo.pipeline.utils.test_utils import ZooTestCase
from zoo.chronos.detector.anomaly.ae_detector import AEDetector


class TestAEDetector(ZooTestCase):
    """Unit tests for AEDetector: keras/torch backends, rolled/unrolled
    windows, and invalid-input corner cases."""

    def setup_method(self, method):
        pass

    def teardown_method(self, method):
        pass

    def create_data(self):
        """Build a sine wave with an injected anomaly plateau at [600, 800)."""
        n_cycles = 10
        timeline = np.arange(0, n_cycles * np.pi, 0.01)
        signal = np.sin(timeline)
        signal[600:800] = 10
        return signal

    def _fit_and_check(self, detector, y):
        """Fit the detector, then verify score/index lengths match the input."""
        detector.fit(y)
        scores = detector.score()
        assert len(scores) == len(y)
        indexes = detector.anomaly_indexes()
        assert len(indexes) == int(detector.ratio * len(y))

    def test_ae_fit_score_rolled_keras(self):
        self._fit_and_check(AEDetector(roll_len=314), self.create_data())

    def test_ae_fit_score_rolled_pytorch(self):
        self._fit_and_check(AEDetector(roll_len=314, backend="torch"),
                            self.create_data())

    def test_ae_fit_score_unrolled(self):
        self._fit_and_check(AEDetector(roll_len=0), self.create_data())

    def test_corner_cases(self):
        y = self.create_data()
        # Unknown backend is rejected at fit time.
        with pytest.raises(ValueError):
            AEDetector(roll_len=314, backend="dummy").fit(y)
        detector = AEDetector(roll_len=314)
        # Scoring before fitting is an error.
        with pytest.raises(RuntimeError):
            detector.score()
        # Input shorter than the roll window is rejected.
        with pytest.raises(ValueError):
            detector.fit(np.array([1]))
        # Multi-dimensional input is rejected.
        with pytest.raises(ValueError):
            detector.fit(self.create_data().reshape(2, -1))
from __future__ import absolute_import, unicode_literals import os from django import VERSION as DJANGO_VERSION from django.utils.translation import ugettext_lazy as _ USE_MODELTRANSLATION = False ALLOWED_HOSTS = ['localhost', '127.0.0.1', '111.222.333.444'] TIME_ZONE = 'UTC' USE_TZ = True LANGUAGE_CODE = "en" LANGUAGES = ( ('en', _('English')), ) DEBUG = False SESSION_EXPIRE_AT_BROWSER_CLOSE = True SITE_ID = 1 USE_I18N = False AUTHENTICATION_BACKENDS = ("mezzanine.core.auth_backends.MezzanineBackend",) FILE_UPLOAD_PERMISSIONS = 0o644 DATABASES = { "default": { # Add "postgresql_psycopg2", "mysql", "sqlite3" or "oracle". "ENGINE": "django.db.backends.", # DB name or path to database file if using sqlite3. "NAME": "cloudSolarDB", # Not used with sqlite3. "USER": "valia", # Not used with sqlite3. "PASSWORD": "scenetwork", # Set to empty string for localhost. Not used with sqlite3. "HOST": "localhost", # Set to empty string for default. Not used with sqlite3. "PORT": "5432", } } PROJECT_APP_PATH = os.path.dirname(os.path.abspath(__file__)) PROJECT_APP = os.path.basename(PROJECT_APP_PATH) PROJECT_ROOT = BASE_DIR = os.path.dirname(PROJECT_APP_PATH) CACHE_MIDDLEWARE_KEY_PREFIX = PROJECT_APP STATIC_URL = "/static/" STATIC_ROOT = os.path.join(PROJECT_ROOT, STATIC_URL.strip("/")) MEDIA_URL = STATIC_URL + "media/" MEDIA_ROOT = os.path.join(PROJECT_ROOT, *MEDIA_URL.strip("/").split("/")) ROOT_URLCONF = "%s.urls" % PROJECT_APP TEMPLATES = [ { "BACKEND": "django.template.backends.django.DjangoTemplates", "DIRS": [ os.path.join(PROJECT_ROOT, "templates") ], "APP_DIRS": True, "OPTIONS": { "context_processors": [ "django.contrib.auth.context_processors.auth", "django.contrib.messages.context_processors.messages", "django.template.context_processors.debug", "django.template.context_processors.i18n", "django.template.context_processors.static", "django.template.context_processors.media", "django.template.context_processors.request", "django.template.context_processors.tz", 
"mezzanine.conf.context_processors.settings", "mezzanine.pages.context_processors.page", ], "builtins": [ "mezzanine.template.loader_tags", ], }, }, ] if DJANGO_VERSION < (1, 9): del TEMPLATES[0]["OPTIONS"]["builtins"] INSTALLED_APPS = ( "django.contrib.admin", "django.contrib.auth", "django.contrib.contenttypes", "django.contrib.redirects", "django.contrib.sessions", "django.contrib.sites", "django.contrib.sitemaps", "django.contrib.staticfiles", "mezzanine.boot", "mezzanine.conf", "mezzanine.core", "mezzanine.generic", "mezzanine.pages", "mezzanine.blog", "mezzanine.forms", "mezzanine.galleries", "mezzanine.twitter", # "mezzanine.accounts", # "mezzanine.mobile", ) MIDDLEWARE_CLASSES = ( "mezzanine.core.middleware.UpdateCacheMiddleware", 'django.contrib.sessions.middleware.SessionMiddleware', # Uncomment if using internationalisation or localisation # 'django.middleware.locale.LocaleMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', "mezzanine.core.request.CurrentRequestMiddleware", "mezzanine.core.middleware.RedirectFallbackMiddleware", "mezzanine.core.middleware.TemplateForDeviceMiddleware", "mezzanine.core.middleware.TemplateForHostMiddleware", "mezzanine.core.middleware.AdminLoginInterfaceSelectorMiddleware", "mezzanine.core.middleware.SitePermissionMiddleware", # Uncomment the following if using any of the SSL settings: # "mezzanine.core.middleware.SSLRedirectMiddleware", "mezzanine.pages.middleware.PageMiddleware", "mezzanine.core.middleware.FetchFromCacheMiddleware", ) PACKAGE_NAME_FILEBROWSER = "filebrowser_safe" PACKAGE_NAME_GRAPPELLI = "grappelli_safe" OPTIONAL_APPS = ( "debug_toolbar", "django_extensions", "compressor", PACKAGE_NAME_FILEBROWSER, 
PACKAGE_NAME_GRAPPELLI, ) f = os.path.join(PROJECT_APP_PATH, "local_settings.py") if os.path.exists(f): import sys import imp module_name = "%s.local_settings" % PROJECT_APP module = imp.new_module(module_name) module.__file__ = f sys.modules[module_name] = module exec(open(f, "rb").read()) try: from mezzanine.utils.conf import set_dynamic_settings except ImportError: pass else: set_dynamic_settings(globals())
import pyautogui, win32api, win32con, ctypes, autoit
from PIL import ImageOps, Image, ImageGrab
from numpy import *
import os
import time
import cv2
import random
from Bot import *


def main():
    """Main bot loop: monitor own HP / target HP via Bot and drive the game
    window through AutoIt function-key presses.

    NOTE(review): indentation below was reconstructed from a flattened
    source; the nesting of the potion/counter branches should be verified
    against the original file.
    """
    bot = Bot()
    # Wait up to 5s for the game window identified by bot.title.
    autoit.win_wait(bot.title, 5)
    counter = 0
    poitonUse = 0       # potion cooldown counter (sic: original spelling kept)
    cycle = True
    fullCounter = 0     # consecutive "res == 3" observations
    while cycle:
        hpstatus = bot.checkOwnHp()
        print 'hp ' + str(hpstatus)
        if hpstatus == 0:
            # Own HP bar gone: assume death, take a screenshot and stop.
            autoit.control_send(bot.title, '', '{F9}', 0)
            bot.sleep(0.3,0.6)
            print 'Dead'
            # NOTE(review): leftCornerx/leftCornery/x2/fullY2 are not defined
            # in this file -- presumably exported by `from Bot import *`;
            # confirm, otherwise this line raises NameError.
            cv2.imwrite('Dead' + str(int(time.time())) + '.png',
                        bot.getScreen(leftCornerx,leftCornery,x2,fullY2))
            cycle = False
        if hpstatus == 1:
            # Low HP: drink a potion (F10), but at most once per 6 iterations.
            if poitonUse == 0:
                autoit.control_send(bot.title, '', '{F10}', 0)
            poitonUse += 1
            if poitonUse > 5:
                poitonUse = 0
        else:
            poitonUse = 0

        res = bot.findHP();
        print 'tgs ' + str(res)
        if res == 3:
            # Target at full HP: count how long it stays full (stuck attack?).
            fullCounter += 1
            print 'fc ' + str(fullCounter)
            autoit.control_send(bot.title, '', '{F1}', 0)
        else:
            fullCounter = 0
        if fullCounter > 4:
            # Target HP never dropped: deselect (ESC), retarget (F3), attack.
            autoit.control_send(bot.title, '', '{ESC}', 0)
            bot.sleep(0.3,0.6)
            autoit.control_send(bot.title, '', '{F3}', 0)
            bot.sleep(0.1,0.3)
            autoit.control_send(bot.title, '', '{F1}', 0)
            # bot.mouseRotate()
            fullCounter = 0
        if res > 0:
            # A target with visible HP exists: keep attacking (F1).
            autoit.control_send(bot.title, '', '{F1}', 0)
            counter = 0
            if res == 1 or res == 3:
                bot.sleep(0.3,0.6)
            if res > 1 and res < 3:
                bot.sleep(1,3)
            if res == 1:
                # Target nearly dead: retarget (F3), loot/skill (F2), attack.
                autoit.control_send(bot.title, '', '{F3}', 0)
                bot.sleep(0.3,0.6)
                autoit.control_send(bot.title, '', '{F2}', 0)
                bot.sleep(0.3,0.6)
                autoit.control_send(bot.title, '', '{F1}', 0)
        else:
            # No target found: try F3-targeting a few times, then F7.
            fullCounter = 0
            if counter < 3:
                autoit.control_send(bot.title, '', '{F3}', 0)
                bot.sleep(0.5,0.8)
                autoit.control_send(bot.title, '', '{F1}', 0)
                print 'F3'
            if counter > 2:
                # bot.findTarget()
                autoit.control_send(bot.title, '', '{F7}', 0)
            # if counter > 3:
            #     autoit.control_send(bot.title, '', '{F8}', 0)
            #     counter = 0
            counter += 1
            print 'cnt ' + str(counter)
        pass


if __name__ == '__main__':
    main()
def fibs():
    """Yield Fibonacci numbers starting from 1: 1, 2, 3, 5, 8, ..."""
    previous, current = 0, 1
    while True:
        previous, current = current, previous + current
        yield current


def problem2(bound):
    """Return the sum of even Fibonacci numbers strictly below *bound*.

    Project Euler problem 2: for bound=4000000 the answer is 4613732.
    """
    # Renamed from `sum` so the builtin of the same name is not shadowed.
    total = 0
    for n in fibs():
        if n >= bound:
            break
        if n % 2 == 0:
            total += n
    return total


# print(x) with a single argument is valid in both Python 2 and Python 3,
# unlike the original `print x` statement form.
print(problem2(4000000))
from click.testing import CliRunner

from twelve_tone.cli import main


def test_main():
    """The CLI entry point exits cleanly when invoked with no arguments."""
    outcome = CliRunner().invoke(main, [])
    assert outcome.exit_code == 0
import numpy as np
from numba import cuda, float32
from numba.cuda.testing import unittest, CUDATestCase


def generate_input(n):
    # A: n x n float32 matrix with values 0 .. n*n-1;
    # B: length-n vector 0 .. n-1 of the same dtype.
    A = np.array(np.arange(n * n).reshape(n, n), dtype=np.float32)
    B = np.array(np.arange(n) + 0, dtype=A.dtype)
    return A, B


class TestCudaNonDet(CUDATestCase):
    def test_for_pre(self):
        """Test issue with loop not running due to bad sign-extension at the for loop
        precondition.
        """

        @cuda.jit(argtypes=[float32[:, :], float32[:, :], float32[:]])
        def diagproduct(c, a, b):
            # Grid-stride 2D loop: each thread starts at its global (x, y)
            # index and strides by the total grid width/height, so any
            # grid/block shape covers the whole matrix.
            startX, startY = cuda.grid(2)
            gridX = cuda.gridDim.x * cuda.blockDim.x
            gridY = cuda.gridDim.y * cuda.blockDim.y
            height = c.shape[0]
            width = c.shape[1]
            # The regression: this range() precondition must still trigger
            # the loop despite sign-extension issues in the lowered IR.
            for x in range(startX, width, (gridX)):
                for y in range(startY, height, (gridY)):
                    c[y, x] = a[y, x] * b[x]

        N = 8
        A, B = generate_input(N)
        F = np.empty(A.shape, dtype=A.dtype)
        blockdim = (32, 8)
        griddim = (1, 1)

        dA = cuda.to_device(A)
        dB = cuda.to_device(B)
        # copy=False: F is an uninitialized output buffer, no need to
        # transfer its host contents to the device.
        dF = cuda.to_device(F, copy=False)
        diagproduct[griddim, blockdim](dF, dA, dB)

        # Reference result: A @ diag(B), i.e. column j of A scaled by B[j].
        E = np.dot(A, np.diag(B))
        np.testing.assert_array_almost_equal(dF.copy_to_host(), E)


if __name__ == '__main__':
    unittest.main()
from setuptools import setup, find_packages


def _read(path):
    """Return the text content of *path*, closing the handle deterministically.

    The original called open(...).read() inline, leaking the file object.
    """
    with open(path) as f:
        return f.read()


setup(
    name = 'wechat-python-sdk',
    version = '0.5.7',
    keywords = ('wechat', 'sdk', 'wechat sdk'),
    # u'' prefix kept: the package also targets Python 2.
    description = u'微信公众平台Python开发包',
    long_description = _read("README.rst"),
    license = 'BSD License',
    url = 'https://github.com/doraemonext/wechat-python-sdk',
    author = 'doraemonext',
    author_email = 'doraemonext@gmail.com',
    packages = find_packages(),
    include_package_data = True,
    platforms = 'any',
    # splitlines() drops the trailing newlines that readlines() kept;
    # setuptools accepts either form.
    install_requires = _read("requirements.txt").splitlines(),
)
import sys import itertools from functools import reduce from operator import iadd import numpy from PyQt4.QtGui import ( QFormLayout, QGraphicsRectItem, QGraphicsGridLayout, QFontMetrics, QPen, QIcon, QPixmap, QLinearGradient, QPainter, QColor, QBrush, QTransform, QGraphicsWidget, QApplication ) from PyQt4.QtCore import Qt, QRect, QRectF, QSize, QPointF from PyQt4.QtCore import pyqtSignal as Signal import pyqtgraph as pg import Orange.data import Orange.misc from Orange.clustering import hierarchical from Orange.widgets import widget, gui, settings from Orange.widgets.utils import itemmodels, colorbrewer from .owhierarchicalclustering import DendrogramWidget, GraphicsSimpleTextList from Orange.widgets.io import FileFormat def _remove_item(item): item.setParentItem(None) scene = item.scene() if scene is not None: scene.removeItem(item) class DistanceMapItem(pg.ImageItem): """A distance matrix image with user selectable regions. """ class SelectionRect(QGraphicsRectItem): def boundingRect(self): return super().boundingRect().adjusted(-1, -1, 1, 1) def paint(self, painter, option, widget=None): t = painter.transform() rect = t.mapRect(self.rect()) painter.save() painter.setTransform(QTransform()) pwidth = self.pen().widthF() painter.setPen(self.pen()) painter.drawRect(rect.adjusted(pwidth, -pwidth, -pwidth, pwidth)) painter.restore() selectionChanged = Signal() Clear, Select, Commit = 1, 2, 4 def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.setAcceptedMouseButtons(Qt.LeftButton | Qt.RightButton) self.setAcceptHoverEvents(True) self.__selections = [] #: (QGraphicsRectItem, QRectF) | None self.__dragging = None def __select(self, area, command): if command & self.Clear: self.__clearSelections() if command & self.Select: area = area.normalized() intersects = [rect.intersects(area) for item, rect in self.__selections] def partition(predicate, iterable): t1, t2 = itertools.tee(iterable) return (itertools.filterfalse(predicate, t1), 
filter(predicate, t2)) def intersects(selection): _, selarea = selection return selarea.intersects(area) disjoint, intersection = partition(intersects, self.__selections) disjoint = list(disjoint) intersection = list(intersection) # merge intersecting selections into a single area area = reduce(QRect.united, (area for _, area in intersection), area) visualarea = self.__visualRectForSelection(area) item = DistanceMapItem.SelectionRect(visualarea, self) item.setPen(QPen(Qt.red, 0)) selection = disjoint + [(item, area)] for item, _ in intersection: _remove_item(item) self.__selections = selection self.selectionChanged.emit() def __elastic_band_select(self, area, command): if command & self.Clear and self.__dragging: item, area = self.__dragging _remove_item(item) self.__dragging = None if command & self.Select: if self.__dragging: item, _ = self.__dragging else: item = DistanceMapItem.SelectionRect(self) item.setPen(QPen(Qt.red, 0)) # intersection with existing regions intersection = [(item, selarea) for item, selarea in self.__selections if area.intersects(selarea)] fullarea = reduce( QRect.united, (selarea for _, selarea in intersection), area ) visualarea = self.__visualRectForSelection(fullarea) item.setRect(visualarea) self.__dragging = item, area if command & self.Commit and self.__dragging: item, area = self.__dragging self.__select(area, self.Select) def mousePressEvent(self, event): if event.button() == Qt.LeftButton: r, c = self._cellAt(event.pos()) if r != -1 and c != -1: # Clear existing selection # TODO: Fix extended selection. 
self.__select(QRect(), self.Clear) selrange = QRect(c, r, 1, 1) self.__elastic_band_select(selrange, self.Select | self.Clear) elif event.button() == Qt.RightButton: self.__select(QRect(), self.Clear) super().mousePressEvent(event) event.accept() def mouseMoveEvent(self, event): if event.buttons() & Qt.LeftButton and self.__dragging: r1, c1 = self._cellAt(event.buttonDownPos(Qt.LeftButton)) r2, c2 = self._cellCloseTo(event.pos()) selrange = QRect(c1, r1, 1, 1).united(QRect(c2, r2, 1, 1)) self.__elastic_band_select(selrange, self.Select) super().mouseMoveEvent(event) event.accept() def mouseReleaseEvent(self, event): if event.button() == Qt.LeftButton and self.__dragging: r1, c1 = self._cellAt(event.buttonDownPos(Qt.LeftButton)) r2, c2 = self._cellCloseTo(event.pos()) selrange = QRect(c1, r1, 1, 1).united(QRect(c2, r2, 1, 1)) self.__elastic_band_select(selrange, self.Select | self.Commit) self.__elastic_band_select(QRect(), self.Clear) super().mouseReleaseEvent(event) event.accept() def _cellAt(self, pos): """Return the i, j cell index at `pos` in local coordinates.""" if self.image is None: return -1, -1 else: h, w = self.image.shape i, j = numpy.floor([pos.y(), pos.x()]) if 0 <= i < h and 0 <= j < w: return int(i), int(j) else: return -1, -1 def _cellCloseTo(self, pos): """Return the i, j cell index closest to `pos` in local coordinates.""" if self.image is None: return -1, -1 else: h, w = self.image.shape i, j = numpy.floor([pos.y(), pos.x()]) i = numpy.clip(i, 0, h - 1) j = numpy.clip(j, 0, w - 1) return int(i), int(j) def __clearSelections(self): for item, _ in self.__selections: _remove_item(item) self.__selections = [] def __visualRectForSelection(self, rect): h, w = self.image.shape rect = rect.normalized() rect = rect.intersected(QRect(0, 0, w, h)) r1, r2 = rect.top(), rect.bottom() + 1 c1, c2 = rect.left(), rect.right() + 1 return QRectF(QPointF(c1, r1), QPointF(c2, r2)) def __selectionForArea(self, area): r1, c1 = self._cellAt(area.topLeft()) r2, c2 = 
self._cellAt(area.bottomRight()) selarea = QRect(c1, r1, c2 - c1 + 1, r2 - r1 + 1) return selarea.normalized() def selections(self): selections = [self.__selectionForArea(area) for _, area in self.__selections] return [(range(r.top(), r.bottom() + 1), range(r.left(), r.right() + 1)) for r in selections] def hoverMoveEvent(self, event): super().hoverMoveEvent(event) i, j = self._cellAt(event.pos()) if i != -1 and j != -1: d = self.image[i, j] self.setToolTip("{}, {}: {:.3f}".format(i, j, d)) else: self.setToolTip("") _color_palettes = sorted(colorbrewer.colorSchemes["sequential"].items()) + \ [("Blue-Yellow", {2: [(0, 0, 255), (255, 255, 0)]})] _default_colormap_index = len(_color_palettes) - 1 class OWDistanceMap(widget.OWWidget): name = "Distance Map" description = "Visualize a distance matrix." icon = "icons/DistanceMap.svg" priority = 1200 inputs = [("Distances", Orange.misc.DistMatrix, "set_distances")] outputs = [("Data", Orange.data.Table), ("Features", widget.AttributeList)] settingsHandler = settings.PerfectDomainContextHandler() #: type of ordering to apply to matrix rows/columns NoOrdering, Clustering, OrderedClustering = 0, 1, 2 sorting = settings.Setting(NoOrdering) colormap = settings.Setting(_default_colormap_index) color_gamma = settings.Setting(0.0) color_low = settings.Setting(0.0) color_high = settings.Setting(1.0) annotation_idx = settings.ContextSetting(0, exclude_metas=False) autocommit = settings.Setting(True) graph_name = "grid_widget" # Disable clustering for inputs bigger than this _MaxClustering = 3000 # Disable cluster leaf ordering for inputs bigger than this _MaxOrderedClustering = 1000 def __init__(self): super().__init__() self.matrix = None self._tree = None self._ordered_tree = None self._sorted_matrix = None self._sort_indices = None self._selection = None box = gui.widgetBox(self.controlArea, "Element sorting", margin=0) self.sorting_cb = gui.comboBox( box, self, "sorting", items=["None", "Clustering", "Clustering with ordered 
leaves"], callback=self._invalidate_ordering) box = gui.widgetBox(self.controlArea, "Colors") self.colormap_cb = gui.comboBox( box, self, "colormap", callback=self._update_color ) self.colormap_cb.setIconSize(QSize(64, 16)) self.palettes = list(_color_palettes) init_color_combo(self.colormap_cb, self.palettes, QSize(64, 16)) self.colormap_cb.setCurrentIndex(self.colormap) form = QFormLayout( formAlignment=Qt.AlignLeft, labelAlignment=Qt.AlignLeft, fieldGrowthPolicy=QFormLayout.AllNonFixedFieldsGrow ) form.addRow( "Low", gui.hSlider(box, self, "color_low", minValue=0.0, maxValue=1.0, step=0.05, ticks=True, intOnly=False, createLabel=False, callback=self._update_color) ) form.addRow( "High", gui.hSlider(box, self, "color_high", minValue=0.0, maxValue=1.0, step=0.05, ticks=True, intOnly=False, createLabel=False, callback=self._update_color) ) box.layout().addLayout(form) box = gui.widgetBox(self.controlArea, "Annotations") self.annot_combo = gui.comboBox(box, self, "annotation_idx", callback=self._invalidate_annotations, contentsLength=12) self.annot_combo.setModel(itemmodels.VariableListModel()) self.annot_combo.model()[:] = ["None", "Enumeration"] self.controlArea.layout().addStretch() gui.auto_commit(self.controlArea, self, "autocommit", "Send data", "Auto send is on") self.inline_graph_report() self.view = pg.GraphicsView(background="w") self.mainArea.layout().addWidget(self.view) self.grid_widget = pg.GraphicsWidget() self.grid = QGraphicsGridLayout() self.grid_widget.setLayout(self.grid) self.viewbox = pg.ViewBox(enableMouse=False, enableMenu=False) self.viewbox.setAcceptedMouseButtons(Qt.NoButton) self.viewbox.setAcceptHoverEvents(False) self.grid.addItem(self.viewbox, 1, 1) self.left_dendrogram = DendrogramWidget( self.grid_widget, orientation=DendrogramWidget.Left, selectionMode=DendrogramWidget.NoSelection, hoverHighlightEnabled=False ) self.left_dendrogram.setAcceptedMouseButtons(Qt.NoButton) self.left_dendrogram.setAcceptHoverEvents(False) 
self.top_dendrogram = DendrogramWidget( self.grid_widget, orientation=DendrogramWidget.Top, selectionMode=DendrogramWidget.NoSelection, hoverHighlightEnabled=False ) self.top_dendrogram.setAcceptedMouseButtons(Qt.NoButton) self.top_dendrogram.setAcceptHoverEvents(False) self.grid.addItem(self.left_dendrogram, 1, 0) self.grid.addItem(self.top_dendrogram, 0, 1) self.right_labels = TextList( alignment=Qt.AlignLeft) self.bottom_labels = TextList( orientation=Qt.Horizontal, alignment=Qt.AlignRight) self.grid.addItem(self.right_labels, 1, 2) self.grid.addItem(self.bottom_labels, 2, 1) self.view.setCentralItem(self.grid_widget) self.left_dendrogram.hide() self.top_dendrogram.hide() self.right_labels.hide() self.bottom_labels.hide() self.matrix_item = None self.dendrogram = None self.grid_widget.scene().installEventFilter(self) def set_distances(self, matrix): self.closeContext() self.clear() self.error(0) if matrix is not None: N, _ = matrix.shape if N < 2: self.error(0, "Empty distance matrix.") matrix = None self.matrix = matrix if matrix is not None: self.set_items(matrix.row_items, matrix.axis) else: self.set_items(None) if matrix is not None: N, _ = matrix.shape else: N = 0 model = self.sorting_cb.model() item = model.item(2) msg = None if N > OWDistanceMap._MaxOrderedClustering: item.setFlags(item.flags() & ~Qt.ItemIsEnabled) if self.sorting == OWDistanceMap.OrderedClustering: self.sorting = OWDistanceMap.Clustering msg = "Cluster ordering was disabled due to the input " \ "matrix being to big" else: item.setFlags(item.flags() | Qt.ItemIsEnabled) item = model.item(1) if N > OWDistanceMap._MaxClustering: item.setFlags(item.flags() & ~Qt.ItemIsEnabled) if self.sorting == OWDistanceMap.Clustering: self.sorting = OWDistanceMap.NoOrdering msg = "Clustering was disabled due to the input " \ "matrix being to big" else: item.setFlags(item.flags() | Qt.ItemIsEnabled) self.information(1, msg) def set_items(self, items, axis=1): self.items = items model = 
self.annot_combo.model() if items is None: model[:] = ["None", "Enumeration"] elif not axis: model[:] = ["None", "Enumeration", "Attribute names"] elif isinstance(items, Orange.data.Table): annot_vars = list(items.domain) + list(items.domain.metas) model[:] = ["None", "Enumeration"] + annot_vars self.annotation_idx = 0 self.openContext(items.domain) elif isinstance(items, list) and \ all(isinstance(item, Orange.data.Variable) for item in items): model[:] = ["None", "Enumeration", "Name"] else: model[:] = ["None", "Enumeration"] self.annotation_idx = min(self.annotation_idx, len(model) - 1) def clear(self): self.matrix = None self.cluster = None self._tree = None self._ordered_tree = None self._sorted_matrix = None self._selection = [] self._clear_plot() def handleNewSignals(self): if self.matrix is not None: self._update_ordering() self._setup_scene() self._update_labels() self.unconditional_commit() def _clear_plot(self): def remove(item): item.setParentItem(None) item.scene().removeItem(item) if self.matrix_item is not None: self.matrix_item.selectionChanged.disconnect( self._invalidate_selection) remove(self.matrix_item) self.matrix_item = None self._set_displayed_dendrogram(None) self._set_labels(None) def _cluster_tree(self): if self._tree is None: self._tree = hierarchical.dist_matrix_clustering(self.matrix) return self._tree def _ordered_cluster_tree(self): if self._ordered_tree is None: tree = self._cluster_tree() self._ordered_tree = \ hierarchical.optimal_leaf_ordering(tree, self.matrix) return self._ordered_tree def _setup_scene(self): self._clear_plot() self.matrix_item = DistanceMapItem(self._sorted_matrix) # Scale the y axis to compensate for pg.ViewBox's y axis invert self.matrix_item.scale(1, -1) self.viewbox.addItem(self.matrix_item) # Set fixed view box range. 
h, w = self._sorted_matrix.shape self.viewbox.setRange(QRectF(0, -h, w, h), padding=0) self.matrix_item.selectionChanged.connect(self._invalidate_selection) if self.sorting == OWDistanceMap.NoOrdering: tree = None elif self.sorting == OWDistanceMap.Clustering: tree = self._cluster_tree() elif self.sorting == OWDistanceMap.OrderedClustering: tree = self._ordered_cluster_tree() self._set_displayed_dendrogram(tree) self._update_color() def _set_displayed_dendrogram(self, root): self.left_dendrogram.set_root(root) self.top_dendrogram.set_root(root) self.left_dendrogram.setVisible(root is not None) self.top_dendrogram.setVisible(root is not None) constraint = 0 if root is None else -1 # 150 self.left_dendrogram.setMaximumWidth(constraint) self.top_dendrogram.setMaximumHeight(constraint) def _invalidate_ordering(self): self._sorted_matrix = None if self.matrix is not None: self._update_ordering() self._setup_scene() def _update_ordering(self): if self.sorting == OWDistanceMap.NoOrdering: self._sorted_matrix = self.matrix self._sort_indices = None else: if self.sorting == OWDistanceMap.Clustering: tree = self._cluster_tree() elif self.sorting == OWDistanceMap.OrderedClustering: tree = self._ordered_cluster_tree() leaves = hierarchical.leaves(tree) indices = numpy.array([leaf.value.index for leaf in leaves]) X = self.matrix self._sorted_matrix = X[indices[:, numpy.newaxis], indices[numpy.newaxis, :]] self._sort_indices = indices def _invalidate_annotations(self): if self.matrix is not None: self._update_labels() def _update_labels(self, ): if self.annotation_idx == 0: labels = None elif self.annotation_idx == 1: labels = [str(i + 1) for i in range(self.matrix.shape[0])] elif self.annot_combo.model()[self.annotation_idx] == "Attribute names": attr = self.matrix.row_items.domain.attributes labels = [str(attr[i]) for i in range(self.matrix.shape[0])] elif self.annotation_idx == 2 and \ isinstance(self.items, widget.AttributeList): labels = [v.name for v in self.items] elif 
isinstance(self.items, Orange.data.Table): var = self.annot_combo.model()[self.annotation_idx] column, _ = self.items.get_column_view(var) labels = [var.repr_val(value) for value in column] self._set_labels(labels) def _set_labels(self, labels): self._labels = labels if labels and self.sorting != OWDistanceMap.NoOrdering: sortind = self._sort_indices labels = [labels[i] for i in sortind] for textlist in [self.right_labels, self.bottom_labels]: textlist.set_labels(labels or []) textlist.setVisible(bool(labels)) constraint = -1 if labels else 0 self.right_labels.setMaximumWidth(constraint) self.bottom_labels.setMaximumHeight(constraint) def _update_color(self): if self.matrix_item: name, colors = self.palettes[self.colormap] n, colors = max(colors.items()) colors = numpy.array(colors, dtype=numpy.ubyte) low, high = self.color_low * 255, self.color_high * 255 points = numpy.linspace(low, high, n) space = numpy.linspace(0, 255, 255) r = numpy.interp(space, points, colors[:, 0], left=255, right=0) g = numpy.interp(space, points, colors[:, 1], left=255, right=0) b = numpy.interp(space, points, colors[:, 2], left=255, right=0) colortable = numpy.c_[r, g, b] self.matrix_item.setLookupTable(colortable) def _invalidate_selection(self): ranges = self.matrix_item.selections() ranges = reduce(iadd, ranges, []) indices = reduce(iadd, ranges, []) if self.sorting != OWDistanceMap.NoOrdering: sortind = self._sort_indices indices = [sortind[i] for i in indices] self._selection = list(sorted(set(indices))) self.commit() def commit(self): datasubset = None featuresubset = None if not self._selection: pass elif isinstance(self.items, Orange.data.Table): indices = self._selection if self.matrix.axis == 1: datasubset = self.items.from_table_rows(self.items, indices) elif self.matrix.axis == 0: domain = Orange.data.Domain( [self.items.domain[i] for i in indices], self.items.domain.class_vars, self.items.domain.metas) datasubset = Orange.data.Table.from_table(domain, self.items) elif 
isinstance(self.items, widget.AttributeList): subset = [self.items[i] for i in self._selection] featuresubset = widget.AttributeList(subset) self.send("Data", datasubset) self.send("Features", featuresubset) def onDeleteWidget(self): super().onDeleteWidget() self.clear() def send_report(self): annot = self.annot_combo.currentText() if self.annotation_idx <= 1: annot = annot.lower() self.report_items(( ("Sorting", self.sorting_cb.currentText().lower()), ("Annotations", annot) )) if self.matrix is not None: self.report_plot() class TextList(GraphicsSimpleTextList): def resizeEvent(self, event): super().resizeEvent(event) self._updateFontSize() def _updateFontSize(self): crect = self.contentsRect() if self.orientation == Qt.Vertical: h = crect.height() else: h = crect.width() n = len(getattr(self, "label_items", [])) if n == 0: return if self.scene() is not None: maxfontsize = self.scene().font().pointSize() else: maxfontsize = QApplication.instance().font().pointSize() lineheight = max(1, h / n) fontsize = min(self._point_size(lineheight), maxfontsize) font = self.font() font.setPointSize(fontsize) self.setFont(font) self.layout().invalidate() self.layout().activate() def _point_size(self, height): font = self.font() font.setPointSize(height) fix = 0 while QFontMetrics(font).lineSpacing() > height and height - fix > 1: fix += 1 font.setPointSize(height - fix) return height - fix def palette_gradient(colors, discrete=False): n = len(colors) stops = numpy.linspace(0.0, 1.0, n, endpoint=True) gradstops = [(float(stop), color) for stop, color in zip(stops, colors)] grad = QLinearGradient(QPointF(0, 0), QPointF(1, 0)) grad.setStops(gradstops) return grad def palette_pixmap(colors, size): img = QPixmap(size) img.fill(Qt.transparent) painter = QPainter(img) grad = palette_gradient(colors) grad.setCoordinateMode(QLinearGradient.ObjectBoundingMode) painter.setPen(Qt.NoPen) painter.setBrush(QBrush(grad)) painter.drawRect(0, 0, size.width(), size.height()) painter.end() return 
img def init_color_combo(cb, palettes, iconsize): cb.clear() iconsize = cb.iconSize() for name, palette in palettes: n, colors = max(palette.items()) colors = [QColor(*c) for c in colors] cb.addItem(QIcon(palette_pixmap(colors, iconsize)), name, palette) def test(argv=sys.argv): app = QApplication(list(argv)) argv = app.arguments() if len(argv) > 1: filename = argv[1] else: filename = "iris" import sip import Orange.distance w = OWDistanceMap() w.show() w.raise_() data = Orange.data.Table(filename) dist = Orange.distance.Euclidean(data) w.set_distances(dist) w.handleNewSignals() rval = app.exec_() w.set_distances(None) w.saveSettings() w.onDeleteWidget() sip.delete(w) del w return rval if __name__ == "__main__": sys.exit(test())
from rdflib import Graph, BNode, Literal, URIRef
from rdflib.namespace import FOAF
from flask import Flask
import flask_rdf
import random

app = Flask(__name__)

# Custom content negotiation: serve RDF as Turtle under the text/plain
# mimetype, and use text/plain as the fallback for wildcard Accept headers.
custom_formatter = flask_rdf.FormatSelector()
custom_formatter.wildcard_mimetype = 'text/plain'
custom_formatter.add_format('text/plain', 'turtle')
custom_decorator = flask_rdf.flask.Decorator(custom_formatter)


@app.route('/')
@app.route('/<path:path>')
@custom_decorator
def random_age(path=''):
    """Return a one-triple graph stating a random foaf:age for *path*.

    The decorator serializes the returned Graph according to the
    custom formatter configured above.
    """
    graph = Graph('IOMemory', BNode())
    graph.add((URIRef(path), FOAF.age, Literal(random.randint(20, 50))))
    return graph


if __name__ == '__main__':
    app.run(host='0.0.0.0', debug=True)
"""
Commands for X-ray Diffraction
Note that an XRD camera must be installed!

NOTE(review): these commands rely on `caput`, `caget`, `sleep`, `clock`,
`systime` and `move_samplestage` being provided by the hosting runtime
(larch/epics macro environment) — they are not imported here.
"""

def setup_epics_shutter(prefix='13MARCCD4:'):
    """
    Setup Epics shutter for CCD camera
    open /close pv = 13IDA:m70.VAL  (SSA H WID)
    open val = 0.080, close val = -0.020
    """
    caput(prefix+'cam1:ShutterOpenEPICS.OUT', '13IDA:m70.VAL')
    caput(prefix+'cam1:ShutterCloseEPICS.OUT', '13IDA:m70.VAL')
    caput(prefix+'cam1:ShutterOpenEPICS.OCAL', '0.080')
    caput(prefix+'cam1:ShutterCloseEPICS.OCAL', '-0.020')
    caput(prefix+'cam1:ShutterOpenDelay', 1.50)
    caput(prefix+'cam1:ShutterCloseDelay', 0.0)
    caput(prefix+'cam1:ShutterMode', 1)

def clear_epics_shutter(prefix='13MARCCD4:'):
    """
    Clear Epics shutter PV for CCD camera
    """
    caput(prefix+'cam1:ShutterOpenEPICS.OUT', '')
    caput(prefix+'cam1:ShutterCloseEPICS.OUT', '')
    caput(prefix+'cam1:ShutterOpenEPICS.OCAL', '0')
    caput(prefix+'cam1:ShutterCloseEPICS.OCAL', '0')
    caput(prefix+'cam1:ShutterOpenDelay', 0.1)
    caput(prefix+'cam1:ShutterCloseDelay', 0.1)
    caput(prefix+'cam1:ShutterMode', 0)

def close_ccd_shutter():
    """Close the CCD shutter (SSA H width) and wait for it to settle."""
    caput('13IDA:m70.VAL', -0.025, wait=True)
    sleep(1.0)

def open_ccd_shutter():
    """Open the CCD shutter (SSA H width) and wait for it to settle."""
    caput('13IDA:m70.VAL', 0.080, wait=True)
    sleep(1.0)


def save_xrd(name, t=10, ext=None, prefix='13PEL1:', timeout=60.0):
    """
    Save XRD image from XRD camera.

    Parameters:
        name (string): name of datafile
        t (float): exposure time in seconds [default= 10]
        ext (int or None): number for file extension
            if left as None, the extension will be auto-incremented.
        prefix (string): PV prefix for areaDetector camera ['13PEL1:']
        timeout (float): maximum time in seconds to wait
            for image to be saved [60]

    Examples:
        save_xrd('CeO2', t=20)

    Note:
        dispatches on the prefix: 'mar' cameras go to `save_xrd_marccd`,
        everything else to `save_xrd_pe`

    See Also:
        `save_xrd_marccd`, `save_xrd_pe`
    """
    if 'mar' in prefix.lower():
        save_xrd_marccd(name, t=t, ext=ext, prefix=prefix)
    else:
        save_xrd_pe(name, t=t, ext=ext, prefix=prefix)
    #endif

def save_xrd_pe(name, t=10, ext=None, prefix='13PEL1:', timeout=60.0):
    """
    Save XRD image from Perkin-Elmer camera.

    Parameters:
        name (string): name of datafile
        t (float): exposure time in seconds [default= 10]
        ext (int or None): number for file extension
            if left as None, the extension will be auto-incremented.
        prefix (string): PV prefix for areaDetector camera ['13PEL1:']
        timeout (float): maximum time in seconds to wait
            for image to be saved [60]

    Examples:
        save_xrd_pe('CeO2', t=20)

    Note:
        detector pool PE detector has prefix like 'dp_pe2:'
    """
    # prefix='dp_pe2:'
    # save shutter mode, disable shutter for now
    shutter_mode = caget(prefix+'cam1:ShutterMode')
    caput(prefix+'cam1:ShutterMode', 0)
    sleep(0.1)
    caput(prefix+'cam1:Acquire', 0)
    sleep(0.1)
    print("Save XRD...")
    caput(prefix+'TIFF1:EnableCallbacks', 0)
    caput(prefix+'TIFF1:AutoSave', 0)
    caput(prefix+'TIFF1:AutoIncrement', 1)
    caput(prefix+'TIFF1:FileName', name)
    if ext is not None:
        caput(prefix+'TIFF1:FileNumber', ext)
    #endif
    caput(prefix+'TIFF1:EnableCallbacks', 1)
    caput(prefix+'cam1:ImageMode', 3)
    sleep(0.5)
    acq_time = caget(prefix+'cam1:AcquireTime')
    # FIX: require at least one frame; the old int(t/acq_time) could be 0
    # for exposures shorter than a single frame.
    numimages = max(1, int(t*1.0/acq_time))
    caput(prefix+'cam1:NumImages', numimages)

    # expose
    caput(prefix+'cam1:Acquire', 1)
    sleep(0.5 + max(0.5, 0.5*t))
    t0 = clock()
    nrequested = caget(prefix+'cam1:NumImages')
    print('Wait for Acquire ... %i' % nrequested)
    while ((1 == caget(prefix+'cam1:Acquire')) and
           (clock()-t0 < timeout)):
        sleep(0.25)
    #endwhile
    print('Acquire Done, writing file %s' % name)
    sleep(0.1)

    # clean up, returning to short dwell time
    caput(prefix+'TIFF1:WriteFile', 1)
    caput(prefix+'TIFF1:EnableCallbacks', 0)
    sleep(0.5)
    caput(prefix+'cam1:ImageMode', 2)
    caput(prefix+'cam1:ShutterMode', shutter_mode)
    sleep(0.5)
    caput(prefix+'cam1:Acquire', 1)
    sleep(1.5)

def save_xrd_marccd(name, t=10, ext=None, prefix='13MARCCD4:', timeout=60.0):
    """
    save XRD image from MARCCD (Rayonix 165) camera to file

    Parameters:
        name (string): name of datafile
        t (float): exposure time in seconds [default= 10]
        ext (int or None): number for file extension
            if left as None, the extension will be auto-incremented.
        prefix (string): PV prefix for areaDetector camera ['13MARCCD1:']
        timeout (float): maximum time in seconds to wait
            for image to be saved [60]

    Examples:
        save_xrd_marccd('CeO2', t=20)

    Note:
        The marccd requires the Epics Shutter to be set up correctly.
    """
    start_time = systime()
    # save shutter mode, disable shutter for now
    shutter_mode = caget(prefix+'cam1:ShutterMode')

    # NOTE: Need to start acquisition with the shutter
    # having been closed for awhile
    # using the SSA H Width as shutter we want
    caput(prefix+'cam1:ShutterControl', 0)
    close_ccd_shutter()
    caput(prefix+'cam1:FrameType', 0)
    caput(prefix+'cam1:ImageMode', 0)
    caput(prefix+'cam1:AutoSave', 1)
    caput(prefix+'cam1:AutoIncrement', 1)
    caput(prefix+'cam1:FileName', name)
    if ext is not None:
        caput(prefix+'cam1:FileNumber', ext)
    #endif
    caput(prefix+'cam1:AcquireTime', t)
    sleep(0.1)

    # expose
    caput(prefix+'cam1:Acquire', 1)
    sleep(1.0 + max(1.0, t))
    t0 = systime()
    print('Wait for Acquire ... ')
    # FIX: the wait loop previously compared clock()-t0 while t0 came from
    # systime(); mixing the two clocks made the timeout meaningless.
    while ((1 == caget(prefix+'cam1:Acquire')) and
           (systime()-t0 < timeout)):
        sleep(0.25)
    #endwhile
    fname = caget(prefix+'cam1:FullFileName_RBV', as_string=True)
    print('Acquire Done! %.3f sec' % (systime()-start_time))
    print('Wrote %s' % fname)
    sleep(1.0)
    caput(prefix+'cam1:ShutterControl', 1)

def xrd_at(posname, t):
    """Move the sample stage to named position *posname*, then save an
    XRD image named after the position with exposure time *t*."""
    move_samplestage(posname, wait=True)
    save_xrd(posname, t=t, ext=1)

def xrd_bgr_marccd(prefix='13MARCCD4:', timeout=120.0):
    """
    collect XRD Background for marccd

    Parameters:
        prefix (string): PV prefix for camera ['13MARCCD1:']
        timeout (float): maximum time to wait [120]
    """
    caput(prefix+'cam1:ShutterControl', 0)
    caput(prefix+'cam1:FrameType', 1)
    sleep(0.1)
    caput(prefix+'cam1:Acquire', 1)
    sleep(3)
    t0 = clock()
    print('Wait for Acquire ... ')
    while ((1 == caget(prefix+'cam1:Acquire')) and
           (clock()-t0 < timeout)):
        sleep(0.25)
    #endwhile
    sleep(2.0)

def xrd_bgr(prefix='13PEL1:'):
    """
    collect XRD Background for Perkin Elmer

    Parameters:
        prefix (string): PV prefix for camera ['13MARCCD1:']
    """
    caput(prefix+'cam1:ShutterMode', 1)
    immode = caget(prefix+'cam1:ImageMode')
    caput(prefix+'cam1:ImageMode', 1)
    caput(prefix+'cam1:ShutterControl', 0)
    sleep(3)
    caput(prefix+'cam1:PEAcquireOffset', 1)
    sleep(5)
    caput(prefix+'cam1:ShutterControl', 1)
    caput(prefix+'cam1:ImageMode', immode)
    caput(prefix+'cam1:Acquire', 1)
    sleep(2.0)
import os
import sys
import test_common


def main():
    """Exercise the OutQueue AMI actions (create/show/delete).

    Returns 0 on success, 1 if the AMI connection fails; raises an
    Exception on any failed test step.
    """
    ast = test_common.Ami()

    ast.username = sys.argv[1]
    ast.password = sys.argv[2]
    if not ast.conn():
        print("Could not connect.")
        return 1

    # create dlma
    res = ast.sendCmd("OutQueueCreate", Name="TestDlma", Detail="TestDetail")
    if res[0]["Response"] != "Success":
        print("Couldn not pass the test_queue. res[%s]" % res)
        # FIX: raising a plain string is a TypeError on Python 3;
        # raise a real exception instead.
        raise Exception("test_queue")

    # wait (bounded) for the matching creation event
    for i in range(10):
        evt = ast.recvEvt()
        if evt["Event"] == "OutQueueCreate":
            break
    if evt["Name"] != "TestDlma" or evt["Detail"] != "TestDetail":
        print("Couldn not pass the test_queue. ret[%s]" % evt)
        raise Exception("test_queue")
    test_uuid = evt["Uuid"]

    # get dlma: the created uuid must appear in the listing
    ret = ast.sendCmd("OutQueueShow", Uuid=test_uuid)
    flg = False
    for i in range(len(ret)):
        msg = ret[i]
        if "Uuid" not in msg:
            continue
        if msg["Uuid"] == test_uuid:
            flg = True
            break
    if not flg:
        print("Couldn not pass the test_queue. ret[%s]" % ret)
        raise Exception("test_queue")

    # delete dlma
    ret = ast.sendCmd("OutQueueDelete", Uuid=test_uuid)
    if ret[0]["Response"] != "Success":
        print("Couldn not pass the test_queue. ret[%s]" % ret)
        raise Exception("test_queue")
    for i in range(10):
        ret = ast.recvEvt()
        if ret["Event"] == "OutQueueDelete":
            break
    if ret["Uuid"] != test_uuid:
        print("Could not pass the test_queue. ret[%s]" % ret)
        raise Exception("test_queue")

    # get campaign: the deleted uuid must no longer appear
    ret = ast.sendCmd("OutQueueShow", Uuid=test_uuid)
    for i in range(len(ret)):
        msg = ret[i]
        if "Uuid" not in msg:
            continue
        if msg["Uuid"] == test_uuid:
            print("Could not pass the test_queue. ret[%s], uuid[%s]" % (ret, test_uuid))
            raise Exception("test_queue")

    return 0


if __name__ == '__main__':
    # FIX: propagate the return code to the shell instead of discarding it.
    sys.exit(main())
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook


class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.

    """

    def setUp(self):
        self.maxDiff = None

        filename = 'chart_title01.xlsx'

        test_dir = 'xlsxwriter/test/comparison/'
        self.got_filename = test_dir + '_test_' + filename
        self.exp_filename = test_dir + 'xlsx_files/' + filename

        self.ignore_files = []
        self.ignore_elements = {}

    def test_create_file(self):
        """Test an XlsxWriter chart with the title explicitly turned off."""

        workbook = Workbook(self.got_filename)

        worksheet = workbook.add_worksheet()
        chart = workbook.add_chart({'type': 'column'})

        # Fixed axis ids so the generated XML matches the reference file.
        chart.axis_ids = [46165376, 54462720]

        data = [
            [1, 2, 3, 4, 5],
            [2, 4, 6, 8, 10],
            [3, 6, 9, 12, 15],
        ]

        worksheet.write_column('A1', data[0])
        worksheet.write_column('B1', data[1])
        worksheet.write_column('C1', data[2])

        chart.add_series({'values': '=Sheet1!$A$1:$A$5', 'name': 'Foo'})

        # Hide the chart title ('none': True suppresses the auto title).
        chart.set_title({'none': True})

        worksheet.insert_chart('E9', chart)

        workbook.close()

        self.assertExcelEqual()
# Initial schema for the 'car' app: Car plus its dependent OilChange and
# Refuel records. Autogenerated migration — do not edit logic by hand.
from __future__ import unicode_literals

import django.core.validators
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Car',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('car_model', models.CharField(max_length=20)),
                ('color', models.CharField(max_length=20)),
                # Year stored as a 4-digit integer, validated by regex.
                ('year', models.SmallIntegerField(help_text='Use year as YYYY.', validators=[django.core.validators.RegexValidator('^[0-9]{4}$', 'Year in invalid format!', 'invalid')])),
                ('mileage', models.IntegerField(default=0, help_text='Or your car is brand new or it have some mileage traveled', validators=[django.core.validators.MinValueValidator(0)])),
            ],
        ),
        migrations.CreateModel(
            name='OilChange',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateTimeField(verbose_name='date changed')),
                ('mileage', models.IntegerField(default=0, validators=[django.core.validators.MinValueValidator(0)])),
                # Deleting a Car cascades to its oil-change history.
                ('car', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='car.Car')),
            ],
        ),
        migrations.CreateModel(
            name='Refuel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateTimeField(verbose_name='date refueled')),
                ('liters', models.DecimalField(decimal_places=3, max_digits=7)),
                ('fuel_price', models.DecimalField(decimal_places=2, max_digits=4)),
                ('mileage', models.IntegerField(default=0, validators=[django.core.validators.MinValueValidator(0)])),
                ('fuel_type', models.CharField(choices=[('Regular gas', 'Regular gas'), ('Premium gas', 'Premium gas'), ('Alcohol', 'Alcohol'), ('Diesel', 'Diesel')], default='Regular gas', max_length=20)),
                # Deleting a Car cascades to its refuel history.
                ('car', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='car.Car')),
            ],
        ),
    ]
"""Widget for creating classes from non-numeric attribute by substrings""" import re from itertools import count import numpy as np from AnyQt.QtWidgets import QGridLayout, QLabel, QLineEdit, QSizePolicy from AnyQt.QtCore import QSize, Qt from Orange.data import StringVariable, DiscreteVariable, Domain from Orange.data.table import Table from Orange.statistics.util import bincount from Orange.preprocess.transformation import Transformation, Lookup from Orange.widgets import gui, widget from Orange.widgets.settings import DomainContextHandler, ContextSetting from Orange.widgets.utils.itemmodels import DomainModel from Orange.widgets.widget import Msg def map_by_substring(a, patterns, case_sensitive, match_beginning): """ Map values in a using a list of patterns. The patterns are considered in order of appearance. Args: a (np.array): input array of `dtype` `str` patterns (list of str): list of stirngs case_sensitive (bool): case sensitive match match_beginning (bool): match only at the beginning of the string Returns: np.array of floats representing indices of matched patterns """ res = np.full(len(a), np.nan) if not case_sensitive: a = np.char.lower(a) patterns = (pattern.lower() for pattern in patterns) for val_idx, pattern in reversed(list(enumerate(patterns))): indices = np.char.find(a, pattern) matches = indices == 0 if match_beginning else indices != -1 res[matches] = val_idx return res class ValueFromStringSubstring(Transformation): """ Transformation that computes a discrete variable from a string variable by pattern matching. Given patterns `["abc", "a", "bc", ""]`, string data `["abcd", "aa", "bcd", "rabc", "x"]` is transformed to values of the new attribute with indices`[0, 1, 2, 0, 3]`. 
Args: variable (:obj:`~Orange.data.StringVariable`): the original variable patterns (list of str): list of string patterns case_sensitive (bool, optional): if set to `True`, the match is case sensitive match_beginning (bool, optional): if set to `True`, the pattern must appear at the beginning of the string """ def __init__(self, variable, patterns, case_sensitive=False, match_beginning=False): super().__init__(variable) self.patterns = patterns self.case_sensitive = case_sensitive self.match_beginning = match_beginning def transform(self, c): """ Transform the given data. Args: c (np.array): an array of type that can be cast to dtype `str` Returns: np.array of floats representing indices of matched patterns """ nans = np.equal(c, None) c = c.astype(str) c[nans] = "" res = map_by_substring( c, self.patterns, self.case_sensitive, self.match_beginning) res[nans] = np.nan return res class ValueFromDiscreteSubstring(Lookup): """ Transformation that computes a discrete variable from discrete variable by pattern matching. Say that the original attribute has values `["abcd", "aa", "bcd", "rabc", "x"]`. Given patterns `["abc", "a", "bc", ""]`, the values are mapped to the values of the new attribute with indices`[0, 1, 2, 0, 3]`. 
Args: variable (:obj:`~Orange.data.DiscreteVariable`): the original variable patterns (list of str): list of string patterns case_sensitive (bool, optional): if set to `True`, the match is case sensitive match_beginning (bool, optional): if set to `True`, the pattern must appear at the beginning of the string """ def __init__(self, variable, patterns, case_sensitive=False, match_beginning=False): super().__init__(variable, []) self.case_sensitive = case_sensitive self.match_beginning = match_beginning self.patterns = patterns # Finally triggers computation of the lookup def __setattr__(self, key, value): """__setattr__ is overloaded to recompute the lookup table when the patterns, the original attribute or the flags change.""" super().__setattr__(key, value) if hasattr(self, "patterns") and \ key in ("case_sensitive", "match_beginning", "patterns", "variable"): self.lookup_table = map_by_substring( self.variable.values, self.patterns, self.case_sensitive, self.match_beginning) class OWCreateClass(widget.OWWidget): name = "Create Class" description = "Create class attribute from a string attribute" icon = "icons/CreateClass.svg" category = "Data" keywords = ["data"] inputs = [("Data", Table, "set_data")] outputs = [("Data", Table)] want_main_area = False settingsHandler = DomainContextHandler() attribute = ContextSetting(None) class_name = ContextSetting("class") rules = ContextSetting({}) match_beginning = ContextSetting(False) case_sensitive = ContextSetting(False) TRANSFORMERS = {StringVariable: ValueFromStringSubstring, DiscreteVariable: ValueFromDiscreteSubstring} class Warning(widget.OWWidget.Warning): no_nonnumeric_vars = Msg("Data contains only numeric variables.") def __init__(self): super().__init__() self.data = None # The following lists are of the same length as self.active_rules #: list of pairs with counts of matches for each patter when the # patterns are applied in order and when applied on the entire set, # disregarding the preceding patterns 
self.match_counts = [] #: list of list of QLineEdit: line edit pairs for each pattern self.line_edits = [] #: list of QPushButton: list of remove buttons self.remove_buttons = [] #: list of list of QLabel: pairs of labels with counts self.counts = [] combo = gui.comboBox( self.controlArea, self, "attribute", label="From column: ", box=True, orientation=Qt.Horizontal, callback=self.update_rules, model=DomainModel(valid_types=(StringVariable, DiscreteVariable))) # Don't use setSizePolicy keyword argument here: it applies to box, # not the combo combo.setSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.Fixed) patternbox = gui.vBox(self.controlArea, box=True) #: QWidget: the box that contains the remove buttons, line edits and # count labels. The lines are added and removed dynamically. self.rules_box = rules_box = QGridLayout() patternbox.layout().addLayout(self.rules_box) box = gui.hBox(patternbox) gui.button( box, self, "+", callback=self.add_row, autoDefault=False, flat=True, minimumSize=(QSize(20, 20))) gui.rubber(box) self.rules_box.setColumnMinimumWidth(1, 70) self.rules_box.setColumnMinimumWidth(0, 10) self.rules_box.setColumnStretch(0, 1) self.rules_box.setColumnStretch(1, 1) self.rules_box.setColumnStretch(2, 100) rules_box.addWidget(QLabel("Name"), 0, 1) rules_box.addWidget(QLabel("Substring"), 0, 2) rules_box.addWidget(QLabel("#Instances"), 0, 3, 1, 2) self.update_rules() gui.lineEdit( self.controlArea, self, "class_name", label="Name for the new class:", box=True, orientation=Qt.Horizontal) optionsbox = gui.vBox(self.controlArea, box=True) gui.checkBox( optionsbox, self, "match_beginning", "Match only at the beginning", callback=self.options_changed) gui.checkBox( optionsbox, self, "case_sensitive", "Case sensitive", callback=self.options_changed) layout = QGridLayout() gui.widgetBox(self.controlArea, orientation=layout) for i in range(3): layout.setColumnStretch(i, 1) layout.addWidget(self.report_button, 0, 0) apply = gui.button(None, self, "Apply", 
autoDefault=False, callback=self.apply) layout.addWidget(apply, 0, 2) # TODO: Resizing upon changing the number of rules does not work self.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Maximum) @property def active_rules(self): """ Returns the class names and patterns corresponding to the currently selected attribute. If the attribute is not yet in the dictionary, set the default. """ return self.rules.setdefault(self.attribute and self.attribute.name, [["", ""], ["", ""]]) def rules_to_edits(self): """Fill the line edites with the rules from the current settings.""" for editr, textr in zip(self.line_edits, self.active_rules): for edit, text in zip(editr, textr): edit.setText(text) def set_data(self, data): """Input data signal handler.""" self.closeContext() self.rules = {} self.data = data model = self.controls.attribute.model() model.set_domain(data and data.domain) self.Warning.no_nonnumeric_vars(shown=data is not None and not model) if not model: self.attribute = None self.send("Data", None) return self.attribute = model[0] self.openContext(data) self.update_rules() self.apply() def update_rules(self): """Called when the rules are changed: adjust the number of lines in the form and fill them, update the counts. 
The widget does not have auto-apply.""" self.adjust_n_rule_rows() self.rules_to_edits() self.update_counts() # TODO: Indicator that changes need to be applied def options_changed(self): self.update_counts() def adjust_n_rule_rows(self): """Add or remove lines if needed and fix the tab order.""" def _add_line(): self.line_edits.append([]) n_lines = len(self.line_edits) for coli in range(1, 3): edit = QLineEdit() self.line_edits[-1].append(edit) self.rules_box.addWidget(edit, n_lines, coli) edit.textChanged.connect(self.sync_edit) button = gui.button( None, self, label='×', flat=True, height=20, styleSheet='* {font-size: 16pt; color: silver}' '*:hover {color: black}', autoDefault=False, callback=self.remove_row) button.setMinimumSize(QSize(12, 20)) self.remove_buttons.append(button) self.rules_box.addWidget(button, n_lines, 0) self.counts.append([]) for coli, kwargs in enumerate( (dict(alignment=Qt.AlignRight), dict(alignment=Qt.AlignLeft, styleSheet="color: gray"))): label = QLabel(**kwargs) self.counts[-1].append(label) self.rules_box.addWidget(label, n_lines, 3 + coli) def _remove_line(): for edit in self.line_edits.pop(): edit.deleteLater() self.remove_buttons.pop().deleteLater() for label in self.counts.pop(): label.deleteLater() def _fix_tab_order(): prev = None for row, rule in zip(self.line_edits, self.active_rules): for col_idx, edit in enumerate(row): edit.row, edit.col_idx = rule, col_idx if prev is not None: self.setTabOrder(prev, edit) prev = edit n = len(self.active_rules) while n > len(self.line_edits): _add_line() while len(self.line_edits) > n: _remove_line() _fix_tab_order() def add_row(self): """Append a new row at the end.""" self.active_rules.append(["", ""]) self.adjust_n_rule_rows() self.update_counts() def remove_row(self): """Remove a row.""" remove_idx = self.remove_buttons.index(self.sender()) del self.active_rules[remove_idx] self.update_rules() self.update_counts() def sync_edit(self, text): """Handle changes in line edits: update the 
active rules and counts""" edit = self.sender() edit.row[edit.col_idx] = text self.update_counts() def class_labels(self): """Construct a list of class labels. Empty labels are replaced with C1, C2, C3. If C<n> already appears in the list of values given by the user, the labels start at C<n+1> instead. """ largest_c = max((int(label[1:]) for label, _ in self.active_rules if re.match("^C\\d+", label)), default=0) class_count = count(largest_c + 1) return [label_edit.text() or "C{}".format(next(class_count)) for label_edit, _ in self.line_edits] def update_counts(self): """Recompute and update the counts of matches.""" def _matcher(strings, pattern): """Return indices of strings into patterns; consider case sensitivity and matching at the beginning. The given strings are assumed to be in lower case if match is case insensitive. Patterns are fixed on the fly.""" if not self.case_sensitive: pattern = pattern.lower() indices = np.char.find(strings, pattern.strip()) return indices == 0 if self.match_beginning else indices != -1 def _lower_if_needed(strings): return strings if self.case_sensitive else np.char.lower(strings) def _string_counts(): """ Generate pairs of arrays for each rule until running out of data instances. np.sum over the two arrays in each pair gives the number of matches of the remaining instances (considering the order of patterns) and of the original data. For _string_counts, the arrays contain bool masks referring to the original data """ nonlocal data data = data.astype(str) data = data[~np.char.equal(data, "")] data = _lower_if_needed(data) remaining = np.array(data) for _, pattern in self.active_rules: matching = _matcher(remaining, pattern) total_matching = _matcher(data, pattern) yield matching, total_matching remaining = remaining[~matching] if len(remaining) == 0: break def _discrete_counts(): """ Generate pairs similar to _string_counts, except that the arrays contain bin counts for the attribute's values matching the pattern. 
""" attr_vals = np.array(attr.values) attr_vals = _lower_if_needed(attr_vals) bins = bincount(data, max_val=len(attr.values) - 1)[0] remaining = np.array(bins) for _, pattern in self.active_rules: matching = _matcher(attr_vals, pattern) yield remaining[matching], bins[matching] remaining[matching] = 0 if not np.any(remaining): break def _clear_labels(): """Clear all labels""" for lab_matched, lab_total in self.counts: lab_matched.setText("") lab_total.setText("") def _set_labels(): """Set the labels to show the counts""" for (n_matched, n_total), (lab_matched, lab_total), (lab, patt) in \ zip(self.match_counts, self.counts, self.active_rules): n_before = n_total - n_matched lab_matched.setText("{}".format(n_matched)) if n_before and (lab or patt): lab_total.setText("+ {}".format(n_before)) if n_matched: tip = "{} of the {} matching instances are already " \ "covered above".format(n_before, n_total) else: tip = "All matching instances are already covered above" lab_total.setToolTip(tip) lab_matched.setToolTip(tip) def _set_placeholders(): """Set placeholders for empty edit lines""" matches = [n for n, _ in self.match_counts] + \ [0] * len(self.line_edits) for n_matched, (_, patt) in zip(matches, self.line_edits): if not patt.text(): patt.setPlaceholderText( "(remaining instances)" if n_matched else "(unused)") labels = self.class_labels() for label, (lab_edit, _) in zip(labels, self.line_edits): if not lab_edit.text(): lab_edit.setPlaceholderText(label) _clear_labels() attr = self.attribute if attr is None: return counters = {StringVariable: _string_counts, DiscreteVariable: _discrete_counts} data = self.data.get_column_view(attr)[0] self.match_counts = [[int(np.sum(x)) for x in matches] for matches in counters[type(attr)]()] _set_labels() _set_placeholders() def apply(self): """Output the transformed data.""" if not self.attribute: self.send("Data", None) return domain = self.data.domain rules = self.active_rules # Transposition + stripping valid_rules = [label or 
pattern or n_matches for (label, pattern), n_matches in zip(rules, self.match_counts)] patterns = [pattern for (_, pattern), valid in zip(rules, valid_rules) if valid] names = [name for name, valid in zip(self.class_labels(), valid_rules) if valid] transformer = self.TRANSFORMERS[type(self.attribute)] compute_value = transformer( self.attribute, patterns, self.case_sensitive, self.match_beginning) new_class = DiscreteVariable( self.class_name, names, compute_value=compute_value) new_domain = Domain( domain.attributes, new_class, domain.metas + domain.class_vars) new_data = Table(new_domain, self.data) self.send("Data", new_data) def send_report(self): def _cond_part(): rule = "<b>{}</b> ".format(class_name) if patt: rule += "if <b>{}</b> contains <b>{}</b>".format( self.attribute.name, patt) else: rule += "otherwise" return rule def _count_part(): if not n_matched: return "all {} matching instances are already covered " \ "above".format(n_total) elif n_matched < n_total and patt: return "{} matching instances (+ {} that are already " \ "covered above".format(n_matched, n_total - n_matched) else: return "{} matching instances".format(n_matched) if not self.attribute: return self.report_items("Input", [("Source attribute", self.attribute.name)]) output = "" names = self.class_labels() for (n_matched, n_total), class_name, (lab, patt) in \ zip(self.match_counts, names, self.active_rules): if lab or patt or n_total: output += "<li>{}; {}</li>".format(_cond_part(), _count_part()) if output: self.report_items("Output", [("Class name", self.class_name)]) self.report_raw("<ol>{}</ol>".format(output)) def main(): # pragma: no cover """Simple test for manual inspection of the widget""" import sys from AnyQt.QtWidgets import QApplication a = QApplication(sys.argv) table = Table("zoo") ow = OWCreateClass() ow.show() ow.set_data(table) a.exec() ow.saveSettings() if __name__ == "__main__": # pragma: no cover main()
"""
This is a subfile for IsyClass.py

These funtions are accessable via the Isy class opj
"""

__author__ = 'Peter Shipley <peter.shipley@gmail.com>'
__copyright__ = "Copyright (C) 2013 Peter Shipley"
__license__ = "BSD"

import time


def load_clim(self):
    """ Load climate data from ISY device

        args: none

        internal function call
    """
    if self.debug & 0x01:
        print("load_clim")
    tree = self._getXMLetree("/rest/climate")
    self.climateinfo = dict()
    if tree is None:
        return
    # Isy._printXML(self.climateinfo)
    for node in tree.iter("climate"):
        # Both XML attributes and child elements become dict entries.
        self.climateinfo.update(node.items())
        for child in list(node):
            self.climateinfo[child.tag] = child.text
    # Record when the snapshot was taken.
    self.climateinfo["time"] = time.gmtime()


def clim_get_val(self, prop):
    # Placeholder: single-property lookup is not implemented yet.
    pass


def clim_query(self):
    """ returns dictionary of climate info """
    if not self.climateinfo:
        self.load_clim()
    #
    # ADD CODE to check self.cachetime
    #
    return self.climateinfo


def clim_iter(self):
    """ Iterate though climate values

        args:  None

        returns:  Return an iterator over the climate values
    """
    if not self.climateinfo:
        self.load_clim()
    for key in list(self.climateinfo.keys()):
        yield self.climateinfo[key]


if __name__ == "__main__":
    import __main__
    print(__main__.__file__)
    print("syntax ok")
    exit(0)
# Thin launcher script: delegates to the package's real entry point so the
# application can be started by executing this file directly.
from zested.main import main

if __name__ == "__main__":
    main()
from __future__ import print_function
import re

from streamlink.plugin import Plugin
from streamlink.plugin.api import http, useragents
from streamlink.stream import HDSStream
from streamlink.stream import HLSStream


class TF1(Plugin):
    """Streamlink plugin for the live streams of tf1.fr and lci.fr."""

    # Group 1 captures the tf1.fr channel slug; group 2 matches lci.fr.
    url_re = re.compile(r"https?://(?:www\.)?(?:tf1\.fr/(\w+)/direct|(lci).fr/direct)/?")
    embed_url = "http://www.wat.tv/embedframe/live{0}"
    embed_re = re.compile(r"urlLive.*?:.*?\"(http.*?)\"", re.MULTILINE)
    api_url = "http://www.wat.tv/get/{0}/591997"
    swf_url = "http://www.wat.tv/images/v70/PlayerLite.swf"
    # Some channels use different internal names for HDS/HLS endpoints.
    hds_channel_remap = {"tf1": "androidliveconnect", "lci": "androidlivelci"}
    hls_channel_remap = {"lci": "LCI", "tf1": "V4"}

    @classmethod
    def can_handle_url(cls, url):
        """Return True if *url* is a TF1/LCI live-stream URL."""
        return cls.url_re.match(url) is not None

    def _get_hds_streams(self, channel):
        """Yield (quality, HDSStream) pairs for *channel*."""
        channel = self.hds_channel_remap.get(channel, "{0}live".format(channel))
        manifest_url = http.get(self.api_url.format(channel),
                                params={"getURL": 1},
                                headers={"User-Agent": useragents.FIREFOX}).text

        for s in HDSStream.parse_manifest(self.session,
                                          manifest_url,
                                          pvswf=self.swf_url,
                                          headers={"User-Agent": useragents.FIREFOX}).items():
            yield s

    def _get_hls_streams(self, channel):
        """Yield (quality, HLSStream) pairs for *channel* via the embed page."""
        channel = self.hls_channel_remap.get(channel, channel)
        embed_url = self.embed_url.format(channel)
        self.logger.debug("Found embed URL: {0}", embed_url)
        # page needs to have a mobile user agent
        embed_page = http.get(embed_url, headers={"User-Agent": useragents.ANDROID})

        m = self.embed_re.search(embed_page.text)
        if m:
            hls_stream_url = m.group(1)

            try:
                for s in HLSStream.parse_variant_playlist(self.session, hls_stream_url).items():
                    yield s
            except Exception:
                # Best effort: HDS streams may still be available.
                self.logger.error("Failed to load the HLS playlist for {0}", channel)

    def _get_streams(self):
        """Streamlink entry point: yield all streams for the matched URL."""
        m = self.url_re.match(self.url)
        if m:
            channel = m.group(1) or m.group(2)
            self.logger.debug("Found channel {0}", channel)
            for s in self._get_hds_streams(channel):
                yield s

            for s in self._get_hls_streams(channel):
                yield s


__plugin__ = TF1
import bcrypt


def _to_bytes(value):
    """Encode *value* as UTF-8 bytes if it is a str; pass bytes through.

    bcrypt's C API only accepts bytes; on Python 3 passing a str raises
    TypeError, so we normalize at the boundary.
    """
    return value.encode("utf-8") if isinstance(value, str) else value


def hash_password(password):
    """Hash *password* with bcrypt.

    Args:
        password: the plaintext password (str or bytes).

    Returns:
        The bcrypt hash as bytes (salt is embedded in the hash).
    """
    default_rounds = 14  # work factor; 2**14 iterations
    bcrypt_salt = bcrypt.gensalt(default_rounds)
    hashed_password = bcrypt.hashpw(_to_bytes(password), bcrypt_salt)
    return hashed_password


def check_password(password, hashed):
    """Return True if *password* matches the bcrypt *hashed* value.

    Uses bcrypt's constant-time comparison. Accepts str or bytes for
    both arguments.
    """
    return bcrypt.checkpw(_to_bytes(password), _to_bytes(hashed))
import os import requests import time import math import datetime import random import envoy import jsonfield import logging import urllib from collections import defaultdict from magic_repr import make_repr from hashlib import md5, sha1 from django.db import models from django.db.models import Q from django.utils import timezone from django.conf import settings from django.contrib.auth.models import AbstractBaseUser, UserManager as BaseUserManager from django.core.cache import cache from twiggy_goodies.threading import log from allmychanges.validators import URLValidator from allmychanges.downloaders.utils import normalize_url from allmychanges.issues import calculate_issue_importance from allmychanges.utils import ( split_filenames, parse_search_list, get_one_or_none, ) from allmychanges import chat from allmychanges.downloaders import ( get_downloader) from allmychanges.utils import reverse from allmychanges.tasks import ( update_preview_task, update_changelog_task) from allmychanges.exceptions import SynonymError MARKUP_CHOICES = ( ('markdown', 'markdown'), ('rest', 'rest'), ) NAME_LENGTH = 80 NAMESPACE_LENGTH = 80 DESCRIPTION_LENGTH = 255 PROCESSING_STATUS_LENGTH = 40 from pytz import common_timezones TIMEZONE_CHOICES = [(tz, tz) for tz in common_timezones] class URLField(models.URLField): default_validators = [URLValidator()] class UserManager(BaseUserManager): def _create_user(self, username, email=None, password=None, **extra_fields): now = timezone.now() email = self.normalize_email(email) user = self.model(username=username, email=email, last_login=now, date_joined=now, **extra_fields) user.set_password(password) user.save(using=self._db) return user def create(self, *args, **kwargs): email = kwargs.get('email') if email and self.filter(email=email).count() > 0: raise ValueError('User with email "{0}" already exists'.format(email)) username = kwargs.get('username') url = settings.BASE_URL + reverse('admin-user-profile', username=username) chat.send(('New 
user <{url}|{username}> ' 'with email "{email}" (from create)').format( url=url, username=username, email=email)) return super(UserManager, self).create(*args, **kwargs) def create_user(self, username, email=None, password=None, **extra_fields): if email and self.filter(email=email).count() > 0: raise ValueError('User with email "{0}" already exists'.format(email)) url = settings.BASE_URL + reverse('admin-user-profile', username=username) chat.send(('New user <{url}|{username}> ' 'with email "{email}" (from create_user)').format( url=url, username=username, email=email)) return self._create_user(username, email, password, **extra_fields) def active_users(self, interval): """Outputs only users who was active in last `interval` days. """ after = timezone.now() - datetime.timedelta(interval) queryset = self.all() queryset = queryset.filter(history_log__action__in=ACTIVE_USER_ACTIONS, history_log__created_at__gte=after).distinct() return queryset SEND_DIGEST_CHOICES = ( ('daily', 'Every day'), ('weekly', 'Every week (on Monday)'), ('never', 'Never')) RSS_HASH_LENGH = 32 class User(AbstractBaseUser): """ A fully featured User model with admin-compliant permissions that uses a full-length email field as the username. Email and password are required. Other fields are optional. 
""" username = models.CharField('user name', max_length=254, unique=True) email = models.EmailField('email address', max_length=254) email_is_valid = models.BooleanField(default=False) date_joined = models.DateTimeField('date joined', default=timezone.now) timezone = models.CharField(max_length=100, choices=TIMEZONE_CHOICES, default='UTC') changelogs = models.ManyToManyField('Changelog', through='ChangelogTrack', related_name='trackers') feed_versions = models.ManyToManyField('Version', through='FeedItem', related_name='users') feed_sent_id = models.IntegerField( default=0, help_text='Keeps position in feed items already sent in digest emails') last_digest_sent_at = models.DateTimeField( blank=True, null=True, help_text='Date when last email digest was sent') skips_changelogs = models.ManyToManyField('Changelog', through='ChangelogSkip', related_name='skipped_by') moderated_changelogs = models.ManyToManyField('Changelog', through='Moderator', related_name='moderators') # notification settings send_digest = models.CharField(max_length=100, choices=SEND_DIGEST_CHOICES, default='daily') slack_url = models.URLField(max_length=2000, default='', blank=True) webhook_url = models.URLField(max_length=2000, default='', blank=True) rss_hash = models.CharField(max_length=RSS_HASH_LENGH, unique=True, blank=True, null=True) custom_fields = jsonfield.JSONField( default={}, help_text='Custom fields such like "Location" or "SecondEmail".', blank=True) objects = UserManager() USERNAME_FIELD = 'username' REQUIRED_FIELDS = ['email'] class Meta: verbose_name = 'user' verbose_name_plural = 'users' __repr__ = make_repr('username', 'email') def get_avatar(self, size): robohash_template = 'https://robohash.org/{hash}.png?size={size}x{size}' if self.email: hash = md5(self.email.lower()).hexdigest() default = robohash_template.format(size=size, hash=hash) avatar_url = 'https://www.gravatar.com/avatar/{hash}?{opts}'.format( hash=hash, opts=urllib.urlencode( dict( s=str(size), d=default ) ) ) 
else: hash = md5(self.username).hexdigest() avatar_url = robohash_template.format(size=size, hash=hash) return avatar_url @property def is_superuser(self): return self.username in settings.SUPERUSERS def does_track(self, changelog): """Check if this user tracks given changelog.""" return self.changelogs.filter(pk=changelog.id).exists() def track(self, changelog): if not self.does_track(changelog): if changelog.namespace == 'web' and changelog.name == 'allmychanges': action = 'track-allmychanges' action_description = 'User tracked our project\'s changelog.' else: action = 'track' action_description = 'User tracked changelog:{0}'.format(changelog.id) UserHistoryLog.write(self, '', action, action_description) ChangelogTrack.objects.create( user=self, changelog=changelog) def untrack(self, changelog): if self.does_track(changelog): if changelog.namespace == 'web' and changelog.name == 'allmychanges': action = 'untrack-allmychanges' action_description = 'User untracked our project\'s changelog.' 
else: action = 'untrack' action_description = 'User untracked changelog:{0}'.format(changelog.id) UserHistoryLog.write(self, '', action, action_description) ChangelogTrack.objects.filter( user=self, changelog=changelog).delete() def does_skip(self, changelog): """Check if this user skipped this changelog in package selector.""" return self.skips_changelogs.filter(pk=changelog.id).exists() def skip(self, changelog): if not self.does_skip(changelog): action = 'skip' action_description = 'User skipped changelog:{0}'.format(changelog.id) UserHistoryLog.write(self, '', action, action_description) ChangelogSkip.objects.create( user=self, changelog=changelog) def add_feed_item(self, version): if self.send_digest == 'never': return None return FeedItem.objects.create(user=self, version=version) def save(self, *args, **kwargs): if self.rss_hash is None: self.rss_hash = sha1(self.username + settings.SECRET_KEY).hexdigest()[:RSS_HASH_LENGH] return super(User, self).save(*args, **kwargs) class Subscription(models.Model): email = models.EmailField() come_from = models.CharField(max_length=100) date_created = models.DateTimeField() def __unicode__(self): return self.email class Downloadable(object): """Adds method download, which uses attribute `source` to update attribute `downloader` if needed and then to download repository into a temporary directory. """ def download(self, downloader, report_back=lambda message, level=logging.INFO: None): """This method fetches repository into a temporary directory and returns path to this directory. It can report about downloading status using callback `report_back`. Everything what will passed to `report_back`, will be displayed to the end user in a processing log on a "Tune" page. 
""" if isinstance(downloader, dict): params = downloader.get('params', {}) downloader = downloader['name'] else: params = {} params.update(self.downloader_settings or {}) download = get_downloader(downloader) return download(self.source, report_back=report_back, **params) # A mixin to get/set ignore and check lists on a model. def get_ignore_list(self): """Returns a list with all filenames and directories to ignore when searching a changelog.""" return split_filenames(self.ignore_list) def set_ignore_list(self, items): self.ignore_list = u'\n'.join(items) def get_search_list(self): """Returns a list with all filenames and directories to check when searching a changelog.""" return parse_search_list(self.search_list) def set_search_list(self, items): def process(item): if isinstance(item, tuple) and item[1]: return u':'.join(item) else: return item self.search_list = u'\n'.join(map(process, items)) class ChangelogManager(models.Manager): def only_active(self): # active changelog is good and not paused queryset = self.good() return queryset.filter(paused_at=None) def good(self): # good changelog should have namespace, name, source and downloader return self.all().exclude( Q(name=None) | Q(namespace=None) | Q(downloader=None) | Q(source='')) def unsuccessful(self): return self.all().filter( Q(name=None) | Q(namespace=None) | Q(downloader=None) | Q(source='')) class Changelog(Downloadable, models.Model): objects = ChangelogManager() source = URLField(db_index=True, blank=True) created_at = models.DateTimeField(auto_now_add=True) # TODO: remove processing_started_at = models.DateTimeField(blank=True, null=True) problem = models.CharField(max_length=1000, help_text='Latest error message', blank=True, null=True) # TODO: remove filename = models.CharField(max_length=1000, help_text=('If changelog was discovered, then ' 'field will store it\'s filename'), blank=True, null=True) updated_at = models.DateTimeField(blank=True, null=True) next_update_at = 
models.DateTimeField(default=timezone.now) paused_at = models.DateTimeField(blank=True, null=True) last_update_took = models.IntegerField( help_text=('Number of seconds required to ' 'update this changelog last time'), default=0) ignore_list = models.CharField(max_length=1000, default='', help_text=('Comma-separated list of directories' ' and filenames to ignore searching' ' changelog.'), blank=True) # TODO: выяснить зачем тут два поля check_list и search_list check_list = models.CharField(max_length=1000, default='', help_text=('Comma-separated list of directories' ' and filenames to search' ' changelog.'), blank=True) search_list = models.CharField(max_length=1000, default='', help_text=('Comma-separated list of directories' ' and filenames to search' ' changelog.'), blank=True) xslt = models.TextField(default='', help_text=('XSLT transform to be applied to all html files.'), blank=True) namespace = models.CharField(max_length=NAMESPACE_LENGTH, blank=True, null=True) name = models.CharField(max_length=NAME_LENGTH, blank=True, null=True) description = models.CharField(max_length=DESCRIPTION_LENGTH, blank=True, default='') downloader = models.CharField(max_length=20, blank=True, null=True) downloader_settings = jsonfield.JSONField( default={}, help_text=('JSON with settings for selected downloader.'), blank=True) downloaders = jsonfield.JSONField( default=[], help_text=('JSON with guessed downloaders and their additional meta information.'), blank=True) status = models.CharField(max_length=40, default='created') processing_status = models.CharField(max_length=PROCESSING_STATUS_LENGTH) icon = models.CharField(max_length=1000, blank=True, null=True) class Meta: unique_together = ('namespace', 'name') def __unicode__(self): return u'Changelog from {0}'.format(self.source) __repr__ = make_repr('namespace', 'name', 'source') def latest_versions(self, limit): return self.versions.exclude(unreleased=True) \ .order_by('-order_idx')[:limit] def latest_version(self): 
versions = list(self.latest_versions(1)) if versions: return versions[0] def get_display_name(self): return u'{0}/{1}'.format( self.namespace, self.name) @staticmethod def create_uniq_name(namespace, name): """Returns a name which is unique in given namespace. Name is created by incrementing a value.""" if namespace and name: base_name = name counter = 0 while Changelog.objects.filter( namespace=namespace, name=name).exists(): counter += 1 name = '{0}{1}'.format(base_name, counter) return name @staticmethod def get_all_namespaces(like=None): queryset = Changelog.objects.all() if like is not None: queryset = queryset.filter( namespace__iexact=like ) return list(queryset.values_list('namespace', flat=True).distinct()) @staticmethod def normalize_namespaces(): namespaces_usage = defaultdict(int) changelogs_with_namespaces = Changelog.objects.exclude(namespace=None) for namespace in changelogs_with_namespaces.values_list('namespace', flat=True): namespaces_usage[namespace] += 1 def normalize(namespace): lowercased = namespace.lower() # here we process only capitalized namespaces if namespace == lowercased: return # if there lowercased is not used at all if lowercased not in namespaces_usage: return lowercased_count = namespaces_usage[lowercased] this_count = namespaces_usage[namespace] if lowercased_count >= this_count: # if num of occurences is equal, # prefer lowercased name Changelog.objects.filter( namespace=namespace).update( namespace=lowercased) else: Changelog.objects.filter( namespace=lowercased).update( namespace=namespace) del namespaces_usage[namespace] del namespaces_usage[lowercased] all_namespaces = namespaces_usage.keys() all_namespaces.sort() for namespace in all_namespaces: normalize(namespace) def save(self, *args, **kwargs): if self.id is None: # than objects just created and this is good # time to fix it's namespace existing_namespaces = Changelog.get_all_namespaces(like=self.namespace) if existing_namespaces: self.namespace = 
existing_namespaces[0] return super(Changelog, self).save(*args, **kwargs) def get_absolute_url(self): return reverse('project', namespace=self.namespace, name=self.name) def editable_by(self, user, light_user=None): light_moderators = set(self.light_moderators.values_list('light_user', flat=True)) moderators = set(self.moderators.values_list('id', flat=True)) if user.is_authenticated(): # Any changelog could be edited by me if user.is_superuser: return True if moderators or light_moderators: return user.id in moderators else: if moderators or light_moderators: return light_user in light_moderators return True def is_unsuccessful(self): return self.name is None or \ self.namespace is None or \ self.downloader is None or \ not self.source def is_moderator(self, user, light_user=None): light_moderators = set(self.light_moderators.values_list('light_user', flat=True)) moderators = set(self.moderators.values_list('id', flat=True)) if user.is_authenticated(): return user.id in moderators else: return light_user in light_moderators def add_to_moderators(self, user, light_user=None): """Adds user to moderators and returns 'normal' or 'light' if it really added him. 
In case if user already was a moderator, returns None.""" if not self.is_moderator(user, light_user): if user.is_authenticated(): Moderator.objects.create(changelog=self, user=user) return 'normal' else: if light_user is not None: self.light_moderators.create(light_user=light_user) return 'light' def create_issue(self, type, comment='', related_versions=[]): joined_versions = u', '.join(related_versions) # for some types, only one issue at a time is allowed if type == 'lesser-version-count': if self.issues.filter(type=type, resolved_at=None, related_versions=joined_versions).count() > 0: return issue = self.issues.create(type=type, comment=comment.format(related_versions=joined_versions), related_versions=joined_versions) chat.send(u'New issue of type "{issue.type}" with comment: "{issue.comment}" was created for <https://allmychanges.com/issues/?namespace={issue.changelog.namespace}&name={issue.changelog.name}|{issue.changelog.namespace}/{issue.changelog.name}>'.format( issue=issue)) def resolve_issues(self, type): self.issues.filter(type=type, resolved_at=None).update(resolved_at=timezone.now()) def create_preview(self, user, light_user, **params): params.setdefault('downloader', self.downloader) params.setdefault('downloader_settings', self.downloader_settings) params.setdefault('downloaders', self.downloaders) params.setdefault('source', self.source) params.setdefault('search_list', self.search_list) params.setdefault('ignore_list', self.ignore_list) params.setdefault('xslt', self.xslt) preview = self.previews.create(user=user, light_user=light_user, **params) # preview_test_task.delay( # preview.id, # ['Guessing downloders', # 'Downloading using git', # 'Searching versions', # 'Nothing found', # 'Downloading from GitHub Review', # 'Searching versions', # 'Some results were found']) return preview def set_status(self, status, **kwargs): changed_fields = ['status', 'updated_at'] if status == 'error': self.problem = kwargs.get('problem') 
changed_fields.append('problem') self.status = status self.updated_at = timezone.now() self.save(update_fields=changed_fields) def set_processing_status(self, status, level=logging.INFO): self.processing_status = status[:PROCESSING_STATUS_LENGTH] self.updated_at = timezone.now() self.save(update_fields=('processing_status', 'updated_at')) key = 'preview-processing-status:{0}'.format(self.id) cache.set(key, status, 10 * 60) def get_processing_status(self): key = 'preview-processing-status:{0}'.format(self.id) result = cache.get(key, self.processing_status) return result def calc_next_update(self): """Returns date and time when next update should be scheduled. """ hour = 60 * 60 min_update_interval = hour max_update_interval = 48 * hour num_trackers = self.trackers.count() # here we divide max interval on 2 because # on the last stage will add some randomness to # the resulting value time_to_next_update = (max_update_interval / 2) / math.log(max(math.e, num_trackers)) time_to_next_update = max(min_update_interval, time_to_next_update, 2 * self.last_update_took) # add some randomness time_to_next_update = random.randint( int(time_to_next_update * 0.8), int(time_to_next_update * 2.0)) # limit upper bound return timezone.now() + datetime.timedelta(0, time_to_next_update) def calc_next_update_if_error(self): # TODO: check and remove return timezone.now() + datetime.timedelta(0, 1 * 60 * 60) def schedule_update(self, async=True, full=False): with log.fields(changelog_name=self.name, changelog_namespace=self.namespace, async=async, full=full): log.info('Scheduling changelog update') self.set_status('processing') self.set_processing_status('Waiting in the queue') self.problem = None self.save() if full: self.versions.all().delete() if async: update_changelog_task.delay(self.id) else: update_changelog_task(self.id) def resume(self): self.paused_at = None self.next_update_at = timezone.now() # we don't need to save here, because this will be done in schedule_update 
self.schedule_update() def clean(self): super(Changelog, self).clean() self.source, _, _ = normalize_url(self.source, for_checkout=False) def update_description_from_source(self, fall_asleep_on_rate_limit=False): # right now this works only for github urls if 'github.com' not in self.source: return url, username, repo = normalize_url(self.source) url = 'https://api.github.com/repos/{0}/{1}'.format(username, repo) headers={'Authorization': 'token ' + settings.GITHUB_TOKEN} response = requests.get(url, headers=headers) if response.status_code == 200: data = response.json() self.description = data.get('description', '') self.save(update_fields=('description', )) if fall_asleep_on_rate_limit: remaining = int(response.headers['x-ratelimit-remaining']) if remaining == 1: to_sleep = int(response.headers['x-ratelimit-reset']) - time.time() + 10 print 'OK, now I need to sleep {0} seconds because of GitHub\'s rate limit.'.format(to_sleep) time.sleep(to_sleep) def add_synonym(self, synonym): """Just a shortcut.""" if self.synonyms.filter(source=synonym).count() == 0: # if this synonym already bound to some another project # then raise exception found = list(SourceSynonym.objects.filter(source=synonym)) if found: with log.fields(changelog_id=self.pk, another_changelog_id=found[0].changelog_id): raise SynonymError('Synonym already bound to a changelog') found = list(Changelog.objects.filter(source=synonym)) if found: with log.fields(changelog_id=self.pk, another_changelog_id=found[0].pk): raise SynonymError('Synonym matches a changelog\'s source') self.synonyms.create(source=synonym) def merge_into(self, to_ch): # move trackers to_ch_trackers = set(to_ch.trackers.values_list('id', flat=True)) for user in self.trackers.all(): if user.id not in to_ch_trackers: ChangelogTrack.objects.create(user=user, changelog=to_ch) action = 'moved-during-merge' action_description = 'User was moved from {0}/{1} to changelog:{2}'.format( self.namespace, self.name, to_ch.id) 
UserHistoryLog.write(user, '', action, action_description) # move issues for issue in self.issues.all(): issue.changelog = to_ch issue.save(update_fields=('changelog',)) # remove itself Changelog.objects.filter(pk=self.pk).delete() # add synonym to_ch.add_synonym(self.source) def set_tag(self, user, name, version_number): """Sets or updates tag with `name` on the version. If tag was updated, returns 'updated' otherwise, returns 'created' """ assert isinstance(version_number, basestring), \ 'Parameter "version_number" should be a string, not "{0}"'.format( type(version_number)) params = dict(user=user, name=name) existing_tag = self.tags.filter( **params) update = existing_tag.count() > 0 if update: existing_tag.delete() version = get_one_or_none(self.versions, number=version_number) self.tags.create(version=version, version_number=version_number, **params) return 'updated' if update else 'created' def remove_tag(self, user, name): """Removes tag with `name` on the version. """ self.tags.filter(user=user, name=name).delete() class SourceSynonym(models.Model): changelog = models.ForeignKey(Changelog, related_name='synonyms') created_at = models.DateTimeField(default=timezone.now) source = URLField(unique=True) class ChangelogTrack(models.Model): user = models.ForeignKey(User) changelog = models.ForeignKey(Changelog) created_at = models.DateTimeField(default=timezone.now) class ChangelogSkip(models.Model): user = models.ForeignKey(User) changelog = models.ForeignKey(Changelog) created_at = models.DateTimeField(default=timezone.now) class Issue(models.Model): """Keeps track any issues, related to a changelog. 
""" changelog = models.ForeignKey(Changelog, related_name='issues', blank=True, null=True) user = models.ForeignKey(User, related_name='issues', blank=True, null=True) light_user = models.CharField(max_length=40, blank=True, null=True) type = models.CharField(max_length=40) comment = models.TextField() created_at = models.DateTimeField(auto_now_add=True) resolved_at = models.DateTimeField(blank=True, null=True) resolved_by = models.ForeignKey(User, related_name='resolved_issues', blank=True, null=True) related_versions = models.TextField(default='', blank=True, help_text='Comma-separated list of versions, related to this issue') email = models.CharField(max_length=100, blank=True, null=True) page = models.CharField(max_length=100, blank=True, null=True) importance = models.IntegerField(db_index=True, blank=True, default=0) __repr__ = make_repr('changelog', 'type', 'comment', 'created_at', 'resolved_at') def save(self, *args, **kwargs): if not self.importance: self.importance = calculate_issue_importance( num_trackers=self.changelog.trackers.count() if self.changelog else 0, user=self.user, light_user=self.light_user) return super(Issue, self).save(*args, **kwargs) @staticmethod def merge(user, light_user): entries = Issue.objects.filter(user=None, light_user=light_user) if entries.count() > 0: with log.fields(username=user.username, num_entries=entries.count(), light_user=light_user): log.info('Merging issues') entries.update(user=user) def editable_by(self, user, light_user=None): return self.changelog.editable_by(user, light_user) def get_related_versions(self): response = [version.strip() for version in self.related_versions.split(',')] return filter(None, response) def get_related_deployments(self): return DeploymentHistory.objects \ .filter(deployed_at__lte=self.created_at) \ .order_by('-id')[:3] def resolve(self, user, notify=True): self.resolved_at = timezone.now() self.resolved_by = user self.save(update_fields=('resolved_at', 'resolved_by')) if notify: 
chat.send((u'Issue <https://allmychanges.com{url}|#{issue_id}> ' u'for {namespace}/{name} was resolved by {username}.').format( url=reverse('issue-detail', pk=self.id), issue_id=self.id, namespace=self.changelog.namespace, name=self.changelog.name, username=user.username)) if self.type == 'auto-paused': changelog = self.changelog with log.fields(changelog_id=changelog.id): log.info('Resuming changelog updates') changelog.resume() if notify: chat.send(u'Autopaused package {namespace}/{name} was resumed {username}.'.format( namespace=changelog.namespace, name=changelog.name, username=user.username)) class IssueComment(models.Model): issue = models.ForeignKey(Issue, related_name='comments') user = models.ForeignKey(User, blank=True, null=True, related_name='issue_comments') created_at = models.DateTimeField(default=timezone.now) message = models.TextField() class DiscoveryHistory(models.Model): """Keeps track any issues, related to a changelog. """ changelog = models.ForeignKey(Changelog, related_name='discovery_history') discovered_versions = models.TextField() new_versions = models.TextField() num_discovered_versions = models.IntegerField() num_new_versions = models.IntegerField() created_at = models.DateTimeField(auto_now_add=True) __repr__ = make_repr('discovered_versions') class LightModerator(models.Model): """These entries are created when anonymouse user adds another package into the system. When user signs up, these entries should be transformed into the Moderator entries. 
""" changelog = models.ForeignKey(Changelog, related_name='light_moderators') light_user = models.CharField(max_length=40) created_at = models.DateTimeField(auto_now_add=True) @staticmethod def merge(user, light_user): entries = LightModerator.objects.filter(light_user=light_user) for entry in entries: with log.fields(username=user.username, light_user=light_user): log.info('Transforming light moderator into the permanent') Moderator.objects.create( changelog=entry.changelog, user=user, from_light_user=light_user) entries.delete() @staticmethod def remove_stale_moderators(): LightModerator.objects.filter( created_at__lte=timezone.now() - datetime.timedelta(1)).delete() class Moderator(models.Model): changelog = models.ForeignKey(Changelog, related_name='+') user = models.ForeignKey(User, related_name='+') created_at = models.DateTimeField(auto_now_add=True) from_light_user = models.CharField(max_length=40, blank=True, null=True) class Preview(Downloadable, models.Model): user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='previews', blank=True, null=True) changelog = models.ForeignKey(Changelog, related_name='previews') light_user = models.CharField(max_length=40) created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(blank=True, null=True) source = models.URLField() ignore_list = models.CharField(max_length=1000, default='', help_text=('Comma-separated list of directories' ' and filenames to ignore searching' ' changelog.'), blank=True) # TODO: remove this field after migration on production check_list = models.CharField(max_length=1000, default='', help_text=('Comma-separated list of directories' ' and filenames to search' ' changelog.'), blank=True) search_list = models.CharField(max_length=1000, default='', help_text=('Comma-separated list of directories' ' and filenames to search' ' changelog.'), blank=True) xslt = models.TextField(default='', help_text=('XSLT transform to be applied to all html files.'), 
blank=True) problem = models.CharField(max_length=1000, help_text='Latest error message', blank=True, null=True) downloader = models.CharField(max_length=255, blank=True, null=True) downloader_settings = jsonfield.JSONField( default={}, help_text=('JSON with settings for selected downloader.'), blank=True) downloaders = jsonfield.JSONField( default=[], help_text=('JSON with guessed downloaders and their additional meta information.'), blank=True) done = models.BooleanField(default=False) status = models.CharField(max_length=40, default='created') processing_status = models.CharField(max_length=40) log = jsonfield.JSONField(default=[], help_text=('JSON with log of all operation applied during preview processing.'), blank=True) @property def namespace(self): return self.changelog.namespace @property def name(self): return self.changelog.name @property def description(self): return self.changelog.description def set_status(self, status, **kwargs): changed_fields = ['status', 'updated_at'] if status == 'processing': self.versions.all().delete() self.updated_at = timezone.now() changed_fields.append('updated_at') elif status == 'error': self.problem = kwargs.get('problem') changed_fields.append('problem') self.status = status self.updated_at = timezone.now() self.save(update_fields=changed_fields) def set_processing_status(self, status, level=logging.INFO): self.log.append(status) self.processing_status = status[:PROCESSING_STATUS_LENGTH] self.updated_at = timezone.now() self.save(update_fields=('processing_status', 'updated_at', 'log')) key = 'preview-processing-status:{0}'.format(self.id) cache.set(key, status, 10 * 60) def get_processing_status(self): key = 'preview-processing-status:{0}'.format(self.id) result = cache.get(key, self.processing_status) return result def schedule_update(self): self.set_status('processing') self.set_processing_status('Waiting in the queue') self.versions.all().delete() update_preview_task.delay(self.pk) class 
VersionManager(models.Manager): use_for_related_fields = True def create(self, *args, **kwargs): version = super(VersionManager, self).create(*args, **kwargs) changelog = kwargs.get('changelog') if changelog: version.associate_with_free_tags() return version def released(self): return self.exclude(unreleased=True) def unreleased(self): return self.filter(unreleased=True) class Version(models.Model): changelog = models.ForeignKey(Changelog, related_name='versions', blank=True, null=True, on_delete=models.SET_NULL) preview = models.ForeignKey(Preview, related_name='versions', blank=True, null=True, on_delete=models.SET_NULL) date = models.DateField(blank=True, null=True) number = models.CharField(max_length=255) unreleased = models.BooleanField(default=False) discovered_at = models.DateTimeField(blank=True, null=True) last_seen_at = models.DateTimeField(blank=True, null=True) filename = models.CharField(max_length=1000, help_text=('Source file where this version was found'), blank=True, null=True) raw_text = models.TextField(blank=True, null=True) processed_text = models.TextField(blank=True, null=True) order_idx = models.IntegerField(blank=True, null=True, help_text=('This field is used to reorder versions ' 'according their version numbers and to ' 'fetch them from database efficiently.')) tweet_id = models.CharField(max_length=1000, help_text=('Tweet id or None if we did not tweeted about this version yet.'), blank=True, null=True) objects = VersionManager() class Meta: get_latest_by = 'order_idx' ordering = ['-order_idx'] def __unicode__(self): return self.number def get_absolute_url(self): return self.changelog.get_absolute_url() + '#' + self.number def post_tweet(self): if not settings.TWITTER_CREDS: return if self.unreleased: raise RuntimeError('Unable to tweet about unreleased version') if self.tweet_id: return # because we already posted a tweet ch = self.changelog image_url = settings.BASE_URL + ch.get_absolute_url() \ + '?snap=1&version=' + self.number 
filename = sha1(image_url).hexdigest() + '.png' full_path = os.path.join(settings.SNAPSHOTS_ROOT, filename) result = envoy.run( '{root}/makescreenshot --width 590 --height 600 {url} {path}'.format( root=settings.PROJECT_ROOT, url=image_url, path=full_path)) if result.status_code != 0: with log.fields( status_code=result.status_code, std_out=result.std_out, std_err=result.std_err): log.error('Unable to make a screenshot') raise RuntimeError('Unable to make a screenshot') with open(full_path, 'rb') as f: from requests_oauthlib import OAuth1 auth = OAuth1(*settings.TWITTER_CREDS) response = requests.post( 'https://upload.twitter.com/1.1/media/upload.json', auth=auth, files={'media': ('screenshot.png', f.read(), 'image/png')}) media_id = response.json()['media_id_string'] url = settings.BASE_URL + self.get_absolute_url() text = '{number} of {namespace}/{name} was released: {url} #{namespace} #{name} #release'.format( number=self.number, namespace=ch.namespace, name=ch.name, url=url) response = requests.post( 'https://api.twitter.com/1.1/statuses/update.json', auth=auth, data={'status': text, 'media_ids': media_id}) if response.status_code == 200: self.tweet_id = response.json()['id_str'] self.save(update_fields=('tweet_id',)) return full_path def set_tag(self, user, name): """Convenience method to set tag on just this version. 
""" self.changelog.set_tag(user, name, self.number) def associate_with_free_tags(self): # associate free tags with this version for tag in self.changelog.tags.filter(version_number=self.number): tag.version = self tag.save(update_fields=('version',)) class Tag(models.Model): # this field shouldn't be blank or null # but I have to make it so, because otherwise # DB migrations wasn't possible changelog = models.ForeignKey(Changelog, blank=True, null=True, related_name='tags') # tag may be tied to a version in the database, # but in some cases, we may don't have parsed version # with given number version = models.ForeignKey(Version, blank=True, null=True, related_name='tags') user = models.ForeignKey(User, related_name='tags') # regex=ur'[a-z][a-z0-9-]*[a-z0-9]' name = models.CharField(max_length=40) # we have not any restrictions on the format of this field # this could be any string even something like 'latest' version_number = models.CharField(max_length=40) created_at = models.DateTimeField(auto_now_add=True) class Meta: unique_together = ('changelog', 'user', 'name') def get_absolute_url(self): # the name shouldn't contain any unicode or nonascii letters nor spaces # otherwise, we need to encode tu utf-8 and quote_plus it. 
return self.changelog.get_absolute_url() + '#' + self.name __repr__ = make_repr('name', 'version_number') class FeedItem(models.Model): user = models.ForeignKey(User) version = models.ForeignKey(Version, related_name='feed_items') created_at = models.DateTimeField(auto_now_add=True) ACTIVE_USER_ACTIONS = ( u'landing-digest-view', u'landing-track', u'landing-ignore', u'login', u'profile-update', u'digest-view', u'package-view', u'package-create', u'package-edit', u'edit-digest-view', u'index-view', u'track', u'untrack', u'untrack-allmychanges', u'create-issue', u'email-digest-open', u'email-digest-click') class UserHistoryLog(models.Model): user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='history_log', blank=True, null=True) light_user = models.CharField(max_length=40) action = models.CharField(max_length=40) description = models.CharField(max_length=1000) created_at = models.DateTimeField(auto_now_add=True) @staticmethod def merge(user, light_user): entries = UserHistoryLog.objects.filter(user=None, light_user=light_user) if entries.count() > 0: with log.fields(username=user.username, num_entries=entries.count(), light_user=light_user): log.info('Merging user history logs') entries.update(user=user) @staticmethod def write(user, light_user, action, description): user = user if user is not None and user.is_authenticated() else None return UserHistoryLog.objects.create(user=user, light_user=light_user, action=action, description=description) class UserStateHistory(models.Model): user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='state_history') date = models.DateField() state = models.CharField(max_length=40) class DeploymentHistory(models.Model): hash = models.CharField(max_length=32, default='') description = models.TextField() deployed_at = models.DateTimeField(auto_now_add=True) __repr__ = make_repr('deployed_at', 'hash') class EmailVerificationCode(models.Model): user = models.OneToOneField(settings.AUTH_USER_MODEL, 
related_name='email_verification_code') hash = models.CharField(max_length=32, default='') deployed_at = models.DateTimeField(auto_now_add=True) @staticmethod def new_code_for(user): hash = md5(str(time.time()) + settings.SECRET_KEY).hexdigest() try: code = user.email_verification_code code.hash = hash code.save() except EmailVerificationCode.DoesNotExist: code = EmailVerificationCode.objects.create( user=user, hash=hash) return code AUTOCOMPLETE_TYPES = ( ('source', 'Source URL'), ('namespace', 'Namespace'), ('package', 'Package')) AUTOCOMPLETE_ORIGINS = ( ('app-store', 'App Store'), ('pypi', 'PyPi')) COMMON_WORDS = set('a,able,about,across,after,all,almost,also,am,among,an,and,any,are,as,at,be,because,been,but,by,can,cannot,could,dear,did,do,does,either,else,ever,every,for,from,get,got,had,has,have,he,her,hers,him,his,how,however,i,if,in,into,is,it,its,just,least,let,like,likely,may,me,might,most,must,my,neither,no,nor,not,of,off,often,on,only,or,other,our,own,rather,said,say,says,she,should,since,so,some,than,that,the,their,them,then,there,these,they,this,tis,to,too,twas,us,wants,was,we,were,what,when,where,which,while,who,whom,why,will,with,would,yet,you,your'.split(',')) class AutocompleteData(models.Model): origin = models.CharField(max_length=100, choices=AUTOCOMPLETE_ORIGINS) title = models.CharField(max_length=255) description = models.CharField(max_length=DESCRIPTION_LENGTH, default='') type = models.CharField(max_length=10, choices=AUTOCOMPLETE_TYPES) source = models.CharField(max_length=255, # we need this because MySQL will output warning and break our migrations for greater length blank=True, null=True, db_index=True) icon = models.CharField(max_length=255, blank=True, null=True) changelog = models.ForeignKey(Changelog, blank=True, null=True, related_name='autocomplete') score = models.IntegerField(default=0, help_text=('A value from 0 to infinity. 
' 'Items with bigger values ' 'should appear at the top ' 'of the suggest.')) __repr__ = make_repr('title') def save(self, *args, **kwargs): super(AutocompleteData, self).save(*args, **kwargs) if self.words.count() == 0: self.add_words() def add_words(self, db_name='default'): if db_name == 'default': data = self else: data = AutocompleteData.objects.using(db_name).get(pk=self.pk) words = data.title.split() words = (word.strip() for word in words) words = set(word.lower() for word in words if len(word) > 3) words -= COMMON_WORDS words.add(data.title.lower()) words = [AutocompleteWord2.objects.using(db_name).get_or_create(word=word)[0] for word in words] data.words2.add(*words) class AutocompleteWord(models.Model): word = models.CharField(max_length=100, db_index=True) data = models.ForeignKey(AutocompleteData, related_name='words') __repr__ = make_repr('word') class AutocompleteWord2(models.Model): word = models.CharField(max_length=100, unique=True) data_objects = models.ManyToManyField( AutocompleteData, related_name='words2') __repr__ = make_repr('word') class AppStoreBatch(models.Model): """To identify separate processing batches. """ created = models.DateTimeField(auto_now_add=True) __repr__ = make_repr() class AppStoreUrl(models.Model): """This model is used when we are fetching data from app store for our autocomplete. Use management command update_appstore_urls to populate this collection. 
""" # we need this because MySQL will output warning and break our migrations for greater length source = models.CharField(max_length=255, blank=True, null=True, unique=True) autocomplete_data = models.OneToOneField(AutocompleteData, blank=True, null=True, related_name='appstore_url', on_delete=models.SET_NULL) batch = models.ForeignKey(AppStoreBatch, blank=True, null=True, related_name='urls', on_delete=models.SET_NULL) rating = models.FloatField(blank=True, null=True) rating_count = models.IntegerField(blank=True, null=True) __repr__ = make_repr('source') class MandrillMessage(models.Model): mid = models.CharField(max_length=32, help_text='Mandrills ID', db_index=True) timestamp = models.IntegerField() email = models.EmailField() user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='mandrill_messages', on_delete=models.SET_NULL, blank=True, null=True) payload = models.TextField() __repr__ = make_repr('mid', 'email')
"""Action and ARN definitions for the AWS Shield (DDoS protection) service.

Each AWS Shield API call is exposed as a module-level ``Action`` constant so
IAM policy code can reference it as an attribute (e.g. ``shield.ListAttacks``).
"""

from .aws import Action as BaseAction
from .aws import BaseARN

# Human-readable service name and the IAM action prefix ("shield:<action>").
service_name = "AWS Shield"
prefix = "shield"


class Action(BaseAction):
    """An IAM action scoped to the ``shield`` service prefix."""

    # ``action`` is the bare API call name; the string annotation keeps the
    # Optional-ness documented without requiring a ``typing`` import.
    def __init__(self, action: "str | None" = None) -> None:
        super().__init__(prefix, action)


class ARN(BaseARN):
    """An ARN builder pinned to ``service="shield"``."""

    def __init__(self, resource: str = "", region: str = "", account: str = "") -> None:
        super().__init__(
            service=prefix, resource=resource, region=region, account=account
        )


# One constant per Shield API action, named exactly after the API call.
AssociateDRTLogBucket = Action("AssociateDRTLogBucket")
AssociateDRTRole = Action("AssociateDRTRole")
AssociateHealthCheck = Action("AssociateHealthCheck")
AssociateProactiveEngagementDetails = Action("AssociateProactiveEngagementDetails")
CreateProtection = Action("CreateProtection")
CreateProtectionGroup = Action("CreateProtectionGroup")
CreateSubscription = Action("CreateSubscription")
DeleteProtection = Action("DeleteProtection")
DeleteProtectionGroup = Action("DeleteProtectionGroup")
DeleteSubscription = Action("DeleteSubscription")
DescribeAttack = Action("DescribeAttack")
DescribeAttackStatistics = Action("DescribeAttackStatistics")
DescribeDRTAccess = Action("DescribeDRTAccess")
DescribeEmergencyContactSettings = Action("DescribeEmergencyContactSettings")
DescribeProtection = Action("DescribeProtection")
DescribeProtectionGroup = Action("DescribeProtectionGroup")
DescribeSubscription = Action("DescribeSubscription")
DisableApplicationLayerAutomaticResponse = Action(
    "DisableApplicationLayerAutomaticResponse"
)
DisableProactiveEngagement = Action("DisableProactiveEngagement")
DisassociateDRTLogBucket = Action("DisassociateDRTLogBucket")
DisassociateDRTRole = Action("DisassociateDRTRole")
DisassociateHealthCheck = Action("DisassociateHealthCheck")
EnableApplicationLayerAutomaticResponse = Action(
    "EnableApplicationLayerAutomaticResponse"
)
EnableProactiveEngagement = Action("EnableProactiveEngagement")
GetSubscriptionState = Action("GetSubscriptionState")
ListAttacks = Action("ListAttacks")
ListProtectionGroups = Action("ListProtectionGroups")
ListProtections = Action("ListProtections")
ListResourcesInProtectionGroup = Action("ListResourcesInProtectionGroup")
ListTagsForResource = Action("ListTagsForResource")
TagResource = Action("TagResource")
UntagResource = Action("UntagResource")
UpdateApplicationLayerAutomaticResponse = Action(
    "UpdateApplicationLayerAutomaticResponse"
)
UpdateEmergencyContactSettings = Action("UpdateEmergencyContactSettings")
UpdateProtectionGroup = Action("UpdateProtectionGroup")
UpdateSubscription = Action("UpdateSubscription")
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):
    """Promote ``Problem.problemId`` to the model's primary key.

    Drops the auto-generated surrogate ``id`` column and re-declares the
    existing ``problemId`` integer column as the (non-serialized) primary key.
    """

    dependencies = [
        ('api', '0002_auto_20150326_1433'),
    ]

    operations = [
        # Remove the implicit surrogate key first...
        migrations.RemoveField(
            model_name='problem',
            name='id',
        ),
        # ...then make problemId the primary key in its place.
        migrations.AlterField(
            model_name='problem',
            name='problemId',
            field=models.IntegerField(serialize=False, primary_key=True),
            preserve_default=True,
        ),
    ]
"""Serve the Flask ``app`` with gevent's WSGI server on port 5000."""

# Monkey-patch the stdlib (sockets, threading, ...) *before* importing any
# module that may bind blocking primitives at import time.  In the original
# file ``monkey.patch_all()`` ran after ``from app import app``, which can
# leave the app and its dependencies using unpatched, blocking sockets.
from gevent import monkey
monkey.patch_all()

import signal

import gevent
from gevent.pool import Pool
from gevent.pywsgi import WSGIServer

from app import app

# spawn=Pool(None): unbounded greenlet pool, one greenlet per connection.
server = WSGIServer(('', 5000), app, spawn=Pool(None))


def stop():
    """Shut the server down gracefully on SIGINT."""
    server.stop()


# NOTE(review): on gevent >= 1.5 ``gevent.signal`` is a module and this call
# should be ``gevent.signal_handler`` -- confirm against the pinned version.
gevent.signal(signal.SIGINT, stop)

if __name__ == "__main__":
    server.serve_forever()
""" """ from datetime import datetime from ... import SEQUENCE_TYPES, STRING_TYPES from .formatters import format_time from ...utils.conv import to_raw def check_type(input_value, value_type): if isinstance(input_value, value_type): return True if isinstance(input_value, SEQUENCE_TYPES): for value in input_value: if not isinstance(value, value_type): return False return True return False def always_valid(input_value): return True def validate_generic_single_value(input_value): if not isinstance(input_value, SEQUENCE_TYPES): return True try: # object couldn't have a __len__ method if len(input_value) == 1: return True except Exception: pass return False def validate_integer(input_value): if check_type(input_value, (float, bool)): return False if str is bytes: # Python 2, check for long too if check_type(input_value, (int, long)): return True else: # Python 3, int only if check_type(input_value, int): return True sequence = True # indicates if a sequence must be returned if not isinstance(input_value, SEQUENCE_TYPES): sequence = False input_value = [input_value] else: sequence = True # indicates if a sequence must be returned valid_values = [] # builds a list of valid int values for element in input_value: try: # try to convert any type to int, an invalid conversion raise TypeError of ValueError, if both are valid and equal then then int() value is used float_value = float(element) int_value = int(element) if float_value == int_value: valid_values.append(int(element)) else: return False except (ValueError, TypeError): return False if sequence: return valid_values else: return valid_values[0] def validate_bytes(input_value): return check_type(input_value, bytes) def validate_boolean(input_value): # it could be a real bool or the string TRUE or FALSE, # only a single valued is allowed if validate_generic_single_value(input_value): # valid only if a single value or a sequence with a single element if isinstance(input_value, SEQUENCE_TYPES): input_value = input_value[0] 
if isinstance(input_value, bool): if input_value: return 'TRUE' else: return 'FALSE' if isinstance(input_value, STRING_TYPES): if input_value.lower() == 'true': return 'TRUE' elif input_value.lower() == 'false': return 'FALSE' return False def validate_time(input_value): # if datetime object doesn't have a timezone it's considered local time and is adjusted to UTC if not isinstance(input_value, SEQUENCE_TYPES): sequence = False input_value = [input_value] else: sequence = True # indicates if a sequence must be returned valid_values = [] changed = False for element in input_value: if isinstance(element, STRING_TYPES): # tries to check if it is already be a Generalized Time if isinstance(format_time(to_raw(element)), datetime): # valid Generalized Time string valid_values.append(element) else: return False elif isinstance(element, datetime): changed = True if element.tzinfo: # a datetime with a timezone valid_values.append(element.strftime('%Y%m%d%H%M%S%z')) else: # datetime without timezone, assumed local and adjusted to UTC offset = datetime.now() - datetime.utcnow() valid_values.append((element - offset).strftime('%Y%m%d%H%M%SZ')) else: return False if changed: if sequence: return valid_values else: return valid_values[0] else: return True
""" LeBLEU - Letter-edit / Levenshtein BLEU """ import logging __version__ = '0.0.1' __author__ = 'Stig-Arne Gronroos' __author_email__ = "stig-arne.gronroos@aalto.fi" _logger = logging.getLogger(__name__) def get_version(): return __version__ from .lebleu import LeBLEU def eval_single(*args, **kwargs): lb = LeBLEU(**kwargs) return lb.eval_single(*args) def eval(*args, **kwargs): lb = LeBLEU(**kwargs) return lb.eval(*args)
import cStringIO import zlib import wx def getMailData(): return zlib.decompress( "x\xda\x01M\x01\xb2\xfe\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00 \x00\ \x00\x00 \x08\x06\x00\x00\x00szz\xf4\x00\x00\x00\x04sBIT\x08\x08\x08\x08|\ \x08d\x88\x00\x00\x01\x04IDATX\x85\xed\x941\x0e\x82@\x10E\x9f\xc6`,\x88\xad\ \x8d\x8d\x89r\x02B\xc1\t\xbc\x94\x857\xf04\x9e\xc0C\x00\x95\xb1\xb1\xa52\xda\ h\xc1N\xe1\xc8f5j\x9cD^Ev\x98\x81\xffv\x01::\xfe\x9d^\x91e\xd7\xb6\xc2d\xb9\ \x04`\xb8X\xbc\xf5\x80sY\x02p\xdcn[\xeb\xfd\xb7\xa6\x7f\x80\x81\xaf o<O\xd3f\ \xc1\x19y\x1a\xd7\xbf\xf7$\x17\xec\x19\x90\xbd?\x15\x05\x00\xd5z\r\xc0\\n\ \x08\x99p\x89\xa5o<\x9b\x010J\x12\xe0\xf1,\xd83\x10\xafV\xcd\x85K \x04M\x04\ \x92\xcb\\\xfb\x06\x84\xa7M\xa8u_r\x1fv\r\x08\xb1\xfc\x07\x14\x952\xf3\x90\ \xdc\xd3\xa71l\xe0p\x00\xe0R\xd7@8\x91N.}\x91\x9b\xc3t\xda\xdag\xd0\x80$\xdf\ \xed\x00\x88\xf2\xbcYw\tb\xf9\xfe\xd5\x19\xd0\xa7=\xf2\xcdQ\xd83\xe0K\xae\t}\ \xdf\xd2'sd\xae\xc6\x9e\x81P\xf2\x97Q&\xd8l\xee\xca\xf6\x0c\xf8\xf6\xea[\xfc\ \xdc@G\xc7\rv\x18V\xd3#+\xef\x8c\x00\x00\x00\x00IEND\xaeB`\x82\xb38\x8e\xb0"\ ) def getMailBitmap(): return wx.BitmapFromImage(getMailImage()) def getMailImage(): stream = cStringIO.StringIO(getMailData()) return wx.ImageFromStream(stream) def getMailIcon(): icon = wx.EmptyIcon() icon.CopyFromBitmap(getMailBitmap()) return icon def getNoMailData(): return zlib.decompress( 'x\xda\x01G\x04\xb8\xfb\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00 \x00\ \x00\x00 \x08\x06\x00\x00\x00szz\xf4\x00\x00\x00\x04sBIT\x08\x08\x08\x08|\ \x08d\x88\x00\x00\x03\xfeIDATX\x85\xed\x97[o\xdb8\x10F\x0fu\xa1$\xeb\x96(A\ \x92\x1a}\xe8\xcf\xdc\xdd?\xeb\xa0h\x12\'\xa9#;\xba\x8b\x12\xb5\x0f\x81\x88\ \xba\xb6w\xb37\xf4a;\x80!\x98\xb09gf8\xdfPBX6?\xd2\xac\x1f\xea\xfd\'\xc0O\ \x00\xc0\xf9\xed\xd7_\xa6i\x9a\xf6\x16\xb3,\xe3\xea\xea\x8a8\x8eY,\x16X\xd6\ \xdf\xe3\x1c\xc7\x91\xba\xae\xa9\xaa\x8a\xa7\xa7\'6\x9b\xcd!@\x92$\x07\x8b\ \xbe\xef\x9b\xe7\xe5\xe5%a\x18"\xa5\xc4\xb6\xdf\xd7\xb2\xe38\xd2\xf7=UU\xf1\ 
\xf8\xf8HUUx\x9eG\x9a\xa6\x87\x00\xc76\xa8\xeb\x9a\xae\xeb\xf0}\x9f\xeb\xebk\ \xc20$MS\\\xd7}\x17\x80R\x8a\xddnG]\xd7\x94e\xc9\xd3\xd3\x13\xe38\x1e\xfd\ \xed\x1e\x80\x94\x12\xdf\xf7\xd1Z3\x0c\x03M\xd3\xb0^\xaf\x11B\xe0\xba.q\x1c#\ \xa5\xc4q\x8er3\x0c\x03}\xdfS\xd75_\xbf~e\xbd^\xd34\r\x8e\xe3\xe0\xfb>\xb6m\ \xd3\xb6-]\xd7\x1d\x07\x08\xc3\x90\x8b\x8b\x0b\x94R4MC\xd7u\xacV+\xba\xae\ \xc3q\x1c\x84\x10\xa4iz\x12`\x1cG\xca\xb2\xe4\xf9\xf9\x99\xdb\xdb[\xee\xef\ \xef\rx\x10\x04x\x9e\xc7f\xb39\r\x90$\t\x1f?~\xa4\xaek6\x9b\rEQ\xd0u\x1d\xbb\ \xdd\x8e\xbb\xbb;\xc6qd\x9a\xa6\x83L\xcc\x91\x17E\xc1z\xbdf\xbd^\xb3\xdb\xed\ \xd0Z\x1b\x80,\xcb\x88\xa2\x08\xa5\x14///\xc7\x01\xd24\xe5\xd3\xa7O\xbc\xbc\ \xbc\xd0\xf7=sw\xf4}\xcf\xed\xed-M\xd3`Y\x16B\x08\x92$\xd9\x03\x98k\xbdZ\xad\ x||\xc4\xb2,\xa2("\x0cC\x92$\xe1\xc3\x87\x0fdY\xb6\xe7\xfc\x00\xc0\xf3<\xe28\ 6N]\xd7\xc5\xb2,^__)\xcb\x92\xedv\xcb\xfd\xfd=Zk\xa6ib\x18\x06\x00\xaa\xaa2\ \x91o\xb7[\xfa\xbe\'\x8a"\x13\xf9\xe5\xe5%Y\x96\x99\xcc\x9d\x04\xf8\xb6\x14R\ J\xa4\x94\x0c\xc3\x80\xd6\xdaD\xfa\xf9\xf3g\x9a\xa6A\x08\xc1\xf9\xf99\x00y\ \x9e\xb3Z\xadx~~F\x08A\x14EDQD\x9a\xa6,\x97Knnn\xf0<\x8f\xef\xf5\xe6$\x80\ \xef\xfb\xf8\xbeO\xd34\xa6\x96\x00eYR\x96%y\x9e\xf3\xf0\xf0@Q\x14f=\xcfs\xba\ \xae\xdbK{\x92$\xa4ij\xfa\xbfi\x9a\xf7\x01\xcc&\xa5$I\x12\x93\xf2\xd9\x94R|\ \xf9\xf2\x05!\x04\x00\xd34\xa1\xb5&\x0cC\xe3<MS\xe28\xfeS\xed8\n0\x9f\xf6\ \xb9\xff\x83 `\x1cG\xe3\xb0(\n\xaa\xaa\xa2\xef{\x03\x1a\x86!q\x1c\x13\xc71Q\ \x14\xe1\xfb>\xae\xeb"\x84`\x18\x06\xf3\xdfw\x01h\xad\xe9\xfb\x9e\xae\xebPJa\ Y\x16q\x1cc\xdb\xb6\xc9\x84\x10\xe2(@\x9a\xa6\x04A\x80\x10\x02\xa5\x14]\xd7\ \xd1u\xdd\xc9L\xec\x01h\xad\x19\xc7\x11\xad5u]\x1b\xe7s4\xf3SJ\x89eY\xb4m\ \x0b\xbcu\xcf\xd9\xd9\x19gggDQ\x84\x94\x12\xa5\x14\xd34\xa1\x94\xa2\xaek\x82\ 0>N\x02\xccCd\x18\x06^__\xb1m\x9b0\x0c\xf1<\x0f\xd7u\x99\xa6\x89\xf3\xf3s\ \xf2<\x07\xde\x0e\x1f@\x14E,\x97K...L\xa4s\xf4\xf3\\\x98\xa6\t\xc7q\x0ef\xc2\ \x1e\xc0L\xab\xb5F)\x85\xeb\xba,\x16\x0b\x82 \xc0u]#<\x8e\xe3\xd0\xb6-\x9e\ 
\xe7\x01\x10\xc71WWWdY\x06\xbc\xb5\xabR\n\xdb\xb6)\x8a\x82\xb6mi\xdb\x16\xcb\ \xb2PJ\x9d\x06\x98ew\xb1X\x18\xfd\x0e\x82\xc0\xcc\x81\xd9\x82 `\xb9\\\x9a\ \xcd\xa4\x94&\xc5\xf0v>\x1c\xc7!\x08\x02\xa6i\xc2\xb6m\x94RF\xdaO\x02\xcc\ \x9a>\x0b\x89\xe7yx\x9ewp!\x99\xc1N\x99m\xdb\xe63\x7f\xdf\xedv\xf4}\xff\xc7%\ \xf0}\x9f4MM\xddOM\xbd\xbfb\xf3\x1eQ\x141\x8e\xa3)\xdbQ\x80yn\xcf\xa7\xfc[\ \xbd\xff\'fY\x96\xb9k|\x1f\xd4\xd130\xcf\xff\x7f\xd3\xc6q4w\x8c=\x80\xa6i\ \x8c\xb8\xe4yn.\x11\xff\x85)\xa5\xd8n\xb7\xd4um\xd6\xc4\xcfw\xc3\xff=\xc0\ \xefa\x89?u1\xd3\xf5 \x00\x00\x00\x00IEND\xaeB`\x82\xc4\x1f\x08\x9f' ) def getNoMailBitmap(): return wx.BitmapFromImage(getNoMailImage()) def getNoMailImage(): stream = cStringIO.StringIO(getNoMailData()) return wx.ImageFromStream(stream) def getNoMailIcon(): icon = wx.EmptyIcon() icon.CopyFromBitmap(getNoMailBitmap()) return icon def getErrMailData(): return zlib.decompress( 'x\xda\x01W\x05\xa8\xfa\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00 \x00\ \x00\x00 \x08\x06\x00\x00\x00szz\xf4\x00\x00\x00\x04sBIT\x08\x08\x08\x08|\ \x08d\x88\x00\x00\x05\x0eIDATX\x85\xcd\x97\xcf\x8f\xdb\xd4\x16\xc7?v\xae\x7f\ \xc5N&\x8e\xd3L\x92\xceL%T\x15\rbQQ!\xe8\x0e\xc4\x92\xff\x80%H\xac\xdeC\xf0\ \xfe\x94\x07\xdb\xf7\x96\xac\xfa\x1f TT\t\x06\x90\xa0,*UB#\x90f:i"\'\x99L\ \xec\xd8\xf1\xaf\x98\xc5LLC\x92\x8aH\xa0r$/|t\xef9\x1f\xdf\xfb\xbd\xe7\\K\ \x92\\\xe2E\x9a\xfcB\xb3\x03b\xdb\t\x9f}\xfa\xdf\xfc\xf5\xd1\x88\x83\xcf?\ \xa7\xf2\xf81\x00\xde\xe1!\xa7\xef\xbd\xc7\xf7\xf5:\xff\xfa\xf7G\xd2\xdf\n\ \xb0w\xff>\xd7\x83\x80\xeah\x84q\xe5\x93F#:GG\xec\x95\xcb\xdb\x86C\xdaV\x03\ \xdfjj\xfeZ\x9e#\xc71\xf2|\x0e\xc0\\\x96\x99\xab*?J\x12oF\xf1V+\xb0\xb5\x06\ \x1cUE\xccfEr\x00y>G\xccf8\xaa\xbam8\xc4\x7f>\xf98\xcf\xf3|\xc9\xd9n\xb7\xd9\ \xdb\xdbCQ\x94%\xff\xf5\xef\xbe\xa3~\xef\x1e\\\\\xac\rV\xaf\xd7\xf9\xe6\xc3\ \x0f\xf3\xb37\xdeX\xf2\'I\xc2\x93\'Ox\xfa\xf4\xe9*@\xa5RYu\nA\x92$\xe8\xba\ \x8eeY\xc5cw\xbb\xe8\xba\xbe\xf1kt]g\x7f\x7f\x1f\xeb\xe5\x97\xf1}\xbfx\x82 @\ 
\x08A\xb5Z]\xcd\xb5.\x90\xe7y\x84a\xc8\xee\xee.\x86a`\x9a&\xedv\x1b\xab\xd1@\ <g\x99UU\xa5\xd1h\xa0\xb7\xdbt\xbb]...\x18\x8dF\xf4\xfb}\xd24];g\t`\x91L\x92\ .u\x94\xe79\xc3\xe1\x10UU)\x97\xcb\x94\xc2\x90r\x96\xb1I\xb6Y\x96\x11\x86!\ \xe3\xf1\x98\xc1`\xc0p8$\xcfsvvv\x8ax\xd3\xe9\x940\x0c\xd7\x03T\xabU:\x9d\ \x0e\xa5\xd2e\x8a\xf3\xf3s\xfa\xfd>I\x92\x000w]\xdaq\xcc\xa65\x88\xe3\x18\ \xd7uyrr\xc2\xc9\xc9\t\xa3\xd1\x88k\xd7\xae\xd1j\xb5\n\xc0n\xb7\xfb|\x80\xfd\ \xfd}\xd24%\x08\x02\xe28&\x08\x02\x92$\xa1\xd7\xeb\xa1\xb9.N\x1coH\xff;@\xaf\ \xd7#I\x12L\xd3\xc44M,\xcb\xa2\\.#\x84\xc0\xf7}\xfa\xfd\xfef\x80\xbd\xbd=&\ \x93\tQ\x14aY\x16\xaa\xaa2\x1e\x8fq]\x97\xb2\xeb\xf2\xd2\x9f\x00p]\x17\xc7q\ \xa8\xd5j\xa8\xaaJ\xa9T\xa2^\xafS\xadV9;;[\x9a\xb3\x04\xa0\xaa*\x96e!I\x12Q\ \x14\x15\xfb\x15\xc71\xbe\xef#\x84(\xf4\xb1\xce$IB\x08\x81\xa6i\x94\xcbe*\ \x95J\xa1\xabj\xb5Z|\xd0F\x80\x85U*\x15TUe0\x18\xd0\xeb\xf50M\x93N\xa7C\xb3\ \xd9D\xd3\xb4\x8d\x00\x9a\xa6\xd1l6\x99w:h\x9a\x86\x10\x02\xc7qh4\x1a\xa8\ \xaa\xca\x1f\xeb\xcdF\x00M\xd3\xd04\x8d\xe9t\x8a,\xcb\xc5\xbbh\xb7\x99\xbe\ \xf2\n%IB\xef\xf5P\xa6S\x00\x12\xd3d\xd6j1=<D\xb4\xdb\xc5y\x97e\x19\xc30\x8a\ \xf7g\xc5\xf7\\\x80M\x16\x1c\x1c\xd0{\xf7]f\xad\x16\xbb_|Q\x00D\x8d\x06\xee\ \xdbos~\xe7\x0e\xb3+\xc5\xffY\xdb\n \xb5m|\xdbF\xb9\xb8 ;:*\xfc\x99e1\xbdy\ \x13\xff\xf0p\xab\xe4\xf0O\xbd\x90DQD\x1c\xc7dY\x86a\x18\x08\xb1<Lq\x1c\xa2\ \x1b7\x98\\\x1d\xc9\xe8\xc6\r\x84\xe3`\x9a\xe6\xf28E!\xcb2<\xcf[Q\xffs\x01|\ \xdf\xc7u]\x84\x104\x9b\xcd\xa22.,\x06\xce\xb3\x8c\xe4\xaa\xa0(\xbb\xbbX\xb7\ o\xe3\x1c\x1c,\x8d\xcb\xb2\x8c\xe9t\x8a\xef\xfb4\x1a\x8d\x15\xc0\x15\x80$I\ \x08\x82\xa0xj\xb5\x1a\xb6m\xaft\xc0sE\xe1\xc20\x08\xaeDh\x9a&V\xa7\x83m\xdb\ K\xe3f\xb3\x19a\x18\x16\xf1$I*\xca\xfaZ\x80\xc9d\xc2\xe9\xe9)\x95J\x85V\xab\ \x85i\x9a+\xcb\x0f\x97M\xab\xd5j\x15\xc1\x14E\xc10\x8c\x95q\x8b:\xa0\xeb:\ \xb3\xd9\x8c\xd3\xd3S&\x93\xc9f\x80(\x8a\xf0<\x8fj\xb5\x8a\xe38+E\'MS\xd24E\ \nCjA\x80\xbchR\x8aB*\xcb\xcc\xae\x92.\xa0\x85\x10\xec\xec\xec\xa0\xeb:\xddn\ 
\x17\xcf\xf3\x88\xa2h3\xc0\xa2\x19\xd5j\xb5\x95}\x07\x08\x82\x80\xe1p\x88x\ \xfc\x18\xe7\xe8\x08\xa3\xdb\xbd\x04\xeft\x18\xdd\xbdKrx\x88\xe38+\x17\x8fE/\ \x90$\t\xd7u7\x03\x18\x86\x81m\xdbh\x9aV|\xed\xb36\x1d\x8d\x18\x1f\x1f\xa3}\ \xfd5;\xf7\xee!\xfd\xfc\xf3\xe5\xca\xdc\xba\x857\x9f3S\x14tIZ\xabxM\xd3\xb0m\ {e\xab\xd6j`\xd3\x91)=z\x84\xf3\xe5\x97\x94\x1f>D\x1b\x0c~\x0f>\x18p\xed\xfe\ }\x82\xf1\x98\xe0\x9dw\xf0^}u\xed\xfc8\x8eW5\x10\x86a\xd1$\xfa\xfd>\xaa\xaa\ \xae\x15\x1e@\xeb\xa7\x9fx\xe9\xc1\x03v\x8e\x8f\x91\x9fi\xcb\xcaxL\xed\xe1C$\ \xcf\xe3\x17\xc7\xa1\xf7\x87\xcb\xec\xc2\xd24\xa5\xdf\xef\x13\x04A\xe1\xdb\ \xfa\xbf\xe0\xab\x0f\xde\xcfo\x9e\x9da\xff\xf0\x03\xc6U\x1d\x08ww9\xbfs\x87\ \xe3\xeb\xd7y\xeb\x7f\xff\xff{\xff\x8c\x1e\xdd\xbe\x8dqp@\xe9\xd7_\xc9\xaf\ \x00\xbcz\x9d\xee\xdd\xbb<\xaa\xd7\xb7\r\xb7\xfd\n\xfc\xd5\xf6\xc2\x9b\xd1o\ \xd1r.\xaf\xfe\x90\x016\x00\x00\x00\x00IEND\xaeB`\x82\x8a\x1a\x9f\x99' ) def getErrMailBitmap(): return wx.BitmapFromImage(getErrMailImage()) def getErrMailImage(): stream = cStringIO.StringIO(getErrMailData()) return wx.ImageFromStream(stream) def getErrMailIcon(): icon = wx.EmptyIcon() icon.CopyFromBitmap(getErrMailBitmap()) return icon
'''
Flask endpoints for viewing and manipulating Albums.

Fix in this revision: removed a stray debug ``print(children)`` left in
``post_album_add_child``.
'''
import flask; from flask import request
import os
import urllib.parse

from voussoirkit import flasktools
from voussoirkit import gentools
from voussoirkit import stringtools

import etiquette

from .. import common

site = common.site
session_manager = common.session_manager

# Individual albums

@site.route('/album/<album_id>')
def get_album_html(album_id):
    '''
    Render the single-album page.
    '''
    album = common.P_album(album_id, response_type='html')
    response = common.render_template(
        request,
        'album.html',
        album=album,
        view=request.args.get('view', 'grid'),
    )
    return response

@site.route('/album/<album_id>.json')
def get_album_json(album_id):
    '''
    Return the album as JSON.
    '''
    album = common.P_album(album_id, response_type='json')
    album = album.jsonify()
    return flasktools.json_response(album)

@site.route('/album/<album_id>.zip')
def get_album_zip(album_id):
    '''
    Stream the album's photos (optionally including children) as a zip.
    '''
    album = common.P_album(album_id, response_type='html')

    recursive = request.args.get('recursive', True)
    recursive = stringtools.truthystring(recursive)

    streamed_zip = etiquette.helpers.zip_album(album, recursive=recursive)

    if album.title:
        download_as = f'album {album.id} - {album.title}.zip'
    else:
        download_as = f'album {album.id}.zip'

    # filename*=UTF-8'' is the RFC 5987 encoding for non-ASCII filenames.
    download_as = etiquette.helpers.remove_path_badchars(download_as)
    download_as = urllib.parse.quote(download_as)
    outgoing_headers = {
        'Content-Type': 'application/octet-stream',
        'Content-Disposition': f'attachment; filename*=UTF-8\'\'{download_as}',
    }

    return flask.Response(streamed_zip, headers=outgoing_headers)

@site.route('/album/<album_id>/add_child', methods=['POST'])
@flasktools.required_fields(['child_id'], forbid_whitespace=True)
def post_album_add_child(album_id):
    '''
    Add child album(s) (comma/space separated ids) to this album.
    '''
    album = common.P_album(album_id, response_type='json')

    child_ids = stringtools.comma_space_split(request.form['child_id'])
    children = list(common.P_albums(child_ids, response_type='json'))
    album.add_children(children, commit=True)
    response = album.jsonify()
    return flasktools.json_response(response)

@site.route('/album/<album_id>/remove_child', methods=['POST'])
@flasktools.required_fields(['child_id'], forbid_whitespace=True)
def post_album_remove_child(album_id):
    '''
    Remove child album(s) (comma/space separated ids) from this album.
    '''
    album = common.P_album(album_id, response_type='json')

    child_ids = stringtools.comma_space_split(request.form['child_id'])
    children = list(common.P_albums(child_ids, response_type='json'))
    album.remove_children(children, commit=True)
    response = album.jsonify()
    return flasktools.json_response(response)

@site.route('/album/<album_id>/remove_thumbnail_photo', methods=['POST'])
def post_album_remove_thumbnail_photo(album_id):
    '''
    Clear the album's thumbnail photo.
    '''
    album = common.P_album(album_id, response_type='json')
    album.set_thumbnail_photo(None)
    common.P.commit(message='album remove thumbnail photo endpoint')
    return flasktools.json_response(album.jsonify())

@site.route('/album/<album_id>/refresh_directories', methods=['POST'])
def post_album_refresh_directories(album_id):
    '''
    Re-digest the album's associated directories to pick up new files.
    '''
    album = common.P_album(album_id, response_type='json')
    for directory in album.get_associated_directories():
        if not directory.is_dir:
            continue
        digest = common.P.digest_directory(directory, new_photo_ratelimit=0.1)
        gentools.run(digest)
    common.P.commit(message='refresh album directories endpoint')
    return flasktools.json_response({})

@site.route('/album/<album_id>/set_thumbnail_photo', methods=['POST'])
@flasktools.required_fields(['photo_id'], forbid_whitespace=True)
def post_album_set_thumbnail_photo(album_id):
    '''
    Set the album's thumbnail to the given photo.
    '''
    album = common.P_album(album_id, response_type='json')
    photo = common.P_photo(request.form['photo_id'], response_type='json')
    album.set_thumbnail_photo(photo)
    common.P.commit(message='album set thumbnail photo endpoint')
    return flasktools.json_response(album.jsonify())

# Photo-album relationships

@site.route('/album/<album_id>/add_photo', methods=['POST'])
@flasktools.required_fields(['photo_id'], forbid_whitespace=True)
def post_album_add_photo(album_id):
    '''
    Add a photo or photos to this album.
    '''
    album = common.P_album(album_id, response_type='json')

    photo_ids = stringtools.comma_space_split(request.form['photo_id'])
    photos = list(common.P_photos(photo_ids, response_type='json'))
    album.add_photos(photos, commit=True)
    response = album.jsonify()
    return flasktools.json_response(response)

@site.route('/album/<album_id>/remove_photo', methods=['POST'])
@flasktools.required_fields(['photo_id'], forbid_whitespace=True)
def post_album_remove_photo(album_id):
    '''
    Remove a photo or photos from this album.
    '''
    album = common.P_album(album_id, response_type='json')

    photo_ids = stringtools.comma_space_split(request.form['photo_id'])
    photos = list(common.P_photos(photo_ids, response_type='json'))
    album.remove_photos(photos, commit=True)
    response = album.jsonify()
    return flasktools.json_response(response)

# Album tag operations

@site.route('/album/<album_id>/add_tag', methods=['POST'])
def post_album_add_tag(album_id):
    '''
    Apply a tag to every photo in the album.
    '''
    response = {}
    album = common.P_album(album_id, response_type='json')

    tag = request.form['tagname'].strip()
    try:
        tag = common.P_tag(tag, response_type='json')
    except etiquette.exceptions.NoSuchTag as exc:
        response = exc.jsonify()
        return flasktools.json_response(response, status=404)
    recursive = request.form.get('recursive', False)
    recursive = stringtools.truthystring(recursive)
    album.add_tag_to_all(tag, nested_children=recursive, commit=True)
    response['action'] = 'add_tag'
    response['tagname'] = tag.name
    return flasktools.json_response(response)

# Album metadata operations

@site.route('/album/<album_id>/edit', methods=['POST'])
def post_album_edit(album_id):
    '''
    Edit the title / description.
    '''
    album = common.P_album(album_id, response_type='json')

    title = request.form.get('title', None)
    description = request.form.get('description', None)
    album.edit(title=title, description=description, commit=True)
    response = album.jsonify(minimal=True)
    return flasktools.json_response(response)

@site.route('/album/<album_id>/show_in_folder', methods=['POST'])
def post_album_show_in_folder(album_id):
    '''
    Open the album's single associated directory in the OS file browser.
    Localhost only; 501 on non-Windows platforms.
    '''
    if not request.is_localhost:
        flask.abort(403)
    album = common.P_album(album_id, response_type='json')
    directories = album.get_associated_directories()
    if len(directories) != 1:
        flask.abort(400)
    directory = directories.pop()

    if os.name == 'nt':
        command = f'start explorer.exe "{directory.absolute_path}"'
        os.system(command)
        return flasktools.json_response({})

    flask.abort(501)

# Album listings

@site.route('/all_albums.json')
@flasktools.cached_endpoint(max_age=15)
def get_all_album_names():
    '''
    Map of every album id to its display name (cached for 15 s).
    '''
    all_albums = {album.id: album.display_name for album in common.P.get_albums()}
    response = {'albums': all_albums}
    return flasktools.json_response(response)

def get_albums_core():
    '''
    Root albums sorted by display name (case-insensitive).
    '''
    albums = list(common.P.get_root_albums())
    albums.sort(key=lambda x: x.display_name.lower())
    return albums

@site.route('/albums')
def get_albums_html(site_albums=None):
    '''
    Render the albums listing page.
    '''
    albums = get_albums_core()
    response = common.render_template(
        request,
        'album.html',
        albums=albums,
        view=request.args.get('view', 'grid'),
    )
    return response

@site.route('/albums.json')
def get_albums_json():
    '''
    Root albums as minimal JSON.
    '''
    albums = get_albums_core()
    albums = [album.jsonify(minimal=True) for album in albums]
    return flasktools.json_response(albums)

# Album create / delete

@site.route('/albums/create_album', methods=['POST'])
def post_albums_create():
    '''
    Create a new album, optionally nested under ``parent_id``.
    '''
    title = request.form.get('title', None)
    description = request.form.get('description', None)
    parent_id = request.form.get('parent_id', None)
    if parent_id is not None:
        # Resolve the parent up front so a bad id fails before creation.
        parent = common.P_album(parent_id, response_type='json')

    user = session_manager.get(request).user

    album = common.P.new_album(title=title, description=description, author=user)
    if parent_id is not None:
        parent.add_child(album)
    common.P.commit('create album endpoint')

    response = album.jsonify(minimal=False)
    return flasktools.json_response(response)

@site.route('/album/<album_id>/delete', methods=['POST'])
def post_album_delete(album_id):
    '''
    Delete the album.
    '''
    album = common.P_album(album_id, response_type='json')
    album.delete(commit=True)
    return flasktools.json_response({})
import sys def ip2str(ip): l = [ (ip >> (3*8)) & 0xFF, (ip >> (2*8)) & 0xFF, (ip >> (1*8)) & 0xFF, (ip >> (0*8)) & 0xFF, ] return '.'.join([str(i) for i in l]) def str2ip(line): a, b, c, d = [int(s) for s in line.split('.')] ip = 0 ip += (a << (3*8)) ip += (b << (2*8)) ip += (c << (1*8)) ip += (d << (0*8)) return ip blockip = str2ip(sys.stdin.readline()) hostmask = 1 bitcount = 1 for line in sys.stdin.readlines(): try: ip = str2ip(line.strip()) except: print 'Ignored line:', line, continue while (blockip & (~hostmask)) != (ip & (~hostmask)): hostmask = (hostmask << 1) | 1 bitcount += 1 print ip2str(blockip & (~hostmask)) + '/' + str(bitcount), 'hostmask =', ip2str(hostmask) print 'wrong way around'
""" Workaround for a conda-build bug where failing to compile some Python files results in a build failure. See https://github.com/conda/conda-build/issues/1001 """ import os import sys py2_only_files = [] py3_only_files = [ 'numba/tests/annotation_usecases.py', ] def remove_files(basedir): """ Remove unwanted files from the current source tree """ if sys.version_info >= (3,): removelist = py2_only_files msg = "Python 2-only file" else: removelist = py3_only_files msg = "Python 3-only file" for relpath in removelist: path = os.path.join(basedir, relpath) print("Removing %s %r" % (msg, relpath)) os.remove(path) if __name__ == "__main__": remove_files('.')
import datetime import random import sys class DayLife: """Life in a day.""" def __init__(self, date, life): """Set birth datetime and life.""" self.birthdate = date self.life = life finalyear = self.birthdate.year + self.life finaldate = datetime.datetime(finalyear, self.birthdate.month, self.birthdate.day) self.finaldate = finaldate - datetime.timedelta(days=1) def now(self): """Calculate current time.""" curdate = datetime.datetime.now() maxdays = (self.finaldate - self.birthdate).days curdays = (curdate - self.birthdate).days curtime = datetime.timedelta(days=1) / maxdays curtime = curtime * curdays return datetime.time( (curtime.seconds / 60) / 60, (curtime.seconds / 60) % 60, curtime.seconds % 60) if __name__ == '__main__': # options startyear = 1900 endyear = 2000 life = 200 print startyear, "<= a <=", endyear print "n =", life daycount = (datetime.datetime(endyear, 12, 31) - datetime.datetime(startyear, 1, 1)).days birthdate = datetime.datetime(startyear, 1, 1) + \ datetime.timedelta(days=random.randint(0, daycount)) args = sys.argv if len(args) == 4: year = int(args[1]) month = int(args[2]) date = int(args[3]) birthdate = datetime.datetime(year, month, date) print "birthdate:", birthdate.date() mylife = DayLife(birthdate, life) print "finaldate:", mylife.finaldate.date() print "today:", mylife.now()
""" The scheduler is responsible for the module handling. """ import modules from importlib import import_module from additional.Logging import Logging class Scheduler(): """ This class instantiates the modules, takes care of the module's versions and gets the module's select queries. """ # dictonary of instantiated modules _instantiated_modules = {} def __init__(self, db): self._db = db self._log = Logging(self.__class__.__name__).get_logger() ######################################################################## self._instantiate_modules() self._check_module_versions() ############################################################################ def _instantiate_modules(self): """ Method to instantiate modules. All modules must contain a class with the exact same name as the module. This class must implement the abstract base class (abc) DatasourceBase. """ # finds all modules to import for module_name in modules.__all__: # imports an instantiates the module by name module = import_module('modules.' + module_name) module = getattr(module, module_name)() # makes sure the module implements DatasourceBase if not isinstance(module, modules.DatasourceBase): raise SubClassError( 'Modul is not an instance of DatasourceBase: {}' .format(module.__class__.__name__)) # adds the module to the list of instantieated modules self._instantiated_modules[module.__class__.__name__] = module ############################################################################ def _check_module_versions(self): """ Method to check module's versions. 
""" for module_name, module in self._instantiated_modules.items(): module_version = module.get_version() # searches module's version in the database result = self._db.select_data(''' SELECT version FROM versions WHERE module = %s''', (module_name,)) if not result: # appends the module with it's version to the database self._db.insert_data(''' INSERT INTO versions (module, version) VALUES (%s, %s)''', (module_name, module_version)) elif result[0][0] < module_version: # updates the request entry self.server.db.update_data(''' UPDATE versions SET version = %s WHERE module = %s''', (module_version, module_name,)) elif result[0][0] > module_version: raise VersionError('Old module version detected!' + 'Module: {} - Expected: {} - Found: {}' .format(module_name, result[0][0], module_version)) ############################################################################ def get_module_select_queries(self): """ Returns the module's search queries. """ queries = {} for module_name, module in self._instantiated_modules.items(): queries[module_name] = module.get_queries('select') return queries class SubClassError(Exception): """ Exception for module subclass errors. """ class VersionError(Exception): """ Exception for module version errors. """
from django.conf.urls import patterns, include, url
from django.shortcuts import redirect, render_to_response
from django.template.context import RequestContext
from django.contrib import admin

# Register ModelAdmin classes from all installed apps.
admin.autodiscover()


def to_blog(request):
    # Site index: temporary redirect to the blog app.
    return redirect('/blog/', permanent=False)


def sslicense(request):
    # Renders the project's BSD 2-clause license as an HTML docs page.
    # NOTE(review): the exact line-break positions inside this literal could
    # not be recovered from the mangled source — the wording is verbatim,
    # the wrapping is reconstructed; verify against the original file.
    slicense = """
Copyright (c) 2012-2013 Justin Crawford <Justasic@gmail.com>
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.

2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE
"""
    ctx = {
        'parts': {
            "title": "License",
            "html_title": "License",
            # Newlines become <br> so the docs template keeps the wrapping.
            "fragment": slicense.replace('\n', '<br>'),
        },
    }
    return render_to_response('docs/docs.html', RequestContext(request, ctx))


urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'StackSmash.views.home', name='home'),
    # url(r'^StackSmash/', include('StackSmash.foo.urls')),
    # TODO: Fix index and use something... Should identify subdomains somehow..
    #url(r'^$', include('StackSmash.apps.blog.urls')),
    url(r'^license/', sslicense, name='license'),
    #url(r'^docs/', include('StackSmash.apps.docs.urls'), name='docs', app_name='docs'),
    url(r'^blog/', include('StackSmash.apps.blog.urls', namespace='blog')),
    url(r'^projects/', include('StackSmash.apps.projects.urls', namespace='projects')),
    url(r'^upload/', include('StackSmash.apps.uploader.urls', namespace='upload')),
    url(r'^$', to_blog, name='index'),
    #url(r'^projects/', include('StackSmash.apps.projects.urls')),

    # Uncomment the next line to enable the admin:
    url(r'^admin/', include(admin.site.urls), name='admin'),
)
import sys

try:
    # Python 3.3+: the ABCs live in collections.abc, and the plain
    # `collections.Iterator` alias was removed entirely in Python 3.10.
    from collections.abc import Iterator
except ImportError:  # Python 2 fallback
    from collections import Iterator


class GeocoderResult(Iterator):
    """
    A geocoder resultset to iterate through address results.
    Exemple:

    results = Geocoder.geocode('paris, us')
    for result in results:
        print(result.formatted_address, result.location)

    Provide shortcut to ease field retrieval, looking at 'types' in each
    'address_components'.
    Example:
        result.country
        result.postal_code

    You can also choose a different property to display for each lookup type.
    Example:
        result.country__short_name

    By default, use 'long_name' property of lookup type, so:
        result.country
    and:
        result.country__long_name
    are equivalent.
    """
    # Friendly aliases for Google's address-component type names.
    attribute_mapping = {
        "state": "administrative_area_level_1",
        "province": "administrative_area_level_1",
        "city": "locality",
        "county": "administrative_area_level_2",
    }

    def __init__(self, data):
        """
        Creates instance of GeocoderResult from the provided JSON data array
        """
        self.data = data
        self.len = len(self.data)
        self.current_index = 0
        self.current_data = self.data[0]

    def __len__(self):
        return self.len

    def __iter__(self):
        return self

    def return_next(self):
        # Shared body for Py2 `next` and Py3 `__next__`.
        if self.current_index >= self.len:
            raise StopIteration
        self.current_data = self.data[self.current_index]
        self.current_index += 1
        return self

    def __getitem__(self, key):
        """
        Accessing GeocoderResult by index will return a GeocoderResult
        with just one data entry
        """
        return GeocoderResult([self.data[key]])

    def __unicode__(self):
        return self.formatted_address

    if sys.version_info[0] >= 3:  # Python 3
        def __str__(self):
            return self.__unicode__()

        def __next__(self):
            return self.return_next()
    else:  # Python 2
        def __str__(self):
            return self.__unicode__().encode('utf8')

        def next(self):
            return self.return_next()

    @property
    def count(self):
        return self.len

    @property
    def coordinates(self):
        """
        Return a (latitude, longitude) coordinate pair of the current result
        """
        location = self.current_data['geometry']['location']
        return location['lat'], location['lng']

    @property
    def latitude(self):
        return self.coordinates[0]

    @property
    def longitude(self):
        return self.coordinates[1]

    @property
    def raw(self):
        """
        Returns the full result set in dictionary format
        """
        return self.data

    @property
    def valid_address(self):
        """
        Returns true if queried address is valid street address
        """
        return self.current_data['types'] == ['street_address']

    @property
    def formatted_address(self):
        return self.current_data['formatted_address']

    def __getattr__(self, name):
        # Shortcut lookup: `result.country` or `result.country__short_name`
        # searches the current result's address_components by type.
        lookup = name.split('__')
        attribute = lookup[0]
        if (attribute in GeocoderResult.attribute_mapping):
            attribute = GeocoderResult.attribute_mapping[attribute]
        try:
            prop = lookup[1]
        except IndexError:
            prop = 'long_name'

        for elem in self.current_data['address_components']:
            if attribute in elem['types']:
                return elem[prop]
        # BUG FIX: the original fell off the end and implicitly returned
        # None, which silently masked typos and broke hasattr()/copy/pickle
        # probing.  Missing attributes must raise AttributeError.
        raise AttributeError(name)


class GeocoderError(Exception):
    """Base class for errors in the :mod:`pygeocoder` module.

    Methods of the :class:`Geocoder` raise this when something goes wrong.

    """
    #: See http://code.google.com/apis/maps/documentation/geocoding/index.html#StatusCodes
    #: for information on the meaning of these status codes.
    G_GEO_OK = "OK"
    G_GEO_ZERO_RESULTS = "ZERO_RESULTS"
    G_GEO_OVER_QUERY_LIMIT = "OVER_QUERY_LIMIT"
    G_GEO_REQUEST_DENIED = "REQUEST_DENIED"
    G_GEO_MISSING_QUERY = "INVALID_REQUEST"

    def __init__(self, status, url=None, response=None):
        """Create an exception with a status and optional full response.

        :param status: Either a ``G_GEO_`` code or a string explaining the
         exception.
        :type status: int or string
        :param url: The query URL that resulted in the error, if any.
        :type url: string
        :param response: The actual response returned from Google, if any.
        :type response: dict
        """
        Exception.__init__(self, status)  # Exception is an old-school class
        self.status = status
        self.url = url
        self.response = response

    def __str__(self):
        """Return a string representation of this :exc:`GeocoderError`."""
        return 'Error %s\nQuery: %s' % (self.status, self.url)

    def __unicode__(self):
        """Return a unicode representation of this :exc:`GeocoderError`."""
        # NOTE: `unicode` only exists on Python 2; this method is never
        # called by Python 3's str() machinery.
        return unicode(self.__str__())
from setuptools import setup from djangocms_carousel import __version__ INSTALL_REQUIRES = [ ] CLASSIFIERS = [ 'Development Status :: 5 - Production/Stable', 'Environment :: Web Environment', 'Framework :: Django', 'Intended Audience :: Developers', 'License :: OSI Approved :: BSD License', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Topic :: Communications', 'Topic :: Internet :: WWW/HTTP :: Dynamic Content', 'Topic :: Internet :: WWW/HTTP :: Dynamic Content :: Message Boards', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', ] setup( name='djangocms-carousel', version=__version__, description='Slider Plugin for django CMS', author='Andrew Mirsky', author_email='andrew@mirsky.net', url='https://git.mirsky.net/mirskyconsulting/djangocms-carousel', packages=['djangocms_carousel', 'djangocms_carousel.migrations'], install_requires=INSTALL_REQUIRES, license='LICENSE.txt', platforms=['OS Independent'], classifiers=CLASSIFIERS, long_description=open('README.md').read(), include_package_data=True, zip_safe=False )
from scout.parse.variant.rank_score import parse_rank_score from scout.parse.variant.variant import parse_variant def test_parse_rank_score(): ## GIVEN a rank score string on genmod format rank_scores_info = "123:10" variant_score = 10.0 family_id = "123" ## WHEN parsing the rank score parsed_rank_score = parse_rank_score(rank_scores_info, family_id) ## THEN assert that the correct rank score is parsed assert variant_score == parsed_rank_score def test_parse_rank_score_no_score(): ## GIVEN a empty rank score string rank_scores_info = "" family_id = "123" ## WHEN parsing the rank score parsed_rank_score = parse_rank_score(rank_scores_info, family_id) ## THEN assert that None is returned assert parsed_rank_score == None def test_parse_rank_score_variant(cyvcf2_variant, case_obj, scout_config): ## GIVEN a variant rank_score = 15 case_id = case_obj["_id"] ## WHEN adding a rank score string to the INFO field rank_score_str = f"{case_id}:{rank_score}" cyvcf2_variant.INFO["RankScore"] = rank_score_str ## WHEN parsing the variant var_info = parse_variant(cyvcf2_variant, case_obj) ## THEN assert that the correct score is parsed assert var_info["rank_score"] == rank_score
import amitgroup as ag import numpy as np ag.set_verbose(True) data, digits = ag.io.load_mnist('training', selection=slice(0, 100)) pd = ag.features.PartsDescriptor((5, 5), 20, patch_frame=1, edges_threshold=5, samples_per_image=10) pd.train_from_images(data) ag.plot.images(pd.visparts)
from django.contrib.gis.geoip2 import GeoIP2 from geoip2.errors import GeoIP2Error from ipware import get_client_ip def get_location_from_ip(request): client_ip, is_routable = get_client_ip(request) if client_ip is not None: g = GeoIP2() try: record = g.city(client_ip) except GeoIP2Error: return None if record: city = record.get('city') or '' country = record.get('country') or '' delimeter = ', ' if city and country else '' return f'{city}{delimeter}{country}' return None
# IDAPython (Python 2) script: walks every function's control-flow graph,
# converts it to a hashed tree ("CFT") plus mnemonic features, and pickles
# the result for later graph-edit-distance comparison.
import sys, os, time, argparse
import re
import pprint
import math
import cPickle
import ged_node

from idautils import *
from idc import *
import idaapi


def idascript_exit(code=0):
    # NOTE(review): `idc` is star-imported, not imported as a module, so
    # `idc.Exit` would NameError if this were ever called — verify.
    idc.Exit(code)


def get_short_function_name(function):
    # Strip '?' (C++ mangling) and cap length so it is filesystem-safe.
    return function.replace("?", "")[:100]


def mkdir(dirname):
    if not os.path.exists(dirname):
        os.mkdir(dirname)


def cPickle_dump(filename, data):
    # Binary-mode pickle of `data` to `filename`.
    with open(filename, "wb") as f:
        cPickle.dump(data, f)


def print_cfg(cfg):
    # Debug helper: prints each basic block and its successors.
    for block in cfg:
        print "[%02d]" % block.id,
        print hex(block.startEA),
        succs = list(block.succs())
        print "(succs(%d): " % len(succs),
        for i in range(len(succs)):
            sys.stdout.write(hex(succs[i].startEA))
            if i < len(succs) - 1:
                sys.stdout.write(", ")
        print ")"


def output_cfg_as_png_rec(g, block, memo):
    # Depth-first walk of the CFG adding pydot nodes/edges; `memo` holds
    # visited block start addresses (back/cross edges drawn dotted).
    functions1, dummy = get_marks(block, 0)
    hashed_label1 = hash_label(functions1)
    label1 = hex(block.startEA) + ("\n%08x" % hashed_label1)
    g.add_node(pydot.Node(label1, fontcolor='#FFFFFF', color='#333399'))
    for b in list(block.succs()):
        functions2, dummy = get_marks(b, 0)
        hashed_label2 = hash_label(functions2)
        label2 = hex(b.startEA) + ("\n%08x" % hashed_label2)
        if b.startEA not in memo:
            memo.append(b.startEA)
            g.add_edge(pydot.Edge(label1, label2, color='#333399',
                                  style='bold'))
            output_cfg_as_png_rec(g, b, memo)
        else:
            g.add_edge(pydot.Edge(label1, label2, color='#333399',
                                  style='bold, dotted'))


def output_cfg_as_png(cfg, filename, overwrite_flag):
    # Render one function's CFG to a PNG via pydot (unless the file exists
    # and overwriting is disabled).
    # NOTE(review): `pydot` is used here but never imported in this chunk —
    # presumably imported elsewhere; verify.
    blocks_src = {}
    blocks_dst = {}
    block = cfg[0]
    f_name = GetFunctionName(block.startEA)
    if not overwrite_flag and os.path.exists(filename):
        return
    g = pydot.Dot(graph_type='digraph', bgcolor="#F0E0FF")
    size = "21"
    g.set_rankdir('TB')
    g.set_size(size)
    g.add_node(pydot.Node('node', shape='ellipse', margin='0.05',
                          fontcolor='#FFFFFF', fontsize=size,
                          color='#333399', style='filled',
                          fontname='Consolas Bold'))
    g.add_node(pydot.Node('edge', color='lightgrey'))
    memo = []
    output_cfg_as_png_rec(g, block, memo)
    g.write_png(filename)


def get_cfg(function_start, function_end):
    # IDA builds the flow chart from the function start address alone;
    # `function_end` is accepted but unused.
    f_name = GetFunctionName(function_start)
    cfg = idaapi.FlowChart(idaapi.get_func(function_start))
    return list(cfg)


def get_cfgs():
    # One CFG per function, across all segments.
    cfgs = []
    for ea in Segments():
        functions = list(Functions(SegStart(ea), SegEnd(ea)))
        # Sentinel so the last function gets an end address too.
        functions.append(SegEnd(ea))
        for i in range(len(functions) - 1):
            function_start = functions[i]
            function_end = functions[i+1]
            cfg = get_cfg(function_start, function_end)
            cfgs.append(cfg)
    return cfgs


def hash_label(marks):
    # Deterministic 32-bit hash of the sorted, deduplicated, upper-cased
    # mark strings (rolling rotate-and-add).
    tmp = sorted(set(marks))
    tmp = "".join(tmp)
    tmp = tmp.upper()
    def rot13(string):
        return reduce(lambda h,c: ((h>>13 | h<<19)+ord(c)) & 0xFFFFFFFF,
                      [0]+list(string))
    hashed_label = rot13(tmp)
    hashed_label = hashed_label & 0xFFFFFFFF
    return hashed_label


def get_marks(block, gamma):
    # Collects "marks" (magic constants, string-offset symbols, callee
    # names) from a basic block; `gamma` counts call instructions seen.
    marks = []
    for head in Heads(block.startEA, block.endEA):
        mnem = GetMnem(head)
        opnd = (GetOpnd(head, 0), GetOpnd(head, 1), GetOpnd(head, 2))
        if mnem not in ["call"]:
            # Non-call: harvest immediates that look like magic numbers...
            for buf in (opnd[1], opnd[2]):
                if buf:
                    match = re.search("([\dA-F]+)h", buf)
                    if match:
                        magic = int(match.group(1), 16)
                        if 0x00001000 <= magic <= 0xffffffff:
                            marks.append(hex(magic))
            # ...and references to named string literals (skip `asc_` ones).
            for buf in (opnd[0], opnd[1], opnd[2]):
                if buf:
                    match = re.search("offset (a[\S]+)", buf)
                    if match:
                        offset_a = match.group(1)
                        if offset_a[:4] == "asc_":
                            continue
                        marks.append(offset_a)
                        continue
        else:
            # Call: record the callee name unless it is anonymous,
            # register-indirect, or compiler-runtime noise.
            gamma += 1
            if opnd[0][:4] == "sub_":
                continue
            if opnd[0][0] in ["?", "$"]:
                continue
            if opnd[0] in ["eax", "ebx", "ecx", "edx", "esi", "edi"]:
                continue
            if opnd[0] in ["__SEH_prolog4", "__SEH_epilog4",
                           "__EH_prolog3_catch"]:
                continue
            if opnd[0].find("cookie") >= 0:
                continue
            marks.append(opnd[0])
            continue
    return marks, gamma


def get_mnems(block):
    # One entry per instruction: mnemonic plus a coarse class tag for each
    # operand (reg/mem/off/loc/sub/num/dwd/lbl).
    mnems = []
    for head in Heads(block.startEA, block.endEA):
        mnem = GetMnem(head)
        opnd = (GetOpnd(head, 0), GetOpnd(head, 1), GetOpnd(head, 2))
        buf = " "
        for o in opnd:
            if not o:
                break
            elif o in ["eax", "ebx", "ecx", "edx",
                       "ax", "bx", "cx", "dx",
                       "al", "bl", "cl", "dl",
                       "ah", "bh", "ch", "dh",
                       "esi", "edi", "si", "di", "esp", "ebp"]:
                buf += "reg "
            elif o[:3] == "xmm":
                buf += "reg "
            elif o.find("[") >= 0:
                buf += "mem "
            elif o[:6] == "offset":
                buf += "off "
            elif o[:4] == "loc_":
                buf += "loc "
            elif o[:4] == "sub_":
                buf += "sub "
            elif o.isdigit():
                buf += "num "
            elif re.match("[\da-fA-F]+h", o):
                buf += "num "
            elif o[:6] == "dword_":
                buf += "dwd "
            else:
                buf += "lbl "
        mnems.append(mnem + buf)
    return mnems


def cfg_to_cft_rec(block, memo, abr):
    # Recursive CFG -> tree conversion.  abr = (alpha, beta, gamma):
    # alpha = blocks visited, beta = edges seen, gamma = calls seen.
    (alpha, beta, gamma) = abr
    alpha += 1
    marks, gamma = get_marks(block, gamma)
    hashed_label = hash_label(marks)
    mnems = get_mnems(block)
    tree = ged_node.Node(hashed_label)
    for b in list(block.succs()):
        beta += 1
        if b.startEA not in memo:
            memo.append(b.startEA)
            tmp, (alpha, beta, gamma), tmp2 = cfg_to_cft_rec(
                b, memo, (alpha, beta, gamma))
            tree = tree.addkid(tmp)
            mnems += tmp2
    return tree, (alpha, beta, gamma), mnems


def cfg_to_cft(cfg):
    # Entry point: start the recursion from the function's first block.
    block = cfg[0]
    memo = []
    memo.append(block.startEA)
    return cfg_to_cft_rec(block, memo, (0, 0, 0))


def dump_function_info(cfgs, program, function, f_image, f_all, f_overwrite):
    # Builds {function_name: {FUNCTION_NAME, CFT, ABR, MNEMS}} for every CFG.
    # NOTE(review): the collected dict is never written out here and
    # `dump_pickle` below is never called — looks like a missing call or
    # dead code; verify intent before relying on this script's output.
    function_num = len(cfgs)
    dump_data_list = {}
    for cfg in cfgs:
        function_name = GetFunctionName(cfg[0].startEA)
        (cft, abr, mnems) = cfg_to_cft(cfg)
        dump_data_list[function_name] = {}
        dump_data_list[function_name]["FUNCTION_NAME"] = function_name
        dump_data_list[function_name]["CFT"] = cft
        dump_data_list[function_name]["ABR"] = abr
        dump_data_list[function_name]["MNEMS"] = mnems


def dump_pickle(dump_data_list, program, function, f_overwrite):
    # Writes one pickle for the requested function plus a whole-program dump.
    function_name_short = get_short_function_name(function)
    filename_pickle = os.path.join(function_name_short + ".pickle")
    if f_overwrite or not os.path.exists(filename_pickle):
        cPickle_dump(filename_pickle, dump_data_list[function])
    cPickle_dump(program + ".dmp", dump_data_list)


def main(function, f_image, f_all, f_overwrite):
    # Deep CFGs can exceed the default recursion limit.
    sys.setrecursionlimit(3000)
    program = idaapi.get_root_filename()
    start_time = time.time()
    cfgs = get_cfgs()
    dump_function_info(cfgs, program, function, f_image, f_all, f_overwrite)
    result_time = time.time() - start_time
    print "Dump finished."
    print "result_time: " + str(result_time) + " sec."


if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter, description="")
    parser.add_argument('-f', dest='function', default=None, type=str,
                        help='')
    parser.add_argument('-a', dest='f_all', default=False,
                        action='store_true', help='')
    parser.add_argument('-i', dest='f_image', default=False,
                        action='store_true',
                        help='Image Flag (Output as PNG)')
    parser.add_argument('-o', dest='f_overwrite', default=False,
                        action='store_true', help='Overwrite file')
    args = parser.parse_args()
    function = args.function
    f_image = args.f_image
    f_all = args.f_all
    f_overwrite = args.f_overwrite
    main(function, f_image, f_all, f_overwrite)
    #idascript_exit()
import os import os.path as osp import textwrap import waflib.Utils import waflib.Logs as msg from waflib.Configure import conf _heptooldir = osp.dirname(osp.abspath(__file__)) def options(opt): opt.load('hwaf-base', tooldir=_heptooldir) opt.add_option( '--with-cmake', default=None, help="Look for CMake at the given path") return def configure(conf): conf.load('hwaf-base', tooldir=_heptooldir) return @conf def find_cmake(ctx, **kwargs): if not ctx.env.HWAF_FOUND_C_COMPILER: ctx.fatal('load a C compiler first') pass if not ctx.env.HWAF_FOUND_CXX_COMPILER: ctx.fatal('load a C++ compiler first') pass path_list = waflib.Utils.to_list(kwargs.get('path_list', [])) if getattr(ctx.options, 'with_cmake', None): topdir = ctx.options.with_cmake topdir = ctx.hwaf_subst_vars(topdir) path_list.append(osp.join(topdir, "bin")) pass kwargs['path_list'] = path_list ctx.find_program( "cmake", var="CMAKE", **kwargs) kwargs['mandatory'] = False ctx.find_program( "ccmake", var="CCMAKE", **kwargs) ctx.find_program( "cpack", var="CPACK", **kwargs) ctx.find_program( "ctest", var="CTEST", **kwargs) version="N/A" cmd = [ctx.env.CMAKE, "--version"] lines=ctx.cmd_and_log(cmd).splitlines() for l in lines: l = l.lower() if "version" in l: version=l[l.find("version")+len("version"):].strip() break pass ctx.start_msg("CMake version") ctx.end_msg(version) ctx.hwaf_declare_runtime_env('CMAKE') ctx.env.CMAKE_HOME = osp.dirname(osp.dirname(ctx.env.CMAKE)) ctx.env.CMAKE_VERSION = version ctx.env.HWAF_FOUND_CMAKE = 1 return
# pyglet XInput (X11 extension) support: enumerate extended input devices
# and dispatch their events through pyglet's event framework.
import ctypes

import pyglet
from pyglet.window.xlib import xlib
import lib_xinput as xi


class XInputDevice:
    # Wraps one XDeviceInfo entry; open() acquires the XDevice handle.
    def __init__(self, display, device_info):
        self._x_display = display._display
        self._device_id = device_info.id
        self.name = device_info.name
        self._open_device = None

        # TODO: retrieve inputclassinfo from device_info and expose / save
        # for valuator axes etc.

    def open(self):
        # Idempotent: a second open() is a no-op.
        if self._open_device:
            return
        self._open_device = xi.XOpenDevice(self._x_display, self._device_id)
        if not self._open_device:
            raise Exception('Cannot open device')

    def close(self):
        if not self._open_device:
            return
        xi.XCloseDevice(self._x_display, self._open_device)

    def attach(self, window):
        # Device and window must live on the same X display.
        assert window._x_display == self._x_display
        return XInputDeviceInstance(self, window)


class XInputDeviceInstance(pyglet.event.EventDispatcher):
    def __init__(self, device, window):
        """Create an opened instance of a device on the given window.

        :Parameters:
            `device` : XInputDevice
                Device to open
            `window` : Window
                Window to open device on

        """
        assert device._x_display == window._x_display
        assert device._open_device

        self.device = device
        self.window = window
        self._events = list()

        # One shared dispatcher per window (created lazily and stashed on
        # the window object; the attribute name is class-mangled).
        try:
            dispatcher = window.__xinput_window_event_dispatcher
        except AttributeError:
            dispatcher = window.__xinput_window_event_dispatcher = \
                XInputWindowEventDispatcher()
        dispatcher.add_instance(self)

        # Rebind to the underlying XDevice struct for class enumeration.
        device = device._open_device.contents
        if not device.num_classes:
            return

        # Bind matching extended window events to bound instance methods
        # on this object.
        #
        # This is inspired by test.c of xinput package by Frederic
        # Lepied available at x.org.
        #
        # In C, this stuff is normally handled by the macro DeviceKeyPress and
        # friends. Since we don't have access to those macros here, we do it
        # this way.
        for i in range(device.num_classes):
            class_info = device.classes[i]
            if class_info.input_class == xi.KeyClass:
                self._add(class_info, xi._deviceKeyPress,
                          dispatcher._event_xinput_key_press)
                self._add(class_info, xi._deviceKeyRelease,
                          dispatcher._event_xinput_key_release)
            elif class_info.input_class == xi.ButtonClass:
                self._add(class_info, xi._deviceButtonPress,
                          dispatcher._event_xinput_button_press)
                self._add(class_info, xi._deviceButtonRelease,
                          dispatcher._event_xinput_button_release)
            elif class_info.input_class == xi.ValuatorClass:
                self._add(class_info, xi._deviceMotionNotify,
                          dispatcher._event_xinput_motion)
            elif class_info.input_class == xi.ProximityClass:
                self._add(class_info, xi._proximityIn,
                          dispatcher._event_xinput_proximity_in)
                self._add(class_info, xi._proximityOut,
                          dispatcher._event_xinput_proximity_out)
            elif class_info.input_class == xi.FeedbackClass:
                pass
            elif class_info.input_class == xi.FocusClass:
                pass
            elif class_info.input_class == xi.OtherClass:
                pass

        # Tell the X server which extension events we want on this window.
        array = (xi.XEventClass * len(self._events))(*self._events)
        xi.XSelectExtensionEvent(window._x_display,
                                 window._window,
                                 array,
                                 len(array))

    def _add(self, class_info, event, handler):
        # Event class encoding: device id in the high byte, type in the low.
        _type = class_info.event_type_base + event
        _class = self.device._device_id << 8 | _type
        self._events.append(_class)
        self.window._event_handlers[_type] = handler


XInputDeviceInstance.register_event_type('on_button_press')
XInputDeviceInstance.register_event_type('on_button_release')
XInputDeviceInstance.register_event_type('on_motion')
XInputDeviceInstance.register_event_type('on_proximity_in')
XInputDeviceInstance.register_event_type('on_proximity_out')


class XInputWindowEventDispatcher:
    # Routes raw X extension events to the device instance they belong to.
    def __init__(self):
        self._instances = dict()

    def add_instance(self, instance):
        self._instances[instance.device._device_id] = instance

    def remove_instance(self, instance):
        del self._instances[instance.device._device_id]

    def dispatch_instance_event(self, e, *args):
        # Unknown device ids are silently ignored.
        try:
            instance = self._instances[e.deviceid]
        except KeyError:
            return
        instance.dispatch_event(*args)

    # NOTE(review): the XlibEventHandler arguments (0 here, -1 below) look
    # like placeholders — the real event type comes from XSelectExtensionEvent
    # registration above; verify against pyglet's xlib handler contract.
    @pyglet.window.xlib.XlibEventHandler(0)
    def _event_xinput_key_press(self, ev):
        raise NotImplementedError('TODO')

    @pyglet.window.xlib.XlibEventHandler(0)
    def _event_xinput_key_release(self, ev):
        raise NotImplementedError('TODO')

    @pyglet.window.xlib.XlibEventHandler(0)
    def _event_xinput_button_press(self, ev):
        e = ctypes.cast(ctypes.byref(ev),
                        ctypes.POINTER(xi.XDeviceButtonEvent)).contents
        self.dispatch_instance_event(e, 'on_button_press', e.button)

    @pyglet.window.xlib.XlibEventHandler(0)
    def _event_xinput_button_release(self, ev):
        e = ctypes.cast(ctypes.byref(ev),
                        ctypes.POINTER(xi.XDeviceButtonEvent)).contents
        self.dispatch_instance_event(e, 'on_button_release', e.button)

    @pyglet.window.xlib.XlibEventHandler(0)
    def _event_xinput_motion(self, ev):
        e = ctypes.cast(ctypes.byref(ev),
                        ctypes.POINTER(xi.XDeviceMotionEvent)).contents
        axis_data = list()
        for i in range(e.axes_count):
            axis_data.append(e.axis_data[i])
        self.dispatch_instance_event(e, 'on_motion', axis_data, e.x, e.y)

    @pyglet.window.xlib.XlibEventHandler(0)
    def _event_xinput_proximity_in(self, ev):
        e = ctypes.cast(ctypes.byref(ev),
                        ctypes.POINTER(xi.XProximityNotifyEvent)).contents
        self.dispatch_instance_event(e, 'on_proximity_in')

    @pyglet.window.xlib.XlibEventHandler(-1)
    def _event_xinput_proximity_out(self, ev):
        e = ctypes.cast(ctypes.byref(ev),
                        ctypes.POINTER(xi.XProximityNotifyEvent)).contents
        self.dispatch_instance_event(e, 'on_proximity_out')


def _check_extension(display):
    # Raises unless the X server advertises the XInputExtension.
    major_opcode = ctypes.c_int()
    first_event = ctypes.c_int()
    first_error = ctypes.c_int()
    xlib.XQueryExtension(display._display, 'XInputExtension',
                         ctypes.byref(major_opcode),
                         ctypes.byref(first_event),
                         ctypes.byref(first_error))
    if not major_opcode.value:
        raise Exception('XInput extension not available')


def get_devices(display):
    # Enumerate all extended input devices on the display.
    _check_extension(display)

    devices = list()
    count = ctypes.c_int(0)
    device_list = xi.XListInputDevices(display._display, count)

    for i in range(count.value):
        device_info = device_list[i]
        devices.append(XInputDevice(display, device_info))

    return devices
from __future__ import absolute_import

from wtforms import validators

from ..forms import ModelForm
from digits import utils


class ImageModelForm(ModelForm):
    """
    Defines the form used to create a new ImageModelJob
    """

    # Optional square crop applied to inputs during training.
    crop_size = utils.forms.IntegerField(
        'Crop Size',
        validators=[
            validators.NumberRange(min=1),
            validators.Optional()
        ],
        tooltip=("If specified, during training a random square crop will be "
                 "taken from the input image before using as input for the "
                 "network.")
    )

    # Mean subtraction strategy (whole mean image vs. per-pixel mean).
    use_mean = utils.forms.SelectField(
        'Subtract Mean',
        choices=[
            ('none', 'None'),
            ('image', 'Image'),
            ('pixel', 'Pixel'),
        ],
        default='image',
        tooltip="Subtract the mean file or mean pixel for this dataset from "
                "each image."
    )

    # --- data-augmentation options below ---

    aug_flip = utils.forms.SelectField(
        'Flipping',
        choices=[
            ('none', 'None'),
            ('fliplr', 'Horizontal'),
            ('flipud', 'Vertical'),
            ('fliplrud', 'Horizontal and/or Vertical'),
        ],
        default='none',
        tooltip="Randomly flips each image during batch preprocessing."
    )

    aug_quad_rot = utils.forms.SelectField(
        'Quadrilateral Rotation',
        choices=[
            ('none', 'None'),
            ('rot90', '0, 90 or 270 degrees'),
            ('rot180', '0 or 180 degrees'),
            ('rotall', '0, 90, 180 or 270 degrees.'),
        ],
        default='none',
        tooltip="Randomly rotates (90 degree steps) each image during batch "
                "preprocessing."
    )

    # Arbitrary-angle rotation, uniform in [-deg, +deg].
    aug_rot = utils.forms.IntegerField(
        'Rotation (+- deg)',
        default=0,
        validators=[
            validators.NumberRange(min=0, max=180)
        ],
        tooltip="The uniform-random rotation angle that will be performed "
                "during batch preprocessing."
    )

    aug_scale = utils.forms.FloatField(
        'Rescale (stddev)',
        default=0,
        validators=[
            validators.NumberRange(min=0, max=1)
        ],
        tooltip=("Retaining image size, the image is rescaled with a "
                 "+-stddev of this parameter. Suggested value is 0.07.")
    )

    aug_noise = utils.forms.FloatField(
        'Noise (stddev)',
        default=0,
        validators=[
            validators.NumberRange(min=0, max=1)
        ],
        tooltip=("Adds AWGN (Additive White Gaussian Noise) during batch "
                 "preprocessing, assuming [0 1] pixel-value range. "
                 "Suggested value is 0.03.")
    )

    # HSV augmentation: master toggle plus per-channel shift stddevs.
    aug_hsv_use = utils.forms.BooleanField(
        'HSV Shifting',
        default=False,
        tooltip=("Augmentation by normal-distributed random shifts in HSV "
                 "color space, assuming [0 1] pixel-value range."),
    )

    aug_hsv_h = utils.forms.FloatField(
        'Hue',
        default=0.02,
        validators=[
            validators.NumberRange(min=0, max=0.5)
        ],
        tooltip=("Standard deviation of a shift that will be performed "
                 "during preprocessing, assuming [0 1] pixel-value range.")
    )

    aug_hsv_s = utils.forms.FloatField(
        'Saturation',
        default=0.04,
        validators=[
            validators.NumberRange(min=0, max=0.5)
        ],
        tooltip=("Standard deviation of a shift that will be performed "
                 "during preprocessing, assuming [0 1] pixel-value range.")
    )

    aug_hsv_v = utils.forms.FloatField(
        'Value',
        default=0.06,
        validators=[
            validators.NumberRange(min=0, max=0.5)
        ],
        tooltip=("Standard deviation of a shift that will be performed "
                 "during preprocessing, assuming [0 1] pixel-value range.")
    )
from setuptools import setup, find_packages import os version = '0.9.7rt' def read(*rnames): return open(os.path.join(os.path.dirname(__file__), *rnames)).read() long_description = ( read('README.txt') + '\n' + read('js', 'chosen', 'test_chosen.txt') + '\n' + read('CHANGES.txt')) setup( name='js.chosen', version=version, description="Fanstatic packaging of Chosen", long_description=long_description, classifiers=[], keywords='', author='Fanstatic Developers', author_email='fanstatic@googlegroups.com', license='BSD', packages=find_packages(),namespace_packages=['js'], include_package_data=True, zip_safe=False, install_requires=[ 'fanstatic', 'js.jquery', 'setuptools', ], entry_points={ 'fanstatic.libraries': [ 'chosen = js.chosen:library', ], }, )
"""myproject URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/1.9/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home') Including another URLconf 1. Add an import: from blog import urls as blog_urls 2. Import the include() function: from django.conf.urls import url, include 3. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls)) """ from django.conf.urls import url, include from django.contrib import admin from django.conf import settings urlpatterns = [ url(r'^admin/', admin.site.urls), ] if settings.DEBUG: import debug_toolbar urlpatterns += [ url(r'^__debug__/', include(debug_toolbar.urls)), ]
__author__ = 'Cedric Da Costa Faro'

from flask import render_template

from . import main

# Application-wide HTTP error handlers. app_errorhandler (rather than
# errorhandler) registers each handler for the whole app, not only for
# routes defined on this blueprint. Each returns (rendered page, status).


@main.app_errorhandler(404)
def page_not_found(e):
    """Render the custom 404 (Not Found) page."""
    return render_template('404.html'), 404


@main.app_errorhandler(405)
def method_not_allowed(e):
    """Render the custom 405 (Method Not Allowed) page."""
    return render_template('405.html'), 405


@main.app_errorhandler(500)
def internal_server_error(e):
    """Render the custom 500 (Internal Server Error) page."""
    return render_template('500.html'), 500
""" PostgreSQL Session API ====================== The Session classes wrap the Queries :py:class:`Session <queries.Session>` and :py:class:`TornadoSession <queries.tornado_session.TornadoSession>` classes providing environment variable based configuration. Environment variables should be set using the ``PGSQL[_DBNAME]`` format where the value is a PostgreSQL URI. For PostgreSQL URI format, see: http://www.postgresql.org/docs/9.3/static/libpq-connect.html#LIBPQ-CONNSTRING As example, given the environment variable: .. code:: python PGSQL_FOO = 'postgresql://bar:baz@foohost:6000/foo' and code for creating a :py:class:`Session` instance for the database name ``foo``: .. code:: python session = sprockets.postgresql.Session('foo') A :py:class:`queries.Session` object will be created that connects to Postgres running on ``foohost``, port ``6000`` using the username ``bar`` and the password ``baz``, connecting to the ``foo`` database. """ version_info = (2, 0, 1) __version__ = '.'.join(str(v) for v in version_info) import logging import os from queries import pool import queries from queries import tornado_session _ARGUMENTS = ['host', 'port', 'dbname', 'user', 'password'] LOGGER = logging.getLogger(__name__) from queries import DictCursor from queries import NamedTupleCursor from queries import RealDictCursor from queries import LoggingCursor from queries import MinTimeLoggingCursor from queries import DataError from queries import DatabaseError from queries import IntegrityError from queries import InterfaceError from queries import InternalError from queries import NotSupportedError from queries import OperationalError from queries import ProgrammingError from queries import QueryCanceledError from queries import TransactionRollbackError def _get_uri(dbname): """Return the URI for the specified database name from an environment variable. 
If dbname is blank, the ``PGSQL`` environment variable is used, otherwise the database name is cast to upper case and concatenated to ``PGSQL_`` and the URI is retrieved from ``PGSQL_DBNAME``. For example, if the value ``foo`` is passed in, the environment variable used would be ``PGSQL_FOO``. :param str dbname: The database name to construct the URI for :return: str :raises: KeyError """ if not dbname: return os.environ['PGSQL'] return os.environ['PGSQL_{0}'.format(dbname).upper()] class Session(queries.Session): """Extends queries.Session using configuration data that is stored in environment variables. Utilizes connection pooling to ensure that multiple concurrent asynchronous queries do not block each other. Heavily trafficked services will require a higher ``max_pool_size`` to allow for greater connection concurrency. :param str dbname: PostgreSQL database name :param queries.cursor: The cursor type to use :param int pool_idle_ttl: How long idle pools keep connections open :param int pool_max_size: The maximum size of the pool to use :param str db_url: Optional database connection URL. Use this when you need to connect to a database that is only known at runtime. """ def __init__(self, dbname, cursor_factory=queries.RealDictCursor, pool_idle_ttl=pool.DEFAULT_IDLE_TTL, pool_max_size=pool.DEFAULT_MAX_SIZE, db_url=None): if db_url is None: db_url = _get_uri(dbname) super(Session, self).__init__(db_url, cursor_factory, pool_idle_ttl, pool_max_size) class TornadoSession(tornado_session.TornadoSession): """Extends queries.TornadoSession using configuration data that is stored in environment variables. Utilizes connection pooling to ensure that multiple concurrent asynchronous queries do not block each other. Heavily trafficked services will require a higher ``max_pool_size`` to allow for greater connection concurrency. 
:py:meth:`query <queries.tornado_session.TornadoSession.query>` and :py:meth:`callproc <queries.tornado_session.TornadoSession.callproc>` must call :py:meth:`Results.free <queries.tornado_session.Results.free>` :param str dbname: PostgreSQL database name :param queries.cursor: The cursor type to use :param int pool_idle_ttl: How long idle pools keep connections open :param int pool_max_size: The maximum size of the pool to use :param tornado.ioloop.IOLoop ioloop: Pass in the instance of the tornado IOLoop you would like to use. Defaults to the global instance. :param str db_url: Optional database connection URL. Use this when you need to connect to a database that is only known at runtime. """ def __init__(self, dbname, cursor_factory=queries.RealDictCursor, pool_idle_ttl=pool.DEFAULT_IDLE_TTL, pool_max_size=tornado_session.DEFAULT_MAX_POOL_SIZE, io_loop=None, db_url=None): if db_url is None: db_url = _get_uri(dbname) super(TornadoSession, self).__init__(db_url, cursor_factory, pool_idle_ttl, pool_max_size, io_loop)
__author__ = 'Robert Meyer'

# SCOOP-based integration tests for the pypet environment. Every test class
# is skipped unless SCOOP is both installed and actually running this
# process (scoop.IS_RUNNING), since the environment under test distributes
# work via the SCOOP framework.

try:
    import scoop
except ImportError:
    # SCOOP not installed; scoop_not_functional_check() will report that
    # and all test classes below will be skipped.
    scoop = None


def scoop_not_functional_check():
    """Return True when the SCOOP tests must be skipped.

    Returns False (i.e. "do not skip") only when scoop imported
    successfully and this process was launched under SCOOP.
    """
    if scoop is not None and scoop.IS_RUNNING:
        print('SCOOP mode functional!')
        return False
    else:
        print('SCOOP NOT running!')
        return True


from pypet.tests.integration.environment_test import EnvironmentTest, ResultSortTest
from pypet.tests.integration.environment_multiproc_test import check_nice
import pypet.pypetconstants as pypetconstants
from pypet.tests.testutils.ioutils import parse_args, run_suite
from pypet.tests.testutils.data import unittest


@unittest.skipIf(scoop_not_functional_check(),
                 'Only makes sense if scoop is installed and running')
class MultiprocSCOOPNetqueueTest(EnvironmentTest):

    tags = 'integration', 'hdf5', 'environment', 'multiproc', 'netqueue', 'scoop'

    def set_mode(self):
        # Configure the inherited environment test for SCOOP + netqueue
        # wrapping (no process pool, 4 workers).
        super(MultiprocSCOOPNetqueueTest, self).set_mode()
        self.mode = pypetconstants.WRAP_MODE_NETQUEUE
        self.multiproc = True
        self.freeze_input = False
        self.ncores = 4
        self.gc_interval = 3
        self.niceness = check_nice(1)
        self.use_pool = False
        self.use_scoop = True
        self.graceful_exit = False

    @unittest.skip('Does not work with scoop (fully), because scoop uses main frame.')
    def test_niceness(self):
        pass

    # def test_run(self):
    #     return super(MultiprocSCOOPLocalTest, self).test_run()


@unittest.skipIf(scoop_not_functional_check(),
                 'Only makes sense if scoop is installed')
class MultiprocSCOOPSortLocalTest(ResultSortTest):

    tags = 'integration', 'hdf5', 'environment', 'multiproc', 'local', 'scoop'

    def set_mode(self):
        # SCOOP + local wrapping variant of the result-sorting test.
        super(MultiprocSCOOPSortLocalTest, self).set_mode()
        self.mode = pypetconstants.WRAP_MODE_LOCAL
        self.freeze_input = False
        self.multiproc = True
        self.ncores = 4
        self.use_pool = False
        self.use_scoop = True
        self.graceful_exit = False

    @unittest.skip('Does not work with SCOOP')
    def test_graceful_exit(self):
        pass


@unittest.skipIf(scoop_not_functional_check(),
                 'Only makes sense if scoop is installed')
class MultiprocFrozenSCOOPLocalTest(EnvironmentTest):

    tags = 'integration', 'hdf5', 'environment', 'multiproc', 'local', 'scoop', 'freeze_input'

    def set_mode(self):
        # Same as the local SCOOP test but with frozen (pre-pickled) input.
        super(MultiprocFrozenSCOOPLocalTest, self).set_mode()
        self.mode = pypetconstants.WRAP_MODE_LOCAL
        self.multiproc = True
        self.freeze_input = True
        self.ncores = 4
        self.gc_interval = 3
        self.niceness = check_nice(1)
        self.use_pool = False
        self.use_scoop = True
        self.graceful_exit = False

    @unittest.skip('Does not work with scoop (fully), because scoop uses main frame.')
    def test_niceness(self):
        pass

    # def test_run(self):
    #     return super(MultiprocSCOOPLocalTest, self).test_run()


@unittest.skipIf(scoop_not_functional_check(),
                 'Only makes sense if scoop is installed')
class MultiprocFrozenSCOOPSortNetlockTest(ResultSortTest):

    tags = 'integration', 'hdf5', 'environment', 'multiproc', 'netlock', 'scoop', 'freeze_input'

    def set_mode(self):
        # Netlock wrapping; the port is picked from the given range.
        super(MultiprocFrozenSCOOPSortNetlockTest, self).set_mode()
        self.mode = pypetconstants.WRAP_MODE_NETLOCK
        self.freeze_input = True
        self.multiproc = True
        self.ncores = 4
        self.use_pool = False
        self.use_scoop = True
        self.port = (10000, 60000)
        self.graceful_exit = False

    @unittest.skip('Does not work with SCOOP')
    def test_graceful_exit(self):
        pass


@unittest.skipIf(scoop_not_functional_check(),
                 'Only makes sense if scoop is installed')
class MultiprocFrozenSCOOPSortNetqueueTest(ResultSortTest):

    tags = 'integration', 'hdf5', 'environment', 'multiproc', 'netqueue', 'scoop', 'freeze_input', 'mehmet'

    def set_mode(self):
        super(MultiprocFrozenSCOOPSortNetqueueTest, self).set_mode()
        self.mode = pypetconstants.WRAP_MODE_NETQUEUE
        self.freeze_input = True
        self.multiproc = True
        self.ncores = 4
        self.use_pool = False
        self.use_scoop = True
        self.graceful_exit = False
        #self.port = 'tcp://127.0.0.1:22334'

    @unittest.skip('Does not work with SCOOP')
    def test_graceful_exit(self):
        pass


@unittest.skipIf(scoop_not_functional_check(),
                 'Only makes sense if scoop is installed')
class MultiprocSCOOPNetlockTest(EnvironmentTest):

    tags = 'integration', 'hdf5', 'environment', 'multiproc', 'netlock', 'scoop'

    def set_mode(self):
        # Netlock wrapping with an automatically chosen port (port=None).
        super(MultiprocSCOOPNetlockTest, self).set_mode()
        self.mode = pypetconstants.WRAP_MODE_NETLOCK
        self.multiproc = True
        self.freeze_input = False
        self.ncores = 4
        self.gc_interval = 3
        self.niceness = check_nice(1)
        self.use_pool = False
        self.use_scoop = True
        self.port = None
        self.timeout = 1099.99
        self.graceful_exit = False
        # self.port = 'tcp://127.0.0.1:22334'

    @unittest.skip('Does not work with scoop (fully), because scoop uses main frame.')
    def test_niceness(self):
        pass


if __name__ == '__main__':
    opt_args = parse_args()
    run_suite(**opt_args)
""" These URL patterns are included in two different ways in the main urls.py, with an extra argument present in one case. Thus, there are two different ways for each name to resolve and Django must distinguish the possibilities based on the argument list. """ from django.conf.urls import url from .views import empty_view urlpatterns = [ url(r'^part/(?P<value>\w+)/$', empty_view, name="part"), url(r'^part2/(?:(?P<value>\w+)/)?$', empty_view, name="part2"), ]
""" New Drawing class to create new mark and style on axes. """ from decimal import Decimal import numpy as np import toyplot ITERABLE = (list, tuple, np.ndarray) class GridSetup: """ Returns Canvas and Cartesian axes objects to fit a grid of trees. """ def __init__(self, nrows, ncols, width, height, layout): # style args can include height/width, nrows, ncols, shared,... self.nrows = nrows self.ncols = ncols self.width = width self.height = height self.layout = layout # get .canvas and .axes self.get_tree_dims() self.get_canvas_and_axes() def get_canvas_and_axes(self): """ Set .canvas and .axes objects """ self.canvas = toyplot.Canvas( height=self.height, width=self.width, ) self.axes = [ self.canvas.cartesian( grid=(self.nrows, self.ncols, i), padding=10, margin=25, ) for i in range(self.nrows * self.ncols) ] def get_tree_dims(self): """ get height and width if not set by user """ if self.ncols * self.nrows < 4: minx = 250 miny = 250 else: minx = 200 miny = 140 # wider than tall if self.layout in ("d", "u"): self.width = ( self.width if self.width else min(750, minx * self.ncols) ) self.height = ( self.height if self.height else min(750, miny * self.nrows) ) else: self.height = ( self.height if self.height else min(750, minx * self.nrows) ) self.width = ( self.width if self.width else min(750, miny * self.ncols) ) class CanvasSetup: """ Returns Canvas and Cartesian axes objects """ def __init__(self, tree, axes, style): # args includes axes self.tree = tree self.axes = axes self.style = style self.canvas = None self.external_axis = False # get the longest name for dimension fitting self.lname = 0 if not all([i is None for i in self.style.tip_labels]): self.lname = max([len(str(i)) for i in self.style.tip_labels]) # ntips and shape to fit with provided args self.get_dims_from_tree_size() # fills canvas and axes self.get_canvas_and_axes() # expand the domain/extents for the text # self.fit_tip_labels() # ticks for tree and scalebar self.add_axes_style() def 
get_dims_from_tree_size(self): """ Calculate reasonable canvas height and width for tree given N tips """ if self.style.layout == "c": radius = max( [0] + [i for i in [self.style.height, self.style.width] if i]) if not radius: radius = 400 self.style.width = self.style.height = radius return if self.style.layout in ("r", "l"): # height fit by tree size if not self.style.height: self.style.height = max(275, min(1000, 18 * self.tree.ntips)) # width fit by name size if not self.style.width: self.style.width = max(250, min(500, 250 + 5 * self.lname)) else: # height fit by name size if not self.style.height: self.style.height = max(250, min(500, 250 + 5 * self.lname)) # width fit by tree size if not self.style.width: self.style.width = max(350, min(1000, 18 * self.tree.ntips)) def get_canvas_and_axes(self): """ """ if self.axes is not None: self.canvas = None self.external_axis = True else: self.canvas = toyplot.Canvas( height=self.style.height, width=self.style.width, ) self.axes = self.canvas.cartesian( padding=self.style.padding ) def add_axes_style(self): """ """ # style axes with padding and show axes self.axes.padding = self.style.padding if not self.external_axis: self.axes.show = True if not self.style.scalebar: self.axes.show = False # scalebar if self.style.scalebar: if self.style.layout in ("r", "l"): nticks = max((3, np.floor(self.style.width / 100).astype(int))) self.axes.y.show = False self.axes.x.show = True self.axes.x.ticks.show = True # generate locations if self.style.use_edge_lengths: th = self.tree.treenode.height else: th = self.tree.treenode.get_farthest_leaf(True)[1] + 1 if self.style.layout == "r": top = self.style.xbaseline - th else: top = self.style.xbaseline + th locs = np.linspace(self.style.xbaseline, top, nticks) # auto-formatter for axes ticks labels zer = abs(min(0, Decimal(locs[1]).adjusted())) fmt = "{:." 
+ str(zer) + "f}" self.axes.x.ticks.locator = toyplot.locator.Explicit( locations=locs, labels=[fmt.format(i) for i in np.abs(locs)], ) elif self.style.layout in ("u", "d"): nticks = max((3, np.floor(self.style.height / 100).astype(int))) self.axes.x.show = False self.axes.y.show = True self.axes.y.ticks.show = True # generate locations if self.style.use_edge_lengths: th = self.tree.treenode.height else: th = self.tree.treenode.get_farthest_leaf(True)[1] + 1 if self.style.layout == "d": top = self.style.ybaseline + th else: top = self.style.ybaseline - th locs = np.linspace(self.style.ybaseline, top, nticks) # auto-formatter for axes ticks labels zer = abs(min(0, Decimal(locs[1]).adjusted())) fmt = "{:." + str(zer) + "f}" self.axes.y.ticks.locator = toyplot.locator.Explicit( locations=locs, labels=[fmt.format(i) for i in np.abs(locs)], ) # elif self.style.layout == "d": # nticks = max((3, np.floor(self.style.height / 100).astype(int))) # self.axes.x.show = False # self.axes.y.show = True # self.axes.y.ticks.show = True # # generate locations # locs = np.linspace(0, self.tree.treenode.height, nticks) # # auto-formatter for axes ticks labels # zer = abs(min(0, Decimal(locs[1]).adjusted())) # fmt = "{:." + str(zer) + "f}" # self.axes.y.ticks.locator = toyplot.locator.Explicit( # locations=locs, # labels=[fmt.format(i) for i in np.abs(locs)], # ) # def fit_tip_labels(self): # """ # DEPRECATED SINCE V2 since Mark now sets its own extents correctly. # Modifies display range to ensure tip labels fit. This is a bit hackish # still. The problem is that the 'extents' range of the rendered text # is not totally correct. So we add a little buffer here. Should add for # user to be able to modify this if needed. If not using edge lengths # then need to use unit length for treeheight. 
# """ # # bail on unrooted for now; TODO # if self.style.layout == "c": # return # # if names # if self.lname: # # get ratio of names to tree in plot # ratio = max(self.lname / 10, 0.15) # # have tree figure make up 85% of plot # if self.style.use_edge_lengths: # addon = self.tree.treenode.height # else: # addon = self.tree.treenode.get_farthest_leaf(True)[1] + 1 # addon *= ratio # # modify display for layout # if self.style.layout == "r": # self.axes.x.domain.max = (addon / 2.) + self.style.xbaseline # elif self.style.layout == "l": # self.axes.x.domain.min = (-addon / 2.) + self.style.xbaseline # # self.axes.x.domain.min -= self.style.xbaseline # elif self.style.layout == "d": # self.axes.y.domain.min = (-addon / 2.) + self.style.ybaseline # elif self.style.layout == "u": # self.axes.y.domain.max = (addon / 2.) + self.style.ybaseline # # print(addon, ratio, self.axes.x.domain.min, self.axes.x.domain.max)
"""Goodness-of-fit helpers for comparing empirical samples to models.

All of the tests ultimately reduce to Pearson's chi^2 test applied to
(possibly truncated) multinomial count data.
"""
import numpy
import scipy.stats
from collections import defaultdict


def scores_to_probs(scores):
    """Convert unnormalized log scores to a normalized probability vector."""
    scores = numpy.array(scores)
    # subtract the max before exponentiating for numerical stability
    scores -= scores.max()
    probs = numpy.exp(scores, out=scores)
    probs /= probs.sum()
    return probs


def score_to_empirical_kl(score, count):
    """
    Convert total log score to KL( empirical || model ),
    where the empirical pdf is uniform over `count` datapoints.
    """
    count = float(count)
    return -score / count - numpy.log(count)


def print_histogram(probs, counts):
    """Print a simple ASCII histogram of counts beside their probabilities."""
    WIDTH = 60.0
    max_count = max(counts)
    print('{: >8} {: >8}'.format('Prob', 'Count'))
    for prob, count in sorted(zip(probs, counts), reverse=True):
        width = int(round(WIDTH * count / max_count))
        print('{: >8.3f} {: >8d} {}'.format(prob, count, '-' * width))


def multinomial_goodness_of_fit(
        probs,
        counts,
        total_count,
        truncated=False,
        plot=False):
    """
    Pearson's chi^2 test, on possibly truncated data.
    http://en.wikipedia.org/wiki/Pearson%27s_chi-squared_test

    :param probs: expected cell probabilities
    :param counts: observed cell counts
    :param int total_count: total number of samples drawn
    :param bool truncated: True when cells were dropped, so counts need
        not sum to total_count and no degree of freedom is subtracted
    :param bool plot: print an ASCII histogram of the data
    Returns:
        p-value of truncated multinomial sample.
    """
    assert len(probs) == len(counts)
    assert truncated or total_count == sum(counts)
    chi_squared = 0
    dof = 0
    if plot:
        print_histogram(probs, counts)
    for p, c in zip(probs, counts):
        if p == 1:
            # degenerate distribution: fit is perfect iff all mass observed
            return 1 if c == total_count else 0
        assert p < 1, 'bad probability: %g' % p
        if p > 0:
            mean = total_count * p
            variance = total_count * p * (1 - p)
            assert variance > 1,\
                'WARNING goodness of fit is inaccurate; use more samples'
            chi_squared += (c - mean) ** 2 / variance
            dof += 1
        else:
            print('WARNING zero probability in goodness-of-fit test')
            if c > 0:
                # impossible cell was observed -> infinitely bad fit
                return float('inf')

    if not truncated:
        dof -= 1

    survival = scipy.stats.chi2.sf(chi_squared, dof)
    return survival


def unif01_goodness_of_fit(samples, plot=False):
    """
    Bin uniformly distributed samples and apply Pearson's chi^2 test.
    """
    samples = numpy.array(samples, dtype=float)
    assert samples.min() >= 0.0
    assert samples.max() <= 1.0
    bin_count = int(round(len(samples) ** 0.333))
    assert bin_count >= 7, 'WARNING imprecise test, use more samples'
    # plain float/int: the numpy.float/numpy.int aliases were removed
    # from NumPy 1.24 onward
    probs = numpy.ones(bin_count, dtype=float) / bin_count
    counts = numpy.zeros(bin_count, dtype=int)
    for sample in samples:
        # clamp so a sample of exactly 1.0 lands in the last bin instead
        # of indexing one past the end (the asserts above permit 1.0)
        counts[min(int(bin_count * sample), bin_count - 1)] += 1
    return multinomial_goodness_of_fit(probs, counts, len(samples), plot=plot)


def density_goodness_of_fit(samples, probs, plot=False):
    """
    Transform arbitrary continuous samples to unif01 distribution
    and assess goodness of fit via Pearson's chi^2 test.

    Inputs:
        samples - a list of real-valued samples from a distribution
        probs - a list of probability densities evaluated at those samples
    """
    assert len(samples) == len(probs)
    assert len(samples) > 100, 'WARNING imprecision; use more samples'
    # sorted() works on both Python 2 and 3 (zip() is an iterator on 3,
    # so the original zip(...).sort() would fail there)
    pairs = sorted(zip(samples, probs))
    samples = numpy.array([x for x, p in pairs])
    probs = numpy.array([p for x, p in pairs])
    # geometric mean of adjacent densities approximates density in the gap
    density = numpy.sqrt(probs[1:] * probs[:-1])
    gaps = samples[1:] - samples[:-1]
    unif01_samples = 1.0 - numpy.exp(-len(samples) * gaps * density)
    return unif01_goodness_of_fit(unif01_samples, plot=plot)


def discrete_goodness_of_fit(
        samples,
        probs_dict,
        truncate_beyond=8,
        plot=False):
    """
    Transform arbitrary discrete data to multinomial
    and assess goodness of fit via Pearson's chi^2 test.
    """
    assert len(samples) > 100, 'WARNING imprecision; use more samples'
    counts = defaultdict(lambda: 0)
    for sample in samples:
        assert sample in probs_dict
        counts[sample] += 1
    # .items() exists on both Python 2 and 3 (.iteritems() is 2-only)
    items = [(prob, counts.get(i, 0)) for i, prob in probs_dict.items()]
    items.sort(reverse=True)
    # keep only the most probable cells; remaining cells are truncated
    truncated = (truncate_beyond and truncate_beyond < len(items))
    if truncated:
        items = items[:truncate_beyond]
    probs = [prob for prob, count in items]
    counts = [count for prob, count in items]
    return multinomial_goodness_of_fit(
        probs, counts, len(samples), truncated=truncated, plot=plot)


def bin_samples(samples, k=10, support=()):
    """
    Bins a collection of univariate samples into k bins of equal
    fill via the empirical cdf, to be used in goodness of fit testing.

    Returns
    counts : array k x 1
    bin_ranges : array k x 2

    each count is the number of samples in [bin_min, bin_max)
    except for the last bin which is [bin_min, bin_max]

    list partitioning algorithm adapted from Mark Dickinson:
    http://stackoverflow.com/questions/2659900

    NOTE: `support` defaults to an empty tuple rather than the original
    mutable [] default; behavior is identical (both are falsy).
    """
    samples = sorted(samples)

    N = len(samples)
    q, r = divmod(N, k)
    # we need to distribute the remainder relatively evenly;
    # tests will be inaccurate if we have small bins at the end
    indices = [i * q + min(r, i) for i in range(k + 1)]
    bins = [samples[indices[i]: indices[i + 1]] for i in range(k)]

    bin_ranges = []
    counts = []
    for i in range(k):
        bin_min = bins[i][0]
        try:
            # the next bin's first element is this bin's exclusive max
            bin_max = bins[i + 1][0]
        except IndexError:
            # last bin is closed on the right
            bin_max = bins[i][-1]
        bin_ranges.append([bin_min, bin_max])
        counts.append(len(bins[i]))
    if support:
        # widen the outer bins to cover the full known support
        bin_ranges[0][0] = support[0]
        bin_ranges[-1][1] = support[1]
    return numpy.array(counts), numpy.array(bin_ranges)


def histogram(samples, bin_count=None):
    """Count integer-valued samples into a dense vector of bin counts."""
    if bin_count is None:
        bin_count = numpy.max(samples) + 1
    v = numpy.zeros(bin_count, dtype=int)
    for sample in samples:
        v[sample] += 1
    return v
""" Read a dictionary from a JSON file, and add its contents to a Python dictionary. """ import json import types from instmakelib import rtimport INSTMAKE_SITE_DIR = "instmakesite" CONFIG_USAGE_LOGGER = "usage-logger" CONFIG_CLIDIFF_NORMPATH = "clidiff-normpath" def update(caller_config, json_filename): # This will throw errors fh = open(json_filename) file_config = json.load(fh) fh.close() assert type(file_config) == types.DictType caller_config.update(file_config) def load_site_plugin(name): """Import a plugin from the instmakesite directory. The import can throw exceptions that the caller has to catch.""" plugin_name = INSTMAKE_SITE_DIR + "." + name return rtimport.rtimport(plugin_name)
''' Renderers for various kinds of annotations that can be added to Bokeh plots ''' from __future__ import absolute_import from six import string_types from ..core.enums import (AngleUnits, Dimension, FontStyle, LegendClickPolicy, LegendLocation, Orientation, RenderMode, SpatialUnits, VerticalAlign, TextAlign) from ..core.has_props import abstract from ..core.properties import (Angle, AngleSpec, Auto, Bool, ColorSpec, Datetime, Dict, DistanceSpec, Either, Enum, Float, FontSizeSpec, Include, Instance, Int, List, NumberSpec, Override, Seq, String, StringSpec, Tuple, value) from ..core.property_mixins import FillProps, LineProps, TextProps from ..core.validation import error from ..core.validation.errors import BAD_COLUMN_NAME, NON_MATCHING_DATA_SOURCES_ON_LEGEND_ITEM_RENDERERS from ..model import Model from ..util.serialization import convert_datetime_type from .formatters import BasicTickFormatter, TickFormatter from .mappers import ContinuousColorMapper from .renderers import GlyphRenderer, Renderer from .sources import ColumnDataSource, DataSource from .tickers import BasicTicker, Ticker @abstract class Annotation(Renderer): ''' Base class for all annotation models. ''' plot = Instance(".models.plots.Plot", help=""" The plot to which this annotation is attached. """) level = Override(default="annotation") @abstract class TextAnnotation(Annotation): ''' Base class for text annotation models such as labels and titles. ''' class LegendItem(Model): ''' ''' def __init__(self, *args, **kwargs): super(LegendItem, self).__init__(*args, **kwargs) if isinstance(self.label, string_types): # Allow convenience of setting label as a string self.label = value(self.label) label = StringSpec(default=None, help=""" A label for this legend. Can be a string, or a column of a ColumnDataSource. If ``label`` is a field, then it must be in the renderers' data_source. """) renderers = List(Instance(GlyphRenderer), help=""" A list of the glyph renderers to draw in the legend. 
If ``label`` is a field, then all data_sources of renderers must be the same. """) @error(NON_MATCHING_DATA_SOURCES_ON_LEGEND_ITEM_RENDERERS) def _check_data_sources_on_renderers(self): if self.label and 'field' in self.label: if len({r.data_source for r in self.renderers}) != 1: return str(self) @error(BAD_COLUMN_NAME) def _check_field_label_on_data_source(self): if self.label and 'field' in self.label: if len(self.renderers) < 1: return str(self) source = self.renderers[0].data_source if self.label.get('field') not in source.column_names: return str(self) class Legend(Annotation): ''' Render informational legends for a plot. ''' location = Either(Enum(LegendLocation), Tuple(Float, Float), default="top_right", help=""" The location where the legend should draw itself. It's either one of ``bokeh.core.enums.LegendLocation``'s enumerated values, or a ``(x, y)`` tuple indicating an absolute location absolute location in screen coordinates (pixels from the bottom-left corner). """) orientation = Enum(Orientation, default="vertical", help=""" Whether the legend entries should be placed vertically or horizontally when they are drawn. """) border_props = Include(LineProps, help=""" The %s for the legend border outline. """) border_line_color = Override(default="#e5e5e5") border_line_alpha = Override(default=0.5) background_props = Include(FillProps, help=""" The %s for the legend background style. """) inactive_props = Include(FillProps, help=""" The %s for the legend background style when inactive. """) click_policy = Enum(LegendClickPolicy, default="none", help=""" Defines what happens when a lengend's item is clicked. """) background_fill_color = Override(default="#ffffff") background_fill_alpha = Override(default=0.95) inactive_fill_color = Override(default="white") inactive_fill_alpha = Override(default=0.9) label_props = Include(TextProps, help=""" The %s for the legend labels. 
""") label_text_baseline = Override(default='middle') label_text_font_size = Override(default={'value': '10pt'}) label_standoff = Int(5, help=""" The distance (in pixels) to separate the label from its associated glyph. """) label_height = Int(20, help=""" The minimum height (in pixels) of the area that legend labels should occupy. """) label_width = Int(20, help=""" The minimum width (in pixels) of the area that legend labels should occupy. """) glyph_height = Int(20, help=""" The height (in pixels) that the rendered legend glyph should occupy. """) glyph_width = Int(20, help=""" The width (in pixels) that the rendered legend glyph should occupy. """) margin = Int(10, help=""" Amount of margin around the legend. """) padding = Int(10, help=""" Amount of padding around the contents of the legend. Only applicable when when border is visible, otherwise collapses to 0. """) spacing = Int(3, help=""" Amount of spacing (in pixles) between legend entries. """) items = List(Instance(LegendItem), help=""" A list of :class:`~bokeh.model.annotations.LegendItem` instances to be rendered in the legend. This can be specified explicitly, for instance: .. code-block:: python legend = Legend(items=[ LegendItem(label="sin(x)" , renderers=[r0, r1]), LegendItem(label="2*sin(x)" , renderers=[r2]), LegendItem(label="3*sin(x)" , renderers=[r3, r4]) ]) But as a convenience, can also be given more compactly as a list of tuples: .. code-block:: python legend = Legend(items=[ ("sin(x)" , [r0, r1]), ("2*sin(x)" , [r2]), ("3*sin(x)" , [r3, r4]) ]) where each tuple is of the form: *(label, renderers)*. """).accepts(List(Tuple(String, List(Instance(GlyphRenderer)))), lambda items: [LegendItem(label=item[0], renderers=item[1]) for item in items]) class ColorBar(Annotation): ''' Render a color bar based on a color mapper. ''' location = Either(Enum(LegendLocation), Tuple(Float, Float), default="top_right", help=""" The location where the color bar should draw itself. 
It's either one of ``bokeh.core.enums.LegendLocation``'s enumerated values, or a ``(x, y)`` tuple indicating an absolute location absolute location in screen coordinates (pixels from the bottom-left corner). .. warning:: If the color bar is placed in a side panel, the location will likely have to be set to `(0,0)`. """) orientation = Enum(Orientation, default="vertical", help=""" Whether the color bar should be oriented vertically or horizontally. """) height = Either(Auto, Int(), help=""" The height (in pixels) that the color scale should occupy. """) width = Either(Auto, Int(), help=""" The width (in pixels) that the color scale should occupy. """) scale_alpha = Float(1.0, help=""" The alpha with which to render the color scale. """) title = String(help=""" The title text to render. """) title_props = Include(TextProps, help=""" The %s values for the title text. """) title_text_font_size = Override(default={'value': "10pt"}) title_text_font_style = Override(default="italic") title_standoff = Int(2, help=""" The distance (in pixels) to separate the title from the color bar. """) ticker = Instance(Ticker, default=lambda: BasicTicker(), help=""" A Ticker to use for computing locations of axis components. """) formatter = Instance(TickFormatter, default=lambda: BasicTickFormatter(), help=""" A TickFormatter to use for formatting the visual appearance of ticks. """) major_label_overrides = Dict(Either(Float, String), String, default={}, help=""" Provide explicit tick label values for specific tick locations that override normal formatting. """) color_mapper = Instance(ContinuousColorMapper, help=""" A continuous color mapper containing a color palette to render. .. warning:: If the `low` and `high` attributes of the ColorMapper aren't set, ticks and tick labels won't be rendered. Additionally, if a LogTicker is passed to the `ticker` argument and either or both of the logarithms of `low` and `high` values of the color_mapper are non-numeric (i.e. 
`low=0`), the tick and tick labels won't be rendered. """) margin = Int(30, help=""" Amount of margin (in pixels) around the outside of the color bar. """) padding = Int(10, help=""" Amount of padding (in pixels) between the color scale and color bar border. """) major_label_props = Include(TextProps, help=""" The %s of the major tick labels. """) major_label_text_align = Override(default="center") major_label_text_baseline = Override(default="middle") major_label_text_font_size = Override(default={'value': "8pt"}) label_standoff = Int(5, help=""" The distance (in pixels) to separate the tick labels from the color bar. """) major_tick_props = Include(LineProps, help=""" The %s of the major ticks. """) major_tick_line_color = Override(default="#ffffff") major_tick_in = Int(default=5, help=""" The distance (in pixels) that major ticks should extend into the main plot area. """) major_tick_out = Int(default=0, help=""" The distance (in pixels) that major ticks should extend out of the main plot area. """) minor_tick_props = Include(LineProps, help=""" The %s of the minor ticks. """) minor_tick_line_color = Override(default=None) minor_tick_in = Int(default=0, help=""" The distance (in pixels) that minor ticks should extend into the main plot area. """) minor_tick_out = Int(default=0, help=""" The distance (in pixels) that major ticks should extend out of the main plot area. """) bar_props = Include(LineProps, help=""" The %s for the color scale bar outline. """) bar_line_color = Override(default=None) border_props = Include(LineProps, help=""" The %s for the color bar border outline. """) border_line_color = Override(default=None) background_props = Include(FillProps, help=""" The %s for the color bar background style. """) background_fill_color = Override(default="#ffffff") background_fill_alpha = Override(default=0.95) def _DEFAULT_ARROW(): from .arrow_heads import OpenHead return OpenHead() class Arrow(Annotation): ''' Render an arrow as an annotation. 
''' x_start = NumberSpec(help=""" The x-coordinates to locate the start of the arrows. """) y_start = NumberSpec(help=""" The y-coordinates to locate the start of the arrows. """) start_units = Enum(SpatialUnits, default='data', help=""" The unit type for the start_x and start_y attributes. Interpreted as "data space" units by default. """) start = Instance('.models.arrow_heads.ArrowHead', default=None, help=""" Instance of ArrowHead. """) x_end = NumberSpec(help=""" The x-coordinates to locate the end of the arrows. """) y_end = NumberSpec(help=""" The y-coordinates to locate the end of the arrows. """) end_units = Enum(SpatialUnits, default='data', help=""" The unit type for the end_x and end_y attributes. Interpreted as "data space" units by default. """) end = Instance('.models.arrow_heads.ArrowHead', default=_DEFAULT_ARROW, help=""" Instance of ArrowHead. """) body_props = Include(LineProps, use_prefix=False, help=""" The %s values for the arrow body. """) source = Instance(DataSource, help=""" Local data source to use when rendering annotations on the plot. """) x_range_name = String('default', help=""" A particular (named) x-range to use for computing screen locations when rendering annotations on the plot. If unset, use the default x-range. """) y_range_name = String('default', help=""" A particular (named) y-range to use for computing screen locations when rendering annotations on the plot. If unset, use the default y-range. """) class BoxAnnotation(Annotation): ''' Render a shaded rectangular region as an annotation. ''' left = Either(Auto, NumberSpec(), default=None, help=""" The x-coordinates of the left edge of the box annotation. Datetime values are also accepted, but note that they are immediately converted to milliseconds-since-epoch. """) left_units = Enum(SpatialUnits, default='data', help=""" The unit type for the left attribute. Interpreted as "data space" units by default. 
""") right = Either(Auto, NumberSpec(), default=None, help=""" The x-coordinates of the right edge of the box annotation. Datetime values are also accepted, but note that they are immediately converted to milliseconds-since-epoch. """) right_units = Enum(SpatialUnits, default='data', help=""" The unit type for the right attribute. Interpreted as "data space" units by default. """) bottom = Either(Auto, NumberSpec(), default=None, help=""" The y-coordinates of the bottom edge of the box annotation. Datetime values are also accepted, but note that they are immediately converted to milliseconds-since-epoch. """) bottom_units = Enum(SpatialUnits, default='data', help=""" The unit type for the bottom attribute. Interpreted as "data space" units by default. """) top = Either(Auto, NumberSpec(), default=None, help=""" The y-coordinates of the top edge of the box annotation. Datetime values are also accepted, but note that they are immediately converted to milliseconds-since-epoch. """) top_units = Enum(SpatialUnits, default='data', help=""" The unit type for the top attribute. Interpreted as "data space" units by default. """) x_range_name = String('default', help=""" A particular (named) x-range to use for computing screen locations when rendering box annotations on the plot. If unset, use the default x-range. """) y_range_name = String('default', help=""" A particular (named) y-range to use for computing screen locations when rendering box annotations on the plot. If unset, use the default y-range. """) line_props = Include(LineProps, use_prefix=False, help=""" The %s values for the box. """) line_alpha = Override(default=0.3) line_color = Override(default="#cccccc") fill_props = Include(FillProps, use_prefix=False, help=""" The %s values for the box. 
""") fill_alpha = Override(default=0.4) fill_color = Override(default="#fff9ba") render_mode = Enum(RenderMode, default="canvas", help=""" Specifies whether the box is rendered as a canvas element or as an css element overlaid on the canvas. The default mode is "canvas". .. warning:: The line_dash and line_dash_offset attributes aren't supported if the render_mode is set to "css" """) class Band(Annotation): ''' Render a filled area band along a dimension. ''' lower = DistanceSpec(help=""" The coordinates of the lower portion of the filled area band. """) upper = DistanceSpec(help=""" The coordinations of the upper portion of the filled area band. """) base = DistanceSpec(help=""" The orthogonal coordinates of the upper and lower values. """) dimension = Enum(Dimension, default='height', help=""" The direction of the band. """) source = Instance(DataSource, default=lambda: ColumnDataSource(), help=""" Local data source to use when rendering annotations on the plot. """) x_range_name = String('default', help=""" A particular (named) x-range to use for computing screen locations when rendering annotations on the plot. If unset, use the default x-range. """) y_range_name = String('default', help=""" A particular (named) y-range to use for computing screen locations when rendering annotations on the plot. If unset, use the default y-range. """) line_props = Include(LineProps, use_prefix=False, help=""" The %s values for the band. """) line_alpha = Override(default=0.3) line_color = Override(default="#cccccc") fill_props = Include(FillProps, use_prefix=False, help=""" The %s values for the band. """) fill_alpha = Override(default=0.4) fill_color = Override(default="#fff9ba") class Label(TextAnnotation): ''' Render a single text label as an annotation. ``Label`` will render a single text label at given ``x`` and ``y`` coordinates, which can be in either screen (pixel) space, or data (axis range) space. 
The label can also be configured with a screen space offset from ``x`` and ``y``, by using the ``x_offset`` and ``y_offset`` properties. Additionally, the label can be rotated with the ``angle`` property. There are also standard text, fill, and line properties to control the appearance of the text, its background, as well as the rectangular bounding box border. ''' x = Float(help=""" The x-coordinate in screen coordinates to locate the text anchors. Datetime values are also accepted, but note that they are immediately converted to milliseconds-since-epoch. """).accepts(Datetime, convert_datetime_type) x_units = Enum(SpatialUnits, default='data', help=""" The unit type for the x attribute. Interpreted as "data space" units by default. """) y = Float(help=""" The y-coordinate in screen coordinates to locate the text anchors. Datetime values are also accepted, but note that they are immediately converted to milliseconds-since-epoch. """).accepts(Datetime, convert_datetime_type) y_units = Enum(SpatialUnits, default='data', help=""" The unit type for the y attribute. Interpreted as "data space" units by default. """) text = String(help=""" The text value to render. """) angle = Angle(default=0, help=""" The angle to rotate the text, as measured from the horizontal. .. warning:: The center of rotation for canvas and css render_modes is different. For `render_mode="canvas"` the label is rotated from the top-left corner of the annotation, while for `render_mode="css"` the annotation is rotated around it's center. """) angle_units = Enum(AngleUnits, default='rad', help=""" Acceptable values for units are ``"rad"`` and ``"deg"`` """) x_offset = Float(default=0, help=""" Offset value to apply to the x-coordinate. This is useful, for instance, if it is desired to "float" text a fixed distance in screen units from a given data position. """) y_offset = Float(default=0, help=""" Offset value to apply to the y-coordinate. 
This is useful, for instance, if it is desired to "float" text a fixed distance in screen units from a given data position. """) # TODO (bev) these should probably not be dataspec properties text_props = Include(TextProps, use_prefix=False, help=""" The %s values for the text. """) # TODO (bev) these should probably not be dataspec properties background_props = Include(FillProps, use_prefix=True, help=""" The %s values for the text bounding box. """) background_fill_color = Override(default=None) # TODO (bev) these should probably not be dataspec properties border_props = Include(LineProps, use_prefix=True, help=""" The %s values for the text bounding box. """) border_line_color = Override(default=None) x_range_name = String('default', help=""" A particular (named) x-range to use for computing screen location when rendering an annotation on the plot. If unset, use the default x-range. """) y_range_name = String('default', help=""" A particular (named) y-range to use for computing screen location when rendering an annotation on the plot. If unset, use the default y-range. """) render_mode = Enum(RenderMode, default="canvas", help=""" Specifies whether the text is rendered as a canvas element or as an css element overlaid on the canvas. The default mode is "canvas". .. note:: The CSS labels won't be present in the output using the "save" tool. .. warning:: Not all visual styling properties are supported if the render_mode is set to "css". The border_line_dash property isn't fully supported and border_line_dash_offset isn't supported at all. Setting text_alpha will modify the opacity of the entire background box and border in addition to the text. Finally, clipping Label annotations inside of the plot area isn't supported in "css" mode. """) class LabelSet(TextAnnotation): ''' Render multiple text labels as annotations. 
``LabelSet`` will render multiple text labels at given ``x`` and ``y`` coordinates, which can be in either screen (pixel) space, or data (axis range) space. In this case (as opposed to the single ``Label`` model), ``x`` and ``y`` can also be the name of a column from a :class:`~bokeh.models.sources.ColumnDataSource`, in which case the labels will be "vectorized" using coordinate values from the specified columns. The label can also be configured with a screen space offset from ``x`` and ``y``, by using the ``x_offset`` and ``y_offset`` properties. These offsets may be vectorized by giving the name of a data source column. Additionally, the label can be rotated with the ``angle`` property (which may also be a column name.) There are also standard text, fill, and line properties to control the appearance of the text, its background, as well as the rectangular bounding box border. The data source is provided by setting the ``source`` property. ''' x = NumberSpec(help=""" The x-coordinates to locate the text anchors. """) x_units = Enum(SpatialUnits, default='data', help=""" The unit type for the xs attribute. Interpreted as "data space" units by default. """) y = NumberSpec(help=""" The y-coordinates to locate the text anchors. """) y_units = Enum(SpatialUnits, default='data', help=""" The unit type for the ys attribute. Interpreted as "data space" units by default. """) text = StringSpec("text", help=""" The text values to render. """) angle = AngleSpec(default=0, help=""" The angles to rotate the text, as measured from the horizontal. .. warning:: The center of rotation for canvas and css render_modes is different. For `render_mode="canvas"` the label is rotated from the top-left corner of the annotation, while for `render_mode="css"` the annotation is rotated around it's center. """) x_offset = NumberSpec(default=0, help=""" Offset values to apply to the x-coordinates. 
This is useful, for instance, if it is desired to "float" text a fixed distance in screen units from a given data position. """) y_offset = NumberSpec(default=0, help=""" Offset values to apply to the y-coordinates. This is useful, for instance, if it is desired to "float" text a fixed distance in screen units from a given data position. """) text_props = Include(TextProps, use_prefix=False, help=""" The %s values for the text. """) background_props = Include(FillProps, use_prefix=True, help=""" The %s values for the text bounding box. """) background_fill_color = Override(default=None) border_props = Include(LineProps, use_prefix=True, help=""" The %s values for the text bounding box. """) border_line_color = Override(default=None) source = Instance(DataSource, default=lambda: ColumnDataSource(), help=""" Local data source to use when rendering annotations on the plot. """) x_range_name = String('default', help=""" A particular (named) x-range to use for computing screen locations when rendering annotations on the plot. If unset, use the default x-range. """) y_range_name = String('default', help=""" A particular (named) y-range to use for computing screen locations when rendering annotations on the plot. If unset, use the default y-range. """) render_mode = Enum(RenderMode, default="canvas", help=""" Specifies whether the text is rendered as a canvas element or as an css element overlaid on the canvas. The default mode is "canvas". .. note:: The CSS labels won't be present in the output using the "save" tool. .. warning:: Not all visual styling properties are supported if the render_mode is set to "css". The border_line_dash property isn't fully supported and border_line_dash_offset isn't supported at all. Setting text_alpha will modify the opacity of the entire background box and border in addition to the text. Finally, clipping Label annotations inside of the plot area isn't supported in "css" mode. 
""") class PolyAnnotation(Annotation): ''' Render a shaded polygonal region as an annotation. ''' xs = Seq(Float, default=[], help=""" The x-coordinates of the region to draw. """) xs_units = Enum(SpatialUnits, default='data', help=""" The unit type for the xs attribute. Interpreted as "data space" units by default. """) ys = Seq(Float, default=[], help=""" The y-coordinates of the region to draw. """) ys_units = Enum(SpatialUnits, default='data', help=""" The unit type for the ys attribute. Interpreted as "data space" units by default. """) x_range_name = String('default', help=""" A particular (named) x-range to use for computing screen locations when rendering box annotations on the plot. If unset, use the default x-range. """) y_range_name = String('default', help=""" A particular (named) y-range to use for computing screen locations when rendering box annotations on the plot. If unset, use the default y-range. """) line_props = Include(LineProps, use_prefix=False, help=""" The %s values for the polygon. """) line_alpha = Override(default=0.3) line_color = Override(default="#cccccc") fill_props = Include(FillProps, use_prefix=False, help=""" The %s values for the polygon. """) fill_alpha = Override(default=0.4) fill_color = Override(default="#fff9ba") class Span(Annotation): """ Render a horizontal or vertical line span. """ location = Float(help=""" The location of the span, along ``dimension``. """) location_units = Enum(SpatialUnits, default='data', help=""" The unit type for the location attribute. Interpreted as "data space" units by default. """) dimension = Enum(Dimension, default='width', help=""" The direction of the span. """) x_range_name = String('default', help=""" A particular (named) x-range to use for computing screen locations when rendering annotations on the plot. If unset, use the default x-range. 
""") y_range_name = String('default', help=""" A particular (named) y-range to use for computing screen locations when rendering annotations on the plot. If unset, use the default y-range. """) render_mode = Enum(RenderMode, default="canvas", help=""" Specifies whether the span is rendered as a canvas element or as an css element overlaid on the canvas. The default mode is "canvas". .. warning:: The line_dash and line_dash_offset attributes aren't supported if the render_mode is set to "css" """) line_props = Include(LineProps, use_prefix=False, help=""" The %s values for the span. """) class Title(TextAnnotation): ''' Render a single title box as an annotation. ''' text = String(help=""" The text value to render. """) vertical_align = Enum(VerticalAlign, default='bottom', help=""" Aligment of the text in its enclosing space, *across* the direction of the text. """) align = Enum(TextAlign, default='left', help=""" Aligment of the text in its enclosing space, *along* the direction of the text. """) offset = Float(default=0, help=""" Offset the text by a number of pixels (can be positive or negative). Shifts the text in different directions based on the location of the title: * above: shifts title right * right: shifts title down * below: shifts title right * left: shifts title up """) text_font = String(default="helvetica", help=""" Name of a font to use for rendering text, e.g., ``'times'``, ``'helvetica'``. """) text_font_size = FontSizeSpec(default=value("10pt")) text_font_style = Enum(FontStyle, default="bold", help=""" A style to use for rendering text. Acceptable values are: - ``'normal'`` normal text - ``'italic'`` *italic text* - ``'bold'`` **bold text** """) text_color = ColorSpec(default="#444444", help=""" A color to use to fill text with. 
Acceptable values are: - any of the 147 named `CSS colors`_, e.g ``'green'``, ``'indigo'`` - an RGB(A) hex value, e.g., ``'#FF0000'``, ``'#44444444'`` - a 3-tuple of integers (r,g,b) between 0 and 255 - a 4-tuple of (r,g,b,a) where r,g,b are integers between 0..255 and a is between 0..1 .. _CSS colors: http://www.w3schools.com/cssref/css_colornames.asp """) text_alpha = NumberSpec(default=1.0, help=""" An alpha value to use to fill text with. Acceptable values are floating point numbers between 0 (transparent) and 1 (opaque). """) background_props = Include(FillProps, use_prefix=True, help=""" The %s values for the text bounding box. """) background_fill_color = Override(default=None) border_props = Include(LineProps, use_prefix=True, help=""" The %s values for the text bounding box. """) border_line_color = Override(default=None) render_mode = Enum(RenderMode, default="canvas", help=""" Specifies whether the text is rendered as a canvas element or as an css element overlaid on the canvas. The default mode is "canvas". .. note:: The CSS labels won't be present in the output using the "save" tool. .. warning:: Not all visual styling properties are supported if the render_mode is set to "css". The border_line_dash property isn't fully supported and border_line_dash_offset isn't supported at all. Setting text_alpha will modify the opacity of the entire background box and border in addition to the text. Finally, clipping Label annotations inside of the plot area isn't supported in "css" mode. """) class Tooltip(Annotation): ''' Render a tooltip. .. note:: This model is currently managed by BokehJS and is not useful directly from python. ''' level = Override(default="overlay") attachment = Enum("horizontal", "vertical", "left", "right", "above", "below", help=""" Whether the tooltip should display to the left or right off the cursor position or above or below it, or if it should be automatically placed in the horizontal or vertical dimension. 
""") inner_only = Bool(default=True, help=""" Whether to display outside a central plot frame area. """) show_arrow = Bool(default=True, help=""" Whether tooltip's arrow should be showed. """) def _DEFAULT_TEE(): from .arrow_heads import TeeHead return TeeHead(level="underlay", size=10) class Whisker(Annotation): ''' Render a whisker along a dimension. ''' lower = DistanceSpec(help=""" The coordinates of the lower end of the whiskers. """) lower_head = Instance('.models.arrow_heads.ArrowHead', default=_DEFAULT_TEE, help=""" Instance of ArrowHead. """) upper = DistanceSpec(help=""" The coordinations of the upper end of the whiskers. """) upper_head = Instance('.models.arrow_heads.ArrowHead', default=_DEFAULT_TEE, help=""" Instance of ArrowHead. """) base = DistanceSpec(help=""" The orthogonal coordinates of the upper and lower values. """) dimension = Enum(Dimension, default='height', help=""" The direction of the band. """) source = Instance(DataSource, default=lambda: ColumnDataSource(), help=""" Local data source to use when rendering annotations on the plot. """) x_range_name = String('default', help=""" A particular (named) x-range to use for computing screen locations when rendering annotations on the plot. If unset, use the default x-range. """) y_range_name = String('default', help=""" A particular (named) y-range to use for computing screen locations when rendering annotations on the plot. If unset, use the default y-range. """) line_props = Include(LineProps, use_prefix=False, help=""" The %s values for the whisker body. """) level = Override(default="underlay") class ToolbarPanel(Annotation): # TODO: this shouldn't be an annotation toolbar = Instance(".models.tools.Toolbar", help=""" A toolbar to display. """)
import unittest

from interval import interval, fpu


class FpuTestCase(unittest.TestCase):
    # Exercises the floating-point helpers: directed rounding (fpu.up/fpu.down
    # evaluate a thunk under a forced rounding mode), IEEE-754 special values,
    # float coercion rules, nan-aware min/max, and rounded integer powers.

    def test_third(self):
        "Nearest rounding of 1/3 is downwards."
        self.assertEqual(1/3.0, fpu.down(lambda: 1.0 / 3.0))
        self.assertTrue(1/3.0 < fpu.up(lambda: 1.0 / 3.0))
        self.assertEqual(-1/3.0, fpu.up(lambda: 1.0 / -3.0))
        self.assertTrue(-1/3.0 > fpu.down(lambda: 1.0 / -3.0))

    def test_fourth(self):
        " 1/4 is exact."
        # 1/4 is a power of two, so both rounding directions give the same value.
        self.assertEqual(1/4.0, fpu.down(lambda: 1.0 / 4.0))
        self.assertEqual(1/4.0, fpu.up(lambda: 1.0 / 4.0))
        self.assertEqual(-1/4.0, fpu.up(lambda: 1.0 / -4.0))
        self.assertEqual(-1/4.0, fpu.down(lambda: 1.0 / -4.0))

    def test_fifth(self):
        "Nearest rounding of 1/5 is upwards."
        self.assertEqual(1/5.0, fpu.up(lambda: 1.0 / 5.0))
        self.assertTrue(1/5.0 > fpu.down(lambda: 1.0 / 5.0))
        self.assertEqual(-1/5.0, fpu.down(lambda: 1.0 / -5.0))
        self.assertTrue(-1/5.0 < fpu.up(lambda: 1.0 / -5.0))

    def test_ieee754(self):
        "fpu.float respect ieee754 semantics."
        self.assertEqual(fpu.infinity + fpu.infinity, fpu.infinity)
        self.assertTrue(fpu.isnan(fpu.nan))
        self.assertTrue(fpu.isnan(0.0 * fpu.infinity))
        self.assertTrue(fpu.isnan(fpu.infinity - fpu.infinity))

    def test_float_coercion(self):
        "Only real-number scalars should be able to coerce as fpu.float"
        self.assertRaises(Exception, lambda: float(1,2))
        self.assertRaises(Exception, lambda: float((1,2)))
        self.assertRaises(Exception, lambda: float([1,2]))
        self.assertRaises(Exception, lambda: float('a'))
        self.assertRaises(Exception, lambda: float(1+1j))

    def test_min(self):
        "Verify corner cases with nan, -inf, +inf"
        self.assertEqual(fpu.min((1.0, 2.0)), 1.0)
        self.assertEqual(fpu.min((1.0, fpu.infinity)), 1.0)
        self.assertEqual(fpu.min((1.0, -fpu.infinity)), -fpu.infinity)
        self.assertTrue(fpu.isnan(fpu.min((1.0, -fpu.nan))))

    def test_max(self):
        "Verify corner cases with nan, -inf, +inf"
        self.assertEqual(fpu.max((1.0, 2.0)), 2.0)
        self.assertEqual(fpu.max((1.0, fpu.infinity)), fpu.infinity)
        self.assertEqual(fpu.max((1.0, -fpu.infinity)), 1.0)
        self.assertTrue(fpu.isnan(fpu.max((1.0, fpu.nan))))

    def test_power(self):
        x = 1/3.0
        # The cube of one third should depend on the rounding mode
        self.assertTrue(fpu.down(lambda: x*x*x) < fpu.up(lambda: x*x*x))
        # But using the built-in power operator, it doesn't necessarily do it
        # print fpu.down(lambda: x**3) < fpu.up(lambda: x**3))
        # So we define an integer power methods that does
        self.assertTrue(fpu.power_rd(x, 3) < fpu.power_ru(x, 3))
        self.assertTrue(fpu.power_rd(-x, 3) < fpu.power_ru(-x, 3))
        self.assertTrue(fpu.power_rd(x, 4) < fpu.power_ru(x, 4))
        self.assertTrue(fpu.power_rd(-x, 4) < fpu.power_ru(-x, 4))
        self.assertEqual(
            (fpu.down(lambda: x*x*x), fpu.up(lambda: x*x*x)),
            (fpu.power_rd(x, 3), fpu.power_ru(x, 3)))


class ModuleTestCase(unittest.TestCase):

    def test_namespace(self):
        # The package should export exactly this stable set of public names.
        import interval
        self.assertEqual(
            dir(interval),
            ['__builtins__', '__doc__', '__file__', '__name__', '__path__',
             'fpu', 'imath', 'inf', 'interval'])


class IntervalTestCase(unittest.TestCase):
    # Exercises interval construction, canonicalization, arithmetic operators,
    # set operations, and the coercion of plain scalars to intervals.

    def test_trivial_constructor(self):
        # interval[a, b] builds one component; interval(a, b) builds two.
        self.assertEqual(interval[1], ((1, 1),))
        self.assertEqual(interval(1), ((1, 1),))
        self.assertEqual(interval[1, 2], ((1, 2),))
        self.assertEqual(interval(1, 2), ((1, 1), (2, 2)))
        self.assertEqual(interval([1, 2], [3, 4]), ((1, 2), (3, 4)))
        self.assertEqual(interval([1,2]), interval(interval([1, 2])))

    def test_nan_constructor(self):
        # Any nan endpoint collapses the interval to the whole real line.
        self.assertEqual(interval[2, fpu.nan], ((-fpu.infinity, fpu.infinity),))
        self.assertEqual(interval[2, fpu.nan], ((-fpu.infinity, fpu.infinity),))
        self.assertEqual(interval(2, fpu.nan, 9), ((-fpu.infinity, fpu.infinity),))

    def test_failing_constructor(self):
        self.assertRaises(interval.ComponentError, lambda: interval[1, [2, 3]])
        self.assertRaises(interval.ComponentError, lambda: interval[1, 2, 3])
        self.assertRaises(interval.ComponentError, lambda: interval(0, [1, 2, 3]))
        self.assertRaises(interval.ComponentError, lambda: interval(0, [1, [2, 3]]))
        self.assertRaises(interval.ComponentError, lambda: interval['a', 1])

    def test_canonical_constructor(self):
        # Overlapping components merge; integers beyond 2**53 round outward
        # to the enclosing pair of representable doubles.
        self.assertEqual(interval([1, 3], [4, 6], [2, 5], 9), ((1, 6), (9, 9)))
        self.assertEqual(interval[2 ** (52 + 1) - 1], interval[9007199254740991.0])
        self.assertEqual(interval[2 ** (52 + 1) + 1],
                         interval[4503599627370496 * 2.0, 4503599627370497 * 2.0])
        self.assertEqual(interval[-2 ** (52 + 1) + 1], interval[-9007199254740991.0])
        self.assertEqual(interval[-2 ** (52 + 1) - 1],
                         interval[-4503599627370497 * 2.0, -4503599627370496 * 2.0])
        self.assertEqual(interval[2 ** (52 + 2) + 1],
                         interval[4503599627370496 * 4.0, 4503599627370497 * 4.0])
        self.assertEqual(interval[2 ** (52 + 2) + 2],
                         interval[4503599627370496 * 4.0, 4503599627370497 * 4.0])
        self.assertEqual(interval[2 ** (52 + 2) + 3],
                         interval[4503599627370496 * 4.0, 4503599627370497 * 4.0])
        self.assertEqual(interval[-2 ** (52 + 2) - 1],
                         interval[-4503599627370497 * 4.0, -4503599627370496 * 4.0])
        self.assertEqual(interval[-2 ** (52 + 2) - 2],
                         interval[-4503599627370497 * 4.0, -4503599627370496 * 4.0])
        self.assertEqual(interval[-2 ** (52 + 2) - 3],
                         interval[-4503599627370497 * 4.0, -4503599627370496 * 4.0])

    def test_unary(self):
        self.assertEqual(interval[1, 2], +interval[1, 2])
        self.assertEqual(interval[-2, -1], -interval[1, 2])

    def test_sum(self):
        self.assertEqual(interval[-fpu.infinity, +fpu.infinity],
                         interval[-fpu.infinity] + interval[fpu.infinity])
        self.assertEqual(interval[4, 6], interval[1, 2] + interval[3, 4])
        self.assertEqual(interval[3, fpu.infinity], interval[1, fpu.infinity] + interval[2])
        self.assertEqual(interval[-fpu.infinity, +fpu.infinity],
                         interval[-fpu.infinity, -1] + interval[2, +fpu.infinity])
        self.assertEqual(interval[-fpu.infinity, +fpu.infinity],
                         interval[-fpu.infinity] + interval[8, +fpu.infinity])
        self.assertEqual(interval([1, 2], [10, fpu.infinity]) + interval([1,9],[-2,-1]),
                         interval([-1, 1], [2, fpu.infinity]))
        self.assertEqual(interval[1, 9] + interval([1, 2], [10, fpu.infinity]),
                         interval[2, fpu.infinity])

    def test_sum_coercion(self):
        # Scalars coerce to intervals; complex and tuples must be rejected.
        self.assertEqual(interval[1,2] + 2, interval[3, 4])
        self.assertRaises(TypeError, lambda: interval[1,2] + 1j)
        self.assertEqual(1 + interval[4, 5], interval[5, 6])
        self.assertRaises(TypeError, lambda: (1, 2) + interval[1,2])
        self.assertEqual(fpu.infinity + interval[4, 5], interval[fpu.infinity])

    def test_sub(self):
        self.assertEqual(interval[1, 2] - interval[3, 4], interval[-3.0, -1.0])
        self.assertEqual(interval[1, 2] - 0.5, interval[0.5, 1.5])
        self.assertEqual(1.5 - interval[1, 2], interval[-0.5, 0.5])

    def test_mul(self):
        self.assertEqual(interval[-fpu.infinity, +fpu.infinity], fpu.infinity * interval[0])
        self.assertEqual(interval[+fpu.infinity], interval[+fpu.infinity] * interval[3])
        self.assertEqual(interval[-8, +10], interval[1, 2] * interval[-4, 5])
        self.assertEqual(interval[3, 8], interval[1, 2] * interval[3, 4])
        self.assertEqual(interval[-fpu.infinity, +fpu.infinity],
                         interval[0,1 ] * interval[2, +fpu.infinity])
        self.assertEqual(interval[2, fpu.infinity],
                         interval[-fpu.infinity,-2] * interval[-fpu.infinity,-1])
        self.assertEqual(interval([1, 2], [3, 4]) * interval[0.5, 2], interval[0.5, 8])
        self.assertEqual(interval[1, 2] * 2, interval[2, 4])

    def test_inverse(self):
        # Inverting an interval containing zero splits it at the pole.
        self.assertEqual(interval[0.5, 1], interval[1, 2].inverse())
        self.assertEqual(interval[-1, -0.5],(-interval[1, 2]).inverse())
        self.assertEqual(interval([-fpu.infinity, -1], [0.5, +fpu.infinity]),
                         interval[-1,2].inverse())
        self.assertEqual(interval(-fpu.infinity, [1, +fpu.infinity]),
                         interval[0,1].inverse())
        self.assertEqual(interval([-fpu.infinity, -2.0], [0.0, fpu.infinity]),
                         interval([-0.5, 0.5], [0.2, fpu.infinity]).inverse())

    def test_division(self):
        self.assertEqual(interval[-fpu.infinity, fpu.infinity], interval[0,1] / interval[0,1])
        self.assertEqual(interval[0.5], interval[1] / 2)
        self.assertEqual(interval[0.5], 1 / interval[2])

    def test_power(self):
        # Only integer exponents are supported.
        self.assertRaises(TypeError, lambda: interval[1, 2] ** (1.3))
        self.assertEqual((-interval[1, 2]).inverse(), (-interval[1, 2]) ** -1)
        self.assertEqual(interval[0, 4], interval[-1, 2] ** 2)
        self.assertEqual(interval[-27, 8], interval[-3, 2] ** 3)
        self.assertEqual(interval[-1, 2], (interval[-1,2]**-1)**-1)
        self.assertEqual(interval([-0.38712442133802405]) ** 3,
                         interval([-0.058016524353106828, -0.058016524353106808]))
        self.assertEqual(
            interval[fpu.down(lambda: (1/3.0)*(1/3.0)), fpu.up(lambda: (1/3.0)*(1/3.0))],
            (interval[1]/3.0) ** 2)
        self.assertEqual(
            interval[fpu.down(lambda: (1/3.0)*(1/3.0)*(1/3.0)),
                     fpu.up(lambda: (1/3.0)*(1/3.0)*(1/3.0))],
            (interval[1]/3.0) ** 3)

    def test_format(self):
        # repr must round-trip through eval.
        for x in interval[1], interval[1,2], interval([1,2], [3,4]):
            self.assertEqual(x, eval(repr(x)))

    def test_intersection(self):
        self.assertEqual(interval[1, 2] & interval[0, 3], interval[1, 2])
        self.assertEqual(interval[1.1, 1.9] & interval[1.3, 2.5], interval[1.3, 1.9])
        self.assertEqual(interval[1.1, 1.9] & interval[0.3, 0.7], interval())
        self.assertEqual(interval([1, 3], [4, 5]) & interval[2], interval[2])
        self.assertEqual(interval([1, 3], [4, 5]) & interval(2,4.5), interval(2, 4.5))
        self.assertEqual(interval[1, 2] & 1.2, interval(1.2))
        self.assertEqual(2.1 & interval[1, 2], interval())

    def test_union(self):
        self.assertEqual(interval([1, 6], 9), interval([1, 3], [4, 6]) | interval([2, 5], 9))
        self.assertEqual(interval[1, 2] | 2.1, interval([1, 2], 2.1))
        self.assertEqual(2.1 | interval[1, 2], interval([1, 2], 2.1))

    def test_hull(self):
        self.assertEqual(interval([1, 9]),
                         interval.hull((interval([1, 3], [4, 6]), interval([2, 5], 9))))

    def test_inclusion(self):
        # Containment must agree with intersection: x in y iff x & y == x.
        def verify_in(x, y):
            self.assertTrue(x in y)
            self.assertEqual(x & y, interval(x))
        verify_in(1.5, interval[1, 2])
        verify_in(1, interval[1, 2])
        verify_in(2, interval[1, 2])
        verify_in(interval[1, 2], interval[1, 2])
        verify_in(interval[1.1, 2], interval[1, 2])
        verify_in(interval[1, 1.8], interval[1, 2])
        verify_in(interval([1.1, 2.2], [3.3, 4.4]), interval(-1, [0, 2.5], [3, 5], [7, 9]))

        def verify_out(x, y):
            self.assertFalse(x in y)
            self.assertNotEqual(x & y, x)
        verify_out(0, interval[1, 2])
        verify_out(4, interval[1, 2])
        verify_out(interval[1, 3], interval[2, 4])
        verify_out(interval(1, 3), interval(2, 4))

    def test_extrema(self):
        self.assertEqual(interval(1, [2, 3], 4).extrema, interval(1, 2, 3, 4))

    def test_midpoint(self):
        self.assertEqual(interval[0, 4].midpoint, interval[2])
        self.assertEqual(interval(-1, 1, 4), interval(-1, [0, 2], [3, 5]).midpoint)


class NewtonTestCase(unittest.TestCase):
    # Exercises the interval Newton root-finding method.

    def test_opts(self):
        self.assertRaises(TypeError,
                          lambda: interval(0,1).newton(None, None, nonexisting=True))

    def test_cubic(self):
        self.assertEqual(
            interval[-2, 2].newton(lambda x: x**3 - x, lambda x: 3*x**2-1),
            interval(-1, 0, 1))
        self.assertEqual(
            interval[-5, 5].newton(lambda x: x**3 + x - 10, lambda x: 3*x**2 + 1),
            interval[2])
        self.assertEqual(
            interval[-5, 5].newton(lambda x: x**3 + x - 15, lambda x: 3*x**2 + 1),
            interval[5249383869325653 * 2.0 ** -51, 5249383869325655 * 2.0 ** -51])
        # The sharpest result would be with 5249383869325654 * 2.0 ** -51 as sup.

    def test_sqrt2(self):
        import math
        f, p = lambda x: x**2 - 2, lambda x: 2 * x
        # u, v are the two consecutive doubles that bracket sqrt(2).
        u, v = 6369051672525772 * 2.0 **-52, 6369051672525773 * 2.0 **-52
        self.assertEqual(v, math.sqrt(2))
        s = interval[u, v]
        self.assertEqual(s, interval[0.1, 5].newton(f, p))
        self.assertEqual(s, interval[0, 2].newton(f, p))
        self.assertEqual(s, interval[-1, 10].newton(f, p))
        self.assertEqual(interval(), interval[2, 5].newton(f, p))
        self.assertEqual(-s, interval[-5, 0].newton(f, p))
        self.assertEqual(-s|s, interval[-5, +5].newton(f, p))


if __name__ == '__main__':
    unittest.main()
import threading

from ctypes import POINTER, Structure, byref, c_char, c_char_p, c_int, c_size_t

from django.contrib.gis.geos.base import GEOSBase
from django.contrib.gis.geos.libgeos import GEOM_PTR, GEOSFuncFactory
from django.contrib.gis.geos.prototypes.errcheck import (
    check_geom, check_sized_string, check_string,
)
from django.contrib.gis.geos.prototypes.geom import c_uchar_p, geos_char_p
from django.utils import six
from django.utils.encoding import force_bytes


# ### Opaque C structures for the GEOS WKT/WKB reader and writer handles. ###
class WKTReader_st(Structure):
    pass


class WKTWriter_st(Structure):
    pass


class WKBReader_st(Structure):
    pass


class WKBWriter_st(Structure):
    pass


WKT_READ_PTR = POINTER(WKTReader_st)
WKT_WRITE_PTR = POINTER(WKTWriter_st)
WKB_READ_PTR = POINTER(WKBReader_st)
# BUGFIX: this was POINTER(WKBReader_st), declaring the WKB *writer* prototypes
# against the *reader* handle type. ctypes pointer types are distinct per
# Structure, so the writer functions must use the writer structure.
WKB_WRITE_PTR = POINTER(WKBWriter_st)

# WKTReader routines.
wkt_reader_create = GEOSFuncFactory('GEOSWKTReader_create', restype=WKT_READ_PTR)
wkt_reader_destroy = GEOSFuncFactory('GEOSWKTReader_destroy', argtypes=[WKT_READ_PTR])

wkt_reader_read = GEOSFuncFactory(
    'GEOSWKTReader_read', argtypes=[WKT_READ_PTR, c_char_p], restype=GEOM_PTR, errcheck=check_geom
)

# WKTWriter routines.
wkt_writer_create = GEOSFuncFactory('GEOSWKTWriter_create', restype=WKT_WRITE_PTR)
wkt_writer_destroy = GEOSFuncFactory('GEOSWKTWriter_destroy', argtypes=[WKT_WRITE_PTR])

wkt_writer_write = GEOSFuncFactory(
    'GEOSWKTWriter_write', argtypes=[WKT_WRITE_PTR, GEOM_PTR], restype=geos_char_p, errcheck=check_string
)

wkt_writer_get_outdim = GEOSFuncFactory(
    'GEOSWKTWriter_getOutputDimension', argtypes=[WKT_WRITE_PTR], restype=c_int
)
wkt_writer_set_outdim = GEOSFuncFactory(
    'GEOSWKTWriter_setOutputDimension', argtypes=[WKT_WRITE_PTR, c_int]
)

wkt_writer_set_trim = GEOSFuncFactory('GEOSWKTWriter_setTrim', argtypes=[WKT_WRITE_PTR, c_char])
wkt_writer_set_precision = GEOSFuncFactory('GEOSWKTWriter_setRoundingPrecision', argtypes=[WKT_WRITE_PTR, c_int])

# WKBReader routines.
wkb_reader_create = GEOSFuncFactory('GEOSWKBReader_create', restype=WKB_READ_PTR)
wkb_reader_destroy = GEOSFuncFactory('GEOSWKBReader_destroy', argtypes=[WKB_READ_PTR])


class WKBReadFunc(GEOSFuncFactory):
    # Although the function definitions take `const unsigned char *`
    # as their parameter, we use c_char_p here so the function may
    # take Python strings directly as parameters.  Inside Python there
    # is not a difference between signed and unsigned characters, so
    # it is not a problem.
    argtypes = [WKB_READ_PTR, c_char_p, c_size_t]
    restype = GEOM_PTR
    errcheck = staticmethod(check_geom)


wkb_reader_read = WKBReadFunc('GEOSWKBReader_read')
wkb_reader_read_hex = WKBReadFunc('GEOSWKBReader_readHEX')

# WKBWriter routines.
wkb_writer_create = GEOSFuncFactory('GEOSWKBWriter_create', restype=WKB_WRITE_PTR)
wkb_writer_destroy = GEOSFuncFactory('GEOSWKBWriter_destroy', argtypes=[WKB_WRITE_PTR])


# WKB writing prototypes: the size of the output buffer is returned through
# the trailing ``size_t *`` out-parameter, checked by check_sized_string.
class WKBWriteFunc(GEOSFuncFactory):
    argtypes = [WKB_WRITE_PTR, GEOM_PTR, POINTER(c_size_t)]
    restype = c_uchar_p
    errcheck = staticmethod(check_sized_string)


wkb_writer_write = WKBWriteFunc('GEOSWKBWriter_write')
wkb_writer_write_hex = WKBWriteFunc('GEOSWKBWriter_writeHEX')


# WKBWriter property getter/setter prototypes.
class WKBWriterGet(GEOSFuncFactory):
    argtypes = [WKB_WRITE_PTR]
    restype = c_int


class WKBWriterSet(GEOSFuncFactory):
    argtypes = [WKB_WRITE_PTR, c_int]


wkb_writer_get_byteorder = WKBWriterGet('GEOSWKBWriter_getByteOrder')
wkb_writer_set_byteorder = WKBWriterSet('GEOSWKBWriter_setByteOrder')
wkb_writer_get_outdim = WKBWriterGet('GEOSWKBWriter_getOutputDimension')
wkb_writer_set_outdim = WKBWriterSet('GEOSWKBWriter_setOutputDimension')
wkb_writer_get_include_srid = WKBWriterGet('GEOSWKBWriter_getIncludeSRID', restype=c_char)
wkb_writer_set_include_srid = WKBWriterSet('GEOSWKBWriter_setIncludeSRID', argtypes=[WKB_WRITE_PTR, c_char])


class IOBase(GEOSBase):
    "Base class for GEOS I/O objects."
    def __init__(self):
        # Getting the pointer with the constructor.
        self.ptr = self._constructor()
        # Loading the real destructor function at this point as doing it in
        # __del__ is too late (import error).
        self._destructor.func = self._destructor.get_func(
            *self._destructor.args, **self._destructor.kwargs
        )

    def __del__(self):
        # Cleaning up with the appropriate destructor.
        try:
            self._destructor(self._ptr)
        except (AttributeError, TypeError):
            pass  # Some part might already have been garbage collected


# ### WKT/WKB Reader classes ###
class _WKTReader(IOBase):
    _constructor = wkt_reader_create
    _destructor = wkt_reader_destroy
    ptr_type = WKT_READ_PTR

    def read(self, wkt):
        """Return a GEOS geometry pointer parsed from the given WKT string."""
        if not isinstance(wkt, (bytes, six.string_types)):
            raise TypeError
        return wkt_reader_read(self.ptr, force_bytes(wkt))


class _WKBReader(IOBase):
    _constructor = wkb_reader_create
    _destructor = wkb_reader_destroy
    ptr_type = WKB_READ_PTR

    def read(self, wkb):
        "Returns a _pointer_ to C GEOS Geometry object from the given WKB."
        if isinstance(wkb, six.memoryview):
            wkb_s = bytes(wkb)
            return wkb_reader_read(self.ptr, wkb_s, len(wkb_s))
        elif isinstance(wkb, (bytes, six.string_types)):
            # String input is assumed to be hex-encoded WKB.
            return wkb_reader_read_hex(self.ptr, wkb, len(wkb))
        else:
            raise TypeError


# ### WKT/WKB Writer classes ###
class WKTWriter(IOBase):
    _constructor = wkt_writer_create
    _destructor = wkt_writer_destroy
    ptr_type = WKT_WRITE_PTR

    # Defaults mirror GEOS' own (no trimming, full precision).
    _trim = False
    _precision = None

    def __init__(self, dim=2, trim=False, precision=None):
        super(WKTWriter, self).__init__()
        # Only call into GEOS when the requested value differs from the default.
        if bool(trim) != self._trim:
            self.trim = trim
        if precision is not None:
            self.precision = precision
        self.outdim = dim

    def write(self, geom):
        "Returns the WKT representation of the given geometry."
        return wkt_writer_write(self.ptr, geom.ptr)

    @property
    def outdim(self):
        return wkt_writer_get_outdim(self.ptr)

    @outdim.setter
    def outdim(self, new_dim):
        if new_dim not in (2, 3):
            raise ValueError('WKT output dimension must be 2 or 3')
        wkt_writer_set_outdim(self.ptr, new_dim)

    @property
    def trim(self):
        return self._trim

    @trim.setter
    def trim(self, flag):
        if bool(flag) != self._trim:
            self._trim = bool(flag)
            # GEOS expects a single byte, not a Python bool.
            wkt_writer_set_trim(self.ptr, b'\x01' if flag else b'\x00')

    @property
    def precision(self):
        return self._precision

    @precision.setter
    def precision(self, precision):
        if (not isinstance(precision, int) or precision < 0) and precision is not None:
            raise AttributeError('WKT output rounding precision must be non-negative integer or None.')
        if precision != self._precision:
            self._precision = precision
            # -1 disables rounding in GEOS.
            wkt_writer_set_precision(self.ptr, -1 if precision is None else precision)


class WKBWriter(IOBase):
    _constructor = wkb_writer_create
    _destructor = wkb_writer_destroy
    ptr_type = WKB_WRITE_PTR

    def __init__(self, dim=2):
        super(WKBWriter, self).__init__()
        self.outdim = dim

    def _handle_empty_point(self, geom):
        from django.contrib.gis.geos import Point
        if isinstance(geom, Point) and geom.empty:
            if self.srid:
                # PostGIS uses POINT(NaN NaN) for WKB representation of empty
                # points. Use it for EWKB as it's a PostGIS specific format.
                # https://trac.osgeo.org/postgis/ticket/3181
                geom = Point(float('NaN'), float('NaN'), srid=geom.srid)
            else:
                raise ValueError('Empty point is not representable in WKB.')
        return geom

    def write(self, geom):
        "Returns the WKB representation of the given geometry."
        from django.contrib.gis.geos import Polygon
        geom = self._handle_empty_point(geom)
        wkb = wkb_writer_write(self.ptr, geom.ptr, byref(c_size_t()))
        if isinstance(geom, Polygon) and geom.empty:
            # Fix GEOS output for empty polygon.
            # See https://trac.osgeo.org/geos/ticket/680.
            wkb = wkb[:-8] + b'\0' * 4
        return six.memoryview(wkb)

    def write_hex(self, geom):
        "Returns the HEXEWKB representation of the given geometry."
        from django.contrib.gis.geos.polygon import Polygon
        geom = self._handle_empty_point(geom)
        wkb = wkb_writer_write_hex(self.ptr, geom.ptr, byref(c_size_t()))
        if isinstance(geom, Polygon) and geom.empty:
            # Same empty-polygon fix as write(), doubled for hex encoding.
            wkb = wkb[:-16] + b'0' * 8
        return wkb

    # ### WKBWriter Properties ###

    # Property for getting/setting the byteorder.
    def _get_byteorder(self):
        return wkb_writer_get_byteorder(self.ptr)

    def _set_byteorder(self, order):
        if order not in (0, 1):
            raise ValueError('Byte order parameter must be 0 (Big Endian) or 1 (Little Endian).')
        wkb_writer_set_byteorder(self.ptr, order)

    byteorder = property(_get_byteorder, _set_byteorder)

    # Property for getting/setting the output dimension.
    @property
    def outdim(self):
        return wkb_writer_get_outdim(self.ptr)

    @outdim.setter
    def outdim(self, new_dim):
        if new_dim not in (2, 3):
            raise ValueError('WKB output dimension must be 2 or 3')
        wkb_writer_set_outdim(self.ptr, new_dim)

    # Property for getting/setting the include srid flag.
    @property
    def srid(self):
        # GEOS returns a single byte; nonzero means SRIDs are embedded (EWKB).
        return bool(ord(wkb_writer_get_include_srid(self.ptr)))

    @srid.setter
    def srid(self, include):
        if include:
            flag = b'\x01'
        else:
            flag = b'\x00'
        wkb_writer_set_include_srid(self.ptr, flag)


# `ThreadLocalIO` stores one reader/writer of each kind per thread, since the
# underlying GEOS handles are not thread-safe.
class ThreadLocalIO(threading.local):
    wkt_r = None
    wkt_w = None
    wkb_r = None
    wkb_w = None
    ewkb_w = None


thread_context = ThreadLocalIO()


# These module-level routines return the I/O object that is local to the
# thread, creating it lazily on first use and re-configuring the cached
# writer instances on subsequent calls.
def wkt_r():
    if not thread_context.wkt_r:
        thread_context.wkt_r = _WKTReader()
    return thread_context.wkt_r


def wkt_w(dim=2, trim=False, precision=None):
    if not thread_context.wkt_w:
        thread_context.wkt_w = WKTWriter(dim=dim, trim=trim, precision=precision)
    else:
        thread_context.wkt_w.outdim = dim
        thread_context.wkt_w.trim = trim
        thread_context.wkt_w.precision = precision
    return thread_context.wkt_w


def wkb_r():
    if not thread_context.wkb_r:
        thread_context.wkb_r = _WKBReader()
    return thread_context.wkb_r


def wkb_w(dim=2):
    if not thread_context.wkb_w:
        thread_context.wkb_w = WKBWriter(dim=dim)
    else:
        thread_context.wkb_w.outdim = dim
    return thread_context.wkb_w


def ewkb_w(dim=2):
    # Same as wkb_w() but with SRID embedding enabled (EWKB output).
    if not thread_context.ewkb_w:
        thread_context.ewkb_w = WKBWriter(dim=dim)
        thread_context.ewkb_w.srid = True
    else:
        thread_context.ewkb_w.outdim = dim
    return thread_context.ewkb_w
""" Downloads the following: - Korean Wikipedia texts - Korean """ from sqlparse import parsestream from sqlparse.sql import Parenthesis for statement in parsestream(open('data/test.sql')): texts = [str(token.tokens[1].tokens[-1]).decode('string_escape') for token in statement.tokens if isinstance(token, Parenthesis)] print texts texts = [text for text in texts if text[0] != '#'] if texts: print "\n===\n".join(texts)
""" Methods to characterize image textures. """ import numpy as np from ._texture import _glcm_loop, _local_binary_pattern def greycomatrix(image, distances, angles, levels=256, symmetric=False, normed=False): """Calculate the grey-level co-occurrence matrix. A grey level co-occurence matrix is a histogram of co-occuring greyscale values at a given offset over an image. Parameters ---------- image : array_like of uint8 Integer typed input image. The image will be cast to uint8, so the maximum value must be less than 256. distances : array_like List of pixel pair distance offsets. angles : array_like List of pixel pair angles in radians. levels : int, optional The input image should contain integers in [0, levels-1], where levels indicate the number of grey-levels counted (typically 256 for an 8-bit image). The maximum value is 256. symmetric : bool, optional If True, the output matrix `P[:, :, d, theta]` is symmetric. This is accomplished by ignoring the order of value pairs, so both (i, j) and (j, i) are accumulated when (i, j) is encountered for a given offset. The default is False. normed : bool, optional If True, normalize each matrix `P[:, :, d, theta]` by dividing by the total number of accumulated co-occurrences for the given offset. The elements of the resulting matrix sum to 1. The default is False. Returns ------- P : 4-D ndarray The grey-level co-occurrence histogram. The value `P[i,j,d,theta]` is the number of times that grey-level `j` occurs at a distance `d` and at an angle `theta` from grey-level `i`. If `normed` is `False`, the output is of type uint32, otherwise it is float64. References ---------- .. [1] The GLCM Tutorial Home Page, http://www.fp.ucalgary.ca/mhallbey/tutorial.htm .. [2] Pattern Recognition Engineering, Morton Nadler & Eric P. Smith .. [3] Wikipedia, http://en.wikipedia.org/wiki/Co-occurrence_matrix Examples -------- Compute 2 GLCMs: One for a 1-pixel offset to the right, and one for a 1-pixel offset upwards. 
>>> image = np.array([[0, 0, 1, 1], ... [0, 0, 1, 1], ... [0, 2, 2, 2], ... [2, 2, 3, 3]], dtype=np.uint8) >>> result = greycomatrix(image, [1], [0, np.pi/4, np.pi/2, 3*np.pi/4], levels=4) >>> result[:, :, 0, 0] array([[2, 2, 1, 0], [0, 2, 0, 0], [0, 0, 3, 1], [0, 0, 0, 1]], dtype=uint32) >>> result[:, :, 0, 1] array([[1, 1, 3, 0], [0, 1, 1, 0], [0, 0, 0, 2], [0, 0, 0, 0]], dtype=uint32) >>> result[:, :, 0, 2] array([[3, 0, 2, 0], [0, 2, 2, 0], [0, 0, 1, 2], [0, 0, 0, 0]], dtype=uint32) >>> result[:, :, 0, 3] array([[2, 0, 0, 0], [1, 1, 2, 0], [0, 0, 2, 1], [0, 0, 0, 0]], dtype=uint32) """ assert levels <= 256 image = np.ascontiguousarray(image) assert image.ndim == 2 assert image.min() >= 0 assert image.max() < levels image = image.astype(np.uint8) distances = np.ascontiguousarray(distances, dtype=np.float64) angles = np.ascontiguousarray(angles, dtype=np.float64) assert distances.ndim == 1 assert angles.ndim == 1 P = np.zeros((levels, levels, len(distances), len(angles)), dtype=np.uint32, order='C') # count co-occurences _glcm_loop(image, distances, angles, levels, P) # make each GLMC symmetric if symmetric: Pt = np.transpose(P, (1, 0, 2, 3)) P = P + Pt # normalize each GLMC if normed: P = P.astype(np.float64) glcm_sums = np.apply_over_axes(np.sum, P, axes=(0, 1)) glcm_sums[glcm_sums == 0] = 1 P /= glcm_sums return P def greycoprops(P, prop='contrast'): """Calculate texture properties of a GLCM. Compute a feature of a grey level co-occurrence matrix to serve as a compact summary of the matrix. The properties are computed as follows: - 'contrast': :math:`\\sum_{i,j=0}^{levels-1} P_{i,j}(i-j)^2` - 'dissimilarity': :math:`\\sum_{i,j=0}^{levels-1}P_{i,j}|i-j|` - 'homogeneity': :math:`\\sum_{i,j=0}^{levels-1}\\frac{P_{i,j}}{1+(i-j)^2}` - 'ASM': :math:`\\sum_{i,j=0}^{levels-1} P_{i,j}^2` - 'energy': :math:`\\sqrt{ASM}` - 'correlation': .. 
math:: \\sum_{i,j=0}^{levels-1} P_{i,j}\\left[\\frac{(i-\\mu_i) \\ (j-\\mu_j)}{\\sqrt{(\\sigma_i^2)(\\sigma_j^2)}}\\right] Parameters ---------- P : ndarray Input array. `P` is the grey-level co-occurrence histogram for which to compute the specified property. The value `P[i,j,d,theta]` is the number of times that grey-level j occurs at a distance d and at an angle theta from grey-level i. prop : {'contrast', 'dissimilarity', 'homogeneity', 'energy', \ 'correlation', 'ASM'}, optional The property of the GLCM to compute. The default is 'contrast'. Returns ------- results : 2-D ndarray 2-dimensional array. `results[d, a]` is the property 'prop' for the d'th distance and the a'th angle. References ---------- .. [1] The GLCM Tutorial Home Page, http://www.fp.ucalgary.ca/mhallbey/tutorial.htm Examples -------- Compute the contrast for GLCMs with distances [1, 2] and angles [0 degrees, 90 degrees] >>> image = np.array([[0, 0, 1, 1], ... [0, 0, 1, 1], ... [0, 2, 2, 2], ... [2, 2, 3, 3]], dtype=np.uint8) >>> g = greycomatrix(image, [1, 2], [0, np.pi/2], levels=4, ... normed=True, symmetric=True) >>> contrast = greycoprops(g, 'contrast') >>> contrast array([[ 0.58333333, 1. ], [ 1.25 , 2.75 ]]) """ assert P.ndim == 4 (num_level, num_level2, num_dist, num_angle) = P.shape assert num_level == num_level2 assert num_dist > 0 assert num_angle > 0 # create weights for specified property I, J = np.ogrid[0:num_level, 0:num_level] if prop == 'contrast': weights = (I - J) ** 2 elif prop == 'dissimilarity': weights = np.abs(I - J) elif prop == 'homogeneity': weights = 1. / (1. 
+ (I - J) ** 2) elif prop in ['ASM', 'energy', 'correlation']: pass else: raise ValueError('%s is an invalid property' % (prop)) # compute property for each GLCM if prop == 'energy': asm = np.apply_over_axes(np.sum, (P ** 2), axes=(0, 1))[0, 0] results = np.sqrt(asm) elif prop == 'ASM': results = np.apply_over_axes(np.sum, (P ** 2), axes=(0, 1))[0, 0] elif prop == 'correlation': results = np.zeros((num_dist, num_angle), dtype=np.float64) I = np.array(range(num_level)).reshape((num_level, 1, 1, 1)) J = np.array(range(num_level)).reshape((1, num_level, 1, 1)) diff_i = I - np.apply_over_axes(np.sum, (I * P), axes=(0, 1))[0, 0] diff_j = J - np.apply_over_axes(np.sum, (J * P), axes=(0, 1))[0, 0] std_i = np.sqrt(np.apply_over_axes(np.sum, (P * (diff_i) ** 2), axes=(0, 1))[0, 0]) std_j = np.sqrt(np.apply_over_axes(np.sum, (P * (diff_j) ** 2), axes=(0, 1))[0, 0]) cov = np.apply_over_axes(np.sum, (P * (diff_i * diff_j)), axes=(0, 1))[0, 0] # handle the special case of standard deviations near zero mask_0 = std_i < 1e-15 mask_0[std_j < 1e-15] = True results[mask_0] = 1 # handle the standard case mask_1 = mask_0 == False results[mask_1] = cov[mask_1] / (std_i[mask_1] * std_j[mask_1]) elif prop in ['contrast', 'dissimilarity', 'homogeneity']: weights = weights.reshape((num_level, num_level, 1, 1)) results = np.apply_over_axes(np.sum, (P * weights), axes=(0, 1))[0, 0] return results def local_binary_pattern(image, P, R, method='default'): """Gray scale and rotation invariant LBP (Local Binary Patterns). LBP is an invariant descriptor that can be used for texture classification. Parameters ---------- image : (N, M) array Graylevel image. P : int Number of circularly symmetric neighbour set points (quantization of the angular space). R : float Radius of circle (spatial resolution of the operator). method : {'default', 'ror', 'uniform', 'var'} Method to determine the pattern. * 'default': original local binary pattern which is gray scale but not rotation invariant. 
* 'ror': extension of default implementation which is gray scale and rotation invariant. * 'uniform': improved rotation invariance with uniform patterns and finer quantization of the angular space which is gray scale and rotation invariant. * 'nri_uniform': non rotation-invariant uniform patterns variant which is only gray scale invariant [2]. * 'var': rotation invariant variance measures of the contrast of local image texture which is rotation but not gray scale invariant. Returns ------- output : (N, M) array LBP image. References ---------- .. [1] Multiresolution Gray-Scale and Rotation Invariant Texture Classification with Local Binary Patterns. Timo Ojala, Matti Pietikainen, Topi Maenpaa. http://www.rafbis.it/biplab15/images/stories/docenti/Danielriccio/\ Articoliriferimento/LBP.pdf, 2002. .. [2] Face recognition with local binary patterns. Timo Ahonen, Abdenour Hadid, Matti Pietikainen, http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.214.6851, 2004. """ methods = { 'default': ord('D'), 'ror': ord('R'), 'uniform': ord('U'), 'nri_uniform': ord('N'), 'var': ord('V') } image = np.ascontiguousarray(image, dtype=np.double) output = _local_binary_pattern(image, P, R, methods[method.lower()]) return output
import sys

# Minimal app set needed for South to introspect the plugin's models.
INSTALLED_APPS = [
    'django.contrib.contenttypes',
    'django.contrib.auth',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.admin',
    'mptt',
    'cms',
    'menus',
    'djangocms_inherit',
    'south',
]

# Throwaway in-memory database: migrations are generated, never applied.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': ':memory:',
    }
}

TEMPLATE_CONTEXT_PROCESSORS = [
    'django.core.context_processors.auth',
    'django.core.context_processors.i18n',
    'django.core.context_processors.request',
    'django.core.context_processors.media',
    'django.core.context_processors.static',
    'cms.context_processors.media',
    'sekizai.context_processors.sekizai',
]

ROOT_URLCONF = 'cms.urls'


def schemamigration():
    # Configure a minimal Django environment so that running
    # ``schemamigration.py --initial`` behaves exactly like
    # ``manage.py schemamigration djangocms_inherit --initial``.
    from django.conf import settings
    from django.core.management import ManagementUtility

    settings.configure(
        INSTALLED_APPS=INSTALLED_APPS,
        ROOT_URLCONF=ROOT_URLCONF,
        DATABASES=DATABASES,
        TEMPLATE_CONTEXT_PROCESSORS=TEMPLATE_CONTEXT_PROCESSORS,
    )
    # Splice the management command and app name in right after argv[0],
    # keeping any user-supplied flags (e.g. --initial) at the end.
    command_line = sys.argv[:1] + ['schemamigration', 'djangocms_inherit'] + sys.argv[1:]
    ManagementUtility(command_line).execute()


if __name__ == "__main__":
    schemamigration()
from ...models import GPModel
import numpy as np


class CostModel(object):
    """
    Class to handle the cost of evaluating the function.

    param cost_withGradients: function that returns the cost of evaluating
    the function and its gradient. By default no cost is used. Options are:
        - cost_withGradients is some pre-defined cost function. Should return
          numpy arrays as outputs.
        - cost_withGradients = 'evaluation_time'.

    .. Note:: if cost_withGradients = 'evaluation_time' the evaluation time
    of the function is used to model a GP whose (exponentiated) mean is used
    as cost.
    """
    def __init__(self, cost_withGradients):
        super(CostModel, self).__init__()

        self.cost_type = cost_withGradients

        # --- Set-up evaluation cost
        if self.cost_type is None:
            # No cost given: every evaluation is treated as equally expensive.
            self.cost_withGradients = constant_cost_withGradients
            self.cost_type = 'Constant cost'
        elif self.cost_type == 'evaluation_time':
            # Model the log of the wall-clock evaluation time with a GP.
            self.cost_model = GPModel()
            self.cost_withGradients = self._cost_gp_withGradients
            self.num_updates = 0
        else:
            self.cost_withGradients = cost_withGradients
            self.cost_type = 'User defined cost'

    def _cost_gp(self, x):
        """
        Predicts the time cost of evaluating the function at x.
        """
        m, _, _, _ = self.cost_model.predict_withGradients(x)
        # The GP is fit on log-costs, so exponentiate the posterior mean.
        return np.exp(m)

    def _cost_gp_withGradients(self, x):
        """
        Predicts the time cost and its gradient of evaluating the function
        at x.
        """
        m, _, dmdx, _ = self.cost_model.predict_withGradients(x)
        # Chain rule on the log-cost GP: d/dx exp(m(x)) = exp(m(x)) * dm/dx.
        return np.exp(m), np.exp(m)*dmdx

    def update_cost_model(self, x, cost_x):
        """
        Updates the GP used to handle the cost.

        param x: input locations of the GP for the cost model.
        param cost_x: values of the time cost at the input locations.
        """
        # Only the 'evaluation_time' mode keeps a cost model to update.
        if self.cost_type == 'evaluation_time':
            cost_evals = np.log(np.atleast_2d(np.asarray(cost_x)).T)

            if self.num_updates == 0:
                X_all = x
                costs_all = cost_evals
            else:
                # Append the new observations to the data already in the GP.
                X_all = np.vstack((self.cost_model.model.X, x))
                costs_all = np.vstack((self.cost_model.model.Y, cost_evals))

            self.num_updates += 1
            self.cost_model.updateModel(X_all, costs_all, None, None)


def constant_cost_withGradients(x):
    """
    Constant cost function used by default: cost = 1, d_cost = 0.
    """
    return np.ones(x.shape[0])[:, None], np.zeros(x.shape)
""" Copyright (C) 2015, MuChu Hsu Contributed by Muchu Hsu (muchu1983@gmail.com) This file is part of BSD license <https://opensource.org/licenses/BSD-3-Clause> """ from selenium import webdriver import os import time import logging import re import random from cameo.utility import Utility from cameo.localdb import LocalDbForTECHORANGE """ 抓取 科技報橘 html 存放到 source_html """ class SpiderForTECHORANGE: #建構子 def __init__(self): self.SOURCE_HTML_BASE_FOLDER_PATH = u"cameo_res\\source_html" self.PARSED_RESULT_BASE_FOLDER_PATH = u"cameo_res\\parsed_result" self.strWebsiteDomain = u"http://buzzorange.com/techorange" self.dicSubCommandHandler = { "index":self.downloadIndexPage, "tag":self.downloadTagPag, "news":self.downloadNewsPage } self.utility = Utility() self.db = LocalDbForTECHORANGE() self.driver = None #取得 spider 使用資訊 def getUseageMessage(self): return ("- TECHORANGE -\n" "useage:\n" "index - download entry page of TECHORANGE \n" "tag - download not obtained tag page \n" "news [tag] - download not obtained news [of given tag] \n") #取得 selenium driver 物件 def getDriver(self): chromeDriverExeFilePath = "cameo_res\\chromedriver.exe" driver = webdriver.Chrome(chromeDriverExeFilePath) return driver #初始化 selenium driver 物件 def initDriver(self): if self.driver is None: self.driver = self.getDriver() #終止 selenium driver 物件 def quitDriver(self): self.driver.quit() self.driver = None #執行 spider def runSpider(self, lstSubcommand=None): strSubcommand = lstSubcommand[0] strArg1 = None if len(lstSubcommand) == 2: strArg1 = lstSubcommand[1] self.initDriver() #init selenium driver self.dicSubCommandHandler[strSubcommand](strArg1) self.quitDriver() #quit selenium driver #下載 index 頁面 def downloadIndexPage(self, uselessArg1=None): logging.info("download index page") strIndexHtmlFolderPath = self.SOURCE_HTML_BASE_FOLDER_PATH + u"\\TECHORANGE" if not os.path.exists(strIndexHtmlFolderPath): os.mkdir(strIndexHtmlFolderPath) #mkdir source_html/TECHORANGE/ #科技報橘首頁 
self.driver.get("https://buzzorange.com/techorange/") #儲存 html strIndexHtmlFilePath = strIndexHtmlFolderPath + u"\\index.html" self.utility.overwriteSaveAs(strFilePath=strIndexHtmlFilePath, unicodeData=self.driver.page_source) #下載 tag 頁面 def downloadTagPag(self, uselessArg1=None): logging.info("download tag page") strTagHtmlFolderPath = self.SOURCE_HTML_BASE_FOLDER_PATH + u"\\TECHORANGE\\tag" if not os.path.exists(strTagHtmlFolderPath): os.mkdir(strTagHtmlFolderPath) #mkdir source_html/TECHORANGE/tag/ strTagWebsiteDomain = self.strWebsiteDomain + u"/tag" #取得 Db 中尚未下載的 Tag 名稱 lstStrNotObtainedTagName = self.db.fetchallNotObtainedTagName() for strNotObtainedTagName in lstStrNotObtainedTagName: #略過名稱太長的 tag if len(strNotObtainedTagName) > 60: continue strTagUrl = strTagWebsiteDomain + u"/" + strNotObtainedTagName #tag 第0頁 intPageNum = 0 time.sleep(random.randint(2,5)) #sleep random time self.driver.get(strTagUrl) #儲存 html strTagHtmlFilePath = strTagHtmlFolderPath + u"\\%d_%s_tag.html"%(intPageNum, strNotObtainedTagName) self.utility.overwriteSaveAs(strFilePath=strTagHtmlFilePath, unicodeData=self.driver.page_source) #tag 下一頁 elesNextPageA = self.driver.find_elements_by_css_selector("div.nav-links a.next.page-numbers") while len(elesNextPageA) != 0: time.sleep(random.randint(2,5)) #sleep random time intPageNum = intPageNum+1 strTagUrl = elesNextPageA[0].get_attribute("href") self.driver.get(strTagUrl) #儲存 html strTagHtmlFilePath = strTagHtmlFolderPath + u"\\%d_%s_tag.html"%(intPageNum, strNotObtainedTagName) self.utility.overwriteSaveAs(strFilePath=strTagHtmlFilePath, unicodeData=self.driver.page_source) #tag 再下一頁 elesNextPageA = self.driver.find_elements_by_css_selector("div.nav-links a.next.page-numbers") #更新tag DB 為已抓取 (isGot = 1) self.db.updateTagStatusIsGot(strTagName=strNotObtainedTagName) logging.info("got tag %s"%strNotObtainedTagName) #限縮 字串長度低於 128 字元 def limitStrLessThen128Char(self, strStr=None): if len(strStr) > 128: logging.info("limit str less then 128 
char") return strStr[:127] + u"_" else: return strStr #下載 news 頁面 (strTagName == None 會自動找尋已下載完成之 tag,但若未先執行 parser tag 即使 tag 已下載完成亦無法下載 news) def downloadNewsPage(self, strTagName=None): if strTagName is None: #未指定 tag lstStrObtainedTagName = self.db.fetchallCompletedObtainedTagName() for strObtainedTagName in lstStrObtainedTagName: self.downloadNewsPageWithGivenTagName(strTagName=strObtainedTagName) else: #有指定 tag 名稱 self.downloadNewsPageWithGivenTagName(strTagName=strTagName) #下載 news 頁面 (指定 tag 名稱) def downloadNewsPageWithGivenTagName(self, strTagName=None): logging.info("download news page with tag %s"%strTagName) strNewsHtmlFolderPath = self.SOURCE_HTML_BASE_FOLDER_PATH + u"\\TECHORANGE\\news" if not os.path.exists(strNewsHtmlFolderPath): os.mkdir(strNewsHtmlFolderPath) #mkdir source_html/TECHORANGE/news/ #取得 DB 紀錄中,指定 strTagName tag 的 news url lstStrNewsUrl = self.db.fetchallNewsUrlByTagName(strTagName=strTagName) intDownloadedNewsCount = 0#紀錄下載 news 頁面數量 timeStart = time.time() #計時開始時間點 timeEnd = None #計時結束時間點 for strNewsUrl in lstStrNewsUrl: #檢查是否已下載 if not self.db.checkNewsIsGot(strNewsUrl=strNewsUrl): if intDownloadedNewsCount%10 == 0: #計算下載10筆news所需時間 timeEnd = time.time() timeCost = timeEnd - timeStart logging.info("download 10 news cost %f sec"%timeCost) timeStart = timeEnd intDownloadedNewsCount = intDownloadedNewsCount+1 time.sleep(random.randint(2,5)) #sleep random time self.driver.get(strNewsUrl) #儲存 html strNewsName = re.match("^https://buzzorange.com/techorange/[\d]{4}/[\d]{2}/[\d]{2}/(.*)/$", strNewsUrl).group(1) strNewsName = self.limitStrLessThen128Char(strStr=strNewsName) #將名稱縮短小於128字完 strNewsHtmlFilePath = strNewsHtmlFolderPath + u"\\%s_news.html"%strNewsName self.utility.overwriteSaveAs(strFilePath=strNewsHtmlFilePath, unicodeData=self.driver.page_source) #更新news DB 為已抓取 (isGot = 1) self.db.updateNewsStatusIsGot(strNewsUrl=strNewsUrl)
import itertools
import os
import re
from abc import ABC, abstractmethod
from glob import glob
from pathlib import Path

import numpy as np
import torch
from PIL import Image

from ..io.image import _read_png_16
from .utils import verify_str_arg
from .vision import VisionDataset


__all__ = (
    "KittiFlow",
    "Sintel",
    "FlyingThings3D",
    "FlyingChairs",
    "HD1K",
)


class FlowDataset(ABC, VisionDataset):
    # Base class for all optical-flow datasets below. Subclasses populate
    # self._image_list (pairs of image paths) and self._flow_list (flow file
    # paths) in __init__ and implement _read_flow.
    #
    # Some datasets like Kitti have a built-in valid_flow_mask, indicating which flow values are valid
    # For those we return (img1, img2, flow, valid_flow_mask), and for the rest we return (img1, img2, flow),
    # and it's up to whatever consumes the dataset to decide what valid_flow_mask should be.
    _has_builtin_flow_mask = False

    def __init__(self, root, transforms=None):
        super().__init__(root=root)
        self.transforms = transforms
        self._flow_list = []
        self._image_list = []

    def _read_img(self, file_name):
        # Normalize every image to 3-channel RGB so transforms see a
        # consistent mode regardless of the on-disk format.
        img = Image.open(file_name)
        if img.mode != "RGB":
            img = img.convert("RGB")
        return img

    @abstractmethod
    def _read_flow(self, file_name):
        # Return the flow or a tuple with the flow and the valid_flow_mask if _has_builtin_flow_mask is True
        pass

    def __getitem__(self, index):
        img1 = self._read_img(self._image_list[index][0])
        img2 = self._read_img(self._image_list[index][1])

        if self._flow_list:  # it will be empty for some dataset when split="test"
            flow = self._read_flow(self._flow_list[index])
            if self._has_builtin_flow_mask:
                flow, valid_flow_mask = flow
            else:
                valid_flow_mask = None
        else:
            flow = valid_flow_mask = None

        if self.transforms is not None:
            img1, img2, flow, valid_flow_mask = self.transforms(img1, img2, flow, valid_flow_mask)

        if self._has_builtin_flow_mask or valid_flow_mask is not None:
            # The `or valid_flow_mask is not None` part is here because the mask can be generated within a transform
            return img1, img2, flow, valid_flow_mask
        else:
            return img1, img2, flow

    def __len__(self):
        return len(self._image_list)

    def __rmul__(self, v):
        # `3 * dataset` repeats the dataset 3 times via ConcatDataset.
        return torch.utils.data.ConcatDataset([self] * v)


class Sintel(FlowDataset):
    """`Sintel <http://sintel.is.tue.mpg.de/>`_ Dataset for optical flow.

    The dataset is expected to have the following structure: ::

        root
            Sintel
                testing
                    clean
                        scene_1
                        scene_2
                        ...
                    final
                        scene_1
                        scene_2
                        ...
                training
                    clean
                        scene_1
                        scene_2
                        ...
                    final
                        scene_1
                        scene_2
                        ...
                    flow
                        scene_1
                        scene_2
                        ...

    Args:
        root (string): Root directory of the Sintel Dataset.
        split (string, optional): The dataset split, either "train" (default) or "test"
        pass_name (string, optional): The pass to use, either "clean" (default), "final", or "both". See link above for
            details on the different passes.
        transforms (callable, optional): A function/transform that takes in
            ``img1, img2, flow, valid_flow_mask`` and returns a transformed version.
            ``valid_flow_mask`` is expected for consistency with other datasets which
            return a built-in valid mask, such as :class:`~torchvision.datasets.KittiFlow`.
    """

    def __init__(self, root, split="train", pass_name="clean", transforms=None):
        super().__init__(root=root, transforms=transforms)

        verify_str_arg(split, "split", valid_values=("train", "test"))
        verify_str_arg(pass_name, "pass_name", valid_values=("clean", "final", "both"))
        passes = ["clean", "final"] if pass_name == "both" else [pass_name]

        root = Path(root) / "Sintel"
        flow_root = root / "training" / "flow"

        for pass_name in passes:
            split_dir = "training" if split == "train" else split
            image_root = root / split_dir / pass_name
            for scene in os.listdir(image_root):
                image_list = sorted(glob(str(image_root / scene / "*.png")))
                # consecutive frames form the (img1, img2) pairs
                for i in range(len(image_list) - 1):
                    self._image_list += [[image_list[i], image_list[i + 1]]]

                if split == "train":
                    self._flow_list += sorted(glob(str(flow_root / scene / "*.flo")))

    def __getitem__(self, index):
        """Return example at given index.

        Args:
            index(int): The index of the example to retrieve

        Returns:
            tuple: A 3-tuple with ``(img1, img2, flow)``.
            The flow is a numpy array of shape (2, H, W) and the images are PIL images.
            ``flow`` is None if ``split="test"``.
            If a valid flow mask is generated within the ``transforms`` parameter,
            a 4-tuple with ``(img1, img2, flow, valid_flow_mask)`` is returned.
        """
        return super().__getitem__(index)

    def _read_flow(self, file_name):
        return _read_flo(file_name)


class KittiFlow(FlowDataset):
    """`KITTI <http://www.cvlibs.net/datasets/kitti/eval_scene_flow.php?benchmark=flow>`__ dataset for optical flow (2015).

    The dataset is expected to have the following structure: ::

        root
            KittiFlow
                testing
                    image_2
                training
                    image_2
                    flow_occ

    Args:
        root (string): Root directory of the KittiFlow Dataset.
        split (string, optional): The dataset split, either "train" (default) or "test"
        transforms (callable, optional): A function/transform that takes in
            ``img1, img2, flow, valid_flow_mask`` and returns a transformed version.
    """

    _has_builtin_flow_mask = True

    def __init__(self, root, split="train", transforms=None):
        super().__init__(root=root, transforms=transforms)

        verify_str_arg(split, "split", valid_values=("train", "test"))

        root = Path(root) / "KittiFlow" / (split + "ing")
        # KITTI names frame pairs *_10.png / *_11.png
        images1 = sorted(glob(str(root / "image_2" / "*_10.png")))
        images2 = sorted(glob(str(root / "image_2" / "*_11.png")))

        if not images1 or not images2:
            raise FileNotFoundError(
                "Could not find the Kitti flow images. Please make sure the directory structure is correct."
            )

        for img1, img2 in zip(images1, images2):
            self._image_list += [[img1, img2]]

        if split == "train":
            self._flow_list = sorted(glob(str(root / "flow_occ" / "*_10.png")))

    def __getitem__(self, index):
        """Return example at given index.

        Args:
            index(int): The index of the example to retrieve

        Returns:
            tuple: A 4-tuple with ``(img1, img2, flow, valid_flow_mask)``
            where ``valid_flow_mask`` is a numpy boolean mask of shape (H, W)
            indicating which flow values are valid. The flow is a numpy array of
            shape (2, H, W) and the images are PIL images. ``flow`` and ``valid_flow_mask`` are None if
            ``split="test"``.
        """
        return super().__getitem__(index)

    def _read_flow(self, file_name):
        return _read_16bits_png_with_flow_and_valid_mask(file_name)


class FlyingChairs(FlowDataset):
    """`FlyingChairs <https://lmb.informatik.uni-freiburg.de/resources/datasets/FlyingChairs.en.html#flyingchairs>`_ Dataset for optical flow.

    You will also need to download the FlyingChairs_train_val.txt file from the dataset page.

    The dataset is expected to have the following structure: ::

        root
            FlyingChairs
                data
                    00001_flow.flo
                    00001_img1.ppm
                    00001_img2.ppm
                    ...
                FlyingChairs_train_val.txt


    Args:
        root (string): Root directory of the FlyingChairs Dataset.
        split (string, optional): The dataset split, either "train" (default) or "val"
        transforms (callable, optional): A function/transform that takes in
            ``img1, img2, flow, valid_flow_mask`` and returns a transformed version.
            ``valid_flow_mask`` is expected for consistency with other datasets which
            return a built-in valid mask, such as :class:`~torchvision.datasets.KittiFlow`.
    """

    def __init__(self, root, split="train", transforms=None):
        super().__init__(root=root, transforms=transforms)

        verify_str_arg(split, "split", valid_values=("train", "val"))

        root = Path(root) / "FlyingChairs"
        images = sorted(glob(str(root / "data" / "*.ppm")))
        flows = sorted(glob(str(root / "data" / "*.flo")))

        split_file_name = "FlyingChairs_train_val.txt"

        if not os.path.exists(root / split_file_name):
            raise FileNotFoundError(
                "The FlyingChairs_train_val.txt file was not found - please download it from the dataset page (see docstring)."
            )

        # split file: one id per sample, 1 = train, 2 = val
        split_list = np.loadtxt(str(root / split_file_name), dtype=np.int32)
        for i in range(len(flows)):
            split_id = split_list[i]
            if (split == "train" and split_id == 1) or (split == "val" and split_id == 2):
                self._flow_list += [flows[i]]
                # images come interleaved as ..._img1.ppm, ..._img2.ppm
                self._image_list += [[images[2 * i], images[2 * i + 1]]]

    def __getitem__(self, index):
        """Return example at given index.

        Args:
            index(int): The index of the example to retrieve

        Returns:
            tuple: A 3-tuple with ``(img1, img2, flow)``.
            The flow is a numpy array of shape (2, H, W) and the images are PIL images.
            ``flow`` is None if ``split="val"``.
            If a valid flow mask is generated within the ``transforms`` parameter,
            a 4-tuple with ``(img1, img2, flow, valid_flow_mask)`` is returned.
        """
        return super().__getitem__(index)

    def _read_flow(self, file_name):
        return _read_flo(file_name)


class FlyingThings3D(FlowDataset):
    """`FlyingThings3D <https://lmb.informatik.uni-freiburg.de/resources/datasets/SceneFlowDatasets.en.html>`_ dataset for optical flow.

    The dataset is expected to have the following structure: ::

        root
            FlyingThings3D
                frames_cleanpass
                    TEST
                    TRAIN
                frames_finalpass
                    TEST
                    TRAIN
                optical_flow
                    TEST
                    TRAIN

    Args:
        root (string): Root directory of the intel FlyingThings3D Dataset.
        split (string, optional): The dataset split, either "train" (default) or "test"
        pass_name (string, optional): The pass to use, either "clean" (default) or "final" or "both". See link above for
            details on the different passes.
        camera (string, optional): Which camera to return images from. Can be either "left" (default) or "right" or "both".
        transforms (callable, optional): A function/transform that takes in
            ``img1, img2, flow, valid_flow_mask`` and returns a transformed version.
            ``valid_flow_mask`` is expected for consistency with other datasets which
            return a built-in valid mask, such as :class:`~torchvision.datasets.KittiFlow`.
    """

    def __init__(self, root, split="train", pass_name="clean", camera="left", transforms=None):
        super().__init__(root=root, transforms=transforms)

        verify_str_arg(split, "split", valid_values=("train", "test"))
        split = split.upper()

        verify_str_arg(pass_name, "pass_name", valid_values=("clean", "final", "both"))
        passes = {
            "clean": ["frames_cleanpass"],
            "final": ["frames_finalpass"],
            "both": ["frames_cleanpass", "frames_finalpass"],
        }[pass_name]

        verify_str_arg(camera, "camera", valid_values=("left", "right", "both"))
        cameras = ["left", "right"] if camera == "both" else [camera]

        root = Path(root) / "FlyingThings3D"

        directions = ("into_future", "into_past")
        for pass_name, camera, direction in itertools.product(passes, cameras, directions):
            image_dirs = sorted(glob(str(root / pass_name / split / "*/*")))
            image_dirs = sorted(Path(image_dir) / camera for image_dir in image_dirs)

            flow_dirs = sorted(glob(str(root / "optical_flow" / split / "*/*")))
            flow_dirs = sorted(Path(flow_dir) / direction / camera for flow_dir in flow_dirs)

            if not image_dirs or not flow_dirs:
                raise FileNotFoundError(
                    "Could not find the FlyingThings3D flow images. "
                    "Please make sure the directory structure is correct."
                )

            for image_dir, flow_dir in zip(image_dirs, flow_dirs):
                images = sorted(glob(str(image_dir / "*.png")))
                flows = sorted(glob(str(flow_dir / "*.pfm")))
                for i in range(len(flows) - 1):
                    # "into_future" pairs frame i with i+1; "into_past"
                    # reverses the pair and uses the flow of the later frame.
                    if direction == "into_future":
                        self._image_list += [[images[i], images[i + 1]]]
                        self._flow_list += [flows[i]]
                    elif direction == "into_past":
                        self._image_list += [[images[i + 1], images[i]]]
                        self._flow_list += [flows[i + 1]]

    def __getitem__(self, index):
        """Return example at given index.

        Args:
            index(int): The index of the example to retrieve

        Returns:
            tuple: A 3-tuple with ``(img1, img2, flow)``.
            The flow is a numpy array of shape (2, H, W) and the images are PIL images.
            ``flow`` is None if ``split="test"``.
            If a valid flow mask is generated within the ``transforms`` parameter,
            a 4-tuple with ``(img1, img2, flow, valid_flow_mask)`` is returned.
        """
        return super().__getitem__(index)

    def _read_flow(self, file_name):
        return _read_pfm(file_name)


class HD1K(FlowDataset):
    """`HD1K <http://hci-benchmark.iwr.uni-heidelberg.de/>`__ dataset for optical flow.

    The dataset is expected to have the following structure: ::

        root
            hd1k
                hd1k_challenge
                    image_2
                hd1k_flow_gt
                    flow_occ
                hd1k_input
                    image_2

    Args:
        root (string): Root directory of the HD1K Dataset.
        split (string, optional): The dataset split, either "train" (default) or "test"
        transforms (callable, optional): A function/transform that takes in
            ``img1, img2, flow, valid_flow_mask`` and returns a transformed version.
    """

    _has_builtin_flow_mask = True

    def __init__(self, root, split="train", transforms=None):
        super().__init__(root=root, transforms=transforms)

        verify_str_arg(split, "split", valid_values=("train", "test"))

        root = Path(root) / "hd1k"
        if split == "train":
            # There are 36 "sequences" and we don't want seq i to overlap with seq i + 1, so we need this for loop
            for seq_idx in range(36):
                flows = sorted(glob(str(root / "hd1k_flow_gt" / "flow_occ" / f"{seq_idx:06d}_*.png")))
                images = sorted(glob(str(root / "hd1k_input" / "image_2" / f"{seq_idx:06d}_*.png")))
                for i in range(len(flows) - 1):
                    self._flow_list += [flows[i]]
                    self._image_list += [[images[i], images[i + 1]]]
        else:
            images1 = sorted(glob(str(root / "hd1k_challenge" / "image_2" / "*10.png")))
            images2 = sorted(glob(str(root / "hd1k_challenge" / "image_2" / "*11.png")))
            for image1, image2 in zip(images1, images2):
                self._image_list += [[image1, image2]]

        if not self._image_list:
            raise FileNotFoundError(
                "Could not find the HD1K images. Please make sure the directory structure is correct."
            )

    def _read_flow(self, file_name):
        return _read_16bits_png_with_flow_and_valid_mask(file_name)

    def __getitem__(self, index):
        """Return example at given index.

        Args:
            index(int): The index of the example to retrieve

        Returns:
            tuple: A 4-tuple with ``(img1, img2, flow, valid_flow_mask)`` where ``valid_flow_mask``
            is a numpy boolean mask of shape (H, W)
            indicating which flow values are valid. The flow is a numpy array of
            shape (2, H, W) and the images are PIL images. ``flow`` and ``valid_flow_mask`` are None if
            ``split="test"``.
        """
        return super().__getitem__(index)


def _read_flo(file_name):
    """Read .flo file in Middlebury format"""
    # Code adapted from:
    # http://stackoverflow.com/questions/28013200/reading-middlebury-flow-files-with-python-bytes-array-numpy
    # Everything needs to be in little Endian according to
    # https://vision.middlebury.edu/flow/code/flow-code/README.txt
    with open(file_name, "rb") as f:
        magic = np.fromfile(f, "c", count=4).tobytes()
        if magic != b"PIEH":
            raise ValueError("Magic number incorrect. Invalid .flo file")

        w = int(np.fromfile(f, "<i4", count=1))
        h = int(np.fromfile(f, "<i4", count=1))
        data = np.fromfile(f, "<f4", count=2 * w * h)
        # file stores (H, W, 2); return channels-first (2, H, W)
        return data.reshape(h, w, 2).transpose(2, 0, 1)


def _read_16bits_png_with_flow_and_valid_mask(file_name):
    # KITTI/HD1K encode flow as 16-bit png: channels 0-1 are flow, channel 2
    # is the validity mask.
    flow_and_valid = _read_png_16(file_name).to(torch.float32)
    flow, valid_flow_mask = flow_and_valid[:2, :, :], flow_and_valid[2, :, :]
    flow = (flow - 2 ** 15) / 64  # This conversion is explained somewhere on the kitti archive
    valid_flow_mask = valid_flow_mask.bool()

    # For consistency with other datasets, we convert to numpy
    return flow.numpy(), valid_flow_mask.numpy()


def _read_pfm(file_name):
    """Read flow in .pfm format"""

    with open(file_name, "rb") as f:
        header = f.readline().rstrip()
        if header != b"PF":
            raise ValueError("Invalid PFM file")

        dim_match = re.match(rb"^(\d+)\s(\d+)\s$", f.readline())
        if not dim_match:
            raise Exception("Malformed PFM header.")
        w, h = (int(dim) for dim in dim_match.groups())

        # sign of the scale factor encodes endianness
        scale = float(f.readline().rstrip())
        if scale < 0:  # little-endian
            endian = "<"
            scale = -scale
        else:
            endian = ">"  # big-endian

        data = np.fromfile(f, dtype=endian + "f")

    data = data.reshape(h, w, 3).transpose(2, 0, 1)
    data = np.flip(data, axis=1)  # flip on h dimension
    data = data[:2, :, :]
    return data.astype(np.float32)
# Package version string (read by packaging tools and runtime version checks).
__version__ = "1.2.0.11"
from __future__ import absolute_import


class BadOption(Exception):
    """Incorrect HTTP API arguments"""


class RenderError(Exception):
    """Error rendering page"""


class InternalError(Exception):
    """Unhandled internal error"""


class GlobalTimeoutError(Exception):
    """Timeout exceeded rendering page"""


class UnsupportedContentType(Exception):
    """Request Content-Type is not supported"""


class ExpiredArguments(Exception):
    """Arguments stored with ``save_args`` are expired"""


class ScriptError(BadOption):
    """Error happened while executing Lua script"""

    # Error-type tags attached to script failures, grouped by the stage at
    # which the failure occurred.
    LUA_INIT_ERROR = 'LUA_INIT_ERROR'        # error happened before coroutine starts
    LUA_ERROR = 'LUA_ERROR'                  # lua error() is called from the coroutine
    LUA_CONVERT_ERROR = 'LUA_CONVERT_ERROR'  # result can't be converted to Python
    SPLASH_LUA_ERROR = 'SPLASH_LUA_ERROR'    # custom error raised by Splash
    BAD_MAIN_ERROR = 'BAD_MAIN_ERROR'        # main() definition is incorrect
    MAIN_NOT_FOUND_ERROR = 'MAIN_NOT_FOUND_ERROR'  # main() is not found
    SYNTAX_ERROR = 'SYNTAX_ERROR'            # XXX: unused; reported as INIT_ERROR now
    JS_ERROR = 'JS_ERROR'                    # error in a wrapped JS function
    UNKNOWN_ERROR = 'UNKNOWN_ERROR'


class JsError(Exception):
    """Error occured in JavaScript code"""


class OneShotCallbackError(Exception):
    """A one shot callback was called more than once."""
"Yang/Wu's OEP implementation, in PyQuante."
from math import sqrt
import settings
from PyQuante.NumWrap import zeros,matrixmultiply,transpose,dot,identity,\
     array,solve
from PyQuante.Ints import getbasis, getints, getJ,get2JmK,getK
from PyQuante.LA2 import geigh,mkdens,trace2,simx
from PyQuante.hartree_fock import get_fock
from PyQuante.CGBF import three_center
from PyQuante.optimize import fminBFGS
from PyQuante.fermi_dirac import get_efermi, get_fermi_occs,mkdens_occs,\
     get_entropy,mkdens_fermi
import logging

logger = logging.getLogger("pyquante")

# Global call counter for get_exx_gradient (used for periodic debug dumps).
gradcall=0

class EXXSolver:
    # Restricted (closed-shell) EXX/OEP solver following Yang/Wu,
    # PRL 89 143002 (2002): optimizes the expansion coefficients b of the
    # effective potential by BFGS minimization of the HF energy.
    "EXXSolver(solver)"
    def __init__(self,solver):
        # Solver is a pointer to a HF or a DFT calculation that has
        # already converged
        self.solver = solver
        self.bfs = self.solver.bfs
        self.nbf = len(self.bfs)
        self.S = self.solver.S
        self.h = self.solver.h
        self.Ints = self.solver.Ints
        self.molecule = self.solver.molecule
        self.nel = self.molecule.get_nel()
        self.nclosed, self.nopen = self.molecule.get_closedopen()
        self.Enuke = self.molecule.get_enuke()
        self.norb = self.nbf
        self.orbs = self.solver.orbs
        self.orbe = self.solver.orbe
        # Precompute three-center integrals <ibf|gbf|jbf>; N^3 storage.
        self.Gij = []
        for g in xrange(self.nbf):
            gmat = zeros((self.nbf,self.nbf),'d')
            self.Gij.append(gmat)
            gbf = self.bfs[g]
            for i in xrange(self.nbf):
                ibf = self.bfs[i]
                for j in xrange(i+1):
                    jbf = self.bfs[j]
                    gij = three_center(ibf,gbf,jbf)
                    gmat[i,j] = gij
                    gmat[j,i] = gij
        # Fermi-Amaldi potential from the converged density; fixed part H0.
        D0 = mkdens(self.orbs,0,self.nclosed)
        J0 = getJ(self.Ints,D0)
        Vfa = (2.0*(self.nel-1.0)/self.nel)*J0
        self.H0 = self.h + Vfa
        self.b = zeros(self.nbf,'d')
        return

    def iterate(self,**kwargs):
        # Run the BFGS optimization of the potential coefficients b.
        self.iter = 0
        self.etemp = kwargs.get("etemp",settings.DFTElectronTemperature)
        logging.debug("iter Energy <b|b>")
        logging.debug("---- ------ -----")
        self.b = fminBFGS(self.get_energy,self.b,self.get_gradient,logger=logging)
        return

    def get_energy(self,b):
        # Energy of the current potential: diagonalize Hoep, build the
        # density (finite-temperature if etemp is set), evaluate E.
        self.iter += 1
        self.Hoep = get_Hoep(b,self.H0,self.Gij)
        self.orbe,self.orbs = geigh(self.Hoep,self.S)
        if self.etemp:
            self.D,self.entropy = mkdens_fermi(self.nel,self.orbe,self.orbs,
                                               self.etemp)
        else:
            self.D = mkdens(self.orbs,0,self.nclosed)
            self.entropy=0
        self.F = get_fock(self.D,self.Ints,self.h)
        self.energy = trace2(self.h+self.F,self.D)+self.Enuke + self.entropy
        if self.iter == 1 or self.iter % 10 == 0:
            logging.debug("%4d %10.5f %10.5f" % (self.iter,self.energy,dot(b,b)))
        return self.energy

    def get_gradient(self,b):
        # dE/db via occupied-virtual Fock elements in the MO basis.
        energy = self.get_energy(b)
        Fmo = simx(self.F,self.orbs)
        bp = zeros(self.nbf,'d')
        for g in xrange(self.nbf):
            # Transform Gij[g] to MOs. This is done over the whole
            # space rather than just the parts we need. I can speed
            # this up later by only forming the i,a elements required
            Gmo = simx(self.Gij[g],self.orbs)
            # Now sum the appropriate terms to get the b gradient
            for i in xrange(self.nclosed):
                for a in xrange(self.nclosed,self.norb):
                    bp[g] = bp[g] + Fmo[i,a]*Gmo[i,a]/(self.orbe[i]-self.orbe[a])
        #logging.debug("EXX Grad: %10.5f" % (sqrt(dot(bp,bp))))
        return bp

class UEXXSolver:
    # Unrestricted (spin-polarized) variant of EXXSolver: separate potential
    # coefficients for alpha (b[:nbf]) and beta (b[nbf:]) spins.
    "EXXSolver(solver)"
    def __init__(self,solver):
        # Solver is a pointer to a UHF calculation that has
        # already converged
        self.solver = solver
        self.bfs = self.solver.bfs
        self.nbf = len(self.bfs)
        self.S = self.solver.S
        self.h = self.solver.h
        self.Ints = self.solver.Ints
        self.molecule = self.solver.molecule
        self.nel = self.molecule.get_nel()
        self.nalpha, self.nbeta = self.molecule.get_alphabeta()
        self.Enuke = self.molecule.get_enuke()
        self.norb = self.nbf
        self.orbsa = self.solver.orbsa
        self.orbsb = self.solver.orbsb
        self.orbea = self.solver.orbea
        self.orbeb = self.solver.orbeb
        # Precompute three-center integrals <ibf|gbf|jbf>; N^3 storage.
        self.Gij = []
        for g in xrange(self.nbf):
            gmat = zeros((self.nbf,self.nbf),'d')
            self.Gij.append(gmat)
            gbf = self.bfs[g]
            for i in xrange(self.nbf):
                ibf = self.bfs[i]
                for j in xrange(i+1):
                    jbf = self.bfs[j]
                    gij = three_center(ibf,gbf,jbf)
                    gmat[i,j] = gij
                    gmat[j,i] = gij
        # Fermi-Amaldi potential from the total converged density.
        D0 = mkdens(self.orbsa,0,self.nalpha)+mkdens(self.orbsb,0,self.nbeta)
        J0 = getJ(self.Ints,D0)
        Vfa = ((self.nel-1.)/self.nel)*J0
        self.H0 = self.h + Vfa
        self.b = zeros(2*self.nbf,'d')
        return

    def iterate(self,**kwargs):
        # Run the BFGS optimization over the stacked (alpha, beta) b vector.
        self.etemp = kwargs.get("etemp",settings.DFTElectronTemperature)
        self.iter = 0
        logging.debug("iter Energy <b|b>")
        logging.debug("---- ------ -----")
        self.b = fminBFGS(self.get_energy,self.b,self.get_gradient,logger=logging)
        return

    def get_energy(self,b):
        # Energy for the current alpha/beta potentials.
        self.iter += 1
        ba = b[:self.nbf]
        bb = b[self.nbf:]
        self.Hoepa = get_Hoep(ba,self.H0,self.Gij)
        self.Hoepb = get_Hoep(bb,self.H0,self.Gij)
        self.orbea,self.orbsa = geigh(self.Hoepa,self.S)
        self.orbeb,self.orbsb = geigh(self.Hoepb,self.S)
        if self.etemp:
            self.Da,entropya = mkdens_fermi(2*self.nalpha,self.orbea,self.orbsa,
                                            self.etemp)
            self.Db,entropyb = mkdens_fermi(2*self.nbeta,self.orbeb,self.orbsb,
                                            self.etemp)
            self.entropy = 0.5*(entropya+entropyb)
        else:
            self.Da = mkdens(self.orbsa,0,self.nalpha)
            self.Db = mkdens(self.orbsb,0,self.nbeta)
            self.entropy=0
        J = getJ(self.Ints,self.Da+self.Db)
        Ka = getK(self.Ints,self.Da)
        Kb = getK(self.Ints,self.Db)
        self.Fa = self.h + J - Ka
        self.Fb = self.h + J - Kb
        self.energy = 0.5*(trace2(self.h+self.Fa,self.Da) +
                           trace2(self.h+self.Fb,self.Db))\
                           + self.Enuke + self.entropy
        if self.iter == 1 or self.iter % 10 == 0:
            logging.debug("%4d %10.5f %10.5f" % (self.iter,self.energy,dot(b,b)))
        return self.energy

    def get_gradient(self,b):
        # dE/db: alpha contributions fill bp[:nbf], beta fill bp[nbf:].
        energy = self.get_energy(b)
        Fmoa = simx(self.Fa,self.orbsa)
        Fmob = simx(self.Fb,self.orbsb)
        bp = zeros(2*self.nbf,'d')
        for g in xrange(self.nbf):
            # Transform Gij[g] to MOs. This is done over the whole
            # space rather than just the parts we need. I can speed
            # this up later by only forming the i,a elements required
            Gmo = simx(self.Gij[g],self.orbsa)
            # Now sum the appropriate terms to get the b gradient
            for i in xrange(self.nalpha):
                for a in xrange(self.nalpha,self.norb):
                    bp[g] += Fmoa[i,a]*Gmo[i,a]/(self.orbea[i]-self.orbea[a])
        for g in xrange(self.nbf):
            # Transform Gij[g] to MOs. This is done over the whole
            # space rather than just the parts we need. I can speed
            # this up later by only forming the i,a elements required
            Gmo = simx(self.Gij[g],self.orbsb)
            # Now sum the appropriate terms to get the b gradient
            for i in xrange(self.nbeta):
                for a in xrange(self.nbeta,self.norb):
                    bp[self.nbf+g] += Fmob[i,a]*Gmo[i,a]/(self.orbeb[i]-self.orbeb[a])
        #logging.debug("EXX Grad: %10.5f" % (sqrt(dot(bp,bp))))
        return bp

def exx(atoms,orbs,**kwargs):
    # Backwards-compatible alias for oep_hf.
    return oep_hf(atoms,orbs,**kwargs)

def oep_hf(atoms,orbs,**kwargs):
    """oep_hf - Form the optimized effective potential for HF exchange.
    See notes on options and other args in oep routine.
    """
    return oep(atoms,orbs,get_exx_energy,get_exx_gradient,**kwargs)

def oep(atoms,orbs,energy_func,grad_func=None,**kwargs):
    """oep - Form the optimized effective potential for a given energy expression

    oep(atoms,orbs,energy_func,grad_func=None,**kwargs)

    atoms       A Molecule object containing a list of the atoms
    orbs        A matrix of guess orbitals
    energy_func The function that returns the energy for the given method
    grad_func   The function that returns the force for the given method

    Options
    -------
    verbose False   Output terse information to stdout (default)
            True    Print out additional information
    ETemp   False   Use ETemp value for finite temperature DFT (default)
            float   Use (float) for the electron temperature
    bfs     None    The basis functions to use. List of CGBF's
    basis_data None The basis data to use to construct bfs
    integrals None  The one- and two-electron integrals to use
                    If not None, S,h,Ints
    """
    verbose = kwargs.get('verbose')
    ETemp = kwargs.get('ETemp',settings.DFTElectronTemperature)
    opt_method = kwargs.get('opt_method',settings.OEPOptMethod)

    bfs = getbasis(atoms,**kwargs)

    # The basis set for the potential can be set different from
    # that used for the wave function
    pbfs = kwargs.get('pbfs')
    if not pbfs: pbfs = bfs
    npbf = len(pbfs)

    S,h,Ints = getints(bfs,atoms,**kwargs)

    nel = atoms.get_nel()
    nocc,nopen = atoms.get_closedopen()

    Enuke = atoms.get_enuke()

    # Form the OEP using Yang/Wu, PRL 89 143002 (2002)
    nbf = len(bfs)
    norb = nbf
    bp = zeros(nbf,'d')

    bvec = kwargs.get('bvec')
    if bvec:
        assert len(bvec) == npbf
        b = array(bvec)
    else:
        b = zeros(npbf,'d')

    # Form and store all of the three-center integrals
    # we're going to need.
    # These are <ibf|gbf|jbf> (where 'bf' indicates basis func,
    # as opposed to MO)
    # N^3 storage -- obviously you don't want to do this for
    # very large systems
    Gij = []
    for g in xrange(npbf):
        gmat = zeros((nbf,nbf),'d')
        Gij.append(gmat)
        gbf = pbfs[g]
        for i in xrange(nbf):
            ibf = bfs[i]
            for j in xrange(i+1):
                jbf = bfs[j]
                gij = three_center(ibf,gbf,jbf)
                gmat[i,j] = gij
                gmat[j,i] = gij

    # Compute the Fermi-Amaldi potential based on the LDA density.
    # We're going to form this matrix from the Coulombic matrix that
    # arises from the input orbitals. D0 and J0 refer to the density
    # matrix and corresponding Coulomb matrix
    D0 = mkdens(orbs,0,nocc)
    J0 = getJ(Ints,D0)
    Vfa = (2*(nel-1.)/nel)*J0
    H0 = h + Vfa

    b = fminBFGS(energy_func,b,grad_func,
                 (nbf,nel,nocc,ETemp,Enuke,S,h,Ints,H0,Gij),
                 logger=logging)

    energy,orbe,orbs = energy_func(b,nbf,nel,nocc,ETemp,Enuke,
                                   S,h,Ints,H0,Gij,return_flag=1)
    return energy,orbe,orbs

def get_exx_energy(b,nbf,nel,nocc,ETemp,Enuke,S,h,Ints,H0,Gij,**kwargs):
    """Computes the energy for the OEP/HF functional

    Options:
    return_flag    0   Just return the energy
                   1   Return energy, orbe, orbs
                   2   Return energy, orbe, orbs, F
    """
    return_flag = kwargs.get('return_flag')
    Hoep = get_Hoep(b,H0,Gij)
    orbe,orbs = geigh(Hoep,S)

    if ETemp:
        # finite-temperature occupations via the Fermi level
        efermi = get_efermi(nel,orbe,ETemp)
        occs = get_fermi_occs(efermi,orbe,ETemp)
        D = mkdens_occs(orbs,occs)
        entropy = get_entropy(occs,ETemp)
    else:
        D = mkdens(orbs,0,nocc)

    F = get_fock(D,Ints,h)
    energy = trace2(h+F,D)+Enuke
    if ETemp: energy += entropy
    iref = nel/2
    # HOMO-LUMO gap in kcal/mol (627.51 converts hartree -> kcal/mol)
    gap = 627.51*(orbe[iref]-orbe[iref-1])

    logging.debug("EXX Energy, B, Gap: %10.5f %10.5f %10.5f"
                  % (energy,sqrt(dot(b,b)),gap))
    #logging.debug("%s" % orbe)
    if return_flag == 1:
        return energy,orbe,orbs
    elif return_flag == 2:
        return energy,orbe,orbs,F
    return energy

def get_exx_gradient(b,nbf,nel,nocc,ETemp,Enuke,S,h,Ints,H0,Gij,**kwargs):
    """Computes the gradient for the OEP/HF functional.

    return_flag    0   Just return gradient
                   1   Return energy,gradient
                   2   Return energy,gradient,orbe,orbs
    """
    # Dump the gradient every 10 steps so we can restart...
    global gradcall
    gradcall += 1
    #if gradcall % 5 == 0: logging.debug("B vector:\n%s" % b)

    # Form the new potential and the new orbitals
    energy,orbe,orbs,F = get_exx_energy(b,nbf,nel,nocc,ETemp,Enuke,
                                        S,h,Ints,H0,Gij,return_flag=2)

    Fmo = matrixmultiply(transpose(orbs),matrixmultiply(F,orbs))

    norb = nbf
    bp = zeros(nbf,'d') # dE/db

    for g in xrange(nbf):
        # Transform Gij[g] to MOs. This is done over the whole
        # space rather than just the parts we need. I can speed
        # this up later by only forming the i,a elements required
        Gmo = matrixmultiply(transpose(orbs),matrixmultiply(Gij[g],orbs))

        # Now sum the appropriate terms to get the b gradient
        for i in xrange(nocc):
            for a in xrange(nocc,norb):
                bp[g] = bp[g] + Fmo[i,a]*Gmo[i,a]/(orbe[i]-orbe[a])

    #logging.debug("EXX Grad: %10.5f" % (sqrt(dot(bp,bp))))

    return_flag = kwargs.get('return_flag')
    if return_flag == 1:
        return energy,bp
    elif return_flag == 2:
        return energy,bp,orbe,orbs
    return bp

def get_Hoep(b,H0,Gij):
    # Effective one-electron Hamiltonian: fixed part H0 plus the
    # b-weighted gaussian potential expansion.
    Hoep = H0
    # Add the contributions from the gaussian potential functions
    # H[ij] += b[g]*<ibf|g|jbf>
    for g in xrange(len(b)):
        Hoep = Hoep + b[g]*Gij[g]
    return Hoep

def oep_hf_an(atoms,orbs,**kwargs):
    """oep_hf - Form the optimized effective potential for HF exchange.

    Implementation of Wu and Yang's Approximate Newton Scheme
    from J. Theor. Comp. Chem. 2, 627 (2003).

    oep_hf(atoms,orbs,**kwargs)

    atoms       A Molecule object containing a list of the atoms
    orbs        A matrix of guess orbitals

    Options
    -------
    bfs           None    The basis functions to use for the wfn
    pbfs          None    The basis functions to use for the pot
    basis_data    None    The basis data to use to construct bfs
    integrals     None    The one- and two-electron integrals to use
                          If not None, S,h,Ints
    """
    maxiter = kwargs.get('maxiter',settings.OEPIters)
    tol = kwargs.get('tol',settings.OEPTolerance)
    bfs = getbasis(atoms,**kwargs)

    # The basis set for the potential can be set different from
    # that used for the wave function
    pbfs = kwargs.get('pbfs')
    if not pbfs: pbfs = bfs
    npbf = len(pbfs)

    S,h,Ints = getints(bfs,atoms)

    nel = atoms.get_nel()
    nocc,nopen = atoms.get_closedopen()

    Enuke = atoms.get_enuke()

    # Form the OEP using Yang/Wu, PRL 89 143002 (2002)
    nbf = len(bfs)
    norb = nbf
    bp = zeros(nbf,'d')

    bvec = kwargs.get('bvec')
    if bvec:
        assert len(bvec) == npbf
        b = array(bvec)
    else:
        b = zeros(npbf,'d')

    # Form and store all of the three-center integrals
    # we're going to need.
    # These are <ibf|gbf|jbf> (where 'bf' indicates basis func,
    # as opposed to MO)
    # N^3 storage -- obviously you don't want to do this for
    # very large systems
    Gij = []
    for g in xrange(npbf):
        gmat = zeros((nbf,nbf),'d')
        Gij.append(gmat)
        gbf = pbfs[g]
        for i in xrange(nbf):
            ibf = bfs[i]
            for j in xrange(i+1):
                jbf = bfs[j]
                gij = three_center(ibf,gbf,jbf)
                gmat[i,j] = gij
                gmat[j,i] = gij

    # Compute the Fermi-Amaldi potential based on the LDA density.
    # We're going to form this matrix from the Coulombic matrix that
    # arises from the input orbitals. D0 and J0 refer to the density
    # matrix and corresponding Coulomb matrix
    D0 = mkdens(orbs,0,nocc)
    J0 = getJ(Ints,D0)
    Vfa = (2*(nel-1.)/nel)*J0
    H0 = h + Vfa

    b = zeros(nbf,'d')
    eold = 0

    for iter in xrange(maxiter):
        Hoep = get_Hoep(b,H0,Gij)
        orbe,orbs = geigh(Hoep,S)

        D = mkdens(orbs,0,nocc)
        Vhf = get2JmK(Ints,D)

        energy = trace2(2*h+Vhf,D)+Enuke
        if abs(energy-eold) < tol:
            break
        else:
            eold = energy

        logging.debug("OEP AN Opt: %d %f" % (iter,energy))
        dV_ao = Vhf-Vfa
        dV = matrixmultiply(transpose(orbs),matrixmultiply(dV_ao,orbs))

        X = zeros((nbf,nbf),'d')
        c = zeros(nbf,'d')
        Gkt = zeros((nbf,nbf),'d')

        for k in xrange(nbf):
            # This didn't work; in fact, it made things worse:
            Gk = matrixmultiply(transpose(orbs),matrixmultiply(Gij[k],orbs))
            for i in xrange(nocc):
                for a in xrange(nocc,norb):
                    c[k] += dV[i,a]*Gk[i,a]/(orbe[i]-orbe[a])

            for l in xrange(nbf):
                Gl = matrixmultiply(transpose(orbs),matrixmultiply(Gij[l],orbs))
                for i in xrange(nocc):
                    for a in xrange(nocc,norb):
                        X[k,l] += Gk[i,a]*Gl[i,a]/(orbe[i]-orbe[a])
        # This should actually be a pseudoinverse...
        b = solve(X,c)

    logger.info("Final OEP energy = %f" % energy)
    return energy,orbe,orbs

def oep_uhf_an(atoms,orbsa,orbsb,**kwargs):
    """oep_hf - Form the optimized effective potential for HF exchange.

    Implementation of Wu and Yang's Approximate Newton Scheme
    from J. Theor. Comp. Chem. 2, 627 (2003).

    oep_uhf(atoms,orbs,**kwargs)

    atoms       A Molecule object containing a list of the atoms
    orbs        A matrix of guess orbitals

    Options
    -------
    bfs           None    The basis functions to use for the wfn
    pbfs          None    The basis functions to use for the pot
    basis_data    None    The basis data to use to construct bfs
    integrals     None    The one- and two-electron integrals to use
                          If not None, S,h,Ints
    """
    maxiter = kwargs.get('maxiter',settings.OEPIters)
    tol = kwargs.get('tol',settings.OEPTolerance)
    ETemp = kwargs.get('ETemp',settings.DFTElectronTemperature)

    bfs = getbasis(atoms,**kwargs)

    # The basis set for the potential can be set different from
    # that used for the wave function
    pbfs = kwargs.get('pbfs')
    if not pbfs: pbfs = bfs
    npbf = len(pbfs)

    S,h,Ints = getints(bfs,atoms,**kwargs)

    nel = atoms.get_nel()
    nclosed,nopen = atoms.get_closedopen()
    nalpha,nbeta = nclosed+nopen,nclosed

    Enuke = atoms.get_enuke()

    # Form the OEP using Yang/Wu, PRL 89 143002 (2002)
    nbf = len(bfs)
    norb = nbf

    ba = zeros(npbf,'d')
    bb = zeros(npbf,'d')

    # Form and store all of the three-center integrals
    # we're going to need.
    # These are <ibf|gbf|jbf> (where 'bf' indicates basis func,
    # as opposed to MO)
    # N^3 storage -- obviously you don't want to do this for
    # very large systems
    Gij = []
    for g in xrange(npbf):
        gmat = zeros((nbf,nbf),'d')
        Gij.append(gmat)
        gbf = pbfs[g]
        for i in xrange(nbf):
            ibf = bfs[i]
            for j in xrange(i+1):
                jbf = bfs[j]
                gij = three_center(ibf,gbf,jbf)
                gmat[i,j] = gij
                gmat[j,i] = gij

    # Compute the Fermi-Amaldi potential based on the LDA density.
    # We're going to form this matrix from the Coulombic matrix that
    # arises from the input orbitals. D0 and J0 refer to the density
    # matrix and corresponding Coulomb matrix
    D0 = mkdens(orbsa,0,nalpha)+mkdens(orbsb,0,nbeta)
    J0 = getJ(Ints,D0)
    Vfa = ((nel-1.)/nel)*J0
    H0 = h + Vfa

    eold = 0

    for iter in xrange(maxiter):
        Hoepa = get_Hoep(ba,H0,Gij)
        # NOTE(review): the beta Hamiltonian is built from ba, not bb —
        # looks like a copy/paste bug (should presumably be get_Hoep(bb,...));
        # confirm before changing, since bb is still updated below.
        Hoepb = get_Hoep(ba,H0,Gij)

        orbea,orbsa = geigh(Hoepa,S)
        orbeb,orbsb = geigh(Hoepb,S)

        if ETemp:
            efermia = get_efermi(2*nalpha,orbea,ETemp)
            occsa = get_fermi_occs(efermia,orbea,ETemp)
            Da = mkdens_occs(orbsa,occsa)
            efermib = get_efermi(2*nbeta,orbeb,ETemp)
            occsb = get_fermi_occs(efermib,orbeb,ETemp)
            Db = mkdens_occs(orbsb,occsb)
            entropy = 0.5*(get_entropy(occsa,ETemp)+get_entropy(occsb,ETemp))
        else:
            Da = mkdens(orbsa,0,nalpha)
            Db = mkdens(orbsb,0,nbeta)

        J = getJ(Ints,Da) + getJ(Ints,Db)
        Ka = getK(Ints,Da)
        Kb = getK(Ints,Db)

        energy = (trace2(2*h+J-Ka,Da)+trace2(2*h+J-Kb,Db))/2\
                 +Enuke
        if ETemp: energy += entropy

        if abs(energy-eold) < tol:
            break
        else:
            eold = energy

        logging.debug("OEP AN Opt: %d %f" % (iter,energy))

        # Do alpha and beta separately
        # Alphas
        dV_ao = J-Ka-Vfa
        dV = matrixmultiply(orbsa,matrixmultiply(dV_ao,transpose(orbsa)))
        X = zeros((nbf,nbf),'d')
        c = zeros(nbf,'d')
        for k in xrange(nbf):
            Gk = matrixmultiply(orbsa,matrixmultiply(Gij[k],
                                                     transpose(orbsa)))
            for i in xrange(nalpha):
                for a in xrange(nalpha,norb):
                    c[k] += dV[i,a]*Gk[i,a]/(orbea[i]-orbea[a])
            for l in xrange(nbf):
                Gl = matrixmultiply(orbsa,matrixmultiply(Gij[l],
                                                         transpose(orbsa)))
                for i in xrange(nalpha):
                    for a in xrange(nalpha,norb):
                        X[k,l] += Gk[i,a]*Gl[i,a]/(orbea[i]-orbea[a])
        # This should actually be a pseudoinverse...
        ba = solve(X,c)

        # Betas
        dV_ao = J-Kb-Vfa
        dV = matrixmultiply(orbsb,matrixmultiply(dV_ao,transpose(orbsb)))
        X = zeros((nbf,nbf),'d')
        c = zeros(nbf,'d')
        for k in xrange(nbf):
            Gk = matrixmultiply(orbsb,matrixmultiply(Gij[k],
                                                     transpose(orbsb)))
            for i in xrange(nbeta):
                for a in xrange(nbeta,norb):
                    c[k] += dV[i,a]*Gk[i,a]/(orbeb[i]-orbeb[a])
            for l in xrange(nbf):
                Gl = matrixmultiply(orbsb,matrixmultiply(Gij[l],
                                                         transpose(orbsb)))
                for i in xrange(nbeta):
                    for a in xrange(nbeta,norb):
                        X[k,l] += Gk[i,a]*Gl[i,a]/(orbeb[i]-orbeb[a])
        # This should actually be a pseudoinverse...
        bb = solve(X,c)

    logger.info("Final OEP energy = %f" % energy)
    return energy,(orbea,orbeb),(orbsa,orbsb)

def test_old():
    # Legacy smoke test using the functional (non-class) interface.
    from PyQuante.Molecule import Molecule
    from PyQuante.Ints import getbasis,getints
    from PyQuante.hartree_fock import rhf

    logging.basicConfig(level=logging.DEBUG,format="%(message)s")

    #mol = Molecule('HF',[('H',(0.,0.,0.)),('F',(0.,0.,0.898369))],
    #               units='Angstrom')
    mol = Molecule('LiH',[(1,(0,0,1.5)),(3,(0,0,-1.5))],units = 'Bohr')
    bfs = getbasis(mol)
    S,h,Ints = getints(bfs,mol)
    print "after integrals"
    E_hf,orbe_hf,orbs_hf = rhf(mol,bfs=bfs,integrals=(S,h,Ints),DoAveraging=True)
    print "RHF energy = ",E_hf
    E_exx,orbe_exx,orbs_exx = exx(mol,orbs_hf,bfs=bfs,integrals=(S,h,Ints))
    return

def test():
    # Smoke test of EXXSolver on He at zero and finite temperature.
    from PyQuante import Molecule, HFSolver, DFTSolver, UHFSolver
    logging.basicConfig(level=logging.DEBUG,format="%(message)s")
    mol = Molecule("He",[(2,(0,0,0))])
    solver = HFSolver(mol)
    solver.iterate()
    print "HF energy = ",solver.energy
    dft_solver = DFTSolver(mol)
    dft_solver.iterate()
    print "DFT energy = ",dft_solver.energy
    oep = EXXSolver(solver)
    # Testing 0 temp
    oep.iterate()
    # Testing finite temp
    oep.iterate(etemp=40000)
    return

def utest():
    # NOTE(review): this function is truncated at the visible end of the file.
    from PyQuante import Molecule, HFSolver, DFTSolver, UHFSolver
    logging.basicConfig(level=logging.DEBUG,format="%(message)s")
    mol = Molecule("He",[(2,(0,0,0))])
    mol = Molecule("Li",[(3,(0,0,0))],multiplicity=2)
    solver = UHFSolver(mol)
    solver.iterate()
    print "HF
energy = ",solver.energy dft_solver = DFTSolver(mol) dft_solver.iterate() print "DFT energy = ",dft_solver.energy oep = UEXXSolver(solver) # Testing 0 temp oep.iterate() # Testing finite temp oep.iterate(etemp=10000) return if __name__ == '__main__': test() utest()
def extractLittlebambooHomeBlog(item):
	'''
	Parser for 'littlebamboo.home.blog'

	Returns a release message for a recognized tag, None for previews or
	items with no volume/chapter information, and False when no known tag
	matches.
	'''
	title = item['title']
	vol, chp, frag, postfix = extractVolChapterFragmentPostfix(title)
	if not (chp or vol) or "preview" in title.lower():
		return None

	# (tag on the post, series name, translation type)
	known_series = [
		('FW',        'Fortunate Wife', 'translated'),
		('PRC',       'PRC',            'translated'),
		('Loiterous', 'Loiterous',      'oel'),
	]
	for tag, series_name, release_type in known_series:
		if tag not in item['tags']:
			continue
		return buildReleaseMessageWithType(item, series_name, vol, chp,
		                                   frag=frag, postfix=postfix,
		                                   tl_type=release_type)
	return False
from __future__ import print_function, division from sympy.core import S, sympify, cacheit, pi, I, Rational from sympy.core.add import Add from sympy.core.function import Function, ArgumentIndexError, _coeff_isneg from sympy.functions.combinatorial.factorials import factorial, RisingFactorial from sympy.functions.elementary.exponential import exp, log, match_real_imag from sympy.functions.elementary.miscellaneous import sqrt from sympy.functions.elementary.integers import floor from sympy import pi, Eq from sympy.logic import Or, And from sympy.core.logic import fuzzy_or, fuzzy_and, fuzzy_bool def _rewrite_hyperbolics_as_exp(expr): expr = sympify(expr) return expr.xreplace({h: h.rewrite(exp) for h in expr.atoms(HyperbolicFunction)}) class HyperbolicFunction(Function): """ Base class for hyperbolic functions. See Also ======== sinh, cosh, tanh, coth """ unbranched = True def _peeloff_ipi(arg): """ Split ARG into two parts, a "rest" and a multiple of I*pi/2. This assumes ARG to be an Add. The multiple of I*pi returned in the second position is always a Rational. Examples ======== >>> from sympy.functions.elementary.hyperbolic import _peeloff_ipi as peel >>> from sympy import pi, I >>> from sympy.abc import x, y >>> peel(x + I*pi/2) (x, I*pi/2) >>> peel(x + I*2*pi/3 + I*pi*y) (x + I*pi*y + I*pi/6, I*pi/2) """ for a in Add.make_args(arg): if a == S.Pi*S.ImaginaryUnit: K = S.One break elif a.is_Mul: K, p = a.as_two_terms() if p == S.Pi*S.ImaginaryUnit and K.is_Rational: break else: return arg, S.Zero m1 = (K % S.Half)*S.Pi*S.ImaginaryUnit m2 = K*S.Pi*S.ImaginaryUnit - m1 return arg - m2, m2 class sinh(HyperbolicFunction): r""" The hyperbolic sine function, `\frac{e^x - e^{-x}}{2}`. * sinh(x) -> Returns the hyperbolic sine of x See Also ======== cosh, tanh, asinh """ def fdiff(self, argindex=1): """ Returns the first derivative of this function. 
""" if argindex == 1: return cosh(self.args[0]) else: raise ArgumentIndexError(self, argindex) def inverse(self, argindex=1): """ Returns the inverse of this function. """ return asinh @classmethod def eval(cls, arg): from sympy import sin arg = sympify(arg) if arg.is_Number: if arg is S.NaN: return S.NaN elif arg is S.Infinity: return S.Infinity elif arg is S.NegativeInfinity: return S.NegativeInfinity elif arg.is_zero: return S.Zero elif arg.is_negative: return -cls(-arg) else: if arg is S.ComplexInfinity: return S.NaN i_coeff = arg.as_coefficient(S.ImaginaryUnit) if i_coeff is not None: return S.ImaginaryUnit * sin(i_coeff) else: if _coeff_isneg(arg): return -cls(-arg) if arg.is_Add: x, m = _peeloff_ipi(arg) if m: return sinh(m)*cosh(x) + cosh(m)*sinh(x) if arg.is_zero: return S.Zero if arg.func == asinh: return arg.args[0] if arg.func == acosh: x = arg.args[0] return sqrt(x - 1) * sqrt(x + 1) if arg.func == atanh: x = arg.args[0] return x/sqrt(1 - x**2) if arg.func == acoth: x = arg.args[0] return 1/(sqrt(x - 1) * sqrt(x + 1)) @staticmethod @cacheit def taylor_term(n, x, *previous_terms): """ Returns the next term in the Taylor series expansion. """ if n < 0 or n % 2 == 0: return S.Zero else: x = sympify(x) if len(previous_terms) > 2: p = previous_terms[-2] return p * x**2 / (n*(n - 1)) else: return x**(n) / factorial(n) def _eval_conjugate(self): return self.func(self.args[0].conjugate()) def as_real_imag(self, deep=True, **hints): """ Returns this function as a complex coordinate. 
""" from sympy import cos, sin if self.args[0].is_extended_real: if deep: hints['complex'] = False return (self.expand(deep, **hints), S.Zero) else: return (self, S.Zero) if deep: re, im = self.args[0].expand(deep, **hints).as_real_imag() else: re, im = self.args[0].as_real_imag() return (sinh(re)*cos(im), cosh(re)*sin(im)) def _eval_expand_complex(self, deep=True, **hints): re_part, im_part = self.as_real_imag(deep=deep, **hints) return re_part + im_part*S.ImaginaryUnit def _eval_expand_trig(self, deep=True, **hints): if deep: arg = self.args[0].expand(deep, **hints) else: arg = self.args[0] x = None if arg.is_Add: # TODO, implement more if deep stuff here x, y = arg.as_two_terms() else: coeff, terms = arg.as_coeff_Mul(rational=True) if coeff is not S.One and coeff.is_Integer and terms is not S.One: x = terms y = (coeff - 1)*x if x is not None: return (sinh(x)*cosh(y) + sinh(y)*cosh(x)).expand(trig=True) return sinh(arg) def _eval_rewrite_as_tractable(self, arg, **kwargs): return (exp(arg) - exp(-arg)) / 2 def _eval_rewrite_as_exp(self, arg, **kwargs): return (exp(arg) - exp(-arg)) / 2 def _eval_rewrite_as_cosh(self, arg, **kwargs): return -S.ImaginaryUnit*cosh(arg + S.Pi*S.ImaginaryUnit/2) def _eval_rewrite_as_tanh(self, arg, **kwargs): tanh_half = tanh(S.Half*arg) return 2*tanh_half/(1 - tanh_half**2) def _eval_rewrite_as_coth(self, arg, **kwargs): coth_half = coth(S.Half*arg) return 2*coth_half/(coth_half**2 - 1) def _eval_as_leading_term(self, x): from sympy import Order arg = self.args[0].as_leading_term(x) if x in arg.free_symbols and Order(1, x).contains(arg): return arg else: return self.func(arg) def _eval_is_real(self): arg = self.args[0] if arg.is_real: return True # if `im` is of the form n*pi # else, check if it is a number re, im = arg.as_real_imag() return (im%pi).is_zero def _eval_is_extended_real(self): if self.args[0].is_extended_real: return True def _eval_is_positive(self): if self.args[0].is_extended_real: return self.args[0].is_positive def 
_eval_is_negative(self): if self.args[0].is_extended_real: return self.args[0].is_negative def _eval_is_finite(self): arg = self.args[0] return arg.is_finite def _eval_is_zero(self): arg = self.args[0] if arg.is_zero: return True class cosh(HyperbolicFunction): r""" The hyperbolic cosine function, `\frac{e^x + e^{-x}}{2}`. * cosh(x) -> Returns the hyperbolic cosine of x See Also ======== sinh, tanh, acosh """ def fdiff(self, argindex=1): if argindex == 1: return sinh(self.args[0]) else: raise ArgumentIndexError(self, argindex) @classmethod def eval(cls, arg): from sympy import cos arg = sympify(arg) if arg.is_Number: if arg is S.NaN: return S.NaN elif arg is S.Infinity: return S.Infinity elif arg is S.NegativeInfinity: return S.Infinity elif arg.is_zero: return S.One elif arg.is_negative: return cls(-arg) else: if arg is S.ComplexInfinity: return S.NaN i_coeff = arg.as_coefficient(S.ImaginaryUnit) if i_coeff is not None: return cos(i_coeff) else: if _coeff_isneg(arg): return cls(-arg) if arg.is_Add: x, m = _peeloff_ipi(arg) if m: return cosh(m)*cosh(x) + sinh(m)*sinh(x) if arg.is_zero: return S.One if arg.func == asinh: return sqrt(1 + arg.args[0]**2) if arg.func == acosh: return arg.args[0] if arg.func == atanh: return 1/sqrt(1 - arg.args[0]**2) if arg.func == acoth: x = arg.args[0] return x/(sqrt(x - 1) * sqrt(x + 1)) @staticmethod @cacheit def taylor_term(n, x, *previous_terms): if n < 0 or n % 2 == 1: return S.Zero else: x = sympify(x) if len(previous_terms) > 2: p = previous_terms[-2] return p * x**2 / (n*(n - 1)) else: return x**(n)/factorial(n) def _eval_conjugate(self): return self.func(self.args[0].conjugate()) def as_real_imag(self, deep=True, **hints): from sympy import cos, sin if self.args[0].is_extended_real: if deep: hints['complex'] = False return (self.expand(deep, **hints), S.Zero) else: return (self, S.Zero) if deep: re, im = self.args[0].expand(deep, **hints).as_real_imag() else: re, im = self.args[0].as_real_imag() return (cosh(re)*cos(im), 
sinh(re)*sin(im)) def _eval_expand_complex(self, deep=True, **hints): re_part, im_part = self.as_real_imag(deep=deep, **hints) return re_part + im_part*S.ImaginaryUnit def _eval_expand_trig(self, deep=True, **hints): if deep: arg = self.args[0].expand(deep, **hints) else: arg = self.args[0] x = None if arg.is_Add: # TODO, implement more if deep stuff here x, y = arg.as_two_terms() else: coeff, terms = arg.as_coeff_Mul(rational=True) if coeff is not S.One and coeff.is_Integer and terms is not S.One: x = terms y = (coeff - 1)*x if x is not None: return (cosh(x)*cosh(y) + sinh(x)*sinh(y)).expand(trig=True) return cosh(arg) def _eval_rewrite_as_tractable(self, arg, **kwargs): return (exp(arg) + exp(-arg)) / 2 def _eval_rewrite_as_exp(self, arg, **kwargs): return (exp(arg) + exp(-arg)) / 2 def _eval_rewrite_as_sinh(self, arg, **kwargs): return -S.ImaginaryUnit*sinh(arg + S.Pi*S.ImaginaryUnit/2) def _eval_rewrite_as_tanh(self, arg, **kwargs): tanh_half = tanh(S.Half*arg)**2 return (1 + tanh_half)/(1 - tanh_half) def _eval_rewrite_as_coth(self, arg, **kwargs): coth_half = coth(S.Half*arg)**2 return (coth_half + 1)/(coth_half - 1) def _eval_as_leading_term(self, x): from sympy import Order arg = self.args[0].as_leading_term(x) if x in arg.free_symbols and Order(1, x).contains(arg): return S.One else: return self.func(arg) def _eval_is_real(self): arg = self.args[0] # `cosh(x)` is real for real OR purely imaginary `x` if arg.is_real or arg.is_imaginary: return True # cosh(a+ib) = cos(b)*cosh(a) + i*sin(b)*sinh(a) # the imaginary part can be an expression like n*pi # if not, check if the imaginary part is a number re, im = arg.as_real_imag() return (im%pi).is_zero def _eval_is_positive(self): # cosh(x+I*y) = cos(y)*cosh(x) + I*sin(y)*sinh(x) # cosh(z) is positive iff it is real and the real part is positive. 
# So we need sin(y)*sinh(x) = 0 which gives x=0 or y=n*pi # Case 1 (y=n*pi): cosh(z) = (-1)**n * cosh(x) -> positive for n even # Case 2 (x=0): cosh(z) = cos(y) -> positive when cos(y) is positive z = self.args[0] x, y = z.as_real_imag() ymod = y % (2*pi) yzero = ymod.is_zero # shortcut if ymod is zero if yzero: return True xzero = x.is_zero # shortcut x is not zero if xzero is False: return yzero return fuzzy_or([ # Case 1: yzero, # Case 2: fuzzy_and([ xzero, fuzzy_or([ymod < pi/2, ymod > 3*pi/2]) ]) ]) def _eval_is_nonnegative(self): z = self.args[0] x, y = z.as_real_imag() ymod = y % (2*pi) yzero = ymod.is_zero # shortcut if ymod is zero if yzero: return True xzero = x.is_zero # shortcut x is not zero if xzero is False: return yzero return fuzzy_or([ # Case 1: yzero, # Case 2: fuzzy_and([ xzero, fuzzy_or([ymod <= pi/2, ymod >= 3*pi/2]) ]) ]) def _eval_is_finite(self): arg = self.args[0] return arg.is_finite class tanh(HyperbolicFunction): r""" The hyperbolic tangent function, `\frac{\sinh(x)}{\cosh(x)}`. * tanh(x) -> Returns the hyperbolic tangent of x See Also ======== sinh, cosh, atanh """ def fdiff(self, argindex=1): if argindex == 1: return S.One - tanh(self.args[0])**2 else: raise ArgumentIndexError(self, argindex) def inverse(self, argindex=1): """ Returns the inverse of this function. 
""" return atanh @classmethod def eval(cls, arg): from sympy import tan arg = sympify(arg) if arg.is_Number: if arg is S.NaN: return S.NaN elif arg is S.Infinity: return S.One elif arg is S.NegativeInfinity: return S.NegativeOne elif arg.is_zero: return S.Zero elif arg.is_negative: return -cls(-arg) else: if arg is S.ComplexInfinity: return S.NaN i_coeff = arg.as_coefficient(S.ImaginaryUnit) if i_coeff is not None: if _coeff_isneg(i_coeff): return -S.ImaginaryUnit * tan(-i_coeff) return S.ImaginaryUnit * tan(i_coeff) else: if _coeff_isneg(arg): return -cls(-arg) if arg.is_Add: x, m = _peeloff_ipi(arg) if m: tanhm = tanh(m) if tanhm is S.ComplexInfinity: return coth(x) else: # tanhm == 0 return tanh(x) if arg.is_zero: return S.Zero if arg.func == asinh: x = arg.args[0] return x/sqrt(1 + x**2) if arg.func == acosh: x = arg.args[0] return sqrt(x - 1) * sqrt(x + 1) / x if arg.func == atanh: return arg.args[0] if arg.func == acoth: return 1/arg.args[0] @staticmethod @cacheit def taylor_term(n, x, *previous_terms): from sympy import bernoulli if n < 0 or n % 2 == 0: return S.Zero else: x = sympify(x) a = 2**(n + 1) B = bernoulli(n + 1) F = factorial(n + 1) return a*(a - 1) * B/F * x**n def _eval_conjugate(self): return self.func(self.args[0].conjugate()) def as_real_imag(self, deep=True, **hints): from sympy import cos, sin if self.args[0].is_extended_real: if deep: hints['complex'] = False return (self.expand(deep, **hints), S.Zero) else: return (self, S.Zero) if deep: re, im = self.args[0].expand(deep, **hints).as_real_imag() else: re, im = self.args[0].as_real_imag() denom = sinh(re)**2 + cos(im)**2 return (sinh(re)*cosh(re)/denom, sin(im)*cos(im)/denom) def _eval_rewrite_as_tractable(self, arg, **kwargs): neg_exp, pos_exp = exp(-arg), exp(arg) return (pos_exp - neg_exp)/(pos_exp + neg_exp) def _eval_rewrite_as_exp(self, arg, **kwargs): neg_exp, pos_exp = exp(-arg), exp(arg) return (pos_exp - neg_exp)/(pos_exp + neg_exp) def _eval_rewrite_as_sinh(self, arg, **kwargs): 
return S.ImaginaryUnit*sinh(arg)/sinh(S.Pi*S.ImaginaryUnit/2 - arg) def _eval_rewrite_as_cosh(self, arg, **kwargs): return S.ImaginaryUnit*cosh(S.Pi*S.ImaginaryUnit/2 - arg)/cosh(arg) def _eval_rewrite_as_coth(self, arg, **kwargs): return 1/coth(arg) def _eval_as_leading_term(self, x): from sympy import Order arg = self.args[0].as_leading_term(x) if x in arg.free_symbols and Order(1, x).contains(arg): return arg else: return self.func(arg) def _eval_is_real(self): from sympy import cos, sinh arg = self.args[0] if arg.is_real: return True re, im = arg.as_real_imag() # if denom = 0, tanh(arg) = zoo if re == 0 and im % pi == pi/2: return None # check if im is of the form n*pi/2 to make sin(2*im) = 0 # if not, im could be a number, return False in that case return (im % (pi/2)).is_zero def _eval_is_extended_real(self): if self.args[0].is_extended_real: return True def _eval_is_positive(self): if self.args[0].is_extended_real: return self.args[0].is_positive def _eval_is_negative(self): if self.args[0].is_extended_real: return self.args[0].is_negative def _eval_is_finite(self): from sympy import sinh, cos arg = self.args[0] re, im = arg.as_real_imag() denom = cos(im)**2 + sinh(re)**2 if denom == 0: return False elif denom.is_number: return True if arg.is_extended_real: return True def _eval_is_zero(self): arg = self.args[0] if arg.is_zero: return True class coth(HyperbolicFunction): r""" The hyperbolic cotangent function, `\frac{\cosh(x)}{\sinh(x)}`. * coth(x) -> Returns the hyperbolic cotangent of x """ def fdiff(self, argindex=1): if argindex == 1: return -1/sinh(self.args[0])**2 else: raise ArgumentIndexError(self, argindex) def inverse(self, argindex=1): """ Returns the inverse of this function. 
""" return acoth @classmethod def eval(cls, arg): from sympy import cot arg = sympify(arg) if arg.is_Number: if arg is S.NaN: return S.NaN elif arg is S.Infinity: return S.One elif arg is S.NegativeInfinity: return S.NegativeOne elif arg.is_zero: return S.ComplexInfinity elif arg.is_negative: return -cls(-arg) else: if arg is S.ComplexInfinity: return S.NaN i_coeff = arg.as_coefficient(S.ImaginaryUnit) if i_coeff is not None: if _coeff_isneg(i_coeff): return S.ImaginaryUnit * cot(-i_coeff) return -S.ImaginaryUnit * cot(i_coeff) else: if _coeff_isneg(arg): return -cls(-arg) if arg.is_Add: x, m = _peeloff_ipi(arg) if m: cothm = coth(m) if cothm is S.ComplexInfinity: return coth(x) else: # cothm == 0 return tanh(x) if arg.is_zero: return S.ComplexInfinity if arg.func == asinh: x = arg.args[0] return sqrt(1 + x**2)/x if arg.func == acosh: x = arg.args[0] return x/(sqrt(x - 1) * sqrt(x + 1)) if arg.func == atanh: return 1/arg.args[0] if arg.func == acoth: return arg.args[0] @staticmethod @cacheit def taylor_term(n, x, *previous_terms): from sympy import bernoulli if n == 0: return 1 / sympify(x) elif n < 0 or n % 2 == 0: return S.Zero else: x = sympify(x) B = bernoulli(n + 1) F = factorial(n + 1) return 2**(n + 1) * B/F * x**n def _eval_conjugate(self): return self.func(self.args[0].conjugate()) def as_real_imag(self, deep=True, **hints): from sympy import cos, sin if self.args[0].is_extended_real: if deep: hints['complex'] = False return (self.expand(deep, **hints), S.Zero) else: return (self, S.Zero) if deep: re, im = self.args[0].expand(deep, **hints).as_real_imag() else: re, im = self.args[0].as_real_imag() denom = sinh(re)**2 + sin(im)**2 return (sinh(re)*cosh(re)/denom, -sin(im)*cos(im)/denom) def _eval_rewrite_as_tractable(self, arg, **kwargs): neg_exp, pos_exp = exp(-arg), exp(arg) return (pos_exp + neg_exp)/(pos_exp - neg_exp) def _eval_rewrite_as_exp(self, arg, **kwargs): neg_exp, pos_exp = exp(-arg), exp(arg) return (pos_exp + neg_exp)/(pos_exp - neg_exp) def 
_eval_rewrite_as_sinh(self, arg, **kwargs): return -S.ImaginaryUnit*sinh(S.Pi*S.ImaginaryUnit/2 - arg)/sinh(arg) def _eval_rewrite_as_cosh(self, arg, **kwargs): return -S.ImaginaryUnit*cosh(arg)/cosh(S.Pi*S.ImaginaryUnit/2 - arg) def _eval_rewrite_as_tanh(self, arg, **kwargs): return 1/tanh(arg) def _eval_is_positive(self): if self.args[0].is_extended_real: return self.args[0].is_positive def _eval_is_negative(self): if self.args[0].is_extended_real: return self.args[0].is_negative def _eval_as_leading_term(self, x): from sympy import Order arg = self.args[0].as_leading_term(x) if x in arg.free_symbols and Order(1, x).contains(arg): return 1/arg else: return self.func(arg) class ReciprocalHyperbolicFunction(HyperbolicFunction): """Base class for reciprocal functions of hyperbolic functions. """ #To be defined in class _reciprocal_of = None _is_even = None _is_odd = None @classmethod def eval(cls, arg): if arg.could_extract_minus_sign(): if cls._is_even: return cls(-arg) if cls._is_odd: return -cls(-arg) t = cls._reciprocal_of.eval(arg) if hasattr(arg, 'inverse') and arg.inverse() == cls: return arg.args[0] return 1/t if t is not None else t def _call_reciprocal(self, method_name, *args, **kwargs): # Calls method_name on _reciprocal_of o = self._reciprocal_of(self.args[0]) return getattr(o, method_name)(*args, **kwargs) def _calculate_reciprocal(self, method_name, *args, **kwargs): # If calling method_name on _reciprocal_of returns a value != None # then return the reciprocal of that value t = self._call_reciprocal(method_name, *args, **kwargs) return 1/t if t is not None else t def _rewrite_reciprocal(self, method_name, arg): # Special handling for rewrite functions. 
If reciprocal rewrite returns # unmodified expression, then return None t = self._call_reciprocal(method_name, arg) if t is not None and t != self._reciprocal_of(arg): return 1/t def _eval_rewrite_as_exp(self, arg, **kwargs): return self._rewrite_reciprocal("_eval_rewrite_as_exp", arg) def _eval_rewrite_as_tractable(self, arg, **kwargs): return self._rewrite_reciprocal("_eval_rewrite_as_tractable", arg) def _eval_rewrite_as_tanh(self, arg, **kwargs): return self._rewrite_reciprocal("_eval_rewrite_as_tanh", arg) def _eval_rewrite_as_coth(self, arg, **kwargs): return self._rewrite_reciprocal("_eval_rewrite_as_coth", arg) def as_real_imag(self, deep = True, **hints): return (1 / self._reciprocal_of(self.args[0])).as_real_imag(deep, **hints) def _eval_conjugate(self): return self.func(self.args[0].conjugate()) def _eval_expand_complex(self, deep=True, **hints): re_part, im_part = self.as_real_imag(deep=True, **hints) return re_part + S.ImaginaryUnit*im_part def _eval_as_leading_term(self, x): return (1/self._reciprocal_of(self.args[0]))._eval_as_leading_term(x) def _eval_is_extended_real(self): return self._reciprocal_of(self.args[0]).is_extended_real def _eval_is_finite(self): return (1/self._reciprocal_of(self.args[0])).is_finite class csch(ReciprocalHyperbolicFunction): r""" The hyperbolic cosecant function, `\frac{2}{e^x - e^{-x}}` * csch(x) -> Returns the hyperbolic cosecant of x See Also ======== sinh, cosh, tanh, sech, asinh, acosh """ _reciprocal_of = sinh _is_odd = True def fdiff(self, argindex=1): """ Returns the first derivative of this function """ if argindex == 1: return -coth(self.args[0]) * csch(self.args[0]) else: raise ArgumentIndexError(self, argindex) @staticmethod @cacheit def taylor_term(n, x, *previous_terms): """ Returns the next term in the Taylor series expansion """ from sympy import bernoulli if n == 0: return 1/sympify(x) elif n < 0 or n % 2 == 0: return S.Zero else: x = sympify(x) B = bernoulli(n + 1) F = factorial(n + 1) return 2 * (1 - 
2**n) * B/F * x**n def _eval_rewrite_as_cosh(self, arg, **kwargs): return S.ImaginaryUnit / cosh(arg + S.ImaginaryUnit * S.Pi / 2) def _eval_is_positive(self): if self.args[0].is_extended_real: return self.args[0].is_positive def _eval_is_negative(self): if self.args[0].is_extended_real: return self.args[0].is_negative def _sage_(self): import sage.all as sage return sage.csch(self.args[0]._sage_()) class sech(ReciprocalHyperbolicFunction): r""" The hyperbolic secant function, `\frac{2}{e^x + e^{-x}}` * sech(x) -> Returns the hyperbolic secant of x See Also ======== sinh, cosh, tanh, coth, csch, asinh, acosh """ _reciprocal_of = cosh _is_even = True def fdiff(self, argindex=1): if argindex == 1: return - tanh(self.args[0])*sech(self.args[0]) else: raise ArgumentIndexError(self, argindex) @staticmethod @cacheit def taylor_term(n, x, *previous_terms): from sympy.functions.combinatorial.numbers import euler if n < 0 or n % 2 == 1: return S.Zero else: x = sympify(x) return euler(n) / factorial(n) * x**(n) def _eval_rewrite_as_sinh(self, arg, **kwargs): return S.ImaginaryUnit / sinh(arg + S.ImaginaryUnit * S.Pi /2) def _eval_is_positive(self): if self.args[0].is_extended_real: return True def _sage_(self): import sage.all as sage return sage.sech(self.args[0]._sage_()) class InverseHyperbolicFunction(Function): """Base class for inverse hyperbolic functions.""" pass class asinh(InverseHyperbolicFunction): """ The inverse hyperbolic sine function. 
* asinh(x) -> Returns the inverse hyperbolic sine of x See Also ======== acosh, atanh, sinh """ def fdiff(self, argindex=1): if argindex == 1: return 1/sqrt(self.args[0]**2 + 1) else: raise ArgumentIndexError(self, argindex) @classmethod def eval(cls, arg): from sympy import asin arg = sympify(arg) if arg.is_Number: if arg is S.NaN: return S.NaN elif arg is S.Infinity: return S.Infinity elif arg is S.NegativeInfinity: return S.NegativeInfinity elif arg.is_zero: return S.Zero elif arg is S.One: return log(sqrt(2) + 1) elif arg is S.NegativeOne: return log(sqrt(2) - 1) elif arg.is_negative: return -cls(-arg) else: if arg is S.ComplexInfinity: return S.ComplexInfinity if arg.is_zero: return S.Zero i_coeff = arg.as_coefficient(S.ImaginaryUnit) if i_coeff is not None: return S.ImaginaryUnit * asin(i_coeff) else: if _coeff_isneg(arg): return -cls(-arg) if isinstance(arg, sinh) and arg.args[0].is_number: z = arg.args[0] if z.is_real: return z r, i = match_real_imag(z) if r is not None and i is not None: f = floor((i + pi/2)/pi) m = z - I*pi*f even = f.is_even if even is True: return m elif even is False: return -m @staticmethod @cacheit def taylor_term(n, x, *previous_terms): if n < 0 or n % 2 == 0: return S.Zero else: x = sympify(x) if len(previous_terms) >= 2 and n > 2: p = previous_terms[-2] return -p * (n - 2)**2/(n*(n - 1)) * x**2 else: k = (n - 1) // 2 R = RisingFactorial(S.Half, k) F = factorial(k) return (-1)**k * R / F * x**n / n def _eval_as_leading_term(self, x): from sympy import Order arg = self.args[0].as_leading_term(x) if x in arg.free_symbols and Order(1, x).contains(arg): return arg else: return self.func(arg) def _eval_rewrite_as_log(self, x, **kwargs): return log(x + sqrt(x**2 + 1)) def inverse(self, argindex=1): """ Returns the inverse of this function. """ return sinh def _eval_is_zero(self): arg = self.args[0] if arg.is_zero: return True class acosh(InverseHyperbolicFunction): """ The inverse hyperbolic cosine function. 
* acosh(x) -> Returns the inverse hyperbolic cosine of x See Also ======== asinh, atanh, cosh """ def fdiff(self, argindex=1): if argindex == 1: return 1/sqrt(self.args[0]**2 - 1) else: raise ArgumentIndexError(self, argindex) @classmethod def eval(cls, arg): arg = sympify(arg) if arg.is_Number: if arg is S.NaN: return S.NaN elif arg is S.Infinity: return S.Infinity elif arg is S.NegativeInfinity: return S.Infinity elif arg.is_zero: return S.Pi*S.ImaginaryUnit / 2 elif arg is S.One: return S.Zero elif arg is S.NegativeOne: return S.Pi*S.ImaginaryUnit if arg.is_number: cst_table = { S.ImaginaryUnit: log(S.ImaginaryUnit*(1 + sqrt(2))), -S.ImaginaryUnit: log(-S.ImaginaryUnit*(1 + sqrt(2))), S.Half: S.Pi/3, Rational(-1, 2): S.Pi*Rational(2, 3), sqrt(2)/2: S.Pi/4, -sqrt(2)/2: S.Pi*Rational(3, 4), 1/sqrt(2): S.Pi/4, -1/sqrt(2): S.Pi*Rational(3, 4), sqrt(3)/2: S.Pi/6, -sqrt(3)/2: S.Pi*Rational(5, 6), (sqrt(3) - 1)/sqrt(2**3): S.Pi*Rational(5, 12), -(sqrt(3) - 1)/sqrt(2**3): S.Pi*Rational(7, 12), sqrt(2 + sqrt(2))/2: S.Pi/8, -sqrt(2 + sqrt(2))/2: S.Pi*Rational(7, 8), sqrt(2 - sqrt(2))/2: S.Pi*Rational(3, 8), -sqrt(2 - sqrt(2))/2: S.Pi*Rational(5, 8), (1 + sqrt(3))/(2*sqrt(2)): S.Pi/12, -(1 + sqrt(3))/(2*sqrt(2)): S.Pi*Rational(11, 12), (sqrt(5) + 1)/4: S.Pi/5, -(sqrt(5) + 1)/4: S.Pi*Rational(4, 5) } if arg in cst_table: if arg.is_extended_real: return cst_table[arg]*S.ImaginaryUnit return cst_table[arg] if arg is S.ComplexInfinity: return S.ComplexInfinity if arg == S.ImaginaryUnit*S.Infinity: return S.Infinity + S.ImaginaryUnit*S.Pi/2 if arg == -S.ImaginaryUnit*S.Infinity: return S.Infinity - S.ImaginaryUnit*S.Pi/2 if arg.is_zero: return S.Pi*S.ImaginaryUnit*S.Half if isinstance(arg, cosh) and arg.args[0].is_number: z = arg.args[0] if z.is_real: from sympy.functions.elementary.complexes import Abs return Abs(z) r, i = match_real_imag(z) if r is not None and i is not None: f = floor(i/pi) m = z - I*pi*f even = f.is_even if even is True: if r.is_nonnegative: return m elif 
r.is_negative: return -m elif even is False: m -= I*pi if r.is_nonpositive: return -m elif r.is_positive: return m @staticmethod @cacheit def taylor_term(n, x, *previous_terms): if n == 0: return S.Pi*S.ImaginaryUnit / 2 elif n < 0 or n % 2 == 0: return S.Zero else: x = sympify(x) if len(previous_terms) >= 2 and n > 2: p = previous_terms[-2] return p * (n - 2)**2/(n*(n - 1)) * x**2 else: k = (n - 1) // 2 R = RisingFactorial(S.Half, k) F = factorial(k) return -R / F * S.ImaginaryUnit * x**n / n def _eval_as_leading_term(self, x): from sympy import Order arg = self.args[0].as_leading_term(x) if x in arg.free_symbols and Order(1, x).contains(arg): return S.ImaginaryUnit*S.Pi/2 else: return self.func(arg) def _eval_rewrite_as_log(self, x, **kwargs): return log(x + sqrt(x + 1) * sqrt(x - 1)) def inverse(self, argindex=1): """ Returns the inverse of this function. """ return cosh class atanh(InverseHyperbolicFunction): """ The inverse hyperbolic tangent function. * atanh(x) -> Returns the inverse hyperbolic tangent of x See Also ======== asinh, acosh, tanh """ def fdiff(self, argindex=1): if argindex == 1: return 1/(1 - self.args[0]**2) else: raise ArgumentIndexError(self, argindex) @classmethod def eval(cls, arg): from sympy import atan arg = sympify(arg) if arg.is_Number: if arg is S.NaN: return S.NaN elif arg.is_zero: return S.Zero elif arg is S.One: return S.Infinity elif arg is S.NegativeOne: return S.NegativeInfinity elif arg is S.Infinity: return -S.ImaginaryUnit * atan(arg) elif arg is S.NegativeInfinity: return S.ImaginaryUnit * atan(-arg) elif arg.is_negative: return -cls(-arg) else: if arg is S.ComplexInfinity: from sympy.calculus.util import AccumBounds return S.ImaginaryUnit*AccumBounds(-S.Pi/2, S.Pi/2) i_coeff = arg.as_coefficient(S.ImaginaryUnit) if i_coeff is not None: return S.ImaginaryUnit * atan(i_coeff) else: if _coeff_isneg(arg): return -cls(-arg) if arg.is_zero: return S.Zero if isinstance(arg, tanh) and arg.args[0].is_number: z = arg.args[0] if 
z.is_real: return z r, i = match_real_imag(z) if r is not None and i is not None: f = floor(2*i/pi) even = f.is_even m = z - I*f*pi/2 if even is True: return m elif even is False: return m - I*pi/2 @staticmethod @cacheit def taylor_term(n, x, *previous_terms): if n < 0 or n % 2 == 0: return S.Zero else: x = sympify(x) return x**n / n def _eval_as_leading_term(self, x): from sympy import Order arg = self.args[0].as_leading_term(x) if x in arg.free_symbols and Order(1, x).contains(arg): return arg else: return self.func(arg) def _eval_rewrite_as_log(self, x, **kwargs): return (log(1 + x) - log(1 - x)) / 2 def _eval_is_zero(self): arg = self.args[0] if arg.is_zero: return True def inverse(self, argindex=1): """ Returns the inverse of this function. """ return tanh class acoth(InverseHyperbolicFunction): """ The inverse hyperbolic cotangent function. * acoth(x) -> Returns the inverse hyperbolic cotangent of x """ def fdiff(self, argindex=1): if argindex == 1: return 1/(1 - self.args[0]**2) else: raise ArgumentIndexError(self, argindex) @classmethod def eval(cls, arg): from sympy import acot arg = sympify(arg) if arg.is_Number: if arg is S.NaN: return S.NaN elif arg is S.Infinity: return S.Zero elif arg is S.NegativeInfinity: return S.Zero elif arg.is_zero: return S.Pi*S.ImaginaryUnit / 2 elif arg is S.One: return S.Infinity elif arg is S.NegativeOne: return S.NegativeInfinity elif arg.is_negative: return -cls(-arg) else: if arg is S.ComplexInfinity: return S.Zero i_coeff = arg.as_coefficient(S.ImaginaryUnit) if i_coeff is not None: return -S.ImaginaryUnit * acot(i_coeff) else: if _coeff_isneg(arg): return -cls(-arg) if arg.is_zero: return S.Pi*S.ImaginaryUnit*S.Half @staticmethod @cacheit def taylor_term(n, x, *previous_terms): if n == 0: return S.Pi*S.ImaginaryUnit / 2 elif n < 0 or n % 2 == 0: return S.Zero else: x = sympify(x) return x**n / n def _eval_as_leading_term(self, x): from sympy import Order arg = self.args[0].as_leading_term(x) if x in arg.free_symbols 
and Order(1, x).contains(arg): return S.ImaginaryUnit*S.Pi/2 else: return self.func(arg) def _eval_rewrite_as_log(self, x, **kwargs): return (log(1 + 1/x) - log(1 - 1/x)) / 2 def inverse(self, argindex=1): """ Returns the inverse of this function. """ return coth class asech(InverseHyperbolicFunction): """ The inverse hyperbolic secant function. * asech(x) -> Returns the inverse hyperbolic secant of x Examples ======== >>> from sympy import asech, sqrt, S >>> from sympy.abc import x >>> asech(x).diff(x) -1/(x*sqrt(1 - x**2)) >>> asech(1).diff(x) 0 >>> asech(1) 0 >>> asech(S(2)) I*pi/3 >>> asech(-sqrt(2)) 3*I*pi/4 >>> asech((sqrt(6) - sqrt(2))) I*pi/12 See Also ======== asinh, atanh, cosh, acoth References ========== .. [1] https://en.wikipedia.org/wiki/Hyperbolic_function .. [2] http://dlmf.nist.gov/4.37 .. [3] http://functions.wolfram.com/ElementaryFunctions/ArcSech/ """ def fdiff(self, argindex=1): if argindex == 1: z = self.args[0] return -1/(z*sqrt(1 - z**2)) else: raise ArgumentIndexError(self, argindex) @classmethod def eval(cls, arg): arg = sympify(arg) if arg.is_Number: if arg is S.NaN: return S.NaN elif arg is S.Infinity: return S.Pi*S.ImaginaryUnit / 2 elif arg is S.NegativeInfinity: return S.Pi*S.ImaginaryUnit / 2 elif arg.is_zero: return S.Infinity elif arg is S.One: return S.Zero elif arg is S.NegativeOne: return S.Pi*S.ImaginaryUnit if arg.is_number: cst_table = { S.ImaginaryUnit: - (S.Pi*S.ImaginaryUnit / 2) + log(1 + sqrt(2)), -S.ImaginaryUnit: (S.Pi*S.ImaginaryUnit / 2) + log(1 + sqrt(2)), (sqrt(6) - sqrt(2)): S.Pi / 12, (sqrt(2) - sqrt(6)): 11*S.Pi / 12, sqrt(2 - 2/sqrt(5)): S.Pi / 10, -sqrt(2 - 2/sqrt(5)): 9*S.Pi / 10, 2 / sqrt(2 + sqrt(2)): S.Pi / 8, -2 / sqrt(2 + sqrt(2)): 7*S.Pi / 8, 2 / sqrt(3): S.Pi / 6, -2 / sqrt(3): 5*S.Pi / 6, (sqrt(5) - 1): S.Pi / 5, (1 - sqrt(5)): 4*S.Pi / 5, sqrt(2): S.Pi / 4, -sqrt(2): 3*S.Pi / 4, sqrt(2 + 2/sqrt(5)): 3*S.Pi / 10, -sqrt(2 + 2/sqrt(5)): 7*S.Pi / 10, S(2): S.Pi / 3, -S(2): 2*S.Pi / 3, sqrt(2*(2 + 
sqrt(2))): 3*S.Pi / 8, -sqrt(2*(2 + sqrt(2))): 5*S.Pi / 8, (1 + sqrt(5)): 2*S.Pi / 5, (-1 - sqrt(5)): 3*S.Pi / 5, (sqrt(6) + sqrt(2)): 5*S.Pi / 12, (-sqrt(6) - sqrt(2)): 7*S.Pi / 12, } if arg in cst_table: if arg.is_extended_real: return cst_table[arg]*S.ImaginaryUnit return cst_table[arg] if arg is S.ComplexInfinity: from sympy.calculus.util import AccumBounds return S.ImaginaryUnit*AccumBounds(-S.Pi/2, S.Pi/2) if arg.is_zero: return S.Infinity @staticmethod @cacheit def expansion_term(n, x, *previous_terms): if n == 0: return log(2 / x) elif n < 0 or n % 2 == 1: return S.Zero else: x = sympify(x) if len(previous_terms) > 2 and n > 2: p = previous_terms[-2] return p * (n - 1)**2 // (n // 2)**2 * x**2 / 4 else: k = n // 2 R = RisingFactorial(S.Half , k) * n F = factorial(k) * n // 2 * n // 2 return -1 * R / F * x**n / 4 def inverse(self, argindex=1): """ Returns the inverse of this function. """ return sech def _eval_rewrite_as_log(self, arg, **kwargs): return log(1/arg + sqrt(1/arg - 1) * sqrt(1/arg + 1)) class acsch(InverseHyperbolicFunction): """ The inverse hyperbolic cosecant function. * acsch(x) -> Returns the inverse hyperbolic cosecant of x Examples ======== >>> from sympy import acsch, sqrt, S >>> from sympy.abc import x >>> acsch(x).diff(x) -1/(x**2*sqrt(1 + x**(-2))) >>> acsch(1).diff(x) 0 >>> acsch(1) log(1 + sqrt(2)) >>> acsch(S.ImaginaryUnit) -I*pi/2 >>> acsch(-2*S.ImaginaryUnit) I*pi/6 >>> acsch(S.ImaginaryUnit*(sqrt(6) - sqrt(2))) -5*I*pi/12 References ========== .. [1] https://en.wikipedia.org/wiki/Hyperbolic_function .. [2] http://dlmf.nist.gov/4.37 .. 
[3] http://functions.wolfram.com/ElementaryFunctions/ArcCsch/ """ def fdiff(self, argindex=1): if argindex == 1: z = self.args[0] return -1/(z**2*sqrt(1 + 1/z**2)) else: raise ArgumentIndexError(self, argindex) @classmethod def eval(cls, arg): arg = sympify(arg) if arg.is_Number: if arg is S.NaN: return S.NaN elif arg is S.Infinity: return S.Zero elif arg is S.NegativeInfinity: return S.Zero elif arg.is_zero: return S.ComplexInfinity elif arg is S.One: return log(1 + sqrt(2)) elif arg is S.NegativeOne: return - log(1 + sqrt(2)) if arg.is_number: cst_table = { S.ImaginaryUnit: -S.Pi / 2, S.ImaginaryUnit*(sqrt(2) + sqrt(6)): -S.Pi / 12, S.ImaginaryUnit*(1 + sqrt(5)): -S.Pi / 10, S.ImaginaryUnit*2 / sqrt(2 - sqrt(2)): -S.Pi / 8, S.ImaginaryUnit*2: -S.Pi / 6, S.ImaginaryUnit*sqrt(2 + 2/sqrt(5)): -S.Pi / 5, S.ImaginaryUnit*sqrt(2): -S.Pi / 4, S.ImaginaryUnit*(sqrt(5)-1): -3*S.Pi / 10, S.ImaginaryUnit*2 / sqrt(3): -S.Pi / 3, S.ImaginaryUnit*2 / sqrt(2 + sqrt(2)): -3*S.Pi / 8, S.ImaginaryUnit*sqrt(2 - 2/sqrt(5)): -2*S.Pi / 5, S.ImaginaryUnit*(sqrt(6) - sqrt(2)): -5*S.Pi / 12, S(2): -S.ImaginaryUnit*log((1+sqrt(5))/2), } if arg in cst_table: return cst_table[arg]*S.ImaginaryUnit if arg is S.ComplexInfinity: return S.Zero if arg.is_zero: return S.ComplexInfinity if _coeff_isneg(arg): return -cls(-arg) def inverse(self, argindex=1): """ Returns the inverse of this function. """ return csch def _eval_rewrite_as_log(self, arg, **kwargs): return log(1/arg + sqrt(1/arg**2 + 1))
""" Single trace Analysis """ __author__ = "Yanlong Yin (yyin2@iit.edu)" __version__ = "$Revision: 1.4$" __date__ = "$Date: 02/08/2014 $" __copyright__ = "Copyright (c) 2010-2014 SCS Lab, IIT" __license__ = "Python" import sys, os, string, getopt, gc, multiprocessing from sig import * from access import * from accList import * from prop import * from util import * def detectSignature(filename): # the list contains all the accesses rlist = AccList() wlist = AccList() accList = AccList() # all lines with "accList" are commentted out # because the figure drawing using accList # is replaced with rlist and wlist # open the trace file f = open(filename, 'r') # skip the first several lines # Maybe the skipped lines are table heads for i in range(int(sig._format_prop['skip_lines'])): line = f.readline() # scan the file and put the access item into list i = 0 j = 0 op_index = int(sig._format_prop['op']) debugPrint ('op_index: ', op_index) op = '' # TODO: add while 1 loop here for i in range(sig._range): line = f.readline() if not line: break words = string.split(line) # there might be some blank lines if len(words) < 6: j+=1 continue ## only "READ" and "WRITE" will be saved #if words[-1].count('READ') == 0 and words[-1].count('WRITE') == 0: # to test chomob, only use write # if words[-1].count('WRITE') == 0: # j+=1 # continue ## save to list op = words[op_index].upper(); acc = Access(words) if acc.size >= 1: accList.append(acc) if op.count('READ')>0 or op == 'R': debugPrint("one READ") rlist.append(acc) if op.count('WRITE')>0 or op == 'W': debugPrint("one WRITE") wlist.append(acc) ## close the opened file f.close() rlist.trace = filename wlist.trace = filename accList.trace = filename # print the time summary print 'Total read time: ', sig._total_read_time print 'Total write time: ', sig._total_write_time print 'Numbers of operations - ', 'Read: ', len(rlist), ' write: ', len(wlist) ## deal with the list rlist.detect_signature(0, min(sig._range-j-1, len(rlist)-1) ) 
wlist.detect_signature(0, min(sig._range-j-1, len(wlist)-1) ) ## Done with the whole process of detecting ## Print the whole signature if len(rlist.signatures) > 0 or len(wlist.signatures) > 0: print '----------------------------------------' print 'The following signatures are detected:' if len(rlist.signatures) > 0: rlist.print_signature() rlist.gen_protobuf(sig._out_path) rlist.makeup_output(sig._out_path) if len(wlist.signatures) > 0: wlist.print_signature() wlist.gen_protobuf(sig._out_path) wlist.makeup_output(sig._out_path) #if len(accList) > 0: accList.gen_iorates(sig._out_path) def generateCSVs(single_trace_filename): """Generate the Read/Write Bandwidth figures""" trace_path, trace_filename = os.path.split(single_trace_filename) # the list contains all the accesses rlist = AccList() wlist = AccList() rlistEmpty = 1 wlistEmpty = 1 total_read_count = 0 total_write_count = 0 total_read_time = 0.0 total_write_time = 0.0 # Create and empty each CSV files, write the CSV title line output = os.path.join(sig._out_path, trace_filename + ".read.rate.csv") f = open(output, 'w') f.write("Time,Rate\n") f.close() output = os.path.join(sig._out_path, trace_filename + ".write.rate.csv") f = open(output, 'w') f.write("Time,Rate\n") f.close() output = os.path.join(sig._out_path, trace_filename + ".read.interval.csv") f = open(output, 'w') f.write("Begin,End\n") f.close() output = os.path.join(sig._out_path, trace_filename + ".write.interval.csv") f = open(output, 'w') f.write("Begin,End\n") f.close() output = os.path.join(sig._out_path, trace_filename + ".read.hole.sizes.csv") f = open(output, 'w') f.write("Time,Size\n") f.close() # open the trace file f = open(single_trace_filename, 'r') # skip the first several lines # Maybe the skipped lines are table heads for i in range(int(sig._format_prop['skip_lines'])): line = f.readline() # scan the file and put the access item into list i = 0 j = 0 eof = 0 # reaching the EOF? 
op_index = int(sig._format_prop['op']) debugPrint ('op_index: ', op_index) op = '' while 1: # handle 5000 operations once for i in range(sig._range): line = f.readline() if not line: eof = 1 break words = string.split(line) # there might be some blank lines if len(words) < 6: j+=1 continue ## only "READ" and "WRITE" will be saved #if words[-1].count('READ') == 0 and words[-1].count('WRITE') == 0: # to test chomob, only use write # if words[-1].count('WRITE') == 0: # j+=1 # continue ## save to list op = words[op_index].upper(); acc = Access(words) if acc.size >= 1: if op.count('READ')>0 or op == 'R': debugPrint("one READ") rlist.append(acc) total_read_count += 1 total_read_time += acc.endTime - acc.startTime if op.count('WRITE')>0 or op == 'W': debugPrint("one WRITE") wlist.append(acc) total_write_count += 1 total_write_time += acc.endTime - acc.startTime # finish reading a batch of 5000 lines of the trace file # Generate all kinds of CSV files using the rlist and wlist # here the write operation should be "append" # because it's handling 5000 lines each time if (len(rlist) > 0): rlist.toIORStep(trace_filename, 'r') # 'r' for read rlist.toDataAccessHoleSizes(trace_filename, 'r') rlistEmpty = 0 if (len(wlist) > 0): wlist.toIORStep(trace_filename, 'w') # 'w' for write wlistEmpty = 0 # empty the two lists rlist = AccList() wlist = AccList() gc.collect() # garbage collection # reached EOF? 
exit the "while 1" loop if eof == 1: break ## close the opened file f.close() if (rlistEmpty == 1): readF = open( os.path.join(sig._out_path, trace_filename + ".read.rate.csv"), 'a+') readF.write( "{0},{1}\n".format(0, 0) ) readF.close() readF = open( os.path.join(sig._out_path, trace_filename + ".read.hole.sizes.csv"), 'a+') readF.write( "{0},{1}\n".format(0, 0) ) readF.close() if (wlistEmpty == 1): writeF = open( os.path.join(sig._out_path, trace_filename + ".write.rate.csv"), 'a+') writeF.write( "{0},{1}\n".format(0, 0) ) writeF.close() # TODO: gnuplot for read and write rates # save the statistics information to files output = os.path.join(sig._out_path, trace_filename + ".stat.properties") f = open(output, 'a+') f.write("total_read_time: {0}\n".format(total_read_time)) f.write("total_read_count: {0}\n".format(total_read_count)) f.write("total_write_time: {0}\n".format(total_write_time)) f.write("total_write_count: {0}\n".format(total_write_count)) #f.write("global_total_read_time: {0}\n".format(sig._total_read_time)) #f.write("global_total_write_time: {0}\n".format(sig._total_write_time))
""" This package contains various tools for Japanese NLP tasks, although some may be applicable to any python project. See documentation of each module for details. """ __all__ = [ 'alternations', 'common', 'enum', 'exceptions', 'kana_table', 'maps', 'scripts', 'smart_cache', 'resources', ]
import errno
import os
import types
import typing as t

from werkzeug.utils import import_string


class ConfigAttribute:
    """Makes an attribute forward to the config"""

    def __init__(self, name: str, get_converter: t.Optional[t.Callable] = None) -> None:
        self.__name__ = name
        self.get_converter = get_converter

    def __get__(self, obj: t.Any, owner: t.Any = None) -> t.Any:
        # Accessed on the class itself: return the descriptor.
        if obj is None:
            return self
        rv = obj.config[self.__name__]
        if self.get_converter is not None:
            rv = self.get_converter(rv)
        return rv

    def __set__(self, obj: t.Any, value: t.Any) -> None:
        obj.config[self.__name__] = value


class Config(dict):
    """Works exactly like a dict but provides ways to fill it from files
    or special dictionaries.  There are two common patterns to populate the
    config.

    Either you can fill the config from a config file::

        app.config.from_pyfile('yourconfig.cfg')

    Or alternatively you can define the configuration options in the
    module that calls :meth:`from_object` or provide an import path to
    a module that should be loaded.  It is also possible to tell it to
    use the same module and with that provide the configuration values
    just before the call::

        DEBUG = True
        SECRET_KEY = 'development key'
        app.config.from_object(__name__)

    In both cases (loading from any Python file or loading from modules),
    only uppercase keys are added to the config.  This makes it possible to use
    lowercase values in the config file for temporary values that are not added
    to the config or to define the config keys in the same file that implements
    the application.

    Probably the most interesting way to load configurations is from an
    environment variable pointing to a file::

        app.config.from_envvar('YOURAPPLICATION_SETTINGS')

    In this case before launching the application you have to set this
    environment variable to the file you want to use.  On Linux and OS X
    use the export statement::

        export YOURAPPLICATION_SETTINGS='/path/to/config/file'

    On windows use `set` instead.

    :param root_path: path to which files are read relative from.  When the
                      config object is created by the application, this is
                      the application's :attr:`~flask.Flask.root_path`.
    :param defaults: an optional dictionary of default values
    """

    def __init__(self, root_path: str, defaults: t.Optional[dict] = None) -> None:
        dict.__init__(self, defaults or {})
        self.root_path = root_path

    def from_envvar(self, variable_name: str, silent: bool = False) -> bool:
        """Loads a configuration from an environment variable pointing to
        a configuration file.  This is basically just a shortcut with nicer
        error messages for this line of code::

            app.config.from_pyfile(os.environ['YOURAPPLICATION_SETTINGS'])

        :param variable_name: name of the environment variable
        :param silent: set to ``True`` if you want silent failure for missing
                       files.
        :return: bool. ``True`` if able to load config, ``False`` otherwise.
        """
        rv = os.environ.get(variable_name)
        if not rv:
            if silent:
                return False
            raise RuntimeError(
                f"The environment variable {variable_name!r} is not set"
                " and as such configuration could not be loaded. Set"
                " this variable and make it point to a configuration"
                " file"
            )
        return self.from_pyfile(rv, silent=silent)

    def from_pyfile(self, filename: str, silent: bool = False) -> bool:
        """Updates the values in the config from a Python file.  This function
        behaves as if the file was imported as module with the
        :meth:`from_object` function.

        :param filename: the filename of the config.  This can either be an
                         absolute filename or a filename relative to the
                         root path.
        :param silent: set to ``True`` if you want silent failure for missing
                       files.

        .. versionadded:: 0.7
           `silent` parameter.
        """
        filename = os.path.join(self.root_path, filename)
        d = types.ModuleType("config")
        d.__file__ = filename
        try:
            with open(filename, mode="rb") as config_file:
                exec(compile(config_file.read(), filename, "exec"), d.__dict__)
        except OSError as e:
            if silent and e.errno in (errno.ENOENT, errno.EISDIR, errno.ENOTDIR):
                return False
            e.strerror = f"Unable to load configuration file ({e.strerror})"
            raise
        self.from_object(d)
        return True

    def from_object(self, obj: t.Union[object, str]) -> None:
        """Updates the values from the given object.  An object can be of one
        of the following two types:

        -   a string: in this case the object with that name will be imported
        -   an actual object reference: that object is used directly

        Objects are usually either modules or classes. :meth:`from_object`
        loads only the uppercase attributes of the module/class. A ``dict``
        object will not work with :meth:`from_object` because the keys of a
        ``dict`` are not attributes of the ``dict`` class.

        Example of module-based configuration::

            app.config.from_object('yourapplication.default_config')
            from yourapplication import default_config
            app.config.from_object(default_config)

        Nothing is done to the object before loading. If the object is a
        class and has ``@property`` attributes, it needs to be
        instantiated before being passed to this method.

        You should not use this function to load the actual configuration but
        rather configuration defaults.  The actual config should be loaded
        with :meth:`from_pyfile` and ideally from a location not within the
        package because the package might be installed system wide.

        See :ref:`config-dev-prod` for an example of class-based configuration
        using :meth:`from_object`.

        :param obj: an import name or object
        """
        if isinstance(obj, str):
            obj = import_string(obj)
        for key in dir(obj):
            if key.isupper():
                self[key] = getattr(obj, key)

    def from_file(
        self,
        filename: str,
        load: t.Callable[[t.IO[t.Any]], t.Mapping],
        silent: bool = False,
    ) -> bool:
        """Update the values in the config from a file that is loaded
        using the ``load`` parameter. The loaded data is passed to the
        :meth:`from_mapping` method.

        .. code-block:: python

            import toml
            app.config.from_file("config.toml", load=toml.load)

        :param filename: The path to the data file. This can be an
            absolute path or relative to the config root path.
        :param load: A callable that takes a file handle and returns a
            mapping of loaded data from the file.
        :type load: ``Callable[[Reader], Mapping]`` where ``Reader``
            implements a ``read`` method.
        :param silent: Ignore the file if it doesn't exist.

        .. versionadded:: 2.0
        """
        filename = os.path.join(self.root_path, filename)

        try:
            with open(filename) as f:
                obj = load(f)
        except OSError as e:
            # Treat the same errno values as "missing file" that
            # from_pyfile does (ENOTDIR included for consistency).
            if silent and e.errno in (errno.ENOENT, errno.EISDIR, errno.ENOTDIR):
                return False

            e.strerror = f"Unable to load configuration file ({e.strerror})"
            raise

        return self.from_mapping(obj)

    def from_json(self, filename: str, silent: bool = False) -> bool:
        """Update the values in the config from a JSON file. The loaded
        data is passed to the :meth:`from_mapping` method.

        :param filename: The path to the JSON file. This can be an
            absolute path or relative to the config root path.
        :param silent: Ignore the file if it doesn't exist.

        .. deprecated:: 2.0.0
            Will be removed in Flask 2.1. Use :meth:`from_file` instead.
            This was removed early in 2.0.0, was added back in 2.0.1.

        .. versionadded:: 0.11
        """
        import warnings
        from . import json

        warnings.warn(
            "'from_json' is deprecated and will be removed in Flask"
            " 2.1. Use 'from_file(path, json.load)' instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        return self.from_file(filename, json.load, silent=silent)

    def from_mapping(
        self, mapping: t.Optional[t.Mapping[str, t.Any]] = None, **kwargs: t.Any
    ) -> bool:
        """Updates the config like :meth:`update` ignoring items with
        non-upper keys.

        .. versionadded:: 0.11
        """
        mappings: t.Dict[str, t.Any] = {}
        if mapping is not None:
            mappings.update(mapping)
        mappings.update(kwargs)
        for key, value in mappings.items():
            if key.isupper():
                self[key] = value
        return True

    def get_namespace(
        self, namespace: str, lowercase: bool = True, trim_namespace: bool = True
    ) -> t.Dict[str, t.Any]:
        """Returns a dictionary containing a subset of configuration options
        that match the specified namespace/prefix. Example usage::

            app.config['IMAGE_STORE_TYPE'] = 'fs'
            app.config['IMAGE_STORE_PATH'] = '/var/app/images'
            app.config['IMAGE_STORE_BASE_URL'] = 'http://img.website.com'
            image_store_config = app.config.get_namespace('IMAGE_STORE_')

        The resulting dictionary `image_store_config` would look like::

            {
                'type': 'fs',
                'path': '/var/app/images',
                'base_url': 'http://img.website.com'
            }

        This is often useful when configuration options map directly to
        keyword arguments in functions or class constructors.

        :param namespace: a configuration namespace
        :param lowercase: a flag indicating if the keys of the resulting
                          dictionary should be lowercase
        :param trim_namespace: a flag indicating if the keys of the resulting
                               dictionary should not include the namespace

        .. versionadded:: 0.11
        """
        rv = {}
        for k, v in self.items():
            if not k.startswith(namespace):
                continue
            if trim_namespace:
                key = k[len(namespace) :]
            else:
                key = k
            if lowercase:
                key = key.lower()
            rv[key] = v
        return rv

    def __repr__(self) -> str:
        return f"<{type(self).__name__} {dict.__repr__(self)}>"
# Tests for sympy.physics.optics.utils: refraction, Fresnel coefficients,
# deviation, Brewster/critical angles, lens/mirror formulas.
from sympy.core.numbers import comp, Rational
from sympy.physics.optics.utils import (refraction_angle, fresnel_coefficients,
        deviation, brewster_angle, critical_angle, lens_makers_formula,
        mirror_formula, lens_formula, hyperfocal_distance,
        transverse_magnification)
from sympy.physics.optics.medium import Medium
from sympy.physics.units import e0

from sympy import symbols, sqrt, Matrix, oo
from sympy.geometry.point import Point3D
from sympy.geometry.line import Ray3D
from sympy.geometry.plane import Plane
from sympy.utilities.pytest import raises

# "approximately equal": True when a and b agree to n decimal digits.
ae = lambda a, b, n: comp(a, b, 10**-n)


def test_refraction_angle():
    """refraction_angle accepts rays, matrices, lists/tuples and planes,
    and returns 0 on total internal reflection."""
    n1, n2 = symbols('n1, n2')
    m1 = Medium('m1')
    m2 = Medium('m2')
    r1 = Ray3D(Point3D(-1, -1, 1), Point3D(0, 0, 0))
    i = Matrix([1, 1, 1])
    n = Matrix([0, 0, 1])
    normal_ray = Ray3D(Point3D(0, 0, 0), Point3D(0, 0, 1))
    P = Plane(Point3D(0, 0, 0), normal_vector=[0, 0, 1])
    # the normal may be given as a Matrix, list, tuple, Ray3D or Plane
    assert refraction_angle(r1, 1, 1, n) == Matrix([[1], [1], [-1]])
    assert refraction_angle([1, 1, 1], 1, 1, n) == Matrix([[1], [1], [-1]])
    assert refraction_angle((1, 1, 1), 1, 1, n) == Matrix([[1], [1], [-1]])
    assert refraction_angle(i, 1, 1, [0, 0, 1]) == Matrix([[1], [1], [-1]])
    assert refraction_angle(i, 1, 1, (0, 0, 1)) == Matrix([[1], [1], [-1]])
    assert refraction_angle(i, 1, 1, normal_ray) == Matrix([[1], [1], [-1]])
    assert refraction_angle(i, 1, 1, plane=P) == Matrix([[1], [1], [-1]])
    assert refraction_angle(r1, 1, 1, plane=P) == \
        Ray3D(Point3D(0, 0, 0), Point3D(1, 1, -1))
    assert refraction_angle(r1, m1, 1.33, plane=P) == \
        Ray3D(Point3D(0, 0, 0), Point3D(Rational(100, 133), Rational(100, 133),
                                        -789378201649271*sqrt(3)/1000000000000000))
    assert refraction_angle(r1, 1, m2, plane=P) == \
        Ray3D(Point3D(0, 0, 0), Point3D(1, 1, -1))
    assert refraction_angle(r1, n1, n2, plane=P) == \
        Ray3D(Point3D(0, 0, 0),
              Point3D(n1/n2, n1/n2, -sqrt(3)*sqrt(-2*n1**2/(3*n2**2) + 1)))
    assert refraction_angle(r1, 1.33, 1, plane=P) == 0  # TIR
    assert refraction_angle(r1, 1, 1, normal_ray) == \
        Ray3D(Point3D(0, 0, 0), direction_ratio=[1, 1, -1])
    assert ae(refraction_angle(0.5, 1, 2), 0.24207, 5)
    assert ae(refraction_angle(0.5, 2, 1), 1.28293, 5)
    raises(ValueError, lambda: refraction_angle(r1, m1, m2, normal_ray, P))
    raises(TypeError, lambda: refraction_angle(m1, m1, m2))  # can add other values for arg[0]
    raises(TypeError, lambda: refraction_angle(r1, m1, m2, None, i))
    raises(TypeError, lambda: refraction_angle(r1, m1, m2, m2))


def test_fresnel_coefficients():
    """Fresnel coefficients for real and (on TIR) complex outputs."""
    assert all(ae(i, j, 5) for i, j in zip(
        fresnel_coefficients(0.5, 1, 1.33),
        [0.11163, -0.17138, 0.83581, 0.82862]))
    assert all(ae(i, j, 5) for i, j in zip(
        fresnel_coefficients(0.5, 1.33, 1),
        [-0.07726, 0.20482, 1.22724, 1.20482]))
    m1 = Medium('m1')
    m2 = Medium('m2', n=2)
    assert all(ae(i, j, 5) for i, j in zip(
        fresnel_coefficients(0.3, m1, m2),
        [0.31784, -0.34865, 0.65892, 0.65135]))
    # beyond the critical angle the coefficients are complex:
    # compare real and imaginary parts separately
    ans = [[-0.23563, -0.97184], [0.81648, -0.57738]]
    got = fresnel_coefficients(0.6, m2, m1)
    for i, j in zip(got, ans):
        for a, b in zip(i.as_real_imag(), j):
            assert ae(a, b, 5)


def test_deviation():
    """Deviation of a refracted ray; None on total internal reflection."""
    n1, n2 = symbols('n1, n2')
    r1 = Ray3D(Point3D(-1, -1, 1), Point3D(0, 0, 0))
    n = Matrix([0, 0, 1])
    i = Matrix([-1, -1, -1])
    normal_ray = Ray3D(Point3D(0, 0, 0), Point3D(0, 0, 1))
    P = Plane(Point3D(0, 0, 0), normal_vector=[0, 0, 1])
    assert deviation(r1, 1, 1, normal=n) == 0
    assert deviation(r1, 1, 1, plane=P) == 0
    assert deviation(r1, 1, 1.1, plane=P).evalf(3) + 0.119 < 1e-3
    assert deviation(i, 1, 1.1, normal=normal_ray).evalf(3) + 0.119 < 1e-3
    assert deviation(r1, 1.33, 1, plane=P) is None  # TIR
    assert deviation(r1, 1, 1, normal=[0, 0, 1]) == 0
    assert deviation([-1, -1, -1], 1, 1, normal=[0, 0, 1]) == 0
    assert ae(deviation(0.5, 1, 2), -0.25793, 5)
    assert ae(deviation(0.5, 2, 1), 0.78293, 5)


def test_brewster_angle():
    """Brewster angle for media given by n or permittivity, or plain floats."""
    m1 = Medium('m1', n=1)
    m2 = Medium('m2', n=1.33)
    assert ae(brewster_angle(m1, m2), 0.93, 2)
    m1 = Medium('m1', permittivity=e0, n=1)
    m2 = Medium('m2', permittivity=e0, n=1.33)
    assert ae(brewster_angle(m1, m2), 0.93, 2)
    assert ae(brewster_angle(1, 1.33), 0.93, 2)


def test_critical_angle():
    m1 = Medium('m1', n=1)
    m2 = Medium('m2', n=1.33)
    assert ae(critical_angle(m2, m1), 0.85, 2)


def test_lens_makers_formula():
    """Lensmaker's equation, symbolic and numeric."""
    n1, n2 = symbols('n1, n2')
    m1 = Medium('m1', permittivity=e0, n=1)
    m2 = Medium('m2', permittivity=e0, n=1.33)
    assert lens_makers_formula(n1, n2, 10, -10) == 5*n2/(n1 - n2)
    assert ae(lens_makers_formula(m1, m2, 10, -10), -20.15, 2)
    assert ae(lens_makers_formula(1.33, 1, 10, -10), 15.15, 2)


def test_mirror_formula():
    """1/v + 1/u = 1/f, including limits at infinity."""
    u, v, f = symbols('u, v, f')
    assert mirror_formula(focal_length=f, u=u) == f*u/(-f + u)
    assert mirror_formula(focal_length=f, v=v) == f*v/(-f + v)
    assert mirror_formula(u=u, v=v) == u*v/(u + v)
    assert mirror_formula(u=oo, v=v) == v
    assert mirror_formula(u=oo, v=oo) is oo
    assert mirror_formula(focal_length=oo, u=u) == -u
    assert mirror_formula(u=u, v=oo) == u
    assert mirror_formula(focal_length=oo, v=oo) is oo
    assert mirror_formula(focal_length=f, v=oo) == f
    assert mirror_formula(focal_length=oo, v=v) == -v
    assert mirror_formula(focal_length=oo, u=oo) is oo
    assert mirror_formula(focal_length=f, u=oo) == f
    assert mirror_formula(focal_length=oo, u=u) == -u
    # exactly two of the three quantities may be supplied
    raises(ValueError, lambda: mirror_formula(focal_length=f, u=u, v=v))


def test_lens_formula():
    """1/v - 1/u = 1/f, including limits at infinity."""
    u, v, f = symbols('u, v, f')
    assert lens_formula(focal_length=f, u=u) == f*u/(f + u)
    assert lens_formula(focal_length=f, v=v) == f*v/(f - v)
    assert lens_formula(u=u, v=v) == u*v/(u - v)
    assert lens_formula(u=oo, v=v) == v
    assert lens_formula(u=oo, v=oo) is oo
    assert lens_formula(focal_length=oo, u=u) == u
    assert lens_formula(u=u, v=oo) == -u
    assert lens_formula(focal_length=oo, v=oo) is -oo
    assert lens_formula(focal_length=oo, v=v) == v
    assert lens_formula(focal_length=f, v=oo) == -f
    assert lens_formula(focal_length=oo, u=oo) is oo
    assert lens_formula(focal_length=oo, u=u) == u
    assert lens_formula(focal_length=f, u=oo) == f
    # exactly two of the three quantities may be supplied
    raises(ValueError, lambda: lens_formula(focal_length=f, u=u, v=v))


def test_hyperfocal_distance():
    f, N, c = symbols('f, N, c')
    assert hyperfocal_distance(f=f, N=N, c=c) == f**2/(N*c)
    assert ae(hyperfocal_distance(f=0.5, N=8, c=0.0033), 9.47, 2)


def test_transverse_magnification():
    si, so = symbols('si, so')
    assert transverse_magnification(si, so) == -si/so
    assert transverse_magnification(30, 15) == -2
"""Provides a layer of abstraction for the issue tracker API.""" import logging from apiclient import discovery from apiclient import errors import httplib2 _DISCOVERY_URI = ('https://monorail-prod.appspot.com' '/_ah/api/discovery/v1/apis/{api}/{apiVersion}/rest') class IssueTrackerService(object): """Class for updating bug issues.""" def __init__(self, http=None, additional_credentials=None): """Initializes an object for adding and updating bugs on the issue tracker. This object can be re-used to make multiple requests without calling apliclient.discovery.build multiple times. This class makes requests to the Monorail API. API explorer: https://goo.gl/xWd0dX Args: http: A Http object to pass to request.execute; this should be an Http object that's already authenticated via OAuth2. additional_credentials: A credentials object, e.g. an instance of oauth2client.client.SignedJwtAssertionCredentials. This includes the email and secret key of a service account. """ self._http = http or httplib2.Http() if additional_credentials: additional_credentials.authorize(self._http) self._service = discovery.build( 'monorail', 'v1', discoveryServiceUrl=_DISCOVERY_URI, http=self._http) def AddBugComment(self, bug_id, comment, status=None, cc_list=None, merge_issue=None, labels=None, owner=None): """Adds a comment with the bisect results to the given bug. Args: bug_id: Bug ID of the issue to update. comment: Bisect results information. status: A string status for bug, e.g. Assigned, Duplicate, WontFix, etc. cc_list: List of email addresses of users to add to the CC list. merge_issue: ID of the issue to be merged into; specifying this option implies that the status should be "Duplicate". labels: List of labels for bug. owner: Owner of the bug. Returns: True if successful, False otherwise. """ if not bug_id or bug_id < 0: return False body = {'content': comment} updates = {} # Mark issue as duplicate when relevant bug ID is found in the datastore. 
# Avoid marking an issue as duplicate of itself. if merge_issue and int(merge_issue) != bug_id: status = 'Duplicate' updates['mergedInto'] = merge_issue logging.info('Bug %s marked as duplicate of %s', bug_id, merge_issue) if status: updates['status'] = status if cc_list: updates['cc'] = cc_list if labels: updates['labels'] = labels if owner: updates['owner'] = owner body['updates'] = updates return self._MakeCommentRequest(bug_id, body) def List(self, **kwargs): """Make a request to the issue tracker to list bugs.""" request = self._service.issues().list(projectId='chromium', **kwargs) return self._ExecuteRequest(request) def _MakeCommentRequest(self, bug_id, body): """Make a request to the issue tracker to update a bug.""" request = self._service.issues().comments().insert( projectId='chromium', issueId=bug_id, body=body) response = self._ExecuteRequest(request) if not response: logging.error('Error updating bug %s with body %s', bug_id, body) return False return True def NewBug(self, title, description, labels=None, components=None, owner=None): """Creates a new bug. Args: title: The short title text of the bug. description: The body text for the bug. labels: Starting labels for the bug. components: Starting components for the bug. owner: Starting owner account name. Returns: The new bug ID if successfully created, or None. """ body = { 'title': title, 'summary': title, 'description': description, 'labels': labels or [], 'components': components or [], 'status': 'Assigned', } if owner: body['owner'] = {'name': owner} return self._MakeCreateRequest(body) def _MakeCreateRequest(self, body): """Makes a request to create a new bug. Args: body: The request body parameter dictionary. Returns: A bug ID if successful, or None otherwise. 
""" request = self._service.issues().insert(projectId='chromium', body=body) response = self._ExecuteRequest(request) if response and 'id' in response: return response['id'] return None def GetLastBugCommentsAndTimestamp(self, bug_id): """Gets last updated comments and timestamp in the given bug. Args: bug_id: Bug ID of the issue to update. Returns: A dictionary with last comment and timestamp, or None on failure. """ if not bug_id or bug_id < 0: return None response = self._MakeGetCommentsRequest(bug_id) if response and all(v in response.keys() for v in ['totalResults', 'items']): bug_comments = response.get('items')[response.get('totalResults') - 1] if bug_comments.get('content') and bug_comments.get('published'): return { 'comment': bug_comments.get('content'), 'timestamp': bug_comments.get('published') } return None def _MakeGetCommentsRequest(self, bug_id): """Make a request to the issue tracker to get comments in the bug.""" # TODO (prasadv): By default the max number of comments retrieved in # one request is 100. Since bisect-fyi jobs may have more then 100 # comments for now we set this maxResults count as 10000. # Remove this max count once we find a way to clear old comments # on FYI issues. request = self._service.issues().comments().list( projectId='chromium', issueId=bug_id, maxResults=10000) return self._ExecuteRequest(request) def _ExecuteRequest(self, request): """Make a request to the issue tracker. Args: request: The request object, which has a execute method. Returns: The response if there was one, or else None. """ try: response = request.execute(http=self._http) return response except errors.HttpError as e: logging.error(e) return None
from flask import render_template, redirect, url_for, flash, abort

from purchasing.decorators import requires_roles
from purchasing.data.stages import Stage
from purchasing.data.flows import Flow
from purchasing.conductor.forms import FlowForm, NewFlowForm

from purchasing.conductor.manager import blueprint


@blueprint.route('/flow/new', methods=['GET', 'POST'])
@requires_roles('conductor', 'admin', 'superadmin')
def new_flow():
    '''Create a new flow

    :status 200: Render the new flow template
    :status 302: Try to create a new flow using the
        :py:class:`~purchasing.conductor.forms.NewFlowForm`, redirect to
        the flows list view if successful
    '''
    stage_choices = Stage.choices_factory()
    form = NewFlowForm(stages=stage_choices)

    if form.validate_on_submit():
        ordered_stage_ids = []
        for entry in form.stage_order.entries:
            try:
                # an integer value points at an existing stage
                ordered_stage_ids.append(int(entry.data))
            except ValueError:
                # any other value names a brand-new stage to create first
                ordered_stage_ids.append(Stage.create(name=entry.data).id)

        Flow.create(flow_name=form.flow_name.data, stage_order=ordered_stage_ids)
        flash('Flow created successfully!', 'alert-success')
        return redirect(url_for('conductor.flows_list'))

    return render_template('conductor/flows/new.html', stages=stage_choices,
                           form=form)


@blueprint.route('/flows')
@requires_roles('conductor', 'admin', 'superadmin')
def flows_list():
    '''List all flows

    :status 200: Render the all flows list template
    '''
    all_flows = Flow.query.order_by(Flow.flow_name).all()
    # split the flows into the two buckets the template expects
    archived = [flow for flow in all_flows if flow.is_archived]
    active = [flow for flow in all_flows if not flow.is_archived]

    return render_template('conductor/flows/browse.html',
                           active=active, archived=archived)


@blueprint.route('/flow/<int:flow_id>', methods=['GET', 'POST'])
@requires_roles('conductor', 'admin', 'superadmin')
def flow_detail(flow_id):
    '''View/edit a flow's details

    :status 200: Render the flow edit template
    :status 302: Post changes to the a flow using the submitted
        :py:class:`~purchasing.conductor.forms.FlowForm`, redirect back to
        the current flow's detail page if successful
    '''
    flow = Flow.query.get(flow_id)
    if flow is None:
        abort(404)

    form = FlowForm(obj=flow)
    if form.validate_on_submit():
        flow.update(
            flow_name=form.data['flow_name'],
            is_archived=form.data['is_archived']
        )
        flash('Flow successfully updated', 'alert-success')
        return redirect(url_for('conductor.flow_detail', flow_id=flow.id))

    return render_template('conductor/flows/edit.html', form=form, flow=flow)
import threading
from collections import defaultdict

from funcy import once, decorator
from django.db import DEFAULT_DB_ALIAS, DatabaseError
from django.db.backends.utils import CursorWrapper
from django.db.transaction import Atomic, get_connection, on_commit

from .utils import monkey_mix


__all__ = ('queue_when_in_transaction', 'install_cacheops_transaction_support',
           'transaction_states')


class TransactionState(list):
    """A stack of currently open transaction/savepoint contexts for one
    database alias.

    Each stack entry is a dict with:
      - 'cbs':   callbacks queued to run once the outermost transaction commits
      - 'dirty': True if data-changing SQL was executed inside this context
    """

    def begin(self):
        # Open a new transaction or savepoint context.
        self.append({'cbs': [], 'dirty': False})

    def commit(self):
        context = self.pop()
        if self:
            # savepoint: fold queued callbacks and dirtiness into the parent
            self[-1]['cbs'].extend(context['cbs'])
            self[-1]['dirty'] = self[-1]['dirty'] or context['dirty']
        else:
            # transaction: the outermost context committed, fire the callbacks
            for func, args, kwargs in context['cbs']:
                func(*args, **kwargs)

    def rollback(self):
        # Drop the innermost context together with its queued callbacks.
        self.pop()

    def push(self, item):
        # Queue a (func, args, kwargs) triple on the innermost context.
        self[-1]['cbs'].append(item)

    def mark_dirty(self):
        # Record that data-changing SQL ran in the innermost context.
        self[-1]['dirty'] = True

    def is_dirty(self):
        # True if any open context executed data-changing SQL.
        return any(context['dirty'] for context in self)


class TransactionStates(threading.local):
    """Thread-local mapping of database alias -> TransactionState."""

    def __init__(self):
        super(TransactionStates, self).__init__()
        self._states = defaultdict(TransactionState)

    def __getitem__(self, key):
        # A falsy key (None/'') maps to the default database alias.
        return self._states[key or DEFAULT_DB_ALIAS]

    def is_dirty(self, dbs):
        # True if any of the given databases has pending changes.
        return any(self[db].is_dirty() for db in dbs)

transaction_states = TransactionStates()


@decorator
def queue_when_in_transaction(call):
    """Defer ``call`` until the surrounding transaction commits; run it
    immediately when no transaction is open on ``call.using``."""
    if transaction_states[call.using]:
        transaction_states[call.using].push((call, (), {}))
    else:
        return call()


class AtomicMixIn(object):
    """Mixed into django's ``Atomic`` (via monkey_mix) to mirror the
    transaction/savepoint lifecycle into ``transaction_states``."""

    def __enter__(self):
        entering = not transaction_states[self.using]
        transaction_states[self.using].begin()
        self._no_monkey.__enter__(self)
        if entering:
            # Only the outermost atomic registers the commit hook.
            on_commit(transaction_states[self.using].commit, self.using)

    def __exit__(self, exc_type, exc_value, traceback):
        connection = get_connection(self.using)
        try:
            self._no_monkey.__exit__(self, exc_type, exc_value, traceback)
        except DatabaseError:
            transaction_states[self.using].rollback()
        else:
            if not connection.closed_in_transaction and exc_type is None and \
                    not connection.needs_rollback:
                if transaction_states[self.using]:
                    transaction_states[self.using].commit()
            else:
                transaction_states[self.using].rollback()


class CursorWrapperMixin(object):
    """Mixed into ``CursorWrapper`` to mark open transactions dirty when
    data-changing SQL is executed."""

    def callproc(self, procname, params=None):
        result = self._no_monkey.callproc(self, procname, params)
        if transaction_states[self.db.alias]:
            # A stored procedure may do anything, so assume a write.
            transaction_states[self.db.alias].mark_dirty()
        return result

    def execute(self, sql, params=None):
        result = self._no_monkey.execute(self, sql, params)
        if transaction_states[self.db.alias] and is_sql_dirty(sql):
            transaction_states[self.db.alias].mark_dirty()
        return result

    def executemany(self, sql, param_list):
        result = self._no_monkey.executemany(self, sql, param_list)
        if transaction_states[self.db.alias] and is_sql_dirty(sql):
            transaction_states[self.db.alias].mark_dirty()
        return result


# Identifier characters used to check that a matched keyword is a whole word.
CHARS = set('abcdefghijklmnoprqstuvwxyz_')

def is_sql_dirty(sql):
    """Return True if ``sql`` contains a standalone update/insert/delete
    keyword, i.e. looks like a data-changing statement."""
    # This should not happen as using bytes in Python 3 is against db protocol,
    # but some people will pass it anyway
    if isinstance(sql, bytes):
        sql = sql.decode()
    # NOTE: not using regex here for speed
    sql = sql.lower()
    for action in ('update', 'insert', 'delete'):
        p = sql.find(action)
        if p == -1:
            continue
        # Whole-word check: the char before and after must not be
        # identifier characters.
        start, end = p - 1, p + len(action)
        if (start < 0 or sql[start] not in CHARS) and (end >= len(sql) or sql[end] not in CHARS):
            return True
    else:
        # for/else: only reached when no action keyword matched
        # (the return True above exits the loop early).
        return False


@once
def install_cacheops_transaction_support():
    """Install the monkey patches exactly once per process."""
    monkey_mix(Atomic, AtomicMixIn)
    monkey_mix(CursorWrapper, CursorWrapperMixin)
""" flaskbb.management.views ~~~~~~~~~~~~~~~~~~~~~~~~ This module handles the management views. :copyright: (c) 2014 by the FlaskBB Team. :license: BSD, see LICENSE for more details. """ import sys from flask import (Blueprint, current_app, request, redirect, url_for, flash, jsonify, __version__ as flask_version) from flask_login import current_user, login_fresh from flask_plugins import get_all_plugins, get_plugin, get_plugin_from_all from flask_babelplus import gettext as _ from flask_allows import Permission, Not from flaskbb import __version__ as flaskbb_version from flaskbb._compat import iteritems from flaskbb.forum.forms import UserSearchForm from flaskbb.utils.settings import flaskbb_config from flaskbb.utils.requirements import (IsAtleastModerator, IsAdmin, CanBanUser, CanEditUser, IsAtleastSuperModerator) from flaskbb.extensions import db, allows from flaskbb.utils.helpers import (render_template, time_diff, time_utcnow, get_online_users) from flaskbb.user.models import Guest, User, Group from flaskbb.forum.models import Post, Topic, Forum, Category, Report from flaskbb.management.models import Setting, SettingsGroup from flaskbb.management.forms import (AddUserForm, EditUserForm, AddGroupForm, EditGroupForm, EditForumForm, AddForumForm, CategoryForm) management = Blueprint("management", __name__) @management.before_request def check_fresh_login(): """Checks if the login is fresh for the current user, otherwise the user has to reauthenticate.""" if not login_fresh(): return current_app.login_manager.needs_refresh() @management.route("/") @allows.requires(IsAtleastModerator) def overview(): # user and group stats banned_users = User.query.filter( Group.banned == True, Group.id == User.primary_group_id ).count() if not current_app.config["REDIS_ENABLED"]: online_users = User.query.filter(User.lastseen >= time_diff()).count() else: online_users = len(get_online_users()) stats = { # user stats "all_users": User.query.count(), "banned_users": banned_users, 
"online_users": online_users, "all_groups": Group.query.count(), # forum stats "report_count": Report.query.count(), "topic_count": Topic.query.count(), "post_count": Post.query.count(), # misc stats "plugins": get_all_plugins(), "python_version": "%s.%s" % (sys.version_info[0], sys.version_info[1]), "flask_version": flask_version, "flaskbb_version": flaskbb_version } return render_template("management/overview.html", **stats) @management.route("/settings", methods=["GET", "POST"]) @management.route("/settings/<path:slug>", methods=["GET", "POST"]) @allows.requires(IsAdmin) def settings(slug=None): slug = slug if slug else "general" # get the currently active group active_group = SettingsGroup.query.filter_by(key=slug).first_or_404() # get all groups - used to build the navigation all_groups = SettingsGroup.query.all() SettingsForm = Setting.get_form(active_group) old_settings = Setting.get_settings(active_group) new_settings = {} form = SettingsForm() if form.validate_on_submit(): for key, values in iteritems(old_settings): try: # check if the value has changed if values['value'] == form[key].data: continue else: new_settings[key] = form[key].data except KeyError: pass Setting.update(settings=new_settings, app=current_app) flash(_("Settings saved."), "success") else: for key, values in iteritems(old_settings): try: form[key].data = values['value'] except (KeyError, ValueError): pass return render_template("management/settings.html", form=form, all_groups=all_groups, active_group=active_group) @management.route("/users", methods=['GET', 'POST']) @allows.requires(IsAtleastModerator) def users(): page = request.args.get("page", 1, type=int) search_form = UserSearchForm() if search_form.validate(): users = search_form.get_results().\ paginate(page, flaskbb_config['USERS_PER_PAGE'], False) return render_template("management/users.html", users=users, search_form=search_form) users = User.query. 
\ order_by(User.id.asc()).\ paginate(page, flaskbb_config['USERS_PER_PAGE'], False) return render_template("management/users.html", users=users, search_form=search_form) @management.route("/users/<int:user_id>/edit", methods=["GET", "POST"]) @allows.requires(IsAtleastModerator) def edit_user(user_id): user = User.query.filter_by(id=user_id).first_or_404() if not Permission(CanEditUser, identity=current_user): flash(_("You are not allowed to edit this user."), "danger") return redirect(url_for("management.users")) member_group = db.and_(*[db.not_(getattr(Group, p)) for p in ['admin', 'mod', 'super_mod', 'banned', 'guest']]) filt = db.or_( Group.id.in_(g.id for g in current_user.groups), member_group ) if Permission(IsAtleastSuperModerator, identity=current_user): filt = db.or_(filt, Group.mod) if Permission(IsAdmin, identity=current_user): filt = db.or_(filt, Group.admin, Group.super_mod) if Permission(CanBanUser, identity=current_user): filt = db.or_(filt, Group.banned) group_query = Group.query.filter(filt) form = EditUserForm(user) form.primary_group.query = group_query form.secondary_groups.query = group_query if form.validate_on_submit(): form.populate_obj(user) user.primary_group_id = form.primary_group.data.id # Don't override the password if form.password.data: user.password = form.password.data user.save(groups=form.secondary_groups.data) flash(_("User updated."), "success") return redirect(url_for("management.edit_user", user_id=user.id)) return render_template("management/user_form.html", form=form, title=_("Edit User")) @management.route("/users/delete", methods=["POST"]) @management.route("/users/<int:user_id>/delete", methods=["POST"]) @allows.requires(IsAdmin) def delete_user(user_id=None): # ajax request if request.is_xhr: ids = request.get_json()["ids"] data = [] for user in User.query.filter(User.id.in_(ids)).all(): # do not delete current user if current_user.id == user.id: continue if user.delete(): data.append({ "id": user.id, "type": "delete", 
"reverse": False, "reverse_name": None, "reverse_url": None }) return jsonify( message="{} users deleted.".format(len(data)), category="success", data=data, status=200 ) user = User.query.filter_by(id=user_id).first_or_404() if current_user.id == user.id: flash(_("You cannot delete yourself.", "danger")) return redirect(url_for("management.users")) user.delete() flash(_("User deleted."), "success") return redirect(url_for("management.users")) @management.route("/users/add", methods=["GET", "POST"]) @allows.requires(IsAdmin) def add_user(): form = AddUserForm() if form.validate_on_submit(): form.save() flash(_("User added."), "success") return redirect(url_for("management.users")) return render_template("management/user_form.html", form=form, title=_("Add User")) @management.route("/users/banned", methods=["GET", "POST"]) @allows.requires(IsAtleastModerator) def banned_users(): page = request.args.get("page", 1, type=int) search_form = UserSearchForm() users = User.query.filter( Group.banned == True, Group.id == User.primary_group_id ).paginate(page, flaskbb_config['USERS_PER_PAGE'], False) if search_form.validate(): users = search_form.get_results().\ paginate(page, flaskbb_config['USERS_PER_PAGE'], False) return render_template("management/banned_users.html", users=users, search_form=search_form) return render_template("management/banned_users.html", users=users, search_form=search_form) @management.route("/users/ban", methods=["POST"]) @management.route("/users/<int:user_id>/ban", methods=["POST"]) @allows.requires(IsAtleastModerator) def ban_user(user_id=None): if not Permission(CanBanUser, identity=current_user): flash(_("You do not have the permissions to ban this user."), "danger") return redirect(url_for("management.overview")) # ajax request if request.is_xhr: ids = request.get_json()["ids"] data = [] users = User.query.filter(User.id.in_(ids)).all() for user in users: # don't let a user ban himself and do not allow a moderator to ban # a admin user if ( 
current_user.id == user.id or Permission(IsAdmin, identity=user) and Permission(Not(IsAdmin), current_user) ): continue elif user.ban(): data.append({ "id": user.id, "type": "ban", "reverse": "unban", "reverse_name": _("Unban"), "reverse_url": url_for("management.unban_user", user_id=user.id) }) return jsonify( message="{} users banned.".format(len(data)), category="success", data=data, status=200 ) user = User.query.filter_by(id=user_id).first_or_404() # Do not allow moderators to ban admins if Permission(IsAdmin, identity=user) and \ Permission(Not(IsAdmin), identity=current_user): flash(_("A moderator cannot ban an admin user."), "danger") return redirect(url_for("management.overview")) if not current_user.id == user.id and user.ban(): flash(_("User is now banned."), "success") else: flash(_("Could not ban user."), "danger") return redirect(url_for("management.banned_users")) @management.route("/users/unban", methods=["POST"]) @management.route("/users/<int:user_id>/unban", methods=["POST"]) @allows.requires(IsAtleastModerator) def unban_user(user_id=None): if not Permission(CanBanUser, identity=current_user): flash(_("You do not have the permissions to unban this user."), "danger") return redirect(url_for("management.overview")) # ajax request if request.is_xhr: ids = request.get_json()["ids"] data = [] for user in User.query.filter(User.id.in_(ids)).all(): if user.unban(): data.append({ "id": user.id, "type": "unban", "reverse": "ban", "reverse_name": _("Ban"), "reverse_url": url_for("management.ban_user", user_id=user.id) }) return jsonify( message="{} users unbanned.".format(len(data)), category="success", data=data, status=200 ) user = User.query.filter_by(id=user_id).first_or_404() if user.unban(): flash(_("User is now unbanned."), "success") else: flash(_("Could not unban user."), "danger") return redirect(url_for("management.banned_users")) @management.route("/reports") @allows.requires(IsAtleastModerator) def reports(): page = request.args.get("page", 
1, type=int) reports = Report.query.\ order_by(Report.id.asc()).\ paginate(page, flaskbb_config['USERS_PER_PAGE'], False) return render_template("management/reports.html", reports=reports) @management.route("/reports/unread") @allows.requires(IsAtleastModerator) def unread_reports(): page = request.args.get("page", 1, type=int) reports = Report.query.\ filter(Report.zapped == None).\ order_by(Report.id.desc()).\ paginate(page, flaskbb_config['USERS_PER_PAGE'], False) return render_template("management/unread_reports.html", reports=reports) @management.route("/reports/<int:report_id>/markread", methods=["POST"]) @management.route("/reports/markread", methods=["POST"]) @allows.requires(IsAtleastModerator) def report_markread(report_id=None): # AJAX request if request.is_xhr: ids = request.get_json()["ids"] data = [] for report in Report.query.filter(Report.id.in_(ids)).all(): report.zapped_by = current_user.id report.zapped = time_utcnow() report.save() data.append({ "id": report.id, "type": "read", "reverse": False, "reverse_name": None, "reverse_url": None }) return jsonify( message="{} reports marked as read.".format(len(data)), category="success", data=data, status=200 ) # mark single report as read if report_id: report = Report.query.filter_by(id=report_id).first_or_404() if report.zapped: flash(_("Report %(id)s is already marked as read.", id=report.id), "success") return redirect(url_for("management.reports")) report.zapped_by = current_user.id report.zapped = time_utcnow() report.save() flash(_("Report %(id)s marked as read.", id=report.id), "success") return redirect(url_for("management.reports")) # mark all as read reports = Report.query.filter(Report.zapped == None).all() report_list = [] for report in reports: report.zapped_by = current_user.id report.zapped = time_utcnow() report_list.append(report) db.session.add_all(report_list) db.session.commit() flash(_("All reports were marked as read."), "success") return redirect(url_for("management.reports")) 
@management.route("/groups") @allows.requires(IsAdmin) def groups(): page = request.args.get("page", 1, type=int) groups = Group.query.\ order_by(Group.id.asc()).\ paginate(page, flaskbb_config['USERS_PER_PAGE'], False) return render_template("management/groups.html", groups=groups) @management.route("/groups/<int:group_id>/edit", methods=["GET", "POST"]) @allows.requires(IsAdmin) def edit_group(group_id): group = Group.query.filter_by(id=group_id).first_or_404() form = EditGroupForm(group) if form.validate_on_submit(): form.populate_obj(group) group.save() if group.guest: Guest.invalidate_cache() flash(_("Group updated."), "success") return redirect(url_for("management.groups", group_id=group.id)) return render_template("management/group_form.html", form=form, title=_("Edit Group")) @management.route("/groups/<int:group_id>/delete", methods=["POST"]) @management.route("/groups/delete", methods=["POST"]) @allows.requires(IsAdmin) def delete_group(group_id=None): if request.is_xhr: ids = request.get_json()["ids"] if not (set(ids) & set(["1", "2", "3", "4", "5"])): data = [] for group in Group.query.filter(Group.id.in_(ids)).all(): group.delete() data.append({ "id": group.id, "type": "delete", "reverse": False, "reverse_name": None, "reverse_url": None }) return jsonify( message="{} groups deleted.".format(len(data)), category="success", data=data, status=200 ) return jsonify( message=_("You cannot delete one of the standard groups."), category="danger", data=None, status=404 ) if group_id is not None: if group_id <= 5: # there are 5 standard groups flash(_("You cannot delete the standard groups. 
" "Try renaming it instead.", "danger")) return redirect(url_for("management.groups")) group = Group.query.filter_by(id=group_id).first_or_404() group.delete() flash(_("Group deleted."), "success") return redirect(url_for("management.groups")) flash(_("No group chosen."), "danger") return redirect(url_for("management.groups")) @management.route("/groups/add", methods=["GET", "POST"]) @allows.requires(IsAdmin) def add_group(): form = AddGroupForm() if form.validate_on_submit(): form.save() flash(_("Group added."), "success") return redirect(url_for("management.groups")) return render_template("management/group_form.html", form=form, title=_("Add Group")) @management.route("/forums") @allows.requires(IsAdmin) def forums(): categories = Category.query.order_by(Category.position.asc()).all() return render_template("management/forums.html", categories=categories) @management.route("/forums/<int:forum_id>/edit", methods=["GET", "POST"]) @allows.requires(IsAdmin) def edit_forum(forum_id): forum = Forum.query.filter_by(id=forum_id).first_or_404() form = EditForumForm(forum) if form.validate_on_submit(): form.save() flash(_("Forum updated."), "success") return redirect(url_for("management.edit_forum", forum_id=forum.id)) else: if forum.moderators: form.moderators.data = ",".join([ user.username for user in forum.moderators ]) else: form.moderators.data = None return render_template("management/forum_form.html", form=form, title=_("Edit Forum")) @management.route("/forums/<int:forum_id>/delete", methods=["POST"]) @allows.requires(IsAdmin) def delete_forum(forum_id): forum = Forum.query.filter_by(id=forum_id).first_or_404() involved_users = User.query.filter(Topic.forum_id == forum.id, Post.user_id == User.id).all() forum.delete(involved_users) flash(_("Forum deleted."), "success") return redirect(url_for("management.forums")) @management.route("/forums/add", methods=["GET", "POST"]) @management.route("/forums/<int:category_id>/add", methods=["GET", "POST"]) 
@allows.requires(IsAdmin) def add_forum(category_id=None): form = AddForumForm() if form.validate_on_submit(): form.save() flash(_("Forum added."), "success") return redirect(url_for("management.forums")) else: form.groups.data = Group.query.order_by(Group.id.asc()).all() if category_id: category = Category.query.filter_by(id=category_id).first() form.category.data = category return render_template("management/forum_form.html", form=form, title=_("Add Forum")) @management.route("/category/add", methods=["GET", "POST"]) @allows.requires(IsAdmin) def add_category(): form = CategoryForm() if form.validate_on_submit(): form.save() flash(_("Category added."), "success") return redirect(url_for("management.forums")) return render_template("management/category_form.html", form=form, title=_("Add Category")) @management.route("/category/<int:category_id>/edit", methods=["GET", "POST"]) @allows.requires(IsAdmin) def edit_category(category_id): category = Category.query.filter_by(id=category_id).first_or_404() form = CategoryForm(obj=category) if form.validate_on_submit(): form.populate_obj(category) flash(_("Category updated."), "success") category.save() return render_template("management/category_form.html", form=form, title=_("Edit Category")) @management.route("/category/<int:category_id>/delete", methods=["POST"]) @allows.requires(IsAdmin) def delete_category(category_id): category = Category.query.filter_by(id=category_id).first_or_404() involved_users = User.query.filter(Forum.category_id == category.id, Topic.forum_id == Forum.id, Post.user_id == User.id).all() category.delete(involved_users) flash(_("Category with all associated forums deleted."), "success") return redirect(url_for("management.forums")) @management.route("/plugins") @allows.requires(IsAdmin) def plugins(): plugins = get_all_plugins() return render_template("management/plugins.html", plugins=plugins) @management.route("/plugins/<path:plugin>/enable", methods=["POST"]) @allows.requires(IsAdmin) def 
enable_plugin(plugin): plugin = get_plugin_from_all(plugin) if plugin.enabled: flash(_("Plugin %(plugin)s is already enabled.", plugin=plugin.name), "info") return redirect(url_for("management.plugins")) try: plugin.enable() flash(_("Plugin %(plugin)s enabled. Please restart FlaskBB now.", plugin=plugin.name), "success") except OSError: flash(_("It seems that FlaskBB does not have enough filesystem " "permissions. Try removing the 'DISABLED' file by " "yourself instead."), "danger") return redirect(url_for("management.plugins")) @management.route("/plugins/<path:plugin>/disable", methods=["POST"]) @allows.requires(IsAdmin) def disable_plugin(plugin): try: plugin = get_plugin(plugin) except KeyError: flash(_("Plugin %(plugin)s not found.", plugin=plugin.name), "danger") return redirect(url_for("management.plugins")) try: plugin.disable() flash(_("Plugin %(plugin)s disabled. Please restart FlaskBB now.", plugin=plugin.name), "success") except OSError: flash(_("It seems that FlaskBB does not have enough filesystem " "permissions. Try creating the 'DISABLED' file by " "yourself instead."), "danger") return redirect(url_for("management.plugins")) @management.route("/plugins/<path:plugin>/uninstall", methods=["POST"]) @allows.requires(IsAdmin) def uninstall_plugin(plugin): plugin = get_plugin_from_all(plugin) if plugin.uninstallable: plugin.uninstall() Setting.invalidate_cache() flash(_("Plugin has been uninstalled."), "success") else: flash(_("Cannot uninstall plugin."), "danger") return redirect(url_for("management.plugins")) @management.route("/plugins/<path:plugin>/install", methods=["POST"]) @allows.requires(IsAdmin) def install_plugin(plugin): plugin = get_plugin_from_all(plugin) if plugin.installable and not plugin.uninstallable: plugin.install() Setting.invalidate_cache() flash(_("Plugin has been installed."), "success") else: flash(_("Cannot install plugin."), "danger") return redirect(url_for("management.plugins"))
""" Display current network and ip address for newer Huwei modems. It is tested for Huawei E3276 (usb-id 12d1:1506) aka Telekom Speed Stick LTE III but may work on other devices, too. DEPENDENCIES: - netifaces - pyserial Configuration parameters: - baudrate : There should be no need to configure this, but feel free to experiment. Default is 115200. - cache_timeout : How often we refresh this module in seconds. Default is 5. - consider_3G_degraded : If set to True, only 4G-networks will be considered 'good'; 3G connections are shown as 'degraded', which is yellow by default. Mostly useful if you want to keep track of where there is a 4G connection. Default is False. - format_down : What to display when the modem is not plugged in Default is: 'WWAN: down' - format_error : What to display when modem can't be accessed. Default is 'WWAN: {error}' - format_no_service : What to display when the modem does not have a network connection. This allows to omit the then meaningless network generation. Therefore the default is 'WWAN: ({status}) {ip}' - format_up : What to display upon regular connection Default is 'WWAN: ({status}/{netgen}) {ip}' - interface : The default interface to obtain the IP address from. For wvdial this is most likely ppp0. For netctl it can be different. Default is: ppp0 - modem : The device to send commands to. Default is - modem_timeout : The timespan betwenn querying the modem and collecting the response. 
Default is 0.4 (which should be sufficient) @author Timo Kohorst timo@kohorst-online.com PGP: B383 6AE6 6B46 5C45 E594 96AB 89D2 209D DBF3 2BB5 """ import subprocess import netifaces as ni import os import stat import serial from time import time, sleep class Py3status: baudrate = 115200 cache_timeout = 5 consider_3G_degraded = False format_down = 'WWAN: down' format_error = 'WWAN: {error}' format_no_service = 'WWAN: {status} {ip}' format_up = 'WWAN: {status} ({netgen}) {ip}' interface = "ppp0" modem = "/dev/ttyUSB1" modem_timeout = 0.4 def wwan_status(self, i3s_output_list, i3s_config): query = "AT^SYSINFOEX" target_line = "^SYSINFOEX" # Set up the highest network generation to display as degraded if self.consider_3G_degraded: degraded_netgen = 3 else: degraded_netgen = 2 response = {} response['cached_until'] = time() + self.cache_timeout # Check if path exists and is a character device if os.path.exists(self.modem) and stat.S_ISCHR(os.stat( self.modem).st_mode): print("Found modem " + self.modem) try: ser = serial.Serial( port=self.modem, baudrate=self.baudrate, # Values below work for my modem. Not sure if # they neccessarily work for all modems parity=serial.PARITY_ODD, stopbits=serial.STOPBITS_ONE, bytesize=serial.EIGHTBITS) if ser.isOpen(): ser.close() ser.open() ser.write((query + "\r").encode()) print("Issued query to " + self.modem) sleep(self.modem_timeout) n = ser.inWaiting() modem_response = ser.read(n) ser.close() except: # This will happen... # 1) in the short timespan between the creation of the device node # and udev changing the permissions. 
If this message persists, # double check if you are using the proper device file # 2) if/when you unplug the device PermissionError print("Permission error") response['full_text'] = self.format_error.format( error="no access to " + self.modem) response['color'] = i3s_config['color_bad'] return response # Dissect response for line in modem_response.decode("utf-8").split('\n'): print(line) if line.startswith(target_line): # Determine IP once the modem responds ip = self._get_ip(self.interface) if not ip: ip = "no ip" modem_answer = line.split(',') netgen = len(modem_answer[-2]) + 1 netmode = modem_answer[-1].rstrip()[1:-1] if netmode == "NO SERVICE": response['full_text'] = self.format_no_service.format( status=netmode, ip=ip) response['color'] = i3s_config['color_bad'] else: response['full_text'] = self.format_up.format( status=netmode, netgen=str(netgen) + "G", ip=ip) if netgen <= degraded_netgen: response['color'] = i3s_config['color_degraded'] else: response['color'] = i3s_config['color_good'] elif line.startswith("COMMAND NOT SUPPORT") or line.startswith( "ERROR"): response['color'] = i3s_config['color_bad'] response['full_text'] = self.format_error.format( error="unsupported modem") else: # Outputs can be multiline, so just try the next one pass else: print(self.modem + " not found") response['color'] = i3s_config['color_bad'] response['full_text'] = self.format_down return response def _get_ip(self, interface): """ Returns the interface's IPv4 address if device exists and has a valid ip address. Otherwise, returns an empty string """ if interface in ni.interfaces(): addresses = ni.ifaddresses(interface) if ni.AF_INET in addresses: return addresses[ni.AF_INET][0]['addr'] return "" if __name__ == "__main__": from time import sleep x = Py3status() config = { 'color_good': '#00FF00', 'color_bad': '#FF0000', 'color_degraded': '#FFFF00', } while True: print(x.wwan_status([], config)) sleep(1)
"""Schedule models. Much of this module is derived from the work of Eldarion on the `Symposion <https://github.com/pinax/symposion>`_ project. Copyright (c) 2010-2014, Eldarion, Inc. and contributors All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Eldarion, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ from bisect import bisect_left from itertools import tee from cached_property import cached_property from sqlalchemy import func from pygotham.core import db __all__ = ('Day', 'Room', 'Slot', 'Presentation') def pairwise(iterable): """Return values from ``iterable`` two at a time. 
Recipe from https://docs.python.org/3/library/itertools.html#itertools-recipes. """ a, b = tee(iterable) next(b, None) return zip(a, b) rooms_slots = db.Table( 'rooms_slots', db.Column('slot_id', db.Integer, db.ForeignKey('slots.id')), db.Column('room_id', db.Integer, db.ForeignKey('rooms.id')), ) class Day(db.Model): """Day of talks.""" __tablename__ = 'days' id = db.Column(db.Integer, primary_key=True) date = db.Column(db.Date) event_id = db.Column( db.Integer, db.ForeignKey('events.id'), nullable=False) event = db.relationship( 'Event', backref=db.backref('days', lazy='dynamic')) def __str__(self): """Return a printable representation.""" return self.date.strftime('%B %d, %Y') @cached_property def rooms(self): """Return the rooms for the day.""" return Room.query.join(rooms_slots, Slot).filter( Slot.day == self).order_by(Room.order).all() def __iter__(self): """Iterate over the schedule for the day.""" if not self.rooms: raise StopIteration def rowspan(start, end): """Find the rowspan for an entry in the schedule table. This uses a binary search for the given end time from a sorted list of start times in order to find the index of the first start time that occurs after the given end time. This method is used to prevent issues that can occur with overlapping start and end times being included in the same list. """ return bisect_left(times, end) - times.index(start) times = sorted({slot.start for slot in self.slots}) # While we typically only care about the start times here, the # list is iterated over two items at a time. Without adding a # final element, the last time slot would be omitted. Any value # could be used here as bisect_left only assumes the list is # sorted, but using a meaningful value feels better. 
times.append(self.slots[-1].end) slots = db.session.query( Slot.id, Slot.content_override, Slot.kind, Slot.start, Slot.end, func.count(rooms_slots.c.slot_id).label('room_count'), func.min(Room.order).label('order'), ).join(rooms_slots, Room).filter(Slot.day == self).order_by( func.count(rooms_slots.c.slot_id), func.min(Room.order) ).group_by( Slot.id, Slot.content_override, Slot.kind, Slot.start, Slot.end ).all() for time, next_time in pairwise(times): row = {'time': time, 'slots': []} for slot in slots: if slot.start == time: slot.rowspan = rowspan(slot.start, slot.end) slot.colspan = slot.room_count if not slot.content_override: slot.presentation = Presentation.query.filter( Presentation.slot_id == slot.id).first() row['slots'].append(slot) if row['slots'] or next_time is None: yield row class Room(db.Model): """Room of talks.""" __tablename__ = 'rooms' id = db.Column(db.Integer, primary_key=True) name = db.Column(db.String(255), nullable=False) order = db.Column(db.Integer, nullable=False) def __str__(self): """Return a printable representation.""" return self.name class Slot(db.Model): """Time slot.""" __tablename__ = 'slots' id = db.Column(db.Integer, primary_key=True) kind = db.Column( db.Enum( 'break', 'meal', 'keynote', 'talk', 'tutorial', name='slotkind'), nullable=False, ) content_override = db.Column(db.Text) start = db.Column(db.Time, nullable=False) end = db.Column(db.Time, nullable=False) day_id = db.Column(db.Integer, db.ForeignKey('days.id'), nullable=False) day = db.relationship('Day', backref=db.backref('slots', lazy='dynamic')) rooms = db.relationship( 'Room', secondary=rooms_slots, backref=db.backref('slots', lazy='dynamic'), order_by=Room.order, ) def __str__(self): """Return a printable representation.""" start = self.start.strftime('%I:%M %p') end = self.end.strftime('%I:%M %p') rooms = ', '.join(map(str, self.rooms)) return '{} - {} on {}, {}'.format(start, end, self.day, rooms) @cached_property def duration(self): """Return the duration as 
a :class:`~datetime.timedelta`.""" return self.end - self.start class Presentation(db.Model): """Presentation of a talk.""" __tablename__ = 'presentations' id = db.Column(db.Integer, primary_key=True) slot_id = db.Column(db.Integer, db.ForeignKey('slots.id'), nullable=False) slot = db.relationship( 'Slot', backref=db.backref('presentation', uselist=False)) talk_id = db.Column(db.Integer, db.ForeignKey('talks.id'), nullable=False) talk = db.relationship( 'Talk', backref=db.backref('presentation', uselist=False)) def __str__(self): """Return a printable representation.""" return str(self.talk) def is_in_all_rooms(self): """Return whether the instance is in all rooms.""" return self.slot.number_of_rooms == 4 @cached_property def number_of_rooms(self): """Return the number of rooms for the instance.""" return len(self.slot.rooms)
from apptools.logger.util import *
import re

from django import template
from django.core.urlresolvers import NoReverseMatch
from django.core.urlresolvers import reverse

register = template.Library()


@register.simple_tag(takes_context=True)
def active(context, name):
    """Return ``'active'`` when the current request path matches the URL
    reversed from ``name``; otherwise return an empty string."""
    try:
        url_pattern = reverse(name)
    except NoReverseMatch:
        return ''
    return 'active' if re.match(url_pattern, context['request'].path) else ''
import tests.model_control.test_ozone_custom_models_enabled as testmod

# Build the ozone model variant: Difference transform, LinearTrend trend,
# Seasonal_Hour periodicity, no autoregressive component.
testmod.build_model(
    ['Difference'],
    ['LinearTrend'],
    ['Seasonal_Hour'],
    ['NoAR'],
)
from __future__ import unicode_literals

from django.db import migrations


class Migration(migrations.Migration):
    """Remove the ``couch_id`` field from ``sqlcommtrackconfig``.

    Depends on 0005 — presumably the couch id was only needed while that
    migration populated the SQL config models; verify before squashing.
    """

    dependencies = [
        ('commtrack', '0005_populate_config_models'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='sqlcommtrackconfig',
            name='couch_id',
        ),
    ]
'''
isobands_matplotlib.py is a script for creating isobands.
Works in a similar way as gdal_contour, but creating polygons
instead of polylines

This version requires matplotlib, but there is another one,
isobands_gdal.py that uses only GDAL python

Originally created by Roger Veciana i Rovira, made available via his blog post
http://geoexamples.blogspot.com.au/2013/08/creating-vectorial-isobands-with-python.html
and on Github at
https://github.com/rveciana/geoexamples/tree/master/python/raster_isobands

NOTE(review): this module uses Python 2 syntax (print statement) and must
run under a Python 2 interpreter.
'''
from numpy import arange
from numpy import meshgrid
from osgeo import ogr
from osgeo import gdal
from osgeo import osr
from math import floor
from math import ceil
from os.path import exists
from os import remove
from argparse import ArgumentParser
import matplotlib.pyplot as plt

def str2bool(v):
    # Interpret common truthy strings; anything else is False.
    return v.lower() in ("yes", "true", "t", "1")

def isobands(in_file, band, out_file, out_format, layer_name, attr_name,
             offset, interval, min_level = None, upper_val_output = False):
    '''
    The method that calculates the isobands

    in_file:          path of the input raster
    band:             1-based raster band number to contour
    out_file:         path of the output vector file (deleted if it exists)
    out_format:       OGR driver name (e.g. 'ESRI Shapefile')
    layer_name:       name of the created layer
    attr_name:        real-valued attribute that receives the band level
    offset, interval: isoband levels are offset + k*interval
    min_level:        lowest level; derived from the raster minimum if None
    upper_val_output: write the upper (instead of lower) bound of each
                      isoband into attr_name
    '''
    ds_in = gdal.Open(in_file)
    band_in = ds_in.GetRasterBand(band)
    xsize_in = band_in.XSize
    ysize_in = band_in.YSize
    geotransform_in = ds_in.GetGeoTransform()

    # Carry the input raster's spatial reference over to the output layer.
    srs = osr.SpatialReference()
    srs.ImportFromWkt( ds_in.GetProjectionRef() )

    #Creating the output vectorial file
    drv = ogr.GetDriverByName(out_format)

    if exists(out_file):
        remove(out_file)
    dst_ds = drv.CreateDataSource( out_file )

    dst_layer = dst_ds.CreateLayer(layer_name, geom_type = ogr.wkbPolygon,
                                   srs = srs)

    fdef = ogr.FieldDefn( attr_name, ogr.OFTReal )
    dst_layer.CreateField( fdef )

    # Use the geotransform pixel size value to avoid weird rounding errors in
    # original approach.
    x_pos = [geotransform_in[0]+geotransform_in[1]*ii \
             for ii in range(xsize_in)]
    y_pos = [geotransform_in[3]+geotransform_in[5]*ii \
             for ii in range(ysize_in)]
    #x_pos = arange(geotransform_in[0],
    #    geotransform_in[0] + xsize_in*geotransform_in[1], geotransform_in[1])
    #y_pos = arange(geotransform_in[3],
    #    geotransform_in[3] + ysize_in*geotransform_in[5], geotransform_in[5])
    x_grid, y_grid = meshgrid(x_pos, y_pos)
    raster_values = band_in.ReadAsArray(0, 0, xsize_in, ysize_in)

    #stats = band_in.GetStatistics(True, True)
    min_value, max_value = band_in.ComputeRasterMinMax()
    # NOTE(review): prefer 'is None' over '== None' (PEP 8).
    if min_level == None:
        #min_value = stats[0]
        min_level = offset + interval * floor((min_value - offset)/interval)

    #max_value = stats[1]
    #Due to range issues, a level is added
    max_level = offset + interval * (1 + ceil((max_value - offset)/interval))

    levels = arange(min_level, max_level, interval)

    # matplotlib does the actual isoband computation; each filled-contour
    # collection corresponds to one level band.
    contours = plt.contourf(x_grid, y_grid, raster_values, levels)

    for level in range(len(contours.collections)):
        paths = contours.collections[level].get_paths()
        for path in paths:
            feat_out = ogr.Feature( dst_layer.GetLayerDefn())
            if upper_val_output:
                out_val = contours.levels[level] + interval
            else:
                out_val = contours.levels[level]
            feat_out.SetField( attr_name, out_val )
            pol = ogr.Geometry(ogr.wkbPolygon)

            ring = None

            # A path code of 1 (MOVETO) starts a new ring; every vertex is
            # appended to the ring currently being built.
            # NOTE(review): matplotlib Path.codes can be None for simple
            # paths — confirm contourf always supplies codes here.
            for i in range(len(path.vertices)):
                point = path.vertices[i]
                if path.codes[i] == 1:
                    if ring != None:
                        pol.AddGeometry(ring)
                    ring = ogr.Geometry(ogr.wkbLinearRing)
                ring.AddPoint_2D(point[0], point[1])

            pol.AddGeometry(ring)

            feat_out.SetGeometry(pol)
            if dst_layer.CreateFeature(feat_out) != 0:
                print "Failed to create feature in shapefile.\n"
                exit( 1 )
            feat_out.Destroy()

if __name__ == "__main__":
    PARSER = ArgumentParser(
        description="Calculates the isobands from a raster into a vector file")
    PARSER.add_argument("src_file", help="The raster source file")
    PARSER.add_argument("out_file", help="The vectorial out file")
    PARSER.add_argument("-b",
        help="The band in the source file to process (default 1)",
        type=int, default = 1, metavar = 'band')
    PARSER.add_argument("-off",
        help="The offset to start the isobands (default 0)",
        type=float, default = 0.0, metavar = 'offset')
    PARSER.add_argument("-i",
        help="The interval (default 0)",
        type=float, default = 0.0, metavar = 'interval')
    PARSER.add_argument("-nln",
        help="The out layer name (default bands)",
        default = 'bands', metavar = 'layer_name')
    PARSER.add_argument("-a",
        help="The out layer attribute name (default h)",
        default = 'h', metavar = 'attr_name')
    PARSER.add_argument("-f",
        help="The output file format name (default ESRI Shapefile)",
        default = 'ESRI Shapefile', metavar = 'formatname')
    PARSER.add_argument("-up",
        help="In the output file, whether to use the upper value of an "
             "isoband, as value name for polygons, rather than lower.",
        default = "False", metavar='upper_val_output')
    ARGS = PARSER.parse_args()

    isobands(ARGS.src_file, ARGS.b, ARGS.out_file, ARGS.f, ARGS.nln,
             ARGS.a, ARGS.off, ARGS.i, upper_val_output=str2bool(ARGS.up))
from __future__ import division from direct.showbase.ShowBase import ShowBase from direct.actor.Actor import ActorNode from panda3d.core import WindowProperties, NodePath, LVector3 from panda3d.core import LineSegs, OrthographicLens, CardMaker from inputs import Inputs from sys import path import square try: path.insert(1, '../pydaq') import pydaq except ImportError: pydaq = None class ColorWorld(object): def __init__(self, config=None): # keep track of velocity, this allows me to counteract joystick with keyboard self.velocity = LVector3(0) if config is None: self.config = {} execfile('config.py', self.config) else: self.config = config self.reward = None if pydaq: self.reward = pydaq.GiveReward() self.reward_count = 0 # self.color_map always corresponds to (r, g, b) # does not change during game, each game uses a particular color space self.color_dict = square.make_color_map(self.config['colors']) # sets the range of colors for this map self.c_range = self.config['c_range'] # color variables (make dictionary?) # color_list is set in beginning, and then after that this is only # called again for non-random (training) self.color_list = square.set_start_position_colors(self.config) self.color_match = [0, 0, 0] self.color_tolerance = [] self.last_avt, self.avt_factor = square.translate_color_map(self.config, self.color_dict, self.color_list) print 'starting avt position', self.last_avt print 'map avatar factor', self.avt_factor self.random = True if self.config.get('match_direction'): self.random = False # adjustment to speed so corresponds to gobananas task # 7 seconds to cross original environment # speed needs to be adjusted to both speed in original # environment and c_range of colors # self.speed = 0.05 * (self.c_range[1] - self.c_range[0]) # speed is own variable, so can be changed during training. 
self.speed = self.config['speed'] # map avatar variables self.render2d = None self.match_square = None self.map_avt_node = [] # need a multiplier to the joystick output to tolerable speed self.vel_base = 3 self.max_vel = [500, 500, 0] self.card = None self.base = ShowBase() self.base.disableMouse() # assume we are showing windows unless proven otherwise if self.config.get('win', True): # only need inputs if we have a window self.inputs = Inputs(self.base) props = WindowProperties() props.setCursorHidden(True) props.setForeground(True) print self.config.get('resolution') if self.config.get('resolution'): props.set_size(int(self.config['resolution'][0]), int(self.config['resolution'][1])) props.set_origin(0, 0) else: props.set_size(600, 600) props.set_origin(400, 50) self.base.win.requestProperties(props) # print self.base.win.get_size() # setup color map on second window sq_node = square.setup_square(self.config) self.setup_display2(sq_node) # print 'background color', self.base.getBackgroundColor() # create the avatar self.avatar = NodePath(ActorNode("avatar")) self.avatar.reparentTo(self.base.render) self.avatar.setH(self.base.camera.getH()) self.base.camera.reparentTo(self.avatar) self.base.camera.setPos(0, 0, 0) # initialize task variables self.frame_task = None self.started_game = None self.showed_match = None self.gave_reward = None # initialize and start the game self.set_next_trial() # print 'end init' def start_loop(self): # need to get new match print 'start loop' self.started_game = self.base.taskMgr.doMethodLater(5, self.start_play, 'start_play') self.showed_match = self.base.taskMgr.add(self.show_match_sample, 'match_image') # Task methods def show_match_sample(self, task): print 'show match sample' print self.color_match[:] # match_image.fill(*self.color_match[:]) card = CardMaker('card') color_match = self.color_match[:] # add alpha channel color_match.append(1) print color_match card.set_color(*color_match[:]) card.set_frame(-12, -8, 0, 4) # log this 
self.card = self.base.render.attach_new_node(card.generate()) return task.done def start_play(self, task): print 'start play' # log this self.base.taskMgr.remove('match_image') self.card.removeNode() # print self.base.render.ls() self.frame_task = self.base.taskMgr.add(self.game_loop, "game_loop") self.frame_task.last = 0 # initiate task time of the last frame # log this self.base.setBackgroundColor(self.color_list[:]) return task.done def game_loop(self, task): dt = task.time - task.last task.last = task.time self.velocity = self.inputs.poll_inputs(self.velocity) move = self.move_avatar(dt) stop = self.change_background(move) self.move_map_avatar(move, stop) match = self.check_color_match() if match: self.give_reward() return task.done return task.cont def reward_loop(self, task): self.reward_count += 1 if self.reward_count <= self.config['num_beeps']: if self.reward: # log this print 'give a bloody reward already' self.reward.pumpOut() print 'give reward' return task.again else: self.end_loop() return task.done def move_avatar(self, dt): # print 'velocity', self.velocity # this makes for smooth (correct speed) diagonal movement # print 'velocity', self.velocity magnitude = max(abs(self.velocity[0]), abs(self.velocity[1])) move = None if self.velocity.normalize(): # go left in increasing amount # print 'dt', dt # print 'normalized' # print 'velocity', self.velocity # print 'magnitude', magnitude self.velocity *= magnitude # print 'velocity', self.velocity # this makes for smooth movement move = self.velocity * self.vel_base * dt # print move self.avatar.setFluidPos(self.avatar, move) return move def change_background(self, move): stop = [True, True, True] if move: # print move move *= self.speed for i in range(3): value = self.color_dict[i] if value is not None: stop[i] = False # keys correspond to x,y,z # values correspond to r,g,b if i == 2: # z axis is treated differently # need to work on this. 
z should # be at min when both x and y are at max # taking the average is not quite right... z_move = (move[0] + move[1])/2 # print z_move self.color_list[value] -= z_move else: self.color_list[value] += move[i] if self.color_list[value] < self.c_range[0]: self.color_list[value] = self.c_range[0] stop[i] = True elif self.color_list[value] > self.c_range[1]: self.color_list[value] = self.c_range[1] stop[i] = True # log this self.base.setBackgroundColor(self.color_list[:]) # print self.base.getBackgroundColor() return stop def move_map_avatar(self, move, stop): # print move # avatar is mapped assuming c_range of 0.5. What do I need to # change to use a different c_range? c_range of one is twice # the if move: avt = LineSegs() avt.setThickness(1) avt.setColor(1, 1, 1) # print 'last', self.last_avt avt.move_to(self.last_avt[0], -5, self.last_avt[1]) # print 'move', move new_move = [i + (j * self.avt_factor) for i, j in zip(self.last_avt, move)] # new_move = [i + j for i, j in zip(self.last_avt, move)] # would it be better to have a local stop condition? 
if stop[0]: new_move[0] = self.last_avt[0] # print 'stop x', self.last_avt[0] if stop[1]: new_move[1] = self.last_avt[1] # print 'stop y', self.last_avt[1] # print 'new', new_move self.last_avt = [new_move[0], new_move[1]] avt.draw_to(new_move[0], -5, new_move[1]) self.map_avt_node.append(self.render2d.attach_new_node(avt.create())) # print self.map_avt_node[-1] # can't let too many nodes pile up if len(self.map_avt_node) > 299: # removing the node does not remove the object from the list for i, j in enumerate(self.map_avt_node): j.removeNode() if i > 49: break del self.map_avt_node[0:50] def check_color_match(self): # print 'match this', self.color_tolerance # print self.color_list check_color = [j[0] < self.color_list[i] < j[1] for i, j in enumerate(self.color_tolerance)] # print check_color if all(check_color): return True else: return False def give_reward(self): # clear the background self.base.setBackgroundColor(0.41, 0.41, 0.41) print 'give first reward' self.reward_count = 1 if self.reward: # log this self.reward.pumpOut() self.gave_reward = self.base.taskMgr.doMethodLater(self.config['pump_delay'], self.reward_loop, 'reward_loop') def end_loop(self): print 'end loop' # clear avatar map self.clear_avatar_map() # if there is a match set, return to center of color gradient, # set new match, if applicable self.set_next_trial() def clear_avatar_map(self): for i, j in enumerate(self.map_avt_node): j.removeNode() self.map_avt_node = [] def plot_match_square(self, corners): print 'plot match square' print corners match = LineSegs() match.setThickness(1.5) match.setColor(0, 0, 0) match.move_to(corners[0][0], -5, corners[1][0]) match.draw_to(corners[0][1], -5, corners[1][0]) match.draw_to(corners[0][1], -5, corners[1][1]) match.draw_to(corners[0][0], -5, corners[1][1]) match.draw_to(corners[0][0], -5, corners[1][0]) # print self.render2d self.match_square = self.render2d.attach_new_node(match.create()) def create_avatar_map_match_square(self, config=None): print 
'make new square for map' if config is not None: config_dict = config else: config_dict = self.config # create square on avatar map for new color match map_color_match, factor = square.translate_color_map(config_dict, self.color_dict, self.color_match) tolerance = config_dict['tolerance'] * factor map_color_tolerance = [(i - tolerance, i + tolerance) for i in map_color_match] print map_color_tolerance if self.render2d: if self.match_square: self.match_square.removeNode() self.plot_match_square(map_color_tolerance) def set_next_trial(self): print 'set next trial' # move avatar back to beginning position, only matters for # showing card for next color match self.avatar.set_pos(-10, -10, 2) # set color_list with starting color # if random, won't use this again, but for manual, will # return to center # need to update self.config to new direction, if there is one if self.config.get('match_direction'): self.check_key_map() # return to center, otherwise random will start where you left off self.color_list = square.set_start_position_colors(self.config) # starting position for map avatar, just translate new color_list self.last_avt, self.avt_factor = square.translate_color_map(self.config, self.color_dict, self.color_list) print 'start color', self.color_list print self.color_dict # again need to update self.config for match if using keys self.color_match = square.set_match_colors(self.config, self.color_dict) # sets the tolerance for how close to a color for reward self.color_tolerance = [(i - self.config['tolerance'], i + self.config['tolerance']) for i in self.color_match] print 'color match', self.color_match print 'color tolerance', self.color_tolerance self.create_avatar_map_match_square(self.config) # start the game self.start_loop() def check_key_map(self): if self.config['colors'][0]: if self.inputs.key_map['r']: self.config['match_direction'] = ['right'] elif self.inputs.key_map['r'] is not None: self.config['match_direction'] = ['left'] elif 
self.config['colors'][1]: if self.inputs.key_map['f']: self.config['match_direction'] = ['front'] elif self.inputs.key_map['f'] is not None: self.config['match_direction'] = ['back'] def setup_display2(self, display_node): print 'setup display2' props = WindowProperties() props.set_cursor_hidden(True) props.set_foreground(False) if self.config.get('resolution'): props.setSize(700, 700) props.setOrigin(-int(self.config['resolution'][0] - 5), 5) else: props.setSize(300, 300) props.setOrigin(10, 10) window2 = self.base.openWindow(props=props, aspectRatio=1) lens = OrthographicLens() lens.set_film_size(2, 2) lens.setNearFar(-100, 100) self.render2d = NodePath('render2d') self.render2d.attach_new_node(display_node) camera2d = self.base.makeCamera(window2) camera2d.setPos(0, -10, 0) camera2d.node().setLens(lens) camera2d.reparentTo(self.render2d) if __name__ == "__main__": CW = ColorWorld() CW.base.run()
''' from sc2casts_client import * import json from pprint import * parser = SC2CastsParser() client = SC2CastsClient() TEST_DATA_DIR = 'data' def test_titles(): pass def test_casts(): with open(TEST_DATA_DIR + '/all', 'r') as f: test_data = f.read() #print test_data actual = parser.casts(test_data) pprint(actual) # TODO check each cast def test_games_bo3_in_1_game(): with open(TEST_DATA_DIR + '/cast14719-Soulkey-vs-Cure-Best-of-3-All-in-1-video-IEM-Cologne-2014-Korean-Qualifier', 'r') as f: test_data = f.read() #print test_data actual = parser.games(test_data) assert len(actual) == 1 assert actual[0]['game_id'] == 'Gt4E3rIUhoA' assert actual[0]['game_title'] == 'Game 1' def test_games_5_games(): with open(TEST_DATA_DIR + '/cast14705-KT-Rolster-vs-Prime-Best-of-5-2014-Proleague-Round-1', 'r') as f: test_data = f.read() #print test_data actual = parser.games(test_data) print actual assert len(actual) == 5 assert actual[0]['game_id'] == 'QqSRtBVEXDs' assert actual[0]['game_title'] == 'Game 1' assert actual[1]['game_id'] == '5lFLuOKYTa8' assert actual[1]['game_title'] == 'Game 2' assert actual[2]['game_id'] == 'wNhcT-NenNs' assert actual[2]['game_title'] == 'Game 3' assert actual[3]['game_id'] == '' assert actual[3]['game_title'] == 'Game 4' assert actual[4]['game_id'] == '' assert actual[4]['game_title'] == 'Game 5' def test_events(): with open(TEST_DATA_DIR + '/browse', 'r') as f: test_data = f.read() actual = parser.events(test_data) pprint(actual) def test_casters(): with open(TEST_DATA_DIR + '/browse', 'r') as f: test_data = f.read() actual = parser.casters(test_data) pprint(actual) def test_matchups(): with open(TEST_DATA_DIR + '/browse', 'r') as f: test_data = f.read() actual = parser.matchups(test_data) assert len(actual) == 6 # TODO test that the actual URLs are still valid def test_client_matchups(): actual = client.matchups() assert len(actual) == 6 '''
__all__=('Formatter','DecimalFormatter')
__version__=''' $Id: formatters.py 3959 2012-09-27 14:39:39Z robin $ '''
__doc__="""
These help format numbers and dates in a user friendly way.
Used by the graphics framework.
"""
import string, sys, os, re


class Formatter:
    "Base formatter - simply applies python format strings"

    def __init__(self, pattern):
        # pattern is a %-style format string, e.g. '%0.2f'
        self.pattern = pattern

    def format(self, obj):
        """Apply the stored %-pattern to obj and return the result."""
        return self.pattern % obj

    def __repr__(self):
        return "%s('%s')" % (self.__class__.__name__, self.pattern)

    def __call__(self, x):
        return self.format(x)


# _ld_re strips everything up to and including the decimal point;
# _tz_re strips trailing zeros.  Together they isolate the significant
# fractional digits of a value's string form (used by places='auto').
_ld_re=re.compile(r'^\d*\.')
_tz_re=re.compile('0+$')


class DecimalFormatter(Formatter):
    """lets you specify how to build a decimal.
    A future NumberFormatter class will take Microsoft-style patterns
    instead - "$#,##0.00" is WAY easier than this.

    places:        decimal places; a non-positive value means "round to
                   abs(places), then strip trailing zeros/separator";
                   'auto' defers the choice until _calcPlaces() is called
                   with the full set of values to be formatted.
    decimalSep:    decimal separator character.
    thousandSep:   thousands separator, or None for none.
    prefix/suffix: literal strings wrapped around the result.
    """

    def __init__(self, places=2, decimalSep='.', thousandSep=None,
                 prefix=None, suffix=None):
        if places=='auto':
            # self.places is deliberately left unset until _calcPlaces
            # runs; format() will fail if called before then.
            self.calcPlaces = self._calcPlaces
        else:
            self.places = places
        self.dot = decimalSep
        self.comma = thousandSep
        self.prefix = prefix
        self.suffix = suffix

    def _calcPlaces(self,V):
        '''called with the full set of values to be formatted so we can calculate places'''
        self.places = max([len(_tz_re.sub('',_ld_re.sub('',str(v)))) for v in V])

    def format(self, num):
        """Render num as a decimal string per the configured options."""
        # positivize the numbers; the sign is re-applied at the end so the
        # thousands-separator loop only ever sees digits
        sign=num<0
        if sign:
            num = -num
        places, sep = self.places, self.dot
        # non-positive places requests rounding followed by zero-stripping
        strip = places<=0
        if places and strip:
            places = -places
        strInt = ('%.' + str(places) + 'f') % num
        if places:
            strInt, strFrac = strInt.split('.')
            strFrac = sep + strFrac
            if strip:
                # drop trailing zeros and, if nothing remains, the
                # separator itself
                while strFrac and strFrac[-1] in ['0',sep]:
                    strFrac = strFrac[:-1]
        else:
            strFrac = ''

        if self.comma is not None:
            # insert the thousands separator every three digits, working
            # from the right end of the integer part
            strNew = ''
            while strInt:
                left, right = strInt[0:-3], strInt[-3:]
                if left == '':
                    strNew = right + strNew
                else:
                    strNew = self.comma + right + strNew
                strInt = left
            strInt = strNew

        strBody = strInt + strFrac
        if sign:
            strBody = '-' + strBody
        if self.prefix:
            strBody = self.prefix + strBody
        if self.suffix:
            strBody = strBody + self.suffix
        return strBody

    def __repr__(self):
        # BUG FIX: the original used %d for places, which raised
        # AttributeError when constructed with places='auto' before
        # _calcPlaces had run (self.places unset).  %s with a getattr
        # default is always safe and prints identically for ints.
        return "%s(places=%s, decimalSep=%s, thousandSep=%s, prefix=%s, suffix=%s)" % (
            self.__class__.__name__,
            getattr(self, 'places', 'auto'),
            repr(self.dot),
            repr(self.comma),
            repr(self.prefix),
            repr(self.suffix),
            )


if __name__=='__main__':
    # lightweight self-test: each call checks one formatting scenario
    def t(n, s, places=2, decimalSep='.', thousandSep=None, prefix=None, suffix=None):
        f=DecimalFormatter(places,decimalSep,thousandSep,prefix,suffix)
        r = f(n)
        print("places=%2d dot=%-4s comma=%-4s prefix=%-4s suffix=%-4s result=%10s %s" %(
            f.places, f.dot, f.comma, f.prefix, f.suffix, r, r==s and 'OK' or 'BAD'))
    t(1000.9,'1,000.9',1,thousandSep=',')
    t(1000.95,'1,001.0',1,thousandSep=',')
    t(1000.95,'1,001',-1,thousandSep=',')
    t(1000.9,'1,001',0,thousandSep=',')
    t(1000.9,'1000.9',1)
    t(1000.95,'1001.0',1)
    t(1000.95,'1001',-1)
    t(1000.9,'1001',0)
    t(1000.1,'1000.1',1)
    t(1000.55,'1000.6',1)
    t(1000.449,'1000.4',-1)
    t(1000.45,'1000',0)
from datetime import datetime, timedelta, tzinfo import unittest import pytz import re from nose.tools import assert_equal, assert_raises # you need it for tests in form of continuations import six from flask_restful import inputs def test_reverse_rfc822_datetime(): dates = [ ("Sat, 01 Jan 2011 00:00:00 -0000", datetime(2011, 1, 1, tzinfo=pytz.utc)), ("Sat, 01 Jan 2011 23:59:59 -0000", datetime(2011, 1, 1, 23, 59, 59, tzinfo=pytz.utc)), ("Sat, 01 Jan 2011 21:59:59 -0200", datetime(2011, 1, 1, 23, 59, 59, tzinfo=pytz.utc)), ] for date_string, expected in dates: yield assert_equal, inputs.datetime_from_rfc822(date_string), expected def test_reverse_iso8601_datetime(): dates = [ ("2011-01-01T00:00:00+00:00", datetime(2011, 1, 1, tzinfo=pytz.utc)), ("2011-01-01T23:59:59+00:00", datetime(2011, 1, 1, 23, 59, 59, tzinfo=pytz.utc)), ("2011-01-01T23:59:59.001000+00:00", datetime(2011, 1, 1, 23, 59, 59, 1000, tzinfo=pytz.utc)), ("2011-01-01T23:59:59+02:00", datetime(2011, 1, 1, 21, 59, 59, tzinfo=pytz.utc)) ] for date_string, expected in dates: yield assert_equal, inputs.datetime_from_iso8601(date_string), expected def test_urls(): urls = [ 'http://www.djangoproject.com/', 'http://localhost/', 'http://example.com/', 'http://www.example.com/', 'http://www.example.com:8000/test', 'http://valid-with-hyphens.com/', 'http://subdomain.example.com/', 'http://200.8.9.10/', 'http://200.8.9.10:8000/test', 'http://valid-----hyphens.com/', 'http://example.com?something=value', 'http://example.com/index.php?something=value&another=value2', 'http://foo:bar@example.com', 'http://foo:@example.com', 'http://foo:@2001:db8:85a3::8a2e:370:7334', 'http://foo2:qd1%r@example.com', ] for value in urls: yield assert_equal, inputs.url(value), value def check_bad_url_raises(value): try: inputs.url(value) assert False, "shouldn't get here" except ValueError as e: assert_equal(six.text_type(e), u"{0} is not a valid URL".format(value)) def test_bad_urls(): values = [ 'foo', 'http://', 'http://example', 
'http://example.', 'http://.com', 'http://invalid-.com', 'http://-invalid.com', 'http://inv-.alid-.com', 'http://inv-.-alid.com', 'foo bar baz', u'foo \u2713', 'http://@foo:bar@example.com', 'http://:bar@example.com', 'http://bar:bar:bar@example.com', ] for value in values: yield check_bad_url_raises, value def test_bad_url_error_message(): values = [ 'google.com', 'domain.google.com', 'kevin:pass@google.com/path?query', u'google.com/path?\u2713', ] for value in values: yield check_url_error_message, value def check_url_error_message(value): try: inputs.url(value) assert False, u"inputs.url({0}) should raise an exception".format(value) except ValueError as e: assert_equal(six.text_type(e), (u"{0} is not a valid URL. Did you mean: http://{0}".format(value))) def test_regex_bad_input(): cases = ( 'abc', '123abc', 'abc123', '', ) num_only = inputs.regex(r'^[0-9]+$') for value in cases: yield assert_raises, ValueError, lambda: num_only(value) def test_regex_good_input(): cases = ( '123', '1234567890', '00000', ) num_only = inputs.regex(r'^[0-9]+$') for value in cases: yield assert_equal, num_only(value), value def test_regex_bad_pattern(): """Regex error raised immediately when regex input parser is created.""" assert_raises(re.error, inputs.regex, '[') def test_regex_flags_good_input(): cases = ( 'abcd', 'ABCabc', 'ABC', ) case_insensitive = inputs.regex(r'^[A-Z]+$', re.IGNORECASE) for value in cases: yield assert_equal, case_insensitive(value), value def test_regex_flags_bad_input(): cases = ( 'abcd', 'ABCabc' ) case_sensitive = inputs.regex(r'^[A-Z]+$') for value in cases: yield assert_raises, ValueError, lambda: case_sensitive(value) class TypesTestCase(unittest.TestCase): def test_boolean_false(self): assert_equal(inputs.boolean("False"), False) def test_boolean_is_false_for_0(self): assert_equal(inputs.boolean("0"), False) def test_boolean_true(self): assert_equal(inputs.boolean("true"), True) def test_boolean_is_true_for_1(self): 
assert_equal(inputs.boolean("1"), True) def test_boolean_upper_case(self): assert_equal(inputs.boolean("FaLSE"), False) def test_boolean(self): assert_equal(inputs.boolean("FaLSE"), False) def test_boolean_with_python_bool(self): """Input that is already a native python `bool` should be passed through without extra processing.""" assert_equal(inputs.boolean(True), True) assert_equal(inputs.boolean(False), False) def test_bad_boolean(self): assert_raises(ValueError, lambda: inputs.boolean("blah")) def test_date_later_than_1900(self): assert_equal(inputs.date("1900-01-01"), datetime(1900, 1, 1)) def test_date_input_error(self): assert_raises(ValueError, lambda: inputs.date("2008-13-13")) def test_date_input(self): assert_equal(inputs.date("2008-08-01"), datetime(2008, 8, 1)) def test_natual_negative(self): assert_raises(ValueError, lambda: inputs.natural(-1)) def test_natural(self): assert_equal(3, inputs.natural(3)) def test_natual_string(self): assert_raises(ValueError, lambda: inputs.natural('foo')) def test_positive(self): assert_equal(1, inputs.positive(1)) assert_equal(10000, inputs.positive(10000)) def test_positive_zero(self): assert_raises(ValueError, lambda: inputs.positive(0)) def test_positive_negative_input(self): assert_raises(ValueError, lambda: inputs.positive(-1)) def test_int_range_good(self): int_range = inputs.int_range(1, 5) assert_equal(3, int_range(3)) def test_int_range_inclusive(self): int_range = inputs.int_range(1, 5) assert_equal(5, int_range(5)) def test_int_range_low(self): int_range = inputs.int_range(0, 5) assert_raises(ValueError, lambda: int_range(-1)) def test_int_range_high(self): int_range = inputs.int_range(0, 5) assert_raises(ValueError, lambda: int_range(6)) def test_isointerval(): intervals = [ ( # Full precision with explicit UTC. 
"2013-01-01T12:30:00Z/P1Y2M3DT4H5M6S", ( datetime(2013, 1, 1, 12, 30, 0, tzinfo=pytz.utc), datetime(2014, 3, 5, 16, 35, 6, tzinfo=pytz.utc), ), ), ( # Full precision with alternate UTC indication "2013-01-01T12:30+00:00/P2D", ( datetime(2013, 1, 1, 12, 30, 0, tzinfo=pytz.utc), datetime(2013, 1, 3, 12, 30, 0, tzinfo=pytz.utc), ), ), ( # Implicit UTC with time "2013-01-01T15:00/P1M", ( datetime(2013, 1, 1, 15, 0, 0, tzinfo=pytz.utc), datetime(2013, 1, 31, 15, 0, 0, tzinfo=pytz.utc), ), ), ( # TZ conversion "2013-01-01T17:00-05:00/P2W", ( datetime(2013, 1, 1, 22, 0, 0, tzinfo=pytz.utc), datetime(2013, 1, 15, 22, 0, 0, tzinfo=pytz.utc), ), ), ( # Date upgrade to midnight-midnight period "2013-01-01/P3D", ( datetime(2013, 1, 1, 0, 0, 0, tzinfo=pytz.utc), datetime(2013, 1, 4, 0, 0, 0, 0, tzinfo=pytz.utc), ), ), ( # Start/end with UTC "2013-01-01T12:00:00Z/2013-02-01T12:00:00Z", ( datetime(2013, 1, 1, 12, 0, 0, tzinfo=pytz.utc), datetime(2013, 2, 1, 12, 0, 0, tzinfo=pytz.utc), ), ), ( # Start/end with time upgrade "2013-01-01/2013-06-30", ( datetime(2013, 1, 1, tzinfo=pytz.utc), datetime(2013, 6, 30, tzinfo=pytz.utc), ), ), ( # Start/end with TZ conversion "2013-02-17T12:00:00-07:00/2013-02-28T15:00:00-07:00", ( datetime(2013, 2, 17, 19, 0, 0, tzinfo=pytz.utc), datetime(2013, 2, 28, 22, 0, 0, tzinfo=pytz.utc), ), ), # Resolution expansion for single date(time) ( # Second with UTC "2013-01-01T12:30:45Z", ( datetime(2013, 1, 1, 12, 30, 45, tzinfo=pytz.utc), datetime(2013, 1, 1, 12, 30, 46, tzinfo=pytz.utc), ), ), ( # Second with tz conversion "2013-01-01T12:30:45+02:00", ( datetime(2013, 1, 1, 10, 30, 45, tzinfo=pytz.utc), datetime(2013, 1, 1, 10, 30, 46, tzinfo=pytz.utc), ), ), ( # Second with implicit UTC "2013-01-01T12:30:45", ( datetime(2013, 1, 1, 12, 30, 45, tzinfo=pytz.utc), datetime(2013, 1, 1, 12, 30, 46, tzinfo=pytz.utc), ), ), ( # Minute with UTC "2013-01-01T12:30+00:00", ( datetime(2013, 1, 1, 12, 30, tzinfo=pytz.utc), datetime(2013, 1, 1, 12, 31, 
tzinfo=pytz.utc), ), ), ( # Minute with conversion "2013-01-01T12:30+04:00", ( datetime(2013, 1, 1, 8, 30, tzinfo=pytz.utc), datetime(2013, 1, 1, 8, 31, tzinfo=pytz.utc), ), ), ( # Minute with implicit UTC "2013-01-01T12:30", ( datetime(2013, 1, 1, 12, 30, tzinfo=pytz.utc), datetime(2013, 1, 1, 12, 31, tzinfo=pytz.utc), ), ), ( # Hour, explicit UTC "2013-01-01T12Z", ( datetime(2013, 1, 1, 12, tzinfo=pytz.utc), datetime(2013, 1, 1, 13, tzinfo=pytz.utc), ), ), ( # Hour with offset "2013-01-01T12-07:00", ( datetime(2013, 1, 1, 19, tzinfo=pytz.utc), datetime(2013, 1, 1, 20, tzinfo=pytz.utc), ), ), ( # Hour with implicit UTC "2013-01-01T12", ( datetime(2013, 1, 1, 12, tzinfo=pytz.utc), datetime(2013, 1, 1, 13, tzinfo=pytz.utc), ), ), ( # Interval with trailing zero fractional seconds should # be accepted. "2013-01-01T12:00:00.0/2013-01-01T12:30:00.000000", ( datetime(2013, 1, 1, 12, tzinfo=pytz.utc), datetime(2013, 1, 1, 12, 30, tzinfo=pytz.utc), ), ), ] for value, expected in intervals: yield assert_equal, inputs.iso8601interval(value), expected def test_invalid_isointerval_error(): try: inputs.iso8601interval('2013-01-01/blah') except ValueError as error: assert_equal( str(error), "Invalid argument: 2013-01-01/blah. argument must be a valid ISO8601 " "date/time interval.", ) return assert False, 'Should raise a ValueError' def test_bad_isointervals(): bad_intervals = [ '2013-01T14:', '', 'asdf', '01/01/2013', ] for bad_interval in bad_intervals: yield ( assert_raises, Exception, inputs.iso8601interval, bad_interval, ) if __name__ == '__main__': unittest.main()
# Django settings used to run the actstream test suite (``actstream.runtests``).
import os
import sys
# Make the repository root importable so the ``actstream`` package is found
# when the test runner starts from this directory.
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))

import django

DEBUG = True
TEMPLATE_DEBUG = DEBUG

ADMINS = (
    ('Justin Quick', 'justquick@gmail.com'),
)

# Backend is selectable through the DATABASE_ENGINE env var; sqlite3 by default.
ENGINE = os.environ.get('DATABASE_ENGINE', 'django.db.backends.sqlite3')
DATABASES = {
    'default': {
        'ENGINE': ENGINE,
        'NAME': 'test',
        'OPTIONS': {
        }
    }
}

if 'postgres' in ENGINE or 'mysql' in ENGINE:
    USER, PASSWORD = 'test', 'test'
    # Travis CI provisions different service accounts per backend.
    if os.environ.get('TRAVIS', False):
        if 'mysql' in ENGINE:
            USER, PASSWORD = 'travis', ''
        else:
            USER, PASSWORD = 'postgres', ''
    DATABASES['default'].update(
        USER=os.environ.get('DATABASE_USER', USER),
        PASSWORD=os.environ.get('DATABASE_PASSWORD', PASSWORD),
        HOST=os.environ.get('DATABASE_HOST', 'localhost')
    )

# Echo the selected engine at import time (visible in CI logs).
print(ENGINE)

TIME_ZONE = 'America/New_York'

LANGUAGE_CODE = 'en-us'

SITE_ID = 1

USE_I18N = True

MEDIA_ROOT = 'media'

MEDIA_URL = '/media/'

SECRET_KEY = 'wzf0h@r2u%m^_zgj^39-y(kd%+n+j0r7=du(q0^s@q1asdfasdfasdft%^2!p'

TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)

ROOT_URLCONF = 'actstream.runtests.urls'

TEMPLATE_DIRS = (
    'templates',
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.admin',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.admindocs',
    'django.contrib.sites',
    'django.contrib.comments',
    'actstream.runtests.testapp',
    'actstream.runtests.testapp_nested',
    'actstream',
)

TEMPLATE_CONTEXT_PROCESSORS = (
    'django.contrib.auth.context_processors.auth',
    'django.core.context_processors.debug',
    'django.core.context_processors.i18n',
    'django.core.context_processors.media',
    'django.core.context_processors.request',
)

# App-specific configuration consumed by actstream itself.
ACTSTREAM_SETTINGS = {
    'MANAGER': 'actstream.runtests.testapp.streams.MyActionManager',
    'FETCH_RELATIONS': True,
    'USE_PREFETCH': True,
    'USE_JSONFIELD': True,
    'GFK_FETCH_DEPTH': 0,
}

# Custom user models (AUTH_USER_MODEL) only exist on Django >= 1.5.
if django.VERSION[:2] >= (1, 5):
    AUTH_USER_MODEL = 'testapp.MyUser'

TEST_RUNNER = 'django.test.simple.DjangoTestSuiteRunner'

# Optional coverage reporting: set COVERAGE in the environment to swap in
# the django_coverage runner.
if 'COVERAGE' in os.environ:
    INSTALLED_APPS += ('django_coverage',)
    TEST_RUNNER = 'django_coverage.coverage_runner.CoverageRunner'
    COVERAGE_REPORT_HTML_OUTPUT_DIR = 'coverage'
    COVERAGE_REPORT_DATA_FILE = '.coverage'
from sklearn2sql_heroku.tests.classification import generic as class_gen class_gen.test_model("DecisionTreeClassifier" , "BinaryClass_10" , "db2")
"""Bridge the ``PropertyLoader`` (i.e. a ``relation()``) and the ``UOWTransaction`` together to allow processing of relation()-based dependencies at flush time. """ from sqlalchemy.orm import sync from sqlalchemy import sql, util, exceptions from sqlalchemy.orm.interfaces import ONETOMANY, MANYTOONE, MANYTOMANY def create_dependency_processor(prop): types = { ONETOMANY : OneToManyDP, MANYTOONE: ManyToOneDP, MANYTOMANY : ManyToManyDP, } if prop.association is not None: return AssociationDP(prop) else: return types[prop.direction](prop) class DependencyProcessor(object): no_dependencies = False def __init__(self, prop): self.prop = prop self.cascade = prop.cascade self.mapper = prop.mapper self.parent = prop.parent self.secondary = prop.secondary self.direction = prop.direction self.is_backref = prop.is_backref self.post_update = prop.post_update self.foreign_keys = prop.foreign_keys self.passive_deletes = prop.passive_deletes self.passive_updates = prop.passive_updates self.enable_typechecks = prop.enable_typechecks self.key = prop.key if not self.prop.synchronize_pairs: raise exceptions.ArgumentError("Can't build a DependencyProcessor for relation %s. No target attributes to populate between parent and child are present" % self.prop) def _get_instrumented_attribute(self): """Return the ``InstrumentedAttribute`` handled by this ``DependencyProecssor``. """ return getattr(self.parent.class_, self.key) def hasparent(self, state): """return True if the given object instance has a parent, according to the ``InstrumentedAttribute`` handled by this ``DependencyProcessor``.""" # TODO: use correct API for this return self._get_instrumented_attribute().impl.hasparent(state) def register_dependencies(self, uowcommit): """Tell a ``UOWTransaction`` what mappers are dependent on which, with regards to the two or three mappers handled by this ``PropertyLoader``. 
Also register itself as a *processor* for one of its mappers, which will be executed after that mapper's objects have been saved or before they've been deleted. The process operation manages attributes and dependent operations upon the objects of one of the involved mappers. """ raise NotImplementedError() def whose_dependent_on_who(self, state1, state2): """Given an object pair assuming `obj2` is a child of `obj1`, return a tuple with the dependent object second, or None if there is no dependency. """ if state1 is state2: return None elif self.direction == ONETOMANY: return (state1, state2) else: return (state2, state1) def process_dependencies(self, task, deplist, uowcommit, delete = False): """This method is called during a flush operation to synchronize data between a parent and child object. It is called within the context of the various mappers and sometimes individual objects sorted according to their insert/update/delete order (topological sort). """ raise NotImplementedError() def preprocess_dependencies(self, task, deplist, uowcommit, delete = False): """Used before the flushes' topological sort to traverse through related objects and ensure every instance which will require save/update/delete is properly added to the UOWTransaction. """ raise NotImplementedError() def _verify_canload(self, state): if not self.enable_typechecks: return if state is not None and not self.mapper._canload(state): raise exceptions.FlushError("Attempting to flush an item of type %s on collection '%s', which is handled by mapper '%s' and does not load items of that type. Did you mean to use a polymorphic mapper for this relationship ? Set 'enable_typechecks=False' on the relation() to disable this exception. Mismatched typeloading may cause bi-directional relationships (backrefs) to not function properly." 
% (state.class_, self.prop, self.mapper)) def _synchronize(self, state, child, associationrow, clearkeys, uowcommit): """Called during a flush to synchronize primary key identifier values between a parent/child object, as well as to an associationrow in the case of many-to-many. """ raise NotImplementedError() def _conditional_post_update(self, state, uowcommit, related): """Execute a post_update call. For relations that contain the post_update flag, an additional ``UPDATE`` statement may be associated after an ``INSERT`` or before a ``DELETE`` in order to resolve circular row dependencies. This method will check for the post_update flag being set on a particular relationship, and given a target object and list of one or more related objects, and execute the ``UPDATE`` if the given related object list contains ``INSERT``s or ``DELETE``s. """ if state is not None and self.post_update: for x in related: if x is not None: uowcommit.register_object(state, postupdate=True, post_update_cols=[r for l, r in self.prop.synchronize_pairs]) break def _pks_changed(self, uowcommit, state): raise NotImplementedError() def __str__(self): return "%s(%s)" % (self.__class__.__name__, str(self.prop)) class OneToManyDP(DependencyProcessor): def register_dependencies(self, uowcommit): if self.post_update: if not self.is_backref: stub = MapperStub(self.parent, self.mapper, self.key) uowcommit.register_dependency(self.mapper, stub) uowcommit.register_dependency(self.parent, stub) uowcommit.register_processor(stub, self, self.parent) else: uowcommit.register_dependency(self.parent, self.mapper) uowcommit.register_processor(self.parent, self, self.parent) def process_dependencies(self, task, deplist, uowcommit, delete = False): #print self.mapper.mapped_table.name + " " + self.key + " " + repr(len(deplist)) + " process_dep isdelete " + repr(delete) + " direction " + repr(self.direction) if delete: # head object is being deleted, and we manage its list of child objects # the child objects 
have to have their foreign key to the parent set to NULL # this phase can be called safely for any cascade but is unnecessary if delete cascade # is on. if self.post_update or not self.passive_deletes=='all': for state in deplist: (added, unchanged, deleted) = uowcommit.get_attribute_history(state, self.key,passive=self.passive_deletes) if unchanged or deleted: for child in deleted: if child is not None and self.hasparent(child) is False: self._synchronize(state, child, None, True, uowcommit) self._conditional_post_update(child, uowcommit, [state]) if self.post_update or not self.cascade.delete: for child in unchanged: if child is not None: self._synchronize(state, child, None, True, uowcommit) self._conditional_post_update(child, uowcommit, [state]) else: for state in deplist: (added, unchanged, deleted) = uowcommit.get_attribute_history(state, self.key, passive=True) if added or deleted: for child in added: self._synchronize(state, child, None, False, uowcommit) if child is not None: self._conditional_post_update(child, uowcommit, [state]) for child in deleted: if not self.cascade.delete_orphan and not self.hasparent(child): self._synchronize(state, child, None, True, uowcommit) if self._pks_changed(uowcommit, state): if unchanged: for child in unchanged: self._synchronize(state, child, None, False, uowcommit) def preprocess_dependencies(self, task, deplist, uowcommit, delete = False): #print self.mapper.mapped_table.name + " " + self.key + " " + repr(len(deplist)) + " preprocess_dep isdelete " + repr(delete) + " direction " + repr(self.direction) if delete: # head object is being deleted, and we manage its list of child objects # the child objects have to have their foreign key to the parent set to NULL if not self.post_update: should_null_fks = not self.cascade.delete and not self.passive_deletes=='all' for state in deplist: (added, unchanged, deleted) = uowcommit.get_attribute_history(state, self.key,passive=self.passive_deletes) if unchanged or deleted: for 
child in deleted: if child is not None and self.hasparent(child) is False: if self.cascade.delete_orphan: uowcommit.register_object(child, isdelete=True) else: uowcommit.register_object(child) if should_null_fks: for child in unchanged: if child is not None: uowcommit.register_object(child) else: for state in deplist: (added, unchanged, deleted) = uowcommit.get_attribute_history(state, self.key,passive=True) if added or deleted: for child in added: if child is not None: uowcommit.register_object(child) for child in deleted: if not self.cascade.delete_orphan: uowcommit.register_object(child, isdelete=False) elif self.hasparent(child) is False: uowcommit.register_object(child, isdelete=True) for c, m in self.mapper.cascade_iterator('delete', child): uowcommit.register_object(c._state, isdelete=True) if not self.passive_updates and self._pks_changed(uowcommit, state): if not unchanged: (added, unchanged, deleted) = uowcommit.get_attribute_history(state, self.key, passive=False) if unchanged: for child in unchanged: uowcommit.register_object(child) def _synchronize(self, state, child, associationrow, clearkeys, uowcommit): source = state dest = child if dest is None or (not self.post_update and uowcommit.is_deleted(dest)): return self._verify_canload(child) if clearkeys: sync.clear(dest, self.mapper, self.prop.synchronize_pairs) else: sync.populate(source, self.parent, dest, self.mapper, self.prop.synchronize_pairs) def _pks_changed(self, uowcommit, state): return sync.source_changes(uowcommit, state, self.parent, self.prop.synchronize_pairs) class DetectKeySwitch(DependencyProcessor): """a special DP that works for many-to-one relations, fires off for child items who have changed their referenced key.""" no_dependencies = True def register_dependencies(self, uowcommit): uowcommit.register_processor(self.parent, self, self.mapper) def preprocess_dependencies(self, task, deplist, uowcommit, delete=False): # for non-passive updates, register in the preprocess stage # so 
that mapper save_obj() gets a hold of changes if not delete and not self.passive_updates: self._process_key_switches(deplist, uowcommit) def process_dependencies(self, task, deplist, uowcommit, delete=False): # for passive updates, register objects in the process stage # so that we avoid ManyToOneDP's registering the object without # the listonly flag in its own preprocess stage (results in UPDATE) # statements being emitted if not delete and self.passive_updates: self._process_key_switches(deplist, uowcommit) def _process_key_switches(self, deplist, uowcommit): switchers = util.Set([s for s in deplist if self._pks_changed(uowcommit, s)]) if switchers: # yes, we're doing a linear search right now through the UOW. only # takes effect when primary key values have actually changed. # a possible optimization might be to enhance the "hasparents" capability of # attributes to actually store all parent references, but this introduces # more complicated attribute accounting. for s in [elem for elem in uowcommit.session.identity_map.all_states() if issubclass(elem.class_, self.parent.class_) and self.key in elem.dict and elem.dict[self.key]._state in switchers ]: uowcommit.register_object(s, listonly=self.passive_updates) sync.populate(s.dict[self.key]._state, self.mapper, s, self.parent, self.prop.synchronize_pairs) #self.syncrules.execute(s.dict[self.key]._state, s, None, None, False) def _pks_changed(self, uowcommit, state): return sync.source_changes(uowcommit, state, self.mapper, self.prop.synchronize_pairs) class ManyToOneDP(DependencyProcessor): def __init__(self, prop): DependencyProcessor.__init__(self, prop) self.mapper._dependency_processors.append(DetectKeySwitch(prop)) def register_dependencies(self, uowcommit): if self.post_update: if not self.is_backref: stub = MapperStub(self.parent, self.mapper, self.key) uowcommit.register_dependency(self.mapper, stub) uowcommit.register_dependency(self.parent, stub) uowcommit.register_processor(stub, self, self.parent) 
else: uowcommit.register_dependency(self.mapper, self.parent) uowcommit.register_processor(self.mapper, self, self.parent) def process_dependencies(self, task, deplist, uowcommit, delete = False): #print self.mapper.mapped_table.name + " " + self.key + " " + repr(len(deplist)) + " process_dep isdelete " + repr(delete) + " direction " + repr(self.direction) if delete: if self.post_update and not self.cascade.delete_orphan and not self.passive_deletes=='all': # post_update means we have to update our row to not reference the child object # before we can DELETE the row for state in deplist: self._synchronize(state, None, None, True, uowcommit) (added, unchanged, deleted) = uowcommit.get_attribute_history(state, self.key,passive=self.passive_deletes) if added or unchanged or deleted: self._conditional_post_update(state, uowcommit, deleted + unchanged + added) else: for state in deplist: (added, unchanged, deleted) = uowcommit.get_attribute_history(state, self.key,passive=True) if added or deleted or unchanged: for child in added: self._synchronize(state, child, None, False, uowcommit) self._conditional_post_update(state, uowcommit, deleted + unchanged + added) def preprocess_dependencies(self, task, deplist, uowcommit, delete = False): #print self.mapper.mapped_table.name + " " + self.key + " " + repr(len(deplist)) + " PRE process_dep isdelete " + repr(delete) + " direction " + repr(self.direction) if self.post_update: return if delete: if self.cascade.delete or self.cascade.delete_orphan: for state in deplist: (added, unchanged, deleted) = uowcommit.get_attribute_history(state, self.key,passive=self.passive_deletes) if self.cascade.delete_orphan: todelete = added + unchanged + deleted else: todelete = added + unchanged for child in todelete: if child is None: continue uowcommit.register_object(child, isdelete=True) for c, m in self.mapper.cascade_iterator('delete', child): uowcommit.register_object(c._state, isdelete=True) else: for state in deplist: 
uowcommit.register_object(state) if self.cascade.delete_orphan: (added, unchanged, deleted) = uowcommit.get_attribute_history(state, self.key,passive=self.passive_deletes) if deleted: for child in deleted: if self.hasparent(child) is False: uowcommit.register_object(child, isdelete=True) for c, m in self.mapper.cascade_iterator('delete', child): uowcommit.register_object(c._state, isdelete=True) def _synchronize(self, state, child, associationrow, clearkeys, uowcommit): if state is None or (not self.post_update and uowcommit.is_deleted(state)): return if clearkeys or child is None: sync.clear(state, self.parent, self.prop.synchronize_pairs) else: self._verify_canload(child) sync.populate(child, self.mapper, state, self.parent, self.prop.synchronize_pairs) class ManyToManyDP(DependencyProcessor): def register_dependencies(self, uowcommit): # many-to-many. create a "Stub" mapper to represent the # "middle table" in the relationship. This stub mapper doesnt save # or delete any objects, but just marks a dependency on the two # related mappers. its dependency processor then populates the # association table. 
stub = MapperStub(self.parent, self.mapper, self.key) uowcommit.register_dependency(self.parent, stub) uowcommit.register_dependency(self.mapper, stub) uowcommit.register_processor(stub, self, self.parent) def process_dependencies(self, task, deplist, uowcommit, delete = False): #print self.mapper.mapped_table.name + " " + self.key + " " + repr(len(deplist)) + " process_dep isdelete " + repr(delete) + " direction " + repr(self.direction) connection = uowcommit.transaction.connection(self.mapper) secondary_delete = [] secondary_insert = [] secondary_update = [] if self.prop._reverse_property: reverse_dep = getattr(self.prop._reverse_property, '_dependency_processor', None) else: reverse_dep = None if delete: for state in deplist: (added, unchanged, deleted) = uowcommit.get_attribute_history(state, self.key,passive=self.passive_deletes) if deleted or unchanged: for child in deleted + unchanged: if child is None or (reverse_dep and (reverse_dep, "manytomany", child, state) in uowcommit.attributes): continue associationrow = {} self._synchronize(state, child, associationrow, False, uowcommit) secondary_delete.append(associationrow) uowcommit.attributes[(self, "manytomany", state, child)] = True else: for state in deplist: (added, unchanged, deleted) = uowcommit.get_attribute_history(state, self.key) if added or deleted: for child in added: if child is None or (reverse_dep and (reverse_dep, "manytomany", child, state) in uowcommit.attributes): continue associationrow = {} self._synchronize(state, child, associationrow, False, uowcommit) uowcommit.attributes[(self, "manytomany", state, child)] = True secondary_insert.append(associationrow) for child in deleted: if child is None or (reverse_dep and (reverse_dep, "manytomany", child, state) in uowcommit.attributes): continue associationrow = {} self._synchronize(state, child, associationrow, False, uowcommit) uowcommit.attributes[(self, "manytomany", state, child)] = True secondary_delete.append(associationrow) if not 
self.passive_updates and unchanged and self._pks_changed(uowcommit, state): for child in unchanged: associationrow = {} sync.update(state, self.parent, associationrow, "old_", self.prop.synchronize_pairs) sync.update(child, self.mapper, associationrow, "old_", self.prop.secondary_synchronize_pairs) #self.syncrules.update(associationrow, state, child, "old_") secondary_update.append(associationrow) if secondary_delete: secondary_delete.sort() # TODO: precompile the delete/insert queries? statement = self.secondary.delete(sql.and_(*[c == sql.bindparam(c.key, type_=c.type) for c in self.secondary.c if c.key in associationrow])) result = connection.execute(statement, secondary_delete) if result.supports_sane_multi_rowcount() and result.rowcount != len(secondary_delete): raise exceptions.ConcurrentModificationError("Deleted rowcount %d does not match number of secondary table rows deleted from table '%s': %d" % (result.rowcount, self.secondary.description, len(secondary_delete))) if secondary_update: statement = self.secondary.update(sql.and_(*[c == sql.bindparam("old_" + c.key, type_=c.type) for c in self.secondary.c if c.key in associationrow])) result = connection.execute(statement, secondary_update) if result.supports_sane_multi_rowcount() and result.rowcount != len(secondary_update): raise exceptions.ConcurrentModificationError("Updated rowcount %d does not match number of secondary table rows updated from table '%s': %d" % (result.rowcount, self.secondary.description, len(secondary_update))) if secondary_insert: statement = self.secondary.insert() connection.execute(statement, secondary_insert) def preprocess_dependencies(self, task, deplist, uowcommit, delete = False): #print self.mapper.mapped_table.name + " " + self.key + " " + repr(len(deplist)) + " preprocess_dep isdelete " + repr(delete) + " direction " + repr(self.direction) if not delete: for state in deplist: (added, unchanged, deleted) = uowcommit.get_attribute_history(state, self.key,passive=True) if 
deleted: for child in deleted: if self.cascade.delete_orphan and self.hasparent(child) is False: uowcommit.register_object(child, isdelete=True) for c, m in self.mapper.cascade_iterator('delete', child): uowcommit.register_object(c._state, isdelete=True) def _synchronize(self, state, child, associationrow, clearkeys, uowcommit): if associationrow is None: return self._verify_canload(child) sync.populate_dict(state, self.parent, associationrow, self.prop.synchronize_pairs) sync.populate_dict(child, self.mapper, associationrow, self.prop.secondary_synchronize_pairs) def _pks_changed(self, uowcommit, state): return sync.source_changes(uowcommit, state, self.parent, self.prop.synchronize_pairs) class AssociationDP(OneToManyDP): def __init__(self, *args, **kwargs): super(AssociationDP, self).__init__(*args, **kwargs) self.cascade.delete = True self.cascade.delete_orphan = True class MapperStub(object): """Pose as a Mapper representing the association table in a many-to-many join, when performing a ``flush()``. The ``Task`` objects in the objectstore module treat it just like any other ``Mapper``, but in fact it only serves as a dependency placeholder for the many-to-many update task. """ __metaclass__ = util.ArgSingleton def __init__(self, parent, mapper, key): self.mapper = mapper self.base_mapper = self self.class_ = mapper.class_ self._inheriting_mappers = [] def polymorphic_iterator(self): return iter([self]) def _register_dependencies(self, uowcommit): pass def _save_obj(self, *args, **kwargs): pass def _delete_obj(self, *args, **kwargs): pass def primary_mapper(self): return self
# Nuclear-proliferation factor scoring: reshapes raw country data into
# historical/present records and converts raw indicators to 0-10 factor
# scores.  NOTE(review): Python 2 code — ``range(...).insert`` and
# ``open(..., 'wb')`` with csv.writer only work on Python 2.
import numpy as np
import pandas as pd
import gen_fns
import math
import re
import csv
import correlation_matrix as co


def reformat_raw_data(file, n_header=1, outfile=None):
    # Split raw country rows into pursuit-phase and acquisition-phase
    # records, tag each with a Status code, and optionally dump a TSV.
    from gen_fns import get_data
    from numpy.lib.recfunctions import append_fields
    countries, columns, raw_data = get_data(file, n_header, named_struct=True)
    pursue_names = []
    acquire_names = []
    clean_names = []
    status = np.full(len(countries), 0)
    raw_data = append_fields(raw_data, 'Status', status)
    cols = raw_data.dtype.names
    # move the appended Status column to position 2 (Python 2: range() is a list)
    order = range(0, len(cols) - 1)
    order.insert(2,len(cols) - 1)
    data = raw_data[[cols[i] for i in order]]
    cols = data.dtype.names
    for c in range(len(cols)):
        if ('Pursuit_' in cols[c]):
            pursue_names.append(cols[c])
            new_str = re.sub('Pursuit_', '', cols[c])
            clean_names.append(new_str)
        elif ('Acquire_' in cols[c]):
            acquire_names.append(cols[c])
        else:
            pursue_names.append(cols[c])
            acquire_names.append(cols[c])
            clean_names.append(cols[c])
    # all countries, data split into pursue-relevant or acquire-relevant
    pursue_array = data[pursue_names]
    acquire_array = data[acquire_names]
    no_acquire_mask = np.isnan(acquire_array['Acquire_Date'])
    conven_mask = np.isnan(pursue_array['Pursuit_Date'])
    explore_mask = (pursue_array['Pursuit_Date'] < 0)
    pursue_only_mask = (np.isnan(acquire_array['Acquire_Date'])) & (~(np.isnan(pursue_array['Pursuit_Date'])) & (pursue_array['Pursuit_Date'] > 0))
    # states that pursued (_init_) and successfully acquired
    acquire_states = countries[~no_acquire_mask]
    acquire_final_array = acquire_array[~no_acquire_mask]
    acquire_init_array = pursue_array[~no_acquire_mask]
    # states that never pursued and have only conventional weapons
    conven_array = pursue_array[conven_mask]
    conven_states = countries[conven_mask]
    # states that explored NW but ultimately did not pursue
    explore_array = pursue_array[explore_mask]
    explore_present_array = acquire_array[explore_mask]
    explore_states = countries[explore_mask]
    # states that pursued but did not succeeed in acquiring
    pursue_only_array = pursue_array[pursue_only_mask]
    pursue_present_array = acquire_array[pursue_only_mask]
    pursue_states = countries[pursue_only_mask]
    # Status
    # -1 present data for a state that unsucessfully pursued
    # 0 present data for never-pursued (has only 'conventional weapons')
    # 1 historical data for explored
    # 2 historical data for pursued
    # 3 historical data for acquired
    acquire_init_array['Status'] = 2
    acquire_final_array['Status'] = 3
    conven_array['Status'] = 0
    pursue_only_array['Status'] = 2
    pursue_present_array['Status'] = -1
    explore_array['Status'] = 1
    explore_present_array['Status'] = 0
    # present-day records are stamped with the year 2015
    conven_array['Pursuit_Date'] = 2015
    explore_array['Pursuit_Date'] = abs(explore_array['Pursuit_Date'])
    explore_present_array['Acquire_Date'] = 2015
    pursue_present_array['Acquire_Date'] = 2015
    # rename every sub-array (and its mask) to the common cleaned names
    acquire_final_array.dtype.names = clean_names
    acquire_final_array.mask.dtype.names = clean_names
    acquire_init_array.dtype.names = clean_names
    acquire_init_array.mask.dtype.names = clean_names
    conven_array.dtype.names = clean_names
    conven_array.mask.dtype.names = clean_names
    pursue_only_array.dtype.names = clean_names
    pursue_only_array.mask.dtype.names = clean_names
    pursue_present_array.dtype.names = clean_names
    pursue_present_array.mask.dtype.names = clean_names
    explore_array.dtype.names = clean_names
    explore_array.mask.dtype.names = clean_names
    explore_present_array.dtype.names = clean_names
    explore_present_array.mask.dtype.names = clean_names
    final_states = np.hstack((conven_states, explore_states, explore_states, pursue_states, acquire_states, acquire_states, pursue_states))
    final_data = np.hstack((conven_array, explore_present_array, explore_array, pursue_only_array, acquire_init_array, acquire_final_array, pursue_present_array))
    header ='Country' + '\t' + ('\t'.join(map(str,final_data.dtype.names)))
    if (outfile != None):
        with open(outfile, 'wb') as f:
            writer = csv.writer(f)
            writer.writerow([header])
            i = 0
            for comp in final_data.compressed():
                cur_line = final_states[i]
                for c in range(len(comp)):
                    val = comp[c]
                    if (c <= 1):
                        val = int(val)
                    cur_line = cur_line + '\t' + str(val)
                writer.writerow([cur_line])
                i+=1
    return final_states,final_data


def calc_weights(filename, mn_status=0, mx_status=2, correl_min = 1e-6):
    # Derive factor weights from the correlation of each factor column
    # (cols 3..10) against Status (col 2), for rows in the status range.
    data_file = open(filename, 'r')
    full_matrix = np.loadtxt(data_file, skiprows=1,usecols=(2,3,4,5,6,7,8,9,10))
    relevant_mask = ((full_matrix[:,0] >= mn_status) & (full_matrix[:,0] <= mx_status))
    matrix = full_matrix[relevant_mask]
    cor = co.Cor_matrix(matrix)
    factor_vals = np.array(cor[0,1:])[0]
    factor_vals[factor_vals < correl_min] = 0
    f_tot = factor_vals.sum()
    weights = factor_vals/f_tot # normalize weights to sum to one
    return weights


def calc_pursuit(raw_data, weights):
    # Weighted sum of factor scores per row, rounded to 4 decimal places.
    final_vals = []
    weighted_factors = weights*raw_data
    for i in range(raw_data.shape[0]):
        val = weighted_factors[i].sum()
        final_vals.append(round(val,4))
    return final_vals


def get_nws(): # nuclear weapon states -> year of first acquisition
    nws = {}
    nws["China"] = 1964
    nws["France"] = 1960
    nws["India"] = 1974
    nws["Israel"] = 1969
    nws["N-Korea"] = 2006
    nws["Pakist"] = 1987
    nws["S-Afric"] = 1979
    nws["UK"] = 1952
    nws["US"] = 1945
    nws["USSR"] = 1949
    return nws


def time_to_acquire(): # years from pursuit to acquisition (negative = never acquired)
    t2acq = {}
    t2acq["Argent"] = -37
    t2acq["Austral"] = -54
    t2acq["Brazil"] = -37
    t2acq["China"] = 9
    t2acq["Egypt"] = -50
    t2acq["France"] = 6
    t2acq["India"] = 10
    t2acq["Iran"] = -30
    t2acq["Iraq"] = -32
    t2acq["Israel"] = 9
    t2acq["Libya"] = -45
    t2acq["N-Korea"] = 26
    t2acq["S-Korea"] = -45
    t2acq["Pakist"] = 15
    t2acq["S-Afric"] = 5
    t2acq["Syria"] = -15
    t2acq["UK"] = 5
    t2acq["US"] = 3
    t2acq["USSR"] = 4
    return t2acq


def get_pursue(): # pursuing states -> year pursuit began
    pursues = {}
    pursues["Argent"] = 1978
    pursues["Austral"] = 1961
    pursues["Brazil"] = 1978
    pursues["China"] = 1955
    pursues["Egypt"] = 1965
    pursues["France"] = 1954
    pursues["India"] = 1964
    pursues["Iran"] = 1985
    pursues["Iraq"] = 1983
    pursues["Israel"] = 1960
    pursues["Libya"] = 1970
    pursues["N-Korea"] = 1980
    pursues["S-Korea"] = 1970
    pursues["Pakist"] = 1972
    pursues["S-Afric"] = 1974
    pursues["Syria"] = 2000
    pursues["UK"] = 1947
    pursues["US"] = 1939
    pursues["USSR"] = 1945
    return pursues


def get_prolif_pe(countries, pes):
    # Filter (countries, pes) down to proliferant states only.
    # NOTE(review): get_prolif() is not defined in the visible portion of
    # this file — confirm it exists elsewhere (get_nws may be intended).
    prolif_pes = []
    prolif_st = []
    proliferants = get_prolif()
    for i in range(len(countries)):
        curr_state = countries[i]
        if curr_state in proliferants:
            prolif_pes.append(pes[i])
            prolif_st.append(curr_state)
    return(prolif_st, prolif_pes)


def get_pes(all_countries, pes, status):
    # Select the (state, pe) pairs belonging to either the pursuit list
    # ("Pursue") or the nuclear-weapon-state list ("Prolif").
    pursue_pes = []
    pursue_st = []
    if (status == "Pursue"):
        states = get_pursue()
    elif (status == "Prolif"):
        states = get_nws()
    else:
        return "DO YOU WANT PURSUIT OR PROLIFERANTS?"
    for i in range(len(all_countries)):
        curr_state = all_countries[i]
        if curr_state in states:
            pursue_pes.append(pes[i])
            pursue_st.append(curr_state)
    return(pursue_st, pursue_pes)


def raw_to_factor_scores(infile, n_head=1, outfile=None):
    # Convert each raw indicator column to a 0-10 factor score via the
    # mapping functions below; optionally write a TSV of all scores.
    # NOTE(review): enrich2score, ures2score and polity2auth_score are not
    # defined in the visible portion of this file — verify they exist.
    from gen_fns import get_data
    countries, col_names, raw_data = get_data(infile, n_header = n_head, named_struct=True)
    factors = {
        "Reactor": [react2score, [raw_data['Reactors'], raw_data['Research_Reactors']]],
        "Mil_Iso": [alliance2iso_score, [raw_data['NonProlif_Alliance'], raw_data['Prolif_Alliance']]],
        "En_Repr": [enrich2score, raw_data['Enrichment']],
        "U_Res": [ures2score, raw_data['UReserves']],
        "Sci_Net": [network2score, raw_data['Scientific_Network']],
        "Conflict": [relations2conflict_score, [raw_data['Weapon_Status_1'], raw_data['Conflict_Relation_1'], raw_data['Weapon_Status_2'], raw_data['Conflict_Relation_2'], raw_data['Weapon_Status_3'], raw_data['Conflict_Relation_3'], raw_data['Status']]],
        "Auth": [polity2auth_score, raw_data['Polity_Index']],
        "Mil_Sp": [mil_sp2score, raw_data['Military_GDP']]
    }
    score_columns = []
    i = 0
    # column order follows dict iteration order of ``factors``
    for key in factors:
        score_columns.append(key)
        fn, inputs = factors[key]
        scores = fn(inputs)
        if (i == 0):
            all_scores = scores
        else:
            all_scores = np.column_stack((all_scores, scores))
        i+=1
    header ='Country' + '\t' + 'Date'+ '\t' + 'Status' + '\t'+ ( '\t'.join(map(str,score_columns)))
    if (outfile != None):
        with open(outfile, 'wb') as f:
            writer = csv.writer(f)
            writer.writerow([header])
            for row in range(len(all_scores)):
                cur_line = countries[row]+ '\t'+ ( str(int(raw_data['Date'][row]))+ '\t') + ( str(raw_data['Status'][row]) + '\t') + ( ('\t'.join(map(str, all_scores[row]))))
                writer.writerow([cur_line])
    return countries, raw_data['Date'], raw_data['Status'], score_columns, all_scores


def bilateral2score(npt, ws=None):
    # Score bilateral agreement counts; counts with weapon states (ws)
    # score higher than counts with non-nuclear states (npt).
    stepA = 2
    stepB = 4
    stepC = 7
    all_scores = np.ndarray(npt.size)
    for i in range(npt.size):
        score = -1
        # if all agreements are with other non-nuclear states
        if (ws is None) or (ws[i] == 0) or (math.isnan(ws[i])):
            if (math.isnan(npt[i])):
                score = np.nan
            elif (npt[i] <= stepA):
                score = 1
            elif (npt[i] <= stepB):
                score = 2
            else:
                score = 3
        else:
            if (ws[i] <= stepA):
                score = 6
            elif (ws[i] <= stepB):
                score = 7
            elif (ws[i] <= stepC):
                score = 8
            else:
                score = 10
        all_scores[i] = score
    return all_scores


def gpi2conflict_score(gpi_array):
    # Map Global Peace Index values onto a 2..10 step ladder.
    stepA = 1.5
    stepB = 2
    stepC = 2.5
    stepD = 3.5
    all_scores = np.ndarray(gpi_array.size)
    for i in range(gpi_array.size):
        gpi_val = gpi_array[i]
        score = -1
        if (math.isnan(gpi_val)):
            score = np.nan
        elif (gpi_val < stepA):
            score = 2
        elif (gpi_val < stepB):
            score = 4
        elif (gpi_val < stepC):
            score = 6
        elif (gpi_val < stepD):
            score = 8
        else:
            score = 10
        all_scores[i] = score
    return all_scores


def mil_sp2score(mil_gdp):
    # Map military spending (% of GDP) onto a 1..10 step ladder.
    stepA = 1
    stepB = 2
    stepC = 3
    stepD = 5
    all_scores = np.ndarray(mil_gdp.size)
    for i in range(mil_gdp.size):
        score = -1
        if (math.isnan(mil_gdp[i])):
            score = np.nan
        elif (mil_gdp[i] < stepA):
            score = 1
        elif (mil_gdp[i] < stepB):
            score = 2
        elif (mil_gdp[i] < stepC):
            score = 4
        elif (mil_gdp[i] < stepD):
            score = 7
        else:
            score = 10
        all_scores[i] = score
    return all_scores


def react2score(all_reactors):
    # Combine commercial and research reactor counts (negative = planned)
    # into a single score; final value is 10 - |ladder score|.
    step0 = 0.0
    stepA = -4.0
    stepB = -1.0
    stepC = 3.0
    stepD = 7.0
    n_react = all_reactors[0]
    n_research = all_reactors[1]
    all_scores = np.ndarray(n_react.size)
    for i in range(n_react.size):
        score = 0
        n_tot = 0
        # if there are both planned (negative) and built reactors (positive)
        # between research and commercial, use the 'built' number
        if ((n_react[i] * n_research[i]) < 0):
            n_tot = max(n_research[i], n_react[i])
        else:
            n_tot = n_research[i] + n_react[i]
        if (math.isnan(n_tot)):
            score = np.nan
        elif (n_tot == step0):
            score = 0
        elif (n_tot <= stepA):
            score = 2
        elif (n_tot <= stepB):
            score = 1
        elif (n_tot <= stepC):
            score = 4
        elif (n_tot <= stepD):
            score = 7
        else:
            score = 10
        all_scores[i] = 10 - abs(score)
    return all_scores


def upsala2conflict_score(all_conflict):
    # Score Uppsala-style conflict counts weighted by intensity.
    neutral = 5 # for neutral relationships, score is 5
    stepC = 4
    n_conflicts = all_conflict[0]
    intensity = all_conflict[1]
    all_scores = np.ndarray(n_conflicts.size)
    for i in range(n_conflicts.size):
        score = 0
        # If intensity is (-) then conflict is a coalition, downgrade intensity
        # to -1. If n_conflict is (-) then an additional non-armed (tense)
        # conflict has been added to the data (eg Korean Armistace), (but still
        # may be coded as zero intensity)
        if ((intensity[i] < 0) or (n_conflicts[i] < 0)):
            n_tot = abs(n_conflicts[i])
        else:
            n_tot = n_conflicts[i] * intensity[i]
        if (math.isnan(n_tot)):
            score = np.nan
        elif (n_tot <= stepC):
            score = neutral + n_tot
        else:
            score = 10
        all_scores[i] = score
    return all_scores


def relations2conflict_score(all_conflict):
    # Average the pairwise conflict scores for up to three rival states,
    # using lookup_conflict_val for each (host, rival, relation) triple.
    weapon_stat1 = all_conflict[0]
    conflict1= all_conflict[1]
    weapon_stat2 = all_conflict[2]
    conflict2= all_conflict[3]
    weapon_stat3 = all_conflict[4]
    conflict3= all_conflict[5]
    host_status = all_conflict[6]
    all_scores = np.ndarray(weapon_stat1.size)
    for i in range(all_scores.size):
        n_scores = 0
        score = 0
        if (np.isfinite(weapon_stat1[i])):
            n_scores+=1
            score+= lookup_conflict_val(host_status[i], weapon_stat1[i], conflict1[i])
        if (np.isfinite(weapon_stat2[i])):
            n_scores+=1
            score+= lookup_conflict_val(host_status[i], weapon_stat2[i], conflict2[i])
        if (np.isfinite(weapon_stat3[i])):
            n_scores+=1
            score+= lookup_conflict_val(host_status[i], weapon_stat3[i], conflict3[i])
        if (math.isnan(score) or (n_scores == 0)):
            avg_score = np.nan
        else:
            avg_score = score/n_scores
        all_scores[i] = avg_score
    return all_scores


def lookup_conflict_val(statusA, statusB, relation):
    # Table of conflict scores keyed by "<relation>_<statusLo>_<statusHi>"
    # where statuses are weapon-status codes (0, 2 or 3 after recoding).
    score_matrix = {
        "ally_0_0": 2,
        "neut_0_0": 2,
        "enemy_0_0": 6,
        "ally_0_2": 3,
        "neut_0_2": 4,
        "enemy_0_2": 8,
        "ally_0_3": 1,
        "neut_0_3": 4,
        "enemy_0_3": 6,
        "ally_2_2": 3,
        "neut_2_2": 4,
        "enemy_2_2": 9,
        "ally_2_3": 3,
        "neut_2_3": 5,
        "enemy_2_3": 10,
        "ally_3_3": 1,
        "neut_3_3": 3,
        "enemy_3_3": 5
    }
    # string is enemy if relation is -1
    reln_str = "enemy_"
    if (relation == 1):
        reln_str = "ally_"
    elif (relation == 0):
        reln_str = "neut_"
    first_stat = statusA
    second_stat = statusB
    #Convention - list smaller number first
    if (statusA > statusB):
        first_stat = statusB
        second_stat = statusA
    # Recode any status that is not 0,2,3
    # If a country has given up its weapon program (-1) or is only
    # 'exploring' (1) then treat them as 'never pursued' (0) for now.
    # Someday these status' could be given unique conflict values in the matrix
    if (first_stat == 1):
        first_stat = 0
    if (first_stat == -1):
        first_stat = 0
    if (second_stat == 1):
        second_stat = 0
    if (second_stat == -1):
        second_stat = 0
    reln_str += str(int(first_stat)) + "_" + str(int(second_stat))
    return score_matrix[reln_str]


def network2score(sci_val):
    # Map scientific-network size onto a 1..10 step ladder (NaN scores 1).
    stepA = 1
    stepB = 2
    stepC = 3
    all_scores = np.ndarray(sci_val.size)
    for i in range(sci_val.size):
        score = -1
        if (math.isnan(sci_val[i])):
            score = 1
        elif (sci_val[i] < stepA):
            score = 2
        elif (sci_val[i] < stepB):
            score = 4
        elif (sci_val[i] < stepC):
            score = 7
        else:
            score = 10
        all_scores[i] = score
    return all_scores


def alliance2iso_score(all_alliances):
    # Score military isolation from counts of non-proliferant and
    # proliferant alliances.  (Function continues beyond this excerpt.)
    np_stepA = 2
    np_stepB = 4
    p_stepA = 1
    p_stepB = 2
    p_stepC = 3
    non_prolif = all_alliances[0]
    prolif = all_alliances[1]
    all_scores = np.ndarray(non_prolif.size)
    for i in range(non_prolif.size):
        score = 0
        if (math.isnan(prolif[i])) and (math.isnan(non_prolif[i])):
            score = np.nan
        elif (non_prolif[i] <= np_stepA):
            score = 1
        elif (non_prolif[i] <= np_stepB):
            score = 2
        else:
            score = 3
        if (not math.isnan(prolif[i])):
            if (prolif[i] == p_stepA):
                score = score + 5
            elif (prolif[i] ==
p_stepB): score = score + 6 elif (prolif[i] >= p_stepC): score = score + 7 # Isolation is the inverse of amount of alliances all_scores[i] = 10 - score return all_scores def polity2auth_score(polity): scores = polity return scores def enrich2score(enrich): scores = enrich*10.0 return scores def ures2score(ures): scores = ures*10.0 return scores