code
stringlengths
3
1.05M
repo_name
stringlengths
5
104
path
stringlengths
4
251
language
stringclasses
1 value
license
stringclasses
15 values
size
int64
3
1.05M
from __future__ import unicode_literals

from django.core.exceptions import ObjectDoesNotExist
from django.db.models import Model, NOT_PROVIDED
from django.utils.encoding import smart_text


def track_field(field):
    """
    Returns whether the given field should be tracked by Auditlog.

    Untracked fields are many-to-many relations and relations to the Auditlog
    LogEntry model.

    :param field: The field to check.
    :type field: Field
    :return: Whether the given field should be tracked.
    :rtype: bool
    """
    from auditlog.models import LogEntry

    # Do not track many to many relations
    if field.many_to_many:
        return False

    # Do not track relations to LogEntry
    if getattr(field, 'rel', None) is not None and field.rel.to == LogEntry:
        return False

    return True


def get_fields_in_model(instance):
    """
    Returns the list of fields in the given model instance. Checks whether to
    use the official _meta API or use the raw data. This method excludes many
    to many fields.

    :param instance: The model instance to get the fields for
    :type instance: Model
    :return: The list of fields for the given model (instance)
    :rtype: list
    """
    assert isinstance(instance, Model)

    # Check if the Django 1.8 _meta API is available
    use_api = hasattr(instance._meta, 'get_fields') and callable(instance._meta.get_fields)

    if use_api:
        return [f for f in instance._meta.get_fields() if track_field(f)]
    return instance._meta.fields


def model_instance_diff(old, new):
    """
    Calculates the differences between two model instances. One of the
    instances may be ``None`` (i.e., a newly created model or deleted model).
    This will cause all fields with a value to have changed (from ``None``).

    :param old: The old state of the model instance.
    :type old: Model
    :param new: The new state of the model instance.
    :type new: Model
    :return: A dictionary with the names of the changed fields as keys and a
             two tuple of the old and new field values as value, or ``None``
             when nothing changed.
    :rtype: dict
    """
    from auditlog.registry import auditlog

    if not (old is None or isinstance(old, Model)):
        raise TypeError("The supplied old instance is not a valid model instance.")
    if not (new is None or isinstance(new, Model)):
        raise TypeError("The supplied new instance is not a valid model instance.")

    diff = {}

    # Collect the candidate fields and the per-model include/exclude
    # configuration registered with auditlog.
    if old is not None and new is not None:
        fields = set(old._meta.fields + new._meta.fields)
        model_fields = auditlog.get_model_fields(new._meta.model)
    elif old is not None:
        fields = set(get_fields_in_model(old))
        model_fields = auditlog.get_model_fields(old._meta.model)
    elif new is not None:
        fields = set(get_fields_in_model(new))
        model_fields = auditlog.get_model_fields(new._meta.model)
    else:
        fields = set()
        model_fields = None

    # Check if fields must be filtered (include list first, then excludes).
    if model_fields and (model_fields['include_fields'] or model_fields['exclude_fields']) and fields:
        filtered_fields = []
        if model_fields['include_fields']:
            filtered_fields = [field for field in fields
                               if field.name in model_fields['include_fields']]
        else:
            filtered_fields = fields
        if model_fields['exclude_fields']:
            filtered_fields = [field for field in filtered_fields
                               if field.name not in model_fields['exclude_fields']]
        fields = filtered_fields

    for field in fields:
        try:
            old_value = smart_text(getattr(old, field.name, None))
        except ObjectDoesNotExist:
            # FIX: normalize the fallback default to text as well.  The
            # previous code assigned the raw default (e.g. the int 0) and
            # then compared it with the smart_text() value of the other
            # side (e.g. the text "0"), so a default equal to the new
            # value was always reported as a spurious change.
            default = field.default if field.default is not NOT_PROVIDED else None
            old_value = smart_text(default) if default is not None else None
        try:
            new_value = smart_text(getattr(new, field.name, None))
        except ObjectDoesNotExist:
            new_value = None

        if old_value != new_value:
            # smart_text() here still covers the None fallbacks above.
            diff[field.name] = (smart_text(old_value), smart_text(new_value))

    if len(diff) == 0:
        diff = None

    return diff
robmagee/django-auditlog
src/auditlog/diff.py
Python
mit
4,114
# -*- coding: utf-8 -*-
from typing import Text

from zerver.lib.test_classes import WebhookTestCase

class JiraHookTests(WebhookTestCase):
    # Fixture-driven tests for the JIRA webhook integration; each scenario
    # is exercised with both the v1 and v2 JIRA payload formats where a
    # fixture exists for it.
    STREAM_NAME = 'jira'
    URL_TEMPLATE = u"/api/v1/external/jira?api_key={api_key}"

    def test_unknown(self) -> None:
        # Unknown event payloads must still return success so JIRA does
        # not disable the webhook on repeated failures.
        url = self.build_webhook_url()

        result = self.client_post(url,
                                  self.get_body('unknown_v1'),
                                  stream_name="jira",
                                  content_type="application/json")
        self.assert_json_success(result)

        result = self.client_post(url,
                                  self.get_body('unknown_v2'),
                                  stream_name="jira",
                                  content_type="application/json")
        self.assert_json_success(result)

    def test_custom_stream(self) -> None:
        # The ``stream`` query parameter overrides the default stream.
        api_key = self.test_user.api_key
        url = "/api/v1/external/jira?api_key=%s&stream=jira_custom" % (api_key,)
        msg = self.send_json_payload(self.test_user,
                                     url,
                                     self.get_body('created_v2'),
                                     stream_name="jira_custom",
                                     content_type="application/json")
        self.assertEqual(msg.topic_name(), "BUG-15: New bug with hook")
        self.assertEqual(msg.content, """Leo Franchi **created** [BUG-15](http://lfranchi.com:8080/browse/BUG-15) priority Major, assigned to **no one**: > New bug with hook""")

    def test_created(self) -> None:
        expected_subject = "BUG-15: New bug with hook"
        expected_message = """Leo Franchi **created** [BUG-15](http://lfranchi.com:8080/browse/BUG-15) priority Major, assigned to **no one**: > New bug with hook"""
        self.send_and_test_stream_message('created_v1', expected_subject, expected_message)
        self.send_and_test_stream_message('created_v2', expected_subject, expected_message)

    def test_created_with_unicode(self) -> None:
        # Non-ASCII author and summary must round-trip through the hook.
        expected_subject = u"BUG-15: New bug with à hook"
        expected_message = u"""Leo Franchià **created** [BUG-15](http://lfranchi.com:8080/browse/BUG-15) priority Major, assigned to **no one**: > New bug with à hook"""
        self.send_and_test_stream_message('created_with_unicode_v1', expected_subject, expected_message)
        self.send_and_test_stream_message('created_with_unicode_v2', expected_subject, expected_message)

    def test_created_assignee(self) -> None:
        expected_subject = "TEST-4: Test Created Assignee"
        expected_message = """Leonardo Franchi [Administrator] **created** [TEST-4](https://zulipp.atlassian.net/browse/TEST-4) priority Major, assigned to **Leonardo Franchi [Administrator]**: > Test Created Assignee"""
        self.send_and_test_stream_message('created_assignee_v1', expected_subject, expected_message)
        self.send_and_test_stream_message('created_assignee_v2', expected_subject, expected_message)

    def test_commented(self) -> None:
        expected_subject = "BUG-15: New bug with hook"
        expected_message = """Leo Franchi **added comment to** [BUG-15](http://lfranchi.com:8080/browse/BUG-15) (assigned to **Othello, the Moor of Venice**): Adding a comment. Oh, what a comment it is!"""
        self.send_and_test_stream_message('commented_v1', expected_subject, expected_message)
        self.send_and_test_stream_message('commented_v2', expected_subject, expected_message)

    def test_comment_edited(self) -> None:
        # NOTE(review): only a v2 fixture exists for comment edits.
        expected_subject = "BUG-15: New bug with hook"
        expected_message = """Leo Franchi **edited comment on** [BUG-15](http://lfranchi.com:8080/browse/BUG-15) (assigned to **Othello, the Moor of Venice**): Adding a comment.
Oh, what a comment it is!"""
        self.send_and_test_stream_message('comment_edited_v2', expected_subject, expected_message)

    def test_comment_deleted(self) -> None:
        expected_subject = "TOM-1: New Issue"
        expected_message = "Tomasz Kolek **deleted comment from** [TOM-1](https://zuliptomek.atlassian.net/browse/TOM-1) (assigned to **kolaszek@go2.pl**)"
        self.send_and_test_stream_message('comment_deleted_v2', expected_subject, expected_message)

    def test_commented_markup(self) -> None:
        # JIRA wiki markup should be converted to Zulip-flavored Markdown.
        expected_subject = "TEST-7: Testing of rich text"
        expected_message = """Leonardo Franchi [Administrator] **added comment to** [TEST-7](https://zulipp.atlassian.net/browse/TEST-7):\n\n\nThis is a comment that likes to **exercise** a lot of _different_ `conventions` that `jira uses`.\r\n\r\n~~~\n\r\nthis code is not highlighted, but monospaced\r\n\n~~~\r\n\r\n~~~\n\r\ndef python():\r\n print "likes to be formatted"\r\n\n~~~\r\n\r\n[http://www.google.com](http://www.google.com) is a bare link, and [Google](http://www.google.com) is given a title.\r\n\r\nThanks!\r\n\r\n~~~ quote\n\r\nSomeone said somewhere\r\n\n~~~"""
        self.send_and_test_stream_message('commented_markup_v1', expected_subject, expected_message)
        self.send_and_test_stream_message('commented_markup_v2', expected_subject, expected_message)

    def test_deleted(self) -> None:
        expected_subject = "BUG-15: New bug with hook"
        expected_message = "Leo Franchi **deleted** [BUG-15](http://lfranchi.com:8080/browse/BUG-15)!"
        self.send_and_test_stream_message('deleted_v1', expected_subject, expected_message)
        self.send_and_test_stream_message('deleted_v2', expected_subject, expected_message)

    def test_reassigned(self) -> None:
        expected_subject = "BUG-15: New bug with hook"
        expected_message = """Leo Franchi **updated** [BUG-15](http://lfranchi.com:8080/browse/BUG-15) (assigned to **Othello, the Moor of Venice**): * Changed assignee to **Othello, the Moor of Venice**"""
        self.send_and_test_stream_message('reassigned_v1', expected_subject, expected_message)
        self.send_and_test_stream_message('reassigned_v2', expected_subject, expected_message)

    def test_priority_updated(self) -> None:
        expected_subject = "TEST-1: Fix That"
        expected_message = """Leonardo Franchi [Administrator] **updated** [TEST-1](https://zulipp.atlassian.net/browse/TEST-1) (assigned to **leo@zulip.com**): * Changed priority from **Critical** to **Major**"""
        self.send_and_test_stream_message('updated_priority_v1', expected_subject, expected_message)
        self.send_and_test_stream_message('updated_priority_v2', expected_subject, expected_message)

    def test_status_changed(self) -> None:
        expected_subject = "TEST-1: Fix That"
        expected_message = """Leonardo Franchi [Administrator] **updated** [TEST-1](https://zulipp.atlassian.net/browse/TEST-1): * Changed status from **To Do** to **In Progress**"""
        self.send_and_test_stream_message('change_status_v1', expected_subject, expected_message)
        self.send_and_test_stream_message('change_status_v2', expected_subject, expected_message)

    def get_body(self, fixture_name: Text) -> Text:
        # Loads the raw JSON payload recorded under the 'jira' fixtures.
        return self.fixture_data('jira', fixture_name)
mahim97/zulip
zerver/webhooks/jira/tests.py
Python
apache-2.0
7,146
import os
import urllib
import urllib2
import cookielib

# Google account credentials used for ClientLogin.
# NOTE(review): placeholders hard-coded in source -- consider reading these
# from the environment or a config file instead of committing them.
users_email_address = "username@gmail.com"
users_password = "password"

my_app_name = "wattcher"
target_authenticated_google_app_engine_uri = 'http://wattcher.appspot.com/report'

# we use a cookie to authenticate with Google App Engine
# by registering a cookie handler here, this will automatically store the
# cookie returned when we use urllib2 to open http://currentcost.appspot.com/_ah/login
cookiejar = cookielib.LWPCookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookiejar))
urllib2.install_opener(opener)

#
# get an AuthToken from Google accounts
#
# NOTE(review): Google ClientLogin has been shut down; this flow only works
# against historical/compatible endpoints.
auth_uri = 'https://www.google.com/accounts/ClientLogin'
authreq_data = urllib.urlencode({"Email": users_email_address,
                                 "Passwd": users_password,
                                 "service": "ah",
                                 "source": my_app_name,
                                 "accountType": "HOSTED_OR_GOOGLE"})
auth_req = urllib2.Request(auth_uri, data=authreq_data)
auth_resp = urllib2.urlopen(auth_req)
auth_resp_body = auth_resp.read()

# auth response includes several fields - we're interested in
# the bit after Auth=
auth_resp_dict = dict(x.split("=") for x in auth_resp_body.split("\n") if x)
authtoken = auth_resp_dict["Auth"]

#
# get a cookie
#
# the call to request a cookie will also automatically redirect us to the page
# that we want to go to
# the cookie jar will automatically provide the cookie when we reach the
# redirected location

def sendreport(sensornum, watt):
    # POST one (sensornum, watt) reading to the App Engine app: we hit
    # /_ah/login with the ClientLogin token so the cookie jar captures the
    # session cookie, and App Engine then redirects to the ``continue`` URL
    # (the actual report endpoint).
    # this is where I actually want to go to
    serv_uri = target_authenticated_google_app_engine_uri + "?watt=" + str(watt) + "&sensornum=" + str(sensornum)
    serv_args = {}
    serv_args['continue'] = serv_uri
    serv_args['auth'] = authtoken
    full_serv_uri = "http://wattcher.appspot.com/_ah/login?%s" % (urllib.urlencode(serv_args))
    serv_req = urllib2.Request(full_serv_uri)
    serv_resp = urllib2.urlopen(serv_req)
    serv_resp_body = serv_resp.read()
    # serv_resp_body should contain the contents of the
    # target_authenticated_google_app_engine_uri page - as we will have been
    # redirected to that page automatically
    #
    # to prove this, I'm just gonna print it out
    print serv_resp_body
ldm5180/hammerhead
hab/killawatt/appengineauth.py
Python
lgpl-2.1
2,358
import base64
import os

# Web app
flask_host = os.getenv('FLASK_HOST', 'localhost')
flask_port = int(os.getenv('FLASK_PORT', 5000))
try:
    # FLASK_SECRET_KEY is mandatory and must be base64-encoded;
    # b64decode raises TypeError when the variable is missing (None) or
    # when its value is not valid base64.
    flask_secret_key = base64.b64decode(os.getenv('FLASK_SECRET_KEY', None))
except TypeError:
    raise Exception('FLASK_SECRET_KEY must be base64 encoded.')
# Debug mode defaults to ON; set FLASK_DEBUG_MODE=0 in production.
flask_debug_mode = bool(int(os.getenv('FLASK_DEBUG_MODE', 1)))

# Persona
persona_verifier = os.getenv('PERSONA_VERIFIER', 'https://verifier.login.persona.org/verify')
persona_audience = os.getenv('PERSONA_AUDIENCE', 'https://{0}:{1}'.format(flask_host, flask_port))

# Mail
email_account = os.getenv('EMAIL_ACCOUNT', 'automation@mozilla.com')
email_password = os.getenv('EMAIL_PASSWORD', None)
email_from = os.getenv('EMAIL_FROM', 'Mozilla A-Team <auto-tools@mozilla.com>')
email_smtp_server = os.getenv('EMAIL_SMTP_SERVER', 'smtp.mozilla.org')
email_smtp_port = int(os.getenv('EMAIL_SMTP_PORT', 25))
email_ssl = bool(int(os.getenv('EMAIL_SSL', 0)))

# Database
database_url = os.getenv('DATABASE_URL', 'postgresql://root@localhost/pulseguardian')
pool_recycle_interval = int(os.getenv('POOL_RECYCLE_INTERVAL', 60))

# RabbitMQ
# Management API URL.
rabbit_management_url = os.getenv('RABBIT_MANAGEMENT_URL', 'http://localhost:15672/api/')
rabbit_vhost = os.getenv('RABBIT_VHOST', '/')
# RabbitMQ user with administrator privilege.
rabbit_user = os.getenv('RABBIT_USER', 'guest')
# Password of the RabbitMQ user.
rabbit_password = os.getenv('RABBIT_PASSWORD', 'guest')

# PulseGuardian
# Queue sizes are in messages; presumably owners are warned at
# warn_queue_size and queues are deleted at del_queue_size -- verify
# against the guardian code that consumes these.
warn_queue_size = int(os.getenv('WARN_QUEUE_SIZE', 2000))
del_queue_size = int(os.getenv('DEL_QUEUE_SIZE', 8000))
# Polling intervals are in seconds.
polling_interval = int(os.getenv('POLLING_INTERVAL', 5))
polling_max_interval = int(os.getenv('POLLING_MAX_INTERVAL', 300))
fake_account = os.getenv('FAKE_ACCOUNT', None)

# Logging
guardian_log_path = os.getenv('GUARDIAN_LOG_PATH', None)
webapp_log_path = os.getenv('WEBAPP_LOG_PATH', None)
debug_logs = bool(int(os.getenv('DEBUG', 0)))
# Only used if at least one log path is specified above.
max_log_size = int(os.getenv('MAX_LOG_SIZE', 20480))
backup_count = int(os.getenv('BACKUP_COUNT', 5))
mccricardo/pulseguardian
pulseguardian/config.py
Python
mpl-2.0
2,217
from django.forms import Textarea
from django.utils.safestring import mark_safe

from .base import WidgetTest


class TextareaTest(WidgetTest):
    # Shared default widget; tests that need to mutate widget state
    # (e.g. ``is_required``) construct their own instance.
    widget = Textarea()

    def test_render(self):
        # Default rendering includes Django's default rows/cols attrs.
        self.check_html(self.widget, 'msg', 'value', html=(
            '<textarea rows="10" cols="40" name="msg">value</textarea>'
        ))

    def test_render_required(self):
        # ``is_required`` alone does not change the rendered output.
        widget = Textarea()
        widget.is_required = True
        self.check_html(widget, 'msg', 'value', html='<textarea rows="10" cols="40" name="msg">value</textarea>')

    def test_render_empty(self):
        self.check_html(self.widget, 'msg', '', html='<textarea rows="10" cols="40" name="msg"></textarea>')

    def test_render_none(self):
        # None renders the same as the empty string.
        self.check_html(self.widget, 'msg', None, html='<textarea rows="10" cols="40" name="msg"></textarea>')

    def test_escaping(self):
        # Plain values are HTML-escaped.
        self.check_html(self.widget, 'msg', 'some "quoted" & ampersanded value', html=(
            '<textarea rows="10" cols="40" name="msg">some &quot;quoted&quot; &amp; ampersanded value</textarea>'
        ))

    def test_mark_safe(self):
        # Values marked safe are emitted verbatim, without re-escaping.
        self.check_html(self.widget, 'msg', mark_safe('pre &quot;quoted&quot; value'), html=(
            '<textarea rows="10" cols="40" name="msg">pre &quot;quoted&quot; value</textarea>'
        ))
Beauhurst/django
tests/forms_tests/widget_tests/test_textarea.py
Python
bsd-3-clause
1,313
#!/usr/bin/env python # -*- coding: utf-8 -*- # ############################################################################## from distutils.core import setup, Extension ############################################################################## VERSION = '1.0' EXTRA_COMPILE_ARGS = [ '-Wall', '-fno-strict-aliasing' ] PcapLibExt = Extension( 'ether/cether/pcaplib', sources=['ether/cether/pcaplib.c'], libraries=['crypto', 'pcap'] ) setup_args = dict( name='ether', version=VERSION, description='Description', long_description='Long Description', license='GNU General Public License v3', author='Ivan Alechin', author_email='alexin.ivan@gmail.com', url='http://vvs.ru', classifiers=[ 'Development Status :: 1 - Alpha', 'Environment :: GUI', 'License :: OSI Approved :: GNU General Public License (GPL)', 'Natural Language :: English, Russian', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Topic :: Security' ], platforms=['any'], packages=['ether'], py_modules=[], ext_modules=[PcapLibExt], options={'install': {'optimize': 1}}, install_requires=[ 'graphviz', 'scapy', 'networkx', 'netaddr', ], ) if __name__ == '__main__': setup(**setup_args)
alexin-ivan/ether
setup.py
Python
mit
1,363
# -*- test-case-name: twisted.runner.test.test_procmon -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.

"""
Support for starting, monitoring, and restarting child process.
"""
from twisted.python import log
from twisted.internet import error, protocol, reactor as _reactor
from twisted.application import service
from twisted.protocols import basic


class DummyTransport:
    # Minimal stand-in transport handed to LineLogger: it only needs the
    # ``disconnecting`` attribute LineReceiver checks.

    disconnecting = 0

# Single shared dummy transport for all LoggingProtocol outputs.
transport = DummyTransport()


class LineLogger(basic.LineReceiver):
    # Splits child-process output into lines and forwards each to the
    # twisted log, tagged with the monitored process name.

    tag = None
    delimiter = '\n'

    def lineReceived(self, line):
        log.msg('[%s] %s' % (self.tag, line))


class LoggingProtocol(protocol.ProcessProtocol):
    # Process protocol that logs stdout/stderr line-by-line and notifies
    # the owning ProcessMonitor when the child exits.

    service = None
    name = None
    # Tracks whether the last chunk of output ended with a newline, so a
    # trailing partial line can be flushed on process end.
    empty = 1

    def connectionMade(self):
        self.output = LineLogger()
        self.output.tag = self.name
        # Uses the module-level dummy transport defined above.
        self.output.makeConnection(transport)

    def outReceived(self, data):
        self.output.dataReceived(data)
        self.empty = data[-1] == '\n'

    # stderr is logged identically to stdout.
    errReceived = outReceived

    def processEnded(self, reason):
        # Flush any trailing partial line before reporting the exit.
        if not self.empty:
            self.output.dataReceived('\n')
        self.service.connectionLost(self.name)


class ProcessMonitor(service.Service):
    """
    ProcessMonitor runs processes, monitors their progress, and restarts
    them when they die.

    The ProcessMonitor will not attempt to restart a process that appears to
    die instantly -- with each "instant" death (less than 1 second, by
    default), it will delay approximately twice as long before restarting
    it. A successful run will reset the counter.

    The primary interface is L{addProcess} and L{removeProcess}. When the
    service is running (that is, when the application it is attached to is
    running), adding a process automatically starts it.

    Each process has a name. This name string must uniquely identify the
    process. In particular, attempting to add two processes with the same
    name will result in a C{KeyError}.

    @type threshold: C{float}
    @ivar threshold: How long a process has to live before the death is
        considered instant, in seconds. The default value is 1 second.

    @type killTime: C{float}
    @ivar killTime: How long a process being killed has to get its affairs
        in order before it gets killed with an unmaskable signal. The
        default value is 5 seconds.

    @type minRestartDelay: C{float}
    @ivar minRestartDelay: The minimum time (in seconds) to wait before
        attempting to restart a process. Default 1s.

    @type maxRestartDelay: C{float}
    @ivar maxRestartDelay: The maximum time (in seconds) to wait before
        attempting to restart a process. Default 3600s (1h).

    @type _reactor: L{IReactorProcess} provider
    @ivar _reactor: A provider of L{IReactorProcess} and L{IReactorTime}
        which will be used to spawn processes and register delayed calls.
    """
    threshold = 1
    killTime = 5
    minRestartDelay = 1
    maxRestartDelay = 3600

    def __init__(self, reactor=_reactor):
        self._reactor = reactor

        # name -> (args, uid, gid, env) for every monitored process.
        self.processes = {}
        # name -> LoggingProtocol for processes currently running.
        self.protocols = {}
        # name -> current restart backoff delay, in seconds.
        self.delay = {}
        # name -> reactor timestamp of the last start.
        self.timeStarted = {}
        # name -> DelayedCall that will SIGKILL a process being stopped.
        self.murder = {}
        # name -> DelayedCall for a pending restart.
        self.restart = {}

    def __getstate__(self):
        # Transient reactor state (delayed calls, live protocols, the
        # reactor itself) must not be pickled; only the process table and
        # base Service state survive serialization.
        dct = service.Service.__getstate__(self)
        del dct['_reactor']
        dct['protocols'] = {}
        dct['delay'] = {}
        dct['timeStarted'] = {}
        dct['murder'] = {}
        dct['restart'] = {}
        return dct

    def addProcess(self, name, args, uid=None, gid=None, env={}):
        # NOTE(review): mutable default ``env={}`` is shared across calls;
        # safe only as long as callers never mutate it.
        """
        Add a new monitored process and start it immediately if the
        L{ProcessMonitor} service is running.

        Note that args are passed to the system call, not to the shell. If
        running the shell is desired, the common idiom is to use
        C{ProcessMonitor.addProcess("name", ['/bin/sh', '-c', shell_script])}

        @param name: A name for this process.  This value must be
            unique across all processes added to this monitor.
        @type name: C{str}
        @param args: The argv sequence for the process to launch.
        @param uid: The user ID to use to run the process.  If L{None},
            the current UID is used.
        @type uid: C{int}
        @param gid: The group ID to use to run the process.  If L{None},
            the current GID is used.
        @type gid: C{int}
        @param env: The environment to give to the launched process. See
            L{IReactorProcess.spawnProcess}'s C{env} parameter.
        @type env: C{dict}
        @raises: C{KeyError} if a process with the given name already
            exists
        """
        if name in self.processes:
            raise KeyError("remove %s first" % (name,))
        self.processes[name] = args, uid, gid, env
        self.delay[name] = self.minRestartDelay
        if self.running:
            self.startProcess(name)

    def removeProcess(self, name):
        """
        Stop the named process and remove it from the list of monitored
        processes.

        @type name: C{str}
        @param name: A string that uniquely identifies the process.
        """
        self.stopProcess(name)
        del self.processes[name]

    def startService(self):
        """
        Start all monitored processes.
        """
        service.Service.startService(self)
        for name in self.processes:
            self.startProcess(name)

    def stopService(self):
        """
        Stop all monitored processes and cancel all scheduled process
        restarts.
        """
        service.Service.stopService(self)

        # Cancel any outstanding restarts
        for name, delayedCall in self.restart.items():
            if delayedCall.active():
                delayedCall.cancel()

        for name in self.processes:
            self.stopProcess(name)

    def connectionLost(self, name):
        """
        Called when a monitored processes exits. If
        L{service.IService.running} is L{True} (ie the service is started),
        the process will be restarted. If the process had been running for
        more than L{ProcessMonitor.threshold} seconds it will be restarted
        immediately. If the process had been running for less than
        L{ProcessMonitor.threshold} seconds, the restart will be delayed and
        each time the process dies before the configured threshold, the
        restart delay will be doubled - up to a maximum delay of
        maxRestartDelay sec.

        @type name: C{str}
        @param name: A string that uniquely identifies the process
            which exited.
        """
        # Cancel the scheduled _forceStopProcess function if the process
        # dies naturally
        if name in self.murder:
            if self.murder[name].active():
                self.murder[name].cancel()
            del self.murder[name]

        del self.protocols[name]

        if self._reactor.seconds() - self.timeStarted[name] < self.threshold:
            # The process died too fast - backoff
            nextDelay = self.delay[name]
            self.delay[name] = min(self.delay[name] * 2, self.maxRestartDelay)
        else:
            # Process had been running for a significant amount of time
            # restart immediately
            nextDelay = 0
            self.delay[name] = self.minRestartDelay

        # Schedule a process restart if the service is running
        if self.running and name in self.processes:
            self.restart[name] = self._reactor.callLater(nextDelay,
                                                         self.startProcess,
                                                         name)

    def startProcess(self, name):
        """
        @param name: The name of the process to be started
        """
        # If a protocol instance already exists, it means the process is
        # already running
        if name in self.protocols:
            return

        args, uid, gid, env = self.processes[name]

        proto = LoggingProtocol()
        proto.service = self
        proto.name = name
        self.protocols[name] = proto
        self.timeStarted[name] = self._reactor.seconds()
        self._reactor.spawnProcess(proto, args[0], args, uid=uid,
                                   gid=gid, env=env)

    def _forceStopProcess(self, proc):
        """
        @param proc: An L{IProcessTransport} provider
        """
        # The process may have exited between TERM and this KILL; that is
        # not an error.
        try:
            proc.signalProcess('KILL')
        except error.ProcessExitedAlready:
            pass

    def stopProcess(self, name):
        """
        @param name: The name of the process to be stopped
        """
        if name not in self.processes:
            raise KeyError('Unrecognized process name: %s' % (name,))

        proto = self.protocols.get(name, None)
        if proto is not None:
            proc = proto.transport
            try:
                proc.signalProcess('TERM')
            except error.ProcessExitedAlready:
                pass
            else:
                # Escalate to SIGKILL if the process ignores TERM for
                # longer than killTime seconds.
                self.murder[name] = self._reactor.callLater(
                    self.killTime,
                    self._forceStopProcess, proc)

    def restartAll(self):
        """
        Restart all processes. This is useful for third party management
        services to allow a user to restart servers because of an outside
        change in circumstances -- for example, a new version of a library
        is installed.
        """
        # Stopping is enough: connectionLost reschedules each process.
        for name in self.processes:
            self.stopProcess(name)

    def __repr__(self):
        l = []
        for name, proc in self.processes.items():
            uidgid = ''
            if proc[1] is not None:
                uidgid = str(proc[1])
            if proc[2] is not None:
                uidgid += ':' + str(proc[2])

            if uidgid:
                uidgid = '(' + uidgid + ')'
            l.append('%r%s: %r' % (name, uidgid, proc[0]))
        return ('<' + self.__class__.__name__ + ' ' + ' '.join(l) + '>')
Tokyo-Buffalo/tokyosouth
env/lib/python3.6/site-packages/twisted/runner/procmon.py
Python
mit
10,089
from tools.load import LoadMatrix

# Load the shared train/test matrices and multiclass labels once at import
# time; both entries of parameter_list reuse the same data.
lm = LoadMatrix()
traindat = lm.load_numbers('../data/fm_train_real.dat')
testdat = lm.load_numbers('../data/fm_test_real.dat')
label_traindat = lm.load_labels('../data/label_train_multiclass.dat')

parameter_list = [
    [traindat, testdat, label_traindat],
    [traindat, testdat, label_traindat],
]


def classifier_conjugateindex_modular(fm_train_real=traindat,
                                      fm_test_real=testdat,
                                      label_train_multiclass=label_traindat):
    """Train a ConjugateIndex multiclass classifier on the training
    features and classify the test features.

    Returns the trained classifier and the predicted label vector.
    """
    from shogun.Features import RealFeatures, MulticlassLabels
    from shogun.Classifier import ConjugateIndex

    train_features = RealFeatures(fm_train_real)
    test_features = RealFeatures(fm_test_real)
    train_labels = MulticlassLabels(label_train_multiclass)

    classifier = ConjugateIndex(train_features, train_labels)
    classifier.train()

    predictions = classifier.apply(test_features).get_labels()
    return classifier, predictions


if __name__ == '__main__':
    print('ConjugateIndex')
    classifier_conjugateindex_modular(*parameter_list[0])
ratschlab/ASP
examples/undocumented/python_modular/classifier_conjugateindex_modular.py
Python
gpl-2.0
905
""" The history module gathers all functionalities related to the organization of time series data that are used by the simulator and by the forecasters. """ from .database import Database __all__ = [ 'Database' ]
bcornelusse/microgrid-bench
microgrid/history/__init__.py
Python
bsd-2-clause
220
# URL configuration: wires the Django admin, the tastypie REST API (v0)
# and the bucket app's own URLs, plus media serving in DEBUG mode.
from django.conf import settings
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.contrib import admin

from tastypie.api import Api

from accounts.api import UserResource, GroupResource, ProfileResource, ObjectProfileLinkResource
# from alambic.api import RoomResource
from bucket.api import BucketResource, BucketFileResource, BucketTagResource, BucketFileCommentResource
from commons.api.usage import UsageResource, PertinenceResource
from commons.api.prestation import PrestationResource, PrestationModuleResource, SelectedModulesResource
# from deal.api import DealResource
from flipflop.api import BoardResource, ListResource, CardResource, TaskResource, LabelResource, CardCommentResource
from graffiti.api import TagResource, TaggedItemResource
from projects.api import ProjectResource, ProjectProgressRangeResource, ProjectProgressResource
from projectsheet.api import (ProjectSheetResource, ProjectSheetTemplateResource,
                              ProjectSheetQuestionAnswerResource,
                              ProjectSheetQuestionResource, QuestionChoiceResource)
from projecttool.api import ProjectToolResource
from scout.api import (MapResource, TileLayerResource, DataLayerResource,
                       MarkerResource, MarkerCategoryResource,
                       PostalAddressResource, PlaceResource)
from ucomment.api import CommentResource
from unisson.api import IngredientResource, EvaluationIngredientResource

admin.autodiscover()

# Build the versioned API; every resource below is served under /api/v0/.
api = Api(api_name='v0')

# Scout (maps and geolocated places)
api.register(MapResource())
api.register(TileLayerResource())
api.register(MarkerResource())
api.register(DataLayerResource())
api.register(MarkerCategoryResource())
api.register(PostalAddressResource())
api.register(PlaceResource())

# Auth (users, groups and profiles)
api.register(UserResource())
api.register(GroupResource())
api.register(ProfileResource())
api.register(ObjectProfileLinkResource())

# Flipflop (Kanban boards)
api.register(BoardResource())
api.register(ListResource())
api.register(CardResource())
api.register(TaskResource())
api.register(CardCommentResource())
api.register(LabelResource())

# Bucket (file storage)
api.register(BucketResource())
api.register(BucketTagResource())
api.register(BucketFileResource())
api.register(BucketFileCommentResource())

# Projects
api.register(ProjectResource())
api.register(ProjectProgressResource())
api.register(ProjectProgressRangeResource())

# Project Sheets
api.register(ProjectSheetResource())
api.register(ProjectSheetTemplateResource())
api.register(ProjectSheetQuestionAnswerResource())
api.register(ProjectSheetQuestionResource())
api.register(QuestionChoiceResource())

# Projects Tools
api.register(ProjectToolResource())

# Commons
api.register(UsageResource())
api.register(PertinenceResource())

# Unisson
api.register(IngredientResource())
api.register(EvaluationIngredientResource())

# deal (disabled)
# api.register(DealResource())

# Prestation
api.register(PrestationResource())
api.register(PrestationModuleResource())
api.register(SelectedModulesResource())

# Graffiti (tagging)
api.register(TagResource())
api.register(TaggedItemResource())

# ucomment (comments)
api.register(CommentResource())

# Alambic (disabled)
# api.register(RoomResource())

urlpatterns = patterns('',
    url(r'^admin/', include(admin.site.urls)),

    url(r'^api/', include(api.urls)),
    url(r'^bucket/', include('bucket.urls'))
)

# Serve uploaded media files directly only during development.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
CommonsDev/dataserver
dataserver/urls.py
Python
agpl-3.0
3,425
###########################################################
#
# Copyright (c) 2005, Southpaw Technology
#                     All Rights Reserved
#
# PROPRIETARY INFORMATION.  This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#

__all__ = ['UndoLogWdg']

from pyasm.common import Date, Xml, Common, Environment
from pyasm.command import Command, CommandExitException
from pyasm.search import TransactionLog, SearchType, Search
from pyasm.web import DivWdg, Table, SpanWdg, WebContainer
from pyasm.widget import FilterSelectWdg, IconRefreshWdg, CheckboxWdg, DateSelectWdg, DateTimeWdg
from pyasm.biz import Project
from pyasm.prod.web import DateFilterWdg

class UndoLogWdg(DivWdg):
    # Panel widget that displays the transaction (undo) log, filtered by
    # time interval and, in admin mode, by project and user.
    # NOTE: TACTIC convention uses ``my`` in place of ``self``.

    def __init__(my, is_refresh=False):
        super(UndoLogWdg,my).__init__()
        # By default only the current user's transactions in the current
        # project are shown; set_admin() widens both filters.
        my.all_users_flag = False
        my.all_namespaces_flag = False
        my.add_class("spt_panel")
        my.add_attr("spt_class_name", Common.get_full_class_name(my) )

    def set_all_namespaces(my, flag=True):
        # Show transactions from all projects, not just the current one.
        my.all_namespaces_flag = flag

    def set_all_users(my, flag=True):
        # Show transactions from all users, not just the current login.
        my.all_users_flag = flag

    def set_admin(my):
        # Admin mode: no project or user restriction.
        my.set_all_namespaces()
        my.set_all_users()

    def get_display(my):
        #WebContainer.register_cmd("pyasm.admin.UndoLogCbk")

        # Filter bar container.
        div = DivWdg()
        div.add_color('background','background', -10)
        div.add_color('color','color')
        div.add_style("padding: 15px")
        div.add_border()

        project = ''
        # Project filter: a select box in admin mode, otherwise the
        # current project is used implicitly.
        if my.all_namespaces_flag:
            span = SpanWdg("Project: ")
            span.add_color('color','color')
            project_select = FilterSelectWdg("project")
            project_select.add_empty_option(label="-- All Projects --")
            project_select.set_option("query", "sthpw/project|code|title")
            span.add(project_select)
            div.add(span)

            project = project_select.get_value()
        else:
            from pyasm.biz import Project
            project = Project.get_global_project_code()

        # Time-interval filter (defaults to the last hour).
        select = DateFilterWdg("undo_time_filter", label="Show Transaction Log From: ")
        select.set_label(["1 Hour Ago", "Today", "1 Day Ago", "1 Week Ago", "1 Month Ago"])
        select.set_value(["1 Hour", "today", "1 Day", "1 Week", "1 Month"])
        select.set_option("default", "1 Hour")
        div.add(select)

        time_interval = select.get_value()

        my.add(div)

        # User filter: fixed to the current login unless in admin mode.
        if not my.all_users_flag:
            user = Environment.get_user_name()
        else:
            span = SpanWdg(css="med")
            span.add("User: ")
            user_select = FilterSelectWdg("user")
            user_select.set_option("query", "sthpw/login|login|login")
            user_select.add_empty_option()
            span.add(user_select)
            div.add(span)

            user = user_select.get_value()

        # Fetch the matching transaction log entries and render them in
        # a standard table layout.
        transaction_log = TransactionLog.get( user_name=user, \
            namespace=project, time_interval=time_interval)

        from tactic.ui.panel import FastTableLayoutWdg, TableLayoutWdg
        table = FastTableLayoutWdg(search_type="sthpw/transaction_log", view="table", show_shelf='false', show_select="false")
        #table = TableLayoutWdg(search_type="sthpw/transaction_log", view="table", mode='simple', show_row_select="false")
        table.set_sobjects(transaction_log)
        #table.set_refresh_mode("table")

        my.add(table)

        return super(UndoLogWdg, my).get_display()



# TODO: this code is commented out until such a time as a better solution is
# found.  It is hightly questionable whether it is desireable to allow users
# to undo a previous command outside the order of the stack.  This may leave
# the database in an unstable state (deleting sobjects that have dependencies
# on them)
"""
class UndoLogCbk(Command):

    def get_title(my):
        return "Undo Log Command"

    def check(my):
        web = WebContainer.get_web()
        return True

    def execute(my):
        web = WebContainer.get_web()
        transaction_ids = web.get_form_values("transaction_log_id")
        if not transaction_ids:
            return

        search = Search(TransactionLog.SEARCH_TYPE)
        search.add_filters("id", transaction_ids)
        transactions = search.get_sobjects()

        # start with just the first one
        transaction = transactions[0]
        transaction.undo()

        my.description = "Undo #%s: %s" % (transaction.get_id(), transaction.get_value("description") )
"""
sadanandb/pmt
src/tactic/ui/app/undo_log_wdg.py
Python
epl-1.0
4,731
# -*- coding: utf-8 -*-
from lektor.pluginsystem import Plugin
from markupsafe import Markup

# Standard Google Analytics snippet; the tracking id and property are
# interpolated from the plugin configuration at render time.
SCRIPT = '''
    <div id="ga-script"></div>
    <script type="text/javascript">
      (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
      (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
      m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
      })(window,document,'script','//www.google-analytics.com/analytics.js','ga');

      ga('create', '%(GOOGLE_ANALYTICS_ID)s', '%(GOOGLE_ANALYTICS_PROPERTY)s');
      ga('send', 'pageview');

    </script>
    '''


class GoogleAnalyticsPlugin(Plugin):
    name = u'Google Analytics'
    description = u'adds support for Google Analytics to Lektor CMS'

    def on_setup_env(self, **extra):
        """Register a ``generate_google_analytics`` global in the Jinja
        environment that renders the GA tracking snippet.

        Raises RuntimeError when no GOOGLE_ANALYTICS_ID is configured.
        """
        config = self.get_config()
        ga_property = config.get('GOOGLE_ANALYTICS_PROPERTY', 'auto')
        google_analytics_id = config.get('GOOGLE_ANALYTICS_ID')

        # The tracking id is mandatory; fail loudly at setup time rather
        # than emitting a broken snippet on every page.
        if google_analytics_id is None:
            raise RuntimeError('GOOGLE_ANALYTICS_ID is not configured. '
                               'Please configure it in '
                               '`./configs/google-analytics.ini` file')

        def google_analytics():
            values = {'GOOGLE_ANALYTICS_ID': google_analytics_id,
                      'GOOGLE_ANALYTICS_PROPERTY': ga_property}
            return Markup(SCRIPT % values)

        self.env.jinja_env.globals['generate_google_analytics'] = google_analytics
kmonsoor/lektor-google-analytics
lektor_google_analytics.py
Python
bsd-3-clause
1,491
import json
import os
import urlparse

import re
import webdriver
import mozlog

from tests.support.asserts import assert_error
from tests.support.http_request import HTTPRequest
from tests.support import merge_dictionaries

default_host = "http://127.0.0.1"
default_port = "4444"

logger = mozlog.get_default_logger()


def ignore_exceptions(f):
    """Decorator that runs ``f`` and logs, rather than propagates, any
    WebDriverException it raises.

    Used on the clean-up helpers below so that a broken session does not
    mask the original test failure.
    """
    def inner(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except webdriver.error.WebDriverException as e:
            logger.warning("Ignored exception %s" % e)
    inner.__name__ = f.__name__
    # Bug fix: the wrapper must be returned.  Previously this function
    # fell off the end and returned None, so every decorated helper was
    # bound to None and calling it raised a TypeError.
    return inner


@ignore_exceptions
def _ensure_valid_window(session):
    """If current window is not open anymore, ensure to have a valid
    one selected.
    """
    try:
        session.window_handle
    except webdriver.NoSuchWindowException:
        session.window_handle = session.handles[0]


@ignore_exceptions
def _dismiss_user_prompts(session):
    """Dismisses any open user prompts in windows."""
    current_window = session.window_handle

    for window in _windows(session):
        session.window_handle = window
        try:
            session.alert.dismiss()
        except webdriver.NoSuchAlertException:
            pass

    session.window_handle = current_window


@ignore_exceptions
def _restore_window_state(session):
    """Reset window to an acceptable size, bringing it out of maximized,
    minimized, or fullscreened state.
    """
    session.window.size = (800, 600)


@ignore_exceptions
def _restore_windows(session):
    """Closes superfluous windows opened by the test without ending
    the session implicitly by closing the last window.
    """
    current_window = session.window_handle

    for window in _windows(session, exclude=[current_window]):
        session.window_handle = window
        if len(session.handles) > 1:
            session.close()
    session.window_handle = current_window


def _switch_to_top_level_browsing_context(session):
    """If the current browsing context selected by WebDriver is a
    `<frame>` or an `<iframe>`, switch it back to the top-level
    browsing context.
    """
    session.switch_frame(None)


def _windows(session, exclude=None):
    """Set of window handles, filtered by an `exclude` list if
    provided.
    """
    if exclude is None:
        exclude = []
    wins = [w for w in session.handles if w not in exclude]
    return set(wins)


def create_frame(session):
    """Create an `iframe` element in the current browsing context and insert it
    into the document. Return a reference to the newly-created element."""
    def create_frame():
        append = """
            var frame = document.createElement('iframe');
            document.body.appendChild(frame);
            return frame;
        """
        return session.execute_script(append)

    return create_frame


def create_window(session):
    """Open new window and return the window handle."""
    def create_window():
        windows_before = session.handles
        name = session.execute_script("window.open()")
        assert len(session.handles) == len(windows_before) + 1
        new_windows = list(set(session.handles) - set(windows_before))
        return new_windows.pop()
    return create_window


def http(configuration):
    """HTTP client bound to the configured WebDriver host and port."""
    return HTTPRequest(configuration["host"], configuration["port"])


def server_config():
    """wptserve configuration, passed in through the environment."""
    return json.loads(os.environ.get("WD_SERVER_CONFIG"))


def configuration():
    """WebDriver endpoint and requested capabilities, read from the
    WD_HOST / WD_PORT / WD_CAPABILITIES environment variables."""
    host = os.environ.get("WD_HOST", default_host)
    port = int(os.environ.get("WD_PORT", default_port))
    capabilities = json.loads(os.environ.get("WD_CAPABILITIES", "{}"))
    return {
        "host": host,
        "port": port,
        "capabilities": capabilities
    }


# Module-level cache so a single session is shared across tests.
_current_session = None


def session(configuration, request):
    """Create and start a session for a test that does not itself test session
    creation.

    By default the session will stay open after each test, but we always try to
    start a new one and assume that if that fails there is already a valid
    session. This makes it possible to recover from some errors that might leave
    the session in a bad state, but does not demand that we start a new session
    per test."""
    global _current_session
    if _current_session is None:
        _current_session = webdriver.Session(configuration["host"],
                                             configuration["port"],
                                             capabilities={"alwaysMatch": configuration["capabilities"]})
    try:
        _current_session.start()
    except webdriver.error.SessionNotCreatedException:
        # A session already exists; only re-raise when we genuinely
        # failed to obtain one.
        if not _current_session.session_id:
            raise

    # finalisers are popped off a stack,
    # making their ordering reverse
    request.addfinalizer(lambda: _switch_to_top_level_browsing_context(_current_session))
    request.addfinalizer(lambda: _restore_window_state(_current_session))
    request.addfinalizer(lambda: _restore_windows(_current_session))
    request.addfinalizer(lambda: _dismiss_user_prompts(_current_session))
    request.addfinalizer(lambda: _ensure_valid_window(_current_session))

    return _current_session


def new_session(configuration, request):
    """Return a factory function that will attempt to start a session with a
    given body.

    This is intended for tests that are themselves testing new session
    creation, and the session created is closed at the end of the test."""
    def end():
        global _current_session
        if _current_session is not None and _current_session.session_id:
            _current_session.end()
        _current_session = None

    def create_session(body):
        global _current_session
        _session = webdriver.Session(configuration["host"],
                                     configuration["port"],
                                     capabilities=None)
        # TODO: merge in some capabilities from the configuration
        # capabilities since these might be needed to start the browser
        value = _session.send_command("POST", "session", body=body)
        # Don't set the global session until we are sure this succeeded
        _current_session = _session
        _session.session_id = value["sessionId"]

        return value, _current_session

    end()
    request.addfinalizer(end)

    return create_session


def url(server_config):
    """Build absolute URLs against the wptserve configuration."""
    def inner(path, protocol="http", query="", fragment=""):
        port = server_config["ports"][protocol][0]
        host = "%s:%s" % (server_config["host"], port)
        return urlparse.urlunsplit((protocol, host, path, query, fragment))

    inner.__name__ = "url"

    return inner


def create_dialog(session):
    """Create a dialog (one of "alert", "prompt", or "confirm") and provide a
    function to validate that the dialog has been "handled" (either accepted or
    dismissed) by returning some value."""

    def create_dialog(dialog_type, text=None, result_var=None):
        assert dialog_type in ("alert", "confirm", "prompt"), (
            "Invalid dialog type: '%s'" % dialog_type)

        if text is None:
            text = ""

        assert isinstance(text, basestring), "`text` parameter must be a string"

        if result_var is None:
            result_var = "__WEBDRIVER"

        assert re.search(r"^[_$a-z$][_$a-z0-9]*$", result_var, re.IGNORECASE), (
            'The `result_var` must be a valid JavaScript identifier')

        # Script completion and modal summoning are scheduled on two separate
        # turns of the event loop to ensure that both occur regardless of how
        # the user agent manages script execution.
        spawn = """
            var done = arguments[0];
            setTimeout(done, 0);
            setTimeout(function() {{ window.{0} = window.{1}("{2}"); }}, 0);
        """.format(result_var, dialog_type, text)

        session.send_session_command("POST",
                                     "execute/async",
                                     {"script": spawn, "args": []})

    return create_dialog


def clear_all_cookies(session):
    """Removes all cookies associated with the current active document"""
    session.transport.send("DELETE", "session/%s/cookie" % session.session_id)
n0max/servo
tests/wpt/web-platform-tests/webdriver/tests/support/fixtures.py
Python
mpl-2.0
8,260
# -*- coding: utf-8 -*- # Generated by Django 1.9.4 on 2016-05-03 13:54 from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('news', '0030_auto_20160503_1235'), ] operations = [ migrations.CreateModel( name='TwitterStream', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('started', models.DateTimeField(auto_now=True)), ('stopped', models.DateTimeField()), ('filter_keyword', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='news.FilterKeyword')), ('filter_location', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='news.FilterLocation')), ], ), migrations.RemoveField( model_name='tweet', name='filter_keyword', ), migrations.RemoveField( model_name='tweet', name='filter_location', ), migrations.AddField( model_name='tweet', name='stream', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='news.TwitterStream'), ), ]
kwameboame/newsdex
news/migrations/0031_auto_20160503_1354.py
Python
bsd-2-clause
1,422
# -*- coding: utf-8 -*-
#
# JSONBOT documentation build configuration file, created by
# sphinx-quickstart on Thu Jul 2 23:24:35 2009.
#
# This file is execfile()d with the current directory set to its containing
# dir.  The contents of this file are pickled, so don't put values in the
# namespace that aren't pickleable (module imports are okay, they're removed
# automatically).

import sys, os

# Make the bot's packages importable so sphinx.ext.autodoc can resolve
# them; the docs are built from the docs/ directory, hence everything is
# addressed relative to the parent directory.
sys.path.insert(0, os.path.abspath(".."))
sys.path.insert(0, os.path.abspath('..' + os.sep + 'jsb'))
sys.path.insert(0, os.path.abspath(os.path.join('..', 'jsb', 'drivers')))
sys.path.insert(0, os.path.abspath(os.path.join('..', 'jsb', 'drivers', 'gae')))
sys.path.insert(0, os.path.abspath(os.path.join('..', 'jsb', 'utils')))
sys.path.insert(0, os.path.abspath(os.path.join('..', 'jsb', 'utils', 'gae')))
sys.path.insert(0, os.path.abspath(os.path.join('..', 'jsb', 'contrib')))
sys.path.insert(0, os.path.abspath(os.path.join('..', 'jsb', 'lib','gae')))
sys.path.insert(0, os.path.abspath(os.path.join('..', 'jsb', 'plugs','core')))
sys.path.insert(0, os.path.abspath(os.path.join('..', 'jsb', 'plugs','common')))
sys.path.insert(0, os.path.abspath(os.path.join('..', 'jsb', 'plugs','socket')))
sys.path.insert(0, os.path.abspath(os.path.join('..', 'jsb', 'plugs','wave')))
sys.path.insert(0, os.path.abspath(os.path.join('..', 'jsb', 'plugs','gae')))
#sys.path.insert(0, os.path.abspath(os.path.join('..', 'jsb', 'upload')))
#sys.path.insert(0, os.path.abspath(os.path.join('..', 'jsb', 'upload', 'waveapi')))
#sys.path.insert(0, os.path.abspath(os.path.join('..', 'jsb', 'upload', 'webapp2')))
sys.path.insert(0, os.path.abspath(os.path.join('..', 'jsb', 'data')))

# Google App Engine SDK, assumed to live in the user's home directory.
sys.path.insert(0, os.path.abspath(os.path.expanduser("~") + os.sep + "google_appengine"))
sys.path.insert(0, os.path.abspath(os.path.expanduser("~") + os.sep + "google_appengine" + os.sep +'google'))
sys.path.insert(0, os.path.abspath(os.path.expanduser("~") + os.sep + "google_appengine" + os.sep +'google' + os.sep + "appengine"))

#print sys.path

# General configuration
# ---------------------

# Sphinx extension modules (intersphinx deliberately disabled).
#extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx']
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest']

# Paths that contain templates, relative to this directory.
templates_path = ['.templates']

# The suffix and encoding of source filenames.
source_suffix = '.txt'
source_encoding = 'utf-8'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'JSONBOT'
copyright = u'2010,2011.2012 Bart Thate'

# The short X.Y version.
version = '0.84.4'
# The full version, including alpha/beta/rc tags.
release = '0.84.4 RELEASE'

# The language for content autogenerated by Sphinx.
#language = None

# |today| substitution: either a fixed string or a strftime format.
#today = ''
#today_fmt = '%B %d, %Y'

# Documents that shouldn't be included in the build.
unused_docs = ['jsbindex', 'lindex', 'uindex', 'plugsdocindex', 'dindex']

# Directories, relative to the source directory, that aren't searched for
# source files.
exclude_trees = ['.build', 'jsbnest', 'docs', '.hg', 'files']

# The reST default role (used for this markup: `text`).
#default_role = None

# If true, '()' is appended to :func: etc. cross-reference text.
add_function_parentheses = True

# If true, the current module name is prepended to all description unit
# titles (such as .. function::).
add_module_names = True

# If true, sectionauthor and moduleauthor directives are shown in the
# output.
show_authors = True

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'


# Options for HTML output
# -----------------------

# The style sheet to use for HTML pages; must exist in a static path.
html_style = 'docs.css'

# The name for this set of Sphinx documents.
#html_title = "<project> v<release> documentation"

# A shorter title for the navigation bar.
#html_short_title = "<release>"

# Sidebar logo and favicon.
#html_logo = None
#html_favicon = None

# Paths that contain custom static files; copied after the builtin static
# files, so a file named "default.css" overrides the builtin one.
html_static_path = ['static', '.static']

# strftime format for the 'Last updated on:' timestamp at each page bottom.
html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants converts quotes and dashes to typographically
# correct entities.
#html_use_smartypants = True

# Custom sidebar templates and additional rendered pages.
#html_sidebars = {}
#html_additional_pages = {'docs/baseplugs/': '*', 'docs/gozerplugs/': '*'}

# Generate a module index and a general index; split the general index
# into one page per letter.
html_use_modindex = True
html_use_index = True
html_split_index = True

# Include the reST sources in the HTML build as _sources/<name>.
html_copy_source = True

# OpenSearch description file base URL.
#html_use_opensearch = ''

# File name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''

# Output file base name for HTML help builder.
htmlhelp_basename = 'JSONBOTdoc'


# Options for LaTeX output
# ------------------------

# The paper size ('letter' or 'a4') and font size.
#latex_paper_size = 'letter'
#latex_font_size = '10pt'

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class).
latex_documents = [
  ('index', 'JSONBOT.tex', ur'JSONBOT Documentation',
   ur'Bart Thate', 'manual'),
]

# Title-page logo, part/chapter toplevel headings, extra preamble,
# appendices and module index for the LaTeX builder.
#latex_logo = None
#latex_use_parts = False
#latex_preamble = ''
#latex_appendices = []
#latex_use_modindex = True


# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
Petraea/jsonbot
docs/conf.py
Python
mit
8,088
#!/usr/bin/env python # Copyright (C) 2014: # Gabes Jean, naparuba@gmail.com import traceback from opsbro_test import * from opsbro.evaluater import evaluater class TestEvaluater(OpsBroTest): def setUp(self): pass def test_evaluator(self): rules = [ {'rule': '1+1', 'expected': 2}, {'rule': '2-1', 'expected': 1}, {'rule': '10*10', 'expected': 100}, {'rule': '100/10', 'expected': 10}, {'rule': '2**3', 'expected': 8}, {'rule': '16^4', 'expected': 20}, {'rule': '"azerty"', 'expected': "azerty"}, {'rule': '("a"=="a")', 'expected': True}, {'rule': '("a"!="b")', 'expected': True}, {'rule': '10 > 5', 'expected': True}, {'rule': '10 < 5', 'expected': False}, {'rule': '10 >= 10', 'expected': True}, {'rule': '10 <= 10', 'expected': True}, {'rule': '13 % 2', 'expected': 1}, {'rule': '(1 == 1) and (2 == 3)', 'expected': False}, {'rule': '(1 == 1) or (2 == 3)', 'expected': True}, {'rule': '10 | 3', 'expected': 11}, {'rule': '10 ^ 3', 'expected': 9}, {'rule': 'True and False', 'expected': False}, {'rule': 'True == False', 'expected': False}, {'rule': 'True != False', 'expected': True}, {'rule': 'True and not False', 'expected': True}, {'rule': 'not ("a"=="a")', 'expected': False}, {'rule': '{"k":"v"}', 'expected': {'k': 'v'}}, {'rule': '(1, 2, 3)', 'expected': (1, 2, 3)}, {'rule': '(1, 2,3) == (1,2,3)', 'expected': True}, {'rule': '"PI %.2f" % 3.14', 'expected': "PI 3.14"}, # Dicts {'rule': '{"k":"v"}["k"]', 'expected': 'v'}, {'rule': '"k" in {"k":"v"}', 'expected': True}, {'rule': 'list({"k":"v"}.values())', 'expected': ["v"]}, # Math functions {'rule': 'min([1,2,3])', 'expected': 1}, {'rule': 'max([1,2,3])', 'expected': 3}, {'rule': 'abs(-1)', 'expected': 1}, {'rule': 'sum([1,2,3])', 'expected': 6}, # List & strings functions {'rule': '"v2" in ["v1", "v2"]', 'expected': True}, {'rule': 'sorted(["v2", "v1"])', 'expected': ["v1", "v2"]}, {'rule': 'len([1,2,3])', 'expected': 3}, # dict.values() {'rule': 'sorted({"k":"v", "k2":"v2"}.values())', 'expected': ["v", "v2"]}, # Sets 
{'rule': 'set(["v1", "v2", "v2"])', 'expected': set(['v1', 'v2'])}, # Do not execute functions at right in And if the first part is False {'rule': 'False and missing_function()', 'expected': False}, # Do not execute functions at right in Or if the first part is True {'rule': 'True or missing_function()', 'expected': True}, ] for r in rules: print("\n\n" + "#" * 30) rule = r['rule'] expected = r['expected'] try: r = evaluater.eval_expr(rule) except Exception as exp: r = traceback.format_exc() print("Rule: %s" % rule) print("Expected: %s" % str(expected)) print("Result: %s" % str(r)) print("Is The same?: %s" % (r == expected)) self.assert_(r == expected) if __name__ == '__main__': unittest.main()
naparuba/kunai
test/test_evaluator.py
Python
mit
3,577
import logging from configparser import ConfigParser from typing import Optional from pathlib import Path import os from click import UsageError class CLIConfig: def __init__(self, config_content): assert isinstance(config_content, str) self.host = '' self.username = '' self.password = '' if config_content: self._parse_config(config_content) def _parse_config(self, config_content): cp = ConfigParser() cp.read_string(config_content) try: self.host = cp['DEFAULT']['host'] self.username = cp['DEFAULT']['username'] self.password = cp['DEFAULT']['password'] except Exception as e: logging.error('Exception while parsing configuration {}'.format(e)) raise UsageError('Exception while parsing configuration {}'.format(e)) def find_config_file() -> Optional[Path]: """ Find the configuration file to use and return a Path to it. This function will look in various place to find the configuration file. In order, those location are tested: + ./.leosacclirc + $HOME/.leosacclirc :return: Optionally a path to the config file we found. """ logger = logging.getLogger('find_config_file') for location in os.curdir, os.path.expanduser('~'): location = os.path.abspath(location) logger.debug('Trying to locate configuration file in {}'.format(location)) p = Path(os.path.join(location, '.leosacclirc')) if p.is_file(): return p return None def load_config_file(): """ Attempt to load CLI configuration file. """ p = find_config_file() if p: with p.open('rt') as file: cfg = CLIConfig(file.read()) return cfg else: logging.warning('Could not locate CLI configuration file') return CLIConfig('')
islog/leosac
python/leosacpy/cli/cli_config.py
Python
agpl-3.0
1,918
# -*- coding: utf-8 -*- from south.utils import datetime_utils as datetime from south.db import db from south.v2 import DataMigration from django.db import models class Migration(DataMigration): def forwards(self, orm): "Write your forwards methods here." # Note: Don't use "from appname.models import ModelName". # Use orm.ModelName to refer to models in this application, # and orm['appname.ModelName'] for models in other applications. orm.Journal.objects.filter(twitter_user=None).update(twitter_user='') def backwards(self, orm): "Write your backwards methods here." orm.Journal.objects.filter(twitter_user='').update(twitter_user=None) models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 
'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'journalmanager.aheadpressrelease': { 'Meta': {'object_name': 'AheadPressRelease', '_ormbases': ['journalmanager.PressRelease']}, 'journal': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'press_releases'", 'to': "orm['journalmanager.Journal']"}), 'pressrelease_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['journalmanager.PressRelease']", 'unique': 'True', 'primary_key': 'True'}) }, 'journalmanager.article': { 'Meta': {'object_name': 'Article'}, 'aid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}), 'article_type': ('django.db.models.fields.CharField', [], 
{'max_length': '32', 'db_index': 'True'}), 'articles_linkage_is_pending': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now_add': 'True', 'blank': 'True'}), 'doi': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '2048', 'db_index': 'True'}), 'domain_key': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '2048', 'db_index': 'False'}), 'es_is_dirty': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'es_updated_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_aop': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'issn_epub': ('django.db.models.fields.CharField', [], {'max_length': '9', 'db_index': 'True'}), 'issn_ppub': ('django.db.models.fields.CharField', [], {'max_length': '9', 'db_index': 'True'}), 'issue': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'articles'", 'null': 'True', 'to': "orm['journalmanager.Issue']"}), 'journal': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'articles'", 'null': 'True', 'to': "orm['journalmanager.Journal']"}), 'journal_title': ('django.db.models.fields.CharField', [], {'max_length': '512', 'db_index': 'True'}), 'related_articles': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['journalmanager.Article']", 'null': 'True', 'through': "orm['journalmanager.ArticlesLinkage']", 'blank': 'True'}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now': 'True', 'blank': 'True'}), 'xml': ('scielomanager.custom_fields.XMLSPSField', [], {}), 'xml_version': 
('django.db.models.fields.CharField', [], {'max_length': '9'}) }, 'journalmanager.articleslinkage': { 'Meta': {'object_name': 'ArticlesLinkage'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'link_to': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'referrers'", 'to': "orm['journalmanager.Article']"}), 'link_type': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'referrer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'links_to'", 'to': "orm['journalmanager.Article']"}) }, 'journalmanager.collection': { 'Meta': {'ordering': "['name']", 'object_name': 'Collection'}, 'acronym': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '16', 'blank': 'True'}), 'address': ('django.db.models.fields.TextField', [], {}), 'address_complement': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}), 'address_number': ('django.db.models.fields.CharField', [], {'max_length': '8'}), 'city': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}), 'collection': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'user_collection'", 'to': "orm['auth.User']", 'through': "orm['journalmanager.UserCollections']", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}), 'country': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}), 'fax': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'name_slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'unique': 'True', 'null': 'True', 'blank': 
'True'}), 'phone': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}), 'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}), 'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}) }, 'journalmanager.institution': { 'Meta': {'ordering': "['name']", 'object_name': 'Institution'}, 'acronym': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '16', 'blank': 'True'}), 'address': ('django.db.models.fields.TextField', [], {}), 'address_complement': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}), 'address_number': ('django.db.models.fields.CharField', [], {'max_length': '8'}), 'cel': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}), 'city': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}), 'complement': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}), 'country': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}), 'fax': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_trashed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '256', 'db_index': 'True'}), 'phone': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}), 'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 
'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}) }, 'journalmanager.issue': { 'Meta': {'ordering': "('created', 'id')", 'object_name': 'Issue'}, 'cover': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'ctrl_vocabulary': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}), 'editorial_standard': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_marked_up': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_trashed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}), 'journal': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Journal']"}), 'label': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}), 'number': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}), 'order': ('django.db.models.fields.IntegerField', [], {'blank': 'True'}), 'publication_end_month': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'publication_start_month': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'publication_year': ('django.db.models.fields.IntegerField', [], {}), 'section': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['journalmanager.Section']", 'symmetrical': 'False', 'blank': 'True'}), 'spe_text': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}), 'suppl_text': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}), 'total_documents': ('django.db.models.fields.IntegerField', [], {'default': 
'0'}), 'type': ('django.db.models.fields.CharField', [], {'default': "'regular'", 'max_length': '15'}), 'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'use_license': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.UseLicense']", 'null': 'True'}), 'volume': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}) }, 'journalmanager.issuetitle': { 'Meta': {'object_name': 'IssueTitle'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'issue': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Issue']"}), 'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Language']"}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '256'}) }, 'journalmanager.journal': { 'Meta': {'ordering': "('title', 'id')", 'object_name': 'Journal'}, 'abstract_keyword_languages': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'abstract_keyword_languages'", 'symmetrical': 'False', 'to': "orm['journalmanager.Language']"}), 'acronym': ('django.db.models.fields.CharField', [], {'max_length': '16'}), 'ccn_code': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'blank': 'True'}), 'collections': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['journalmanager.Collection']", 'through': "orm['journalmanager.Membership']", 'symmetrical': 'False'}), 'copyrighter': ('django.db.models.fields.CharField', [], {'max_length': '254'}), 'cover': ('scielomanager.custom_fields.ContentTypeRestrictedFileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'enjoy_creator'", 'to': "orm['auth.User']"}), 'ctrl_vocabulary': 
('django.db.models.fields.CharField', [], {'max_length': '64'}), 'current_ahead_documents': ('django.db.models.fields.IntegerField', [], {'default': '0', 'max_length': '3', 'null': 'True', 'blank': 'True'}), 'editor': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'editor_journal'", 'null': 'True', 'to': "orm['auth.User']"}), 'editor_address': ('django.db.models.fields.CharField', [], {'max_length': '512'}), 'editor_address_city': ('django.db.models.fields.CharField', [], {'max_length': '256'}), 'editor_address_country': ('scielo_extensions.modelfields.CountryField', [], {'max_length': '2'}), 'editor_address_state': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'editor_address_zip': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'editor_email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}), 'editor_name': ('django.db.models.fields.CharField', [], {'max_length': '512'}), 'editor_phone1': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'editor_phone2': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}), 'editorial_standard': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'eletronic_issn': ('django.db.models.fields.CharField', [], {'max_length': '9', 'db_index': 'True'}), 'final_num': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}), 'final_vol': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}), 'final_year': ('django.db.models.fields.CharField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'}), 'frequency': ('django.db.models.fields.CharField', [], {'max_length': '16'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'index_coverage': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'init_num': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 
'True', 'blank': 'True'}), 'init_vol': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}), 'init_year': ('django.db.models.fields.CharField', [], {'max_length': '4'}), 'is_indexed_aehci': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_indexed_scie': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_indexed_ssci': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_trashed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}), 'languages': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['journalmanager.Language']", 'symmetrical': 'False'}), 'logo': ('scielomanager.custom_fields.ContentTypeRestrictedFileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'medline_code': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}), 'medline_title': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}), 'notes': ('django.db.models.fields.TextField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}), 'other_previous_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}), 'previous_ahead_documents': ('django.db.models.fields.IntegerField', [], {'default': '0', 'max_length': '3', 'null': 'True', 'blank': 'True'}), 'previous_title': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'prev_title'", 'null': 'True', 'to': "orm['journalmanager.Journal']"}), 'print_issn': ('django.db.models.fields.CharField', [], {'max_length': '9', 'db_index': 'True'}), 'pub_level': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'publication_city': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'publisher_country': ('scielo_extensions.modelfields.CountryField', [], {'max_length': '2'}), 'publisher_name': 
('django.db.models.fields.CharField', [], {'max_length': '256'}), 'publisher_state': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'scielo_issn': ('django.db.models.fields.CharField', [], {'max_length': '16'}), 'secs_code': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}), 'short_title': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'db_index': 'True'}), 'sponsor': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'journal_sponsor'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['journalmanager.Sponsor']"}), 'study_areas': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'journals_migration_tmp'", 'null': 'True', 'to': "orm['journalmanager.StudyArea']"}), 'subject_categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'journals'", 'null': 'True', 'to': "orm['journalmanager.SubjectCategory']"}), 'subject_descriptors': ('django.db.models.fields.CharField', [], {'max_length': '1024'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '256', 'db_index': 'True'}), 'title_iso': ('django.db.models.fields.CharField', [], {'max_length': '256', 'db_index': 'True'}), 'twitter_user': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}), 'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'url_journal': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}), 'url_online_submission': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}), 'use_license': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.UseLicense']"}) }, 'journalmanager.journalmission': { 'Meta': {'object_name': 'JournalMission'}, 'description': 
('django.db.models.fields.TextField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'journal': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'missions'", 'to': "orm['journalmanager.Journal']"}), 'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Language']", 'null': 'True'}) }, 'journalmanager.journaltimeline': { 'Meta': {'object_name': 'JournalTimeline'}, 'collection': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Collection']"}), 'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'journal': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'statuses'", 'to': "orm['journalmanager.Journal']"}), 'reason': ('django.db.models.fields.TextField', [], {'default': "''"}), 'since': ('django.db.models.fields.DateTimeField', [], {}), 'status': ('django.db.models.fields.CharField', [], {'max_length': '16'}) }, 'journalmanager.journaltitle': { 'Meta': {'object_name': 'JournalTitle'}, 'category': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'journal': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'other_titles'", 'to': "orm['journalmanager.Journal']"}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '128'}) }, 'journalmanager.language': { 'Meta': {'ordering': "['name']", 'object_name': 'Language'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'iso_code': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}) }, 'journalmanager.membership': { 'Meta': {'unique_together': "(('journal', 'collection'),)", 'object_name': 'Membership'}, 'collection': 
('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Collection']"}), 'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'journal': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Journal']"}), 'reason': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}), 'since': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'status': ('django.db.models.fields.CharField', [], {'default': "'inprogress'", 'max_length': '16'}) }, 'journalmanager.pendedform': { 'Meta': {'object_name': 'PendedForm'}, 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'form_hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pending_forms'", 'to': "orm['auth.User']"}), 'view_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}) }, 'journalmanager.pendedvalue': { 'Meta': {'object_name': 'PendedValue'}, 'form': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'data'", 'to': "orm['journalmanager.PendedForm']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'value': ('django.db.models.fields.TextField', [], {}) }, 'journalmanager.pressrelease': { 'Meta': {'object_name': 'PressRelease'}, 'doi': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}) }, 'journalmanager.pressreleasearticle': { 'Meta': {'object_name': 'PressReleaseArticle'}, 'article_pid': ('django.db.models.fields.CharField', [], 
{'max_length': '32', 'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'press_release': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'articles'", 'to': "orm['journalmanager.PressRelease']"}) }, 'journalmanager.pressreleasetranslation': { 'Meta': {'object_name': 'PressReleaseTranslation'}, 'content': ('django.db.models.fields.TextField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Language']"}), 'press_release': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'translations'", 'to': "orm['journalmanager.PressRelease']"}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '128'}) }, 'journalmanager.regularpressrelease': { 'Meta': {'object_name': 'RegularPressRelease', '_ormbases': ['journalmanager.PressRelease']}, 'issue': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'press_releases'", 'to': "orm['journalmanager.Issue']"}), 'pressrelease_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['journalmanager.PressRelease']", 'unique': 'True', 'primary_key': 'True'}) }, 'journalmanager.section': { 'Meta': {'ordering': "('id',)", 'object_name': 'Section'}, 'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '21', 'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_trashed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}), 'journal': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Journal']"}), 'legacy_code': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}), 'updated': ('django.db.models.fields.DateTimeField', 
[], {'auto_now': 'True', 'blank': 'True'}) }, 'journalmanager.sectiontitle': { 'Meta': {'ordering': "['title']", 'object_name': 'SectionTitle'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Language']"}), 'section': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'titles'", 'to': "orm['journalmanager.Section']"}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '256'}) }, 'journalmanager.sponsor': { 'Meta': {'ordering': "['name']", 'object_name': 'Sponsor', '_ormbases': ['journalmanager.Institution']}, 'collections': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['journalmanager.Collection']", 'symmetrical': 'False'}), 'institution_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['journalmanager.Institution']", 'unique': 'True', 'primary_key': 'True'}) }, 'journalmanager.studyarea': { 'Meta': {'object_name': 'StudyArea'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'study_area': ('django.db.models.fields.CharField', [], {'max_length': '256'}) }, 'journalmanager.subjectcategory': { 'Meta': {'object_name': 'SubjectCategory'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'term': ('django.db.models.fields.CharField', [], {'max_length': '256', 'db_index': 'True'}) }, 'journalmanager.translateddata': { 'Meta': {'object_name': 'TranslatedData'}, 'field': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'language': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'translation': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}) }, 'journalmanager.uselicense': { 'Meta': {'ordering': 
"['license_code']", 'object_name': 'UseLicense'}, 'disclaimer': ('django.db.models.fields.TextField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'license_code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}), 'reference_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}) }, 'journalmanager.usercollections': { 'Meta': {'unique_together': "(('user', 'collection'),)", 'object_name': 'UserCollections'}, 'collection': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Collection']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_manager': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}) }, 'journalmanager.userprofile': { 'Meta': {'object_name': 'UserProfile'}, 'email_notifications': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'tz': ('django.db.models.fields.CharField', [], {'default': "'America/Sao_Paulo'", 'max_length': '150'}), 'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'}) } } complete_apps = ['journalmanager'] symmetrical = True
gustavofonseca/scielo-manager
scielomanager/journalmanager/migrations/0013_fix_nulls_in_twitter_user.py
Python
bsd-2-clause
33,524
import os
import sys
import logging
import traceback
import json
import math
import itertools

from multiprocessing import Pool

from clint.textui import progress
from PIL import Image
from doit import get_var

from . import tilesets
from uristmaps.config import conf

# Reference to that conf section to make the lines a bit shorter.
paths = conf["Paths"]

# Module-level logger.  The original code referenced an undefined name
# ``log`` inside render_tile's error handler; the resulting NameError was
# then swallowed by the enclosing bare ``except``, so failed structure
# renders were silently dropped instead of being logged.
log = logging.getLogger(__name__)


def load_biomes_map():
    """Load the biomes map JSON from the build directory.

    :return: dict with (at least) the keys ``worldsize`` (int) and
             ``map`` (2D list of tile names, indexed ``[y][x]``).
    """
    with open("{}/biomes.json".format(paths["build"]), "r") as biomejson:
        return json.loads(biomejson.read())


def load_structures_map():
    """Load the structures map JSON from the build directory.

    :return: dict whose ``map`` entry maps str(x) -> str(y) -> structure
             tile name (sparse; missing keys mean "no structure here").
    """
    with open("{}/structs.json".format(paths["build"]), "r") as structjson:
        return json.loads(structjson.read())


def render_layer(level):
    """Render all image tiles for the specified zoom level.

    :param level: The zoom level to render.
    """
    biomes = load_biomes_map()
    structures = load_structures_map()

    # Determine which will be the first zoom level to use graphic tiles
    # bigger than 1px:
    zoom_offset = 0
    mapsize = 256
    while mapsize < biomes["worldsize"]:
        mapsize *= 2
        zoom_offset += 1

    # Zoom level 'zoom_offset' will be the first in which the world can
    # be rendered onto the map using 1px sized tiles.
    tile_amount = int(math.pow(2, level))
    graphic_size = int(math.pow(2, level - zoom_offset))

    # Don't render this layer when the world would not even fit if the
    # tiles were 1px big.
    # TODO: Find way to render this: Only draw every <n> world tiles or
    # render big and scale down.
    if graphic_size == 0:
        return

    # Read max number of processes.
    process_count = conf.getint("Performance", "processes")

    # Chunk the amount of tiles to render in equal parts for each process.
    # This would be the ideal chunk size to keep processes from coming
    # back to the pool to get more work, which costs time.
    chunk = tile_amount ** 2

    # Limiting the chunksize helps getting more frequent updates for the
    # progress bar.  This slows the operation down a bit, though.
    chunk = min(chunk, 2048)

    # Have at most as many processes as there are chunks so we don't have
    # more processes than there is work available.
    process_count = min(process_count, chunk)
    chunk //= process_count

    # Setup multiprocessing pool.
    pool = Pool(process_count)

    # Load the tilesheet matching the graphic tile size of this level.
    TILES = tilesets.get_tileset(graphic_size)

    # Save the path to the config file in a pid file for this process'
    # children.
    with open(".{}.txt".format(os.getpid()), "w") as pidfile:
        pidfile.write(get_var("conf", "config.cfg"))

    # Send the tile render jobs to the pool.  get_tasks generates the
    # parameters for each tile.
    a = pool.imap_unordered(render_tile_mp,
                            get_tasks(tile_amount, level, zoom_offset,
                                      biomes, structures, TILES),
                            chunksize=chunk)

    counter = 0
    total = tile_amount ** 2

    # Show a nice progress bar with integrated ETA estimation.
    with progress.Bar(label="Using {}px sized tiles ".format(graphic_size),
                      expected_size=total) as bar:
        for b in a:
            counter += 1
            bar.show(counter)

    pool.close()
    pool.join()

    # Remove the pidfile containing the config path.
    if os.path.exists(".{}.txt".format(os.getpid())):
        os.remove(".{}.txt".format(os.getpid()))


def get_tasks(tileamount, level, zoom_offset, biomes, structures, tiles):
    """Generate the parameters for render_tile_mp calls for every tile
    that will be rendered.  Each set of parameters is a single task for
    a process.
    """
    for x, y in itertools.product(range(tileamount), repeat=2):
        yield (x, y, level, zoom_offset, biomes, structures, tiles)


def render_tile_mp(opts):
    """Wrapper function used by the process pool to call render_tile.

    Unpacks the list of parameters and reports exceptions that would
    otherwise get lost inside the worker processes.
    """
    try:
        render_tile(*opts)
    except Exception as e:
        print("Exception in working process: {}".format(type(e)))
        traceback.print_exc()


def render_tile(tile_x, tile_y, level, zoom_offset, biomes, structures, tiles):
    """Render the world map tile with the given indices at the provided
    zoom level and save it as a PNG below the output directory.

    :param tile_x: X index of the 256px map tile.
    :param tile_y: Y index of the 256px map tile.
    :param level: Zoom level being rendered.
    :param zoom_offset: First zoom level with >= 1px graphic tiles.
    :param biomes: Loaded biomes map (see load_biomes_map).
    :param structures: Loaded structures map (see load_structures_map).
    :param tiles: Tileset mapping tile names to PIL images.
    """
    worldsize = biomes["worldsize"]  # Convenience shortname

    image = Image.new("RGBA", (256, 256), "white")

    # The size of graphic-tiles that will be used for rendering.
    graphic_size = int(math.pow(2, level - zoom_offset))

    # How many world columns/rows are kept clear left and top to center
    # the world render within the square map.
    clear_tiles = 256 * math.pow(2, zoom_offset) - worldsize
    clear_tiles //= 2  # Half it to get the offset left and top of the world.

    tiles_per_block = 256 // graphic_size
    for render_tile_x in range(tiles_per_block):
        global_tile_x = render_tile_x + tile_x * tiles_per_block
        if global_tile_x < clear_tiles:
            continue
        if global_tile_x >= biomes["worldsize"] + clear_tiles:
            break
        for render_tile_y in range(tiles_per_block):
            global_tile_y = render_tile_y + tile_y * tiles_per_block
            if global_tile_y < clear_tiles:
                continue
            if global_tile_y >= biomes["worldsize"] + clear_tiles:
                break

            world_x = int(global_tile_x - clear_tiles)
            world_y = int(global_tile_y - clear_tiles)
            location = (render_tile_x * graphic_size,
                        render_tile_y * graphic_size)

            # Render the biome tile.
            img = tiles[biomes["map"][world_y][world_x]]
            image.paste(img, location)

            # Check if there's a structure to render on top of it.
            # TODO: Read the structures export to place tower/town
            # sprites ontop the biomes.
            try:
                struct_name = structures["map"][str(world_x)][str(world_y)]
            except KeyError:
                # No structure found on this world tile.
                continue
            try:
                struct = tiles[struct_name]
                image.paste(struct, location, struct)
            except Exception:
                log.warning("Could not render image: {}".format(struct_name))

    target_dir = "{}/tiles/{}/{}/".format(paths["output"], level, tile_x)
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    fname = "{}/tiles/{}/{}/{}.png".format(paths["output"], level,
                                           tile_x, tile_y)
    image.save(fname)


if __name__ == "__main__":
    render_layer(5)
dominiks/uristmaps
uristmaps/render_sat_layer.py
Python
gpl-3.0
6,677
# -*- coding: utf-8 -*- # Generated by Django 1.9.7 on 2016-06-29 17:46 from __future__ import unicode_literals from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('polls', '0025_question_poll_algorithm'), ('polls', '0025_item_image'), ] operations = [ ]
PrefPy/opra
compsocsite/polls/migrations/0026_merge.py
Python
mit
330
from wtforms import (
    BooleanField,
    HiddenField,
    MultipleFileField,
    StringField,
    TextAreaField,
)
from wtforms.validators import InputRequired

from CTFd.forms import BaseForm


class PageEditForm(BaseForm):
    """Form for creating or editing a page: title, URL route,
    visibility flags and the page content."""

    title = StringField(
        "Title", description="This is the title shown on the navigation bar"
    )
    route = StringField(
        "Route",
        description="This is the URL route that your page will be at (e.g. /page). You can also enter links to link to that page.",
    )
    # Visibility/access flags; no validators attached, so all of these
    # are optional checkboxes.
    draft = BooleanField("Draft")
    hidden = BooleanField("Hidden")
    auth_required = BooleanField("Authentication Required")
    content = TextAreaField("Content")


class PageFilesUploadForm(BaseForm):
    """Form for uploading one or more files attached to a page."""

    # MultipleFileField with InputRequired: at least one file must be
    # selected for the form to validate.
    file = MultipleFileField(
        "Upload Files",
        description="Attach multiple files using Control+Click or Cmd+Click.",
        validators=[InputRequired()],
    )
    # HiddenField tagging the upload with a type of "page"; not shown to
    # the user but required on submit.
    type = HiddenField("Page Type", default="page", validators=[InputRequired()])
LosFuzzys/CTFd
CTFd/forms/pages.py
Python
apache-2.0
983
'''Main module containing the class Program(), which allows the conversion
from ordinary Python functions into commands for the command line. It uses
:py:module:``argparse`` behind the scenes.'''

import sys
import inspect
import argparse
try:
    # Python 2 name; fall back to the Python 3 equivalent.
    from itertools import izip_longest
except ImportError:  # pragma: no cover
    from itertools import zip_longest as izip_longest
from mando.utils import (purify_doc, action_by_type, find_param_docs,
                         split_doc, ensure_dashes, purify_kwargs)

# Sentinel class: instances mark arguments that have no default value,
# i.e. positional arguments.
_POSITIONAL = type('_positional', (object,), {})
# Name of the argparse default used to stash the function a subparser
# dispatches to.
_DISPATCH_TO = '_dispatch_to'


class Program(object):

    def __init__(self, prog=None, version=None, **kwargs):
        # The root parser; subcommands are attached via self.subparsers.
        self.parser = argparse.ArgumentParser(prog, **kwargs)
        if version is not None:
            self.parser.add_argument('-v', '--version', action='version',
                                     version=version)
        self.subparsers = self.parser.add_subparsers()
        # Maps original function names to their inspected argspecs,
        # consumed later by parse().
        self.argspecs = {}
        # Name of the last command executed via execute(); None until then.
        self.current_command = None

    def command(self, *args, **kwargs):
        '''A decorator to convert a function into a command. It can be
        applied as ``@command`` or as ``@command(new_name)``, specifying an
        alternative name for the command (default one is ``func.__name__``).'''
        # Bare ``@command`` usage: the single positional arg is the function.
        if len(args) == 1 and hasattr(args[0], '__call__'):
            return self._generate_command(args[0])
        else:
            # ``@command(...)`` usage: return a decorator closing over args.
            def _command(func):
                return self._generate_command(func, *args, **kwargs)
            return _command

    def arg(self, param, *args, **kwargs):
        '''A decorator to override the parameters extracted from the
        docstring or to add new ones.

        :param param: The parameter's name. It must be among the function's
            arguments names.'''
        def wrapper(func):
            # Overrides are stashed on the function itself and merged in
            # analyze_func() when the command is generated.
            if not hasattr(func, '_argopts'):
                func._argopts = {}
            func._argopts[param] = (args, kwargs)
            return func
        return wrapper

    def _generate_command(self, func, name=None, *args, **kwargs):
        '''Generate argparse's subparser.

        :param func: The function to analyze.
        :param name: If given, a different name for the command. The
            default one is ``func.__name__``.'''
        func_name = func.__name__
        name = func_name if name is None else name
        argspec = inspect.getargspec(func)
        # NOTE: keyed by the original function name (not the command
        # name), matching the lookup in parse().
        self.argspecs[func_name] = argspec
        # Pair each argument with its default; args without a default get
        # a fresh _POSITIONAL() sentinel.  Reversing aligns defaults with
        # the trailing arguments, as Python's calling convention requires.
        argz = izip_longest(reversed(argspec.args),
                            reversed(argspec.defaults or []),
                            fillvalue=_POSITIONAL())
        argz = reversed(list(argz))
        doc = (inspect.getdoc(func) or '').strip() + '\n'
        cmd_help, cmd_desc = split_doc(purify_doc(doc))
        subparser = self.subparsers.add_parser(name,
                                               help=cmd_help or None,
                                               description=cmd_desc or None,
                                               **kwargs)
        for a, kw in self.analyze_func(func, doc, argz, argspec.varargs):
            subparser.add_argument(*a, **purify_kwargs(kw))
        # Remember which function this subparser dispatches to.
        subparser.set_defaults(**{_DISPATCH_TO: func})
        return func

    def analyze_func(self, func, doc, argz, varargs_name):
        '''Analyze the given function, merging default arguments, overridden
        arguments (with @arg) and parameters extracted from the docstring.

        :param func: The function to analyze.
        :param doc: The function's docstring.
        :param argz: A list of the form (arg, default), containing arguments
            and their default value.
        :param varargs_name: The name of the variable arguments, if present,
            otherwise ``None``.'''
        params = find_param_docs(doc)
        for arg, default in argz:
            # @arg overrides win over docstring-extracted parameters.
            override = getattr(func, '_argopts', {}).get(arg, ((), {}))
            yield merge(arg, default, override, *params.get(arg, ([], {})))
        if varargs_name is not None:
            # *args become an argparse positional consuming zero or more.
            kwargs = {'nargs': '*'}
            kwargs.update(params.get(varargs_name, (None, {}))[1])
            yield ([varargs_name], kwargs)

    def parse(self, args):
        '''Parse the given arguments and return a tuple ``(command, args)``,
        where ``args`` is a list consisting of all arguments. The command can
        then be called as ``command(*args)``.

        :param args: The arguments to parse.'''
        arg_map = self.parser.parse_args(args).__dict__
        # Retrieve the function set by set_defaults() in _generate_command.
        command = arg_map.pop(_DISPATCH_TO)
        argspec = self.argspecs[command.__name__]
        real_args = []
        # Rebuild the positional call list in the function's own order.
        for arg in argspec.args:
            real_args.append(arg_map.pop(arg))
        # Append any parsed *args values, if present and non-empty.
        if arg_map and arg_map.get(argspec.varargs):
            real_args.extend(arg_map.pop(argspec.varargs))
        return command, real_args

    def execute(self, args):
        '''Parse the arguments and execute the resulting command.

        :param args: The arguments to parse.'''
        command, a = self.parse(args)
        self.current_command = command.__name__
        return command(*a)

    def __call__(self):  # pragma: no cover
        '''Parse ``sys.argv`` and execute the resulting command.'''
        self.execute(sys.argv[1:])


def merge(arg, default, override, args, kwargs):
    '''Merge all the possible arguments into a tuple and a dictionary.

    :param arg: The argument's name.
    :param default: The argument's default value or an instance of
        _POSITIONAL.
    :param override: A tuple containing (args, kwargs) given to @arg.
    :param args: The arguments extracted from the docstring.
    :param kwargs: The keyword arguments extracted from the docstring.'''
    opts = [arg]
    if not isinstance(default, _POSITIONAL):
        # Optional argument: ensure option-style dashes and wire up the
        # default plus a type-appropriate argparse action.
        opts = list(ensure_dashes(args or opts))
        kwargs.update({'default': default, 'dest': arg})
        kwargs.update(action_by_type(default))
    else:
        # positionals can't have a metavar, otherwise the help is screwed
        # if one really wants the metavar, it can be added with @arg
        kwargs['metavar'] = None
    # @arg keyword overrides always win.
    kwargs.update(override[1])
    return override[0] or opts, kwargs
GbalsaC/bitnamiP
venv/lib/python2.7/site-packages/mando/core.py
Python
agpl-3.0
6,201
__author__ = 'Dani'


class DatasetUserFileGroup(object):
    """Plain value object tying together a dataset, the user it belongs
    to, and the path of the file it was read from."""

    def __init__(self, dataset, user, file_path):
        """Keep the three components exactly as given."""
        self.dataset, self.user, self.file_path = dataset, user, file_path
landportal/landbook-importers
old-importers/IpfriExtractor/es/weso/translator/dataset_user_pair.py
Python
mit
200
from time import time
from datetime import datetime

from mock import patch, Mock, sentinel
from nose import tools

from eventtracker.tasks import track, collect_events
from eventtracker.models import save_event


@patch('eventtracker.tasks._get_carrot_object')
def test_track_sends_event(get_carrot_object):
    # Make the factory return the mock itself so ``.send`` calls are
    # recorded on the patched object.
    get_carrot_object.return_value = get_carrot_object
    args = ('event', {'some': 'params'})

    track(*args)

    tools.assert_true(get_carrot_object.send.called)
    cargs, ckwargs = get_carrot_object.send.call_args
    # send() must be called with exactly one positional argument and no
    # keyword arguments: a (name, timestamp, params) triple.
    tools.assert_equals(1, len(cargs))
    cargs = cargs[0]
    tools.assert_equals({}, ckwargs)
    tools.assert_equals(args[0], cargs[0])
    # The timestamp must have been taken "now" (within the last second).
    tools.assert_true(time() > cargs[1] > time() - 1)
    tools.assert_equals(args[1], cargs[2])


# Decorators apply bottom-up, so the parameters arrive in the order
# (save_event, get_mongo_collection, get_carrot_object).
@patch('eventtracker.tasks._get_carrot_object')
@patch('eventtracker.models.get_mongo_collection')
@patch('eventtracker.models.save_event')
def test_collect_events_calls_save_event_for_every_event_in_queue(save_event, get_mongo_collection, get_carrot_object):
    collection = Mock()
    get_mongo_collection.return_value = collection
    t1, t2 = time() - 1, time()
    # Two fake queue messages; decode() yields (event, timestamp, params).
    m1, m2 = Mock(), Mock()
    m1.decode.return_value, m2.decode.return_value = ('event', t1, {'some': 'params'}), ('other_event', t2, {'other': 'params'})
    consumer = Mock()
    get_carrot_object.return_value = consumer
    consumer.iterqueue.return_value = [m1, m2]

    collect_events()

    # Each message must be decoded and then acknowledged, in that order.
    tools.assert_equals([('decode', (), {}), ('ack', (), {})], m1.method_calls)
    tools.assert_equals([('decode', (), {}), ('ack', (), {})], m2.method_calls)
    tools.assert_equals(1, consumer.iterqueue.call_count)
    tools.assert_equals(2, save_event.call_count)
    # save_event must be invoked once per message with the decoded triple
    # prefixed by the mongo collection.
    tools.assert_equals([
            ((collection, 'event', t1, {'some': 'params'}), {}),
            ((collection, 'other_event', t2, {'other': 'params'}), {})
        ],
        save_event.call_args_list
    )


def test_save_event_inserts_into_collection():
    # Exercises the real (imported) save_event against a mock collection.
    collection = Mock()
    t = time()

    save_event(collection, sentinel.event, t, sentinel.params)

    # Exactly one method call on the collection: a single insert().
    tools.assert_equals(1, len(collection.method_calls))
    method, args, kwargs = collection.method_calls[0]
    tools.assert_equals('insert', method)
    tools.assert_equals(1, len(args))
    tools.assert_equals({}, kwargs)
    # The inserted document carries the params, event name and the
    # timestamp converted to a datetime.
    tools.assert_equals(sentinel.params, args[0]['params'])
    tools.assert_equals(sentinel.event, args[0]['event'])
    tools.assert_equals(datetime.fromtimestamp(t), args[0]['timestamp'])
ella/django-event-tracker
tests/test_tasks.py
Python
bsd-3-clause
2,486
import sys
import os


def is_logfile(f):
    """Return True if the filename *f* looks like an AWR JobScheduler log.

    Matching is case-insensitive: the name must start with
    'awr_jobscheduler' and end with '.txt'.  Pass a bare filename, not a
    path -- a directory prefix would defeat the startswith test.
    """
    f = f.lower()
    return f.startswith('awr_jobscheduler') and f.endswith('.txt')


def expand_file_list(file_args):
    """Expand files and directories into a list of logfile paths, oldest
    first.

    :param file_args: iterable of file and/or directory paths.  Files are
        kept when their basename matches is_logfile(); directories are
        walked recursively for matching files.  Paths that are neither
        an existing file nor a directory are silently skipped.
    :return: matching paths sorted by modification time (ascending).
    """
    file_list = []
    for f in file_args:
        if os.path.isfile(f):
            # Match on the basename so paths such as
            # 'logs/AWR_JobScheduler_1.txt' are recognized too.  The
            # original tested the whole path here (while the directory
            # branch below used the basename), rejecting any matching
            # file given with a directory prefix.
            if is_logfile(os.path.basename(f)):
                file_list.append(f)
        elif os.path.isdir(f):
            for root, dirs, files in os.walk(f):
                # Entries in ``files`` are already bare names.
                for name in files:
                    if is_logfile(name):
                        file_list.append(os.path.join(root, name))

    # Need to sort by file date.
    return sorted(file_list, key=os.path.getmtime)


if __name__ == '__main__':
    print(sys.argv[1:])
    for f in expand_file_list(sys.argv[1:]):
        print(f)
danecollins/awrjs
js/util.py
Python
mit
868
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('feti', '0017_auto_20150601_1252'), ] operations = [ migrations.AlterField( model_name='course', name='course_description', field=models.CharField(max_length=200, null=True, blank=True), preserve_default=True, ), ]
lucernae/feti
django_project/feti/migrations/0018_auto_20150601_1253.py
Python
bsd-2-clause
470
# (c) Nelen & Schuurmans.  GPL licensed, see LICENSE.txt.
# URL configuration for the lizard_waterbalance app (old-style Django
# string-view ``patterns()`` syntax).
from django.conf.urls.defaults import *
from django.conf import settings
from django.contrib import admin
# from django.core.urlresolvers import reverse

admin.autodiscover()

# Breadcrumb trail prepended to the waterbalance views below.
crumbs_waterbalance = [
    {'name': 'home', 'url': '/', 'title': 'hoofdpagina'},
    # ^^^ Pretty hardcoded and Dutch.
]

urlpatterns = patterns(
    '',
    (r'^admin/', include(admin.site.urls)),
    (r'^configuration/',
     'lizard_waterbalance.views.configuration_edit',),
    # Waterbalance screens.
    (r'^$',
     'lizard_waterbalance.views.waterbalance_start',
     {'crumbs_prepend': list(crumbs_waterbalance), },
     'waterbalance_start'),
    # Summary page for an area/scenario pair.
    (r'^summary/(?P<area_slug>[^/]+)/scenario/(?P<scenario_slug>[^/]+)/$',
     'lizard_waterbalance.views.waterbalance_area_summary',
     {'crumbs_prepend': list(crumbs_waterbalance), },
     'waterbalance_area_summary',
     ),
    # Edit screens: the main edit page plus one sub-view per section.
    (r'^summary/(?P<area_slug>[^/]+)/scenario/(?P<scenario_slug>[^/]+)'
     '/edit/$',
     'lizard_waterbalance.views.waterbalance_area_edit',
     {'crumbs_prepend': list(crumbs_waterbalance), },
     'waterbalance_area_edit',
     ),
    (r'^summary/(?P<area_slug>[^/]+)/scenario/(?P<scenario_slug>[^/]+)'
     '/edit/conf/$',
     'lizard_waterbalance.views.waterbalance_area_edit_sub_conf',
     {},
     'waterbalance_area_edit_sub_conf',
     ),
    (r'^summary/(?P<area_slug>[^/]+)/scenario/(?P<scenario_slug>[^/]+)'
     '/edit/openwater/$',
     'lizard_waterbalance.views.waterbalance_area_edit_sub_openwater',
     {},
     'waterbalance_area_edit_sub_openwater',
     ),
    (r'^summary/(?P<area_slug>[^/]+)/scenario/(?P<scenario_slug>[^/]+)'
     '/edit/buckets/$',
     'lizard_waterbalance.views.waterbalance_area_edit_sub_buckets',
     {},
     'waterbalance_area_edit_sub_buckets',
     ),
    (r'^summary/(?P<area_slug>[^/]+)/scenario/(?P<scenario_slug>[^/]+)'
     '/edit/out/$',
     'lizard_waterbalance.views.waterbalance_area_edit_sub_out',
     {},
     'waterbalance_area_edit_sub_out',
     ),
    (r'^summary/(?P<area_slug>[^/]+)/scenario/(?P<scenario_slug>[^/]+)'
     '/edit/in/$',
     'lizard_waterbalance.views.waterbalance_area_edit_sub_in',
     {},
     'waterbalance_area_edit_sub_in',
     ),
    # Per-pump variant of the "in" edit view.
    (r'^summary/(?P<area_slug>[^/]+)/scenario/(?P<scenario_slug>[^/]+)'
     '/edit/in/(?P<pump_id>[^/]+)/$',
     'lizard_waterbalance.views.waterbalance_area_edit_sub_in_single',
     {},
     'waterbalance_area_edit_sub_in_single',
     ),
    (r'^summary/(?P<area_slug>[^/]+)/scenario/(?P<scenario_slug>[^/]+)'
     '/edit/labels/$',
     'lizard_waterbalance.views.waterbalance_area_edit_sub_labels',
     {},
     'waterbalance_area_edit_sub_labels',
     ),
    (r'^summary/(?P<area_slug>[^/]+)/scenario/(?P<scenario_slug>[^/]+)'
     '/edit/7/$',
     'lizard_waterbalance.views.waterbalance_area_edit_sub7',
     {},
     'waterbalance_area_edit_sub7',
     ),  # TODO: 1..6 as parameter??
    # Graph data and exports.  NOTE: these use the greedy ``.*`` pattern
    # for the slugs instead of ``[^/]+`` as above.
    (r'^summary/(?P<area_slug>.*)/scenario/(?P<scenario_slug>.*)'
     '/recalculate_graph_data/$',
     'lizard_waterbalance.views.recalculate_graph_data',
     {},
     "waterbalance_graph_recalculate_data"),
    (r'^summary/(?P<area_slug>.*)/scenario/(?P<scenario_slug>.*)'
     '/graph/(?P<graph_type>.*)/$',
     'lizard_waterbalance.views.waterbalance_area_graphs',
     {},
     'waterbalance_area_graph'),
    (r'^summary/(?P<area_slug>.*)/scenario/(?P<scenario_slug>.*)'
     '/export_excel_small/$',
     'lizard_waterbalance.export.export_excel_small',
     {},
     'waterbalance_excel_export_small'),
    (r'^graphselect/$',
     'lizard_waterbalance.views.graph_select',
     {},
     "waterbalance_graph_select"),
    (r'^area_search/',
     'lizard_waterbalance.views.waterbalance_shapefile_search',
     {},
     "waterbalance_area_search"),
    (r'^search_fews_lkeys/',
     'lizard_waterbalance.views.search_fews_lkeys',
     {},
     "waterbalance_search_fews_lkeys"),
    # Viewer
    (r'^viewer/$',
     'lizard_waterbalance.viewer.waterbalance_viewer',
     {},
     "waterbalance_viewer"),
    )


if settings.DEBUG:
    # Add this also to the projects that use this application
    urlpatterns += patterns('',
        (r'', include('staticfiles.urls')),
    )
lizardsystem/lizard-waterbalance
lizard_waterbalance/urls.py
Python
gpl-3.0
4,274
# -*- test-case-name: twisted.web.test.test_static -*- # Copyright (c) Twisted Matrix Laboratories. # See LICENSE for details. """ Static resources for L{twisted.web}. """ from __future__ import division, absolute_import import os import warnings import itertools import time import errno import mimetypes from zope.interface import implementer from twisted.web import server from twisted.web import resource from twisted.web import http from twisted.web.util import redirectTo from twisted.python.compat import networkString, intToBytes, nativeString, _PY3 from twisted.python.compat import escape from twisted.python import components, filepath, log from twisted.internet import abstract, interfaces from twisted.python.util import InsensitiveDict from twisted.python.runtime import platformType from twisted.python.url import URL from twisted.python.versions import Version from twisted.python.deprecate import deprecated if _PY3: from urllib.parse import quote, unquote else: from urllib import quote, unquote dangerousPathError = resource.NoResource("Invalid request URL.") def isDangerous(path): return path == b'..' or b'/' in path or networkString(os.sep) in path class Data(resource.Resource): """ This is a static, in-memory resource. """ def __init__(self, data, type): resource.Resource.__init__(self) self.data = data self.type = type def render_GET(self, request): request.setHeader(b"content-type", networkString(self.type)) request.setHeader(b"content-length", intToBytes(len(self.data))) if request.method == b"HEAD": return b'' return self.data render_HEAD = render_GET @deprecated(Version("Twisted", 16, 0, 0)) def addSlash(request): """ Add a trailing slash to C{request}'s URI. Deprecated, do not use. """ return _addSlash(request) def _addSlash(request): """ Add a trailing slash to C{request}'s URI. @param request: The incoming request to add the ending slash to. 
@type request: An object conforming to L{twisted.web.iweb.IRequest} @return: A URI with a trailing slash, with query and fragment preserved. @rtype: L{bytes} """ url = URL.fromText(request.uri.decode('ascii')) # Add an empty path segment at the end, so that it adds a trailing slash url = url.replace(path=list(url.path) + [u""]) return url.asText().encode('ascii') class Redirect(resource.Resource): def __init__(self, request): resource.Resource.__init__(self) self.url = _addSlash(request) def render(self, request): return redirectTo(self.url, request) class Registry(components.Componentized): """ I am a Componentized object that will be made available to internal Twisted file-based dynamic web content such as .rpy and .epy scripts. """ def __init__(self): components.Componentized.__init__(self) self._pathCache = {} def cachePath(self, path, rsrc): self._pathCache[path] = rsrc def getCachedPath(self, path): return self._pathCache.get(path) def loadMimeTypes(mimetype_locations=None, init=mimetypes.init): """ Produces a mapping of extensions (with leading dot) to MIME types. It does this by calling the C{init} function of the L{mimetypes} module. This will have the side effect of modifying the global MIME types cache in that module. Multiple file locations containing mime-types can be passed as a list. The files will be sourced in that order, overriding mime-types from the files sourced beforehand, but only if a new entry explicitly overrides the current entry. @param mimetype_locations: Optional. List of paths to C{mime.types} style files that should be used. @type mimetype_locations: iterable of paths or C{None} @param init: The init function to call. Defaults to the global C{init} function of the C{mimetypes} module. For internal use (testing) only. 
@type init: callable """ init(mimetype_locations) mimetypes.types_map.update( { '.conf': 'text/plain', '.diff': 'text/plain', '.flac': 'audio/x-flac', '.java': 'text/plain', '.oz': 'text/x-oz', '.swf': 'application/x-shockwave-flash', '.wml': 'text/vnd.wap.wml', '.xul': 'application/vnd.mozilla.xul+xml', '.patch': 'text/plain' } ) return mimetypes.types_map def getTypeAndEncoding(filename, types, encodings, defaultType): p, ext = filepath.FilePath(filename).splitext() ext = filepath._coerceToFilesystemEncoding('', ext.lower()) if ext in encodings: enc = encodings[ext] ext = os.path.splitext(p)[1].lower() else: enc = None type = types.get(ext, defaultType) return type, enc class File(resource.Resource, filepath.FilePath): """ File is a resource that represents a plain non-interpreted file (although it can look for an extension like .rpy or .cgi and hand the file to a processor for interpretation if you wish). Its constructor takes a file path. Alternatively, you can give a directory path to the constructor. In this case the resource will represent that directory, and its children will be files underneath that directory. This provides access to an entire filesystem tree with a single Resource. If you map the URL 'http://server/FILE' to a resource created as File('/tmp'), then http://server/FILE/ will return an HTML-formatted listing of the /tmp/ directory, and http://server/FILE/foo/bar.html will return the contents of /tmp/foo/bar.html . @cvar childNotFound: L{Resource} used to render 404 Not Found error pages. @cvar forbidden: L{Resource} used to render 403 Forbidden error pages. """ contentTypes = loadMimeTypes() contentEncodings = { ".gz" : "gzip", ".bz2": "bzip2" } processors = {} indexNames = ["index", "index.html", "index.htm", "index.rpy"] type = None def __init__(self, path, defaultType="text/html", ignoredExts=(), registry=None, allowExt=0): """ Create a file with the given path. @param path: The filename of the file from which this L{File} will serve data. 
@type path: C{str} @param defaultType: A I{major/minor}-style MIME type specifier indicating the I{Content-Type} with which this L{File}'s data will be served if a MIME type cannot be determined based on C{path}'s extension. @type defaultType: C{str} @param ignoredExts: A sequence giving the extensions of paths in the filesystem which will be ignored for the purposes of child lookup. For example, if C{ignoredExts} is C{(".bar",)} and C{path} is a directory containing a file named C{"foo.bar"}, a request for the C{"foo"} child of this resource will succeed with a L{File} pointing to C{"foo.bar"}. @param registry: The registry object being used to handle this request. If C{None}, one will be created. @type registry: L{Registry} @param allowExt: Ignored parameter, only present for backwards compatibility. Do not pass a value for this parameter. """ resource.Resource.__init__(self) filepath.FilePath.__init__(self, path) self.defaultType = defaultType if ignoredExts in (0, 1) or allowExt: warnings.warn("ignoredExts should receive a list, not a boolean") if ignoredExts or allowExt: self.ignoredExts = [b'*'] else: self.ignoredExts = [] else: self.ignoredExts = list(ignoredExts) self.registry = registry or Registry() def ignoreExt(self, ext): """Ignore the given extension. Serve file.ext if file is requested """ self.ignoredExts.append(ext) childNotFound = resource.NoResource("File not found.") forbidden = resource.ForbiddenResource() def directoryListing(self): return DirectoryLister(self.path, self.listNames(), self.contentTypes, self.contentEncodings, self.defaultType) def getChild(self, path, request): """ If this L{File}'s path refers to a directory, return a L{File} referring to the file named C{path} in that directory. If C{path} is the empty string, return a L{DirectoryLister} instead. 
""" self.restat(reraise=False) if not self.isdir(): return self.childNotFound if path: try: fpath = self.child(path) except filepath.InsecurePath: return self.childNotFound else: fpath = self.childSearchPreauth(*self.indexNames) if fpath is None: return self.directoryListing() if not fpath.exists(): fpath = fpath.siblingExtensionSearch(*self.ignoredExts) if fpath is None: return self.childNotFound if platformType == "win32": # don't want .RPY to be different than .rpy, since that would allow # source disclosure. processor = InsensitiveDict(self.processors).get(fpath.splitext()[1]) else: processor = self.processors.get(fpath.splitext()[1]) if processor: return resource.IResource(processor(fpath.path, self.registry)) return self.createSimilarFile(fpath.path) # methods to allow subclasses to e.g. decrypt files on the fly: def openForReading(self): """Open a file and return it.""" return self.open() def getFileSize(self): """Return file size.""" return self.getsize() def _parseRangeHeader(self, range): """ Parse the value of a Range header into (start, stop) pairs. In a given pair, either of start or stop can be None, signifying that no value was provided, but not both. @return: A list C{[(start, stop)]} of pairs of length at least one. @raise ValueError: if the header is syntactically invalid or if the Bytes-Unit is anything other than 'bytes'. 
""" try: kind, value = range.split(b'=', 1) except ValueError: raise ValueError("Missing '=' separator") kind = kind.strip() if kind != b'bytes': raise ValueError("Unsupported Bytes-Unit: %r" % (kind,)) unparsedRanges = list(filter(None, map(bytes.strip, value.split(b',')))) parsedRanges = [] for byteRange in unparsedRanges: try: start, end = byteRange.split(b'-', 1) except ValueError: raise ValueError("Invalid Byte-Range: %r" % (byteRange,)) if start: try: start = int(start) except ValueError: raise ValueError("Invalid Byte-Range: %r" % (byteRange,)) else: start = None if end: try: end = int(end) except ValueError: raise ValueError("Invalid Byte-Range: %r" % (byteRange,)) else: end = None if start is not None: if end is not None and start > end: # Start must be less than or equal to end or it is invalid. raise ValueError("Invalid Byte-Range: %r" % (byteRange,)) elif end is None: # One or both of start and end must be specified. Omitting # both is invalid. raise ValueError("Invalid Byte-Range: %r" % (byteRange,)) parsedRanges.append((start, end)) return parsedRanges def _rangeToOffsetAndSize(self, start, end): """ Convert a start and end from a Range header to an offset and size. This method checks that the resulting range overlaps with the resource being served (and so has the value of C{getFileSize()} as an indirect input). Either but not both of start or end can be C{None}: - Omitted start means that the end value is actually a start value relative to the end of the resource. - Omitted end means the end of the resource should be the end of the range. End is interpreted as inclusive, as per RFC 2616. If this range doesn't overlap with any of this resource, C{(0, 0)} is returned, which is not otherwise a value return value. @param start: The start value from the header, or C{None} if one was not present. @param end: The end value from the header, or C{None} if one was not present. 
@return: C{(offset, size)} where offset is how far into this resource this resource the range begins and size is how long the range is, or C{(0, 0)} if the range does not overlap this resource. """ size = self.getFileSize() if start is None: start = size - end end = size elif end is None: end = size elif end < size: end += 1 elif end > size: end = size if start >= size: start = end = 0 return start, (end - start) def _contentRange(self, offset, size): """ Return a string suitable for the value of a Content-Range header for a range with the given offset and size. The offset and size are not sanity checked in any way. @param offset: How far into this resource the range begins. @param size: How long the range is. @return: The value as appropriate for the value of a Content-Range header. """ return networkString('bytes %d-%d/%d' % ( offset, offset + size - 1, self.getFileSize())) def _doSingleRangeRequest(self, request, startAndEnd): """ Set up the response for Range headers that specify a single range. This method checks if the request is satisfiable and sets the response code and Content-Range header appropriately. The return value indicates which part of the resource to return. @param request: The Request object. @param startAndEnd: A 2-tuple of start of the byte range as specified by the header and the end of the byte range as specified by the header. At most one of the start and end may be C{None}. @return: A 2-tuple of the offset and size of the range to return. offset == size == 0 indicates that the request is not satisfiable. """ start, end = startAndEnd offset, size = self._rangeToOffsetAndSize(start, end) if offset == size == 0: # This range doesn't overlap with any of this resource, so the # request is unsatisfiable. 
request.setResponseCode(http.REQUESTED_RANGE_NOT_SATISFIABLE) request.setHeader( b'content-range', networkString('bytes */%d' % (self.getFileSize(),))) else: request.setResponseCode(http.PARTIAL_CONTENT) request.setHeader( b'content-range', self._contentRange(offset, size)) return offset, size def _doMultipleRangeRequest(self, request, byteRanges): """ Set up the response for Range headers that specify a single range. This method checks if the request is satisfiable and sets the response code and Content-Type and Content-Length headers appropriately. The return value, which is a little complicated, indicates which parts of the resource to return and the boundaries that should separate the parts. In detail, the return value is a tuple rangeInfo C{rangeInfo} is a list of 3-tuples C{(partSeparator, partOffset, partSize)}. The response to this request should be, for each element of C{rangeInfo}, C{partSeparator} followed by C{partSize} bytes of the resource starting at C{partOffset}. Each C{partSeparator} includes the MIME-style boundary and the part-specific Content-type and Content-range headers. It is convenient to return the separator as a concrete string from this method, because this method needs to compute the number of bytes that will make up the response to be able to set the Content-Length header of the response accurately. @param request: The Request object. @param byteRanges: A list of C{(start, end)} values as specified by the header. For each range, at most one of C{start} and C{end} may be C{None}. @return: See above. """ matchingRangeFound = False rangeInfo = [] contentLength = 0 boundary = networkString("%x%x" % (int(time.time()*1000000), os.getpid())) if self.type: contentType = self.type else: contentType = b'bytes' # It's what Apache does... 
for start, end in byteRanges: partOffset, partSize = self._rangeToOffsetAndSize(start, end) if partOffset == partSize == 0: continue contentLength += partSize matchingRangeFound = True partContentRange = self._contentRange(partOffset, partSize) partSeparator = networkString(( "\r\n" "--%s\r\n" "Content-type: %s\r\n" "Content-range: %s\r\n" "\r\n") % (nativeString(boundary), nativeString(contentType), nativeString(partContentRange))) contentLength += len(partSeparator) rangeInfo.append((partSeparator, partOffset, partSize)) if not matchingRangeFound: request.setResponseCode(http.REQUESTED_RANGE_NOT_SATISFIABLE) request.setHeader( b'content-length', b'0') request.setHeader( b'content-range', networkString('bytes */%d' % (self.getFileSize(),))) return [], b'' finalBoundary = b"\r\n--" + boundary + b"--\r\n" rangeInfo.append((finalBoundary, 0, 0)) request.setResponseCode(http.PARTIAL_CONTENT) request.setHeader( b'content-type', networkString('multipart/byteranges; boundary="%s"' % (nativeString(boundary),))) request.setHeader( b'content-length', intToBytes(contentLength + len(finalBoundary))) return rangeInfo def _setContentHeaders(self, request, size=None): """ Set the Content-length and Content-type headers for this request. This method is not appropriate for requests for multiple byte ranges; L{_doMultipleRangeRequest} will set these headers in that case. @param request: The L{Request} object. @param size: The size of the response. If not specified, default to C{self.getFileSize()}. """ if size is None: size = self.getFileSize() request.setHeader(b'content-length', intToBytes(size)) if self.type: request.setHeader(b'content-type', networkString(self.type)) if self.encoding: request.setHeader(b'content-encoding', networkString(self.encoding)) def makeProducer(self, request, fileForReading): """ Make a L{StaticProducer} that will produce the body of this response. This method will also set the response code and Content-* headers. @param request: The L{Request} object. 
@param fileForReading: The file object containing the resource. @return: A L{StaticProducer}. Calling C{.start()} on this will begin producing the response. """ byteRange = request.getHeader(b'range') if byteRange is None: self._setContentHeaders(request) request.setResponseCode(http.OK) return NoRangeStaticProducer(request, fileForReading) try: parsedRanges = self._parseRangeHeader(byteRange) except ValueError: log.msg("Ignoring malformed Range header %r" % (byteRange.decode(),)) self._setContentHeaders(request) request.setResponseCode(http.OK) return NoRangeStaticProducer(request, fileForReading) if len(parsedRanges) == 1: offset, size = self._doSingleRangeRequest( request, parsedRanges[0]) self._setContentHeaders(request, size) return SingleRangeStaticProducer( request, fileForReading, offset, size) else: rangeInfo = self._doMultipleRangeRequest(request, parsedRanges) return MultipleRangeStaticProducer( request, fileForReading, rangeInfo) def render_GET(self, request): """ Begin sending the contents of this L{File} (or a subset of the contents, based on the 'range' header) to the given request. """ self.restat(False) if self.type is None: self.type, self.encoding = getTypeAndEncoding(self.basename(), self.contentTypes, self.contentEncodings, self.defaultType) if not self.exists(): return self.childNotFound.render(request) if self.isdir(): return self.redirect(request) request.setHeader(b'accept-ranges', b'bytes') try: fileForReading = self.openForReading() except IOError as e: if e.errno == errno.EACCES: return self.forbidden.render(request) else: raise if request.setLastModified(self.getModificationTime()) is http.CACHED: # `setLastModified` also sets the response code for us, so if the # request is cached, we close the file now that we've made sure that # the request would otherwise succeed and return an empty body. fileForReading.close() return b'' if request.method == b'HEAD': # Set the content headers here, rather than making a producer. 
self._setContentHeaders(request) # We've opened the file to make sure it's accessible, so close it # now that we don't need it. fileForReading.close() return b'' producer = self.makeProducer(request, fileForReading) producer.start() # and make sure the connection doesn't get closed return server.NOT_DONE_YET render_HEAD = render_GET def redirect(self, request): return redirectTo(_addSlash(request), request) def listNames(self): if not self.isdir(): return [] directory = self.listdir() directory.sort() return directory def listEntities(self): return list(map(lambda fileName, self=self: self.createSimilarFile(os.path.join(self.path, fileName)), self.listNames())) def createSimilarFile(self, path): f = self.__class__(path, self.defaultType, self.ignoredExts, self.registry) # refactoring by steps, here - constructor should almost certainly take these f.processors = self.processors f.indexNames = self.indexNames[:] f.childNotFound = self.childNotFound return f @implementer(interfaces.IPullProducer) class StaticProducer(object): """ Superclass for classes that implement the business of producing. @ivar request: The L{IRequest} to write the contents of the file to. @ivar fileObject: The file the contents of which to write to the request. """ bufferSize = abstract.FileDescriptor.bufferSize def __init__(self, request, fileObject): """ Initialize the instance. """ self.request = request self.fileObject = fileObject def start(self): raise NotImplementedError(self.start) def resumeProducing(self): raise NotImplementedError(self.resumeProducing) def stopProducing(self): """ Stop producing data. L{IPullProducer.stopProducing} is called when our consumer has died, and subclasses also call this method when they are done producing data. """ self.fileObject.close() self.request = None class NoRangeStaticProducer(StaticProducer): """ A L{StaticProducer} that writes the entire file to the request. 
""" def start(self): self.request.registerProducer(self, False) def resumeProducing(self): if not self.request: return data = self.fileObject.read(self.bufferSize) if data: # this .write will spin the reactor, calling .doWrite and then # .resumeProducing again, so be prepared for a re-entrant call self.request.write(data) else: self.request.unregisterProducer() self.request.finish() self.stopProducing() class SingleRangeStaticProducer(StaticProducer): """ A L{StaticProducer} that writes a single chunk of a file to the request. """ def __init__(self, request, fileObject, offset, size): """ Initialize the instance. @param request: See L{StaticProducer}. @param fileObject: See L{StaticProducer}. @param offset: The offset into the file of the chunk to be written. @param size: The size of the chunk to write. """ StaticProducer.__init__(self, request, fileObject) self.offset = offset self.size = size def start(self): self.fileObject.seek(self.offset) self.bytesWritten = 0 self.request.registerProducer(self, 0) def resumeProducing(self): if not self.request: return data = self.fileObject.read( min(self.bufferSize, self.size - self.bytesWritten)) if data: self.bytesWritten += len(data) # this .write will spin the reactor, calling .doWrite and then # .resumeProducing again, so be prepared for a re-entrant call self.request.write(data) if self.request and self.bytesWritten == self.size: self.request.unregisterProducer() self.request.finish() self.stopProducing() class MultipleRangeStaticProducer(StaticProducer): """ A L{StaticProducer} that writes several chunks of a file to the request. """ def __init__(self, request, fileObject, rangeInfo): """ Initialize the instance. @param request: See L{StaticProducer}. @param fileObject: See L{StaticProducer}. @param rangeInfo: A list of tuples C{[(boundary, offset, size)]} where: - C{boundary} will be written to the request first. - C{offset} the offset into the file of chunk to write. - C{size} the size of the chunk to write. 
""" StaticProducer.__init__(self, request, fileObject) self.rangeInfo = rangeInfo def start(self): self.rangeIter = iter(self.rangeInfo) self._nextRange() self.request.registerProducer(self, 0) def _nextRange(self): self.partBoundary, partOffset, self._partSize = next(self.rangeIter) self._partBytesWritten = 0 self.fileObject.seek(partOffset) def resumeProducing(self): if not self.request: return data = [] dataLength = 0 done = False while dataLength < self.bufferSize: if self.partBoundary: dataLength += len(self.partBoundary) data.append(self.partBoundary) self.partBoundary = None p = self.fileObject.read( min(self.bufferSize - dataLength, self._partSize - self._partBytesWritten)) self._partBytesWritten += len(p) dataLength += len(p) data.append(p) if self.request and self._partBytesWritten == self._partSize: try: self._nextRange() except StopIteration: done = True break self.request.write(b''.join(data)) if done: self.request.unregisterProducer() self.request.finish() self.stopProducing() class ASISProcessor(resource.Resource): """ Serve files exactly as responses without generating a status-line or any headers. Inspired by Apache's mod_asis. """ def __init__(self, path, registry=None): resource.Resource.__init__(self) self.path = path self.registry = registry or Registry() def render(self, request): request.startedWriting = 1 res = File(self.path, registry=self.registry) return res.render(request) def formatFileSize(size): """ Format the given file size in bytes to human readable format. """ if size < 1024: return '%iB' % size elif size < (1024 ** 2): return '%iK' % (size / 1024) elif size < (1024 ** 3): return '%iM' % (size / (1024 ** 2)) else: return '%iG' % (size / (1024 ** 3)) class DirectoryLister(resource.Resource): """ Print the content of a directory. @ivar template: page template used to render the content of the directory. It must contain the format keys B{header} and B{tableContent}. 
@type template: C{str} @ivar linePattern: template used to render one line in the listing table. It must contain the format keys B{class}, B{href}, B{text}, B{size}, B{type} and B{encoding}. @type linePattern: C{str} @ivar contentEncodings: a mapping of extensions to encoding types. @type contentEncodings: C{dict} @ivar defaultType: default type used when no mimetype is detected. @type defaultType: C{str} @ivar dirs: filtered content of C{path}, if the whole content should not be displayed (default to C{None}, which means the actual content of C{path} is printed). @type dirs: C{NoneType} or C{list} @ivar path: directory which content should be listed. @type path: C{str} """ template = """<html> <head> <title>%(header)s</title> <style> .even-dir { background-color: #efe0ef } .even { background-color: #eee } .odd-dir {background-color: #f0d0ef } .odd { background-color: #dedede } .icon { text-align: center } .listing { margin-left: auto; margin-right: auto; width: 50%%; padding: 0.1em; } body { border: 0; padding: 0; margin: 0; background-color: #efefef; } h1 {padding: 0.1em; background-color: #777; color: white; border-bottom: thin white dashed;} </style> </head> <body> <h1>%(header)s</h1> <table> <thead> <tr> <th>Filename</th> <th>Size</th> <th>Content type</th> <th>Content encoding</th> </tr> </thead> <tbody> %(tableContent)s </tbody> </table> </body> </html> """ linePattern = """<tr class="%(class)s"> <td><a href="%(href)s">%(text)s</a></td> <td>%(size)s</td> <td>%(type)s</td> <td>%(encoding)s</td> </tr> """ def __init__(self, pathname, dirs=None, contentTypes=File.contentTypes, contentEncodings=File.contentEncodings, defaultType='text/html'): resource.Resource.__init__(self) self.contentTypes = contentTypes self.contentEncodings = contentEncodings self.defaultType = defaultType # dirs allows usage of the File to specify what gets listed self.dirs = dirs self.path = pathname def _getFilesAndDirectories(self, directory): """ Helper returning files and directories 
in given directory listing, with attributes to be used to build a table content with C{self.linePattern}. @return: tuple of (directories, files) @rtype: C{tuple} of C{list} """ files = [] dirs = [] for path in directory: if _PY3: if isinstance(path, bytes): path = path.decode("utf8") url = quote(path, "/") escapedPath = escape(path) childPath = filepath.FilePath(self.path).child(path) if childPath.isdir(): dirs.append({'text': escapedPath + "/", 'href': url + "/", 'size': '', 'type': '[Directory]', 'encoding': ''}) else: mimetype, encoding = getTypeAndEncoding(path, self.contentTypes, self.contentEncodings, self.defaultType) try: size = childPath.getsize() except OSError: continue files.append({ 'text': escapedPath, "href": url, 'type': '[%s]' % mimetype, 'encoding': (encoding and '[%s]' % encoding or ''), 'size': formatFileSize(size)}) return dirs, files def _buildTableContent(self, elements): """ Build a table content using C{self.linePattern} and giving elements odd and even classes. """ tableContent = [] rowClasses = itertools.cycle(['odd', 'even']) for element, rowClass in zip(elements, rowClasses): element["class"] = rowClass tableContent.append(self.linePattern % element) return tableContent def render(self, request): """ Render a listing of the content of C{self.path}. """ request.setHeader(b"content-type", b"text/html; charset=utf-8") if self.dirs is None: directory = os.listdir(self.path) directory.sort() else: directory = self.dirs dirs, files = self._getFilesAndDirectories(directory) tableContent = "".join(self._buildTableContent(dirs + files)) header = "Directory listing for %s" % ( escape(unquote(nativeString(request.uri))),) done = self.template % {"header": header, "tableContent": tableContent} if _PY3: done = done.encode("utf8") return done def __repr__(self): return '<DirectoryLister of %r>' % self.path __str__ = __repr__
Architektor/PySnip
venv/lib/python2.7/site-packages/twisted/web/static.py
Python
gpl-3.0
35,214
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt

from djangovirtualpos import views as djangovirtualpos_views

from djshop.apps.club.models import CreditCardReference
from djshop.apps.sale.models import Sale


@csrf_exempt
def set_attributes(request, member_id):
    """Delegate payment-attribute setup to djangovirtualpos.

    ``member_id`` is accepted for URL-pattern compatibility but is not
    used by the delegated call.
    """
    return djangovirtualpos_views.set_payment_attributes(
        request,
        CreditCardReference,
        "club:subscription_ok",
        "club:subscription_cancel",
        reference_number="request")


@csrf_exempt
def confirm(request, virtualpos_type):
    """Confirmation callback invoked by the bank's virtual POS."""
    return djangovirtualpos_views.confirm_payment(
        request, virtualpos_type, CreditCardReference)


def _reference_context(sale_code):
    # Shared lookup for the ok/cancel landing pages: resolve the credit
    # card reference by its code and expose it plus its member.
    reference = CreditCardReference.objects.get(code=sale_code)
    return {"reference": reference, "member": reference.member}


def ok(request, sale_code):
    """Render the landing page for a successfully completed subscription."""
    return render(
        request,
        "club/credit_card_references/subscription/ok.html",
        _reference_context(sale_code))


def cancel(request, sale_code):
    """Render the landing page for a cancelled subscription."""
    return render(
        request,
        "club/credit_card_references/subscription/cancel.html",
        _reference_context(sale_code))
diegojromerolopez/djshop
src/djshop/apps/club/views/credit_card_references.py
Python
mit
1,420
# Copyright (C) 2011 Jayson Vaughn <vaughn.jayson@gmail.com> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>.
thedonvaughn/cover_grabber
tests/__init__.py
Python
gpl-3.0
698
#!/usr/bin/python """ This script receives as input a list of URLs and gives as output the pages downloaded. It is a sort of simplified crawler. The pages are output in text format, not in HTML, because our goal is to use text processing tools on it. The output format is a folder with a set of """ # TODO: separate the different classes in different files. Create a data # structure for a query and for a downloaded page with a decent to_xml method. # Hard refactoring and some deal of software engineering required! import sys import re import pdb import subprocess import threading import random import time import Queue import urllib2 import datetime import chardet import httplib import socket ################################################################################ # CONSTANTS ################################################################################ XML_HEADER = """<?xml version="1.0" encoding="UTF-8"?> <!DOCTYPE page SYSTEM "page.dtd"> <page> <url>%(url)s</url> <title>%(title)s</title> <charset>%(charset)s</charset> <charsetsource>%(charset_source)s</charsetsource> <date-downloaded>%(datedown)s</date-downloaded> <queries> %(queries)s </queries> <text>""" XML_FOOTER = """ </text> </page>""" QUERIES_ELEM = """<query> <keywords></keywords> <!-- The query keywords --> <date-queried></date-queried> <!-- Date when google retrieved the URL --> <lang></lang> <!-- Language filter used for google --> <position></position> <!-- Index/position in the results list --> <totalresults></totalresutls> <!-- Total number of pages matching the keywords --> <snippet></snippet> <!-- Search snippet text --> </query> """ MAX_WIDTH = 1024 NUMBER_OF_THREADS = 50 TIMEOUT = 60 # seconds, for downloading a page SEP = "\t" # LOG FILE CSV SEPARATOR HTTP_CODES = { 0: "SUCCESS", 300: "ERR-MULTIPLE-CHOICE", 301: "ERR-?????", 302: "ERR-MOVED-TEMPORARILY", 400: "ERR-BADREQUEST", 403: "ERR-FORBIDDEN", 404: "ERR-NOTFOUND", 410: "ERR-????????", 500: "ERR-INTERNALSERVER", 503: 
"ERR-UNAVAILABLE", 502: "ERR-???????", 504: "ERR-GATEWAYTIMEOUT", -994: "ERR-TIMEOUT", -995: "ERR-SOCKET", -996: "ERR-BADSTATUS", -997: "ERR-NETWORK", -998: "ERR-CHARSET", -999: "ERR-UNKNOWN" } # Obsolete: wget is not used anymore to download the pages. urllib2 replaces it # WGET_CODES = { 0: "SUCCESS", # 1: "ERR-GENERIC", # 2: "ERR-PARSE", # 3: "ERR-IO", # 4: "ERR-NETWORK", # 5: "ERR-SSL", # 6: "ERR-AUTH", # 7: "ERR-PROTOCOL", # 8: "ERR-SERVER" } #0 No problems occurred. #1 Generic error code. #2 Parse error---for instance, when parsing command-line options, the # .wgetrc or .netrc... #3 File I/O error. #4 Network failure. #5 SSL verification failure. #6 Username/password authentication failure. #7 Protocol errors. #8 Server issued an error response. ################################################################################ class CleanThread( threading.Thread ) : """ This daemon thread receives an HTML file and cleans it, writing it to a file and then logging the action to the log file """ ################################################################################ def __init__( self, clean_queue, log_file, lang, prefix ) : """ Override constructor so that the object has access to the pages queue """ threading.Thread.__init__( self ) self.clean_queue = clean_queue self.log_file = log_file # Writable open file descriptor self.lang = lang self.prefix = prefix ################################################################################ def run( self ) : """ Override thread run function. This is where the actual code of the thread runs. It's a daemon thread, so it will run "forever" until the main program ends. THis thread must be unique running. 
We could model it as part of the main program, but since it is called from the other threads it's easier to model it as a parallel single thread """ while True : #grabs downloaded page from queue cq_item = self.clean_queue.get() ( url, source_text, exit_status, charset, charset_source, timestamp ) = cq_item if exit_status == 0 : cleaned_page = self.clean_page( source_text, charset ) ( page_title, page_text ) = cleaned_page self.write_output( url, page_title, page_text, \ charset, charset_source, timestamp ) print >> sys.stderr, "Saved " + url else : err_msg = "Error " + self.get_error_reason( exit_status ) + " " + url print >> sys.stderr, err_msg #write log message to file log_file.write( self.log_message( url, exit_status, timestamp ) ) #signals to queue job is done self.clean_queue.task_done() ################################################################################ def clean_page( self, source_text, encoding ) : """ @result A tuple (title, output) containing two strings, the first is shorter and contains the content of the page <title> head. The second is a long string with line breaks containing the clean text extracted from the HTML. 
""" global MAX_WIDTH #pdb.set_trace() CMD_CONVERT = "lynx -force_html -nolist -width %(w)d -dump -stdin \ -display_charset UTF-8 -assume_local_charset %(c)s"\ % { "w": MAX_WIDTH, "c" : encoding } html2txt = subprocess.Popen( CMD_CONVERT, shell=True, \ stdout=subprocess.PIPE, \ stdin=subprocess.PIPE ) (output, error) = html2txt.communicate( source_text ) #TODO: treat error Popen.returncode source_text = source_text.replace( "\r", "\n" ) title = re.search( "<title>[^<]*</title>", \ source_text.replace("\n"," "), \ flags=re.IGNORECASE ) if title : title = re.sub( "<[^>]*>", "", title.group() ).strip() title = "" else : title = "" return ( title, output ) ################################################################################ def get_error_reason( self, exit_status ) : """ Returns a short error message that indicates the error reason. @param exit_status The exit status code from the download function @return A short string error containing the reason message and the error code in parentheses """ global HTTP_CODES str_err = HTTP_CODES.get( exit_status, "UNKNOWN-" + str(exit_status) ) return str_err + "(" + str(exit_status) + ")" ################################################################################ def log_message( self, url, exit_status, timestamp ) : """ Create log message and send to logging thread """ global SEP exit_msg = self.get_error_reason( exit_status ) return url + SEP + exit_msg + SEP + timestamp + SEP + self.lang + "\n" ################################################################################ def write_output( self, url, page_title, page_text, charset, \ charset_source, timestamp ) : """ Writes the contents of a downloaded and cleaned page into a XML file @param url The complete URL of the page @param page_title The content of <title> head as given by `download` @param page_text The HTML-extracted page text as given by `download` @param charset The character encoding returned by `download` @param charset_source The place where the 
character encoding comes from. """ global XML_HEADER global XML_FOOTER global QUERIES_ELEM filename = self.filenamize( url ) fileout = open( self.prefix + "/" + filename + ".xml", "w" ) fileout.write( XML_HEADER % { "url" : url, \ "title" : page_title, \ "charset" : charset, \ "charset_source" : charset_source, \ "datedown" : timestamp, \ "queries" : QUERIES_ELEM } ) #TODO: replace empty query by the query info coming from input URL file text = self.clean_lynx( page_text ) fileout.write( text ) fileout.write( XML_FOOTER ) fileout.close() ################################################################################ def filenamize( self, url ) : """ Remove special characters from URL and trim the name """ rand_factor = str( random.randint(10000,99999) ) # Maximal filename size is 120, so we get 115 charcters from the # beginning of the URL (modulo special characters) and concatenate a # random 5-digit integer, thus reducing the probability of collisions # for two websites whose URL shares the first 115 characters. return url.replace("/","").replace(":","").replace("?","")\ .replace("&","").replace(".","")[:115] + rand_factor ################################################################################ def recreate_paragraphs( self, text ) : """ This function simply removes the spacing added by lynx's automatic word wrapping (which cannot be turned off by any command line option). This function is not guaranteed to work and should be evaluated at some point. @param text The input text generated by lynx @return The same text with paragraphs on a single line (no wrapping) """ out_text = "" prev_line = "" started = False for line in text.split("\n") : # Little workaround to ignore lines starting with # at the beginning # of the document. 
This is how Lynx represents some kinds of <link> # elements which have no visible counterpart if line.startswith( " #" ) and not started : continue elif not started : started = True # Assumes that word wrapping will never find a word longer than 70 # chars (which might happen, though, e.g. if it's an URL) if re.match( " [^\* ]", line ) and \ re.match( " [^\* ]", prev_line ) and \ len(prev_line) > MAX_WIDTH - 70 : out_text += " " + line.strip() else : out_text += "\n" + line prev_line = line return out_text ################################################################################ def strip_xml( self, the_string ) : """ Replaces the XML/HTML special characters in the text, escaping them with their corresponding entities. This helps generating well-formed XML in the output, regardless of the text in the website. @param the_string A text that should be escaped @return The escaped text with special characters replaces by entities """ return the_string.replace( "&", "&amp;" ).replace( "<", "&lt;" )\ .replace( ">", "&gt;" ).replace( "\"", "&quot;" ) ################################################################################ def clean_lynx( self, text ) : """ Cleans the format output by lynx in order to obtain a more "NLP- friendly" version. This includes removing bulltets and numbers in lists, removing word wrap from paragraphs and extra indenting indicating headers. 
This information is then appended to the string in the form of an attribute on the XML element <s> @param text The text generated by lynx @param text The same text in a more NLP-friendly format """ # First remove word wrapping from paragraphs clean_text = self.recreate_paragraphs( text ) # Remove text fields and horizontal lines, represented by underscore clean_text = re.sub( "_+","", clean_text ) out_text = "" # Process the text line by line for line in clean_text.split( "\n" ) : line = re.sub( "\[.*\] *", "", line ) # Escape special XML characters line = self.strip_xml( line ) # Any line with no heading space is a header if re.match( "^[^ ]", line ) : out_text += " <s source=\"h\">" + line.strip() + "</s>\n" # Any line with a heading special bullet symbol is a list item elif re.match( "^ +[\*\+o#@] ", line ) : nobullet = line.strip()[2:] # Remove additional manual numbering added to the list item nobullet = re.sub( "^ *[0-9\.]+ ", "", nobullet ) out_text += " <s source=\"li\">" + nobullet + "</s>\n" # Any line with heading number followed by dot is numbered list item elif re.match( "^ *[0-9]+\. ", line ) : nobullet = re.sub( "^ *[0-9\.]+ ", "", line ) out_text += " <s source=\"li\">" + nobullet + "</s>\n" # Other non-empty indented lines are paragraphs elif re.match( "^ +[^ ]", line ) and len(line.strip()) > 0 : if not line.strip().startswith( "IFRAME:" ) : out_text += " <s source=\"p\">" + line.strip() + "</s>\n" # Other lines are ignored, but for debugging you can uncomment the # next two code lines #elif len( line.strip() ) > 0 : # print >> sys.stderr, "\n\n\n" + line + "\n\n\n" return out_text.strip() ################################################################################ ################################################################################ class DownloadThread( threading.Thread ) : """ This thread is responsible for downloading a give URL, cleaning the text and writing the output to a file. 
""" ################################################################################ def __init__( self, url_queue, clean_queue ) : """ Override constructor so that the object has access to the url queue """ threading.Thread.__init__( self ) self.url_queue = url_queue self.clean_queue = clean_queue ################################################################################ def run( self ) : """ Override thread run function. This is where the actual code of the thread runs. It's a daemon thread, so it will run "forever" until the main program ends. """ while True : #grabs URL from queue url = self.url_queue.get() # Download the page and put in HTML print >> sys.stderr, "Downloading " + url downloaded_p = self.download( url ) # returns a tuple self.clean_queue.put( ( url, ) + downloaded_p ) #signals to queue job is done self.url_queue.task_done() ################################################################################ def get_charset( self, source_text, page_descr ) : """ Tries to discover/guess the character encoding (utf-8, latin1, etc.) of a downloaded page. 
""" charset_source = "http-header" # Assume it's in the HTTP header charset = None # First, tries to get the char encoding from the header charset = page_descr.info().getparam("charset") # Second, if the header is not present, tries to get it from the # HTML header if charset is None : # starts with meta, has charset, finished with > pat = "<meta[^>]*charset=([^ >]*)[^>]*>" charset = re.search( pat, source_text, re.DOTALL) if charset is not None : charset = charset.group(1).replace( "\"", "" ) charset_source = "html-header" # Third, it the HTML header is not present, tries to get it from # automatic detection if charset is None : charset = chardet.detect( source_text )[ 'encoding' ] charset_source = "detected" # If one of the former succeeded, convert the detected encoding to # utf-8 # if encoding is not None : # unicode_obj = unicode( source_text, encoding, errors="ignore" ) # source_text = unicode_obj.encode( 'utf-8' ) # exit_status = 0 # Otherwise ignore the page and return error\ if charset is None : raise CharsetError( "character set not detected" ) return ( charset, charset_source ) ################################################################################ def download( self, url ) : """ Downloads the raw HTML source, then uses the same program to extract the text from it. It must be done as a 2-step process otherwise we cannot acceed to the page title information. @param url The complete URL of the page to download. @result A tuple (title, text) containing two strings, the first is shorter and contains the content of the page <title> head. The second is a long string with line breaks containing the clean text extracted from the HTML. 
""" global TIMEOUT # in seconds # Initialize timestamp = str( datetime.datetime.now() ) ( source_text, charset, charset_source ) = ( None, None, None ) try : page_descr = urllib2.urlopen( url, timeout=TIMEOUT ) source_text = page_descr.read() ( charset, charset_source ) = self.get_charset( source_text, page_descr ) exit_status = 0 except urllib2.HTTPError as err : exit_status = err.code except CharsetError as err : exit_status = -998 except urllib2.URLError as err : exit_status = -997 except httplib.BadStatusLine as err : exit_status = -996 # Bad http status sent by server except socket.error as err : exit_status = -995 # Socket connection error except socket.timeout as err : exit_status = -994 # Connection timed out except Exception as e : ms = "\n\nUNKNOWN EXCEPTION " + str(type(e)) + " " + str(e) + "\n\n" print >> sys.stderr, ms exit_status = -999 return ( source_text, exit_status, charset, charset_source, timestamp ) #CMD_WGET = "wget --timeout %(to)d -t 1 -q -O - \"%(url)s\"" % { "to": TIMEOUT, "url": url } #download = subprocess.Popen( CMD_WGET, shell=True, \ # stdout=subprocess.PIPE ) #(source_text, error) = download.communicate() #exit_status = download.returncode #return ( source_text, exit_status ) ################################################################################ ################################################################################ class CharsetError( Exception ) : """ Exception representing the fact that the character encoding was not detected. 
""" def __init__( self, message ): self.message = message def __str__( self ): return repr( self.message ) ################################################################################ def read_log( log_filename ) : """ """ logfile = open( log_filename ) urls_to_download = {} # TODO: ignore commented lines starting with # (useful for debugging) for line in logfile.readlines() : line = line.strip() url = line.split(" ")[0] count = urls_to_download.get( url, 0 ) urls_to_download[ url ] = count + 1 logfile.close() return urls_to_download ################################################################################ if len( sys.argv ) == 4 : log_filename = sys.argv[ 1 ] lang = sys.argv[ 2 ] prefix = sys.argv[ 3 ] else : usage = "Usage: python " + sys.argv[ 0 ] + " <log_file> <lang> <out_folder>" print >> sys.stderr, usage sys.exit( -1 ) urls_to_download = read_log( log_filename ) url_queue = Queue.Queue() clean_queue = Queue.Queue() try : # Create cleaner thread (1 instance) log_file = open( prefix + "/log.txt", "w" ) clean_thread = CleanThread( clean_queue, log_file, lang, prefix ) clean_thread.setDaemon( True ) clean_thread.start() # Create download threads (several instances) for i in range( NUMBER_OF_THREADS ): down_thread = DownloadThread( url_queue, clean_queue ) down_thread.setDaemon( True ) down_thread.start() started = time.time() for url in urls_to_download.keys() : url_queue.put( url ) clean_queue.join() url_queue.join() elapsed = time.time() - started print "Time to download %(nb)d URLs: %(time)d" % { "time": elapsed, \ "nb": len(urls_to_download) } except Exception, err : print >> sys.stderr, "FATAL ERROR " + str(err) finally: log_file.close() ################################################################################ # UNUSED # Remove tables from HTML source, avoiding spurious table lines being # considered as paragraphs # NO. What if the whole website is designed as a table? 
#def remove_tables( source_text ) : # clean_source_text = "" # in_table = False # for line in source_text.split( "\n" ) : # if not in_table : # if line.find( "<table" ) >= 0 : # #pdb.set_trace() # clean_source_text += line[ :line.find( "<table" ): ] + "\n" # in_table = True # else : # clean_source_text += line + "\n" # else : # if line.find( "</table>" ) >= 0 : # #pdb.set_trace() # clean_source_text += line[ line.find( "</table>" ) + 8:] + "\n" # in_table = False # return clean_source_text ################################################################################
ceramisch/CAMELEON-cc
bin/new_download.py
Python
gpl-3.0
20,346
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- """Table transformer for storage commands""" from azure.cli.core.commands.transform import build_table_output from azure.cli.core.profiles import get_sdk, ResourceType from knack.log import get_logger logger = get_logger(__name__) def transform_container_list(result): return build_table_output(result, [ ('Name', 'name'), ('Lease Status', 'properties.leaseStatus'), ('Last Modified', 'properties.lastModified') ]) def transform_container_show(result): return build_table_output(result, [ ('Name', 'name'), ('Lease Status', 'properties.lease.status'), ('Last Modified', 'properties.lastModified') ]) def transform_blob_output(result): return build_table_output(result, [ ('Name', 'name'), ('Blob Type', 'properties.blobType'), ('Blob Tier', 'properties.blobTier'), ('Length', 'properties.contentLength'), ('Content Type', 'properties.contentSettings.contentType'), ('Last Modified', 'properties.lastModified'), ('Snapshot', 'snapshot') ]) def transform_share_list(result): return build_table_output(result, [ ('Name', 'name'), ('Quota', 'properties.quota'), ('Last Modified', 'properties.lastModified') ]) def transform_file_output(result): """ Transform to convert SDK file/dir list output to something that more clearly distinguishes between files and directories. 
""" from collections import OrderedDict new_result = [] iterable = result if isinstance(result, list) else result.get('items', result) for item in iterable: new_entry = OrderedDict() entity_type = item['type'] # type property is added by transform_file_directory_result is_dir = entity_type == 'dir' new_entry['Name'] = item['name'] + '/' if is_dir else item['name'] new_entry['Content Length'] = ' ' if is_dir else item['properties']['contentLength'] new_entry['Type'] = item['type'] new_entry['Last Modified'] = item['properties']['lastModified'] or ' ' new_result.append(new_entry) return sorted(new_result, key=lambda k: k['Name']) def transform_entity_show(result): from collections import OrderedDict timestamp = result.pop('Timestamp') result.pop('etag') # Reassemble the output new_result = OrderedDict() new_result['Partition'] = result.pop('PartitionKey') new_result['Row'] = result.pop('RowKey') for key in sorted(result.keys()): new_result[key] = result[key] new_result['Timestamp'] = timestamp return new_result def transform_message_show(result): from collections import OrderedDict ordered_result = [] for item in result: new_result = OrderedDict() new_result['MessageId'] = item.pop('id') new_result['Content'] = item.pop('content') new_result['InsertionTime'] = item.pop('insertionTime') new_result['ExpirationTime'] = item.pop('expirationTime') for key in sorted(item.keys()): new_result[key] = item[key] ordered_result.append(new_result) return ordered_result def transform_boolean_for_table(result): for key in result: result[key] = str(result[key]) return result def transform_file_directory_result(cli_ctx): """ Transform a the result returned from file and directory listing API. This transformer add and remove properties from File and Directory objects in the given list in order to align the object's properties so as to offer a better view to the file and dir list. 
""" def transformer(result): if getattr(result, 'next_marker', None): logger.warning('Next Marker:') logger.warning(result.next_marker) t_file, t_dir = get_sdk(cli_ctx, ResourceType.DATA_STORAGE, 'File', 'Directory', mod='file.models') return_list = [] for each in result: if isinstance(each, t_file): delattr(each, 'content') setattr(each, 'type', 'file') elif isinstance(each, t_dir): setattr(each, 'type', 'dir') return_list.append(each) return return_list return transformer
yugangw-msft/azure-cli
src/azure-cli/azure/cli/command_modules/storage/_format.py
Python
mit
4,592
# /setup.py # # Installation and setup script for polysquare-cmake-linter # # See LICENCE.md for Copyright information """Installation and setup script for polysquare-cmake-linter.""" from setuptools import find_packages from setuptools import setup setup(name="artificial-intelligence", version="0.0.1", description="CITS3003 Artificial Intelligence Examples", long_description_markdown_filename="README.md", author="Sam Spilsbury", author_email="smspillaz@gmail.com", url="http://github.com/polysquare/artificial-intelligence", classifiers=["Development Status :: 3 - Alpha", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.1", "Programming Language :: Python :: 3.2", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License"], license="MIT", keywords="development linters", packages=find_packages(exclude=["tests"]), require=[ "sympy" ], extras_require={ "test": ["coverage", "coveralls", "nose", "nose-parameterized", "testtools"] }, test_suite="nose.collector", zip_safe=True, include_package_data=True)
smspillaz/artificial-intelligence
setup.py
Python
mit
1,589
from slackbot.bot import listen_to @listen_to("has joined the channel") def hello_message(message): channel_body = getattr(message.channel, "_body", None) if channel_body['name'] == u'arc_잡담': username = u'<@{}>'.format(message._get_user_id()) msg = "%s 님 안녕하세요! 자기소개 부탁드려요~ @_@" % username message.send(msg)
HI-ARC/hiarcbot
hiarcbot/plugins/say_hello.py
Python
apache-2.0
376
''' Created on 05-May-2016 @author: dgraja ''' from django.shortcuts import render from django.core.mail import send_mail def compose_email(request): """ Compose and send emails """ data = {"sender": "", "to": "", "cc": "", "subject": "", "body": ""} if request.POST: for key in data.keys(): if key in request.POST: data[key] = request.POST[key] if "send" in request.POST and request.POST["send"] == "send": # Code to send the email print data # send_mail(subject=data["subject"], # message=data["body"], # from_email=data["sender"], # recipient_list=data["to"].replace(',', ';').split(';')) pass return render(request, "simple/send_mail.html", data)
rajadg/Python
django01/application/simple/mailer.py
Python
gpl-3.0
890
# TODO : TRANSFORM INTO A CLASS AND CREATE A REPORT OF REGION TRIMMED #~~~~~~~GLOBAL IMPORTS~~~~~~~# # Standard library packages import from os import remove, path import gzip from time import time from sys import stdout # Third party package import from Bio import SeqIO # Local library packages import from pyDNA.Utilities import import_seq, file_basename, mkdir from Blast import Blastn #~~~~~~~MAIN METHODS~~~~~~~# def mask ( subject_fasta, hit_list, ref_outdir="./references/", ref_outname="masked_ref.fa", compress_ouput=True ): """ Import a reference fasta sequence, Mask positions indicated by hits from a hit_list and write the modified fasta sequence in a new file. @param subject_fasta Fasta sequence of the subject to edit (can be gzipped) @param hit_list List of hit objects. Hits need at least 3 fields named s_id, s_start and s_end coresponding to the name of the sequence matched, and the hit start/end (0 based). @param ref_outdir Directory where the masked reference will be created @param ref_outname Name of the masked reference @param compress_ouput If true the output will be gzipped @return A path to the modified sequence if the hit list was valid. 
""" # Test if object the first object of hit_list have the require s_id, s_start and s_end fields try: a = hit_list[0].s_id a = hit_list[0].s_start a = hit_list[0].s_end except IndexError: print ("No hit found, The subject fasta file will not be edited") return subject_fasta except AttributeError as E: print ("The list provided does not contain suitable hit object, The subject fasta file will not be edited") return subject_fasta # Initialize output folder mkdir(ref_outdir) # Initialize input fasta file if subject_fasta[-2:].lower() == "gz": in_handle = gzip.open(subject_fasta, "r") else: in_handle = open(subject_fasta, "r") # Initialize output fasta file if compress_ouput: ref_path = path.join (ref_outdir, ref_outname+".gz") out_handle = gzip.open(ref_path, 'w') else: ref_path = path.join (ref_outdir, ref_outname) out_handle = open(ref_path, 'w') # Generate a list of ref that will need to be modified id_list = {hit.s_id:0 for hit in hit_list}.keys() # Iterate over record in the subject fasta file print ("Masking hit positions and writting a new reference for {} ".format(ref_outname)) i=j=0 start_time = time() for record in SeqIO.parse(in_handle, "fasta"): # Progress Marker stdout.write("*") stdout.flush() # Check if the record is in the list of record to modify if record.id in id_list: i+=1 #~print ("Hit found in {}. 
Editing the sequence".format(record.id)) # Casting Seq type to MutableSeq Type to allow string editing record.seq = record.seq.tomutable() # For each hit in the list of hit found for hit in hit_list: if record.id == hit.s_id: # For all position between start and end coordinates modify the base by N for position in range (hit.s_start, hit.s_end): record.seq[position]= 'n' else: j+=1 #~print ("No hit found in {}".format(record.id)) # Finally write the sequence modified or not out_handle.write(record.format("fasta")) print("") # Report informations print("{} sequence(s) from {} modified in {}s".format(i,ref_outname, round(time()-start_time),2)) # Close files and return the masked ref path in_handle.close() out_handle.close() return ref_path
a-slide/pyDNA
RefMasker.py
Python
gpl-2.0
3,819
# -*- coding: utf-8 -*- # Generated by Django 1.11.4 on 2017-09-15 12:18 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('blog', '0001_initial'), ] operations = [ migrations.CreateModel( name='Post2', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=255)), ('slug', models.SlugField(max_length=255, unique=True)), ('description', models.CharField(max_length=255)), ('content', models.TextField()), ('published', models.BooleanField(default=True)), ('created', models.DateTimeField(auto_now_add=True)), ], ), ]
skyrred/django_local_library
blog/migrations/0002_post2.py
Python
gpl-3.0
890
# Imports operators dynamically while keeping the package API clean, # abstracting the underlying modules from airflow.utils import import_module_attrs as _import_module_attrs # These need to be integrated first as other operators depend on them _import_module_attrs(globals(), { 'check_operator': [ 'CheckOperator', 'ValueCheckOperator', 'IntervalCheckOperator', ], }) _operators = { 'bash_operator': ['BashOperator'], 'python_operator': ['PythonOperator', 'BranchPythonOperator'], 'hive_operator': ['HiveOperator'], 'presto_check_operator': [ 'PrestoCheckOperator', 'PrestoValueCheckOperator', 'PrestoIntervalCheckOperator', ], 'dummy_operator': ['DummyOperator'], 'email_operator': ['EmailOperator'], 'hive_to_samba_operator': ['Hive2SambaOperator'], 'mysql_operator': ['MySqlOperator'], 'sqlite_operator': ['SqliteOperator'], 'mysql_to_hive': ['MySqlToHiveTransfer'], 'postgres_operator': ['PostgresOperator'], 'sensors': [ 'SqlSensor', 'ExternalTaskSensor', 'HivePartitionSensor', 'S3KeySensor', 'S3PrefixSensor', 'HdfsSensor', 'TimeSensor', 'TimeDeltaSensor', 'HttpSensor' ], 'subdag_operator': ['SubDagOperator'], 'hive_stats_operator': ['HiveStatsCollectionOperator'], 's3_to_hive_operator': ['S3ToHiveTransfer'], 'hive_to_mysql': ['HiveToMySqlTransfer'], 's3_file_transform_operator': ['S3FileTransformOperator'], 'http_operator': ['SimpleHttpOperator'], 'hive_to_druid': ['HiveToDruidTransfer'], 'jdbc_operator': ['JdbcOperator'], 'mssql_operator': ['MsSqlOperator'], 'mssql_to_hive': ['MsSqlToHiveTransfer'], 'slack_operator': ['SlackAPIOperator', 'SlackAPIPostOperator'], 'generic_transfer': ['GenericTransfer'], } _import_module_attrs(globals(), _operators) from airflow.models import BaseOperator def integrate_plugins(): """Integrate plugins to the context""" from airflow.plugins_manager import operators as _operators for _operator in _operators: globals()[_operator.__name__] = _operator
bellhops/airflow
airflow/operators/__init__.py
Python
apache-2.0
2,163
#!/usr/bin/python # -*- coding: utf-8 -*- from __future__ import print_function import json import time import os from collections import namedtuple # Python 2/3 compatibility # Python 2: try: from urlparse import urlparse from urllib import urlencode from urllib2 import urlopen, Request, HTTPError except: pass # Python 3: try: from urllib.parse import urlparse, urlencode from urllib.request import urlopen, Request from urllib.error import HTTPError except: pass import sys # Python 2: if sys.version_info < (3, 0): def input(str): return raw_input(str) # Python 3, backward compatibility with unicode test if sys.version_info >= (3, 0): unicode = type(str) version = '1.0.0' submitt_url = \ 'https://www.coursera.org/api/onDemandProgrammingScriptSubmissions.v1' Metadata = namedtuple("Metadata", ['assignment_key', 'name', 'part_data']) Part = namedtuple("Part", ['id', 'input_file', 'solver_file', 'name']) def load_metadata(metadata_file_name='_coursera'): ''' Parses an assignment metadata file Args: metadata_file_name (str): location of the metadata file Returns: metadata as a named tuple structure ''' if not os.path.exists(metadata_file_name): print('metadata file "%s" not found' % metadata_file_name) quit() try: with open(metadata_file_name, 'r') as metadata_file: url = metadata_file.readline().strip() name = metadata_file.readline().strip() part_data = [] for line in metadata_file.readlines(): if ',' in line: line_parts = line.split(',') line_parts = [x.strip() for x in line_parts] assert(len(line_parts) == 4) part_data.append(Part(*line_parts)) if len(url) <= 0: print('Empty url in _coursera file: %s' % metadata_file_name) quit() if len(name) <= 0: print('Empty assignment name in _coursera file: %s' % metadata_file_name) quit() except Exception as e: print('problem parsing assignment metadata file') print('exception message:') print(e) quit() return Metadata(url, name, part_data) def part_prompt(problems): ''' Prompts the user for which parts of the assignment they 
would like to submit. Args: problems: a list of assignment problems Returns: the selected subset of problems ''' count = 1 print('Hello! These are the assignment parts that you can submit:') for i, problem in enumerate(problems): print(str(count) + ') ' + problem.name) count += 1 print('0) All') part_text = input('Please enter which part(s) you want to submit (0-%d): ' % (count-1)) selected_problems = [] selected_models = [] for item in part_text.split(','): try: i = int(item) except: print('Skipping input "' + item + '". It is not an integer.') continue if i >= count or i < 0: print('Skipping input "' + item + '". It is out of the valid range (0-%d).' % (count-1)) continue if i == 0: selected_problems.extend(problems) continue if i <= len(problems): selected_problems.append(problems[i-1]) if len(selected_problems) <= 0: print('No valid assignment parts identified. Please try again. \n') return part_prompt(problems) else: return selected_problems def compute(metadata, solver_file_override=None): ''' Determines which assignment parts the student would like to submit. Then computes his/her answers to those assignment parts Args: metadata: the assignment metadata solver_file_override: an optional model file to override the metadata default Returns: a dictionary of results in the format Coursera expects ''' if solver_file_override is not None: print('Overriding solver file with: '+solver_file_override) selected_problems = part_prompt(metadata.part_data) results = {} #submission needs empty dict for every assignment part results.update({prob_data.id : {} for prob_data in metadata.part_data}) for problem in selected_problems: if solver_file_override != None: solver_file = solver_file_override else: solver_file = problem.solver_file if not os.path.isfile(solver_file): print('Unable to locate assignment file "%s" in the current working directory.' 
% solver_file) continue # if a relative path is given, add that patth to system path so import will work if os.path.sep in solver_file: split = solver_file.rfind(os.path.sep) path = solver_file[0:split] file_name = solver_file[split+1:] sys.path.insert(0, path) solver_file = file_name submission = output(problem.input_file, solver_file) if submission != None: results[problem.id] = {'output':submission} print('\n== Computations Complete ...') return results def load_input_data(file_location): with open(file_location, 'r') as input_data_file: input_data = ''.join(input_data_file.readlines()) return input_data def output(input_file, solver_file): ''' Attempts to execute solve_it locally on a given input file. Args: input_file: the assignment problem data of interest solver_file: a python file containing the solve_it function Returns: the submission string in a format that the grader expects ''' try: pkg = __import__(solver_file[:-3]) # remove '.py' extension if not hasattr(pkg, 'solve_it'): print('the solve_it() function was not found in %s' % solver_file) quit() except ImportError: print('import error with python file "%s".' % solver_file) quit() solution = '' start = time.clock() try: solution = pkg.solve_it(load_input_data(input_file)) except Exception as e: print('the solve_it(input_data) method from solver.py raised an exception') print('try testing it with python ./solver.py before running this submission script') print('exception message:') print(str(e)) print('') return 'Local Exception =(' end = time.clock() if not (isinstance(solution, str) or isinstance(solution, unicode)): print('Warning: the solver did not return a string. 
The given object will be converted with the str() method.') solution = str(solution) print('Submitting: ') print(solution) return solution.strip() + '\n' + str(end - start) def login_dialog(assignment_key, results, credentials_file_location = '_credentials'): ''' Requests Coursera login credentials from the student and submits the student's solutions for grading Args: assignment_key: Coursera's assignment key results: a dictionary of results in Cousera's format credentials_file_location: a file location where login credentials can be found ''' success = False tries = 0 while not success: # stops infinate loop when credentials file is incorrect if tries <= 0: login, token = login_prompt(credentials_file_location) else: login, token = login_prompt('') code, responce = submit_solution(assignment_key, login, token, results) print('\n== Coursera Responce ...') #print(code) print(responce) if code != 401: success = True else: print('\ntry logging in again') tries += 1 def login_prompt(credentials_file_location): ''' Attempts to load credentials from a file, if that fails asks the user. Returns: the user's login and token ''' if os.path.isfile(credentials_file_location): try: with open(credentials_file_location, 'r') as metadata_file: login = metadata_file.readline().strip() token = metadata_file.readline().strip() metadata_file.close() except: login, token = basic_prompt() else: login, token = basic_prompt() return login, token def basic_prompt(): ''' Prompt the user for login credentials. Returns: the user's login and token ''' login = input('User Name (e-mail address): ') token = input('Submission Token (from the assignment page): ') return login, token def submit_solution(assignment_key, email_address, token, results): ''' Sends the student's submission to Coursera for grading via the submission API. 
Args: assignment_key: Coursera's assignment key email_address: the student's email token: the student's assignment token results: a dictionary of results in Cousera's format Returns: the https response code and a feedback message ''' print('\n== Connecting to Coursera ...') print('Submitting %d of %d parts' % (sum(['output' in v for k,v in results.items()]), len(results))) # build json datastructure parts = {} submission = { 'assignmentKey': assignment_key, 'submitterEmail': email_address, 'secret': token, 'parts': results } # send submission req = Request(submitt_url) req.add_header('Cache-Control', 'no-cache') req.add_header('Content-type', 'application/json') try: res = urlopen(req, json.dumps(submission).encode('utf8')) except HTTPError as e: responce = json.loads(e.read().decode('utf8')) if 'details' in responce and responce['details'] != None and \ 'learnerMessage' in responce['details']: return e.code, responce['details']['learnerMessage'] else: return e.code, 'Unexpected response code, please contact the ' \ 'course staff.\nDetails: ' + responce['message'] code = res.code responce = json.loads(res.read().decode('utf8')) if code >= 200 and code <= 299: return code, 'Your submission has been accepted and will be ' \ 'graded shortly.' return code, 'Unexpected response code, please contact the '\ 'course staff.\nDetails: ' + responce def main(args): ''' 1) Reads a metadata file to customize the submission process to a particular assignment. 2) The compute the student's answers to the assignment parts. 3) Submits the student's answers for grading. Provides the an option for saving the submissions locally. This is very helpful when testing the assignment graders. 
Args: args: CLI arguments from an argparse parser ''' # needed so that output can import from the cwd sys.path.append(os.getcwd()) if args.metadata is None: metadata = load_metadata() else: print('Overriding metadata file with: '+args.metadata) metadata = load_metadata(args.metadata) print('==\n== '+metadata.name+' Solution Submission \n==') # compute dialog results = compute(metadata, args.override) if sum(['output' in v for k,v in results.items()]) <= 0: return # store submissions if requested if args.record_submission == True: print('Recording submission as files') for sid, submission in results.items(): if 'output' in submission: directory = '_'+sid if not os.path.exists(directory): os.makedirs(directory) submission_file_name = directory+'/submission.sub' print(' writting submission file: '+submission_file_name) with open(submission_file_name,'w') as submission_file: submission_file.write(submission['output']) submission_file.close() return # submit dialog if args.credentials is None: login_dialog(metadata.assignment_key, results) else: print('Overriding credentials file with: '+args.credentials) login_dialog(metadata.assignment_key, results, args.credentials) import argparse def build_parser(): ''' Builds an argument parser for the CLI Returns: parser: an argparse parser ''' parser = argparse.ArgumentParser( description='''The submission script for Discrete Optimization assignments on the Coursera Platform.''', epilog='''Please file bugs on github at: https://github.com/discreteoptimization/assignment/issues. 
If you would like to contribute to this tool's development, check it out at: https://github.com/discreteoptimization/assignment''' ) parser.add_argument('-v', '--version', action='version', version='%(prog)s '+version) parser.add_argument('-o', '--override', help='overrides the python source file specified in the \'_coursera\' file') parser.add_argument('-m', '--metadata', help='overrides the \'_coursera\' metadata file') parser.add_argument('-c', '--credentials', help='overrides the \'_credentials\' credentials file') parser.add_argument('-rs', '--record_submission', help='records the submission(s) as files', action='store_true') return parser if __name__ == '__main__': parser = build_parser() main(parser.parse_args())
kastnerkyle/discrete-opt
week2/submit.py
Python
bsd-3-clause
13,804
#!/usr/bin/env python #from distutils.core import setup from setuptools import setup, find_packages setup(name='LitleSdkPython', version='9.12.1', description='Vantiv eCommerce SDK for Python', author='Vantiv eCommerce', author_email='SDKSupport@vantiv.com', url='https://developer.vantiv.com/community/ecommerce', packages=['litleSdkPython'], install_requires=[ 'PyXB==1.1.5', 'paramiko==1.14.0'], classifiers=[ 'Development Status :: 5 - Production/Stable', 'Environment :: Console', 'Environment :: Web Environment', 'Environment :: MacOS X' 'Environment :: Plugins' 'Environment :: Win32 (MS Windows)' 'Intended Audience :: Developers', 'Intended Audience :: System Administrators', 'Intended Audience :: Information Technology', 'License :: OSI Approved :: MIT License', 'Operating System :: OS Independent' 'Operating System :: MacOS', 'Operating System :: Microsoft', 'Operating System :: Unix', 'Operating System :: POSIX', 'Programming Language :: Python', 'Topic :: Office/Business :: Financial', 'Topic :: Software Development :: Libraries', 'Topic :: Software Development :: Libraries :: Python Modules' ], )
LitleCo/litle-sdk-for-python
setup.py
Python
mit
1,419
#!/usr/bin/python # Send all 0 to the ledpanel buf=bytearray(32*32*3) out_file = open("/sys/class/ledpanel/rgb_buffer","w") out_file.write(buf) out_file.close()
tanzilli/led-utils
off.py
Python
gpl-2.0
162
# -*- coding: utf-8 -*- # Generated by Django 1.11.5 on 2017-09-14 01:54 from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion import storage.models.labels class Migration(migrations.Migration): dependencies = [ ('storage', '0014_auto_20170914_0146'), ] operations = [ migrations.RemoveField( model_name='accesslayer', name='active_flag', ), migrations.RemoveField( model_name='accesslayer', name='created_by', ), migrations.RemoveField( model_name='accesslayer', name='creation_date', ), migrations.RemoveField( model_name='accesslayer', name='last_modified', ), migrations.RemoveField( model_name='accesslayer', name='updated_by', ), migrations.AlterField( model_name='accesslayer', name='source', field=models.ForeignKey(blank=True, default=storage.models.labels.GroupDefaultLabel('Access Layer Source'), help_text='the access layer source', null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='access_layer', to='storage.Label'), ), ]
MartinPaulo/resplat
storage/migrations/0015_auto_20170914_0154.py
Python
lgpl-3.0
1,300
# URL configuration for the django-media-tree demo project: a root template
# view, three media_tree demo views (listing, text-file detail, image
# detail), the admin site, and MEDIA_URL static serving for development.
from media_tree.models import FileNode
from media_tree.contrib.views.listing import FileNodeListingView
from media_tree.contrib.views.detail import FileNodeDetailView
from media_tree.contrib.views.detail.image import ImageNodeDetailView
from django.views.generic.base import TemplateView
from django.conf.urls import patterns, include, url
from django.contrib import admin

admin.autodiscover()

# NOTE(review): `patterns()` was deprecated in Django 1.8 and removed in
# 1.10, so this module only imports on older Django versions -- confirm the
# demo's pinned Django release before upgrading.
urlpatterns = patterns('',
    (r'^$', TemplateView.as_view(
        template_name="media_tree/base.html"
    )),
    url(r'^listing/$', FileNodeListingView.as_view(
        # notice that queryset can be any iterable, for instance a list:
        queryset=FileNode.objects.filter(level=0),
    ), name="demo_listing"),
    url(r'^files/(?P<path>.+)/$', FileNodeDetailView.as_view(
        queryset=FileNode.objects.filter(extension='txt')
    ), name="demo_detail"),
    url(r'^images/(?P<path>.+)/$', ImageNodeDetailView.as_view(
        # NOTE(review): `.get()` runs a database query at import time and
        # raises DoesNotExist if no 'Example Pictures' node exists -- verify
        # the demo fixtures always provide it.
        queryset=FileNode.objects.get(path='Example Pictures').get_descendants()
    ), name="demo_image"),
    url(r'^admin/', include(admin.site.urls)),
)

# do NOT use this on a production server
from django.conf import settings
from django.conf.urls.static import static

urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
bittner/django-media-tree
demo_project/demo_project/urls.py
Python
bsd-3-clause
1,273
# Copyright (c) Mathias Kaerlev 2011-2012. # This file is part of pyspades. # pyspades is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # pyspades is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with pyspades. If not, see <http://www.gnu.org/licenses/>. import math from random import choice from pyspades.constants import * from pyspades.common import prettify_timespan from pyspades.server import parse_command from twisted.internet import reactor from map import check_rotation class InvalidPlayer(Exception): pass class InvalidSpectator(InvalidPlayer): pass class InvalidTeam(Exception): pass def restrict(func, user_types): def new_func(connection, *arg, **kw): return func(connection, *arg, **kw) new_func.func_name = func.func_name new_func.user_types = set(user_types) return new_func def admin(func): return restrict(func, ('admin',)) def name(name): def dec(func): func.func_name = name return func return dec def alias(name): def dec(func): try: func.aliases.append(name) except AttributeError: func.aliases = [name] return func return dec def get_player(protocol, value, spectators = True): ret = None try: if value.startswith('#'): value = int(value[1:]) ret = protocol.players[value] else: players = protocol.players try: ret = players[value] except KeyError: value = value.lower() for player in players.values(): name = player.name.lower() if name == value: return player if name.count(value): ret = player except (KeyError, IndexError, ValueError): pass if ret is None: raise InvalidPlayer() elif not spectators and ret.world_object is None: 
raise InvalidSpectator() return ret def get_team(connection, value): value = value.lower() if value == 'blue': return connection.protocol.blue_team elif value == 'green': return connection.protocol.green_team raise InvalidTeam() def join_arguments(arg, default = None): if not arg: return default return ' '.join(arg) def parse_maps(pre_maps): maps = [] for n in pre_maps: if n[0]=="#" and len(maps)>0: maps[-1] += " "+n else: maps.append(n) return maps, ', '.join(maps) @admin def kick(connection, value, *arg): reason = join_arguments(arg) player = get_player(connection.protocol, value) player.kick(reason) def get_ban_arguments(connection, arg): duration = None if len(arg): try: duration = int(arg[0]) arg = arg[1:] except (IndexError, ValueError): pass if duration is None: if len(arg)>0 and arg[0] == "perma": arg = arg[1:] else: duration = connection.protocol.default_ban_time reason = join_arguments(arg) return duration, reason @admin def ban(connection, value, *arg): duration, reason = get_ban_arguments(connection, arg) player = get_player(connection.protocol, value) player.ban(reason, duration) @admin def hban(connection, value, *arg): duration = int(60) reason = join_arguments(arg) player = get_player(connection.protocol, value) player.ban(reason, duration) @admin def dban(connection, value, *arg): duration = int(1440) reason = join_arguments(arg) player = get_player(connection.protocol, value) player.ban(reason, duration) @admin def wban(connection, value, *arg): duration = int(10080) reason = join_arguments(arg) player = get_player(connection.protocol, value) player.ban(reason, duration) @admin def banip(connection, ip, *arg): duration, reason = get_ban_arguments(connection, arg) try: connection.protocol.add_ban(ip, reason, duration) except ValueError: return 'Invalid IP address/network' reason = ': ' + reason if reason is not None else '' duration = duration or None if duration is None: return 'IP/network %s permabanned%s' % (ip, reason) else: return 'IP/network 
%s banned for %s%s' % (ip, prettify_timespan(duration * 60), reason) @admin def unban(connection, ip): try: connection.protocol.remove_ban(ip) return 'IP unbanned' except KeyError: return 'IP not found in ban list' @name('undoban') @admin def undo_ban(connection, *arg): if len(connection.protocol.bans)>0: result = connection.protocol.undo_last_ban() return ('Ban for %s undone' % result[0]) else: return 'No bans to undo!' @admin def say(connection, *arg): value = ' '.join(arg) connection.protocol.send_chat(value) connection.protocol.irc_say(value) @admin def kill(connection, value): player = get_player(connection.protocol, value, False) player.kill() message = '%s killed %s' % (connection.name, player.name) connection.protocol.send_chat(message, irc = True) @admin def heal(connection, player = None): if player is not None: player = get_player(connection.protocol, player, False) message = '%s was healed by %s' % (player.name, connection.name) else: if connection not in connection.protocol.players: raise ValueError() player = connection message = '%s was healed' % (connection.name) player.refill() connection.protocol.send_chat(message, irc = True) def rules(connection): if connection not in connection.protocol.players: raise KeyError() lines = connection.protocol.rules if lines is None: return connection.send_lines(lines) def help(connection): """ This help """ if connection.protocol.help is not None and not connection.admin: connection.send_lines(connection.protocol.help) else: names = [command.func_name for command in command_list if command.func_name in connection.rights] return 'Available commands: %s' % (', '.join(names)) def login(connection, password): """ Login as a user type """ if connection not in connection.protocol.players: raise KeyError() for user_type, passwords in connection.protocol.passwords.iteritems(): if password in passwords: if user_type in connection.user_types: return "You're already logged in as %s" % user_type return 
connection.on_user_login(user_type, True) if connection.login_retries is None: connection.login_retries = connection.protocol.login_retries - 1 else: connection.login_retries -= 1 if not connection.login_retries: connection.kick('Ran out of login attempts') return return 'Invalid password - you have %s tries left' % ( connection.login_retries) def pm(connection, value, *arg): player = get_player(connection.protocol, value) message = join_arguments(arg) player.send_chat('PM from %s: %s' % (connection.name, message)) return 'PM sent to %s' % player.name @name('admin') def to_admin(connection, *arg): protocol = connection.protocol message = join_arguments(arg) if not message: return "Enter a message you want to send, like /admin I'm stuck" prefix = '(TO ADMINS)' irc_relay = protocol.irc_relay if irc_relay: if irc_relay.factory.bot and irc_relay.factory.bot.colors: prefix = '\x0304' + prefix + '\x0f' irc_relay.send(prefix + ' <%s> %s' % (connection.name, message)) for player in protocol.players.values(): if player.admin and player is not connection: player.send_chat('To ADMINS from %s: %s' % (connection.name, message)) return 'Message sent to admins' def streak(connection): if connection not in connection.protocol.players: raise KeyError() return ('Your current kill streak is %s. 
Best is %s kills' % (connection.streak, connection.best_streak)) @admin def lock(connection, value): team = get_team(connection, value) team.locked = True connection.protocol.send_chat('%s team is now locked' % team.name) connection.protocol.irc_say('* %s locked %s team' % (connection.name, team.name)) @admin def unlock(connection, value): team = get_team(connection, value) team.locked = False connection.protocol.send_chat('%s team is now unlocked' % team.name) connection.protocol.irc_say('* %s unlocked %s team' % (connection.name, team.name)) @admin def switch(connection, player = None): protocol = connection.protocol if player is not None: player = get_player(protocol, player) elif connection in protocol.players: player = connection else: raise ValueError() if player.team.spectator: player.send_chat("The switch command can't be used on a spectating player.") return if player.invisible: old_team = player.team player.team = player.team.other player.on_team_changed(old_team) player.spawn(player.world_object.position.get()) player.send_chat('Switched to %s team' % player.team.name) if connection is not player and connection in protocol.players: connection.send_chat('Switched %s to %s team' % (player.name, player.team.name)) protocol.irc_say('* %s silently switched teams' % player.name) else: player.respawn_time = protocol.respawn_time player.set_team(player.team.other) protocol.send_chat('%s switched teams' % player.name, irc = True) @name('setbalance') @admin def set_balance(connection, value): try: value = int(value) except ValueError: return 'Invalid value %r. 
Use 0 for off, 1 and up for on' % value protocol = connection.protocol protocol.balanced_teams = value protocol.send_chat('Balanced teams set to %s' % value) connection.protocol.irc_say('* %s set balanced teams to %s' % ( connection.name, value)) @name('togglebuild') @alias('tb') @admin def toggle_build(connection, player = None): if player is not None: player = get_player(connection.protocol, player) value = not player.building player.building = value msg = '%s can build again' if value else '%s is disabled from building' connection.protocol.send_chat(msg % player.name) connection.protocol.irc_say('* %s %s building for %s' % (connection.name, ['disabled', 'enabled'][int(value)], player.name)) return value = not connection.protocol.building connection.protocol.building = value on_off = ['OFF', 'ON'][int(value)] connection.protocol.send_chat('Building has been toggled %s!' % on_off) connection.protocol.irc_say('* %s toggled building %s' % (connection.name, on_off)) @name('togglekill') @alias('tk') @admin def toggle_kill(connection, player = None): if player is not None: player = get_player(connection.protocol, player) value = not player.killing player.killing = value msg = '%s can kill again' if value else '%s is disabled from killing' connection.protocol.send_chat(msg % player.name) connection.protocol.irc_say('* %s %s killing for %s' % (connection.name, ['disabled', 'enabled'][int(value)], player.name)) return value = not connection.protocol.killing connection.protocol.killing = value on_off = ['OFF', 'ON'][int(value)] connection.protocol.send_chat('Killing has been toggled %s!' % on_off) connection.protocol.irc_say('* %s toggled killing %s' % (connection.name, on_off)) @name('toggleteamkill') @admin def toggle_teamkill(connection): value = not connection.protocol.friendly_fire connection.protocol.friendly_fire = value on_off = ['OFF', 'ON'][int(value)] connection.protocol.send_chat('Friendly fire has been toggled %s!' 
% on_off) connection.protocol.irc_say('* %s toggled friendly fire %s' % ( connection.name, on_off)) @admin def mute(connection, value): player = get_player(connection.protocol, value) if player.mute: return '%s is already muted' % player.name player.mute = True message = '%s has been muted by %s' % (player.name, connection.name) connection.protocol.send_chat(message, irc = True) @admin def unmute(connection, value): player = get_player(connection.protocol, value) if not player.mute: return '%s is not muted' % player.name player.mute = False message = '%s has been unmuted by %s' % (player.name, connection.name) connection.protocol.send_chat(message, irc = True) def deaf(connection, value = None): if value is not None: if not connection.admin and not connection.rights.deaf: return 'No administrator rights!' connection = get_player(connection.protocol, value) message = '%s deaf' % ('now' if not connection.deaf else 'no longer') connection.protocol.irc_say('%s is %s' % (connection.name, message)) message = "You're " + message if connection.deaf: connection.deaf = False connection.send_chat(message) else: connection.send_chat(message) connection.deaf = True @name('globalchat') @admin def global_chat(connection): connection.protocol.global_chat = not connection.protocol.global_chat connection.protocol.send_chat('Global chat %s' % ('enabled' if connection.protocol.global_chat else 'disabled'), irc = True) @alias('tp') @admin def teleport(connection, player1, player2 = None, silent = False): player1 = get_player(connection.protocol, player1) if player2 is not None: if connection.admin or connection.rights.teleport_other: player, target = player1, get_player(connection.protocol, player2) silent = silent or player.invisible message = ('%s ' + ('silently ' if silent else '') + 'teleported ' '%s to %s') message = message % (connection.name, player.name, target.name) else: return 'No administrator rights!' 
else: if connection not in connection.protocol.players: raise ValueError() player, target = connection, player1 silent = silent or player.invisible message = '%s ' + ('silently ' if silent else '') + 'teleported to %s' message = message % (player.name, target.name) player.set_location(target.get_location()) if silent: connection.protocol.irc_say('* ' + message) else: connection.protocol.send_chat(message, irc = True) @admin def unstick(connection, player = None): if player is not None: player = get_player(connection.protocol, player) else: player = connection connection.protocol.send_chat("%s unstuck %s" % (connection.name, player.name), irc = True) player.set_location_safe(player.get_location()) @alias('tps') @admin def tpsilent(connection, player1, player2 = None): teleport(connection, player1, player2, silent = True) from pyspades.common import coordinates, to_coordinates @name('goto') @admin def go_to(connection, value): if connection not in connection.protocol.players: raise KeyError() move(connection, connection.name, value, silent = connection.invisible) @admin def move(connection, player, value, silent = False): player = get_player(connection.protocol, player) x, y = coordinates(value) x += 32 y += 32 player.set_location((x, y, connection.protocol.map.get_height(x, y) - 2)) if connection is player: message = ('%s ' + ('silently ' if silent else '') + 'teleported to ' 'location %s') message = message % (player.name, value.upper()) else: message = ('%s ' + ('silently ' if silent else '') + 'teleported %s ' 'to location %s') message = message % (connection.name, player.name, value.upper()) if silent: connection.protocol.irc_say('* ' + message) else: connection.protocol.send_chat(message, irc = True) @admin def where(connection, value = None): if value is not None: connection = get_player(connection.protocol, value) elif connection not in connection.protocol.players: raise ValueError() x, y, z = connection.get_location() return '%s is in %s (%s, %s, %s)' % 
(connection.name, to_coordinates(x, y), int(x), int(y), int(z)) @admin def god(connection, value = None): if value is not None: connection = get_player(connection.protocol, value) elif connection not in connection.protocol.players: raise ValueError() connection.god = not connection.god if connection.protocol.set_god_build: connection.god_build = connection.god else: connection.god_build = False if connection.god: message = '%s entered GOD MODE!' % connection.name else: message = '%s returned to being a mere human' % connection.name connection.protocol.send_chat(message, irc = True) @name('godbuild') @admin def god_build(connection, player = None): protocol = connection.protocol if player is not None: player = get_player(protocol, player) elif connection in protocol.players: player = connection else: raise ValueError() if not player.god: return 'Placing god blocks is only allowed in god mode' player.god_build = not player.god_build message = ('now placing god blocks' if player.god_build else 'no longer placing god blocks') player.send_chat("You're %s" % message) if connection is not player and connection in protocol.players: connection.send_chat('%s is %s' % (player.name, message)) protocol.irc_say('* %s is %s' % (player.name, message)) @admin def fly(connection, player = None): protocol = connection.protocol if player is not None: player = get_player(protocol, player) elif connection in protocol.players: player = connection else: raise ValueError() player.fly = not player.fly message = 'now flying' if player.fly else 'no longer flying' player.send_chat("You're %s" % message) if connection is not player and connection in protocol.players: connection.send_chat('%s is %s' % (player.name, message)) protocol.irc_say('* %s is %s' % (player.name, message)) from pyspades.contained import KillAction from pyspades.server import create_player, set_tool, set_color, input_data, weapon_input from pyspades.common import make_color @alias('invis') @alias('inv') @admin def 
invisible(connection, player = None): protocol = connection.protocol if player is not None: player = get_player(protocol, player) elif connection in protocol.players: player = connection else: raise ValueError() player.invisible = not player.invisible player.filter_visibility_data = player.invisible player.god = player.invisible player.god_build = False player.killing = not player.invisible if player.invisible: player.send_chat("You're now invisible") protocol.irc_say('* %s became invisible' % player.name) kill_action = KillAction() kill_action.kill_type = choice([GRENADE_KILL, FALL_KILL]) kill_action.player_id = kill_action.killer_id = player.player_id reactor.callLater(1.0 / NETWORK_FPS, protocol.send_contained, kill_action, sender = player) else: player.send_chat("You return to visibility") protocol.irc_say('* %s became visible' % player.name) x, y, z = player.world_object.position.get() create_player.player_id = player.player_id create_player.name = player.name create_player.x = x create_player.y = y create_player.z = z create_player.weapon = player.weapon create_player.team = player.team.id world_object = player.world_object input_data.player_id = player.player_id input_data.up = world_object.up input_data.down = world_object.down input_data.left = world_object.left input_data.right = world_object.right input_data.jump = world_object.jump input_data.crouch = world_object.crouch input_data.sneak = world_object.sneak input_data.sprint = world_object.sprint set_tool.player_id = player.player_id set_tool.value = player.tool set_color.player_id = player.player_id set_color.value = make_color(*player.color) weapon_input.primary = world_object.primary_fire weapon_input.secondary = world_object.secondary_fire protocol.send_contained(create_player, sender = player, save = True) protocol.send_contained(set_tool, sender = player) protocol.send_contained(set_color, sender = player, save = True) protocol.send_contained(input_data, sender = player) 
protocol.send_contained(weapon_input, sender = player) if connection is not player and connection in protocol.players: if player.invisible: return '%s is now invisible' % player.name else: return '%s is now visible' % player.name @admin def ip(connection, value = None): if value is None: if connection not in connection.protocol.players: raise ValueError() player = connection else: player = get_player(connection.protocol, value) return 'The IP of %s is %s' % (player.name, player.address[0]) @name('whowas') @admin def who_was(connection, value): value = value.lower() ret = None exact_match = False for name, ip in connection.protocol.player_memory: name_lower = name.lower() if name_lower == value: ret = (name, ip) exact_match = True elif not exact_match and name_lower.count(value): ret = (name, ip) if ret is None: raise InvalidPlayer() return "%s's most recent IP was %s" % ret @name('resetgame') @admin def reset_game(connection): resetting_player = connection # irc compatibility if resetting_player not in connection.protocol.players: for player in connection.protocol.players.values(): resetting_player = player if player.admin: break if resetting_player is connection: return connection.protocol.reset_game(resetting_player) connection.protocol.on_game_end() connection.protocol.send_chat('Game has been reset by %s' % connection.name, irc = True) from map import Map import itertools @name('map') @admin def change_planned_map(connection, *pre_maps): name = connection.name protocol = connection.protocol # parse seed numbering maps, map_list = parse_maps(pre_maps) if not maps: return 'Invalid map name' map = maps[0] protocol.planned_map = check_rotation([map])[0] protocol.send_chat('%s changed next map to %s' % (name, map), irc = True) @name('rotation') @admin def change_rotation(connection, *pre_maps): name = connection.name protocol = connection.protocol maps, map_list = parse_maps(pre_maps) if len(maps) == 0: return 'Usage: /rotation <map1> <map2> <map3>...' 
ret = protocol.set_map_rotation(maps, False) if not ret: return 'Invalid map in map rotation (%s)' % ret.map protocol.send_chat("%s changed map rotation to %s." % (name, map_list), irc=True) @name('rotationadd') @admin def rotation_add(connection, *pre_maps): name = connection.name protocol = connection.protocol new_maps, map_list = parse_maps(pre_maps) maps = connection.protocol.get_map_rotation() map_list = ", ".join(maps) + map_list maps.extend(new_maps) ret = protocol.set_map_rotation(maps, False) if not ret: return 'Invalid map in map rotation (%s)' % ret.map protocol.send_chat("%s added %s to map rotation." % (name, " ".join(pre_maps)), irc=True) @name('showrotation') def show_rotation(connection): return ", ".join(connection.protocol.get_map_rotation()) @name('revertrotation') @admin def revert_rotation(connection): protocol = connection.protocol maps = protocol.config['maps'] protocol.set_map_rotation(maps, False) protocol.irc_say("* %s reverted map rotation to %s" % (name, maps)) def mapname(connection): return 'Current map: ' + connection.protocol.map_info.name @admin def advance(connection): connection.protocol.advance_rotation('Map advance forced.') @name('timelimit') @admin def set_time_limit(connection, value): value = float(value) protocol = connection.protocol protocol.set_time_limit(value) protocol.send_chat('Time limit set to %s' % value, irc = True) @name('time') def get_time_limit(connection): advance_call = connection.protocol.advance_call if advance_call is None: return 'No time limit set' left = int(math.ceil((advance_call.getTime() - reactor.seconds()) / 60.0)) return 'There are %s minutes left' % left @name('servername') @admin def server_name(connection, *arg): name = join_arguments(arg) protocol = connection.protocol protocol.config['name'] = name protocol.update_format() message = "%s changed servername to to '%s'" % (connection.name, name) print message connection.protocol.irc_say("* " + message) if connection in 
connection.protocol.players: return message @name('master') @admin def toggle_master(connection): protocol = connection.protocol protocol.set_master_state(not protocol.master) message = ("toggled master broadcast %s" % ['OFF', 'ON'][ int(protocol.master)]) protocol.irc_say("* %s " % connection.name + message) if connection in connection.protocol.players: return ("You " + message) def ping(connection, value = None): if value is None: if connection not in connection.protocol.players: raise ValueError() player = connection else: player = get_player(connection.protocol, value) ping = player.latency if value is None: return ('Your ping is %s ms. Lower ping is better!' % ping) return "%s's ping is %s ms" % (player.name, ping) def intel(connection): if connection not in connection.protocol.players: raise KeyError() flag = connection.team.other.flag if flag.player is not None: if flag.player is connection: return "You have the enemy intel, return to base!" else: return "%s has the enemy intel!" % flag.player.name return "Nobody in your team has the enemy intel" def version(connection): return 'Server version is "%s"' % connection.protocol.server_version @name('server') def server_info(connection): protocol = connection.protocol msg = 'You are playing on "%s"' % protocol.name if protocol.identifier is not None: msg += ' at %s' % protocol.identifier return msg def scripts(connection): scripts = connection.protocol.config.get('scripts', []) return 'Scripts enabled: %s' % (', '.join(scripts)) @admin def fog(connection, r, g, b): r = int(r) g = int(g) b = int(b) connection.protocol.set_fog_color((r, g, b)) def weapon(connection, value): player = get_player(connection.protocol, value) if player.weapon_object is None: name = '(unknown)' else: name = player.weapon_object.name return '%s has a %s' % (player.name, name) command_list = [ help, pm, to_admin, login, kick, intel, ip, who_was, fog, ban, hban, dban, wban, banip, unban, undo_ban, mute, unmute, deaf, global_chat, say, kill, 
heal, lock, unlock, switch, set_balance, rules, toggle_build, toggle_kill, toggle_teamkill, teleport, tpsilent, go_to, move, unstick, where, god, god_build, fly, invisible, streak, reset_game, toggle_master, change_planned_map, change_rotation, revert_rotation, show_rotation, rotation_add, advance, set_time_limit, get_time_limit, server_name, ping, version, server_info, scripts, weapon, mapname ] commands = {} aliases = {} rights = {} def add(func, name = None): """ Function to add a command from scripts """ if name is None: name = func.func_name name = name.lower() user_types = getattr(func, 'user_types', None) if user_types is not None: for user_type in user_types: if user_type in rights: rights[user_type].add(name) else: rights[user_type] = set([name]) commands[name] = func try: for alias in func.aliases: aliases[alias.lower()] = name except AttributeError: pass for command_func in command_list: add(command_func) # optional commands try: import pygeoip database = pygeoip.GeoIP('./data/GeoLiteCity.dat') @name('from') def where_from(connection, value = None): if value is None: if connection not in connection.protocol.players: raise ValueError() player = connection else: player = get_player(connection.protocol, value) record = database.record_by_addr(player.address[0]) if record is None: return 'Player location could not be determined.' 
items = [] for entry in ('country_name', 'city', 'region_name'): # sometimes, the record entries are numbers or nonexistent try: value = record[entry] int(value) # if this raises a ValueError, it's not a number continue except KeyError: continue except ValueError: pass if type(value) is not type(''): continue items.append(value) return '%s is from %s' % (player.name, ', '.join(items)) add(where_from) except ImportError: print "('from' command disabled - missing pygeoip)" except (IOError, OSError): print "('from' command disabled - missing data/GeoLiteCity.dat)" def handle_command(connection, command, parameters): command = command.lower() try: command = aliases[command] except KeyError: pass try: command_func = commands[command] except KeyError: return # 'Invalid command' try: if (hasattr(command_func, 'user_types') and command_func.func_name not in connection.rights): return "You can't use this command" return command_func(connection, *parameters) except KeyError: return # 'Invalid command' except TypeError: return 'Invalid number of arguments for %s' % command except InvalidPlayer: return 'No such player' except InvalidTeam: return 'Invalid team specifier' except ValueError: return 'Invalid parameters' def debug_handle_command(connection, command, parameters): # use this when regular handle_command eats errors if connection in connection.protocol.players: connection.send_chat("Commands are in DEBUG mode") command = command.lower() try: command = aliases[command] except KeyError: pass try: command_func = commands[command] except KeyError: return # 'Invalid command' if (hasattr(command_func, 'user_types') and command_func.func_name not in connection.rights): return "You can't use this command" return command_func(connection, *parameters) # handle_command = debug_handle_command def handle_input(connection, input): # for IRC and console return handle_command(connection, *parse_command(input))
NateShoffner/PySnip
feature_server/commands.py
Python
gpl-3.0
32,822
import math import string import sys import struct import matplotlib #matplotlib.use('Agg') import matplotlib.pyplot as pyplot import matplotlib.colors as pycolors import matplotlib.cm as cm import matplotlib.patches as patches import numpy as np import cPickle import asciitable import scipy.ndimage import scipy.stats as ss import scipy.signal import scipy as sp import scipy.odr as odr import astropy.io.fits as pyfits import glob import os import make_color_image import make_fake_wht import gzip import tarfile import shutil import cosmocalc import congrid import astropy.io.ascii as ascii sq_arcsec_per_sr = 42545170296.0 c = 3.0e8 def render_only(outfile='HDUDF_v1.pdf',hst_only=False,maglim=28,label='_SB28_',unit='nJySr'): print "reading b" b=pyfits.open('hudf_F606W_Jy.fits')[0].data #hdudf_v.final_array print "reading g" g=pyfits.open('hudf_F850LP_Jy.fits')[0].data #hdudf_z.final_array print "reading r" r=pyfits.open('hudf_F160W_Jy.fits')[0].data #hdudf_h.final_array pixel_arcsec = pyfits.open('hudf_F160W_Jy.fits')[0].header['PIXSCALE'] if unit=='Jy': conv = (1.0e9)*(1.0/pixel_arcsec**2)*sq_arcsec_per_sr #res = render_hdudf(b*conv,g*conv,r*conv,'HUDF'+label+'_v1.pdf',pixel_arcsec=pixel_arcsec,FWHM_arcsec_b=0.10,FWHM_arcsec_g=0.15,FWHM_arcsec_r=0.20,convolve=True,dpi=600,maglim=maglim) #res = render_hdudf(b,g,r,'HUDF'+label+'small_v1.jpg',pixel_arcsec=pixel_arcsec,FWHM_arcsec_b=0.10,FWHM_arcsec_g=0.15,FWHM_arcsec_r=0.20,convolve=True,dpi=60,maglim=maglim) res = render_hdudf(b*conv,g*conv,r*conv,'HUDF'+label+'big_v4.jpg',pixel_arcsec=pixel_arcsec,FWHM_arcsec_b=0.10,FWHM_arcsec_g=0.15,FWHM_arcsec_r=0.20,convolve=True,dpi=1200,maglim=maglim) res = render_hdudf(b*conv,g*conv,r*conv,'HUDF'+label+'small_v4.jpg',pixel_arcsec=pixel_arcsec,FWHM_arcsec_b=0.10,FWHM_arcsec_g=0.15,FWHM_arcsec_r=0.20,convolve=True,dpi=60,maglim=maglim) if hst_only==True: return print "reading b" b=pyfits.open('hdudf_6mas_F606W_Jy.fits')[0].data #hdudf_v.final_array print "reading g" 
g=pyfits.open('hdudf_6mas_F850LP_Jy.fits')[0].data#hdudf_z.final_array print "reading r" r=pyfits.open('hdudf_6mas_F160W_Jy.fits')[0].data#hdudf_h.final_array pixel_arcsec = pyfits.open('hdudf_6mas_F160W_Jy.fits')[0].header['PIXSCALE'] if unit=='Jy': conv = (1.0e9)*(1.0/pixel_arcsec**2)*sq_arcsec_per_sr #assume trying for 8m telescope #res = render_hdudf(b*conv,g*conv,r*conv,'HDUDF'+label+'_v1.pdf',pixel_arcsec=pixel_arcsec,FWHM_arcsec_b=0.017,FWHM_arcsec_g=0.025,FWHM_arcsec_r=0.050,convolve=True,dpi=2000,maglim=maglim) #settings for 12m res = render_hdudf(b*conv,g*conv,r*conv,'HDUDF'+label+'big_v4.jpg',pixel_arcsec=pixel_arcsec,FWHM_arcsec_b=0.012,FWHM_arcsec_g=0.018,FWHM_arcsec_r=0.032,convolve=True,dpi=1200,maglim=maglim) return def render_hdudf(b,g,r,filename,pixel_arcsec=0.006,FWHM_arcsec_b=0.012,FWHM_arcsec_g=0.015,FWHM_arcsec_r=0.025,convolve=True,dpi=2000,maglim=28.0): #maglim in mag/arcsec^2 redfact = 1.5*(0.60/1.60)**(1) greenfact = 0.9*(0.60/0.85)**(1) bluefact = 1.2 efflams = [1.60,1.25,0.90,0.775,0.606,0.435,0.814,1.05,1.40] alph=7.0 Q = 5.0 target_ratio = 10.0**(-0.4*(27.0-maglim)) fluxscale = target_ratio*1.0e-14 pixel_Sr = (pixel_arcsec**2)/sq_arcsec_per_sr #new version, already in nJy/Sr to_nJy_per_Sr_b = 1#(1.0e9)*(1.0e14)*(efflams[4]**2)/c #((pixscale/206265.0)^2)* to_nJy_per_Sr_g = 1#(1.0e9)*(1.0e14)*(efflams[2]**2)/c to_nJy_per_Sr_r = 1#(1.0e9)*(1.0e14)*(efflams[0]**2)/c #b_nJySr = to_nJy_per_Sr_b*b #g_nJySr = to_nJy_per_Sr_g*g #r_nJySr = to_nJy_per_Sr_r*r sigma_pixels_b = FWHM_arcsec_b/pixel_arcsec/2.355 sigma_pixels_g = FWHM_arcsec_g/pixel_arcsec/2.355 sigma_pixels_r = FWHM_arcsec_r/pixel_arcsec/2.355 print "sigma pixels: ", sigma_pixels_g if convolve==True: print "convolving images" b = scipy.ndimage.filters.gaussian_filter(b,sigma_pixels_b) g = scipy.ndimage.filters.gaussian_filter(g,sigma_pixels_g) r = scipy.ndimage.filters.gaussian_filter(r,sigma_pixels_r) sigma_nJy = 
0.3*(2.0**(-0.5))*((1.0e9)*(3631.0/5.0)*10.0**(-0.4*maglim))*pixel_arcsec*(3.0*FWHM_arcsec_g) print "adding noise, in nJy/Sr: ", sigma_nJy/pixel_Sr Npix = b.shape[0] b = (b*to_nJy_per_Sr_b + np.random.randn(Npix,Npix)*sigma_nJy/pixel_Sr) g = (g*to_nJy_per_Sr_g + np.random.randn(Npix,Npix)*sigma_nJy/pixel_Sr) r = (r*to_nJy_per_Sr_r + np.random.randn(Npix,Npix)*sigma_nJy/pixel_Sr) print "preparing color image" rgbdata = make_color_image.make_interactive_light_nasa(b*fluxscale*bluefact,g*fluxscale*greenfact,r*fluxscale*redfact,alph,Q) print rgbdata.shape print "preparing figure" f9 = pyplot.figure(figsize=(12.0,12.0), dpi=dpi) pyplot.subplots_adjust(left=0.0, right=1.0, bottom=0.0, top=1.0,wspace=0.0,hspace=0.0) axi = pyplot.axes([0.0,0.0,1.0,1.0],frameon=True,axisbg='black') axi.set_xticks([]) ; axi.set_yticks([]) print "rendering color image" axi.imshow(rgbdata,interpolation='nearest',origin='upper',extent=[-1,1,-1,1]) print "saving color image" #pyplot.rcParams['pdf.compression'] = 1 f9.savefig(filename,dpi=dpi,quality=90,bbox_inches='tight',pad_inches=0.0) pyplot.close(f9) #pyplot.rcdefaults() return class mock_hdudf: def __init__(self,Npix,Pix_arcsec,blank_array,filter_string,simdata,Narcmin,eff_lambda_microns,maglim,fwhm,req_filters=[]): self.Npix=Npix self.Pix_arcsec=Pix_arcsec self.fov_arcmin = Narcmin self.blank_array=blank_array*1.0 self.final_array=blank_array*1.0 self.filter_string=filter_string self.simdata=simdata self.image_files=[] self.x_array=[] self.y_array=[] self.N_inf=[] self.eff_lambda_microns = eff_lambda_microns self.maglim = 28.0 self.FWHM_arcsec = fwhm self.req_filters=req_filters self.mstar_list = [] self.redshift_list = [] def find_image(self,mstar,redshift,sfr,seed,xpix,ypix,hmag): sim_simname = self.simdata['col1'] sim_expfact = self.simdata['col2'] sim_sfr = self.simdata['col54'] sim_mstar = self.simdata['col56'] sim_redshift = 1.0/sim_expfact - 1.0 metalmass = self.simdata['col53'] sim_res_pc = self.simdata['col62'] sim_string = 
self.simdata['col60'] simage_loc = '/Users/gsnyder/Documents/Projects/HydroART_Morphology/Hyades_Data/images_rsync/' self.mstar_list.append(mstar) self.redshift_list.append(redshift) adjust_size=False print " " print "Searching for simulation with mstar,z,seed : ", mstar, redshift, seed wide_i = np.where(np.logical_and(np.logical_and(np.abs(sim_redshift-redshift)<0.3,np.abs(np.log10(sim_mstar)-mstar)<0.1),sim_sfr > -1))[0] Nwi = wide_i.shape[0] if Nwi==0: wide_i = np.where(np.logical_and(np.logical_and(np.abs(sim_redshift-redshift)<0.5,np.abs(np.log10(sim_mstar)-mstar)<0.4),sim_sfr > -1))[0] Nwi = wide_i.shape[0] if Nwi==0 and (mstar < 7.1): print " Can't find good sim, adjusting image parameters to get low mass things " wide_i = np.where(np.abs(sim_redshift-redshift)<0.3)[0] #wide_i is a z range llmi = np.argmin(np.log10(sim_mstar[wide_i])) #the lowest mass in this z range wlmi = np.where(np.abs(np.log10(sim_mstar[wide_i]) - np.log10(sim_mstar[wide_i[llmi]])) < 0.3)[0] #search within 0.3 dex of lowest available sims print " ", wide_i.shape, llmi, wlmi.shape wide_i = wide_i[wlmi] Nwi = wide_i.shape[0] print " ", Nwi adjust_size=True #assert(wide_i.shape[0] > 0) if Nwi==0: print " Could not find roughly appropriate simulation for mstar,z: ", mstar, redshift print " " self.image_files.append('') return 0#np.zeros(shape=(600,600)), -1 print " Found N candidates: ", wide_i.shape np.random.seed(seed) #choose random example and camera rps = np.random.random_integers(0,Nwi-1,1)[0] cn = str(np.random.random_integers(5,8,1)[0]) prefix = os.path.basename(sim_string[wide_i[rps]]) sim_realmstar = np.log10(sim_mstar[wide_i[rps]]) #we picked a sim with this log mstar mstar_factor = sim_realmstar - mstar rad_factor = 1.0 lum_factor = 1.0 if adjust_size==True: rad_factor = 10.0**(mstar_factor*0.5) #must **shrink** images by this factor, total flux by mstar factor lum_factor = 10.0**(mstar_factor) print ">>>FACTORS<<< ", prefix, sim_realmstar, mstar_factor, rad_factor, lum_factor 
im_folder = simage_loc + prefix +'_skipir/images' im_file = os.path.join(im_folder, prefix+'_skipir_CAMERA'+cn+'-BROADBAND_'+self.filter_string+'_simulation.fits') cn_file = os.path.join(im_folder, prefix+'_skipir_CAMERA'+cn+'-BROADBAND_'+self.filter_string+'_candelized_noise.fits') req1 = os.path.join(im_folder, prefix+'_skipir_CAMERA'+cn+'-BROADBAND_'+self.req_filters[0]+'_simulation.fits') req2 = os.path.join(im_folder, prefix+'_skipir_CAMERA'+cn+'-BROADBAND_'+self.req_filters[1]+'_simulation.fits') req3 = os.path.join(im_folder, prefix+'_skipir_CAMERA'+cn+'-BROADBAND_'+self.req_filters[2]+'_simulation.fits') req4 = os.path.join(im_folder, prefix+'_skipir_CAMERA'+cn+'-BROADBAND_'+self.req_filters[3]+'_simulation.fits') req5 = os.path.join(im_folder, prefix+'_skipir_CAMERA'+cn+'-BROADBAND_'+self.req_filters[4]+'_simulation.fits') ## Actually, probably want to keep trying some possible galaxies/files... is_file = os.path.lexists(im_file) and os.path.lexists(cn_file) and os.path.lexists(req1) and os.path.lexists(req2) and os.path.lexists(req3) and os.path.lexists(req4) and os.path.lexists(req5) #is_file = os.path.lexists(im_file) and os.path.lexists(cn_file) #and os.path.lexists(req1) and os.path.lexists(req2) and os.path.lexists(req3) if is_file==False: print " Could not find appropriate files: ", im_file, cn_file print " " self.image_files.append('') return 0 #np.zeros(shape=(600,600)), -1 self.image_files.append(im_file) cn_header = pyfits.open(cn_file)[0].header im_hdu = pyfits.open(im_file)[0] scalesim = cn_header.get('SCALESIM') #pc/pix Ps = cosmocalc.cosmocalc(redshift)['PS_kpc'] #kpc/arcsec print " Simulation pixel size at z: ", scalesim print " Plate scale for z: ", Ps print " Desired Kpc/pix at z: ", Ps*self.Pix_arcsec sunrise_image = np.float32(im_hdu.data) #W/m/m^2/Sr Sim_Npix = sunrise_image.shape[0] New_Npix = int( Sim_Npix*(scalesim/(1000.0*Ps*self.Pix_arcsec))/rad_factor ) #rad_factor reduces number of pixels (total size) desired if New_Npix==0: 
New_Npix=1 print " New galaxy pixel count: ", New_Npix rebinned_image = congrid.congrid(sunrise_image,(New_Npix,New_Npix)) #/lum_factor #lum_factor shrinks surface brightness by mass factor... but we're shrinking size first, so effective total flux already adjusted by this; may need to ^^ SB instead??? or fix size adjust SB? print " New galaxy image shape: ", rebinned_image.shape print " New galaxy image max: ", np.max(rebinned_image) #finite_bool = np.isfinite(rebinned_image) #num_infinite = np.where(finite_bool==False)[0].shape[0] #print " Number of INF pixels: ", num_infinite, prefix #self.N_inf.append(num_infinite) if xpix==-1: xpix = int( (self.Npix-1)*np.random.rand()) #np.random.random_integers(0,self.Npix-1,1)[0] ypix = int( (self.Npix-1)*np.random.rand()) #np.random.random_integers(0,self.Npix-1,1)[0] self.x_array.append(xpix) self.y_array.append(ypix) x1_choice = np.asarray([int(xpix-float(New_Npix)/2.0),0]) x1i = np.argmax(x1_choice) x1 = x1_choice[x1i] diff=0 if x1==0: diff = x1_choice[1]-x1_choice[0] x2_choice = np.asarray([x1 + New_Npix - diff,self.Npix]) x2i = np.argmin(x2_choice) x2 = int(x2_choice[x2i]) x1sim = abs(np.min(x1_choice)) x2sim = min(New_Npix,self.Npix-x1) y1_choice = np.asarray([int(ypix-float(New_Npix)/2.0),0]) y1i = np.argmax(y1_choice) y1 = y1_choice[y1i] diff=0 if y1==0: diff = y1_choice[1]-y1_choice[0] y2_choice = np.asarray([y1 + New_Npix - diff,self.Npix]) y2i = np.argmin(y2_choice) y2 = int(y2_choice[y2i]) y1sim = abs(np.min(y1_choice)) y2sim = min(New_Npix,self.Npix-y1) print " Placing new image at x,y in x1:x2, y1:y2 from xsim,ysim, ", xpix, ypix, x1,x2,y1,y2, x1sim, x2sim, y1sim, y2sim #image_slice = np.zeros_like(self.blank_array) print " done creating image slice" #bool_slice = np.int32( np.zeros(shape=(self.Npix,self.Npix))) image_cutout = rebinned_image[x1sim:x2sim,y1sim:y2sim] print " New image shape: ", image_cutout.shape pixel_Sr = (self.Pix_arcsec**2)/sq_arcsec_per_sr #pixel area in steradians: Sr/pixel to_nJy_per_Sr 
= (1.0e9)*(1.0e14)*(self.eff_lambda_microns**2)/c #((pixscale/206265.0)^2)* #sigma_nJy = 0.3*(2.0**(-0.5))*((1.0e9)*(3631.0/5.0)*10.0**(-0.4*self.maglim))*self.Pix_arcsec*(3.0*self.FWHM_arcsec) to_Jy_per_pix = to_nJy_per_Sr*(1.0e-9)*pixel_Sr #b = b*(to_nJy_per_Sr_b*fluxscale*bluefact) # + np.random.randn(Npix,Npix)*sigma_nJy/pixel_Sr image_cutout = image_cutout*to_Jy_per_pix #image_cutout*to_nJy_per_Sr #image_slice[x1:x2,y1:y2] = image_cutout*1.0 #bool_slice[x1:x2,y1:y2]=1 print " done slicing" #self.final_array += image_slice self.final_array[x1:x2,y1:y2] += image_cutout print " done adding image to final array" #finite_bool = np.isfinite(self.final_array) #num_infinite = np.where(finite_bool==False)[0].shape[0] #print " Final array INF count and max:", num_infinite, np.max(self.final_array) print " " return 1 #sunrise_image,scalesim def write_success_table(self,filename): boolthing = np.ones_like(hdudf_h.mstar_list) i_fail = np.where(np.asarray(hdudf_h.image_files)=='')[0] print boolthing.shape, i_fail.shape print boolthing, i_fail boolthing[i_fail] = 0 data = np.asarray([boolthing,hdudf_h.x_array,hdudf_h.y_array]) asciitable.write(data,filename) return def place_image(self,x,y,galaxy_image,galaxy_pixsize): new_image = self.final_array return new_image if __name__=="__main__": mstar_list = np.asarray([8.0]) redshift_list = np.asarray([2.0]) sfr_list = np.asarray([1.5]) #instead, use observed UDF catalogs udf_hdulist = pyfits.open('data/udf_zbest_sedfit_jen2015.fits') udf_table = udf_hdulist[1].data udf_zbest = udf_table.field('Z_BEST') udf_lmstar = udf_table.field('LMSTAR_BC03') udf_hmag = udf_table.field('MAG_F160W') x_list = np.asarray([2500]) y_list = np.asarray([6000]) #random positions? 
fi = np.where(udf_hmag > 27.0)[0] fake_zs = np.asarray([udf_zbest[fi],udf_zbest[fi]]).flatten() fake_lmasses = 7.0 - 1.6*np.random.random(fake_zs.shape[0]) fake_hmag = 29.0 + 4.0*np.random.random(fake_zs.shape[0]) udf_zbest = np.append(udf_zbest,fake_zs) udf_lmstar = np.append(udf_lmstar,fake_lmasses) udf_hmag = np.append(udf_hmag,fake_hmag) #Npix = 27800.0/2.0 Npix = 10000.0 #16880 w/ 2.78 arcmin gives 10mas pixels Narcmin = 1.0 Narcsec = Narcmin*60.0 #Npix_hst = 27800.0/8.0 #ish Npix_hst = 1200.0 Pix_arcsec = Narcsec/Npix Pix_arcsec_hst = Narcsec/Npix_hst print "Modeling image with pixel scale (arcsec): ", Pix_arcsec blank_array = np.float32(np.zeros(shape=(Npix,Npix))) print blank_array.shape blank_array_hst = np.float32(np.zeros(shape=(Npix_hst,Npix_hst))) sim_catalog_file = '/Users/gsnyder/Documents/Projects/HydroART_Morphology/Hyades_Data/juxtaposicion-catalog-Nov18_2013/data/sim' simdata = asciitable.read(sim_catalog_file,data_start=1) #print simdata rf = ['F850LP','F606W','F160W','F775W','F125W'] hdudf_h = mock_hdudf(Npix,Pix_arcsec,blank_array,'F160W',simdata,Narcmin,1.60,28.0,0.025,req_filters=rf) hdudf_j = mock_hdudf(Npix,Pix_arcsec,blank_array,'F125W',simdata,Narcmin,1.25,28.0,0.022,req_filters=rf) hdudf_z = mock_hdudf(Npix,Pix_arcsec,blank_array,'F850LP',simdata,Narcmin,0.90,28.0,0.015,req_filters=rf) hdudf_i = mock_hdudf(Npix,Pix_arcsec,blank_array,'F775W',simdata,Narcmin,0.75,28.0,0.014,req_filters=rf) hdudf_v = mock_hdudf(Npix,Pix_arcsec,blank_array,'F606W',simdata,Narcmin,0.60,28.0,0.012,req_filters=rf) hudf_h = mock_hdudf(Npix_hst,Pix_arcsec_hst,blank_array_hst,'F160W',simdata,Narcmin,1.60,28.0,0.20,req_filters=rf) hudf_z = mock_hdudf(Npix_hst,Pix_arcsec_hst,blank_array_hst,'F850LP',simdata,Narcmin,0.90,28.0,0.15,req_filters=rf) hudf_v = mock_hdudf(Npix_hst,Pix_arcsec_hst,blank_array_hst,'F606W',simdata,Narcmin,0.60,28.0,0.12,req_filters=rf) udf_success = np.int32(np.zeros_like(udf_lmstar)) for i,z in enumerate(udf_zbest): if i > 50000: continue 
if i % 4 != 0: continue #i = udf_zbest.shape[0] - i - 1 result = hdudf_h.find_image(udf_lmstar[i],z,0.0,i,-1,-1,udf_hmag[i]) udf_success[i] = result result = hdudf_j.find_image(udf_lmstar[i],z,0.0,i,-1,-1,udf_hmag[i]) result = hdudf_z.find_image(udf_lmstar[i],z,0.0,i,-1,-1,udf_hmag[i]) result = hdudf_i.find_image(udf_lmstar[i],z,0.0,i,-1,-1,udf_hmag[i]) result = hdudf_v.find_image(udf_lmstar[i],z,0.0,i,-1,-1,udf_hmag[i]) hudf_h.find_image(udf_lmstar[i],z,0.0,i,-1,-1,udf_hmag[i]) hudf_z.find_image(udf_lmstar[i],z,0.0,i,-1,-1,udf_hmag[i]) hudf_v.find_image(udf_lmstar[i],z,0.0,i,-1,-1,udf_hmag[i]) #NOTE NOW RETURNS IN Jy/pix!! i_fail = np.where(np.asarray(hdudf_h.image_files)=='')[0] print "Numfail: ", i_fail.shape print udf_lmstar[0:100] print udf_success[0:100] successes = {'udf_z':udf_zbest, 'udf_lmstar':udf_lmstar, 'mockudf_success':udf_success} ascii.write(successes,'hdudf_success_list.txt') #exit() #im,pix_pc = hdudf_h.find_image(mstar_list[0],redshift_list[0],sfr_list[0],1,x_list[0],y_list[0]) #im2 = hdudf_h.modify_and_place(im,x_list[0],y_list[0],redshift_list[0]) print hdudf_h.image_files print hdudf_h.N_inf #WANT ability to know which UDF entries were successful -- save image files. Pickle? FITS table? 
print np.max(hdudf_h.final_array) new_float = np.float32(hdudf_h.final_array) print np.max(new_float) new_bool = np.isfinite(new_float) print np.where(new_bool==False)[0].shape[0] primhdu = pyfits.PrimaryHDU(new_float) ; primhdu.header['IMUNIT']=('Jy/pix') ; primhdu.header['PIXSCALE']=(Pix_arcsec, 'arcsec') hdulist = pyfits.HDUList([primhdu]) hdulist.writeto('hdudf_6mas_F160W_Jy.fits',clobber=True) primhdu = pyfits.PrimaryHDU(np.float32(hdudf_j.final_array)) ; primhdu.header['IMUNIT']=('Jy/pix') ; primhdu.header['PIXSCALE']=(Pix_arcsec, 'arcsec') hdulist = pyfits.HDUList([primhdu]) hdulist.writeto('hdudf_6mas_F125W_Jy.fits',clobber=True) primhdu = pyfits.PrimaryHDU(np.float32(hdudf_z.final_array)) ; primhdu.header['IMUNIT']=('Jy/pix') ; primhdu.header['PIXSCALE']=(Pix_arcsec, 'arcsec') hdulist = pyfits.HDUList([primhdu]) hdulist.writeto('hdudf_6mas_F850LP_Jy.fits',clobber=True) primhdu = pyfits.PrimaryHDU(np.float32(hdudf_i.final_array)) ; primhdu.header['IMUNIT']=('Jy/pix') ; primhdu.header['PIXSCALE']=(Pix_arcsec, 'arcsec') hdulist = pyfits.HDUList([primhdu]) hdulist.writeto('hdudf_6mas_F775W_Jy.fits',clobber=True) primhdu = pyfits.PrimaryHDU(np.float32(hdudf_v.final_array)) ; primhdu.header['IMUNIT']=('Jy/pix') ; primhdu.header['PIXSCALE']=(Pix_arcsec, 'arcsec') hdulist = pyfits.HDUList([primhdu]) hdulist.writeto('hdudf_6mas_F606W_Jy.fits',clobber=True) primhdu = pyfits.PrimaryHDU(np.float32(hudf_h.final_array)) ; primhdu.header['IMUNIT']=('nJy/Sr') ; primhdu.header['PIXSCALE']=(Pix_arcsec_hst, 'arcsec') hdulist = pyfits.HDUList([primhdu]) hdulist.writeto('hudf_F160W_Jy.fits',clobber=True) primhdu = pyfits.PrimaryHDU(np.float32(hudf_z.final_array)) ; primhdu.header['IMUNIT']=('nJy/Sr') ; primhdu.header['PIXSCALE']=(Pix_arcsec_hst, 'arcsec') hdulist = pyfits.HDUList([primhdu]) hdulist.writeto('hudf_F850LP_Jy.fits',clobber=True) primhdu = pyfits.PrimaryHDU(np.float32(hudf_v.final_array)) ; primhdu.header['IMUNIT']=('nJy/Sr') ; 
primhdu.header['PIXSCALE']=(Pix_arcsec_hst, 'arcsec') hdulist = pyfits.HDUList([primhdu]) hdulist.writeto('hudf_F606W_Jy.fits',clobber=True) #hdudf_h.write_success_table('F160W_successes.txt') #b=hdudf_v.final_array #g=hdudf_z.final_array #r=hdudf_h.final_array #res = render_hdudf(b,g,r,'HDUDF_v1.pdf',pixel_arcsec=Pix_arcsec)
gsnyder206/mock-surveys
original_illustris/hdst_mockudf.py
Python
mit
21,619
import sys sys.path.append('./lib') import unittest import mock from lib import FileTransport from StringIO import StringIO class TestInputTransport(unittest.TestCase): def test_construct(self): transport = FileTransport('input', 'output') self.assertIsNotNone(transport) self.assertEqual('input', transport.inputFileName) self.assertEqual('output', transport.outputFileName) @mock.patch("__builtin__.open", create=True) def test_read(self, mock_open): mock_open.side_effect = [ mock.mock_open(read_data='1,2,3\n4,5,6\n7,8,9\n').return_value, ] expected = [['1', '2', '3'], ['4', '5', '6'], ['7', '8', '9']] transport = FileTransport('input', 'output') actual = transport.read() self.assertEqual(expected, actual) if __name__ == '__main__': unittest.main()
Youhana-Hana/interpolation
test/test_FileTransport.py
Python
mit
880
#!/usr/bin/env python # ***** BEGIN LICENSE BLOCK ***** # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this file, # You can obtain one at http://mozilla.org/MPL/2.0/. # ***** END LICENSE BLOCK ***** """fx_desktop_build.py. script harness to build nightly firefox within Mozilla's build environment and developer machines alike author: Jordan Lund """ import sys import os # load modules from parent dir sys.path.insert(1, os.path.dirname(sys.path[0])) from mozharness.mozilla.building.buildbase import BUILD_BASE_CONFIG_OPTIONS, \ BuildingConfig, BuildScript class FxDesktopBuild(BuildScript, object): def __init__(self): buildscript_kwargs = { 'config_options': BUILD_BASE_CONFIG_OPTIONS, 'all_actions': [ 'clobber', 'clone-tools', 'checkout-sources', 'setup-mock', 'build', 'upload-files', # upload from BB to TC 'sendchange', 'check-test', 'package-source', 'multi-l10n', 'generate-build-stats', 'update', ], 'require_config_file': True, # Default configuration 'config': { 'is_automation': True, "pgo_build": False, "debug_build": False, "pgo_platforms": ['linux', 'linux64', 'win32', 'win64'], # nightly stuff "nightly_build": False, 'balrog_credentials_file': 'oauth.txt', 'taskcluster_credentials_file': 'oauth.txt', 'periodic_clobber': 168, # hg tool stuff 'default_vcs': 'hgtool', "tools_repo": "https://hg.mozilla.org/build/tools", "repo_base": "https://hg.mozilla.org", 'tooltool_url': 'https://api.pub.build.mozilla.org/tooltool/', "graph_selector": "/server/collect.cgi", # only used for make uploadsymbols 'old_packages': [ "%(objdir)s/dist/firefox-*", "%(objdir)s/dist/fennec*", "%(objdir)s/dist/seamonkey*", "%(objdir)s/dist/thunderbird*", "%(objdir)s/dist/install/sea/*.exe" ], 'stage_product': 'firefox', 'platform_supports_post_upload_to_latest': True, 'use_branch_in_symbols_extra_buildid': True, 'latest_mar_dir': '/pub/mozilla.org/firefox/nightly/latest-%(branch)s', 
'compare_locales_repo': 'https://hg.mozilla.org/build/compare-locales', 'compare_locales_rev': 'RELEASE_AUTOMATION', 'compare_locales_vcs': 'hgtool', 'influx_credentials_file': 'oauth.txt', 'build_resources_path': '%(abs_src_dir)s/obj-firefox/.mozbuild/build_resources.json', # try will overwrite these 'clone_with_purge': False, 'clone_by_revision': False, 'tinderbox_build_dir': None, 'to_tinderbox_dated': True, 'release_to_try_builds': False, 'include_post_upload_builddir': False, 'use_clobberer': True, 'stage_username': 'ffxbld', 'stage_ssh_key': 'ffxbld_rsa', 'virtualenv_modules': [ 'requests==2.2.1', 'PyHawk-with-a-single-extra-commit==0.1.5', 'taskcluster==0.0.15', ], 'virtualenv_path': 'venv', # }, 'ConfigClass': BuildingConfig, } super(FxDesktopBuild, self).__init__(**buildscript_kwargs) def _pre_config_lock(self, rw_config): """grab buildbot props if we are running this in automation""" super(FxDesktopBuild, self)._pre_config_lock(rw_config) c = self.config if c['is_automation']: # parse buildbot config and add it to self.config self.info("We are running this in buildbot, grab the build props") self.read_buildbot_config() ### if c.get('stage_platform'): platform_for_log_url = c['stage_platform'] if c.get('pgo_build'): platform_for_log_url += '-pgo' # postrun.py uses stage_platform buildbot prop as part of the log url self.set_buildbot_property('stage_platform', platform_for_log_url, write_to_file=True) else: self.fatal("'stage_platform' not determined and is required in your config") # helpers def query_abs_dirs(self): if self.abs_dirs: return self.abs_dirs c = self.config abs_dirs = super(FxDesktopBuild, self).query_abs_dirs() if not c.get('app_ini_path'): self.fatal('"app_ini_path" is needed in your config for this ' 'script.') dirs = { # BuildFactories in factory.py refer to a 'build' dir on the slave. # This contains all the source code/objdir to compile. However, # there is already a build dir in mozharness for every mh run. 
The # 'build' that factory refers to I named: 'src' so # there is a seperation in mh. for example, rather than having # '{mozharness_repo}/build/build/', I have '{ # mozharness_repo}/build/src/' 'abs_src_dir': os.path.join(abs_dirs['abs_work_dir'], 'src'), 'abs_obj_dir': os.path.join(abs_dirs['abs_work_dir'], 'src', self._query_objdir()), 'abs_tools_dir': os.path.join(abs_dirs['abs_work_dir'], 'tools'), 'abs_app_ini_path': c['app_ini_path'] % { 'obj_dir': os.path.join(abs_dirs['abs_work_dir'], 'src', self._query_objdir()) }, 'compare_locales_dir': os.path.join(abs_dirs['abs_work_dir'], 'compare-locales'), } abs_dirs.update(dirs) self.abs_dirs = abs_dirs return self.abs_dirs # Actions {{{2 # read_buildbot_config in BuildingMixin # clobber in BuildingMixin -> PurgeMixin # if Linux config: # reset_mock in BuildingMixing -> MockMixin # setup_mock in BuildingMixing (overrides MockMixin.mock_setup) if __name__ == '__main__': fx_desktop_build = FxDesktopBuild() fx_desktop_build.run_and_exit()
lissyx/build-mozharness
scripts/fx_desktop_build.py
Python
mpl-2.0
6,956
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Custom SQLAlchemy types.""" import netaddr from sqlalchemy import types class IPAddress(types.TypeDecorator): impl = types.String(64) def process_result_value(self, value, dialect): return netaddr.IPAddress(value) def process_bind_param(self, value, dialect): if not isinstance(value, netaddr.IPAddress): raise AttributeError(_("Received type '%(type)s' and value " "'%(value)s'. Expecting netaddr.IPAddress " "type.") % {'type': type(value), 'value': value}) return str(value) class CIDR(types.TypeDecorator): impl = types.String(64) def process_result_value(self, value, dialect): return netaddr.IPNetwork(value) def process_bind_param(self, value, dialect): if not isinstance(value, netaddr.IPNetwork): raise AttributeError(_("Received type '%(type)s' and value " "'%(value)s'. Expecting netaddr.IPNetwork " "type.") % {'type': type(value), 'value': value}) return str(value) class MACAddress(types.TypeDecorator): impl = types.String(64) def process_result_value(self, value, dialect): return netaddr.EUI(value) def process_bind_param(self, value, dialect): if not isinstance(value, netaddr.EUI): raise AttributeError(_("Received type '%(type)s' and value " "'%(value)s'. Expecting netaddr.EUI " "type.") % {'type': type(value), 'value': value}) return str(value)
wolverineav/neutron
neutron/db/sqlalchemytypes.py
Python
apache-2.0
2,356
def osdetect(buildout): import sys import platform import os platforms = ['default'] if sys.platform == 'darwin': platforms.insert(0, 'darwin') mac_ver = platform.mac_ver() if mac_ver[0].startswith('10.5'): platforms.insert(0, 'darwin-leopard') elif mac_ver[0].startswith('10.6'): platforms.insert(0, 'darwin-snowleopard') if sys.maxint > 2147483647: platforms.insert(0, 'darwin-snowleopard-64') elif mac_ver[0].startswith('10.7'): platforms.insert(0, 'darwin-lion') elif mac_ver[0].startswith('10.8'): platforms.insert(0, 'darwin-mountainlion') elif mac_ver[0].startswith('10.9'): platforms.insert(0, 'darwin-mavericks') elif mac_ver[0].startswith('10.10'): platforms.insert(0, 'darwin-yosemite') elif mac_ver[0].startswith('10.11'): platforms.insert(0, 'darwin-elcapitan') elif mac_ver[0].startswith('10.12'): platforms.insert(0, 'darwin-sierra') elif mac_ver[0].startswith('10.13'): platforms.insert(0, 'darwin-highsierra') elif sys.platform == 'linux2': platforms.insert(0, 'linux2') dist, version, name = [x.lower() for x in platform.dist()] platforms.insert(0, '-'.join([sys.platform, dist])) platforms.insert(0, '-'.join([sys.platform, dist, version])) if name: platforms.insert(0, '-'.join([sys.platform, dist, name])) elif platform.machine() == 'x86_64': platforms.insert(0, 'x86_64') if os.path.exists('/usr/lib/i386-linux-gnu'): platforms.insert(0, 'i386-linux-gnu') buildout._logger.debug("Detected these platforms: %s" % ", ".join(platforms)) variants = {} parts = set() for key in buildout.keys(): if ':' not in key: continue part, variant = key.split(':') variants.setdefault(variant, []).append((part, key)) parts.add(part) for platform_name in platforms: for part, key in variants.get(platform_name, []): if part in buildout._raw: continue buildout._raw[part] = buildout._raw[key].copy()
upiq/plonebuild
python/src/osdetect.py
Python
mit
2,246
''' LICENSING ------------------------------------------------- daemoniker: Cross-platform daemonization tools. Copyright (C) 2016 Muterra, Inc. Contributors ------------ Nick Badger badg@muterra.io | badg@nickbadger.com | nickbadger.com This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ------------------------------------------------------ ''' # ############################################### # Boilerplate # ############################################### # Logging shenanigans import logging # Py2.7+, but this is Py3.5.1+ from logging import NullHandler logging.getLogger(__name__).addHandler(NullHandler()) # Control * imports. __all__ = [ 'Daemonizer', 'daemonize', 'SignalHandler1', 'IGNORE_SIGNAL', 'send', 'SIGINT', 'SIGTERM', 'SIGABRT', ] # ############################################### # Library # ############################################### # Submodules from . import exceptions from . 
import utils from ._signals_common import IGNORE_SIGNAL from ._signals_common import send from .exceptions import SIGINT from .exceptions import SIGTERM from .exceptions import SIGABRT # Add in toplevel stuff from .utils import platform_specificker platform_switch = platform_specificker( linux_choice = 'unix', win_choice = 'windows', # Dunno if this is a good idea but might as well try cygwin_choice = None, osx_choice = 'unix', other_choice = 'unix' ) if platform_switch == 'unix': from ._daemonize_unix import Daemonizer from ._daemonize_unix import daemonize from ._signals_unix import SignalHandler1 elif platform_switch == 'windows': from ._daemonize_windows import Daemonizer from ._daemonize_windows import daemonize from ._signals_windows import SignalHandler1 else: raise RuntimeError( 'Your runtime environment is unsupported by daemoniker.' )
Muterra/py_daemoniker
daemoniker/__init__.py
Python
unlicense
2,633
# # Album_Sound_CheckAppDelegate.py # Album Sound Check # # Created by Scott Robertson on 1/17/09. # Copyright __MyCompanyName__ 2009. All rights reserved. # from Foundation import * from AppKit import * class Album_Sound_CheckAppDelegate(NSObject): def applicationDidFinishLaunching_(self, sender): NSLog("Application did finish launching.")
spr/album-sound-check
Album_Sound_CheckAppDelegate.py
Python
gpl-2.0
361
from django.urls import re_path

from . import views

# Routes /vote/<username>/<position> to the vote_for_user view.
# <username> matches word characters only (\w+); <position> greedily matches
# anything up to the end of the URL (.+), so it may contain slashes.
urlpatterns = [re_path(r"^/vote/(?P<username>\w+)/(?P<position>.+)$", views.vote_for_user, name="vote_for_user")]
tjcsl/ion
intranet/apps/nomination/urls.py
Python
gpl-2.0
168
"""Core functions of mtorrentd.""" import os import importlib.util import libtorrent import tempfile import shutil import requests from urllib import parse from sys import exit from time import sleep from .paths import SITE_MODULES def session_login(site, username, password, session): """When login is needed for session.""" payload = { 'username': username, 'password': password } session.post(parse.urljoin(site['url'], site['login_path']), data=payload) return session def load_site_module(site): """Load module to use it as a function.""" user_module = os.path.join(SITE_MODULES['user'], site + '.py') system_module = os.path.join(SITE_MODULES['system'], site + '.py') if os.path.isfile(user_module): spec = importlib.util.spec_from_file_location(site, user_module) elif os.path.isfile(system_module): spec = importlib.util.spec_from_file_location(site, system_module) else: print('Site module not found. Check github for documentation and create a new site module here: %s' %(SITE_MODULES['user'])) exit(73) site_module = importlib.util.module_from_spec(spec) spec.loader.exec_module(site_module) return site_module def write_torrent_to_file(full_torrent_path, torrent_content, download_dir): if os.path.isfile(full_torrent_path): print('Download aborted. 
Torrent file already exists.') try: with open(full_torrent_path, "wb") as f: f.write(torrent_content) print('Torrent added here: ' + full_torrent_path) except PermissionError: print('[ERROR] Denied permission to save torrent here: %s' % download_dir) exit(77) except FileNotFoundError as err: print('[ERROR] Create the directory to store the torrent file in.', err) exit(77) def download_torrent(torrent_link, download_dir, torrent_name=None, session=None) -> None: """Download .torrent from link.""" if torrent_name: full_torrent_path = os.path.join(download_dir, torrent_name + '.torrent') else: torrent_name = torrent_link.split('/') torrent_name = torrent_name[len(torrent_name) - 1] full_torrent_path = os.path.join(download_dir, torrent_name) if session: torrent_file = session.get(torrent_link) else: with requests.Session() as session: torrent_file = session.get(torrent_link) write_torrent_to_file(full_torrent_path, torrent_file.content, download_dir) def download_magnet2torrent(torrent_link, download_dir, torrent_name=None) -> None: """Download magnet link.""" tempdir = tempfile.mkdtemp() libtorrent_session = libtorrent.session() params = { 'url': torrent_link, 'save_path': tempdir, 'storage_mode': libtorrent.storage_mode_t(2), 'paused': False, 'auto_managed': True, 'duplicate_is_error': True } handle = libtorrent_session.add_torrent(params) print("Downloading Metadata (this may take a while)") while (not handle.has_metadata()): try: sleep(1) except KeyboardInterrupt: print("Aborting...") libtorrent_session.pause() print("Cleanup dir " + tempdir) shutil.rmtree(tempdir) exit(0) libtorrent_session.pause() print("Done") if torrent_name: full_torrent_path = os.path.join(download_dir, torrent_name + '.torrent') else: torrent_name = handle.torrent_file().name() full_torrent_path = os.path.join(download_dir, torrent_name + '.torrent') torrent_info = handle.get_torrent_info() torrent_file = libtorrent.create_torrent(torrent_info) write_torrent_to_file(full_torrent_path, 
libtorrent.bencode(torrent_file.generate()), download_dir) print("Saved! Cleaning up dir: " + tempdir) libtorrent_session.remove_torrent(handle) shutil.rmtree(tempdir)
arivarton/multi-torrent-downloader
mtorrentd/core.py
Python
gpl-3.0
3,987
# -*- coding: utf-8 -*- # Generated by Django 1.9.12 on 2017-01-30 19:18 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('shuup', '0029_personcontact_names'), ] operations = [ migrations.AlterField( model_name='order', name='order_date', field=models.DateTimeField(db_index=True, editable=False, verbose_name='order date'), ), migrations.AlterField( model_name='product', name='created_on', field=models.DateTimeField(auto_now_add=True, db_index=True, verbose_name='created on'), ), migrations.AlterField( model_name='shop', name='modified_on', field=models.DateTimeField(auto_now=True, db_index=True, verbose_name='modified on'), ) ]
shoopio/shoop
shuup/core/migrations/0030_add_db_indices.py
Python
agpl-3.0
910
# -*- coding: utf-8 -*- from __future__ import unicode_literals import pytest from rtv.clipboard import copy_linux, copy_osx from rtv.exceptions import ProgramError try: from unittest import mock except ImportError: import mock def test_copy(): with mock.patch('subprocess.Popen') as Popen, \ mock.patch('subprocess.call') as call: # Mock out the subprocess calls p = mock.Mock() p.communicate = mock.Mock() Popen.return_value = p # If the `which` command can't find a program to use call.return_value = 1 # Returns an error code with pytest.raises(ProgramError): copy_linux('test') call.return_value = 0 copy_linux('test') assert Popen.call_args[0][0] == ['xsel', '-b', '-i'] p.communicate.assert_called_with(input='test'.encode('utf-8')) copy_linux('test ❤') p.communicate.assert_called_with(input='test ❤'.encode('utf-8')) copy_osx('test') assert Popen.call_args[0][0] == ['pbcopy', 'w'] p.communicate.assert_called_with(input='test'.encode('utf-8')) copy_osx('test ❤') p.communicate.assert_called_with(input='test ❤'.encode('utf-8'))
michael-lazar/rtv
tests/test_clipboard.py
Python
mit
1,238
# Copyright (C) 2011-2015 by the Free Software Foundation, Inc.
#
# This file is part of GNU Mailman.
#
# GNU Mailman is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# GNU Mailman is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# GNU Mailman.  If not, see <http://www.gnu.org/licenses/>.

"""Test addresses."""

__all__ = [
    'TestAddress',
    ]


import unittest

from mailman.email.validate import InvalidEmailAddressError
from mailman.interfaces.address import ExistingAddressError
from mailman.interfaces.usermanager import IUserManager
from mailman.model.address import Address
from mailman.testing.layers import ConfigLayer
from zope.component import getUtility


class TestAddress(unittest.TestCase):
    """Test addresses."""

    layer = ConfigLayer

    def setUp(self):
        # Register one mixed-case address up front; every test below checks
        # how a subsequent registration collides with it.
        self._usermgr = getUtility(IUserManager)
        self._address = self._usermgr.create_address('FPERSON@example.com')

    def test_invalid_email_string_raises_exception(self):
        # A syntactically invalid address string is rejected outright.
        with self.assertRaises(InvalidEmailAddressError):
            Address('not_a_valid_email_string', '')

    def test_local_part_differs_only_by_case(self):
        # Duplicate detection ignores case in the local part; the error
        # carries the originally registered (mixed-case) address.
        with self.assertRaises(ExistingAddressError) as cm:
            self._usermgr.create_address('fperson@example.com')
        self.assertEqual(cm.exception.address, 'FPERSON@example.com')

    def test_domain_part_differs_only_by_case(self):
        # Duplicate detection ignores case in the domain part too.
        with self.assertRaises(ExistingAddressError) as cm:
            self._usermgr.create_address('fperson@EXAMPLE.COM')
        self.assertEqual(cm.exception.address, 'FPERSON@example.com')

    def test_mixed_case_exact_match(self):
        # An exact byte-for-byte duplicate also raises.
        with self.assertRaises(ExistingAddressError) as cm:
            self._usermgr.create_address('FPERSON@example.com')
        self.assertEqual(cm.exception.address, 'FPERSON@example.com')
khushboo9293/mailman
src/mailman/model/tests/test_address.py
Python
gpl-3.0
2,260
""" As described in http://celery.readthedocs.org/en/latest/django/first-steps-with-django.html """ import os from celery import Celery os.environ.setdefault("DJANGO_SETTINGS_MODULE", "main.settings") app = Celery("{{ cookiecutter.project_name }}") # Using a string here means the worker will not have to # pickle the object when using Windows. app.config_from_object("django.conf:settings", namespace="CELERY") app.autodiscover_tasks()
mitodl/cookiecutter-djangoapp
{{ cookiecutter.project_name }}/main/celery.py
Python
bsd-2-clause
442
# Copyright (c) 2017, Daniele Venzano
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Zoe App shop module."""

import logging
import os
import json

import markdown

from zoe_lib.config import get_conf

log = logging.getLogger(__name__)

ZAPP_MANIFEST_VERSION = 1  # The manifest version this Zoe Shop can understand


class ZAppParameter:
    """A ZApp parameter that should be exposed to the user interface."""
    def __init__(self, param_manifest):
        self.kind = param_manifest['kind']
        self.name = param_manifest['name']
        self.readable_name = param_manifest['readable_name']
        self.description = param_manifest['description']
        self.default = param_manifest['default']
        # Map manifest types onto HTML5 <input> types; unknown types
        # degrade to a plain text box.
        if param_manifest['type'] == 'string':
            self.type = "text"
        elif param_manifest['type'] == 'int':
            self.type = "number"
            self.max = param_manifest['max']
            self.min = param_manifest['min']
            self.step = param_manifest['step']
        else:
            self.type = "text"


class ZApp:
    """A ZApp built from one entry of a shop manifest."""
    def __init__(self, zapp_id, manifest, manifest_index):
        self.id = zapp_id
        self.manifest_index = manifest_index
        zapp = manifest['zapps'][manifest_index]
        self.category = zapp['category']
        self.readable_name = zapp['name']
        self.readable_description_file = zapp['readable_descr']
        self.readable_description = self.read_description()
        self.json_file = zapp['description']
        self.zoe_description = self.parse_json_description()
        # Optional manifest keys, with the same defaults the old if/else
        # chains produced.
        self.labels = zapp.get('labels', [])
        self.parameters = []
        self.parse_parameters(zapp)
        self.logo = zapp.get('logo', 'logo.png')
        self.enabled_for = zapp.get('enabled_for', ["all"])
        self.disabled_for = zapp.get('disabled_for', [])

    def parse_parameters(self, zapp_manifest):
        """Translates the parameters from the manifest into objects."""
        for param in zapp_manifest['parameters']:
            self.parameters.append(ZAppParameter(param))

    def read_description(self):
        """Reads and renders the README.md file.

        FIX: the file handle is now closed via a context manager instead of
        being leaked.
        """
        descr_path = os.path.join(get_conf().zapp_shop_path, self.id, self.readable_description_file)
        with open(descr_path, 'r', encoding='utf-8') as descr_file:
            mdown = descr_file.read()
        return markdown.markdown(mdown, extensions=['markdown.extensions.extra', 'markdown.extensions.codehilite'])

    def parse_json_description(self):
        """Reads the classic json description (closing the file afterwards)."""
        json_path = os.path.join(get_conf().zapp_shop_path, self.id, self.json_file)
        with open(json_path, 'r') as json_file:
            return json.load(json_file)


def zshop_list_apps(role):
    """List the ZApp repos visible to ``role``, grouped by category.

    Roles without full shop access are filtered through each ZApp's
    enabled_for/disabled_for lists.
    """
    dirs = [d for d in os.listdir(get_conf().zapp_shop_path) if os.path.isdir(os.path.join(get_conf().zapp_shop_path, d)) and os.path.exists(os.path.join(get_conf().zapp_shop_path, d, "manifest.json"))]
    zapps = []
    for adir in dirs:
        zapps += zshop_read_manifest(adir)

    zapp_cat = {}
    for zapp in zapps:
        if not role.can_access_full_zapp_shop:
            # Explicit deny wins; otherwise the role (or "all") must be in
            # the enable list.
            if role.name in zapp.disabled_for:
                continue
            if role.name not in zapp.enabled_for and "all" not in zapp.enabled_for:
                continue
        if zapp.category in zapp_cat:
            zapp_cat[zapp.category].append(zapp)
        else:
            zapp_cat[zapp.category] = [zapp]
    return zapp_cat


def zshop_read_manifest(zapp_id):
    """Reads and decodes the manifest file; returns a list of ZApps.

    Returns an empty list if the manifest version is not the one this
    Zoe Shop understands.
    """
    manifest_path = os.path.join(get_conf().zapp_shop_path, zapp_id, "manifest.json")
    # FIX: close the manifest file instead of leaking the handle.
    with open(manifest_path) as manifest_file:
        manifest = json.load(manifest_file)
    if manifest['version'] != ZAPP_MANIFEST_VERSION:
        log.warning("Cannot load ZApp {}, this Zoe understands manifests version {} only".format(zapp_id, ZAPP_MANIFEST_VERSION))
        return []
    zapps = []
    for idx in range(len(manifest['zapps'])):
        zapps.append(ZApp(zapp_id, manifest, idx))
    return zapps


def get_logo(zapp: ZApp):
    """Return the ZApp PNG logo image file contents."""
    logo_path = os.path.join(get_conf().zapp_shop_path, zapp.id, zapp.logo)
    # FIX: close the logo file instead of leaking the handle.
    with open(logo_path, "rb") as logo_file:
        return logo_file.read()
DistributedSystemsGroup/zoe
zoe_api/zapp_shop.py
Python
apache-2.0
4,947
# -*- coding: utf-8 -*-
import unittest
from collections import deque


class Pilha():
    """Pilha LIFO apoiada em um deque.

    Acessos (topo/desempilhar) em pilha vazia levantam PilhaVaziaErro.
    """

    def __init__(self):
        self._lista = deque()

    def __len__(self):
        return len(self._lista)

    def vazia(self):
        # True quando não há elementos armazenados.
        return len(self._lista) == 0

    def topo(self):
        # Consulta o último elemento empilhado sem removê-lo.
        if not self._lista:
            raise PilhaVaziaErro()
        return self._lista[-1]

    def empilhar(self, valor):
        # Insere no final do deque (topo da pilha).
        self._lista.append(valor)

    def desempilhar(self):
        # Remove e devolve o último elemento empilhado.
        if not self._lista:
            raise PilhaVaziaErro()
        return self._lista.pop()


class PilhaVaziaErro(Exception):
    pass


class PilhaTestes(unittest.TestCase):
    def test_topo_lista_vazia(self):
        pilha = Pilha()
        self.assertTrue(pilha.vazia())
        self.assertRaises(PilhaVaziaErro, pilha.topo)

    def test_empilhar_um_elemento(self):
        pilha = Pilha()
        pilha.empilhar('A')
        self.assertFalse(pilha.vazia())
        self.assertEqual('A', pilha.topo())

    def test_empilhar_dois_elementos(self):
        pilha = Pilha()
        pilha.empilhar('A')
        pilha.empilhar('B')
        self.assertFalse(pilha.vazia())
        self.assertEqual('B', pilha.topo())

    def test_desempilhar_pilha_vazia(self):
        pilha = Pilha()
        self.assertRaises(PilhaVaziaErro, pilha.desempilhar)

    def test_desempilhar(self):
        pilha = Pilha()
        entrada = 'ABCDE'
        for caractere in entrada:
            pilha.empilhar(caractere)
        for esperado in reversed(entrada):
            self.assertEqual(esperado, pilha.desempilhar())
trgomes/estrutura-de-dados
aula4/pilha.py
Python
mit
1,658
#encoding: utf-8

import plugnplay
import os
from ..core import Plugin, log
import sys


class IAppLoader(plugnplay.Interface):
    """
    Main interface for implementing an additional WSGI app loader
    """

    def can_load(self, app_path):
        '''
        Return True/False if a custom load is able to load the WSGI app
        that is in tha path passed as a parameter.
        '''
        pass

    def load_app(self, app_path, app_full_name):
        '''
        Return the WSGI application object for tha app in app_path
        @ap_path: Path on disk where the app is located. Already inserted into sys.path.
          This is --app-path + 'app/'
        @app_full_name: Full qualified name for the WSGI application object
        '''
        pass


def load_app(app_path, wsgi_app_full_name):
    # Make the application importable before anything tries to load it.
    if app_path:
        absolute_path = os.path.abspath(os.path.expanduser(app_path))
        log.debug("Adding %s to sys.path" % absolute_path)
        sys.path.append(absolute_path)
    # An explicit --wsgi-app object name bypasses loader discovery entirely.
    if wsgi_app_full_name:
        log.info("Loading WSGI application object: %s" % wsgi_app_full_name)
        return import_object(wsgi_app_full_name)
    # Otherwise ask each registered IAppLoader plugin in turn.
    app_loaders = IAppLoader.implementors()
    log.debug("Found {0} loaders: {1}".format(len(app_loaders), app_loaders))
    for loader in app_loaders:
        if loader.can_load(app_path):
            log.info("Using AppLoader: %s" % loader.__class__.__name__)
            # NOTE(review): absolute_path is unbound here when app_path is
            # falsy — presumably app_path is always set on this code path;
            # confirm against the CLI argument handling.
            return loader.load_app(absolute_path, wsgi_app_full_name)
    raise Exception("No Loader found for app %s and no --wsgi-app option found\n" % app_path)


def import_object(full_object_name):
    '''
    Imports an object from a full qualified name.
    ex: app.module.submodule.objectname, returns an instance of objectname
    '''
    parts = full_object_name.strip().split('.')
    mod = '.'.join(parts[:-1])
    obj_name = parts[-1]
    imported = __import__(mod, fromlist=obj_name)
    return getattr(imported, obj_name)


# Loaders implementation
class PyRoutesLoader(Plugin):
    implements = [IAppLoader]

    def can_load(self, app_path):
        # A pyroutes app is identified by its settings module on disk.
        settings = os.path.join(app_path, 'pyroutes_settings.py')
        return os.path.exists(settings)

    def load_app(self, app_path, app_full_name):
        if app_full_name:
            return import_object(app_full_name)
        dirs = [d for d in os.listdir(app_path) if os.path.isdir(os.path.join(app_path, d))]
        # A normal pyroutes application should have only 3 folders: templates/ tests/ <app-name>/
        # Here we do: "import <app-name>"
        app_dir = filter(lambda d: d not in ('templates', 'tests'), dirs)
        # NOTE(review): indexing the result of filter() works on Python 2
        # only; Python 3 would need list(filter(...)).
        __import__(app_dir[0])
        # This should import all @routes from the app
        __import__(app_dir[0], fromlist='pyroutes_settings')
        import pyroutes
        return pyroutes.application


import djangoloader
daltonmatos/wsgid
wsgid/loaders/__init__.py
Python
bsd-3-clause
2,821
#! /usr/bin/env python

# This file is part of the dvbobjects library.
#
# Copyright © 2005-2013 Lorenzo Pallara l.pallara@avalpa.com
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

from math import floor


def MJD_convert(year, month, day):
    """Convert a calendar date to a Modified Julian Date.

    Uses the DVB-SI date formula (ETSI EN 300 468, Annex C), where ``year``
    is counted from 1900 (e.g. 93 -> 1993, 113 -> 2013).
    """
    # January and February are treated as months 13/14 of the previous year.
    jan_feb = 1 if month in (1, 2) else 0
    year_days = floor((year - jan_feb) * 365.25)
    month_days = floor((month + 1 + jan_feb * 12) * 30.6001)
    return 14956 + day + year_days + month_days
0xalen/opencaster_isdb-tb
libs/dvbobjects/dvbobjects/utils/MJD.py
Python
gpl-2.0
1,057
""" Generate header file with macros defining MicroPython version info. This script works with Python 2.6, 2.7, 3.3 and 3.4. """ from __future__ import print_function import sys import os import datetime import subprocess def get_version_info_from_git(): # Python 2.6 doesn't have check_output, so check for that try: subprocess.check_output subprocess.check_call except AttributeError: return None # Note: git describe doesn't work if no tag is available try: git_tag = subprocess.check_output(["git", "describe", "--dirty", "--always", "--tags"], stderr=subprocess.STDOUT, universal_newlines=True).strip() except subprocess.CalledProcessError as er: if er.returncode == 128: # git exit code of 128 means no repository found return None git_tag = "" except OSError: return None try: git_hash = subprocess.check_output(["git", "rev-parse", "--short", "HEAD"], stderr=subprocess.STDOUT, universal_newlines=True).strip() except subprocess.CalledProcessError: git_hash = "unknown" except OSError: return None try: # Check if there are any modified files. subprocess.check_call(["git", "diff", "--no-ext-diff", "--quiet", "--exit-code"], stderr=subprocess.STDOUT) # Check if there are any staged files. 
subprocess.check_call(["git", "diff-index", "--cached", "--quiet", "HEAD", "--"], stderr=subprocess.STDOUT) except subprocess.CalledProcessError: git_hash += "-dirty" except OSError: return None # Try to extract MicroPython version from git tag if git_tag.startswith("v"): ver = git_tag[1:].split("-")[0].split(".") if len(ver) == 2: ver.append("0") else: ver = ["0", "0", "1"] return git_tag, git_hash, ver def get_version_info_from_docs_conf(): with open(os.path.join(os.path.dirname(sys.argv[0]), "..", "docs", "conf.py")) as f: for line in f: if line.startswith("release = '"): ver = line.strip()[10:].strip("'") git_tag = "v" + ver ver = ver.split(".") if len(ver) == 2: ver.append("0") return git_tag, "<no hash>", ver return None def make_version_header(filename): # Get version info using git, with fallback to docs/conf.py info = get_version_info_from_git() if info is None: info = get_version_info_from_docs_conf() git_tag, git_hash, ver = info # Generate the file with the git and version info file_data = """\ // This file was generated by py/makeversionhdr.py #define MICROPY_GIT_TAG "%s" #define MICROPY_GIT_HASH "%s" #define MICROPY_BUILD_DATE "%s" #define MICROPY_VERSION_MAJOR (%s) #define MICROPY_VERSION_MINOR (%s) #define MICROPY_VERSION_MICRO (%s) #define MICROPY_VERSION_STRING "%s.%s.%s" """ % (git_tag, git_hash, datetime.date.today().strftime("%Y-%m-%d"), ver[0], ver[1], ver[2], ver[0], ver[1], ver[2]) # Check if the file contents changed from last time write_file = True if os.path.isfile(filename): with open(filename, 'r') as f: existing_data = f.read() if existing_data == file_data: write_file = False # Only write the file if we need to if write_file: print("Generating %s" % filename) with open(filename, 'w') as f: f.write(file_data) if __name__ == "__main__": make_version_header(sys.argv[1])
turbinenreiter/micropython
py/makeversionhdr.py
Python
mit
3,555
import unittest

from tree import TreeNode


class Solution:
    def maxDepth(self, root):
        """
        Return the number of nodes on the longest root-to-leaf path.

        :type root: TreeNode
        :rtype: int
        """
        if not root:
            return 0
        # Depth is one more than the deeper of the two subtrees.
        left_depth = self.maxDepth(root.left)
        right_depth = self.maxDepth(root.right)
        return 1 + max(left_depth, right_depth)


class Test(unittest.TestCase):
    def test_serialize(self):
        self._test([1, 1, 1, None, 1, 1, None, None, None, 1, None], 4)

    def _test(self, vals, expected):
        root = TreeNode.from_array(vals)
        actual = Solution().maxDepth(root)
        self.assertEqual(expected, actual)


if __name__ == '__main__':
    unittest.main()
chrisxue815/leetcode_python
problems/test_0104_recursive.py
Python
unlicense
631
#
# This is an extension to the Nautilus file manager to allow better
# integration with the Subversion source control system.
#
# Copyright (C) 2006-2008 by Jason Field <jason@jasonfield.com>
# Copyright (C) 2007-2008 by Bruce van der Kooij <brucevdkooij@gmail.com>
# Copyright (C) 2008-2010 by Adam Plumb <adamplumb@gmail.com>
#
# RabbitVCS is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# RabbitVCS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with RabbitVCS;  If not, see <http://www.gnu.org/licenses/>.
#

import os
import gettext as _gettext
from locale import getdefaultlocale

# Hack to make RabbitVCS win in the battle against TortoiseHg
# (neutralizes mercurial's demand-import machinery if it is installed;
# failures are deliberately ignored because mercurial may be absent).
try:
    import mercurial.demandimport
    mercurial.demandimport.enable = lambda: None
except Exception, e:
    pass

version = "0.15.2"
APP_NAME = "RabbitVCS"
TEMP_DIR_PREFIX = "rabbitvcs-"
# Locale files ship next to the package; fall back to the system location
# when running from an installed copy without bundled locales.
LOCALE_DIR = "%s/locale" % os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
if not os.path.exists(LOCALE_DIR):
    LOCALE_DIR = "/usr/share/locale"
WEBSITE = "http://www.rabbitvcs.org/"

# Candidate translation languages: $LANGUAGE entries first, then the
# default locale, matching gettext's usual precedence.
langs = []
language = os.environ.get('LANGUAGE', None)
if language:
    langs += language.split(":")
if getdefaultlocale()[0] != None:
    langs += [getdefaultlocale()[0]]

_gettext.bindtextdomain(APP_NAME, LOCALE_DIR)
_gettext.textdomain(APP_NAME)

# Module-level translation object; fallback=True keeps untranslated
# strings working when no catalog matches.
gettext = _gettext.translation(APP_NAME, LOCALE_DIR, languages=langs, fallback=True)


def package_name():
    """
    Report the application name in a form appropriate for building
    package files.
    """
    return APP_NAME.lower()


def package_version():
    """
    Report the version number of the application, minus any name extensions.
    """
    app_version = version.split('-')[0]
    # TODO: sanity-check app_version: make sure it's just digits and dots
    return app_version


def package_identifier():
    """
    Return a package identifier suitable for use in a package file.
    """
    return "%s-%s" % (package_name(), package_version())


def package_prefix():
    """
    Return the prefix of the local RabbitVCS installation
    """
    try:
        from rabbitvcs.buildinfo import rabbitvcs_prefix
        return rabbitvcs_prefix
    except ImportError, e:
        # buildinfo is generated at install time; empty prefix when absent.
        return ""


def get_icon_path():
    """
    Return the path to the icon folder
    """
    try:
        from rabbitvcs.buildinfo import icon_path
        return icon_path
    except ImportError, e:
        # Fall back to the in-tree icon directory when not installed.
        return "%s/data/icons/hicolor" % os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
CloCkWeRX/rabbitvcs
rabbitvcs/__init__.py
Python
gpl-2.0
2,964
# -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@

from rogerthat.models.news import NewsItem
from rogerthat.rpc.service import ServiceApiException

# Each exception gets a unique code of BASE_CODE_NEWS + n. The offsets here
# run 1-9, 11, 13, 15; the gaps (10, 12, 14) presumably belonged to removed
# exceptions — do not reuse them without checking API consumers.


class NewsNotFoundException(ServiceApiException):
    # Code BASE_CODE_NEWS + 1: unknown news item id.
    def __init__(self, news_id):
        message = u'News with id %d not found' % news_id
        super(NewsNotFoundException, self).__init__(self.BASE_CODE_NEWS + 1, message)


class CannotUnstickNewsException(ServiceApiException):
    # Code BASE_CODE_NEWS + 2.
    def __init__(self):
        message = u'Cannot unstick news'
        super(CannotUnstickNewsException, self).__init__(self.BASE_CODE_NEWS + 2, message)


class TooManyNewsButtonsException(ServiceApiException):
    # Code BASE_CODE_NEWS + 3: at most 3 action buttons allowed.
    def __init__(self):
        message = u'Too many news buttons. Maximum is 3 buttons, when flags are set to 0.'
        super(TooManyNewsButtonsException, self).__init__(self.BASE_CODE_NEWS + 3, message)


class CannotChangePropertyException(ServiceApiException):
    # Code BASE_CODE_NEWS + 4: property is immutable after publishing.
    def __init__(self, property_name):
        message = u'Property \'%s\' cannot be changed after publishing the news item.' % property_name
        super(CannotChangePropertyException, self).__init__(self.BASE_CODE_NEWS + 4, message)


class MissingNewsArgumentException(ServiceApiException):
    # Code BASE_CODE_NEWS + 5: a required parameter was not supplied.
    def __init__(self, param):
        message = u'Parameter %s is missing' % param
        super(MissingNewsArgumentException, self).__init__(self.BASE_CODE_NEWS + 5, message)


class InvalidNewsTypeException(ServiceApiException):
    # Code BASE_CODE_NEWS + 6: type must be one of NewsItem.TYPES.
    def __init__(self, news_type):
        message = u'News type %s is not valid. Allowed types are %s' % (news_type, u', '.join(NewsItem.TYPES))
        super(InvalidNewsTypeException, self).__init__(self.BASE_CODE_NEWS + 6, message)


class NoPermissionToNewsException(ServiceApiException):
    # Code BASE_CODE_NEWS + 7.
    def __init__(self, requesting_user):
        message = u'You (%s) don\'t have permission to this news item.' % requesting_user
        super(NoPermissionToNewsException, self).__init__(self.BASE_CODE_NEWS + 7, message)


class ValueTooLongException(ServiceApiException):
    # Code BASE_CODE_NEWS + 8: property value exceeds its length limit.
    def __init__(self, prop, max_length):
        message = 'The value of the property \'%s\' is too long. Only %d characters are allowed.' % (prop, max_length)
        super(ValueTooLongException, self).__init__(self.BASE_CODE_NEWS + 8, message)


class DemoServiceException(ServiceApiException):
    # Code BASE_CODE_NEWS + 9: demo services may only target demo apps.
    def __init__(self, app_id):
        message = 'A demo service may only publish news in demo apps. %s is not a demo app.' % app_id
        super(DemoServiceException, self).__init__(self.BASE_CODE_NEWS + 9, message)


class InvalidScheduledTimestamp(ServiceApiException):
    # Code BASE_CODE_NEWS + 11.
    def __init__(self):
        message = u'Scheduled timestamp must be in the future'
        super(InvalidScheduledTimestamp, self).__init__(self.BASE_CODE_NEWS + 11, message)


class EmptyActionButtonCaption(ServiceApiException):
    # Code BASE_CODE_NEWS + 13.
    def __init__(self):
        message = u'Action button caption is empty'
        super(EmptyActionButtonCaption, self).__init__(self.BASE_CODE_NEWS + 13, message)


class InvalidActionButtonFlowParamsException(ServiceApiException):
    # Code BASE_CODE_NEWS + 15: flow_params must be valid JSON.
    def __init__(self, button):
        message = u'This flow_params of action button %s must be parseable as json' % button
        super(InvalidActionButtonFlowParamsException, self).__init__(self.BASE_CODE_NEWS + 15, message)
our-city-app/oca-backend
src/rogerthat/exceptions/news.py
Python
apache-2.0
3,868
#!/usr/bin/env python import sys, os from glob import glob try: import setuptools except ImportError: from ez_setup import use_setuptools use_setuptools() from setuptools import setup NAME = 'msgpack-numpy' VERSION = '0.3.6' AUTHOR = 'Lev Givon' AUTHOR_EMAIL = 'lev@columbia.edu' URL = 'https://github.com/lebedov/msgpack-numpy' DESCRIPTION = 'Numpy data serialization using msgpack' LONG_DESCRIPTION = DESCRIPTION DOWNLOAD_URL = URL LICENSE = 'BSD' CLASSIFIERS = [ 'Development Status :: 3 - Alpha', 'Intended Audience :: Developers', 'Intended Audience :: Science/Research', 'License :: OSI Approved :: BSD License', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Topic :: Scientific/Engineering', 'Topic :: Software Development'] if __name__ == "__main__": if os.path.exists('MANIFEST'): os.remove('MANIFEST') setup( name = NAME, version = VERSION, author = AUTHOR, author_email = AUTHOR_EMAIL, license = LICENSE, classifiers = CLASSIFIERS, description = DESCRIPTION, long_description = LONG_DESCRIPTION, url = URL, py_modules = ['msgpack_numpy'], install_requires = ['numpy', 'msgpack-python>=0.3.0'] )
pombredanne/msgpack-numpy
setup.py
Python
bsd-3-clause
1,407
#!/usr/bin/python3 # get the filename from command line import sys file_in = sys.argv[1] # imports import listener myListener = listener.Listener() myListener.getAudioData(file_in) myListener.calculateStats() myListener.printStats() #print('comparing against "zero" file that is as low noise as possible') #myListener.getAudioData('zero.wav') #myListener.calculateStats() #myListener.printStats()
austinmroczek/pauser
testStats.py
Python
mit
402
# Copyright (C) 2010-2011 Richard Lincoln # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to # deal in the Software without restriction, including without limitation the # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or # sell copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. """An extension to the Core and Topology package that models information on the electrical characteristics of Transmission and Distribution networks. This package is used by network applications such as State Estimation, Load Flow and Optimal Power Flow. """ from CIM14.ENTSOE.StateVariables.Wires.ShuntCompensator import ShuntCompensator from CIM14.ENTSOE.StateVariables.Wires.TapChanger import TapChanger nsURI = "http://iec.ch/TC57/2009/CIM-schema-cim14?profile=http://iec.ch/TC57/2007/profile#Wires" nsPrefix = "cimWires"
rwl/PyCIM
CIM14/ENTSOE/StateVariables/Wires/__init__.py
Python
mit
1,629
def check_all_rows(grid):
    """Return True if no row of *grid* contains a duplicate non-'.' value."""
    for row in grid:
        seen = set()
        for cell in row:
            if cell != '.':
                if cell in seen:
                    return False
                seen.add(cell)
    return True


def check_all_column(grid):
    """Return True if no column of *grid* contains a duplicate non-'.' value."""
    size = len(grid)
    for col in range(size):
        seen = set()
        for row in range(size):
            cell = grid[row][col]
            if cell != '.':
                if cell in seen:
                    return False
                seen.add(cell)
    return True


def create_sub_grid(cells):
    """Return True if the iterable *cells* holds no duplicate non-'.' value."""
    seen = set()
    for cell in cells:
        if cell != '.':
            if cell in seen:
                return False
            seen.add(cell)
    return True


def check_sub_array(grid):
    """Return True if every 3x3 sub-grid of the 9x9 *grid* is duplicate-free.

    The original built an intermediate nested representation of the
    sub-grids that was never used and printed it for debugging; that dead
    code and the stray print() are removed here.
    """
    for row in range(0, 9, 3):
        for col in range(0, 9, 3):
            block = (grid[r][c]
                     for r in range(row, row + 3)
                     for c in range(col, col + 3))
            if not create_sub_grid(block):
                return False
    return True


def sudoku2(grid):
    """Return True if *grid* is a valid (possibly partial) 9x9 Sudoku board.

    '.' cells are treated as empty and ignored by all three checks.
    """
    return check_all_column(grid) and check_all_rows(grid) and check_sub_array(grid)
emirot/codefights
interviewPractice/sudoku2.py
Python
apache-2.0
1,688
# Copyright 2017 Kensho Technologies, Inc.
"""Unit tests for grift's custom schematics property types."""
import unittest

from schematics.exceptions import ConversionError, ValidationError
from schematics.types import StringType, IntType

from grift.property_types import DictType, ListType, HTTPType


class TestDictType(unittest.TestCase):
    # DictType accepts native dicts or JSON-object strings.

    def test_dict(self):
        # A native dict passes through unchanged.
        d = {'a': 1, 'b': ['one', 'two']}
        self.assertDictEqual(d, DictType().to_native(d))

    def test_string_dict(self):
        # A JSON object string is parsed into a dict.
        d = '{"a": 1, "b": ["one", "two"]}'
        self.assertDictEqual({'a': 1, 'b': ['one', 'two']}, DictType().to_native(d))

    def test_string_list(self):
        # even if you can json.loads a string, may not be a dict
        with self.assertRaises(ConversionError):
            DictType().to_native('[1, 2, 3]')


class TestListType(unittest.TestCase):
    # ListType splits delimited strings and optionally converts each member.

    def test_untyped(self):
        # Without a member_type, items are passed through unconverted.
        untyped_list = ListType(member_type=None)

        native = untyped_list.to_native([1, 'a', None])
        self.assertEqual(native, [1, 'a', None])

        # Strings are split on the default '|' delimiter; empty trailing
        # segment is preserved.
        native = untyped_list.to_native('1|a|')
        self.assertEqual(native, ['1', 'a', ''])

        native = untyped_list.to_native(['a', [1, 2]])
        self.assertEqual(native, ['a', [1, 2]])

    def test_delim(self):
        untyped_list = ListType(member_type=None, string_delim='#')

        # test specifying another delimiter
        native = untyped_list.to_native('a#b#c')
        self.assertEqual(native, ['a', 'b', 'c'])

        # test consecutive delimiters/whitespace padding
        native = untyped_list.to_native(' a## b #c ')
        self.assertEqual(native, [' a', '', ' b ', 'c '])

    def test_typed(self):
        typed_list = ListType(member_type=StringType())

        # check good conversion
        native = typed_list.to_native([1, 'a'])
        self.assertEqual(native, ['1', 'a'])

        native = typed_list.to_native('1|a')
        self.assertEqual(native, ['1', 'a'])

        # test conversion failure
        with self.assertRaises(ConversionError):
            typed_list.to_native(['a', [1, 2]])

    def test_nested(self):
        # Nested lists: the inner list splits on '#', the outer on '|'.
        inner_list = ListType(StringType(), string_delim='#')
        nested_list = ListType(member_type=inner_list, string_delim='|')

        # test conversion of inner list
        native = nested_list.to_native([[1, 2], [3, 4, 5], ['abc']])
        self.assertEqual(native, [['1', '2'], ['3', '4', '5'], ['abc']])

        native = nested_list.to_native('1#2|3#4#5|abc')
        self.assertEqual(native, [['1', '2'], ['3', '4', '5'], ['abc']])

        with self.assertRaises(ConversionError):
            nested_list.to_native([['a', 'b'], [{'a': 1}]])

    def test_validate_length(self):
        list_type = ListType(min_length=2, max_length=4)

        # test edges
        list_type.validate([1, 2])
        list_type.validate([1, 2, 3, 4])

        with self.assertRaises(ValidationError):
            list_type.validate([1])

        with self.assertRaises(ValidationError):
            list_type.validate([1, 2, 3, 4, 5])

        with self.assertRaises(ValidationError):
            list_type.validate(None)

    def test_validate_member_type(self):
        list_type = ListType(member_type=IntType(choices=[1, 2]))

        list_type.validate([1, 2])

        with self.assertRaises(ConversionError):
            list_type.validate(['a', 'b'])

        with self.assertRaises(ValidationError):
            list_type.validate([1, 3])


class TestHTTPType(unittest.TestCase):
    # HTTPType validates that a URL responds.
    # NOTE(review): this performs real network requests — confirm this is
    # acceptable in CI before relying on it.

    def test_http(self):
        network_type = HTTPType(max_tries=2)
        network_type.validate('http://google.com')

        with self.assertRaises(ValidationError):
            network_type.validate('http://bad_http_url')
cranti/grift
grift/tests/test_property_type.py
Python
apache-2.0
3,673
# Render metal and gas particle clouds from an MMDLab data file with Mayavi.
# Usage: draw_points.py <datafile>
# NOTE: Python 2 source (print statements, xrange).
import numpy
import sys
from mmdlab.datareader import readfile_metgas_boxes
from mmdlab.utils import showable
from mmdlab.utils import saveimg
from mmdlab.utils import offscreen
from mmdlab.utils import init_mlab_scene
from mayavi import mlab
import time
import pylab
from numba import jit
from numba import float64, int8

print "Initing scene"

def rotate(deg):
    # Spin the camera through `deg` one-degree azimuth steps, saving one
    # frame per step into ./advid/ (currently unused; see the commented-out
    # rotate(360) call below).
    for rot in xrange(deg):
        mlab.view(azimuth=rot)
        print "saving to ./advid/{num:03d}.png".format(num=rot)
        mlab.savefig("./advid/{num:03d}.png".format(num=rot))

print "Reading data"
fig = mlab.figure('Viz', size=(1366,768))
#for i,f in enumerate(sys.argv[1:]):
#    i=i+74
# m = metal particles, g = gas particles, read from the file named on argv.
# NOTE(review): assumes each has .x/.y/.z coordinate arrays and a .d size
# attribute — confirm against readfile_metgas_boxes.
m,g = readfile_metgas_boxes(sys.argv[1])
print "Drawing metal"
# Metal rendered as white points; gas as blue points.
mlab.points3d(m.x, m.y, m.z, mode="point", scale_mode="none",scale_factor=m.d, colormap="black-white" ,color=(1,1,1))
print "Drawing gas"
mlab.points3d(g.x, g.y, g.z, mode="point", scale_mode="none",scale_factor=g.d, colormap="cool",color=(0.,0.,1))
#mlab.show()
#rotate(360)
#mlab.view(0.0, 90.0, 40.432823181152344, [ 20.02327204, 20.03512955, 14.38742304])
#mlab.view(azimuth=0,elevation=90)
# print mlab.view()
#mlab.savefig("./advid/{num:05d}.png".format(num=i))
#mlab.clf()
mlab.show()
detorto/mdvis
src/draw_points.py
Python
mit
1,207
""" Python 'utf-32' Codec """ import codecs, sys ### Codec APIs encode = codecs.utf_32_encode def decode(input, errors='strict'): return codecs.utf_32_decode(input, errors, True) class IncrementalEncoder(codecs.IncrementalEncoder): def __init__(self, errors='strict'): codecs.IncrementalEncoder.__init__(self, errors) self.encoder = None def encode(self, input, final=False): if self.encoder is None: result = codecs.utf_32_encode(input, self.errors)[0] if sys.byteorder == 'little': self.encoder = codecs.utf_32_le_encode else: self.encoder = codecs.utf_32_be_encode return result return self.encoder(input, self.errors)[0] def reset(self): codecs.IncrementalEncoder.reset(self) self.encoder = None def getstate(self): # state info we return to the caller: # 0: stream is in natural order for this platform # 2: endianness hasn't been determined yet # (we're never writing in unnatural order) return (2 if self.encoder is None else 0) def setstate(self, state): if state: self.encoder = None else: if sys.byteorder == 'little': self.encoder = codecs.utf_32_le_encode else: self.encoder = codecs.utf_32_be_encode class IncrementalDecoder(codecs.BufferedIncrementalDecoder): def __init__(self, errors='strict'): codecs.BufferedIncrementalDecoder.__init__(self, errors) self.decoder = None def _buffer_decode(self, input, errors, final): if self.decoder is None: (output, consumed, byteorder) = \ codecs.utf_32_ex_decode(input, errors, 0, final) if byteorder == -1: self.decoder = codecs.utf_32_le_decode elif byteorder == 1: self.decoder = codecs.utf_32_be_decode elif consumed >= 4: raise UnicodeError("UTF-32 stream does not start with BOM") return (output, consumed) return self.decoder(input, self.errors, final) def reset(self): codecs.BufferedIncrementalDecoder.reset(self) self.decoder = None def getstate(self): # additonal state info from the base class must be None here, # as it isn't passed along to the caller state = codecs.BufferedIncrementalDecoder.getstate(self)[0] # additional 
state info we pass to the caller: # 0: stream is in natural order for this platform # 1: stream is in unnatural order # 2: endianness hasn't been determined yet if self.decoder is None: return (state, 2) addstate = int((sys.byteorder == "big") != (self.decoder is codecs.utf_32_be_decode)) return (state, addstate) def setstate(self, state): # state[1] will be ignored by BufferedIncrementalDecoder.setstate() codecs.BufferedIncrementalDecoder.setstate(self, state) state = state[1] if state == 0: self.decoder = (codecs.utf_32_be_decode if sys.byteorder == "big" else codecs.utf_32_le_decode) elif state == 1: self.decoder = (codecs.utf_32_le_decode if sys.byteorder == "big" else codecs.utf_32_be_decode) else: self.decoder = None class StreamWriter(codecs.StreamWriter): def __init__(self, stream, errors='strict'): self.encoder = None codecs.StreamWriter.__init__(self, stream, errors) def reset(self): codecs.StreamWriter.reset(self) self.encoder = None def encode(self, input, errors='strict'): if self.encoder is None: result = codecs.utf_32_encode(input, errors) if sys.byteorder == 'little': self.encoder = codecs.utf_32_le_encode else: self.encoder = codecs.utf_32_be_encode return result else: return self.encoder(input, errors) class StreamReader(codecs.StreamReader): def reset(self): codecs.StreamReader.reset(self) try: del self.decode except AttributeError: pass def decode(self, input, errors='strict'): (object, consumed, byteorder) = \ codecs.utf_32_ex_decode(input, errors, 0, False) if byteorder == -1: self.decode = codecs.utf_32_le_decode elif byteorder == 1: self.decode = codecs.utf_32_be_decode elif consumed>=4: raise UnicodeError("UTF-32 stream does not start with BOM") return (object, consumed) ### encodings module API def getregentry(): return codecs.CodecInfo( name='utf-32', encode=encode, decode=decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, )
Microvellum/Fluid-Designer
win64-vc/2.78/python/lib/encodings/utf_32.py
Python
gpl-3.0
5,128
from itertools import chain from django.test import Client, TestCase from django.core.urlresolvers import reverse from users.models import User from problems.models import Problem from courses.models import ProblemSet, Course # Create your tests here. class BasicViewsTestCase(TestCase): fixtures = [] def setUp(self): self.user = User.objects.create_user( username='gregor', email='gregor@jerse.info', password='gregor') self.other_user = User.objects.create_user( username='sonja', email='sonja@jerse.info', password='sonja') self.course = Course.objects.create() prob_set = ProblemSet.objects.create(course=self.course) visible_prob_set = ProblemSet.objects.create(course=self.course, visible=True) problem = Problem.objects.create(problem_set=prob_set) visible_problem = Problem.objects.create(problem_set=visible_prob_set) self.views = { 'public': [ ('login', dict()), ('terms_of_service', dict()), ('help', dict()), ('help_students', dict()), ('help_teachers', dict()), ], 'authenticated': [ ('homepage', dict()), ('problem_set_detail', {'problem_set_pk': visible_prob_set.pk}), ('course_detail', {'course_pk': self.course.pk}), ('problem_attempt_file', {'problem_pk': visible_problem.pk}), ('problem_set_attempts', {'problem_set_pk': visible_prob_set.pk}), ('problem_solution', {'problem_pk': visible_problem.pk, 'user_pk': self.user.pk}), ], 'student': [ ], 'teacher_redirect': [ ('problem_move', {'problem_pk': problem.pk, 'shift': 1}), ('problem_move', {'problem_pk': problem.pk, 'shift': -1}), ], 'teacher': [ ('problem_edit_file', {'problem_pk': problem.pk}), ('problem_set_edit', {'problem_set_pk': visible_problem.pk}), ('problem_set_detail', {'problem_set_pk': prob_set.pk}), ('problem_attempt_file', {'problem_pk': problem.pk}), ('problem_set_attempts', {'problem_set_pk': prob_set.pk}), ('problem_solution', {'problem_pk': problem.pk, 'user_pk': self.user.pk}), ('problem_solution', {'problem_pk': visible_problem.pk, 'user_pk': self.other_user.pk}), ('problem_move', {'problem_pk': 
visible_problem.pk, 'shift': 1}), ('problem_move', {'problem_pk': visible_problem.pk, 'shift': -1}), ('problem_set_progress', {'problem_set_pk': prob_set.pk}), ] } self.default_redirect_view_name = 'login' self.client = Client() def login(self): self.assertTrue(self.client.login(username='gregor', password='gregor'), "Login failed") def logout(self): self.client.logout() def assertCode(self, view, response, code): message = "{2}: expected {1}, got {0}.".format(response.status_code, code, view) if response.status_code == 302: message = message[:-1] + ' (redirects to {0}).'.format(response.url) self.assertEqual(response.status_code, code, message) def assertRedirect(self, view, kwargs, redirect_view_name=None): url = reverse(view, kwargs=kwargs) response = self.client.get(url) self.assertCode(view, response, 302) if redirect_view_name is not None: self.assertRedirects(response, '{0}?next={1}'.format(reverse(redirect_view_name), url)) return response def assertOK(self, view, kwargs): response = self.client.get(reverse(view, kwargs=kwargs)) self.assertCode(view, response, 200) def assertDenied(self, view, kwargs): response = self.client.get(reverse(view, kwargs=kwargs)) self.assertCode(view, response, 403) def testUnauthenticated(self): """ Unauthenticated user must be redirected to login page except for public pages. """ public_views = [view for view, args in self.views['public']] for view, args in chain.from_iterable(list(self.views.values())): if view not in public_views: self.assertRedirect(view, args, 'login') else: self.assertOK(view, args) def testPublicAuthenticated(self): """ Authenticated user should receive status 200 on public pages. """ try: self.login() for view, args in self.views['public'] + self.views['authenticated']: self.assertOK(view, args) finally: self.logout() def testPrivateAuthenticated(self): """ Authenticated user should receive 403 (denied) on non-public pages. 
""" try: self.login() public_views = [view for view, _ in self.views['public'] + self.views['authenticated']] for view, args in chain.from_iterable(list(self.views.values())): if view not in public_views: self.assertDenied(view, args) finally: self.logout() def testStudent(self): """ Student should receive 200 on views allowed to students. Students should receive 403 otherwise. """ try: self.login() denied = self.views['teacher'] + self.views['teacher_redirect'] for view, args in denied + self.views['student']: self.assertDenied(view, args) self.course.enroll_student(self.user) for view, args in self.views['student']: self.assertOK(view, args) for view, args in denied: self.assertDenied(view, args) finally: self.logout() self.course.unenroll_student(self.user) def testTeacher(self): """ Teacher should always receive 200 or 303 (redirect). """ try: self.login() self.course.teachers.add(self.user) redirect_views = [view for view, args in self.views['teacher_redirect']] for view, args in chain.from_iterable(list(self.views.values())): if view not in redirect_views: print(("OK: " + view)) self.assertOK(view, args) else: print(("Redirect: " + view)) self.assertRedirect(view, args) finally: self.logout()
ul-fmf/projekt-tomo
web/web/tests.py
Python
agpl-3.0
6,921
'''
Created on Jul 18, 2011

@author: andi

Shared helpers for the mindmade XBMC add-ons: logging, notifications,
HTML entity decoding, HTTP fetching, persistent key/value storage and an
anonymous usage ping.  NOTE: Python 2 source (urllib2, unicode, file()).
'''
__all__ = [ "log", "notify", "htmldecode", "fetchHttp", "getUID",
            "getPersistent", "setPersistent", "sayHi"]

import os, re, time
import threading
import urllib, urllib2, HTMLParser
import datetime
import uuid
import xbmc, xbmcaddon
import simplejson as json

addon = xbmcaddon.Addon()
# Log file and persistent data store live inside the add-on's own directory.
LOGFILE = os.path.join( addon.getAddonInfo('path'), "resources", "log.txt");
DATAFILE = os.path.join( addon.getAddonInfo('path'), "resources", "data.json");

# Fallback replacements for hex numeric entities that HTMLParser.unescape
# leaves behind (German umlauts and the en-dash).
# NOTE(review): "C4" maps to u"\xE4" (lowercase ae-umlaut) while the other
# uppercase codes map to uppercase — possibly intended u"\xC4"; confirm.
entitydict = {  "E4": u"\xE4",
                "F6": u"\xF6",
                "FC": u"\xFC",
                "C4": u"\xE4",
                "D6": u"\xF6",
                "DC": u"\xDC",
                "2013": u"\u2013"}

def log( msg):
    # Append a timestamped line to the add-on log file and mirror it to the
    # XBMC log at NOTICE level.
    msg = msg.encode( "latin-1")
    logf = open( LOGFILE, "a")
    logf.write( "%s: " % datetime.datetime.now().strftime( "%Y-%m-%d %I:%M:%S"))
    logf.write( msg)
    logf.write( '\n')
    logf.close()
    xbmc.log("### %s" % msg, level=xbmc.LOGNOTICE)

def notify( title, message):
    # Show a popup notification inside XBMC.
    xbmc.executebuiltin("XBMC.Notification("+title+","+message+")")

def htmldecode( s):
    # Unescape HTML entities in `s`; afterwards replace the hex entities
    # listed in entitydict that unescape() did not handle.
    try:
        h = HTMLParser.HTMLParser()
        s = h.unescape( s)
        for k in entitydict.keys():
            s = s.replace( "&#x" + k + ";", entitydict[k])
    except UnicodeDecodeError:
        pass
    return s

def fetchHttp( url, args={}, hdrs={}, post=False):
    # Fetch `url` via GET or POST with a browser-like User-Agent and return
    # the response body, decoded with the charset from the Content-Type
    # header when one is present (otherwise returned as raw bytes).
    # NOTE(review): mutable default arguments (args={}, hdrs={}) are shared
    # across calls, and hdrs is mutated below — confirm this is intended.
    log( "fetchHttp(%s): %s" % ("POST" if post else "GET", url))
    if args:
        log( "args-keys: %s" % args.keys())
    hdrs["User-Agent"] = "Mozilla/5.0 (X11; Linux i686; rv:5.0) Gecko/20100101 Firefox/5.0"
    if post:
        req = urllib2.Request( url, urllib.urlencode( args), hdrs)
    else:
        url = url + "?" + urllib.urlencode( args)
        req = urllib2.Request( url, None, hdrs)
    response = urllib2.urlopen( req)
    encoding = re.findall("charset=([a-zA-Z0-9\-]+)", response.headers['content-type'])
    text = response.read()
    if len(encoding):
        responsetext = unicode( text, encoding[0] );
    else:
        responsetext = text
    response.close()
    return responsetext

def getPersistent( key, default = None):
    # Return a value previously stored with setPersistent(), or `default`
    # when the data file or the key does not exist.
    if not os.path.exists( DATAFILE):
        return default
    data = json.load( file( DATAFILE, "r"))
    if key in data.keys():
        return data[key]
    return default

def setPersistent( key, value):
    # Store a JSON-serializable `value` under `key` in the add-on data file,
    # merging with any existing content.
    if os.path.exists( DATAFILE):
        data = json.load( file( DATAFILE, "r"))
    else:
        data = dict();
    data[key] = value
    json.dump( data, file( DATAFILE, "w"))

def getUID():
    # Return a persistent per-installation UUID, generating and storing one
    # on first use.
    uid = getPersistent( "uid")
    if not uid:
        uid = str( uuid.uuid1())
        setPersistent( "uid", uid)
    return uid

def _sendhome( data):
    # Send a usage record to the author's JSON-RPC endpoint; runs in the
    # daemon thread started by sayHi().
    from jsonrpc import ServiceProxy
    homeurl = "http://www.mindmade.org/~andi/research/python/server.py"
    proxy = ServiceProxy( homeurl)
    result = proxy.log( data)
    log( "result %s" % result)

def sayHi():
    # Anonymous usage ping: at most once per 6-hour "session", bump the
    # usage counter and report (uid, addon id, version, count) in a
    # background daemon thread so startup is never blocked.
    HI_PERIOD = 60 * 60 * 6 # actions within 6 hours are treated as one session
    lastusage = getPersistent( "lastusage", 0)
    now = int( time.time())
    if now - lastusage > HI_PERIOD:
        usagecount = getPersistent( "usagecount", 0)
        usagecount += 1
        uid = getUID()
        addonid = addon.getAddonInfo( "id")
        version = addon.getAddonInfo( "version")
        logparams = { "userid": uid,
                      "addon": addonid,
                      "version": version,
                      "usagecount": usagecount}
        t = threading.Thread( target=_sendhome, args=(logparams,))
        t.setDaemon( True)
        t.start()
        setPersistent( "lastusage", now)
        setPersistent( "usagecount", usagecount)
skandi/xbmc-addon-mindmadetools
lib/mindmade.py
Python
gpl-3.0
3,544
from __future__ import absolute_import, division, print_function import os from bcolz import ctable, carray import numpy as np from toolz import keyfilter import datashape from datashape import discover import shutil from ..numpy_dtype import dshape_to_numpy from ..append import append from ..convert import convert, ooc_types from ..resource import resource from ..drop import drop from ..chunks import chunks keywords = ['cparams', 'dflt', 'expectedlen', 'chunklen', 'rootdir'] @discover.register((ctable, carray)) def discover_bcolz(c, **kwargs): return datashape.from_numpy(c.shape, c.dtype) @append.register((ctable, carray), np.ndarray) def numpy_append_to_bcolz(a, b, **kwargs): a.append(b) a.flush() return a @append.register((ctable, carray), object) def numpy_append_to_bcolz(a, b, **kwargs): return append(a, convert(chunks(np.ndarray), b, **kwargs), **kwargs) @convert.register(ctable, np.ndarray, cost=2.0) def convert_numpy_to_bcolz_ctable(x, **kwargs): return ctable(x, **keyfilter(keywords.__contains__, kwargs)) @convert.register(carray, np.ndarray, cost=2.0) def convert_numpy_to_bcolz_carray(x, **kwargs): return carray(x, **keyfilter(keywords.__contains__, kwargs)) @convert.register(np.ndarray, (carray, ctable), cost=1.0) def convert_bcolz_to_numpy(x, **kwargs): return x[:] @append.register((carray, ctable), chunks(np.ndarray)) def append_carray_with_chunks(a, c, **kwargs): for chunk in c: append(a, chunk) a.flush() return a @convert.register(chunks(np.ndarray), (ctable, carray), cost=1.2) def bcolz_to_numpy_chunks(x, chunksize=2**20, **kwargs): def load(): first_n = min(1000, chunksize) first = x[:first_n] yield first for i in range(first_n, x.shape[0], chunksize): yield x[i: i + chunksize] return chunks(np.ndarray)(load) @resource.register('.*\.bcolz/?') def resource_bcolz(uri, dshape=None, expected_dshape=None, **kwargs): if os.path.exists(uri): try: return ctable(rootdir=uri) except IOError: # __rootdirs__ doesn't exist because we aren't a ctable return 
carray(rootdir=uri) else: if not dshape: raise ValueError("Must specify either existing bcolz directory or" " valid datashape") dshape = datashape.dshape(dshape) dt = dshape_to_numpy(dshape) shape_tail = tuple(map(int, dshape.shape[1:])) # tail of shape if dshape.shape[0] == datashape.var: shape = (0,) + shape_tail else: shape = (int(dshape.shape[0]),) + shape_tail x = np.empty(shape=shape, dtype=dt) kwargs = keyfilter(keywords.__contains__, kwargs) expectedlen = kwargs.pop('expectedlen', int(expected_dshape[0]) if expected_dshape is not None and isinstance(expected_dshape[0], datashape.Fixed) else None) if datashape.predicates.isrecord(dshape.measure): return ctable(x, rootdir=uri, expectedlen=expectedlen, **kwargs) else: return carray(x, rootdir=uri, expectedlen=expectedlen, **kwargs) @drop.register((carray, ctable)) def drop_bcolz(b, **kwargs): b.flush() shutil.rmtree(b.rootdir) ooc_types |= set((carray, ctable))
ContinuumIO/odo
odo/backends/bcolz.py
Python
bsd-3-clause
3,424
#!/usr/bin/env python
# Nightly batch job: scan a directory of pickled job-stats files, flag jobs
# whose per-host load looks imbalanced (std dev : mean above a threshold),
# and render master plots for the flagged jobs in parallel.
import analyze_conf
import sys
import datetime, glob, job_stats, os, subprocess, time
import numpy
import scipy, scipy.stats
import argparse
import re
import multiprocessing
import functools
import tspl, tspl_utils, imbalance, masterplot, uncorrelated

def do_mp(arg):
    # Pool worker: render the two master plots ('lines' and 'percentile')
    # for one flagged file.  `arg` is a (file, threshold, out_dir) tuple
    # because Pool.map passes a single argument.
    (file,thresh,out_dir)=arg
    masterplot.mp_wrapper(file,'lines',thresh,out_dir,'imbalance',
                          header='Potentially Imbalanced',wide=True)
    masterplot.mp_wrapper(file,'percentile',thresh,out_dir,'imbalance',
                          header='Potentially Imbalanced (Percentiles)',
                          wide=True)

def do_un(arg):
    # Pool worker: plot the (Pearson) correlation structure for one file.
    # Currently unused — see the commented-out call at the bottom of main().
    file,output_dir=arg
    # Per-architecture key pairs: (device, counter) to sum for each series.
    k1={'amd64' : ['amd64_core','cpu'],
        'intel_snb' : [ 'intel_snb', 'cpu'],}
    k2={'amd64' : ['SSE_FLOPS', 'user'],
        'intel_snb' : ['LOAD_L1D_ALL','user'],}
    try:
        ts=tspl.TSPLSum(file,k1,k2)
    except tspl.TSPLException as e:
        # Unreadable/malformed pickle: skip this file silently.
        return
    uncorrelated.plot_correlation(ts,uncorrelated.pearson(ts),'',output_dir)

def main():
    parser=argparse.ArgumentParser(description='Deal with a directory of pickle'
                                   ' files nightly')
    parser.add_argument('-p', help='Set number of processes',
                        nargs=1, type=int, default=[1])
    parser.add_argument('-o', help='Output directory',
                        nargs=1, type=str, default=['.'], metavar='output_dir')
    parser.add_argument('threshold', help='Treshold ratio for std dev:mean',
                        nargs='?', default=0.25)
    parser.add_argument('filearg', help='File, directory, or quoted'
                        ' glob pattern', nargs='?',default='jobs')
    n=parser.parse_args()
    filelist=tspl_utils.getfilelist(n.filearg)

    pool = multiprocessing.Pool(processes=n.p[0])
    # Manager dict so worker processes can report imbalance ratios back.
    m = multiprocessing.Manager()
    ratios = m.dict()
    k1={'amd64' : ['amd64_core'],
        'intel_snb' : [ 'intel_snb'],}
    k2={'amd64' : ['DRAM'],
        'intel_snb' : ['LOAD_L1D_ALL'],}
    partial_imbal=functools.partial(imbalance.compute_imbalance,
                                    k1=k1, k2=k2,
                                    threshold=float(n.threshold),
                                    plot_flag=False,full_flag=False,
                                    ratios=ratios)
    if len(filelist) != 0:
        pool.map(partial_imbal,filelist)
        pool.close()
        pool.join()

    # Collect the files whose reported ratio exceeds the threshold; th and
    # dirs are kept parallel to badfiles for the zip() below.
    badfiles=[]
    th=[]
    dirs=[]
    for i in ratios.keys():
        v=ratios[i][0]
        if v > float(n.threshold):
            for f in filelist:
                if re.search(i,f):
                    badfiles.append(f)
                    th.append(v)
                    dirs.append(n.o[0])

    if len(badfiles) != 0 or len(th) != 0 or len(dirs) != 0:
        # Fresh pool: the first one was closed after the imbalance pass.
        pool = multiprocessing.Pool(processes=n.p[0])
        pool.map(do_mp,zip(badfiles,th,dirs)) # Pool.starmap should exist....
        pool.close()
        pool.join()

    bad_users=imbalance.find_top_users(ratios)

    #### Not presently useful
    #### pool.map(do_un,zip(badfiles,dirs))

if __name__ == "__main__":
    main()
ubccr/tacc_stats
analyze/process_pickles/nightly.py
Python
lgpl-2.1
2,983
# Copyright (c) 2018 Charles University, Faculty of Arts, # Institute of the Czech National Corpus # Copyright (c) 2018 Tomas Machalek <tomas.machalek@gmail.com> # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; version 2 # dated June, 1991. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. import re import copy from collections import OrderedDict, defaultdict import os import logging from controller import exposed import actions.user import plugins from plugins.abstract.corpora import (AbstractSearchableCorporaArchive, BrokenCorpusInfo, CorplistProvider, TokenConnect, KwicConnect, DictLike, TagsetInfo) import l10n from plugins.rdbms_corparch.backend import ManateeCorpora from plugins.rdbms_corparch.backend.sqlite import Backend from plugins.rdbms_corparch.registry import RegModelSerializer, RegistryConf try: from markdown import markdown except ImportError: def markdown(s): return s def parse_query(tag_prefix, query): """ Parses a search query: <query> ::= <label> | <desc_part> <label> ::= <tag_prefix> <desc_part> returns: 2-tuple (list of description substrings, list of labels/keywords) """ if query is not None: tokens = re.split(r'\s+', query.strip()) else: tokens = [] query_keywords = [] substrs = [] for t in tokens: if len(t) > 0: if t[0] == tag_prefix: query_keywords.append(t[1:]) else: substrs.append(t) return substrs, query_keywords class CorpusListItem(DictLike): def __init__(self, id=None, corpus_id=None, name=None, description=None, 
size=0, path=None, featured=False, keywords=None): self.id = id self.corpus_id = corpus_id self.name = name self.description = description self.size = size self.size_info = l10n.simplify_num(size) self.path = path self.featured = featured self.found_in = [] self.keywords = [] if keywords is None else keywords def __unicode__(self): return u'CorpusListItem({0})'.format(self.__dict__) def __repr__(self): return self.__unicode__() class DeafultCorplistProvider(CorplistProvider): """ Corpus listing and filtering service """ def __init__(self, plugin_api, corparch, tag_prefix): """ arguments: plugin_api -- a controller.PluginApi instance corparch -- a plugins.abstract.corpora.AbstractSearchableCorporaArchive instance tag_prefix -- a string determining how a tag (= keyword or label) is recognized """ self._plugin_api = plugin_api self._corparch = corparch self._tag_prefix = tag_prefix def search(self, plugin_api, query, offset=0, limit=None, filter_dict=None): if query is False: # False means 'use default values' query = '' if filter_dict.get('minSize'): min_size = l10n.desimplify_num(filter_dict.get('minSize'), strict=False) else: min_size = 0 if filter_dict.get('maxSize'): max_size = l10n.desimplify_num(filter_dict.get('maxSize'), strict=False) else: max_size = None if filter_dict.get('requestable'): requestable = bool(int(filter_dict.get('requestable'))) else: requestable = False if filter_dict.get('favOnly'): favourites_only = bool(int(filter_dict.get('favOnly'))) else: favourites_only = False if offset is None: offset = 0 else: offset = int(offset) if limit is None: limit = int(self._corparch.max_page_size) else: limit = int(limit) user_items = self._corparch.user_items.get_user_items(plugin_api) favourite_corpora = { item.main_corpus_id: item.ident for item in user_items if item.is_single_corpus} def get_found_in(corp, phrases): ans = [] for phrase in phrases: phrase = phrase.lower() if phrase not in corp.name.lower() and phrase in corp.description.lower(): 
ans.append('defaultCorparch__found_in_desc') break return ans query_substrs, query_keywords = parse_query(self._tag_prefix, query) normalized_query_substrs = [s.lower() for s in query_substrs] used_keywords = set() rows = self._corparch.list_corpora(plugin_api, substrs=normalized_query_substrs, min_size=min_size, max_size=max_size, requestable=requestable, offset=offset, limit=limit + 1, keywords=query_keywords, favourites=tuple(favourite_corpora.keys()) if favourites_only else ()).values() ans = [] for i, corp in enumerate(rows): used_keywords.update(corp.keywords) corp.keywords = self._corparch.get_l10n_keywords(corp.keywords, plugin_api.user_lang) corp.fav_id = favourite_corpora.get(corp.id, None) corp.found_in = get_found_in(corp, normalized_query_substrs) ans.append(corp.to_dict()) if i == limit - 1: break return dict(rows=ans, nextOffset=offset + limit if len(rows) > limit else None, keywords=l10n.sort(used_keywords, loc=plugin_api.user_lang), query=query, current_keywords=query_keywords, filters=dict(filter_dict)) @exposed(return_type='json', access_level=1, skip_corpus_init=True) def get_favorite_corpora(ctrl, request): with plugins.runtime.CORPARCH as ca: return ca.export_favorite(ctrl._plugin_api) class RDBMSCorparch(AbstractSearchableCorporaArchive): """ A corparch plug-in implementation based on a relational database (sqlite/mysql - depends on backend). The main advantages over default_corparch are: 1) no redundancies (e.g. 
references, text type descriptions) 2) referential integrity 3) optimized data loading """ LABEL_OVERLAY_TRANSPARENCY = 0.20 def __init__(self, backend, user_items, tag_prefix, max_num_hints, max_page_size, registry_lang): """ arguments: backend -- a database backend user_items -- user_items plug-in tag_prefix -- a string used to distinguish search labels (tags) from actual searched strings max_num_hints -- max_page_size -- registry_lang -- """ self._backend = backend self._user_items = user_items self._tag_prefix = tag_prefix self._max_num_hints = int(max_num_hints) self._max_page_size = int(max_page_size) self._registry_lang = registry_lang self._corpus_info_cache = {} self._keywords = None # keyword (aka tags) database for corpora; None = not loaded yet self._colors = {} self._descriptions = defaultdict(lambda: {}) self._tc_providers = {} self._kc_providers = {} self._mc = ManateeCorpora() @property def max_page_size(self): return self._max_page_size @property def user_items(self): return self._user_items @property def backend(self): return self._backend def _parse_color(self, code): code = code.lower() transparency = self.LABEL_OVERLAY_TRANSPARENCY if code[0] == '#': code = code[1:] r, g, b = [int('0x%s' % code[i:i + 2], 0) for i in range(0, len(code), 2)] return 'rgba(%d, %s, %d, %01.2f)' % (r, g, b, transparency) elif code.find('rgb') == 0: m = re.match(r'rgb\s*\(\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*\)', code, re.IGNORECASE) if m: return 'rgba(%s, %s, %s, %01.2f)' % (m.group(1), m.group(2), m.group(3), transparency) raise ValueError('Invalid color code: %s' % code) def get_label_color(self, label_id): return self._colors.get(label_id, None) def _corp_info_from_row(self, row): if row: ans = self.create_corpus_info() ans.id = row['id'] ans.web = row['web'] ans.sentence_struct = row['sentence_struct'] ans.collator_locale = row['collator_locale'] ans.speech_segment = row['speech_segment'] ans.speaker_id_attr = row['speaker_id_attr'] ans.speech_overlap_attr = 
row['speech_overlap_attr'] ans.speech_overlap_val = row['speech_overlap_val'] ans.use_safe_font = row['use_safe_font'] ans.metadata.id_attr = row['id_attr'].encode('utf-8') if row['id_attr'] else None ans.metadata.label_attr = row['label_attr'].encode( 'utf-8') if row['label_attr'] else None ans.metadata.featured = bool(row['featured']) ans.metadata.database = row['database'] ans.metadata.keywords = [x for x in ( row['keywords'].split(',') if row['keywords'] else []) if x] ans.metadata.desc = row['ttdesc_id'] ans.metadata.group_duplicates = bool(row['bib_group_duplicates']) ans.manatee.encoding = row['encoding'] ans.manatee.description = row['info'] ans.manatee.size = row['size'] ans.manatee.lang = row['language'] ans.manatee.name = row['name'] return ans return None def _export_untranslated_label(self, plugin_api, text): """ This plug-in is able to load multi-language descriptions so here we don't have to add any stuff here """ return text def corpus_list_item_from_row(self, plugin_api, row): keywords = [x for x in (row['keywords'].split(',') if row['keywords'] else []) if x] return CorpusListItem(id=row['id'], corpus_id=row['id'], name=row['name'], description=self._export_untranslated_label(plugin_api, row['info']), size=row['size'], featured=row['featured'], path=None, keywords=keywords) def list_corpora(self, plugin_api, substrs=None, keywords=None, min_size=0, max_size=None, requestable=False, offset=0, limit=-1, favourites=()): user_id = plugin_api.user_dict['id'] ans = OrderedDict() for row in self._backend.load_all_corpora(user_id, substrs=substrs, keywords=keywords, min_size=min_size, max_size=max_size, requestable=requestable, offset=offset, limit=limit, favourites=favourites): ans[row['id']] = self.corpus_list_item_from_row(plugin_api, row) return ans def get_l10n_keywords(self, id_list, lang_code): all_keywords = self.all_keywords(lang_code) ans = [] for keyword_id in id_list: if keyword_id in all_keywords: ans.append((keyword_id, 
all_keywords[keyword_id])) else: ans.append((keyword_id, keyword_id)) return ans def _localize_corpus_info(self, data, lang_code): """ Updates localized values from data (please note that not all the data are localized - e.g. paths to files) by a single variant given passed lang_code. """ ans = copy.deepcopy(data) lang_code = lang_code.split('_')[0] if ans.metadata.desc is not None and lang_code in self._descriptions: ans.metadata.desc = self._descriptions[lang_code][ans.metadata.desc] else: ans.metadata.desc = '' ans.metadata.keywords = self.get_l10n_keywords(ans.metadata.keywords, lang_code) return ans @staticmethod def _get_iso639lang(lang): """ return 2-letter version of a lang-code """ return lang.split('_')[0] def all_keywords(self, lang): if self._keywords is None: self._keywords = defaultdict(lambda: OrderedDict()) for row in self._backend.load_all_keywords(): # id, label_cs, label_en, color self._keywords['cs'][row['id']] = row['label_cs'] self._keywords['en'][row['id']] = row['label_en'] self._colors[row['id']] = self._parse_color(row['color']) if row['color'] else None lang_key = self._get_iso639lang(lang) return self._keywords[lang_key] def _get_tckc_providers(self, corpus_id): if corpus_id not in self._tc_providers and corpus_id not in self._kc_providers: self._tc_providers[corpus_id] = TokenConnect() self._kc_providers[corpus_id] = KwicConnect() data = self._backend.load_tckc_providers(corpus_id) for row in data: if row['type'] == 'tc': self._tc_providers[corpus_id].providers.append((row['provider'], row['is_kwic_view'])) elif row['type'] == 'kc': self._kc_providers[corpus_id].providers.append(row['provider']) return self._tc_providers[corpus_id], self._kc_providers[corpus_id] def _fetch_corpus_info(self, corpus_id): if corpus_id not in self._corpus_info_cache: row = self._backend.load_corpus(corpus_id) corp = self._corp_info_from_row(row) if corp: corp.tagsets = [TagsetInfo().from_dict(row2) for row2 in self._backend.load_corpus_tagsets(corpus_id)] 
self._corpus_info_cache[corpus_id] = corp for art in self._backend.load_corpus_articles(corpus_id): if art['role'] == 'default': corp.citation_info.default_ref = markdown(art['entry']) elif art['role'] == 'standard': corp.citation_info.article_ref.append(markdown(art['entry'])) elif art['role'] == 'other': corp.citation_info.other_bibliography = markdown(art['entry']) if row['ttdesc_id'] not in self._descriptions: for drow in self._backend.load_ttdesc(row['ttdesc_id']): self._descriptions['cs'][row['ttdesc_id']] = drow['text_cs'] self._descriptions['en'][row['ttdesc_id']] = drow['text_en'] return self._corpus_info_cache.get(corpus_id, None) def get_corpus_info(self, user_lang, corp_name): """ Obtain full corpus info """ if corp_name: try: # get rid of path-like corpus ID prefix corp_name = corp_name.lower() corp_info = self._fetch_corpus_info(corp_name) if corp_info is not None: if user_lang is not None: ans = self._localize_corpus_info(corp_info, lang_code=user_lang) else: ans = corp_info ans.manatee = self._mc.get_info(corp_name) ans.token_connect, ans.kwic_connect = self._get_tckc_providers(corp_name) ans.metadata.interval_attrs = self._backend.load_interval_attrs(corp_name) return ans return BrokenCorpusInfo(name=corp_name) except TypeError as ex: logging.getLogger(__name__).warning( 'Failed to fetch corpus info for {0}: {1}'.format(corp_name, ex)) return BrokenCorpusInfo(name=corp_name) else: return BrokenCorpusInfo() def mod_corplist_menu(self, plugin_api, menu_item): if not plugin_api.user_is_anonymous: menu_item.add_args(('requestable', '1')) def create_corplist_provider(self, plugin_api): return DeafultCorplistProvider(plugin_api, self, self._tag_prefix) def _export_favorite(self, plugin_api): ans = [] for item in plugins.runtime.USER_ITEMS.instance.get_user_items(plugin_api): tmp = item.to_dict() tmp['description'] = self._export_untranslated_label( plugin_api, self._mc.get_info(item.main_corpus_id).description) ans.append(tmp) return ans def 
initial_search_params(self, plugin_api, query, filter_dict=None): query_substrs, query_keywords = parse_query(self._tag_prefix, query) all_keywords = self.all_keywords(plugin_api.user_lang) exp_keywords = [(k, lab, k in query_keywords, self.get_label_color(k)) for k, lab in all_keywords.items()] return { 'keywords': exp_keywords, 'filters': { 'maxSize': filter_dict.getlist('maxSize'), 'minSize': filter_dict.getlist('minSize'), 'name': query_substrs } } def rebuild_registry(self, registry_path, variant, proc_aligned=False): logging.getLogger(__name__).info('Rebuilding registry {0}'.format(registry_path)) rc = RegistryConf(corpus_id=os.path.basename(registry_path), variant=variant, backend=self._backend) rc.load() if not os.path.exists(os.path.dirname(registry_path)): os.makedirs(os.path.dirname(registry_path)) s = RegModelSerializer(add_heading=True) with open(registry_path, 'w') as fw: fw.write(s.serialize(rc).encode(rc.encoding)) if proc_aligned: for aligned in rc.aligned: self.rebuild_registry(os.path.join( os.path.dirname(registry_path), aligned), variant) def export_actions(self): return {actions.user.User: [get_favorite_corpora]} def _export_featured(self, plugin_api): return [dict(r) for r in self.backend.load_featured_corpora(plugin_api.user_lang)] def export(self, plugin_api): return dict( favorite=self._export_favorite(plugin_api), featured=self._export_featured(plugin_api), corpora_labels=[(k, lab, self.get_label_color(k)) for k, lab in self.all_keywords(plugin_api.user_lang).items()], tag_prefix=self._tag_prefix, max_num_hints=self._max_num_hints, max_page_size=self.max_page_size ) @plugins.inject(plugins.runtime.USER_ITEMS) def create_instance(conf, user_items): return RDBMSCorparch(backend=Backend(db_path=conf.get('plugins', 'corparch')['default:file']), user_items=user_items, tag_prefix=conf.get('plugins', 'corparch')['default:tag_prefix'], max_num_hints=conf.get('plugins', 'corparch')['default:max_num_hints'], max_page_size=conf.get('plugins', 
'corparch').get('default:default_page_list_size', None), registry_lang=conf.get('corpora', 'manatee_registry_locale', 'en_US'))
ufal/lindat-kontext
lib/plugins/rdbms_corparch/__init__.py
Python
gpl-2.0
20,019
from __future__ import absolute_import, division, print_function, with_statement import contextlib import functools import sys import textwrap import time import platform import weakref from tornado.concurrent import return_future from tornado.escape import url_escape from tornado.httpclient import AsyncHTTPClient from tornado.ioloop import IOLoop from tornado.log import app_log from tornado import stack_context from tornado.testing import AsyncHTTPTestCase, AsyncTestCase, ExpectLog, gen_test from tornado.test.util import unittest, skipOnTravis from tornado.web import Application, RequestHandler, asynchronous, HTTPError from tornado import gen skipBefore33 = unittest.skipIf(sys.version_info < (3, 3), 'PEP 380 not available') skipNotCPython = unittest.skipIf(platform.python_implementation() != 'CPython', 'Not CPython implementation') class GenEngineTest(AsyncTestCase): def setUp(self): super(GenEngineTest, self).setUp() self.named_contexts = [] def named_context(self, name): @contextlib.contextmanager def context(): self.named_contexts.append(name) try: yield finally: self.assertEqual(self.named_contexts.pop(), name) return context def run_gen(self, f): f() return self.wait() def delay_callback(self, iterations, callback, arg): """Runs callback(arg) after a number of IOLoop iterations.""" if iterations == 0: callback(arg) else: self.io_loop.add_callback(functools.partial( self.delay_callback, iterations - 1, callback, arg)) @return_future def async_future(self, result, callback): self.io_loop.add_callback(callback, result) def test_no_yield(self): @gen.engine def f(): self.stop() self.run_gen(f) def test_inline_cb(self): @gen.engine def f(): (yield gen.Callback("k1"))() res = yield gen.Wait("k1") self.assertTrue(res is None) self.stop() self.run_gen(f) def test_ioloop_cb(self): @gen.engine def f(): self.io_loop.add_callback((yield gen.Callback("k1"))) yield gen.Wait("k1") self.stop() self.run_gen(f) def test_exception_phase1(self): @gen.engine def f(): 1 / 0 
self.assertRaises(ZeroDivisionError, self.run_gen, f) def test_exception_phase2(self): @gen.engine def f(): self.io_loop.add_callback((yield gen.Callback("k1"))) yield gen.Wait("k1") 1 / 0 self.assertRaises(ZeroDivisionError, self.run_gen, f) def test_exception_in_task_phase1(self): def fail_task(callback): 1 / 0 @gen.engine def f(): try: yield gen.Task(fail_task) raise Exception("did not get expected exception") except ZeroDivisionError: self.stop() self.run_gen(f) def test_exception_in_task_phase2(self): # This is the case that requires the use of stack_context in gen.engine def fail_task(callback): self.io_loop.add_callback(lambda: 1 / 0) @gen.engine def f(): try: yield gen.Task(fail_task) raise Exception("did not get expected exception") except ZeroDivisionError: self.stop() self.run_gen(f) def test_with_arg(self): @gen.engine def f(): (yield gen.Callback("k1"))(42) res = yield gen.Wait("k1") self.assertEqual(42, res) self.stop() self.run_gen(f) def test_with_arg_tuple(self): @gen.engine def f(): (yield gen.Callback((1, 2)))((3, 4)) res = yield gen.Wait((1, 2)) self.assertEqual((3, 4), res) self.stop() self.run_gen(f) def test_key_reuse(self): @gen.engine def f(): yield gen.Callback("k1") yield gen.Callback("k1") self.stop() self.assertRaises(gen.KeyReuseError, self.run_gen, f) def test_key_reuse_tuple(self): @gen.engine def f(): yield gen.Callback((1, 2)) yield gen.Callback((1, 2)) self.stop() self.assertRaises(gen.KeyReuseError, self.run_gen, f) def test_key_mismatch(self): @gen.engine def f(): yield gen.Callback("k1") yield gen.Wait("k2") self.stop() self.assertRaises(gen.UnknownKeyError, self.run_gen, f) def test_key_mismatch_tuple(self): @gen.engine def f(): yield gen.Callback((1, 2)) yield gen.Wait((2, 3)) self.stop() self.assertRaises(gen.UnknownKeyError, self.run_gen, f) def test_leaked_callback(self): @gen.engine def f(): yield gen.Callback("k1") self.stop() self.assertRaises(gen.LeakedCallbackError, self.run_gen, f) def 
test_leaked_callback_tuple(self): @gen.engine def f(): yield gen.Callback((1, 2)) self.stop() self.assertRaises(gen.LeakedCallbackError, self.run_gen, f) def test_parallel_callback(self): @gen.engine def f(): for k in range(3): self.io_loop.add_callback((yield gen.Callback(k))) yield gen.Wait(1) self.io_loop.add_callback((yield gen.Callback(3))) yield gen.Wait(0) yield gen.Wait(3) yield gen.Wait(2) self.stop() self.run_gen(f) def test_bogus_yield(self): @gen.engine def f(): yield 42 self.assertRaises(gen.BadYieldError, self.run_gen, f) def test_bogus_yield_tuple(self): @gen.engine def f(): yield (1, 2) self.assertRaises(gen.BadYieldError, self.run_gen, f) def test_reuse(self): @gen.engine def f(): self.io_loop.add_callback((yield gen.Callback(0))) yield gen.Wait(0) self.stop() self.run_gen(f) self.run_gen(f) def test_task(self): @gen.engine def f(): yield gen.Task(self.io_loop.add_callback) self.stop() self.run_gen(f) def test_wait_all(self): @gen.engine def f(): (yield gen.Callback("k1"))("v1") (yield gen.Callback("k2"))("v2") results = yield gen.WaitAll(["k1", "k2"]) self.assertEqual(results, ["v1", "v2"]) self.stop() self.run_gen(f) def test_exception_in_yield(self): @gen.engine def f(): try: yield gen.Wait("k1") raise Exception("did not get expected exception") except gen.UnknownKeyError: pass self.stop() self.run_gen(f) def test_resume_after_exception_in_yield(self): @gen.engine def f(): try: yield gen.Wait("k1") raise Exception("did not get expected exception") except gen.UnknownKeyError: pass (yield gen.Callback("k2"))("v2") self.assertEqual((yield gen.Wait("k2")), "v2") self.stop() self.run_gen(f) def test_orphaned_callback(self): @gen.engine def f(): self.orphaned_callback = yield gen.Callback(1) try: self.run_gen(f) raise Exception("did not get expected exception") except gen.LeakedCallbackError: pass self.orphaned_callback() def test_multi(self): @gen.engine def f(): (yield gen.Callback("k1"))("v1") (yield gen.Callback("k2"))("v2") results = yield 
[gen.Wait("k1"), gen.Wait("k2")] self.assertEqual(results, ["v1", "v2"]) self.stop() self.run_gen(f) def test_multi_delayed(self): @gen.engine def f(): # callbacks run at different times responses = yield [ gen.Task(self.delay_callback, 3, arg="v1"), gen.Task(self.delay_callback, 1, arg="v2"), ] self.assertEqual(responses, ["v1", "v2"]) self.stop() self.run_gen(f) @skipOnTravis @gen_test def test_multi_performance(self): # Yielding a list used to have quadratic performance; make # sure a large list stays reasonable. On my laptop a list of # 2000 used to take 1.8s, now it takes 0.12. start = time.time() yield [gen.Task(self.io_loop.add_callback) for i in range(2000)] end = time.time() self.assertLess(end - start, 1.0) @gen_test def test_future(self): result = yield self.async_future(1) self.assertEqual(result, 1) @gen_test def test_multi_future(self): results = yield [self.async_future(1), self.async_future(2)] self.assertEqual(results, [1, 2]) def test_arguments(self): @gen.engine def f(): (yield gen.Callback("noargs"))() self.assertEqual((yield gen.Wait("noargs")), None) (yield gen.Callback("1arg"))(42) self.assertEqual((yield gen.Wait("1arg")), 42) (yield gen.Callback("kwargs"))(value=42) result = yield gen.Wait("kwargs") self.assertTrue(isinstance(result, gen.Arguments)) self.assertEqual(((), dict(value=42)), result) self.assertEqual(dict(value=42), result.kwargs) (yield gen.Callback("2args"))(42, 43) result = yield gen.Wait("2args") self.assertTrue(isinstance(result, gen.Arguments)) self.assertEqual(((42, 43), {}), result) self.assertEqual((42, 43), result.args) def task_func(callback): callback(None, error="foo") result = yield gen.Task(task_func) self.assertTrue(isinstance(result, gen.Arguments)) self.assertEqual(((None,), dict(error="foo")), result) self.stop() self.run_gen(f) def test_stack_context_leak(self): # regression test: repeated invocations of a gen-based # function should not result in accumulated stack_contexts def _stack_depth(): head = 
stack_context._state.contexts[1] length = 0 while head is not None: length += 1 head = head.old_contexts[1] return length @gen.engine def inner(callback): yield gen.Task(self.io_loop.add_callback) callback() @gen.engine def outer(): for i in range(10): yield gen.Task(inner) stack_increase = _stack_depth() - initial_stack_depth self.assertTrue(stack_increase <= 2) self.stop() initial_stack_depth = _stack_depth() self.run_gen(outer) def test_stack_context_leak_exception(self): # same as previous, but with a function that exits with an exception @gen.engine def inner(callback): yield gen.Task(self.io_loop.add_callback) 1 / 0 @gen.engine def outer(): for i in range(10): try: yield gen.Task(inner) except ZeroDivisionError: pass stack_increase = len(stack_context._state.contexts) - initial_stack_depth self.assertTrue(stack_increase <= 2) self.stop() initial_stack_depth = len(stack_context._state.contexts) self.run_gen(outer) def function_with_stack_context(self, callback): # Technically this function should stack_context.wrap its callback # upon entry. However, it is very common for this step to be # omitted. def step2(): self.assertEqual(self.named_contexts, ['a']) self.io_loop.add_callback(callback) with stack_context.StackContext(self.named_context('a')): self.io_loop.add_callback(step2) @gen_test def test_wait_transfer_stack_context(self): # Wait should not pick up contexts from where callback was invoked, # even if that function improperly fails to wrap its callback. cb = yield gen.Callback('k1') self.function_with_stack_context(cb) self.assertEqual(self.named_contexts, []) yield gen.Wait('k1') self.assertEqual(self.named_contexts, []) @gen_test def test_task_transfer_stack_context(self): yield gen.Task(self.function_with_stack_context) self.assertEqual(self.named_contexts, []) def test_raise_after_stop(self): # This pattern will be used in the following tests so make sure # the exception propagates as expected. 
@gen.engine def f(): self.stop() 1 / 0 with self.assertRaises(ZeroDivisionError): self.run_gen(f) def test_sync_raise_return(self): # gen.Return is allowed in @gen.engine, but it may not be used # to return a value. @gen.engine def f(): self.stop(42) raise gen.Return() result = self.run_gen(f) self.assertEqual(result, 42) def test_async_raise_return(self): @gen.engine def f(): yield gen.Task(self.io_loop.add_callback) self.stop(42) raise gen.Return() result = self.run_gen(f) self.assertEqual(result, 42) def test_sync_raise_return_value(self): @gen.engine def f(): raise gen.Return(42) with self.assertRaises(gen.ReturnValueIgnoredError): self.run_gen(f) def test_sync_raise_return_value_tuple(self): @gen.engine def f(): raise gen.Return((1, 2)) with self.assertRaises(gen.ReturnValueIgnoredError): self.run_gen(f) def test_async_raise_return_value(self): @gen.engine def f(): yield gen.Task(self.io_loop.add_callback) raise gen.Return(42) with self.assertRaises(gen.ReturnValueIgnoredError): self.run_gen(f) def test_async_raise_return_value_tuple(self): @gen.engine def f(): yield gen.Task(self.io_loop.add_callback) raise gen.Return((1, 2)) with self.assertRaises(gen.ReturnValueIgnoredError): self.run_gen(f) def test_return_value(self): # It is an error to apply @gen.engine to a function that returns # a value. @gen.engine def f(): return 42 with self.assertRaises(gen.ReturnValueIgnoredError): self.run_gen(f) def test_return_value_tuple(self): # It is an error to apply @gen.engine to a function that returns # a value. @gen.engine def f(): return (1, 2) with self.assertRaises(gen.ReturnValueIgnoredError): self.run_gen(f) @skipNotCPython def test_task_refcounting(self): # On CPython, tasks and their arguments should be released immediately # without waiting for garbage collection. 
@gen.engine def f(): class Foo(object): pass arg = Foo() self.arg_ref = weakref.ref(arg) task = gen.Task(self.io_loop.add_callback, arg=arg) self.task_ref = weakref.ref(task) yield task self.stop() self.run_gen(f) self.assertIs(self.arg_ref(), None) self.assertIs(self.task_ref(), None) class GenCoroutineTest(AsyncTestCase): def setUp(self): # Stray StopIteration exceptions can lead to tests exiting prematurely, # so we need explicit checks here to make sure the tests run all # the way through. self.finished = False super(GenCoroutineTest, self).setUp() def tearDown(self): super(GenCoroutineTest, self).tearDown() assert self.finished @gen_test def test_sync_gen_return(self): @gen.coroutine def f(): raise gen.Return(42) result = yield f() self.assertEqual(result, 42) self.finished = True @gen_test def test_async_gen_return(self): @gen.coroutine def f(): yield gen.Task(self.io_loop.add_callback) raise gen.Return(42) result = yield f() self.assertEqual(result, 42) self.finished = True @gen_test def test_sync_return(self): @gen.coroutine def f(): return 42 result = yield f() self.assertEqual(result, 42) self.finished = True @skipBefore33 @gen_test def test_async_return(self): # It is a compile-time error to return a value in a generator # before Python 3.3, so we must test this with exec. # Flatten the real global and local namespace into our fake globals: # it's all global from the perspective of f(). global_namespace = dict(globals(), **locals()) local_namespace = {} exec(textwrap.dedent(""" @gen.coroutine def f(): yield gen.Task(self.io_loop.add_callback) return 42 """), global_namespace, local_namespace) result = yield local_namespace['f']() self.assertEqual(result, 42) self.finished = True @skipBefore33 @gen_test def test_async_early_return(self): # A yield statement exists but is not executed, which means # this function "returns" via an exception. This exception # doesn't happen before the exception handling is set up. 
global_namespace = dict(globals(), **locals()) local_namespace = {} exec(textwrap.dedent(""" @gen.coroutine def f(): if True: return 42 yield gen.Task(self.io_loop.add_callback) """), global_namespace, local_namespace) result = yield local_namespace['f']() self.assertEqual(result, 42) self.finished = True @gen_test def test_sync_return_no_value(self): @gen.coroutine def f(): return result = yield f() self.assertEqual(result, None) self.finished = True @gen_test def test_async_return_no_value(self): # Without a return value we don't need python 3.3. @gen.coroutine def f(): yield gen.Task(self.io_loop.add_callback) return result = yield f() self.assertEqual(result, None) self.finished = True @gen_test def test_sync_raise(self): @gen.coroutine def f(): 1 / 0 # The exception is raised when the future is yielded # (or equivalently when its result method is called), # not when the function itself is called). future = f() with self.assertRaises(ZeroDivisionError): yield future self.finished = True @gen_test def test_async_raise(self): @gen.coroutine def f(): yield gen.Task(self.io_loop.add_callback) 1 / 0 future = f() with self.assertRaises(ZeroDivisionError): yield future self.finished = True @gen_test def test_pass_callback(self): @gen.coroutine def f(): raise gen.Return(42) result = yield gen.Task(f) self.assertEqual(result, 42) self.finished = True @gen_test def test_replace_yieldpoint_exception(self): # Test exception handling: a coroutine can catch one exception # raised by a yield point and raise a different one. @gen.coroutine def f1(): 1 / 0 @gen.coroutine def f2(): try: yield f1() except ZeroDivisionError: raise KeyError() future = f2() with self.assertRaises(KeyError): yield future self.finished = True @gen_test def test_swallow_yieldpoint_exception(self): # Test exception handling: a coroutine can catch an exception # raised by a yield point and not raise a different one. 
@gen.coroutine def f1(): 1 / 0 @gen.coroutine def f2(): try: yield f1() except ZeroDivisionError: raise gen.Return(42) result = yield f2() self.assertEqual(result, 42) self.finished = True @gen_test def test_replace_context_exception(self): # Test exception handling: exceptions thrown into the stack context # can be caught and replaced. @gen.coroutine def f2(): self.io_loop.add_callback(lambda: 1 / 0) try: yield gen.Task(self.io_loop.add_timeout, self.io_loop.time() + 10) except ZeroDivisionError: raise KeyError() future = f2() with self.assertRaises(KeyError): yield future self.finished = True @gen_test def test_swallow_context_exception(self): # Test exception handling: exceptions thrown into the stack context # can be caught and ignored. @gen.coroutine def f2(): self.io_loop.add_callback(lambda: 1 / 0) try: yield gen.Task(self.io_loop.add_timeout, self.io_loop.time() + 10) except ZeroDivisionError: raise gen.Return(42) result = yield f2() self.assertEqual(result, 42) self.finished = True class GenSequenceHandler(RequestHandler): @asynchronous @gen.engine def get(self): self.io_loop = self.request.connection.stream.io_loop self.io_loop.add_callback((yield gen.Callback("k1"))) yield gen.Wait("k1") self.write("1") self.io_loop.add_callback((yield gen.Callback("k2"))) yield gen.Wait("k2") self.write("2") # reuse an old key self.io_loop.add_callback((yield gen.Callback("k1"))) yield gen.Wait("k1") self.finish("3") class GenCoroutineSequenceHandler(RequestHandler): @gen.coroutine def get(self): self.io_loop = self.request.connection.stream.io_loop self.io_loop.add_callback((yield gen.Callback("k1"))) yield gen.Wait("k1") self.write("1") self.io_loop.add_callback((yield gen.Callback("k2"))) yield gen.Wait("k2") self.write("2") # reuse an old key self.io_loop.add_callback((yield gen.Callback("k1"))) yield gen.Wait("k1") self.finish("3") class GenCoroutineUnfinishedSequenceHandler(RequestHandler): @asynchronous @gen.coroutine def get(self): self.io_loop = 
self.request.connection.stream.io_loop self.io_loop.add_callback((yield gen.Callback("k1"))) yield gen.Wait("k1") self.write("1") self.io_loop.add_callback((yield gen.Callback("k2"))) yield gen.Wait("k2") self.write("2") # reuse an old key self.io_loop.add_callback((yield gen.Callback("k1"))) yield gen.Wait("k1") # just write, don't finish self.write("3") class GenTaskHandler(RequestHandler): @asynchronous @gen.engine def get(self): io_loop = self.request.connection.stream.io_loop client = AsyncHTTPClient(io_loop=io_loop) response = yield gen.Task(client.fetch, self.get_argument('url')) response.rethrow() self.finish(b"got response: " + response.body) class GenExceptionHandler(RequestHandler): @asynchronous @gen.engine def get(self): # This test depends on the order of the two decorators. io_loop = self.request.connection.stream.io_loop yield gen.Task(io_loop.add_callback) raise Exception("oops") class GenCoroutineExceptionHandler(RequestHandler): @asynchronous @gen.coroutine def get(self): # This test depends on the order of the two decorators. io_loop = self.request.connection.stream.io_loop yield gen.Task(io_loop.add_callback) raise Exception("oops") class GenYieldExceptionHandler(RequestHandler): @asynchronous @gen.engine def get(self): io_loop = self.request.connection.stream.io_loop # Test the interaction of the two stack_contexts. 
def fail_task(callback): io_loop.add_callback(lambda: 1 / 0) try: yield gen.Task(fail_task) raise Exception("did not get expected exception") except ZeroDivisionError: self.finish('ok') class UndecoratedCoroutinesHandler(RequestHandler): @gen.coroutine def prepare(self): self.chunks = [] yield gen.Task(IOLoop.current().add_callback) self.chunks.append('1') @gen.coroutine def get(self): self.chunks.append('2') yield gen.Task(IOLoop.current().add_callback) self.chunks.append('3') yield gen.Task(IOLoop.current().add_callback) self.write(''.join(self.chunks)) class AsyncPrepareErrorHandler(RequestHandler): @gen.coroutine def prepare(self): yield gen.Task(IOLoop.current().add_callback) raise HTTPError(403) def get(self): self.finish('ok') class GenWebTest(AsyncHTTPTestCase): def get_app(self): return Application([ ('/sequence', GenSequenceHandler), ('/coroutine_sequence', GenCoroutineSequenceHandler), ('/coroutine_unfinished_sequence', GenCoroutineUnfinishedSequenceHandler), ('/task', GenTaskHandler), ('/exception', GenExceptionHandler), ('/coroutine_exception', GenCoroutineExceptionHandler), ('/yield_exception', GenYieldExceptionHandler), ('/undecorated_coroutine', UndecoratedCoroutinesHandler), ('/async_prepare_error', AsyncPrepareErrorHandler), ]) def test_sequence_handler(self): response = self.fetch('/sequence') self.assertEqual(response.body, b"123") def test_coroutine_sequence_handler(self): response = self.fetch('/coroutine_sequence') self.assertEqual(response.body, b"123") def test_coroutine_unfinished_sequence_handler(self): response = self.fetch('/coroutine_unfinished_sequence') self.assertEqual(response.body, b"123") def test_task_handler(self): response = self.fetch('/task?url=%s' % url_escape(self.get_url('/sequence'))) self.assertEqual(response.body, b"got response: 123") def test_exception_handler(self): # Make sure we get an error and not a timeout with ExpectLog(app_log, "Uncaught exception GET /exception"): response = self.fetch('/exception') 
self.assertEqual(500, response.code) def test_coroutine_exception_handler(self): # Make sure we get an error and not a timeout with ExpectLog(app_log, "Uncaught exception GET /coroutine_exception"): response = self.fetch('/coroutine_exception') self.assertEqual(500, response.code) def test_yield_exception_handler(self): response = self.fetch('/yield_exception') self.assertEqual(response.body, b'ok') def test_undecorated_coroutines(self): response = self.fetch('/undecorated_coroutine') self.assertEqual(response.body, b'123') def test_async_prepare_error_handler(self): response = self.fetch('/async_prepare_error') self.assertEqual(response.code, 403) if __name__ == '__main__': unittest.main()
Drvanon/Game
venv/lib/python3.3/site-packages/tornado/test/gen_test.py
Python
apache-2.0
28,352
from . import accounting from . import proc from . import sessions
borg-project/borg
borg/unix/__init__.py
Python
mit
68
# -*- coding: utf-8 -*-
# Copyright(C) 2013 Christophe Lampin
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.

from datetime import datetime
import re
import urllib
from decimal import Decimal
from weboob.tools.browser import BasePage
from weboob.capabilities.bill import Subscription, Detail, Bill

__all__ = ['AmeliBasePage', 'LoginPage', 'HomePage', 'AccountPage', 'LastPaymentsPage', 'PaymentDetailsPage', 'BillsPage']

# Ugly array to avoid the use of french locale
FRENCH_MONTHS = [u'janvier', u'février', u'mars', u'avril', u'mai', u'juin',
                 u'juillet', u'août', u'septembre', u'octobre', u'novembre', u'décembre']


class AmeliBasePage(BasePage):
    """Base page for the Ameli website; provides the logged-in check."""

    def is_logged(self):
        """Return True when the page contains the logout link."""
        return len(self.document.xpath('//a[@id="logout"]')) > 0


class LoginPage(AmeliBasePage):
    """Login form page."""

    def login(self, login, password):
        """Fill and submit the credential form.

        :param login: social-security number (str/unicode).
        :param password: account password (str/unicode).
        """
        self.browser.select_form('connexionCompteForm')
        self.browser["connexioncompte_2numSecuriteSociale"] = login.encode('utf8')
        self.browser["connexioncompte_2codeConfidentiel"] = password.encode('utf8')
        self.browser.submit()


class HomePage(AmeliBasePage):
    """Landing page after login; nothing to extract."""

    def on_loaded(self):
        pass


class AccountPage(AmeliBasePage):
    """Account page listing the insured persons (subscriptions)."""

    def iter_subscription_list(self):
        """Yield one Subscription per insured person found on the page.

        Persons without an explicit number (children) get synthetic ids
        'AFFILIE1', 'AFFILIE2', ... in document order.
        """
        idents = self.document.xpath('//div[contains(@class, "blocfond")]')
        enfants = 0
        for ident in idents:
            if len(ident.xpath('.//h4')) == 0:
                continue
            name = self.parser.tocleanstring(ident.xpath('.//h4')[0])
            lis = ident.xpath('.//li')
            if len(lis) > 3:
                # Keep only the digits of the displayed number.
                number = re.sub('[^\d]+', '', ident.xpath('.//li')[3].text)
            else:
                enfants = enfants + 1
                number = "AFFILIE" + str(enfants)
            sub = Subscription(number)
            sub._id = number
            sub.label = unicode(name)
            sub.subscriber = unicode(name)
            yield sub


class LastPaymentsPage(AmeliBasePage):
    """Page listing the most recent payments."""

    def iter_last_payments(self):
        """Yield the href of each payment-detail link, in page order."""
        list_table = self.document.xpath('//table[@id="ligneTabDerniersPaiements"]')
        if len(list_table) > 0:
            table = list_table[0].xpath('.//tr')
            for tr in table:
                list_a = tr.xpath('.//a')
                if len(list_a) == 0:
                    continue
                yield list_a[0].attrib.get('href')


class PaymentDetailsPage(AmeliBasePage):
    """Detail page of a single payment."""

    def iter_payment_details(self, sub):
        """Yield one Detail per line of the payment table for *sub*.

        :param sub: Subscription whose details are requested; its _id is
                    either all digits (main insured) or 'AFFILIEn'.
        """
        if sub._id.isdigit():
            idx = 0
        else:
            # BUGFIX: the suffix of an 'AFFILIEn' id was kept as a string,
            # breaking the integer comparison and the list indexing below.
            idx = int(sub._id.replace('AFFILIE', ''))
        # BUGFIX: the second operand used to compare the xpath() *list*
        # directly against idx (always truthy in Python 2 by type ordering);
        # compare the element count instead, like the first operand.
        if len(self.document.xpath('//div[@class="centrepage"]/h3')) > idx or \
           len(self.document.xpath('//table[@id="DetailPaiement3"]')) > idx:
            id_str = self.document.xpath('//div[@class="centrepage"]/h3')[idx].text.strip()
            m = re.match('.*le (.*) pour un montant de.*', id_str)
            if m:
                id_str = m.group(1)
            id_date = datetime.strptime(id_str, '%d/%m/%Y').date()
            # Detail ids are '<sub id>.<YYYYMMDD>.<line number>'.
            id = sub._id + "." + datetime.strftime(id_date, "%Y%m%d")
            table = self.document.xpath('//table[@id="DetailPaiement3"]')[idx].xpath('.//tr')
            line = 1
            last_date = None
            for tr in table:
                tds = tr.xpath('.//td')
                if len(tds) == 0:
                    continue
                date_str = tds[0].text
                det = Detail()
                det.id = id + "." + str(line)
                det.label = unicode(tds[1].text.strip())
                if date_str is None or date_str == '':
                    # Continuation row: reuse the date of the previous row.
                    det.infos = u''
                    det.datetime = last_date
                else:
                    det.infos = u'Payé ' + unicode(re.sub('[^\d,-]+', '', tds[2].text)) + u'€ / Base ' + unicode(re.sub('[^\d,-]+', '', tds[3].text)) + u'€ / Taux ' + unicode(re.sub('[^\d,-]+', '', tds[4].text)) + '%'
                    det.datetime = datetime.strptime(date_str, '%d/%m/%Y').date()
                    last_date = det.datetime
                det.price = Decimal(re.sub('[^\d,-]+', '', tds[5].text).replace(',', '.'))
                line = line + 1
                yield det


class BillsPage(AmeliBasePage):
    """Page listing the monthly statements (one downloadable PDF each)."""

    def iter_bills(self, sub):
        """Yield one Bill per month row of the statements table.

        French month names are mapped to numbers via FRENCH_MONTHS to avoid
        depending on the process locale.
        """
        table = self.document.xpath('//table[@id="tableauDecompte"]')[0].xpath('.//tr')
        for tr in table:
            list_tds = tr.xpath('.//td')
            if len(list_tds) == 0:
                continue
            date_str = list_tds[0].text
            month_str = date_str.split()[0]
            date = datetime.strptime(re.sub(month_str, str(FRENCH_MONTHS.index(month_str) + 1), date_str), "%m %Y").date()
            amount = list_tds[1].text
            if amount is None:
                continue
            amount = re.sub(' euros', '', amount)
            bil = Bill()
            bil.id = sub._id + "." + date.strftime("%Y%m")
            bil.date = date
            bil.label = u'' + amount.strip()
            bil.format = u'pdf'
            filedate = date.strftime("%m%Y")
            bil._url = '/PortailAS/PDFServletReleveMensuel.dopdf'
            bil._args = {'PDF.moisRecherche': filedate}
            yield bil

    def get_bill(self, bill):
        """Fetch the PDF of *bill* by POSTing its saved URL and arguments."""
        self.location(bill._url, urllib.urlencode(bill._args))
blckshrk/Weboob
modules/ameli/pages.py
Python
agpl-3.0
5,953
#!/usr/bin/env python from setuptools import setup import os from sbp.version import VERSION CLASSIFIERS = [ 'Intended Audience :: Developers', 'Intended Audience :: Science/Research', 'Operating System :: POSIX :: Linux', 'Operating System :: MacOS :: MacOS X', 'Operating System :: Microsoft :: Windows', 'Programming Language :: Python', 'Topic :: Scientific/Engineering :: Interface Engine/Protocol Translator', 'Topic :: Software Development :: Libraries :: Python Modules', 'Programming Language :: Python :: 2.7', ] PACKAGES = [ 'sbp', 'sbp.client', 'sbp.client.drivers', 'sbp.client.loggers', ] PLATFORMS = [ 'linux', 'osx', 'win32', ] PACKAGE_DATA = { 'sbp' : [ 'RELEASE-VERSION', ] } cwd = os.path.abspath(os.path.dirname(__file__)) with open(cwd + '/README.rst') as f: readme = f.read() with open(cwd + '/requirements.txt') as f: INSTALL_REQUIRES = [i.strip() for i in f.readlines()] with open(cwd + '/test_requirements.txt') as f: TEST_REQUIRES = [i.strip() for i in f.readlines()] setup(name='sbp', description='Python bindings for Swift Binary Protocol', long_description=readme, version=VERSION, author='Swift Navigation', author_email='dev@swiftnav.com', url='https://github.com/swift-nav/libsbp', classifiers=CLASSIFIERS, packages=PACKAGES, platforms=PLATFORMS, package_data=PACKAGE_DATA, install_requires=INSTALL_REQUIRES, tests_require=TEST_REQUIRES, use_2to3=False, zip_safe=False)
swift-nav/libsbp
python/setup.py
Python
lgpl-3.0
1,535
import logging


class EventError(Exception):
    """Exception class for event handling errors.

    Attributes:
        message -- explanation of the error
    """

    def __init__(self, message):
        self.message = message


class EventHandler:
    """
    Handler of events. Can listen to an REST endpoint, process the received
    events and invoke the appropriate commands on a CecController object
    """

    def __init__(self, session, config):
        """
        Constructor.

        :param session: SessionHandler to be used to call commands.
        :param config: ConfigOptions holding info on how json events are formed etc.
        :return: None
        """
        self._session = session
        self._config = config

    def __enter__(self):
        self._session.initialize()
        self._config.read_from_file()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self._session.cleanup()

    def listen_for_events(self, event_timeout):
        """
        Listens on the given URL for events and dispatches the type of event
        to the right function for further processing. Processes one response
        at a time. If you want to listen indefinitely you must loop outside.

        :argument event_timeout: Number of seconds for timing when listening.
                                 -1 for no timeout.
        :return: None
        :raises EventError: on timeout, non-success status or malformed body.
        """
        import requests

        try:
            # FIX: was `event_timeout is -1` — identity comparison on an int
            # is not equality and only works by CPython's small-int caching.
            if event_timeout == -1:
                response = requests.get(self._config.rest_url)
            else:
                response = requests.get(self._config.rest_url, timeout=event_timeout)
        except requests.exceptions.Timeout:
            raise EventError("Request to " + self._config.rest_url + " timed out")

        # Evaluate successful response (code=200, json, well formed).
        # FIX: was `is self._config.rest_success_code` (identity, not equality).
        if response.status_code == self._config.rest_success_code:
            try:
                self.process_json_response(response.json())
            except EventError as error:
                raise EventError(self._config.rest_url + " - " + error.message)
        else:
            raise EventError("Error: " + self._config.rest_url +
                             " responded with status code: " + str(response.status_code))

    def process_json_response(self, json_data):
        """
        Parses the received json as specified in the config, and calls for
        further process in case of playback events.

        :param json_data: Received response in json format.
        :return: None
        :raises EventError: if the expected events block is missing or the
                            payload does not have the expected structure.
        """
        logging.debug("Event received:\n---------" + str(json_data) + "\n---------")

        try:
            if self._config.events in json_data:
                for event in json_data[self._config.events]:
                    if self._config.pb_notif in event.keys():
                        self._process_single_playback_event(event)
            else:
                raise EventError("Response malformed, block " + self._config.events + " not found.")
        except TypeError:
            raise EventError("Response malformed, TypeError")

    def _process_single_playback_event(self, event):
        """
        Processes the given playback event and triggers the respective command.
        Expects the following structure: {str: int}.

        :param event: type of event to process
        :return: None
        """
        n_type = event[self._config.pb_notif]

        # FIX: all branches used `is` on integers; use value equality instead.
        if n_type == self._config.pb_notif_active_device:
            self._session.active(True)
        elif n_type == self._config.pb_notif_inactive_device:
            self._session.active(False)
        elif n_type == self._config.pb_notif_play:
            self._session.play()
        elif n_type in (self._config.pb_notif_stop, self._config.pb_notif_pause):
            self._session.pause(self._config.power_off_delay_mins * 60)
        else:
            logging.debug("Type of playback event not recognised.")


class ConfigOptions:
    """
    Handles configuration options, including reading from disk (sample_config.ini)
    """

    def __init__(self):
        self._rest_url = ""
        self._rest_success_code = 200  # Standard HTTP success response code
        self._events = ""
        self._pb_notif = ""
        self._pb_notif_stop = -1
        self._pb_notif_play = -1
        self._pb_notif_pause = -1
        self._pb_notif_active_device = -1
        self._pb_notif_inactive_device = -1
        self._power_off_delay_mins = 10

    # Read-only accessors for the parsed configuration values.

    @property
    def rest_url(self):
        return self._rest_url

    @property
    def rest_success_code(self):
        return self._rest_success_code

    @property
    def events(self):
        return self._events

    @property
    def pb_notif(self):
        return self._pb_notif

    @property
    def pb_notif_stop(self):
        return self._pb_notif_stop

    @property
    def pb_notif_play(self):
        return self._pb_notif_play

    @property
    def pb_notif_pause(self):
        return self._pb_notif_pause

    @property
    def pb_notif_active_device(self):
        return self._pb_notif_active_device

    @property
    def pb_notif_inactive_device(self):
        return self._pb_notif_inactive_device

    @property
    def power_off_delay_mins(self):
        return self._power_off_delay_mins

    def read_from_file(self):
        """
        Reads from .config.ini in the same directory the necessary configuration params.

        :raises ValueError: if no config.ini could be read from any location.
        """
        import configparser

        config = configparser.ConfigParser()
        config.optionxform = str

        # All possible locations for configuration files, in precedence order.
        import os
        config_files = [os.path.join("/etc/audio-device-controller", "config.ini"),
                        os.path.join(os.path.expanduser("~/.audio-device-controller"), "config.ini"),
                        os.path.join(os.curdir, "config.ini")]

        read_files = config.read(config_files)
        logging.debug("File(s) config.ini found at:" + ", ".join(read_files))

        # Check that the parser could read at least one file, and then extract the data.
        if len(read_files) > 0:
            self._rest_url = config.get("EventServer", "rest_url", fallback="")
            self._events = config.get("MediaFormat", "events", fallback="")
            self._pb_notif = config.get("MediaFormat", "pb_notif", fallback="")
            self._pb_notif_stop = config.getint("MediaFormat", "pb_notif_stop", fallback=-1)
            self._pb_notif_play = config.getint("MediaFormat", "pb_notif_play", fallback=-1)
            self._pb_notif_pause = config.getint("MediaFormat", "pb_notif_pause", fallback=-1)
            self._pb_notif_active_device = config.getint("MediaFormat", "pb_notif_active_device", fallback=-1)
            self._pb_notif_inactive_device = config.getint("MediaFormat", "pb_notif_inactive_device", fallback=-1)
            self._power_off_delay_mins = config.getint("DeviceControl", "power_off_delay_mins", fallback=10)
            logging.info(self)
        else:
            raise ValueError("Failed to open config.ini")

    def __str__(self):  # pragma: no cover
        """
        Returns a string with the current configuration.

        :return: str
        """
        ret = "".join(
            ["Configuration options\n=======================",
             "\nURL:                 ", self.rest_url,
             "\nEvents:              ", self.events,
             "\nPB notification:     ", self.pb_notif,
             "\nPB stop:             ", str(self.pb_notif_stop),
             "\nPB play:             ", str(self.pb_notif_play),
             "\nPB pause:            ", str(self.pb_notif_pause),
             "\nPB active device:    ", str(self.pb_notif_active_device),
             "\nPB inactive device:  ", str(self.pb_notif_inactive_device),
             "\nPB power off delay:  ", str(self.power_off_delay_mins)])
        return ret
inphinitum/audio-device-controller
audio_device_controller/events.py
Python
gpl-2.0
8,110
# -*- coding: utf-8 -*- # Generated by Django 1.11.2 on 2017-07-06 20:41 from __future__ import unicode_literals from django.db import migrations, connection def _table_exists(db_cursor, tablename): "Returns bool if table exists or not" return tablename in connection.introspection.table_names() class Migration(migrations.Migration): dependencies = [ ('comms', '0014_auto_20170705_1736'), ] db_cursor = connection.cursor() if not _table_exists(db_cursor, "channels.channeldb_db_receivers_players"): # OBS - this is run BEFORE migrations are run! operations = [] else: operations = [ migrations.RemoveField( model_name='channeldb', name='db_subscriptions', # this is now db_account_subscriptions ), migrations.RemoveField( model_name='msg', name='db_receivers_players', ), migrations.RemoveField( model_name='msg', name='db_sender_players', ), ]
feend78/evennia
evennia/comms/migrations/0015_auto_20170706_2041.py
Python
bsd-3-clause
1,091
#Copyright 2009 Humanitarian International Services Group
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.

'''
Created July, 2009

BadRequestException and subclasses, all subclass UtakaException with an
httpStatus of 400. Each subclass fixes the S3-style error 'Code' and
'Message' payload; some accept extra context (digests, argument names,
bucket names, ...) that is merged into the payload dict.

@author: Andrew
'''
from utaka.src.exceptions.UtakaException import UtakaException
import utaka.src.Config as Config


#400 -- base class: every subclass reports HTTP status 400
class BadRequestException(UtakaException):
    def __init__(self, args):
        UtakaException.__init__(self, args, 400)


class AmbiguousGrantByEmailAddress(BadRequestException):
    def __init__(self):
        BadRequestException.__init__(self,
            {'Message': 'The e-mail address you provided is associated with more than one account.',
             'Code': 'BadRequest'})


class BadDigestException(BadRequestException):
    def __init__(self, expectedDigest, calculatedDigest):
        BadRequestException.__init__(self,
            {'Message': 'The Content-MD5 you specified did not match what we received',
             'ExpectedDigest': expectedDigest,
             'CalculatedDigest': calculatedDigest,
             'Code': 'BadDigest'})


class CredentialsNotSupported(BadRequestException):
    def __init__(self):
        BadRequestException.__init__(self,
            {'Message': 'This request does not support credentials',
             'Code': 'CredentialsNotSupported'})


class EntityTooSmallException(BadRequestException):
    def __init__(self):
        BadRequestException.__init__(self,
            {'Message': 'Your proposed upload is smaller than the minimum allowed object size',
             'Code': 'EntityTooSmall'})


class EntityTooLargeException(BadRequestException):
    def __init__(self):
        BadRequestException.__init__(self,
            {'Message': 'Your proposed upload exceeds the maximum allowed object size',
             'Code': 'EntityTooLarge'})


class ExpiredTokenException(BadRequestException):
    def __init__(self):
        BadRequestException.__init__(self,
            {'Message': 'The provided token has expired.',
             'Code': 'ExpiredToken'})


class IncompleteBodyException(BadRequestException):
    def __init__(self):
        BadRequestException.__init__(self,
            {'Message': 'You did not provide the number of bytes specified by the Content-Length HTTP Header',
             'Code': 'IncompleteBody'})


class IncorrectNumberOfFilesInPostRequestException(BadRequestException):
    def __init__(self):
        BadRequestException.__init__(self,
            {'Message': 'POST requires exactly one file upload per request',
             'Code': 'IncorrectNumberOfFilesInPostRequest'})


class InlineDataTooLargeException(BadRequestException):
    def __init__(self):
        BadRequestException.__init__(self,
            {'Message': 'Inline data exceeds the maximum allowed size',
             'Code': 'InlineDataTooLarge'})


class InvalidArgumentException(BadRequestException):
    def __init__(self, argValue, argName, msg='Invalid Argument'):
        BadRequestException.__init__(self,
            {'Message': msg,
             'Code': 'InvalidArgument',
             'ArgumentValue': argValue,
             'ArgumentName': argName})


class InvalidArgumentAuthorizationException(InvalidArgumentException):
    def __init__(self, argValue):
        # Expected header shape is '<prefix> AccessKeyId:signature', where the
        # prefix comes from the authentication section of the config.
        headerPrefix = str(Config.get('authentication', 'prefix'))
        InvalidArgumentException.__init__(self, argValue, 'Authorization',
            ("Authorization header is invalid.  Expected " + headerPrefix + " AccessKeyId:signature"))


class InvalidArgumentAuthorizationSpacingException(InvalidArgumentException):
    def __init__(self, argValue):
        InvalidArgumentException.__init__(self, argValue, 'Authorization',
            "Authorization header is invalid -- one and only one ' '(space) required")


class InvalidArgumentMetadataDirectiveException(InvalidArgumentException):
    def __init__(self, argValue):
        InvalidArgumentException.__init__(self, argValue, 'MetadataDirective',
            'A specified metadata directive value must be either REPLACE or COPY.')


class InvalidArgumentQueryStringConflictException(InvalidArgumentException):
    def __init__(self, conflictParamA, conflictParamB):
        InvalidArgumentException.__init__(self, conflictParamA, 'ResourceType',
            "Conflicting query string parameters: %s and %s" % (str(conflictParamA), str(conflictParamB)))


class InvalidBucketNameException(BadRequestException):
    def __init__(self, bucketName):
        BadRequestException.__init__(self,
            {'Message': 'The specified bucket is not valid',
             'Code': 'InvalidBucketName',
             'BucketName': bucketName})


class InvalidDigestException(BadRequestException):
    def __init__(self, contentMD5):
        BadRequestException.__init__(self,
            {'Message': 'The Content-MD5 you specified is not valid',
             'Code': 'InvalidDigest',
             'Content-MD5': contentMD5})


class InvalidLocationConstraintException(BadRequestException):
    def __init__(self):
        BadRequestException.__init__(self,
            {'Message': 'The specified location constraint is not valid',
             'Code': 'InvalidLocationConstraint'})


class InvalidPolicyDocumentException(BadRequestException):
    def __init__(self):
        BadRequestException.__init__(self,
            {'Message': 'The content of the form does not meet the conditions specified in the policy document',
             'Code': 'InvalidPolicyDocument'})


class InvalidSOAPRequestException(BadRequestException):
    def __init__(self):
        BadRequestException.__init__(self,
            {'Message': 'The SOAP request body is invalid',
             'Code': 'InvalidSOAPRequest'})


class InvalidStorageClassException(BadRequestException):
    def __init__(self):
        BadRequestException.__init__(self,
            {'Message': 'The storage class you specified is not valid',
             'Code': 'InvalidStorageClass'})


class InvalidTargetBucketForLoggingException(BadRequestException):
    def __init__(self, targetBucket):
        BadRequestException.__init__(self,
            {'Message': 'The target bucket for logging does not exist, is not owned by you, or does not have the appropriate grants for the log-delivery group.',
             'Code': 'InvalidTargetBucketForLogging',
             'TargetBucket': targetBucket})


class InvalidTokenException(BadRequestException):
    def __init__(self):
        BadRequestException.__init__(self,
            {'Message': 'The provided token is malformed or otherwise invalid',
             'Code': 'InvalidTokenException'})


class InvalidURIException(BadRequestException):
    def __init__(self):
        BadRequestException.__init__(self,
            {'Message': "Couldn't parse the specified URI.",
             'Code': 'InvalidURI'})


class KeyTooLongException(BadRequestException):
    # NOTE(review): the 'args' parameter is accepted but ignored; kept for
    # backward compatibility with existing call sites.
    def __init__(self, args):
        BadRequestException.__init__(self,
            {'Message': 'Your key is too long',
             'Code': 'KeyTooLong'})


class MalformedACLErrorException(BadRequestException):
    def __init__(self):
        BadRequestException.__init__(self,
            {'Message': 'The XML you provided was not well-formed or did not validate against our published schema',
             'Code': 'MalformedACL'})


class MalformedPOSTRequestException(BadRequestException):
    def __init__(self):
        BadRequestException.__init__(self,
            {'Message': 'The body of your POST request is not well-formed multipart/form-data.',
             'Code': 'MalformedPOSTRequest'})


class MalformedXMLException(BadRequestException):
    def __init__(self):
        BadRequestException.__init__(self,
            {'Message': 'The XML you provided was not well-formed or did not validate against our published schema',
             'Code': 'MalformedXML'})


class MaxMessageLengthExceededException(BadRequestException):
    def __init__(self):
        BadRequestException.__init__(self,
            {'Message': 'Your request was too big',
             'Code': 'MaxMessageLengthExceeded'})


class MaxPostPreDataLengthExceededErrorException(BadRequestException):
    def __init__(self):
        BadRequestException.__init__(self,
            {'Message': 'Your POST request fields preceding the upload file were too large.',
             'Code': 'MaxPostPreDataLengthExceededError'})


class MetadataTooLargeException(BadRequestException):
    def __init__(self):
        # BUGFIX: message read 'Your metadata eaders exceed ...' (typo in the
        # user-facing error text).
        BadRequestException.__init__(self,
            {'Message': 'Your metadata headers exceed the maximum allowed metadata size.',
             'Code': 'MetadataTooLarge'})


class MissingRequestBodyErrorException(BadRequestException):
    def __init__(self):
        BadRequestException.__init__(self,
            {'Message': 'Request body is empty',
             'Code': 'MissingRequestBodyError'})


class MissingSecurityElementException(BadRequestException):
    def __init__(self):
        BadRequestException.__init__(self,
            {'Message': 'The SOAP 1.1 request is missing a security element',
             'Code': 'MissingSecurityElement'})


class MissingSecurityHeaderException(BadRequestException):
    def __init__(self):
        BadRequestException.__init__(self,
            {'Message': 'Your request was missing a required header',
             'Code': 'MissingSecurityHeader'})


class MissingHeaderException(BadRequestException):
    def __init__(self, header, headerDescription):
        BadRequestException.__init__(self,
            {'Message': 'Your request was missing a required header',
             'Code': 'MissingHeader',
             'Header': header,
             'HeaderDescription': headerDescription})


class NoLoggingStatusForKeyException(BadRequestException):
    def __init__(self):
        BadRequestException.__init__(self,
            {'Message': 'There is no such thing as a logging status sub-resource for a key',
             'Code': 'NoLoggingStatusForKey'})


class RequestIsNotMultiPartContentException(BadRequestException):
    def __init__(self):
        BadRequestException.__init__(self,
            {'Message': 'Bucket POST must be of the enclosure-type multipart/form-data.',
             'Code': 'RequestIsNotMultiPartContent'})


class RequestTimeoutException(BadRequestException):
    def __init__(self):
        BadRequestException.__init__(self,
            {'Message': 'Your socket connection to the server was not read from or written to within the timeout period',
             'Code': 'RequestTimeout'})


class RequestTorrentOfBucketErrorException(BadRequestException):
    def __init__(self):
        BadRequestException.__init__(self,
            {'Message': 'Requesting the torrent file of a bucket is not permitted',
             'Code': 'RequestTorrentOfBucketError'})


class TokenRefreshRequiredException(BadRequestException):
    def __init__(self):
        BadRequestException.__init__(self,
            {'Message': 'The provided token must be refreshed',
             'Code': 'TokenRefreshRequired'})


class TooManyBucketsException(BadRequestException):
    def __init__(self):
        BadRequestException.__init__(self,
            {'Message': 'You have attempted to create more buckets than allowed',
             'Code': 'TooManyBuckets'})


class UnexpectedContentException(BadRequestException):
    def __init__(self):
        BadRequestException.__init__(self,
            {'Message': 'This request does not support content',
             'Code': 'UnexpectedContent'})


class UnresolvableGrantByEmailAddressException(BadRequestException):
    def __init__(self, email):
        BadRequestException.__init__(self,
            {'Message': 'The e-mail address you provided does not match any account on record',
             'Code': 'UnresolvableGrantByEmailAddress',
             'E-mail': email})


class UserKeyMustBeSpecifiedException(BadRequestException):
    def __init__(self):
        BadRequestException.__init__(self,
            {'Message': 'The bucket POST must contain the specified field name. If it is specified, please check the order of the fields.',
             'Code': 'UserKeyMustBeSpecified'})


class UseridNotValidException(BadRequestException):
    def __init__(self, userid):
        BadRequestException.__init__(self,
            {'Message': 'Userid should be a positive integer greater than 2.',
             'Code': 'UseridNotValid',
             'Userid': userid})


class UseridNotFoundException(BadRequestException):
    def __init__(self, userid):
        BadRequestException.__init__(self,
            {'Code': 'UseridNotFound',
             'Description': 'The userid you provided was not found',
             'Userid': userid})
mattmillr/utaka
src/exceptions/BadRequestException.py
Python
apache-2.0
11,951
#!/usr/bin/env python3
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Device registration helpers for the Google Assistant API."""

import json
import os
import uuid

import google.auth.transport.requests

import aiy.assistant.auth_helpers

# Static device-model metadata registered with the Assistant API.
_DEVICE_MODEL = "voice-kit"
_DEVICE_MANUFACTURER = "AIY Projects"
_DEVICE_NAME = "Voice Kit"
_DEVICE_TYPE = "action.devices.types.LIGHT"

# On-disk cache for the (model_id, device_id) pair, kept alongside the
# assistant credentials cache.
_DEVICE_ID_FILE = os.path.join(
    aiy.assistant.auth_helpers._VR_CACHE_DIR, 'device_id.json')


def _get_project_id():
    """Read the Google Cloud project id out of the client-secrets JSON file."""
    with open(aiy.assistant.auth_helpers._ASSISTANT_CREDENTIALS_FILE) as f:
        client_secrets_data = json.load(f)
        return client_secrets_data["installed"]["project_id"]


def _get_api_url(*args):
    """Join path segments onto the embedded-assistant v1alpha2 projects URL."""
    return "/".join(
        ("https://embeddedassistant.googleapis.com/v1alpha2/projects",) + args)


def _load_ids(id_path):
    """Load a cached (model_id, device_id) tuple from *id_path* (JSON)."""
    with open(id_path, 'r') as f:
        id_data = json.load(f)
    return id_data["model_id"], id_data["device_id"]


def _save_ids(id_path, model_id, device_id):
    """Write the (model_id, device_id) pair to *id_path*, creating parent dirs."""
    if not os.path.exists(os.path.dirname(id_path)):
        os.makedirs(os.path.dirname(id_path))

    id_data = {
        "model_id": model_id,
        "device_id": device_id,
    }
    with open(id_path, 'w') as f:
        json.dump(id_data, f)


def _get_model_id(credentials, session, project_id):
    """Register (or reuse) the device model for *project_id* and return its id.

    NOTE(review): the credentials parameter is unused here — authentication is
    carried by the AuthorizedSession.
    """
    model_id = "%s-%s" % (project_id, _DEVICE_MODEL)
    payload = {
        "device_model_id": model_id,
        "project_id": project_id,
        "device_type": _DEVICE_TYPE,
        "manifest": {
            "manufacturer": _DEVICE_MANUFACTURER,
            "product_name": _DEVICE_NAME,
        },
    }
    r = session.post(_get_api_url(project_id, "deviceModels"),
                     data=json.dumps(payload))
    # Ignore 409, which means we've already created the model ID.
    if r.status_code != 409:
        r.raise_for_status()

    return model_id


def get_ids(credentials, model_id=None):
    """get_ids gets a Device ID for use with the Google Assistant SDK.

    It optionally also gets a Device Model ID if one is not given. The IDs are
    cached on disk so that a device keeps a consistent ID.

    Returns:
        a tuple: (model_id, device_id)
    """
    if os.path.exists(_DEVICE_ID_FILE):
        return _load_ids(_DEVICE_ID_FILE)

    session = google.auth.transport.requests.AuthorizedSession(credentials)
    project_id = _get_project_id()
    model_id = model_id or _get_model_id(credentials, session, project_id)
    # Each physical device gets a fresh random suffix on first registration.
    device_id = "%s-%s" % (model_id, uuid.uuid4())
    # We can hardcode client_type as SDK_SERVICE, because the Assistant Library
    # creates its own device_id.
    payload = {
        "id": device_id,
        "model_id": model_id,
        "client_type": "SDK_SERVICE",
    }
    r = session.post(_get_api_url(project_id, "devices"),
                     data=json.dumps(payload))
    r.raise_for_status()
    _save_ids(_DEVICE_ID_FILE, model_id, device_id)
    return model_id, device_id


if __name__ == "__main__":
    credentials = aiy.assistant.auth_helpers.get_assistant_credentials()
    print("ids:", get_ids(credentials))
t1m0thyj/aiyprojects-raspbian
src/aiy/assistant/device_helpers.py
Python
apache-2.0
3,681
# -*- coding: utf-8 -*- """ Logging module for the remote control """ import logging REMOTE_LOG = logging.getLogger("REMOTE_LOG")
LoiX07/photobooth
tools/remote_log.py
Python
gpl-3.0
132
# -*- coding: utf8 -*- # This file is part of PYBOSSA. # # Copyright (C) 2015 Scifabric LTD. # # PYBOSSA is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # PYBOSSA is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with PYBOSSA. If not, see <http://www.gnu.org/licenses/>. from sqlalchemy import Integer, Boolean, Float, UnicodeText, Text from sqlalchemy.schema import Column, ForeignKey from sqlalchemy.orm import relationship, backref from sqlalchemy.dialects.postgresql import JSONB, ARRAY from sqlalchemy.ext.mutable import MutableList from pybossa.core import db from pybossa.model import DomainObject, make_timestamp from pybossa.model.task_run import TaskRun class Task(db.Model, DomainObject): '''An individual Task which can be performed by a user. A Task is associated to a project. ''' __tablename__ = 'task' #: Task.ID id = Column(Integer, primary_key=True) #: UTC timestamp when the task was created. created = Column(Text, default=make_timestamp) #: Project.ID that this task is associated with. project_id = Column(Integer, ForeignKey('project.id', ondelete='CASCADE'), nullable=False) #: Task.state: ongoing or completed. state = Column(UnicodeText, default=u'ongoing') quorum = Column(Integer, default=0) #: If the task is a calibration task calibration = Column(Integer, default=0) #: Priority of the task from 0.0 to 1.0 priority_0 = Column(Float, default=0) #: Task.info field in JSON with the data for the task. info = Column(JSONB) #: Number of answers to collect for this task. 
n_answers = Column(Integer, default=30) #: Array of User IDs that favorited this task fav_user_ids = Column(MutableList.as_mutable(ARRAY(Integer))) task_runs = relationship(TaskRun, cascade='all, delete, delete-orphan', backref='task') def pct_status(self): """Returns the percentage of Tasks that are completed""" if self.n_answers != 0 and self.n_answers is not None: return float(len(self.task_runs)) / self.n_answers else: # pragma: no cover return float(0)
PyBossa/pybossa
pybossa/model/task.py
Python
agpl-3.0
2,582
#!/usr/bin/python
#
# The Crawler
#
# NOTE(review): this is Python 2 code (HTMLParser module, print statements,
# dict.iteritems, os.tmpnam). The review notes below flag defects without
# changing behavior.

from __future__ import with_statement
from contextlib import closing
from HTMLParser import HTMLParser
import gzip
import shelve
import math
import sys
import urlmanager
import re

#class myParser(HTMLParser):
class WebNode(HTMLParser):
    # NOTE(review): these are CLASS attributes, and the mutable ones (words,
    # neighborlist) are shared by every WebNode instance — each page's words
    # and links accumulate into the same dicts. Likely meant to be set in an
    # __init__.
    words = dict()
    wc = 0
    title = ''
    settitle = False
    neighborlist = dict()
    setneighbor = ''

    def handle_starttag(self, tag, attrs):
        # Track <title> and <a href=...> start tags.
        realtag = tag.lower()
        if realtag == "title":  # and ('type', 'story') in attrs:
            # NOTE(review): 'settile' is a typo for 'settitle' — the title
            # flag checked in handle_data is never set, so titles are lost.
            self.settile = True
        elif realtag == "a":
            for (x, y) in attrs:
                if x == "href":
                    self.setneighbor = y
                    self.neighborlist[self.setneighbor] = ''

    def handle_endtag(self, tag):
        pass

    def handle_data(self, data):
        # Capture pending title/link text, then count words (uppercased,
        # stripped of non-letters) into self.words.
        if self.settitle:
            self.title = data
            self.settitle = False
        if self.setneighbor:
            self.neighborlist[self.setneighbor] = data
            self.setneighbor = ''
        dwords = data.upper().split()
        for ew in dwords:
            ew = ew if ew.isalpha() else ''.join(re.split('\W|\d', ew))
            self.wc += 1
            try:
                self.words[ew] += 1
            except:
                self.words[ew] = 1

    def readself(self, url):
        """Just read the content of the URL and feed it to the Parser, which is self"""
        urlf = urlmanager.openurl(url)
        if urlf:
            mycontent = urlf.read()
            try:
                self.feed(mycontent)
                return True
            except:
                # NOTE(review): bare except also swallows KeyboardInterrupt;
                # returns None (not False) when urlf is falsy.
                return False

    tfs = dict()  # Better create a shelf

    def docalculations(self):
        """Calculate the tf values for each word in the WebNode/Page"""
        for (ew, occ) in self.words.iteritems():
            self.tfs[ew] = occ * 2**32 * 1.0 / self.wc
        self.words.clear()  # We don't need it anymore. Do we?

    def prepareneighborlist(self):
        # Turn relative hrefs into absolute URLs rooted at this page's URL.
        urlp = re.compile('(http:\/\/.+)(.+\.(htm.?|com|info|de|php|stm))?(\/)?', re.IGNORECASE)
        # urltp = re.compile('<a (?:.*?)href="(\S*?)">(.*?)</a>', re.DOTALL)
        stem = urlp.match(self._name).group(1)
        if stem[-1] != '/':
            stem += '/'
        for en in self.neighborlist.keys():
            if not urlp.match(en):
                self.neighborlist[stem + en] = self.neighborlist[en]
                self.neighborlist.pop(en)

    def setup(self, name, alias=''):
        # Fetch the page, compute tf values, absolutize links; returns the
        # neighbor list so the caller (WebGraph.add_node) can recurse.
        print "Opening ", name, "as ", alias
        self._name = name
        self.title = alias
        self.tfs = {}
        if self.readself(name):
            self.docalculations()
            self.prepareneighborlist()
        # print len(self.tfs)
        return self.neighborlist

    def __str__(self):
        return repr(self._name)

    def add_neighbor(self, j, jdescri=''):
        # NOTE(review): add_edge passes a WebNode as j, so the dict key is an
        # object here but a URL string when filled from handle_starttag.
        self.neighborlist[j] = jdescri

import os


class WebGraph(object):
    """Graph of crawled pages; builds a tf-idf index into a shelf."""

    def __init__(self, levels=999):  # Adjust it to make infinity
        self.nodes = dict()
        self.levels = levels

    def add_node(self, i, alias='', level=0):
        # Recursively crawl page i up to self.levels deep.
        if level == self.levels:
            return False
        if i not in self.nodes:
            self.nodes[i] = WebNode()
            neighborstobe = self.nodes[i].setup(i, alias)
            for en, ena in neighborstobe.items():
                if self.add_node(en, ena, level+1):
                    self.add_edge(i, en)
        # NOTE(review): returns None (falsy) on success, so add_edge above is
        # only reached when the recursive call hits the level cap... which
        # returns False too — edges are effectively never added here.

    tfidfs = dict()  # Better create a shelf

    def collect_words(self):
        # Gather every node's tf scores into the temporary _tfidf shelf as
        # word -> [(tf, page name), ...].
        D = len(self.nodes)
        for node in self.nodes.itervalues():
            nname = node._name
            for wd, octfs in node.tfs.iteritems():
                if wd in self._tfidf:
                    x = self._tfidf[wd] + list([(octfs, nname)])
                    self._tfidf[wd] = x
                else:
                    self._tfidf[wd] = list([(octfs, nname)])

    def compute_tfidf(self):
        # Merge the collected tf lists with any existing shelf contents and
        # store word -> [document count, sorted (tf*idf, page) list].
        D = len(self.nodes)
        for wd, item in self._tfidf.iteritems():
            Dext, Rext = self.tfidfs.get(wd, [0, []])
            Di = len(item) + Dext
            idf = math.log(D*1.0/Di)  # check this
            Rst = [(x[0] * idf, x[1]) for x in item] + Rext
            Rst.sort()
            Rst.reverse()
            self.tfidfs[wd] = [Di, Rst]

    def create_tfidf(self):
        # NOTE(review): os.tmpnam is deprecated/insecure (race); tempfile
        # would be the safe替代 — confirm before changing.
        self._tfidf = shelve.open(os.tmpnam())
        self.collect_words()
        self.tfidfs = shelve.open('.tfidfsfileforcrawler')
        self.compute_tfidf()
        self._tfidf.close()

    def add_edge(self, i, j, ialias='', jalias=''):
        # NOTE(review): passes self.levels-1 as the *alias* argument of
        # add_node, not as the level — parameter mismatch.
        if i not in self.nodes:
            self.add_node(i, self.levels-1)
        if j not in self.nodes:
            self.add_node(j, self.levels-1)
        self.nodes[i].add_neighbor(self.get_node(j))

    def get_node(self, i):
        return self.nodes.get(i, None)

    def has_edge(self, i, j):
        # NOTE(review): WebNode has no 'neighbors' attribute (it is
        # 'neighborlist') and WebNode() takes no positional argument —
        # this method raises if ever called.
        return j in self.nodes.get(i, WebNode('')).neighbors
        # One might want to look up the ``get'' method of dicts in
        # python. This was not disussed in the lecture.

    def nodes(self):
        # NOTE(review): this method shadows the instance attribute
        # self.nodes (a dict) — in practice the attribute wins per instance,
        # so this method is unreachable through an instance.
        return self.nodes.keys()

    def edges(self):
        # NOTE(review): relies on the same nonexistent 'neighbors'/'name'
        # attributes as has_edge.
        for i, iN in self.nodes.iteritems():
            for j in iN.neighbors:
                yield (i, j.name)

# The graph shall be created by specifying the default level of
# recursion. It would have been better to move it to each page's
# preference. Well so it is. And edges could be added and tfidf would be
# updated (BUG: TODO: if a page comes a second time, it will get added
# again to the list of pages for every word in it - easy to debug, but
# lazy. And after all the operations, the shelf is created and could be
# closed. Next time you open the thing, it will still be ready for use.
# MAKE SURE TO CALL Graph.tfidfs.close() at the end.

import sys

print "Creating WebGraph.....",
G = WebGraph(1)#int(sys.argv[1]))
print "Created."
G.add_node("http://news.bbc.co.uk", "BBC")
#G.add_node(sys.argv[2], sys.argv[3])
G.create_tfidf()
for x, y in G.tfidfs.iteritems():
    print x, "-", y
G.tfidfs.close()
sillyfellow/interesting_problems
Python/tfidf/crawler.py
Python
gpl-2.0
6,085
# Rekall Memory Forensics # Copyright (c) 2008-2011 Volatile Systems # Copyright 2013 Google Inc. All Rights Reserved. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or (at # your option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # """ @author: Bradley L Schatz @license: GNU General Public License 2.0 or later @contact: bradley@schatzforensic.com.au This file provides support for windows Windows 7 SP 0. """ # pylint: disable=protected-access from rekall import addrspace from rekall import kb from rekall import obj from rekall import utils from rekall.plugins.overlays.windows import common def TagOffset(x): if x.obj_profile.metadata("arch") == "AMD64": return x.obj_offset - 12 return x.obj_offset - 4 # In windows 7 the VadRoot is actually composed from _MMADDRESS_NODEs instead of # _MMVAD structs. win7_overlays = { '_EPROCESS': [None, { # A symbolic link to the real vad root. 
'RealVadRoot': lambda x: x.VadRoot.BalancedRoot }], '_MMADDRESS_NODE': [None, { 'Tag': [TagOffset, ['String', dict(length=4)]], }], '_MMVAD_SHORT': [None, { 'Tag': [TagOffset, ['String', dict(length=4)]], 'Start': lambda x: x.StartingVpn << 12, 'End': lambda x: ((x.EndingVpn + 1) << 12) - 1, 'Length': lambda x: x.End - x.Start + 1, 'CommitCharge': lambda x: x.u.VadFlags.CommitCharge, }], '_MMVAD': [None, { 'Tag': [TagOffset, ['String', dict(length=4)]], 'ControlArea': lambda x: x.Subsection.ControlArea, 'Start': lambda x: x.StartingVpn << 12, 'End': lambda x: ((x.EndingVpn + 1) << 12) - 1, 'Length': lambda x: x.End - x.Start + 1, 'CommitCharge': lambda x: x.u.VadFlags.CommitCharge, }], '_MMVAD_LONG': [None, { 'Tag': [TagOffset, ['String', dict(length=4)]], 'ControlArea': lambda x: x.Subsection.ControlArea, 'Start': lambda x: x.StartingVpn << 12, 'End': lambda x: ((x.EndingVpn + 1) << 12) - 1, 'Length': lambda x: x.End - x.Start + 1, 'CommitCharge': lambda x: x.u.VadFlags.CommitCharge, }], "_CONTROL_AREA": [None, { 'FilePointer': [None, ['_EX_FAST_REF', dict( target="_FILE_OBJECT" )]], }], "_OBJECT_HEADER": [None, { "InfoMask": [None, ["Flags", dict( maskmap=utils.Invert({ 0x01: "CreatorInfo", 0x2: "NameInfo", 0x4: "HandleInfo", 0x8: "QuotaInfo", 0x10: "ProcessInfo", 0x20: "AuditInfo", 0x40: "PaddingInfo", }), target="unsigned char", )]], }], '_MM_SESSION_SPACE': [None, { # Specialized iterator to produce all the _IMAGE_ENTRY_IN_SESSION # records. 'ImageIterator': lambda x: x.ImageList.list_of_type( "_IMAGE_ENTRY_IN_SESSION", "Link") }], '_IMAGE_ENTRY_IN_SESSION': [None, { 'ImageBase': lambda x: x.Address.v() & ~7 }], } class _OBJECT_HEADER(common._OBJECT_HEADER): """A Rekall Memory Forensics object to handle Windows 7 object headers. 
Windows 7 changes the way objects are handled: References: http://www.codemachine.com/article_objectheader.html The headers look like this: _POOL_HEADER # These are optional headers: _OBJECT_HEADER_PROCESS_INFO _OBJECT_HEADER_QUOTA_INFO _OBJECT_HEADER_HANDLE_INFO _OBJECT_HEADER: ..... InfoMask .... When the object manager wants to access a specific optional header, it can use the constant lookup table nt!ObpInfoMaskToOffset to quickly calculate the offset of that header (The headers always appear in the same order): table = profile.get_constant_object( "ObpInfoMaskToOffset", target="Array", target_args=dict( target="byte" count=0x80 ) ) option_header_offset = table[ OBJECT_HEADER->InfoMask & (DesiredHeaderBit | (DesiredHeaderBit-1))] """ # This specifies the order the headers are found below the # _OBJECT_HEADER. It is obtained using "nt!ObpInfoMaskToOffset" which is a # lookup table. optional_header_mask = ( ('CreatorInfo', '_OBJECT_HEADER_CREATOR_INFO', 0x01), ('NameInfo', '_OBJECT_HEADER_NAME_INFO', 0x02), ('HandleInfo', '_OBJECT_HEADER_HANDLE_INFO', 0x04), ('QuotaInfo', '_OBJECT_HEADER_QUOTA_INFO', 0x08), ('ProcessInfo', '_OBJECT_HEADER_PROCESS_INFO', 0x10), ('AuditInfo', '_OBJECT_HEADER_AUDIT_INFO', 0x20), ('PaddingInfo', '_OBJECT_HEADER_PADDING_INFO', 0x40), ) def _GetOptionalHeader(self, struct_name, desired_bit): if not self.InfoMask & desired_bit: return obj.NoneObject("Header not set") lookup = self.obj_session.GetParameter("ObpInfoMaskToOffset") offset = lookup[self.InfoMask & (desired_bit | (desired_bit - 1))] return self.obj_profile.Object( struct_name, offset=self.obj_offset - offset, vm=self.obj_vm, parent=self) def get_object_type(self, vm=None): """Return the object's type as a string.""" return self.obj_session.GetParameter("ObjectTypeMap")[ self.TypeIndex].Name.v() @utils.safe_property def TypeIndex(self): """In windows 10 the type index is obfuscated. 
Windows 10 obfuscates the object type using a cookie: ------ nt!ObpRemoveObjectRoutine ------: 0xf801a628e7e0 0xf801a628e7e0 MOV [RSP+0x10], RBX 0xf801a628e7e5 MOV [RSP+0x18], RBP 0xf801a628e7ea MOV [RSP+0x20], RSI 0xf801a628e7ef PUSH RDI 0xf801a628e7f0 SUB RSP, 0x50 0xf801a628e7f4 MOV RBX, RCX // RCX is object header. 0xf801a628e7f7 LEA RDI, [RIP-0x48f1e] 0x0 nt!ObTypeIndexTable 0xf801a628e7fe MOV RAX, RCX 0xf801a628e801 MOVZX ESI, DL 0xf801a628e804 SHR RAX, 0x8 // Shift address by 8 0xf801a628e808 MOVZX ECX, AL 0xf801a628e80b MOVZX EAX, BYTE [RBX+0x18] // _OBJECT_HEADER.TypeIndex 0xf801a628e80f XOR RCX, RAX // XOR with object type 0xf801a628e812 MOVZX EAX, BYTE [RIP-0x493ed] 0x1dd4015af55 nt!ObHeaderCookie 0xf801a628e819 XOR RCX, RAX // XOR with cookie 0xf801a628e81c MOV RDI, [RDI+RCX*8] // Dereference table. """ cookie = self.obj_profile.get_constant_object( "ObHeaderCookie", target="byte").v() # Windows 7 has no cookie. if cookie == None: return self.m("TypeIndex") # Windows 10 xors the virtual address into this field so we need to use # the virtual address to decode it. # We are operating on the physical address space. We need to find the # virtual address. if self.obj_vm.metadata("image"): # Resolve the virtual address for this physical address. resolver = self.obj_session.GetParameter( "physical_address_resolver") vaddr, _ = resolver.PA2VA_for_DTB( self.obj_offset, self.obj_session.GetParameter("dtb"), userspace=False) # This hit does not exist in the kernel Address Space. if vaddr is None: return 0 else: vaddr = self.obj_offset return ((vaddr >> 8) ^ cookie ^ int(self.m("TypeIndex"))) & 0xFF def is_valid(self): """Determine if the object makes sense.""" # These need to be reasonable. pointer_count = int(self.PointerCount) if pointer_count > 0x100000 or pointer_count < 0: return False handle_count = int(self.HandleCount) if handle_count > 0x1000 or handle_count < 0: return False # Must be one of the types revealed by the object_types plugins. 
if self.TypeIndex >= 50 or self.TypeIndex < 1: return False return True # Build properties for the optional headers for _name, _y, _z in _OBJECT_HEADER.optional_header_mask: setattr(_OBJECT_HEADER, _name, property( lambda x, y=_y, z=_z: x._GetOptionalHeader(y, z))) class _MMADDRESS_NODE(common.VadTraverser): """In win7 the base of all Vad objects is _MMADDRESS_NODE. The Vad structures can be either _MMVAD_SHORT or _MMVAD or _MMVAD_LONG. At the base of each struct there is an _MMADDRESS_NODE which contains the LeftChild and RightChild members. In order to traverse the tree, we follow the _MMADDRESS_NODE and create the required _MMVAD type at each point depending on their tags. """ ## The actual type depends on this tag value. tag_map = {'Vadl': '_MMVAD_LONG', 'VadS': '_MMVAD_SHORT', 'Vad ': '_MMVAD', 'VadF': '_MMVAD_SHORT', 'Vadm': '_MMVAD_LONG', } class _POOL_HEADER(common._POOL_HEADER): """A class for pool headers""" MAX_PREAMBLE_SIZE = 0x50 @utils.safe_property def NonPagedPool(self): return self.PoolType.v() % 2 == 0 and self.PoolType.v() > 0 @utils.safe_property def PagedPool(self): return self.PoolType.v() % 2 == 1 @utils.safe_property def FreePool(self): return self.PoolType.v() == 0 # A class cached version of the lookup map. This is mutable and shared # between all instances. lookup = {} def _BuildLookupTable(self): """Create a fast lookup table mapping InfoMask -> minimum_offset. We are interested in the maximum distance between the _POOL_HEADER and _OBJECT_HEADER. This is dictated by the InfoMask field. Here we build a quick lookup table between the InfoMask field and the offset of the first optional header. """ ObpInfoMaskToOffset = self.obj_session.GetParameter( "ObpInfoMaskToOffset") self.lookup["\x00"] = 0 # Iterate over all the possible InfoMask values (Bytes can take on 256 # values). for i in range(0x100): # Locate the largest offset from the start of # _OBJECT_HEADER. Starting with the largest bit position 1 << 7. 
bit_position = 0x80 while bit_position > 0: # This is the optional header with the largest offset. if bit_position & i: self.lookup[chr(i)] = ObpInfoMaskToOffset[ i & (bit_position | (bit_position - 1))] break bit_position >>= 1 def IterObject(self, type=None, freed=True): """Generates possible _OBJECT_HEADER accounting for optional headers. Note that not all pool allocations have an _OBJECT_HEADER - only ones allocated from the the object manager. This means calling this method depends on which pool allocation you are after. On windows 8, pool allocations are done from preset sizes. This means that the allocation is never exactly the same size and we can not use the bottom up method like before. We therefore, have to build the headers forward by checking the preamble size and validity of each object. This is a little slower than with earlier versions of windows. Args: type: The object type name. If not specified we return all objects. """ pool_align = self.obj_profile.get_constant("PoolAlignment") allocation_size = self.BlockSize * pool_align # Operate on a cached version of the next page. # We use a temporary buffer for the object to save reads of the image. start = self.obj_end cached_data = self.obj_vm.read(start, allocation_size) cached_vm = addrspace.BufferAddressSpace( base_offset=start, data=cached_data, session=self.obj_session) # We search for the _OBJECT_HEADER.InfoMask in close proximity to our # object. We build a lookup table between the values in the InfoMask and # the minimum distance there is between the start of _OBJECT_HEADER and # the end of _POOL_HEADER. This way we can quickly skip unreasonable # values. # This is the offset within _OBJECT_HEADER of InfoMask. info_mask_offset = self.obj_profile.get_obj_offset( "_OBJECT_HEADER", "InfoMask") # Build the cache if needed. if not self.lookup: self._BuildLookupTable() # Walk over all positions in the address space and try to fit an object # header there. 
for i in utils.xrange(start, start + allocation_size - info_mask_offset, pool_align): possible_info_mask = cached_data[i - start + info_mask_offset] #if possible_info_mask > '\x7f': # continue # The minimum amount of space needed before the object header to # hold all the optional headers. minimum_offset = self.lookup[possible_info_mask] # Obviously wrong because we need more space than we have. if minimum_offset > i - start: continue # Create a test object header from the cached vm to test for # validity. test_object = self.obj_profile._OBJECT_HEADER( offset=i, vm=cached_vm) if test_object.is_valid(): if (type is None or test_object.get_object_type() == type or # Freed objects point to index 1 #(which is also 0xbad0b0b0). (freed and test_object.TypeIndex <= 2)): yield test_object class ObjectTypeMapHook(kb.ParameterHook): """Get and cache the object type map. In windows 7, rather than store a pointer to the _OBJECT_TYPE object directly, there is a global table of object types, and the object simply stores an index to it. """ name = "ObjectTypeMap" def calculate(self): return self.session.profile.get_constant_object( "ObTypeIndexTable", target="Array", target_args=dict( target="Pointer", target_args=dict( target="_OBJECT_TYPE" ) ) ) def InitializeWindows7Profile(profile): profile.add_overlay(win7_overlays) profile.add_classes( _OBJECT_HEADER=_OBJECT_HEADER, _MMADDRESS_NODE=_MMADDRESS_NODE, _POOL_HEADER=_POOL_HEADER, )
rlugojr/rekall
rekall-core/rekall/plugins/overlays/windows/win7.py
Python
gpl-2.0
15,295
#!/usr/bin/env python3 ''' test_asset_mapper ---------------------------------- Tests for `asset_mapper` module. ''' import unittest import subprocess import sys import os from os import path from bs4 import BeautifulSoup sys.path.append(path.join(path.dirname(__file__), '..')) from ramlpreparer.builders.asset_mapper import map_the_assets class AssetMapperTestCase(unittest.TestCase): ''' Tests for the asset_mapper methods ''' def test_map_the_assets_asset_offset_pass(self): ''' Does the mapper successfully map the new path? ''' tool_time12_path = path.join( os.getcwd(), 'tests', 'dest', 'assets', 'tool_time12.jpg') tool_time_path = path.join( os.getcwd(), 'tests', 'dest', 'assets', 'tool_time.jpg') expected_version = {tool_time12_path: 3980, tool_time_path: 663} source_assets = path.join(os.getcwd(), 'tests', 'src', 'assets', '') dest_assets = path.join(os.getcwd(), 'tests', 'dest', 'assets', '') x, mapped_version = map_the_assets( source_assets, dest_assets, html_doc_path='tests/src/tester-raw.html') self.assertEqual(expected_version, mapped_version) if __name__ == '__main__': unittest.main()
nimbinatus/deconst-raml-preparer
tests/test_asset_mapper.py
Python
apache-2.0
1,256
from sympy import Expr, Symbol from sympy.core.decorators import call_highest_priority class Higher(Expr): _op_priority = 20.0 result = 'high' @call_highest_priority('__rmul__') def __mul__(self, other): return self.result @call_highest_priority('__mul__') def __rmul__(self, other): return self.result @call_highest_priority('__radd__') def __add__(self, other): return self.result @call_highest_priority('__add__') def __radd__(self, other): return self.result @call_highest_priority('__rsub__') def __sub__(self, other): return self.result @call_highest_priority('__sub__') def __rsub__(self, other): return self.result @call_highest_priority('__rpow__') def __pow__(self, other): return self.result @call_highest_priority('__pow__') def __rpow__(self, other): return self.result @call_highest_priority('__rdiv__') def __div__(self, other): return self.result @call_highest_priority('__div__') def __rdiv__(self, other): return self.result class Lower(Higher): _op_priority = 5.0 result = 'low' def test_mul(): x = Symbol('x') h = Higher() l = Lower() assert l*h == h*l == 'high' assert x*h == h*x == 'high' assert l*x == x*l != 'low' def test_add(): x = Symbol('x') h = Higher() l = Lower() assert l+h == h+l == 'high' assert x+h == h+x == 'high' assert l+x == x+l != 'low' def test_sub(): x = Symbol('x') h = Higher() l = Lower() assert l-h == h-l == 'high' assert x-h == h-x == 'high' assert l-x == -(x-l) != 'low' def test_pow(): x = Symbol('x') h = Higher() l = Lower() assert l**h == h**l == 'high' assert x**h == h**x == 'high' assert l**x != 'low' assert x**l != 'low' def test_div(): x = Symbol('x') h = Higher() l = Lower() #FIXME-py3k: AssertionError assert l/h == h/l == 'high' assert x/h == h/x == 'high' assert l/x != 'low' assert x/l != 'low'
GbalsaC/bitnamiP
venv/lib/python2.7/site-packages/sympy/core/tests/test_priority.py
Python
agpl-3.0
2,085
""" This module contains the function to generate core-shell A@B nanoparticle with FCC structure. """ from __future__ import print_function import random import copy from math import sqrt from ase.neighborlist import NeighborList from ase.cluster.cubic import FaceCenteredCubic import numpy as np from qsar import QSAR def sphericalFCC(elem, latticeconstant, nlayers): r""" Geneartes spherical cluster of atoms of FCC metal Parameters ---------- elem: string symbol of chemical element. lattice constant: float lattice constant in Angstr. nlayers: number of atomic layers passed to FaceCenteredCubic. Guards the radius of cluster Returns ------- ase.Atoms object Example -------- >>> atoms = sphericalFCC('Ag', 4.09, 8) """ # 1. generate cubical cluster surfaces = [(1, 0, 0)] layers = [nlayers] atoms = FaceCenteredCubic(elem, surfaces, layers, latticeconstant) atoms.center() # 2. cut al lextra atom from cube to make it spherical Xmin = atoms.positions[:, 0].min() Xmax = atoms.positions[:, 0].max() C = (Xmin + Xmax) / 2.0 R = (Xmax - Xmin) / 2.0 ia = 0 while ia < len(atoms): x2 = (atoms.positions[ia, 0] - C)**2 y2 = (atoms.positions[ia, 1] - C)**2 z2 = (atoms.positions[ia, 2] - C)**2 if (x2 + y2 + z2) > R**2: del atoms[ia] else: ia += 1 return atoms def cut_spherical_cluster(atoms, size): r""" Cuts spherical cluster from provided atoms object Parameters ---------- atoms: ASE.Atoms object the original cluster to be cut off size: float the diameter of resulting cluster, in Angstrom Returns ------- ase.Atoms object of resulted cluster Example -------- >>> atoms = cut_spherical_cluster(atoms, 10) # 1nm cluster """ # atoms = copy.copy(atoms) # keep original atoms uncentered # atoms.center() Xmin = np.min(atoms.positions[:, 0]) Xmax = np.max(atoms.positions[:, 0]) Ymin = np.min(atoms.positions[:, 1]) Ymax = np.max(atoms.positions[:, 1]) Zmin = np.min(atoms.positions[:, 2]) Zmax = np.max(atoms.positions[:, 2]) Cx = (Xmin + Xmax) / 2.0 Cy = (Ymin + Ymax) / 2.0 Cz = (Zmin + Zmax) / 
2.0 R = size/2.0 # radius of cluster dists = np.sum((atoms.get_positions() - np.array([Cx, Cy, Cz]))**2, 1) rem = np.nonzero(dists > R**2)[0] if len(rem) > 0: del atoms[rem] else: print('Warning: no atoms were deleted by cut_spherical_cluster()') return atoms def cut_elliptical_cluster(atoms, Dx, Dy, Dz): r""" Cuts 3D ellipsiodal cluster cluster from provided atoms object Parameters ---------- atoms: ASE.Atoms object the original cluster to be cut off Dx, Dy, Dz: float ellipse paramters, in Angstrom Returns ------- ase.Atoms object of resulted cluster Example -------- >>> atoms = cut_elliptical_cluster(atoms, 10, 10, 5) # """ #~ atoms = copy.copy(atoms) # keep original atoms uncentered atoms.center() Xmin = np.min(atoms.positions[:, 0]) Xmax = np.max(atoms.positions[:, 0]) Ymin = np.min(atoms.positions[:, 1]) Ymax = np.max(atoms.positions[:, 1]) Zmin = np.min(atoms.positions[:, 2]) Zmax = np.max(atoms.positions[:, 2]) Cx = (Xmin+Xmax)/2.0 Cy = (Ymin+Ymax)/2.0 Cz = (Zmin+Zmax)/2.0 R = np.array([Dx/2.0, Dy/2.0, Dz/2.0]) dists = np.sum(((atoms.get_positions() - np.array([Cx,Cy,Cz]))/R)**2, 1) rem = np.nonzero(dists > 1)[0] if len(rem) > 0: del atoms[rem] else: print('Warning: no atoms were deleted by cut_spherical_cluster()') return atoms def CoreShellFCC(atoms, type_a, type_b, ratio, a_cell, n_depth=-1): r"""This routine generates cluster with ideal core-shell architecture, so that atoms of type_a are placed on the surface and atoms of type_b are forming the core of nanoparticle. The 'surface' of nanoparticle is defined as atoms with unfinished coordination shell. Parameters ---------- atoms: ase.Atoms ase Atoms object, containing atomic cluster. type_a: string Symbol of chemical element to be placed on the shell. type_b: string Symbol of chemical element to be placed in the core. ratio: float Guards the number of shell atoms, type_a:type_b = ratio:(1-ratio) a_cell: float Parameter of FCC cell, in Angstrom. 
Required for calculation of neighbor distances in for infinite crystal. n_depth: int Number of layers of the shell formed by atoms ratio. Default value -1 is ignored and n_depth is calculated according ratio. If n_depth is set then value of ratio is ignored. Returns ------- Function returns ASE atoms object which contains bimetallic core-shell cluster Notes ----- The criterion of the atom beeing on the surface is incompletnes of it's coordination shell. For the most outer atoms the first coordination shell will be incomplete (coordination number is less then 12 for FCC), for the second layer -- second coordination shell( CN1 + CN2 < 12 + 6) and so on. In this algorithm each layer is tagged by the number ('depth'), take care if used with other routines dealing with tags (add_adsorbate etc). First, atoms with unfinished first shell are replaced by atoms type_a, then second, and so on. The last depth surface layer is replaced by random to maintain given ratio value. Example -------- >>> atoms = FaceCenteredCubic('Ag', [(1, 0, 0), (1, 1, 0), (1, 1, 1)], [7,8,7], 4.09) >>> atoms = CoreShellFCC(atoms, 'Pt', 'Ag', 0.6, 4.09) >>> view(atoms) """ # 0 < ratio < 1 target_x = ratio if n_depth != -1: target_x = 1 # needed to label all needed layeres def fill_by_tag(atoms, chems, tag): """Replaces all atoms within selected layer""" for i in range(0, len(atoms)): if atoms[i].tag == tag: chems[i] = type_a return # coord numbers for FCC: coord_nums = [1, 12, 6, 24, 12, 24, 8, 48, 6, 36, 24, 24, 24, 72, 48, 12, 48, 30, 72, 24] # coordination radii obtained from this array as R = sqrt(coord_radii)*a/2 coord_radii = [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 30, 32, 34, 36, 38, 40] ## generate FCC cluster ## #atoms = FaceCenteredCubic(type_b, surfaces, layers, a_cell) n_atoms = len(atoms) ## tag layers ## positions = [0] # number of positions in layer n_tag = 0 # number of tags to check if there is enought layers n_shell = 0 # depth of the shell while (n_tag < n_atoms * 
target_x): n_shell += 1 if (n_depth != -1)and(n_shell > n_depth): break neiblist = NeighborList( [ a_cell / 2.0 * sqrt(coord_radii[n_shell]) / 2.0 + 0.0001 ] * n_atoms, self_interaction=False, bothways=True ) neiblist.update(atoms) for i in range(0, n_atoms): indeces, offsets = neiblist.get_neighbors(i) if (atoms[i].tag == 0): if (len(indeces) < sum(coord_nums[1:n_shell + 1])): # coord shell is not full -> atom is on surface! atoms[i].tag = n_shell n_tag += 1 # save the count of positions at each layer: positions.append(n_tag - sum(positions[0:n_shell])) ## populate layers ## chems = atoms.get_chemical_symbols() n_type_a = 0 # number of changes B -> A if (n_tag < n_atoms * target_x)and(n_depth == -1): # raise exception? return None else: n_filled = n_shell - 1 # number of totally filled layers ilayer = 1 while (ilayer < n_filled + 1): fill_by_tag(atoms, chems, ilayer) n_type_a += positions[ilayer] ilayer += 1 while (n_type_a < n_atoms * target_x)and(n_depth == -1): i = random.randint(0, n_atoms - 1) if (atoms[i].tag == n_shell): if (chems[i] == type_b): chems[i] = type_a n_type_a += 1 atoms.set_chemical_symbols(chems) ## check number of atoms ## checkn_a = 0 for element in chems: if element == type_a: checkn_a += 1 assert n_type_a == checkn_a return atoms def CoreShellCN(atoms, type_a, type_b, ratio, R_min = 1.5, CN_max=12, n_depth=-1): r"""This routine generates cluster with ideal core-shell architecture, so that atoms of type_a are placed on the surface and atoms of type_b are forming the core of nanoparticle. The 'surface' of nanoparticle is defined as atoms with unfinished first coordination shell. This algorithm *does not* requires explicit knowledge of far coordination shells parameters, as it was in CoreShellFCC(..) Parameters ---------- atoms: ase.Atoms ase Atoms object, containing atomic cluster. type_a: string Symbol of chemical element to be placed on the shell. type_b: string Symbol of chemical element to be placed in the core. 
ratio: float Guards the number of shell atoms, type_a:type_b = ratio:(1-ratio) R_min: float Typical bond length. Neighboring atoms within this value are counted as coordination numbers. Default is 1.5. CN_max: float Maximum possible coordination number (bulk coordination number). Default is 12. n_depth: int Number of layers of the shell formed by atoms ratio. Default value -1 is ignored and n_depth is calculated according ratio. If n_depth is set then value of ratio is ignored. Returns ------- Function returns ASE atoms object which contains bimetallic core-shell cluster Example -------- >>> atoms = FaceCenteredCubic('Ag', [(1, 0, 0), (1, 1, 0), (1, 1, 1)], [7,8,7], 4.09) >>> atoms = CoreShellCN(atoms, 'Pt', 'Ag', 0.5) >>> view(atoms) """ # 0 < ratio < 1 target_x = ratio if n_depth != -1: target_x = 1 n_atoms = len(atoms) n_a = (np.array(atoms.get_chemical_symbols()) == type_a).sum() #n_b = (atoms.get_chemical_symbols() == type_b).sum() #print n_a n_shell = 0 # depth of the shell while (n_a < n_atoms * target_x): n_shell += 1 print ("shell: ", n_shell) if (n_depth != -1)and(n_shell > n_depth): break neiblist = NeighborList( [ R_min ] * n_atoms, self_interaction=False, bothways=True ) neiblist.update( atoms ) for i in range( n_atoms ): indeces, offsets = neiblist.get_neighbors(i) if (atoms[i].symbol == type_b): CN_temp = 0 for ii in indeces: if atoms[ii].symbol == type_b: CN_temp += 1 #print "CN_temp: ", CN_temp if (CN_temp < CN_max): # coord shell is not full, swap type to type_a! atoms[i].tag = n_shell # not swap yet, but mark # swap atom types now. 
Stop if target ratio achieved for atom in atoms: if (atom.tag > 0)&(atom.symbol == type_b): if n_a < n_atoms * target_x: atom.symbol = type_a n_a += 1 #print "n_A: ", n_a # end while # check number of atoms checkn_a = 0 for element in atoms.get_chemical_symbols(): if element == type_a: checkn_a += 1 #print "Should be equal: ", n_a, checkn_a assert n_a == checkn_a return atoms def hollowCore(atoms, radius): print('WARNING: hollowCore() is renamed to hollow_core()') return hollow_core(atoms, radius) def hollow_core(atoms, radius): r""" Make an empty (hollow) core in the middle of atoms system Parameters ---------- atoms: ase.Atoms ase Atoms object, containing atomic cluster. radius: float controlls the size of empty region in the center of cluster. Returns ------- ase.Atoms object Notes ----- Example -------- >>> atoms = FaceCenteredCubic('Ag', [(1, 0, 0), (1, 1, 0), (1, 1, 1)], [7,8,7], 4.09) >>> atoms = hollow_core(atoms, 5.1) >>> view(atoms) """ assert radius > 0 pos = atoms.positions center = (np.min(pos, axis=0) + np.max(pos, axis=0)) / 2.0 dists = np.sum((pos - center)**2, 1) rem = np.nonzero(dists < radius**2)[0] if len(rem) > 0: del atoms[rem] else: print('Warning: no atoms were deleted by hollow_core()') return atoms def randomize_biatom(atoms, type_a, type_b, ratio): """ replace randomly to acheive target conc """ n_A = 0 n_B = 0 for atom in atoms: if atom.symbol == type_a: n_A += 1 elif atom.symbol == type_b: n_B += 1 else: raise Exception('Extra chemical element %s!'%atom.symbol) #print n_A, n_B N = len(atoms) #print "conc", n_A *1.0 / N r = random.Random() while n_A < ratio*N: # add A atoms randomly index = r.randint(0, N-1) if (atoms[index].symbol != type_a): #print "changing atom #"+str(index)+" to "+type_a #prob = probability(dists[index]/Rmax, p) #print p if (r.randint(0, 1000) < 500): atoms[index].symbol = type_a n_A += 1 return atoms def randomize_biatom_13(atoms, type_a, type_b, ratio): """ replace randomly by clusters of 13 atoms to acheive 
target conc """ n_A = 0 n_B = 0 for atom in atoms: if atom.symbol == type_a: n_A += 1 elif atom.symbol == type_b: n_B += 1 else: raise Exception('Extra chemical element %s!'%atom.symbol) #print n_A, n_B N = len(atoms) nl = NeighborList([1.5]*N, self_interaction=False, bothways=True) # 2*1.5=3 Angstr. radius nl.update(atoms) #print "conc", n_A *1.0 / N r = random.Random() while n_A < ratio*N: # add A atoms randomly index = r.randint(0, N-1) if (atoms[index].symbol != type_a): #print "changing atom #"+str(index)+" to "+type_a #if (r.randint(0, 1000) < 500): atoms[index].symbol = type_a n_A += 1 indeces, offsets = nl.get_neighbors(index) for ia in indeces : if (atoms[ia].symbol != type_a)&(n_A < ratio*N): atoms[ia].symbol = type_a n_A += 1 return atoms def randomize_userfunc(atoms, new_type, user_func): """ replace host atoms randomly by new_type of atom by user function of probability distribution. Concentration is hidden in that function. Go throw all atoms of one type.""" #TODO: backup atoms? N = len(atoms) qsar = QSAR(atoms) r = random.Random() dists = qsar.atom_distances() Rmax = max(dists) for i_atom in range(N): #r.random() - random float in interval [0,1) x = dists[i_atom]/Rmax if r.random() < user_func(x): atoms[i_atom].symbol = new_type return atoms def intermetallideFCC(atoms, A, B, cellconstant): """ Replace atoms type A by atom type B to obtain intermetallide FCC structure. 
""" x0 = atoms[0].position[0] y0 = atoms[0].position[1] z0 = atoms[0].position[2] for atom in atoms: if atom.symbol == A: x = (atom.position[0] - x0) * 2 / cellconstant y = (atom.position[1] - y0) * 2 / cellconstant z = (atom.position[2] - z0) * 2 / cellconstant # for face centered cubic: n1 = round(0.5 * (-x+y+z)) n2 = round(0.5 * ( x-y+z)) n3 = round(0.5 * ( x+y-z)) # for simple cubic: # n1 = 0.5 * x # n2 = 0.5 * y # n3 = 0.5 * z #print n1, n2, n3 if ( n1 + n2 + n3 ) % 2 == 0: atom.symbol = B #TODO: check and show warning if no changes were made return atoms def janus_z_particle(atoms, A, B, ratio): ''' two-sided particle A - base atom type B - atom type to fill from the lower to higher z coordinate ''' poss = atoms.get_positions() zs = poss[:,2] # ~ zmin = np.min(zs) # ~ zmax = np.max(zs) ntarget = int(np.round(len(atoms) * ratio)) print('target sites: %i' % ntarget) # ~ zborder = fsolve(lambda x: np.sum(zs < x) - ntarget, (zmin+zmax)/2+0.01) # ~ print('%.2f < %.2f < %.2f' % (zmin, zborder, zmax)) # ~ print('found sites: %i' % np.sum(zs < zborder)) asort = np.argsort(zs) syms = np.array(atoms.get_chemical_symbols()) syms[asort[:ntarget]] = B syms[asort[ntarget:]] = A atoms.set_chemical_symbols(syms) return atoms def hop_shuffle(atoms, A, B, count=10, R=3.0): """ Shuffle atoms in given structure by swapping atom types within first coordination shell Parameters ---------- atoms: ase.Atoms ase Atoms object, containing atomic cluster. 
A, B: string symbols of atoms to swap count: integer number of shuffles R: float radius of coordination shell, were atoms will be swapped Returns ------- Function returns ASE atoms object whith shuffled atoms """ n_atoms = len(atoms) nswaps = 0 neiblist = NeighborList( [ R ] * n_atoms, self_interaction=False, bothways=True ) neiblist.update( atoms ) rnd = random.Random() while nswaps < count: i = rnd.randint(0, n_atoms-1) indeces, offsets = neiblist.get_neighbors( i ) if (atoms[i].symbol == B): candidates = [] for ii in indeces: if atoms[ii].symbol == A: candidates.append( ii ) if len(candidates) > 0: j = random.choice(candidates) atoms[i].symbol = A atoms[j].symbol = B nswaps += 1 neiblist.update( atoms ) elif (atoms[i].symbol == B): candidates = [] for ii in indeces: if atoms[ii].symbol == A: candidates.append( ii ) if len(candidates) > 0: j = random.choice(candidates) atoms[i].symbol = B atoms[j].symbol = A nswaps += 1 neiblist.update( atoms ) return atoms if __name__ == '__main__': # from ase.visualize import view # test sphericalFCC #atoms = sphericalFCC('Ag', 4.09, 8) #view(atoms) #raw_input('press enter') # ~ atoms = FaceCenteredCubic('Ag', [(1, 0, 0)], [20], latticeconstant=4.09) #~ atoms = cut_spherical_cluster(atoms, 40) # ~ atoms = cut_elliptical_cluster(atoms, 40, 40, 24) # ~ atoms = hollow_core(atoms, radius=12) # ~ view(atoms) # ~ input('press enter') # atoms = FaceCenteredCubic( 'Ag', [(1, 0, 0), (1, 1, 0), (1, 1, 1)], [7, 8, 7], 4.09) # test core shell #atoms = CoreShellFCC(atoms, 'Pt', 'Ag', ratio=0.6, a_cell=4.09) # ratio-based filling #atoms = sphericalFCC('Ag', 4.09, 8) #atoms = CoreShellFCC(atoms, 'Pt', 'Ag', ratio=0.0, a_cell=4.09, n_depth=1) #atoms = randomize_biatom(atoms, 'Pt', 'Ag', ratio=0.6) #atoms = randomize_biatom_13(atoms, 'Pt', 'Ag', ratio=0.6) #atoms = hollowCore(atoms, 5.1) #atoms = CoreShellCN( atoms, 'Pt', 'Ag', 0.5 ) #atoms = intermetallideFCC( atoms, 'Ag', 'Pt', 4.09 ) #atoms = hop_shuffle( atoms, 'Pt', 'Ag', count=10) # test 
janus atoms = janus_z_particle(atoms, 'Cu', 'Pt', 0.6) view(atoms)
lavakyan/ase-bimetall
coreshell.py
Python
gpl-2.0
20,123
""" module sdk """ from . import wandb_helper as helper # noqa: F401 from .wandb_alerts import AlertLevel # noqa: F401 from .wandb_artifacts import Artifact # noqa: F401 from .wandb_config import Config # noqa: F401 from .wandb_init import _attach, init # noqa: F401 from .wandb_login import login # noqa: F401 from .wandb_require import require # noqa: F401 from .wandb_run import finish # noqa: F401 from .wandb_save import save # noqa: F401 from .wandb_settings import Settings # noqa: F401 from .wandb_setup import setup, teardown # noqa: F401 from .wandb_summary import Summary # noqa: F401 from .wandb_sweep import controller, sweep # noqa: F401 from .wandb_watch import unwatch, watch # noqa: F401
wandb/client
wandb/sdk/__init__.py
Python
mit
720
def nn(mat,i,j,di,inv): r=0 if j<yLen/di for c in di: r*=2 r+=mat[i][j*di+c] return r def compressedMat(mat): xLen=len(mat) yLen=len(mat[0]) yl=yLen/16+1 tab=[nn(mat,i,j,16,0) for j in xrange(yl) for i in range(xLen))]
craigfreilly/masters-project-submission
src/KnotTheory/HFK-Zurich/fastMat.py
Python
mit
297
# This file is part of Indico. # Copyright (C) 2002 - 2022 CERN # # Indico is free software; you can redistribute it and/or # modify it under the terms of the MIT License; see the # LICENSE file for more details. import ast import re from collections import Counter from contextlib import contextmanager from babel import negotiate_locale from babel.core import LOCALE_ALIASES, Locale from babel.messages.pofile import read_po from babel.support import NullTranslations from flask import current_app, g, has_app_context, has_request_context, request, session from flask_babel import Babel, Domain, get_domain from flask_pluginengine import current_plugin from speaklater import is_lazy_string, make_lazy_string from werkzeug.utils import cached_property from indico.core.config import config from indico.util.caching import memoize_request LOCALE_ALIASES = dict(LOCALE_ALIASES, en='en_GB') RE_TR_FUNCTION = re.compile(r'''_\("([^"]*)"\)|_\('([^']*)'\)''', re.DOTALL | re.MULTILINE) babel = Babel() _use_context = object() def get_translation_domain(plugin_name=_use_context): """Get the translation domain for the given plugin. If `plugin_name` is omitted, the plugin will be taken from current_plugin. If `plugin_name` is None, the core translation domain ('indico') will be used. """ if plugin_name is None: return get_domain() else: plugin = None if has_app_context(): from indico.core.plugins import plugin_engine plugin = plugin_engine.get_plugin(plugin_name) if plugin_name is not _use_context else current_plugin if plugin: return plugin.translation_domain else: return get_domain() def _gettext(tr, message): """Look for a translation in both the standard and plural translations. Same as gettext.GNUTranslations.gettext but when the translation is not found, it looks for a translation in plurals to find the singular case. When neither is found, the gettext fallback is used. This is needed because by default, gettext() only looks through non-pluralized translations. 
For example, the following cannot be translated with gettext: # Polish msgid "Convener" msgid_plural "Conveners" msgstr[0] "Lider" To get a translation for 'Convener', we need ngettext('Convener', 'Conveners', 1). See GNUTranslations.gettext & ngettext for more details. """ translation = tr.gettext(message) if message != translation: return translation return tr.ngettext(message, message, 1) def _pgettext(tr, context, message): """pgettext variant of _gettext.""" translation = tr.pgettext(context, message) if message != translation: return translation return tr.npgettext(context, message, message, 1) def _indico_gettext(*args, **kwargs): func_name = kwargs.pop('func_name', 'gettext') plugin_name = kwargs.pop('plugin_name', None) translations = get_translation_domain(plugin_name).get_translations() if func_name == 'gettext': return _gettext(translations, *args, **kwargs) elif func_name == 'pgettext': return _pgettext(translations, *args, **kwargs) else: return getattr(translations, func_name)(*args, **kwargs) def lazy_gettext(string, plugin_name=None): if is_lazy_string(string): return string return make_lazy_string(_indico_gettext, string, plugin_name=plugin_name) def orig_string(lazy_string): """Get the original string from a lazy string.""" return lazy_string._args[0] if is_lazy_string(lazy_string) else lazy_string def smart_func(func_name, plugin_name=None): def _wrap(*args, **kwargs): """ Returns either a translated string or a lazy-translatable object, depending on whether there is a session language or not (respectively) """ if has_request_context() or func_name != 'gettext': # straight translation return _indico_gettext(*args, func_name=func_name, plugin_name=plugin_name, **kwargs) else: # otherwise, defer translation to eval time return lazy_gettext(*args, plugin_name=plugin_name) if plugin_name is _use_context: _wrap.__name__ = f'<smart {func_name}>' else: _wrap.__name__ = '<smart {} bound to {}>'.format(func_name, plugin_name or 'indico') return _wrap def 
make_bound_gettext(plugin_name): """ Create a smart gettext callable bound to the domain of the specified plugin. """ return smart_func('gettext', plugin_name=plugin_name) def make_bound_ngettext(plugin_name): """ Create a smart ngettext callable bound to the domain of the specified plugin. """ return smart_func('ngettext', plugin_name=plugin_name) def make_bound_pgettext(plugin_name): """ Create a smart pgettext callable bound to the domain of the specified plugin. """ return smart_func('pgettext', plugin_name=plugin_name) def make_bound_npgettext(plugin_name): """ Create a smart npgettext callable bound to the domain of the specified plugin. """ return smart_func('npgettext', plugin_name=plugin_name) # Shortcuts _ = gettext = make_bound_gettext(None) ngettext = make_bound_ngettext(None) pgettext = make_bound_pgettext(None) npgettext = make_bound_npgettext(None) L_ = lazy_gettext # Plugin-context-sensitive gettext for templates gettext_context = make_bound_gettext(_use_context) ngettext_context = make_bound_ngettext(_use_context) pgettext_context = make_bound_pgettext(_use_context) npgettext_context = make_bound_npgettext(_use_context) class NullDomain(Domain): """A `Domain` that doesn't contain any translations.""" def __init__(self): super().__init__() self.null = NullTranslations() def get_translations(self): return self.null class IndicoLocale(Locale): """Extend the Babel Locale class with some utility methods.""" def weekday(self, daynum, short=True): """Return the week day given the index.""" return self.days['format']['abbreviated' if short else 'wide'][daynum] @cached_property def time_formats(self): formats = super().time_formats for k, v in formats.items(): v.format = v.format.replace(':%(ss)s', '') return formats def _remove_locale_script(locale): parts = locale.split('_') # e.g. `en_GB` or `zh_Hans_CN` return f'{parts[0]}_{parts[-1]}' @babel.localeselector def set_best_lang(check_session=True): """Get the best language/locale for the current user. 
This means that first the session will be checked, and then in the absence of an explicitly-set language, we will try to guess it from the browser settings and only after that fall back to the server's default. """ from indico.core.config import config if not has_request_context(): return 'en_GB' if current_app.config['TESTING'] else config.DEFAULT_LOCALE elif 'lang' in g: return g.lang elif check_session and session.lang is not None: return session.lang # chinese uses `zh-Hans-CN`, but browsers send `zh-CN` all_locales = {_remove_locale_script(loc).lower(): loc for loc in get_all_locales()} # try to use browser language preferred = [x.replace('-', '_') for x in request.accept_languages.values()] resolved_lang = negotiate_locale(preferred, list(all_locales), aliases=LOCALE_ALIASES) if not resolved_lang: if current_app.config['TESTING']: return 'en_GB' # fall back to server default resolved_lang = config.DEFAULT_LOCALE # restore script information if necessary try: resolved_lang = all_locales[resolved_lang.lower()] except KeyError: # this happens if we have e.g. a development setup with no built languages. # in this case `get_all_locales()` only contains `en_EN` return 'en_GB' # normalize to xx_YY capitalization resolved_lang = re.sub(r'^([a-zA-Z]+)_([a-zA-Z]+)$', lambda m: f'{m.group(1).lower()}_{m.group(2).upper()}', resolved_lang) # As soon as we looked up a language, cache it during the request. # This will be returned when accessing `session.lang` since there's code # which reads the language from there and might fail (e.g. by returning # lazy strings) if it's not set. g.lang = resolved_lang return resolved_lang @memoize_request def get_current_locale(): return IndicoLocale.parse(set_best_lang()) def get_all_locales(): """ List all available locales/names e.g. ``{'pt_PT': ('Portuguese', 'Portugal)}``. 
""" if babel.app is None: return {} else: missing = object() languages = {str(t): config.CUSTOM_LANGUAGES.get(str(t), (t.language_name.title(), t.territory_name)) for t in babel.list_translations() if config.CUSTOM_LANGUAGES.get(str(t), missing) is not None} counts = Counter(x[0] for x in languages.values()) return {code: (name, territory, counts[name] > 1) for code, (name, territory) in languages.items()} def set_session_lang(lang): """Set the current language in the current request context.""" session.lang = lang @contextmanager def session_language(lang): """Context manager that temporarily sets session language.""" old_lang = session.lang set_session_lang(lang) yield set_session_lang(old_lang) def parse_locale(locale): """Get a Locale object from a locale id.""" return IndicoLocale.parse(locale) def extract_node(node, keywords, commentTags, options, parents=[None]): if isinstance(node, ast.Str) and isinstance(parents[-1], (ast.Assign, ast.Call)): matches = RE_TR_FUNCTION.findall(node.s) for m in matches: line = m[0] or m[1] yield (node.lineno, '', line.split('\n'), ['old style recursive strings']) else: for cnode in ast.iter_child_nodes(node): yield from extract_node(cnode, keywords, commentTags, options, parents=(parents + [node])) def po_to_json(po_file, locale=None, domain=None): """Convert *.po file to a json-like data structure.""" with open(po_file, 'rb') as f: po_data = read_po(f, locale=locale, domain=domain) messages = dict((message.id[0], message.string) if message.pluralizable else (message.id, [message.string]) for message in po_data) messages[''] = { 'domain': po_data.domain, 'lang': str(po_data.locale), 'plural_forms': po_data.plural_forms } return { (po_data.domain or ''): messages }
indico/indico
indico/util/i18n.py
Python
mit
10,760
from collections import deque import time import requests # Constants BRAZIL = 'br' EUROPE_NORDIC_EAST = 'eune' EUROPE_WEST = 'euw' KOREA = 'kr' LATIN_AMERICA_NORTH = 'lan' LATIN_AMERICA_SOUTH = 'las' NORTH_AMERICA = 'na' OCEANIA = 'oce' RUSSIA = 'ru' TURKEY = 'tr' # Platforms platforms = { BRAZIL: 'BR1', EUROPE_NORDIC_EAST: 'EUN1', EUROPE_WEST: 'EUW1', KOREA: 'KR', LATIN_AMERICA_NORTH: 'LA1', LATIN_AMERICA_SOUTH: 'LA2', NORTH_AMERICA: 'NA1', OCEANIA: 'OC1', RUSSIA: 'RU', TURKEY: 'TR1' } queue_types = [ 'CUSTOM', # Custom games 'NORMAL_5x5_BLIND', # Normal 5v5 blind pick 'BOT_5x5', # Historical Summoners Rift coop vs AI games 'BOT_5x5_INTRO', # Summoners Rift Intro bots 'BOT_5x5_BEGINNER', # Summoner's Rift Coop vs AI Beginner Bot games 'BOT_5x5_INTERMEDIATE', # Historical Summoner's Rift Coop vs AI Intermediate Bot games 'NORMAL_3x3', # Normal 3v3 games 'NORMAL_5x5_DRAFT', # Normal 5v5 Draft Pick games 'ODIN_5x5_BLIND', # Dominion 5v5 Blind Pick games 'ODIN_5x5_DRAFT', # Dominion 5v5 Draft Pick games 'BOT_ODIN_5x5', # Dominion Coop vs AI games 'RANKED_SOLO_5x5', # Ranked Solo 5v5 games 'RANKED_PREMADE_3x3', # Ranked Premade 3v3 games 'RANKED_PREMADE_5x5', # Ranked Premade 5v5 games 'RANKED_TEAM_3x3', # Ranked Team 3v3 games 'RANKED_TEAM_5x5', # Ranked Team 5v5 games 'BOT_TT_3x3', # Twisted Treeline Coop vs AI games 'GROUP_FINDER_5x5', # Team Builder games 'ARAM_5x5', # ARAM games 'ONEFORALL_5x5', # One for All games 'FIRSTBLOOD_1x1', # Snowdown Showdown 1v1 games 'FIRSTBLOOD_2x2', # Snowdown Showdown 2v2 games 'SR_6x6', # Hexakill games 'URF_5x5', # Ultra Rapid Fire games 'BOT_URF_5x5', # Ultra Rapid Fire games played against AI games 'NIGHTMARE_BOT_5x5_RANK1', # Doom Bots Rank 1 games 'NIGHTMARE_BOT_5x5_RANK2', # Doom Bots Rank 2 games 'NIGHTMARE_BOT_5x5_RANK5', # Doom Bots Rank 5 games 'ASCENSION_5x5', # Ascension games 'HEXAKILL', # 6v6 games on twisted treeline 'KING_PORO_5x5', # King Poro game games 'COUNTER_PICK', # Nemesis games, 'BILGEWATER_5x5', # Black 
Market Brawlers games ] game_maps = [ {'map_id': 1, 'name': "Summoner's Rift", 'notes': "Summer Variant"}, {'map_id': 2, 'name': "Summoner's Rift", 'notes': "Autumn Variant"}, {'map_id': 3, 'name': "The Proving Grounds", 'notes': "Tutorial Map"}, {'map_id': 4, 'name': "Twisted Treeline", 'notes': "Original Version"}, {'map_id': 8, 'name': "The Crystal Scar", 'notes': "Dominion Map"}, {'map_id': 10, 'name': "Twisted Treeline", 'notes': "Current Version"}, {'map_id': 11, 'name': "Summoner's Rift", 'notes': "Current Version"}, {'map_id': 12, 'name': "Howling Abyss", 'notes': "ARAM Map"}, {'map_id': 14, 'name': "Butcher's Bridge", 'notes': "ARAM Map"}, ] game_modes = [ 'CLASSIC', # Classic Summoner's Rift and Twisted Treeline games 'ODIN', # Dominion/Crystal Scar games 'ARAM', # ARAM games 'TUTORIAL', # Tutorial games 'ONEFORALL', # One for All games 'ASCENSION', # Ascension games 'FIRSTBLOOD', # Snowdown Showdown games 'KINGPORO', # King Poro games ] game_types = [ 'CUSTOM_GAME', # Custom games 'TUTORIAL_GAME', # Tutorial games 'MATCHED_GAME', # All other games ] sub_types = [ 'NONE', # Custom games 'NORMAL', # Summoner's Rift unranked games 'NORMAL_3x3', # Twisted Treeline unranked games 'ODIN_UNRANKED', # Dominion/Crystal Scar games 'ARAM_UNRANKED_5v5', # ARAM / Howling Abyss games 'BOT', # Summoner's Rift and Crystal Scar games played against AI 'BOT_3x3', # Twisted Treeline games played against AI 'RANKED_SOLO_5x5', # Summoner's Rift ranked solo queue games 'RANKED_TEAM_3x3', # Twisted Treeline ranked team games 'RANKED_TEAM_5x5', # Summoner's Rift ranked team games 'ONEFORALL_5x5', # One for All games 'FIRSTBLOOD_1x1', # Snowdown Showdown 1x1 games 'FIRSTBLOOD_2x2', # Snowdown Showdown 2x2 games 'SR_6x6', # Hexakill games 'CAP_5x5', # Team Builder games 'URF', # Ultra Rapid Fire games 'URF_BOT', # Ultra Rapid Fire games against AI 'NIGHTMARE_BOT', # Nightmare bots 'ASCENSION', # Ascension games 'HEXAKILL', # Twisted Treeline 6x6 Hexakill 'KING_PORO', # King Poro 
games 'COUNTER_PICK', # Nemesis games 'BILGEWATER', # Black Market Brawlers games ] player_stat_summary_types = [ 'Unranked', # Summoner's Rift unranked games 'Unranked3x3', # Twisted Treeline unranked games 'OdinUnranked', # Dominion/Crystal Scar games 'AramUnranked5x5', # ARAM / Howling Abyss games 'CoopVsAI', # Summoner's Rift and Crystal Scar games played against AI 'CoopVsAI3x3', # Twisted Treeline games played against AI 'RankedSolo5x5', # Summoner's Rift ranked solo queue games 'RankedTeams3x3', # Twisted Treeline ranked team games 'RankedTeams5x5', # Summoner's Rift ranked team games 'OneForAll5x5', # One for All games 'FirstBlood1x1', # Snowdown Showdown 1x1 games 'FirstBlood2x2', # Snowdown Showdown 2x2 games 'SummonersRift6x6', # Hexakill games 'CAP5x5', # Team Builder games 'URF', # Ultra Rapid Fire games 'URFBots', # Ultra Rapid Fire games played against AI 'NightmareBot', # Summoner's Rift games played against Nightmare AI 'Hexakill', # Twisted Treeline 6x6 Hexakill games 'KingPoro', # King Poro games 'CounterPick', # Nemesis games 'Bilgewater', # Black Market Brawlers games ] solo_queue, ranked_5s, ranked_3s = 'RANKED_SOLO_5x5', 'RANKED_TEAM_5x5', 'RANKED_TEAM_3x3' api_versions = { 'champion': 1.2, 'current-game': 1.0, 'featured-games': 1.0, 'game': 1.3, 'league': 2.5, 'lol-static-data': 1.2, 'lol-status': 1.0, 'match': 2.2, 'matchhistory': 2.2, 'matchlist': 2.2, 'stats': 1.3, 'summoner': 1.4, 'team': 2.4 } class LoLException(Exception): def __init__(self, error, response): self.error = error self.response = response def __str__(self): return self.error error_400 = "Bad request" error_401 = "Unauthorized" error_404 = "Game data not found" error_429 = "Too many requests" error_500 = "Internal server error" error_503 = "Service unavailable" def raise_status(response): if response.status_code == 400: raise LoLException(error_400, response) elif response.status_code == 401: raise LoLException(error_401, response) elif response.status_code == 404: raise 
LoLException(error_404, response) elif response.status_code == 429: raise LoLException(error_429, response) elif response.status_code == 500: raise LoLException(error_500, response) elif response.status_code == 503: raise LoLException(error_503, response) else: response.raise_for_status() class RateLimit: def __init__(self, allowed_requests, seconds): self.allowed_requests = allowed_requests self.seconds = seconds self.made_requests = deque() def __reload(self): t = time.time() while len(self.made_requests) > 0 and self.made_requests[0] < t: self.made_requests.popleft() def add_request(self): self.made_requests.append(time.time() + self.seconds) def request_available(self): self.__reload() return len(self.made_requests) < self.allowed_requests class RiotWatcher: def __init__(self, key, default_region=NORTH_AMERICA, limits=(RateLimit(10, 10), RateLimit(500, 600), )): self.key = key self.default_region = default_region self.limits = limits def can_make_request(self): for lim in self.limits: if not lim.request_available(): return False return True def base_request(self, url, region, static=False, **kwargs): if region is None: region = self.default_region args = {'api_key': self.key} for k in kwargs: if kwargs[k] is not None: args[k] = kwargs[k] r = requests.get( 'https://{proxy}.api.pvp.net/api/lol/{static}{region}/{url}'.format( proxy='global' if static else region, static='static-data/' if static else '', region=region, url=url ), params=args ) if not static: for lim in self.limits: lim.add_request() raise_status(r) return r.json() def _observer_mode_request(self, url, proxy=None, **kwargs): if proxy is None: proxy = self.default_region args = {'api_key': self.key} for k in kwargs: if kwargs[k] is not None: args[k] = kwargs[k] r = requests.get( 'https://{proxy}.api.pvp.net/observer-mode/rest/{url}'.format( proxy=proxy, url=url ), params=args ) for lim in self.limits: lim.add_request() raise_status(r) return r.json() @staticmethod def sanitized_name(name): return 
name.replace(' ', '').lower() # champion-v1.2 def _champion_request(self, end_url, region, **kwargs): return self.base_request( 'v{version}/champion/{end_url}'.format( version=api_versions['champion'], end_url=end_url ), region, **kwargs ) def get_all_champions(self, region=None, free_to_play=False): return self._champion_request('', region, freeToPlay=free_to_play) def get_champion(self, champion_id, region=None): return self._champion_request('{id}'.format(id=champion_id), region) # current-game-v1.0 def get_current_game(self, summoner_id, platform_id=None, region=None): if platform_id is None: platform_id = platforms[self.default_region] return self._observer_mode_request( 'consumer/getSpectatorGameInfo/{platform}/{summoner_id}'.format( platform=platform_id, summoner_id=summoner_id ), region ) # featured-game-v1.0 def get_featured_games(self, proxy=None): return self._observer_mode_request('featured', proxy) # game-v1.3 def _game_request(self, end_url, region, **kwargs): return self.base_request( 'v{version}/game/{end_url}'.format( version=api_versions['game'], end_url=end_url ), region, **kwargs ) def get_recent_games(self, summoner_id, region=None): return self._game_request('by-summoner/{summoner_id}/recent'.format(summoner_id=summoner_id), region) # league-v2.5 def _league_request(self, end_url, region, **kwargs): return self.base_request( 'v{version}/league/{end_url}'.format( version=api_versions['league'], end_url=end_url ), region, **kwargs ) def get_league(self, summoner_ids=None, team_ids=None, region=None): """summoner_ids and team_ids arguments must be iterable, only one should be specified, not both""" if (summoner_ids is None) != (team_ids is None): if summoner_ids is not None: return self._league_request( 'by-summoner/{summoner_ids}'.format(summoner_ids=','.join([str(s) for s in summoner_ids])), region ) else: return self._league_request( 'by-team/{team_ids}'.format(team_ids=','.join([str(t) for t in team_ids])), region ) def get_league_entry(self, 
summoner_ids=None, team_ids=None, region=None): """summoner_ids and team_ids arguments must be iterable, only one should be specified, not both""" if (summoner_ids is None) != (team_ids is None): if summoner_ids is not None: return self._league_request( 'by-summoner/{summoner_ids}/entry'.format( summoner_ids=','.join([str(s) for s in summoner_ids]) ), region ) else: return self._league_request( 'by-team/{team_ids}/entry'.format(team_ids=','.join([str(t) for t in team_ids])), region ) def get_challenger(self, region=None, queue=solo_queue): return self._league_request('challenger', region, type=queue) def get_master(self, region=None, queue=solo_queue): return self._league_request('master', region, type=queue) # lol-static-data-v1.2 def _static_request(self, end_url, region, **kwargs): return self.base_request( 'v{version}/{end_url}'.format( version=api_versions['lol-static-data'], end_url=end_url ), region, static=True, **kwargs ) def static_get_champion_list(self, region=None, locale=None, version=None, data_by_id=None, champ_data=None): return self._static_request( 'champion', region, locale=locale, version=version, dataById=data_by_id, champData=champ_data ) def static_get_champion(self, champ_id, region=None, locale=None, version=None, champ_data=None): return self._static_request( 'champion/{id}'.format(id=champ_id), region, locale=locale, version=version, champData=champ_data ) def static_get_item_list(self, region=None, locale=None, version=None, item_list_data=None): return self._static_request('item', region, locale=locale, version=version, itemListData=item_list_data) def static_get_item(self, item_id, region=None, locale=None, version=None, item_data=None): return self._static_request( 'item/{id}'.format(id=item_id), region, locale=locale, version=version, itemData=item_data ) def static_get_mastery_list(self, region=None, locale=None, version=None, mastery_list_data=None): return self._static_request( 'mastery', region, locale=locale, version=version, 
masteryListData=mastery_list_data ) def static_get_mastery(self, mastery_id, region=None, locale=None, version=None, mastery_data=None): return self._static_request( 'mastery/{id}'.format(id=mastery_id), region, locale=locale, version=version, masteryData=mastery_data ) def static_get_realm(self, region=None): return self._static_request('realm', region) def static_get_rune_list(self, region=None, locale=None, version=None, rune_list_data=None): return self._static_request('rune', region, locale=locale, version=version, runeListData=rune_list_data) def static_get_rune(self, rune_id, region=None, locale=None, version=None, rune_data=None): return self._static_request( 'rune/{id}'.format(id=rune_id), region, locale=locale, version=version, runeData=rune_data ) def static_get_summoner_spell_list(self, region=None, locale=None, version=None, data_by_id=None, spell_data=None): return self._static_request( 'summoner-spell', region, locale=locale, version=version, dataById=data_by_id, spellData=spell_data ) def static_get_summoner_spell(self, spell_id, region=None, locale=None, version=None, spell_data=None): return self._static_request( 'summoner-spell/{id}'.format(id=spell_id), region, locale=locale, version=version, spellData=spell_data ) def static_get_versions(self, region=None): return self._static_request('versions', region) # match-v2.2 def _match_request(self, end_url, region, **kwargs): return self.base_request( 'v{version}/match/{end_url}'.format( version=api_versions['match'], end_url=end_url ), region, **kwargs ) def get_match(self, match_id, region=None, include_timeline=False): return self._match_request( '{match_id}'.format(match_id=match_id), region, includeTimeline=include_timeline ) # lol-status-v1.0 @staticmethod def get_server_status(region=None): if region is None: url = 'shards' else: url = 'shards/{region}'.format(region=region) r = requests.get('http://status.leagueoflegends.com/{url}'.format(url=url)) raise_status(r) return r.json() # match 
history-v2.2 def _match_history_request(self, end_url, region, **kwargs): return self.base_request( 'v{version}/matchhistory/{end_url}'.format( version=api_versions['matchhistory'], end_url=end_url ), region, **kwargs ) def get_match_history(self, summoner_id, region=None, champion_ids=None, ranked_queues=None, begin_index=None, end_index=None): return self._match_history_request( '{summoner_id}'.format(summoner_id=summoner_id), region, championIds=champion_ids, rankedQueues=ranked_queues, beginIndex=begin_index, endIndex=end_index ) # match list-v2.2 def _match_list_request(self, end_url, region, **kwargs): return self.base_request( 'v{version}/matchlist/by-summoner/{end_url}'.format( version=api_versions['matchlist'], end_url=end_url, ), region, **kwargs ) def get_match_list(self, summoner_id, region=None, champion_ids=None, ranked_queues=None, seasons=None, begin_time=None, end_time=None, begin_index=None, end_index=None): return self._match_list_request( '{summoner_id}'.format(summoner_id=summoner_id), region, championsIds=champion_ids, rankedQueues=ranked_queues, seasons=seasons, beginTime=begin_time, endTime=end_time, beginIndex=begin_index, endIndex=end_index ) # stats-v1.3 def _stats_request(self, end_url, region, **kwargs): return self.base_request( 'v{version}/stats/{end_url}'.format( version=api_versions['stats'], end_url=end_url ), region, **kwargs ) def get_stat_summary(self, summoner_id, region=None, season=None): return self._stats_request( 'by-summoner/{summoner_id}/summary'.format(summoner_id=summoner_id), region, season='SEASON{}'.format(season) if season is not None else None) def get_ranked_stats(self, summoner_id, region=None, season=None): return self._stats_request( 'by-summoner/{summoner_id}/ranked'.format(summoner_id=summoner_id), region, season='SEASON{}'.format(season) if season is not None else None ) # summoner-v1.4 def _summoner_request(self, end_url, region, **kwargs): return self.base_request( 'v{version}/summoner/{end_url}'.format( 
version=api_versions['summoner'], end_url=end_url ), region, **kwargs ) def get_mastery_pages(self, summoner_ids, region=None): return self._summoner_request( '{summoner_ids}/masteries'.format(summoner_ids=','.join([str(s) for s in summoner_ids])), region ) def get_rune_pages(self, summoner_ids, region=None): return self._summoner_request( '{summoner_ids}/runes'.format(summoner_ids=','.join([str(s) for s in summoner_ids])), region ) def get_summoners(self, names=None, ids=None, region=None): if (names is None) != (ids is None): return self._summoner_request( 'by-name/{summoner_names}'.format( summoner_names=','.join([self.sanitized_name(n) for n in names])) if names is not None else '{summoner_ids}'.format(summoner_ids=','.join([str(i) for i in ids])), region ) else: return None def get_summoner(self, name=None, _id=None, region=None): if (name is None) != (_id is None): if name is not None: name = self.sanitized_name(name) return self.get_summoners(names=[name, ], region=region)[name] else: return self.get_summoners(ids=[_id, ], region=region)[str(_id)] return None def get_summoner_name(self, summoner_ids, region=None): return self._summoner_request( '{summoner_ids}/name'.format(summoner_ids=','.join([str(s) for s in summoner_ids])), region ) # team-v2.4 def _team_request(self, end_url, region, **kwargs): return self.base_request( 'v{version}/team/{end_url}'.format( version=api_versions['team'], end_url=end_url ), region, **kwargs ) def get_teams_for_summoner(self, summoner_id, region=None): return self.get_teams_for_summoners([summoner_id, ], region=region)[str(summoner_id)] def get_teams_for_summoners(self, summoner_ids, region=None): return self._team_request( 'by-summoner/{summoner_id}'.format(summoner_id=','.join([str(s) for s in summoner_ids])), region ) def get_team(self, team_id, region=None): return self.get_teams([team_id, ], region=region)[str(team_id)] def get_teams(self, team_ids, region=None): return 
self._team_request('{team_ids}'.format(team_ids=','.join(str(t) for t in team_ids)), region)
gnozell/Yar-Ha-Har
lib/riotwatcher/riotwatcher.py
Python
mit
22,700
""" Read file into texts and calls. It's ok if you don't understand how to read files """ import csv with open('texts.csv', 'r') as f: reader = csv.reader(f) texts = list(reader) with open('calls.csv', 'r') as f: reader = csv.reader(f) calls = list(reader) """ TASK 2: Which telephone number spent the longest time on the phone during the period? Don't forget that time spent answering a call is also time spent on the phone. Print a message: "<telephone number> spent the longest time, <total time> seconds, on the phone during September 2016.". """ callDuration = {} for call in calls: if call[0] in callDuration.keys(): callDuration[call[0]] += int(call[3]) else: callDuration[call[0]] = int(call[3]) if call[1] in callDuration.keys(): callDuration[call[1]] += int(call[3]) else: callDuration[call[1]] = int(call[3]) sortedCallDuration = sorted(callDuration.items(), key=lambda x: x[1], reverse=True) print "{} spent the longest time, {} seconds, on the phone during September 2016.".format(sortedCallDuration[0][0], sortedCallDuration[0][1])
manishbisht/Udacity
Data Structures and Algorithms Nanodegree/P1 - Unscramble Computer Science Problems/Task2.py
Python
mit
1,119
import pytest from deathstar.utils import beautify_json @pytest.mark.asyncio async def test_listeners(empire): r = await empire.listeners.create(name="DeathStar-Test", additional={"Port": 8989}) r = await empire.listeners.get("DeathStar-Test") assert "error" not in r await empire.listeners.kill("DeathStar-Test") @pytest.mark.asyncio async def test_agents(empire): agents = await empire.agents.get() assert len(agents) > 0 agent = await empire.agents.get(agents[0].name) assert agent @pytest.mark.asyncio async def test_modules(empire, agents): agent = agents[0] modules = await empire.modules.search("get_domain_sid") assert len(modules) > 0 module = await empire.modules.get("powershell/management/get_domain_sid") assert module r = await empire.modules.execute(module, agent) print(beautify_json(r)) assert r["results"] != None and not r["results"].startswith("Job started") @pytest.mark.asyncio async def test_agent_results(empire, agents): agent = agents[0] r = await empire.agents.results(agent) print(beautify_json(r)) @pytest.mark.asyncio async def test_shell(empire, agents): agent = agents[0] r = await empire.agents.shell("tasklist", agent) print(beautify_json(r)) @pytest.mark.asyncio async def test_events(empire, agents): agent = agents[0] r = await empire.events.all() print(beautify_json(r)) r = await empire.events.agent(agent) print(beautify_json(r))
byt3bl33d3r/DeathStar
tests/test_empire_api.py
Python
gpl-3.0
1,499
# -*- coding: UTF-8 -*- from __future__ import unicode_literals, print_function from . import base class CompleteMixin(object): def complete(self, template_id=None): if template_id: data = {'complete': {'template_id': template_id}} else: data = {'complete': {}} self.objects.client.query( resource='%s/%s/complete' % (self.endpoint_name, self.id.value), method=base.Client.METHOD_PUT, data=data, ) class CancelMixin(object): def cancel(self): self.objects.client.query( resource='%s/%s/cancel' % (self.endpoint_name, self.id.value), method=base.Client.METHOD_PUT, ) def uncancel(self): self.objects.client.query( resource='%s/%s/uncancel' % (self.endpoint_name, self.id.value), method=base.Client.METHOD_PUT, ) class SendEmailMixin(object): def send_email(self, recipient, email_template_id=None): email_data = {'recipients': {'to': recipient}} if email_template_id: email_data['email_template_id'] = email_template_id self.objects.client.query( resource='%s/%s/email' % (self.endpoint_name, self.id.value), method=base.Client.METHOD_POST, data={'email': email_data} ) class StatusAndEmailMixin(CompleteMixin, CancelMixin, SendEmailMixin): pass
lociii/billomat
billomat/mixins.py
Python
mit
1,428
#!/usr/bin/env python # -*- coding: utf-8 -*- #*************************************************************************** #* * #* Copyright (c) 2015 Yorik van Havre <yorik@uncreated.net> * #* * #* This program is free software; you can redistribute it and/or modify * #* it under the terms of the GNU Lesser General Public License (LGPL) * #* as published by the Free Software Foundation; either version 2 of * #* the License, or (at your option) any later version. * #* for detail see the LICENCE text file. * #* * #* This program is distributed in the hope that it will be useful, * #* but WITHOUT ANY WARRANTY; without even the implied warranty of * #* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * #* GNU Library General Public License for more details. * #* * #* You should have received a copy of the GNU Library General Public * #* License along with this program; if not, write to the Free Software * #* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 * #* USA * #* * #*************************************************************************** from __future__ import print_function __title__="FreeCAD Addon Manager Module" __author__ = "Yorik van Havre","Jonathan Wiedemann","Kurt Kremitzki" __url__ = "http://www.freecadweb.org" ''' FreeCAD Addon Manager Module It will fetch its contents from https://github.com/FreeCAD/FreeCAD-addons You need a working internet connection, and the python-git package installed. 
''' from PySide import QtCore, QtGui import FreeCAD,urllib2,re,os,shutil NOGIT = False # for debugging purposes, set this to True to always use http downloads MACROS_BLACKLIST = ["BOLTS","WorkFeatures","how to install","PartsLibrary"] def symlink(source, link_name): if os.path.exists(link_name): print("symlink already exists") else: os_symlink = getattr(os, "symlink", None) if callable(os_symlink): os_symlink(source, link_name) else: import ctypes csl = ctypes.windll.kernel32.CreateSymbolicLinkW csl.argtypes = (ctypes.c_wchar_p, ctypes.c_wchar_p, ctypes.c_uint32) csl.restype = ctypes.c_ubyte flags = 1 if os.path.isdir(source) else 0 if csl(link_name, source, flags) == 0: raise ctypes.WinError() class AddonsInstaller(QtGui.QDialog): def __init__(self): QtGui.QDialog.__init__(self) self.repos = [] self.macros = [] self.setObjectName("AddonsInstaller") self.resize(326, 304) self.verticalLayout = QtGui.QVBoxLayout(self) self.tabWidget = QtGui.QTabWidget() self.verticalLayout.addWidget(self.tabWidget) self.listWorkbenches = QtGui.QListWidget() self.listWorkbenches.setIconSize(QtCore.QSize(16,16)) self.tabWidget.addTab(self.listWorkbenches,"") self.listMacros = QtGui.QListWidget() self.listMacros.setIconSize(QtCore.QSize(16,16)) self.tabWidget.addTab(self.listMacros,"") self.labelDescription = QtGui.QLabel() self.labelDescription.setMinimumSize(QtCore.QSize(0, 75)) self.labelDescription.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop) self.labelDescription.setWordWrap(True) self.verticalLayout.addWidget(self.labelDescription) self.progressBar = QtGui.QProgressBar(self) #self.progressBar.setProperty("value", 24) self.progressBar.setObjectName("progressBar") #self.progressBar.hide() self.progressBar.setRange(0,0) self.verticalLayout.addWidget(self.progressBar) self.horizontalLayout = QtGui.QHBoxLayout() spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum) self.horizontalLayout.addItem(spacerItem) 
self.buttonInstall = QtGui.QPushButton() icon = QtGui.QIcon.fromTheme("download") self.buttonInstall.setIcon(icon) self.horizontalLayout.addWidget(self.buttonInstall) self.buttonRemove = QtGui.QPushButton() icon = QtGui.QIcon.fromTheme("edit-delete") self.buttonRemove.setIcon(icon) self.horizontalLayout.addWidget(self.buttonRemove) self.buttonCancel = QtGui.QPushButton() icon = QtGui.QIcon.fromTheme("cancel") self.buttonCancel.setIcon(icon) self.horizontalLayout.addWidget(self.buttonCancel) self.verticalLayout.addLayout(self.horizontalLayout) self.retranslateUi() QtCore.QObject.connect(self.buttonCancel, QtCore.SIGNAL("clicked()"), self.reject) QtCore.QObject.connect(self.buttonInstall, QtCore.SIGNAL("clicked()"), self.install) QtCore.QObject.connect(self.buttonRemove, QtCore.SIGNAL("clicked()"), self.remove) QtCore.QObject.connect(self.labelDescription, QtCore.SIGNAL("linkActivated(QString)"), self.showlink) QtCore.QObject.connect(self.listWorkbenches, QtCore.SIGNAL("currentRowChanged(int)"), self.show) QtCore.QObject.connect(self.tabWidget, QtCore.SIGNAL("currentChanged(int)"), self.switchtab) QtCore.QObject.connect(self.listMacros, QtCore.SIGNAL("currentRowChanged(int)"), self.show_macro) QtCore.QMetaObject.connectSlotsByName(self) self.update() def retranslateUi(self): self.setWindowTitle(QtGui.QApplication.translate("AddonsInstaller", "Addon manager", None, QtGui.QApplication.UnicodeUTF8)) self.labelDescription.setText(QtGui.QApplication.translate("AddonsInstaller", "Downloading addon list...", None, QtGui.QApplication.UnicodeUTF8)) self.buttonCancel.setText(QtGui.QApplication.translate("AddonsInstaller", "Close", None, QtGui.QApplication.UnicodeUTF8)) self.buttonInstall.setText(QtGui.QApplication.translate("AddonsInstaller", "Install / update", None, QtGui.QApplication.UnicodeUTF8)) self.buttonRemove.setText(QtGui.QApplication.translate("AddonsInstaller", "Remove", None, QtGui.QApplication.UnicodeUTF8)) 
self.tabWidget.setTabText(self.tabWidget.indexOf(self.listWorkbenches), QtGui.QApplication.translate("AddonsInstaller", "Workbenches", None, QtGui.QApplication.UnicodeUTF8)) self.tabWidget.setTabText(self.tabWidget.indexOf(self.listMacros), QtGui.QApplication.translate("AddonsInstaller", "Macros", None, QtGui.QApplication.UnicodeUTF8)) def update(self): self.listWorkbenches.clear() self.repos = [] self.info_worker = InfoWorker() self.info_worker.addon_repos.connect(self.update_repos) self.update_worker = UpdateWorker() self.update_worker.info_label.connect(self.set_information_label) self.update_worker.addon_repo.connect(self.add_addon_repo) self.update_worker.progressbar_show.connect(self.show_progress_bar) self.update_worker.start() def add_addon_repo(self, addon_repo): self.repos.append(addon_repo) if addon_repo[2] == 1 : self.listWorkbenches.addItem(QtGui.QListWidgetItem(QtGui.QIcon.fromTheme("dialog-ok"),str(addon_repo[0]) + str(" (Installed)"))) else: self.listWorkbenches.addItem(" "+str(addon_repo[0])) def set_information_label(self, label): self.labelDescription.setText(label) def show(self,idx): if self.repos and idx >= 0: self.show_worker = ShowWorker(self.repos, idx) self.show_worker.info_label.connect(self.set_information_label) self.show_worker.addon_repos.connect(self.update_repos) self.show_worker.progressbar_show.connect(self.show_progress_bar) self.show_worker.start() def show_macro(self,idx): if self.macros and idx >= 0: self.showmacro_worker = ShowMacroWorker(self.macros, idx) self.showmacro_worker.info_label.connect(self.set_information_label) self.showmacro_worker.update_macro.connect(self.update_macro) self.showmacro_worker.progressbar_show.connect(self.show_progress_bar) self.showmacro_worker.start() def switchtab(self,idx): if idx == 1: if not self.macros: self.listMacros.clear() self.macros = [] self.macro_worker = MacroWorker() self.macro_worker.add_macro.connect(self.add_macro) 
self.macro_worker.info_label.connect(self.set_information_label) self.macro_worker.progressbar_show.connect(self.show_progress_bar) self.macro_worker.start() def update_repos(self, repos): self.repos = repos def add_macro(self, macro): self.macros.append(macro) if macro[1] == 1: self.listMacros.addItem(QtGui.QListWidgetItem(QtGui.QIcon.fromTheme("dialog-ok"),str(macro[0]) + str(" (Installed)"))) else: self.listMacros.addItem(" "+str(macro[0])) def update_macro(self, idx, macro): self.macros[idx] = macro def showlink(self,link): "opens a link with the system browser" #print("clicked: ",link) QtGui.QDesktopServices.openUrl(QtCore.QUrl(link, QtCore.QUrl.TolerantMode)) def install(self): if self.tabWidget.currentIndex() == 0: idx = self.listWorkbenches.currentRow() self.install_worker = InstallWorker(self.repos, idx) self.install_worker.info_label.connect(self.set_information_label) self.install_worker.progressbar_show.connect(self.show_progress_bar) self.install_worker.start() elif self.tabWidget.currentIndex() == 1: macropath = FreeCAD.ParamGet('User parameter:BaseApp/Preferences/Macro').GetString("MacroPath",os.path.join(FreeCAD.ConfigGet("UserAppData"),"Macro")) macro = self.macros[self.listMacros.currentRow()] if len(macro) < 5: self.labelDescription.setText(QtGui.QApplication.translate("AddonsInstaller", "Unable to install", None, QtGui.QApplication.UnicodeUTF8)) return macroname = "Macro_"+macro[0]+".FCMacro" macroname = macroname.replace(" ","_") macrofilename = os.path.join(macropath,macroname) macrofile = open(macrofilename,"wb") macrofile.write(macro[3]) macrofile.close() self.labelDescription.setText(QtGui.QApplication.translate("AddonsInstaller", "Macro successfully installed. 
The macro is now available from the Macros dialog.", None, QtGui.QApplication.UnicodeUTF8)) self.update_status() def show_progress_bar(self, state): if state == True: self.listWorkbenches.setEnabled(False) self.listMacros.setEnabled(False) self.buttonInstall.setEnabled(False) self.buttonRemove.setEnabled(False) self.progressBar.show() else: self.progressBar.hide() self.listWorkbenches.setEnabled(True) self.listMacros.setEnabled(True) self.buttonInstall.setEnabled(True) self.buttonRemove.setEnabled(True) def remove(self): if self.tabWidget.currentIndex() == 0: idx = self.listWorkbenches.currentRow() basedir = FreeCAD.ConfigGet("UserAppData") moddir = basedir + os.sep + "Mod" clonedir = basedir + os.sep + "Mod" + os.sep + self.repos[idx][0] if os.path.exists(clonedir): shutil.rmtree(clonedir) self.labelDescription.setText(QtGui.QApplication.translate("AddonsInstaller", "Addon successfully removed. Please restart FreeCAD", None, QtGui.QApplication.UnicodeUTF8)) else: self.labelDescription.setText(QtGui.QApplication.translate("AddonsInstaller", "Unable to remove this addon", None, QtGui.QApplication.UnicodeUTF8)) elif self.tabWidget.currentIndex() == 1: macropath = FreeCAD.ParamGet('User parameter:BaseApp/Preferences/Macro').GetString("MacroPath",os.path.join(FreeCAD.ConfigGet("UserAppData"),"Macro")) macro = self.macros[self.listMacros.currentRow()] if macro[1] != 1: return macroname = "Macro_"+macro[0]+".FCMacro" macroname = macroname.replace(" ","_") macrofilename = os.path.join(macropath,macroname) if os.path.exists(macrofilename): os.remove(macrofilename) self.labelDescription.setText(QtGui.QApplication.translate("AddonsInstaller", "Macro successfully removed.", None, QtGui.QApplication.UnicodeUTF8)) self.update_status() def update_status(self): self.listWorkbenches.clear() self.listMacros.clear() moddir = FreeCAD.ConfigGet("UserAppData") + os.sep + "Mod" macropath = FreeCAD.ParamGet('User 
parameter:BaseApp/Preferences/Macro').GetString("MacroPath",os.path.join(FreeCAD.ConfigGet("UserAppData"),"Macro")) for wb in self.repos: if os.path.exists(os.path.join(moddir,wb[0])): self.listWorkbenches.addItem(QtGui.QListWidgetItem(QtGui.QIcon.fromTheme("dialog-ok"),str(wb[0]) + str(" (Installed)"))) wb[2] = 1 else: self.listWorkbenches.addItem(" "+str(wb[0])) wb[2] = 0 for macro in self.macros: if os.path.exists(os.path.join(macropath,"Macro_"+macro[0].replace(" ","_")+".FCMacro")): self.listMacros.addItem(QtGui.QListWidgetItem(QtGui.QIcon.fromTheme("dialog-ok"),str(macro[0]) + str(" (Installed)"))) macro[1] = 1 else: self.listMacros.addItem(" "+str(macro[0])) macro[1] = 0 class UpdateWorker(QtCore.QThread): info_label = QtCore.Signal(str) addon_repo = QtCore.Signal(object) progressbar_show = QtCore.Signal(bool) def __init__(self): QtCore.QThread.__init__(self) def run(self): "populates the list of addons" self.progressbar_show.emit(True) u = urllib2.urlopen("https://github.com/FreeCAD/FreeCAD-addons") p = u.read() u.close() p = p.replace("\n"," ") p = re.findall("octicon-file-submodule(.*?)message",p) basedir = FreeCAD.ConfigGet("UserAppData") moddir = basedir + os.sep + "Mod" repos = [] for l in p: #name = re.findall("data-skip-pjax=\"true\">(.*?)<",l)[0] name = re.findall("title=\"(.*?) @",l)[0] self.info_label.emit(name) #url = re.findall("title=\"(.*?) 
@",l)[0] url = "https://github.com/" + re.findall("href=\"\/(.*?)\/tree",l)[0] addondir = moddir + os.sep + name if not os.path.exists(addondir): state = 0 else: state = 1 repos.append([name,url,state]) self.addon_repo.emit([name,url,state]) if not repos: self.info_label.emit(QtGui.QApplication.translate("AddonsInstaller", "Unable to download addon list.", None, QtGui.QApplication.UnicodeUTF8)) else: self.info_label.emit(QtGui.QApplication.translate("AddonsInstaller", "Workbenches list was updated.", None, QtGui.QApplication.UnicodeUTF8)) self.progressbar_show.emit(False) self.stop = True class InfoWorker(QtCore.QThread): addon_repos = QtCore.Signal(object) def __init__(self): QtCore.QThread.__init__(self) def run(self): i = 0 for repo in self.repos: url = repo[1] u = urllib2.urlopen(url) p = u.read() u.close() desc = re.findall("<meta content=\"(.*?)\" name", p)[3] self.repos[i].append(desc) i += 1 self.addon_repos.emit(self.repos) self.stop = True class MacroWorker(QtCore.QThread): add_macro = QtCore.Signal(object) info_label = QtCore.Signal(str) progressbar_show = QtCore.Signal(bool) def __init__(self): QtCore.QThread.__init__(self) def run(self): "populates the list of addons" self.info_label.emit("Downloading list of macros...") self.progressbar_show.emit(True) macropath = FreeCAD.ParamGet('User parameter:BaseApp/Preferences/Macro').GetString("MacroPath",os.path.join(FreeCAD.ConfigGet("UserAppData"),"Macro")) u = urllib2.urlopen("http://www.freecadweb.org/wiki/Macros_recipes") p = u.read() u.close() macros = re.findall("title=\"(Macro.*?)\"",p) macros = [mac for mac in macros if (not("translated" in mac))] macros.sort() for mac in macros: macname = mac[6:] macname = macname.replace("&amp;","&") if not (macname in MACROS_BLACKLIST): macfile = mac.replace(" ","_")+".FCMacro" if os.path.exists(os.path.join(macropath,macfile)): installed = 1 else: installed = 0 self.add_macro.emit([macname,installed]) 
self.info_label.emit(QtGui.QApplication.translate("AddonsInstaller", "List of macros successfully retrieved.", None, QtGui.QApplication.UnicodeUTF8)) self.progressbar_show.emit(False) self.stop = True class ShowWorker(QtCore.QThread): info_label = QtCore.Signal(str) addon_repos = QtCore.Signal(object) progressbar_show = QtCore.Signal(bool) def __init__(self, repos, idx): QtCore.QThread.__init__(self) self.repos = repos self.idx = idx def run(self): self.progressbar_show.emit(True) self.info_label.emit(QtGui.QApplication.translate("AddonsInstaller", "Retrieving description...", None, QtGui.QApplication.UnicodeUTF8)) if len(self.repos[self.idx]) == 4: desc = self.repos[self.idx][3] else: url = self.repos[self.idx][1] self.info_label.emit(QtGui.QApplication.translate("AddonsInstaller", "Retrieving info from ", None, QtGui.QApplication.UnicodeUTF8) + str(url)) u = urllib2.urlopen(url) p = u.read() u.close() desc = re.findall("<meta content=\"(.*?)\" name",p)[4] self.repos[self.idx].append(desc) self.addon_repos.emit(self.repos) if self.repos[self.idx][2] == 1 : message = "<strong>" + QtGui.QApplication.translate("AddonsInstaller", "<strong>This addon is already installed.", None, QtGui.QApplication.UnicodeUTF8) + "</strong><br>" + desc + ' - <a href="' + self.repos[self.idx][1] + '"><span style="word-wrap: break-word;width:15em;text-decoration: underline; color:#0000ff;">' + self.repos[self.idx][1] + '</span></a>' else: message = desc + ' - <a href="' + self.repos[self.idx][1] + '"><span style="word-wrap: break-word;width:15em;text-decoration: underline; color:#0000ff;">' + self.repos[self.idx][1] + '</span></a>' self.info_label.emit( message ) self.progressbar_show.emit(False) self.stop = True class ShowMacroWorker(QtCore.QThread): info_label = QtCore.Signal(str) update_macro = QtCore.Signal(int,object) progressbar_show = QtCore.Signal(bool) def __init__(self, macros, idx): QtCore.QThread.__init__(self) self.macros = macros self.idx = idx def run(self): 
self.progressbar_show.emit(True) self.info_label.emit(QtGui.QApplication.translate("AddonsInstaller", "Retrieving description...", None, QtGui.QApplication.UnicodeUTF8)) if len(self.macros[self.idx]) > 2: desc = self.macros[self.idx][2] url = self.macros[self.idx][4] else: mac = self.macros[self.idx][0].replace(" ","_") mac = mac.replace("&","%26") mac = mac.replace("+","%2B") url = "http://www.freecadweb.org/wiki/Macro_"+mac self.info_label.emit("Retrieving info from " + str(url)) u = urllib2.urlopen(url) p = u.read() u.close() code = re.findall("<pre>(.*?)<\/pre>",p.replace("\n","--endl--")) if code: code = code[0] code = code.replace("--endl--","\n") else: self.info_label.emit(QtGui.QApplication.translate("AddonsInstaller", "Unable to fetch the code of this macro.", None, QtGui.QApplication.UnicodeUTF8)) self.progressbar_show.emit(False) self.stop = True return desc = re.findall("<td class=\"ctEven left macro-description\">(.*?)<\/td>",p.replace("\n"," ")) if desc: desc = desc[0] else: self.info_label.emit(QtGui.QApplication.translate("AddonsInstaller", "Unable to retrieve a description for this macro.", None, QtGui.QApplication.UnicodeUTF8)) desc = "No description available" # clean HTML escape codes try: from HTMLParser import HTMLParser except ImportError: from html.parser import HTMLParser try: code = code.decode("utf8") code = HTMLParser().unescape(code) code = code.encode("utf8") code = code.replace("\xc2\xa0", " ") except: FreeCAD.Console.PrintWarning("Unable to clean macro code: "+mac+"\n") self.update_macro.emit(self.idx,self.macros[self.idx]+[desc,code,url]) if self.macros[self.idx][1] == 1 : message = "<strong>" + QtGui.QApplication.translate("AddonsInstaller", "<strong>This addon is already installed.", None, QtGui.QApplication.UnicodeUTF8) + "</strong><br>" + desc + ' - <a href="' + url + '"><span style="word-wrap: break-word;width:15em;text-decoration: underline; color:#0000ff;">' + url + '</span></a>' else: message = desc + ' - <a href="' + url + 
'"><span style="word-wrap: break-word;width:15em;text-decoration: underline; color:#0000ff;">' + url + '</span></a>' self.info_label.emit( message ) self.progressbar_show.emit(False) self.stop = True class InstallWorker(QtCore.QThread): info_label = QtCore.Signal(str) progressbar_show = QtCore.Signal(bool) def __init__(self, repos, idx): QtCore.QThread.__init__(self) self.idx = idx self.repos = repos def run(self): "installs or updates the selected addon" git = None try: import git except: self.info_label.emit("python-git not found.") FreeCAD.Console.PrintWarning("python-git not found. Using standard download instead.\n") try: import zipfile,StringIO except: self.info_label.emit("no zip support.") FreeCAD.Console.PrintError("your version of python doesn't appear to support ZIP files. Unable to proceed.\n") return if self.idx < 0: return if not self.repos: return if NOGIT: git = None basedir = FreeCAD.ConfigGet("UserAppData") moddir = basedir + os.sep + "Mod" if not os.path.exists(moddir): os.makedirs(moddir) clonedir = moddir + os.sep + self.repos[self.idx][0] self.progressbar_show.emit(True) if os.path.exists(clonedir): self.info_label.emit("Updating module...") if git: repo = git.Git(clonedir) answer = repo.pull() else: answer = self.download(self.repos[self.idx][1],clonedir) else: if git: self.info_label.emit("Cloning module...") repo = git.Repo.clone_from(self.repos[self.idx][1], clonedir, branch='master') else: self.info_label.emit("Downloading module...") self.download(self.repos[self.idx][1],clonedir) answer = QtGui.QApplication.translate("AddonsInstaller", "Workbench successfully installed. 
Please restart FreeCAD to apply the changes.", None, QtGui.QApplication.UnicodeUTF8) # symlink any macro contained in the module to the macros folder macrodir = FreeCAD.ParamGet("User parameter:BaseApp/Preferences/Macro").GetString("MacroPath") for f in os.listdir(clonedir): if f.lower().endswith(".fcmacro"): symlink(clonedir+os.sep+f,macrodir+os.sep+f) FreeCAD.ParamGet('User parameter:Plugins/'+self.repos[self.idx][0]).SetString("destination",clonedir) answer += QtGui.QApplication.translate("AddonsInstaller", "A macro has been installed and is available the Macros menu", None, QtGui.QApplication.UnicodeUTF8) + ": <b>" answer += f + "</b>" self.info_label.emit(answer) self.progressbar_show.emit(False) self.stop = True def download(self,giturl,clonedir): "downloads and unzip from github" import StringIO,zipfile bakdir = None if os.path.exists(clonedir): bakdir = clonedir+".bak" if os.path.exists(bakdir): shutil.rmtree(bakdir) os.rename(clonedir,bakdir) os.makedirs(clonedir) zipurl = giturl+"/archive/master.zip" try: print("Downloading "+zipurl) u = urllib2.urlopen(zipurl) except: return QtGui.QApplication.translate("AddonsInstaller", "Error: Unable to download", None, QtGui.QApplication.UnicodeUTF8) + " " + zipurl zfile = StringIO.StringIO() zfile.write(u.read()) zfile = zipfile.ZipFile(zfile) master = zfile.namelist()[0] # github will put everything in a subfolder zfile.extractall(clonedir) u.close() zfile.close() for filename in os.listdir(clonedir+os.sep+master): shutil.move(clonedir+os.sep+master+os.sep+filename, clonedir+os.sep+filename) os.rmdir(clonedir+os.sep+master) if bakdir: shutil.rmtree(bakdir) return QtGui.QApplication.translate("AddonsInstaller", "Successfully installed", None, QtGui.QApplication.UnicodeUTF8) + " " + zipurl def launchAddonMgr(): # first use dialog readWarning = FreeCAD.ParamGet('User parameter:Plugins/addonsRepository').GetBool('readWarning',False) if not readWarning: if 
QtGui.QMessageBox.warning(None,"FreeCAD",QtGui.QApplication.translate("AddonsInstaller", "The addons that can be installed here are not officially part of FreeCAD, and are not reviewed by the FreeCAD team. Make sure you know what you are installing!", None, QtGui.QApplication.UnicodeUTF8), QtGui.QMessageBox.Cancel | QtGui.QMessageBox.Ok) != QtGui.QMessageBox.StandardButton.Cancel: FreeCAD.ParamGet('User parameter:Plugins/addonsRepository').SetBool('readWarning',True) readWarning = True if readWarning: dialog = AddonsInstaller() dialog.exec_()
bblacey/FreeCAD-MacOS-CI
src/Mod/AddonManager/AddonManager.py
Python
lgpl-2.1
27,781