import json
import execjs._exceptions as exceptions
from execjs._abstract_runtime import AbstractRuntime
from execjs._abstract_runtime_context import AbstractRuntimeContext
from execjs._misc import encode_unicode_codepoints
try:
import PyV8
except ImportError:
_pyv8_available = False
else:
_pyv8_available = True
class PyV8Runtime(AbstractRuntime):
'''Runtime to execute code with PyV8.'''
def __init__(self):
pass
@property
def name(self):
return "PyV8"
def _compile(self, source, cwd=None):
return self.Context(source)
def is_available(self):
return _pyv8_available
class Context(AbstractRuntimeContext):
def __init__(self, source=""):
self._source = source
def is_available(self):
return _pyv8_available
def _exec_(self, source):
source = '''\
(function() {{
{0};
{1};
}})()'''.format(
encode_unicode_codepoints(self._source),
encode_unicode_codepoints(source)
)
# backward compatibility
source = str(source)
with PyV8.JSContext() as ctxt, PyV8.JSEngine() as engine:
js_errors = (PyV8.JSError, IndexError, ReferenceError, SyntaxError, TypeError)
try:
script = engine.compile(source)
except js_errors as e:
raise exceptions.ProgramError(e)
try:
value = script.run()
except js_errors as e:
raise exceptions.ProgramError(e)
return self.convert(value)
def _eval(self, source):
return self.exec_('return ' + encode_unicode_codepoints(source))
def _call(self, identifier, *args):
args = json.dumps(args)
return self.eval("{identifier}.apply(this, {args})".format(identifier=identifier, args=args))
@classmethod
def convert(cls, obj):
from PyV8 import _PyV8
if isinstance(obj, bytes):
return obj.decode('utf8')
if isinstance(obj, _PyV8.JSArray):
return [cls.convert(v) for v in obj]
elif isinstance(obj, _PyV8.JSFunction):
return None
elif isinstance(obj, _PyV8.JSObject):
ret = {}
for k in obj.keys():
v = cls.convert(obj[k])
if v is not None:
ret[cls.convert(k)] = v
return ret
else:
return obj
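# Usage sketch (a hypothetical session, assuming PyV8 is installed and that
# AbstractRuntime exposes the usual PyExecJS compile()/call() interface):
# runtime = PyV8Runtime()
# if runtime.is_available():
#     ctx = runtime.compile('function add(a, b) { return a + b; }')
#     print(ctx.call('add', 1, 2))  # -> 3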
|
{
"content_hash": "06900da806ba40cc32c095bcf6a31d5f",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 105,
"avg_line_length": 31.03488372093023,
"alnum_prop": 0.5327838141626077,
"repo_name": "doloopwhile/PyExecJS",
"id": "06ddcb75ef96b11ec68caba7f1163a372c645f16",
"size": "2669",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "execjs/_pyv8runtime.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34263"
},
{
"name": "Shell",
"bytes": "236"
}
],
"symlink_target": ""
}
|
"""
Miscellaneous convenience functions for iterating over blocks, txs, etc.
"""
import time
import threading
from .scan import LongestChainBlockIterator, TxIterator
from .track import TrackedSpendingTxIterator, UtxoSet
from .blockchain import BlockChainIterator
################################################################################
# Blocks
def iter_blocks(block_iter = None, **kwargs):
"""
Currently this function doesn't do much: it is roughly equivalent to
`return LongestChainBlockIterator()`, and exists mainly for forward compatibility.
For simple cases, prefer this function over using LongestChainBlockIterator
directly; in the future we might add various useful options and flags to it.
"""
if block_iter is None:
block_iter = LongestChainBlockIterator(**kwargs)
return block_iter
def get_blockchain(blockchain_iter = None, **kwargs):
"""
:param blockchain_iter: a BlockChainIterator
:param kwargs: extra kwargs for BlockChainIterator (ignored unless blockchain_iter is None)
:return: a BlockChain
"""
if blockchain_iter is None:
blockchain_iter = BlockChainIterator(**kwargs)
# blockchain_iter builds the block chain as we iterate over it
for _ in blockchain_iter: pass
return blockchain_iter.blockchain
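# Usage sketch (a minimal example; extra kwargs would be forwarded to
# BlockChainIterator as documented above):
# blockchain = get_blockchain()
# print(len(blockchain))  # assumes BlockChain supports len()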
################################################################################
# Txs
def iter_txs(
track_spending = False,
track_scripts = False,
tracker = None,
utxoset = None,
block_iter = None,
blockchain = None,
block_kwargs = {},
block_filter = None,
show_progressbar = False,
**tx_kwargs
):
"""
Iterates over the transactions of the blockchain.
:param track_spending: resolve spent_output for each TxInput (will use TrackedSpendingTxIterator
instead of the basic TxIterator)
:param track_scripts: when resolving spent_output, also include its script. track_scripts=True
implies track_spending=True. (ignored unless utxoset is None)
:param tracker, utxoset: ignored unless track_spending=True
:param block_iter: a LongestChainBlockIterator
:param blockchain: a BlockChain object to populate on the fly
:param block_filter: an optional block filter, forwarded via block_kwargs
:param show_progressbar: whether to show a progressbar, forwarded via block_kwargs
:param block_kwargs: extra kwargs for the block_iter (LongestChainBlockIterator or BlockChainIterator)
:param tx_kwargs: extra kwargs for the tx_iter (TxIterator or TrackedSpendingTxIterator)
"""
block_kwargs = dict(block_kwargs)
block_kwargs.setdefault('block_filter', block_filter)
block_kwargs.setdefault('show_progressbar', show_progressbar)
# block_iter and blockchain building
if blockchain is not None:
# We need to build the blockchain. We wrap the original block_iter with a
# BlockChainIterator, which builds the blockchain as we go. `blockchain` is the initial
# BlockChain object to use, and will be updated in place.
block_iter = BlockChainIterator(blockchain = blockchain, block_iter = block_iter, **block_kwargs)
if block_iter is None:
block_iter = LongestChainBlockIterator(**block_kwargs)
# tracked spending
if track_scripts:
# track_scripts=True implies track_spending=True
track_spending = True
if track_spending:
if utxoset is None:
utxoset = UtxoSet(include_scripts = track_scripts)
tx_iter_cls = TrackedSpendingTxIterator
tx_kwargs.update(tracker = tracker, utxoset = utxoset)
else:
tx_iter_cls = TxIterator
# create the tx-iterator
return tx_iter_cls(block_iter = block_iter, **tx_kwargs)
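# Usage sketch, mirroring the docstring above (process() is a placeholder
# for user code):
# for tx in iter_txs(track_spending=True, show_progressbar=True):
#     process(tx)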
################################################################################
# itertools
class tailable:
"""
An iterator-wrapper which keeps waiting for new data from the underlying
iterator.
The underlying iterator needs to be "refreshable", i.e. calls to next()
can keep returning more data as it arrives, even after raising
StopIteration on past calls to next().
:note: This iterator is resumable if the underlying iterator is resumable.
"""
def __init__(self, iterator, timeout = None, polling_interval = 5):
self.iter = iterator
if timeout is None:
timeout = float('Inf')
self.timeout = timeout
self.polling_interval = polling_interval
self._stop_event = threading.Event()
def __next__(self):
start_time = time.time()
while not self._stop_event.is_set():
try:
return next(self.iter)
except StopIteration:
# no more available data. check timeout, then sleep+retry
elapsed_time = time.time() - start_time
remaining_time = self.timeout - elapsed_time
if remaining_time <= 0:
# timed out, waited long enough
break
self._stop_event.wait(timeout = min(self.polling_interval, remaining_time))
raise StopIteration
def __iter__(self):
return self
def stop(self):
"""
Signal the iterator to stop waiting for more data
"""
self._stop_event.set()
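# Usage sketch: keep consuming new txs as they arrive; call stop() (e.g. from
# another thread) to end the loop (handle() is a placeholder for user code):
# tx_iter = tailable(iter_txs(), polling_interval=5)
# for tx in tx_iter:
#     handle(tx)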
################################################################################
|
{
"content_hash": "5944c3babf3b44df2dad0967037f50b6",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 106,
"avg_line_length": 36.48979591836735,
"alnum_prop": 0.6178225205070843,
"repo_name": "fungibit/chainscan",
"id": "11d5e0d546e4efff3fa0dff6081951a161bb8b0c",
"size": "5364",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chainscan/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "6210"
},
{
"name": "Python",
"bytes": "115337"
}
],
"symlink_target": ""
}
|
import pytest
import requests
from pprint import pprint
requests.packages.urllib3.disable_warnings()
req_symbols = ['bigip_mgmt_ip', 'bigip_username', 'bigip_password', 'bigip_port']
def missing_bigip_symbols():
for sym in req_symbols:
if not hasattr(pytest.symbols, sym):
return True
return False
pytestmark = pytest.mark.skipif(missing_bigip_symbols(),
reason="Need symbols pointing at a real bigip.")
def _make_svc_config(partition, num_virtuals=0, num_members=0):
base_virtual = {
'name': 'Virtual-1',
'destination': '/{}/1.2.3.4:80'.format(partition),
'ipProtocol': 'tcp',
'profiles': [
{
'name': "tcp",
'partition': "Common",
'context': "all"
}
],
"enabled": True,
"vlansEnabled": True,
"sourceAddressTranslation": {
"type": "automap",
}
}
base_pool = {
"name": "pool1",
"monitors": ["/Common/http"]
}
base_member = {
"address": "172.16.0.100%0", "port": 8080
}
cfg = {
'virtualServers': [],
'pools': [],
}
for i in range(num_virtuals):
v = {}
v.update(base_virtual)
v['name'] = "virtual-{}".format(i)
v['pool'] = "/{}/pool-{}".format(partition, i)
cfg['virtualServers'].append(v)
p = {}
p.update(base_pool)
p['name'] = "pool-{}".format(i)
members = []
for j in range(num_members):
m = {}
m.update(base_member)
m['address'] = '172.16.0.{}'.format(j)
members.append(m)
p['members'] = members
cfg['pools'].append(p)
return cfg
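# Illustrative result: _make_svc_config('test', num_virtuals=1, num_members=1)
# yields one virtual server "virtual-0" whose pool is "/test/pool-0", and one
# pool "pool-0" containing a single member 172.16.0.0:8080.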
testdata = [
(1, 1),
(10, 10),
(100, 10),
(10, 100),
]
@pytest.mark.parametrize("nv,nm", testdata)
@pytest.mark.benchmark(group="apply-new")
def test_apply_new(partition, cccl, bigip_rest_counters, benchmark, nv, nm):
cfg = _make_svc_config(partition, num_virtuals=nv, num_members=nm)
def setup():
cccl.apply_ltm_config({})
def apply_cfg():
cccl.apply_ltm_config(cfg)
benchmark.pedantic(apply_cfg, setup=setup, rounds=2, iterations=1)
pprint(bigip_rest_counters)
@pytest.mark.parametrize("nv,nm", testdata)
@pytest.mark.benchmark(group="apply-no-change")
def test_apply_no_change(partition, cccl, bigip_rest_counters, benchmark, nv, nm):
cfg = _make_svc_config(partition, num_virtuals=nv, num_members=nm)
def apply_cfg():
cccl.apply_ltm_config(cfg)
apply_cfg()
benchmark.pedantic(apply_cfg, rounds=2, iterations=1)
pprint(bigip_rest_counters)
|
{
"content_hash": "860188201a36c16b7f350d3c2f8753b8",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 82,
"avg_line_length": 25.62264150943396,
"alnum_prop": 0.5537555228276878,
"repo_name": "f5devcentral/f5-cccl",
"id": "d9ce59741f9197acb4010c28c430f0b58f71b8e8",
"size": "3331",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/f5_cccl/perf/test_perf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "484942"
},
{
"name": "Shell",
"bytes": "5416"
}
],
"symlink_target": ""
}
|
from django.db import models
from django.contrib.auth.models import Group, User
from goflow.workflow.models import Process, Activity, Transition, UserProfile
from goflow.workflow.notification import send_mail
from datetime import timedelta, datetime
from django.core.urlresolvers import resolve
from django.core.mail import mail_admins
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from goflow.workflow.logger import Log; log = Log('goflow.runtime.managers')
from django.conf import settings
from goflow.workflow.decorators import allow_tags
class ProcessInstanceManager(models.Manager):
'''Custom model manager for ProcessInstance
'''
def start(self, process_name, user, item, title=None, priority=0):
'''
Returns a workitem given the name of a preexisting enabled Process,
the user, the content object, and an optional title.
:type process_name: string
:param process_name: a name of a process. e.g. 'leave'
:type user: User
:param user: an instance of django.contrib.auth.models.User,
typically retrieved through a request object.
:type item: ContentType
:param item: a content_type object e.g. an instance of LeaveRequest
:type: title: string
:param title: title of new ProcessInstance instance (optional)
:type: priority: integer
:param priority: default priority (optional)
:rtype: WorkItem
:return: a newly configured workitem sent to auto_user,
a target_user, or ?? (roles).
usage::
wi = Process.objects.start(process_name='leave',
user=admin, item=leaverequest1)
'''
process = Process.objects.get(title=process_name, enabled=True)
if priority == 0: priority = process.priority
if not title or (title=='instance'):
title = '%s %s' % (process_name, str(item))
instance = ProcessInstance.objects.create(process=process, user=user, title=title, content_object=item)
# instance running
instance.set_status('running')
workitem = WorkItem.objects.create(instance=instance, user=user,
activity=process.begin, priority=priority)
log.event('created by ' + user.username, workitem)
log('process:', process_name, 'user:', user.username, 'item:', item)
if process.begin.kind == 'dummy':
log('routing activity', process.begin.title, 'workitem:', workitem)
auto_user = User.objects.get(username=settings.WF_USER_AUTO)
workitem.activate(actor=auto_user)
workitem.complete(actor=auto_user)
return workitem
if process.begin.autostart:
log('run auto activity', process.begin.title, 'workitem:', workitem)
auto_user = User.objects.get(username=settings.WF_USER_AUTO)
workitem.activate(actor=auto_user)
if workitem.exec_auto_application():
log('workitem.exec_auto_application:', workitem)
workitem.complete(actor=auto_user)
return workitem
if process.begin.push_application:
target_user = workitem.exec_push_application()
log('application pushed to user', target_user.username)
workitem.user = target_user
workitem.save()
log.event('assigned to '+target_user.username, workitem)
#notify_if_needed(user=target_user)
else:
# set pull roles; useful (in activity too)?
workitem.pull_roles = workitem.activity.roles.all()
workitem.save()
#notify_if_needed(roles=workitem.pull_roles)
return workitem
class ProcessInstance(models.Model):
""" This is a process instance.
A process instance is created when someone decides to do something,
and doing this thing means start using a process defined in GoFlow.
That's why it is called "process instance". The process is a class
(=the definition of the process), and each time you want to
"do what is defined in this process", that means you want to create
an INSTANCE of this process.
So from this point of view, an instance represents your dynamic
part of a process. While the process definition contains the map
of the workflow, the instance stores your usage, your history,
your state of this process.
The ProcessInstance will collect and handle workitems (see definition)
to be passed from activity to activity in the process.
Each instance can have more than one workitem depending on the
number of split actions encountered in the process flow.
That means that an instance is actually the collection of all of
the instance "pieces" (workitems) that we get from splits of the
same original process instance.
Each ProcessInstance keeps track of its history through a graph.
Each node of the graph represents an activity the instance has
gone through (normal graph nodes) or an activity the instance is
now pending on (a graph leaf node). Tracking the ProcessInstance history
can be very useful for the ProcessInstance monitoring.
When a process instance starts, the instance has to carry an
implementation object that contains the application data. The
specification for the implementation class is:
(nothing: it is now managed by a generic relation)
From the instance, the implementation object is reached as follows:
obj = instance.content_object (or instance.wfobject()).
In a template, a field date1 will be displayed like this:
{{ instance.wfobject.date1 }} or {{ instance.content_object.date1 }}
From the object, instances may be reached with the reverse generic relation:
the following can be added to the model:
wfinstances = generic.GenericRelation(ProcessInstance)
"""
STATUS_CHOICES = (
('initiated', 'initiated'),
('running', 'running'),
('active', 'active'),
('complete', 'complete'),
('terminated', 'terminated'),
('suspended', 'suspended'),
)
title = models.CharField(max_length=100)
process = models.ForeignKey(Process, related_name='instances', null=True, blank=True)
creationTime = models.DateTimeField(auto_now_add=True)
user = models.ForeignKey(User, related_name='instances')
status = models.CharField(max_length=10, choices=STATUS_CHOICES, default='initiated')
old_status = models.CharField(max_length=10, choices=STATUS_CHOICES, null=True, blank=True)
condition = models.CharField(max_length=50, null=True, blank=True)
# refactoring
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object = generic.GenericForeignKey('content_type', 'object_id')
# add new ProcessInstanceManager
objects = ProcessInstanceManager()
def wfobject(self):
return self.content_object
@allow_tags
def workitems_list(self):
'''provide html link to workitems for a process instance in admin change list.
@rtype: string
@return: html href link "../workitem/?instance__id__exact=[self.id]&ot=asc&o=0"
'''
nbwi = self.workitems.count()
return '<a href=../workitem/?instance__id__exact=%d&ot=asc&o=0>%d item(s)</a>' % (self.pk, nbwi)
def __str__(self):
return str(self.pk)
def __unicode__(self):
return self.title
def set_status(self, status):
if status not in [x for x, y in ProcessInstance.STATUS_CHOICES]:
raise Exception('incorrect instance status: %s' % status)
self.old_status = self.status
self.status = status
self.save()
class WorkItemManager(models.Manager):
'''Custom model manager for WorkItem
'''
def get_safe(self, id, user=None, enabled_only=False, status=('inactive','active')):
'''
Retrieves a single WorkItem instance given a set of parameters
:type id: int
:param id: the id of the WorkItem instance
:type user: User
:param user: an instance of django.contrib.auth.models.User,
typically retrieved through a request object.
:type enabled_only: bool
:param enabled_only: implies that only enabled processes should be queried
:type status: tuple or string
:param status: ensure that workitem has one of the given set of statuses
usage::
workitem = WorkItem.objects.get_safe(id, user=request.user)
'''
if enabled_only:
workitem = self.get(id=id, activity__process__enabled=True)
else:
workitem = self.get(id=id)
workitem._check(user, status)
return workitem
def list_safe(self, user=None, username=None, queryset='qs_default', activity=None, status=None,
notstatus=('blocked','suspended','fallout','complete'), noauto=True):
"""
Retrieve list of workitems (in order to display a task list for example).
:type user: User
:param user: filter on instance of django.contrib.auth.models.User (default=all)
:type username: string
:param username: filter on username of django.contrib.auth.models.User (default=all)
:type queryset: QuerySet
:param queryset: pre-filtering (default=WorkItem.objects)
:type activity: Activity
:param activity: filter on instance of goflow.workflow.models.Activity (default=all)
:type status: string
:param status: filter on status (default=all)
:type notstatus: string or tuple
:param notstatus: list of status to exclude (default: [blocked, suspended, fallout, complete])
:type noauto: bool
:param noauto: if True (default) auto activities are excluded.
usage::
workitems = WorkItem.objects.list_safe(user=me, notstatus='complete', noauto=True)
"""
if queryset == 'qs_default': queryset = WorkItem.objects
if status: notstatus = []
groups = Group.objects.all()
if user:
query = queryset.filter(user=user, activity__process__enabled=True).order_by('-priority')
groups = user.groups.all()
else:
if username:
query = queryset.filter(
user__username=username,
activity__process__enabled=True
).order_by('-priority')
groups = User.objects.get(username=username).groups.all()
else:
query = None
if query:
if status:
query = query.filter(status=status)
if notstatus:
for s in notstatus:
query = query.exclude(status=s)
if noauto:
query = query.exclude(activity__autostart=True)
if activity:
query = query.filter(activity=activity)
query = list(query)
else:
query = []
# search pullable workitems
for role in groups:
pullables = queryset.filter(pull_roles=role, activity__process__enabled=True).order_by('-priority')
if status:
pullables = pullables.filter(status=status)
if notstatus:
for s in notstatus:
pullables = pullables.exclude(status=s)
if noauto:
pullables = pullables.exclude(activity__autostart=True)
if activity:
pullables = pullables.filter(activity=activity)
if user:
pullables = pullables.filter(user__isnull=True) # tricky
pullables = pullables.exclude(user=user)
query.extend(list(pullables))
if username:
pullables = pullables.exclude(user__username=username)
log.debug('pullables workitems role %s: %s', role, str(pullables))
query.extend(list(pullables))
# search workitems pullable by anybody
pullables = queryset.filter(pull_roles__isnull=True,
activity__process__enabled=True,
user__isnull=True).order_by('-priority')
if status:
pullables = pullables.filter(status=status)
if notstatus:
for s in notstatus:
pullables = pullables.exclude(status=s)
if noauto:
pullables = pullables.exclude(activity__autostart=True)
if activity:
pullables = pullables.filter(activity=activity)
if pullables.count() > 0:
log.debug('anybody\'s workitems: %s', str(pullables))
query.extend(list(pullables))
return query
def notify_if_needed(self, user=None, roles=None):
''' notify user if conditions are fulfilled
'''
if user:
workitems = self.list_safe(user=user, notstatus='complete', noauto=True)
profile, created = UserProfile.objects.get_or_create(user=user)
if len(workitems) >= profile.nb_wi_notif:
try:
if profile.check_notif_to_send():
send_mail(workitems=workitems, user=user, subject='message', template='mail.txt')
profile.notif_sent()
log.info('notification sent to %s' % user.username)
except Exception, v:
log.error('sendmail error: %s' % v)
return
class WorkItem(models.Model):
"""A workitem object represents an activity you are performing.
An Activity object defines the activity, while the workitem object
represents that you are performing this activity. So workitem is
an "instance" of the activity.
"""
STATUS_CHOICES = (
('blocked', 'blocked'),
('inactive', 'inactive'),
('active', 'active'),
('suspended', 'suspended'),
('fallout', 'fallout'),
('complete', 'complete'),
)
date = models.DateTimeField(auto_now=True)
user = models.ForeignKey(User, related_name='workitems', null=True, blank=True)
instance = models.ForeignKey(ProcessInstance, related_name='workitems')
activity = models.ForeignKey(Activity, related_name='workitems')
workitem_from = models.ForeignKey('self', related_name='workitems_to', null=True, blank=True)
others_workitems_from = models.ManyToManyField('self', related_name='others_workitems_to', null=True, blank=True)
push_roles = models.ManyToManyField(Group, related_name='push_workitems', null=True, blank=True)
pull_roles = models.ManyToManyField(Group, related_name='pull_workitems', null=True, blank=True)
blocked = models.BooleanField(default=False)
priority = models.IntegerField(default=0)
status = models.CharField(max_length=10, choices=STATUS_CHOICES, default='inactive')
objects = WorkItemManager()
def forward(self, timeout_forwarding=False, subflow_workitem=None):
# forward_workitem(workitem, path=None, timeout_forwarding=False, subflow_workitem=None):
'''
Convenience procedure to forward workitems to valid destination activities.
@type path: string??
@param path: XXX TODO: This is not used, so don't know why it's here.
@type timeoutForwarding: bool
@param timeoutForwarding:
@type: subflow_workitem: WorkItem
@param subflow_workitem: a workitem associated with a subflow ???
'''
log.info(u'forward_workitem %s', self.__unicode__())
if not timeout_forwarding:
if self.status != 'complete':
return
if self.has_workitems_to() and not subflow_workitem:
log.debug('forward_workitem canceled for %s: '
'workitem.has_workitems_to()', self.__unicode__())
return
if timeout_forwarding:
log.info('timeout forwarding')
Event.objects.create(name='timeout', workitem=self)
for destination in self.get_destinations(timeout_forwarding):
self._forward_workitem_to_activity(destination)
if self.activity.split_mode == 'xor': break
def _forward_workitem_to_activity(self, target_activity):
'''
Passes the process instance embedded in the given workitem
to a new workitem that is associated with the destination activity.
@type target_activity: Activity
@param target_activity: the activity instance to which the workitem
should be forwarded
@rtype: WorkItem
@return: a workitem that has been passed on to the next
activity (and next user)
'''
instance = self.instance
# search a blocked workitem first
qwi = WorkItem.objects.filter(instance=instance, activity=target_activity, status='blocked')
if qwi.count() == 0:
wi = WorkItem.objects.create(instance=instance, activity=target_activity,
user=None, priority=self.priority)
created = True
log.info('forwarded to %s', target_activity.title)
Event.objects.create(name='creation by %s' % self.user.username, workitem=wi)
Event.objects.create(name='forwarded to %s' % target_activity.title, workitem=self)
wi.workitem_from = self
else:
created = False
wi = qwi[0]
if target_activity.join_mode == 'and':
nb_input_transitions = target_activity.nb_input_transitions()
if nb_input_transitions > 1:
if created:
# first workitem: block it
wi.block()
return
else:
wi.others_workitems_from.add(self)
if wi.others_workitems_from.all().count() + 1 < nb_input_transitions:
# keep blocked
return
else:
# check if the join is OK
if wi.check_join():
wi.status = 'inactive'
wi.save()
log.info('activity %s: workitem %s unblocked', target_activity.title, str(wi))
else:
return
else:
if not created:
# join_mode='and'
log.error('activity %s: join_mode must be and', target_activity.title)
self.fall_out()
wi.fall_out()
return
if target_activity.autostart:
log.info('run auto activity %s workitem %s', target_activity.title, str(wi))
try:
auto_user = User.objects.get(username=settings.WF_USER_AUTO)
except Exception:
error = 'a user named %s (settings.WF_USER_AUTO) must be defined for auto activities'
raise Exception(error % settings.WF_USER_AUTO)
wi.activate(actor=auto_user)
if wi.exec_auto_application():
wi.complete(actor=auto_user)
return wi
if target_activity.push_application:
target_user = wi.exec_push_application()
log.info('application pushed to user %s', target_user.username)
wi.user = target_user
wi.save()
Event.objects.create(name='assigned to %s' % target_user.username, workitem=wi)
WorkItem.objects.notify_if_needed(user=target_user)
else:
wi.pull_roles = wi.activity.roles.all()
wi.save()
WorkItem.objects.notify_if_needed(roles=wi.pull_roles)
return wi
def check_join(self):
log.warning('workitem check_join NYI- useful ?')
return True
def _check(self, user, status=('inactive','active')):
'''
Helper that checks that the process is enabled, that the user may take
this workitem, and that the workitem status is one of the given statuses.
'''
if type(status)==type(''):
status = (status,)
if not self.activity.process.enabled:
error = 'process %s disabled.' % self.activity.process.title
log.error('workitem._check: %s' % error)
raise Exception(error)
if not self.check_user(user):
error = 'user %s cannot take workitem %d.' % (user.username, self.pk)
log.error('workitem._check: %s' % error)
self.fall_out()
raise Exception(error)
if not self.status in status:
error = 'workitem %d does not have a correct status (%s/%s).' % (
self.pk, self.status, str(status))
log.error('workitem._check: %s' % error)
raise Exception(error)
return
def get_destinations(self, timeout_forwarding=False):
#get_destinations(workitem, path=None, timeout_forwarding=False):
'''
Return list of destination activities that meet the conditions of each transition
@type path: string??
@param path: XXX TODO: This is not used, so don't know why it's here.
@type timeout_forwarding: bool
@param timeout_forwarding: a workitem with a time-delay??
@rtype: [Activity]
@return: list of destination activities.
'''
transitions = Transition.objects.filter(input=self.activity)
if timeout_forwarding:
transitions = transitions.filter(condition__contains='workitem.time_out')
destinations = []
for t in transitions:
if self.eval_transition_condition(t):
destinations.append(t.output)
return destinations
def eval_transition_condition(self, transition):
'''
evaluate the condition of a transition
'''
if not transition.condition:
return True
instance = self.instance
wfobject = instance.wfobject()
log.debug('eval_transition_condition %s - %s',
transition.condition, instance.condition)
try:
result = eval(transition.condition)
# boolean expr
if type(result) == type(True):
log.debug('eval_transition_condition boolean %s', str(result))
return result
if type(result) == type(''):
log.debug('eval_transition_condition cmp instance.condition %s', str(instance.condition==result))
return (instance.condition==result)
except Exception, v:
log.debug('eval_transition_condition [%s]: %s', transition.condition, v)
return (instance.condition==transition.condition)
#log.error('eval_transition_condition [%s]: %s', transition.condition, v)
return False
def exec_push_application(self):
'''
Execute push application in workitem
'''
if not self.activity.process.enabled:
raise Exception('process %s disabled.' % self.activity.process.title)
params = self.activity.pushapp_param
try:
if params: kwargs = eval(params)
else: kwargs = {}
result = self.activity.push_application.execute(self, **kwargs)
except Exception, v:
log.error('exec_push_application %s', v)
result = None
self.fall_out()
return result
def exec_auto_application(self):
'''
executes the activity's application, falling back to a default test
auto application for activities that don't yet have one
@rtype: bool
'''
try:
if not self.activity.process.enabled:
raise Exception('process %s disabled.' % self.activity.process.title)
# no application: default auto app
if not self.activity.application:
return self.default_auto_app()
func, args, kwargs = resolve(self.activity.application.get_app_url())
params = self.activity.app_param
# params values defined in activity override those defined in urls.py
if params:
params = eval('{'+params.lstrip('{').rstrip('}')+'}')
kwargs.update(params)
func(workitem=self, **kwargs)
return True
except Exception, v:
log.error('execution wi %s:%s', self, v)
return False
def default_auto_app(self):
'''
retrieves the wfobject, appends a log line to its history, and saves it
@rtype: bool
@return: always returns True
'''
obj = self.instance.wfobject()
obj.history += '\n>>> execute auto activity: [%s]' % self.activity.title
obj.save()
return True
def activate(self, actor):
'''
changes workitem status to 'active' and logs event, activator
'''
self._check(actor, ('inactive', 'active'))
if self.status == 'active':
log.warning('activate_workitem actor %s workitem %s already active',
actor.username, str(self))
return
self.status = 'active'
self.user = actor
self.save()
log.info('activate_workitem actor %s workitem %s',
actor.username, str(self))
Event.objects.create(name='activated by %s' % actor.username, workitem=self)
def complete(self, actor):
'''
changes status of workitem to 'complete' and logs event
'''
self._check(actor, 'active')
self.status = 'complete'
self.user = actor
self.save()
log.info('complete_workitem actor %s workitem %s', actor.username, str(self))
Event.objects.create(name='completed by %s' % actor.username, workitem=self)
if self.activity.autofinish:
log.debug('activity autofinish: forward')
self.forward()
# if end activity, instance is complete
if self.instance.process.end == self.activity:
log.info('activity end process %s' % self.instance.process.title)
# first test subflow
lwi = WorkItem.objects.filter(activity__subflow=self.instance.process,
status='blocked',
instance=self.instance)
if lwi.count() > 0:
log.info('parent process for subflow %s' % self.instance.process.title)
workitem0 = lwi[0]
workitem0.instance.process = workitem0.activity.process
workitem0.instance.save()
log.info('process change for instance %s' % workitem0.instance.title)
workitem0.status = 'complete'
workitem0.save()
workitem0.forward(subflow_workitem=self)
else:
self.instance.set_status('complete')
def start_subflow(self, actor=None):
'''
starts subflow and blocks passed in workitem
'''
if not actor: actor = self.user
subflow_begin_activity = self.activity.subflow.begin
instance = self.instance
instance.process = self.activity.subflow
instance.save()
self.status = 'blocked'
self.blocked = True
self.save()
sub_workitem = self._forward_workitem_to_activity(subflow_begin_activity)
return sub_workitem
def eval_condition(self, transition):
'''
evaluate the condition of a transition
'''
raise Exception("New API (not yet implemented)")
def __str__(self):
return str(self.pk)
def __unicode__(self):
return u'%s-%s-%s' % (self.instance.__unicode__(), self.activity.__unicode__(), str(self.pk))
def has_workitems_to(self):
return self.workitems_to.count() > 0
def check_user(self, user):
"""returns True if authorized, False if not.
For dummy activities, always returns True
"""
if self.activity.kind == 'dummy':
return True
if user and self.user and self.user != user:
return False
ugroups = user.groups.all()
agroups = self.activity.roles.all()
authorized = False
if agroups and len(agroups) > 0:
for g in ugroups:
if g in agroups:
authorized = True
break
else:
authorized = True
return authorized
def set_user(self, user, commit=True):
"""affect user if he has a role authorized for activity.
return True if authorized, False if not (workitem then falls out)
"""
if self.check_user(user):
self.user = user
if commit: self.save()
return True
self.fall_out()
return False
def can_priority_change(self):
'''can the user change priority.
@rtype: bool
@return: True if the user can change priority.
The user must belong to a group with the "workitem.can_change_priority"
permission, and this group's name must be the same as the process title.
'''
if self.user.has_perm("workitem.can_change_priority"):
lst = self.user.groups.filter(name=self.instance.process.title)
if lst.count()==0 or \
(lst[0].permissions.filter(codename='can_change_priority').count() == 0):
return False
return True
return False
def block(self):
self.status = 'blocked'
self.save()
Event.objects.create(name='blocked', workitem=self)
def fall_out(self):
self.status = 'fallout'
self.save()
Event.objects.create(name='fallout', workitem=self)
if not settings.DEBUG:
mail_admins(subject='workflow workitem %s fall out' % str(self.pk),
message=u'''
The workitem [%s] has fallen out.
Process: %s
Activity: %s
instance: %s
----------------------------------
''' % (
self.pk,
self.instance.process,
self.activity,
self.instance,
))
def html_action(self):
label = 'action'
url = '#'  # fallback for statuses without an action
if self.status == 'inactive':
label = 'activate'
url='activate/%d/' % self.id
if self.status == 'active':
label = 'complete'
url='complete/%d/' % self.id
if self.status == 'complete':
return 'completed'
return '<a href=%s>%s</a>' % (url, label)
def html_action_link(self):
if self.status == 'inactive':
url = 'activate/%d/' % self.id
elif self.status == 'active':
url = 'complete/%d/' % self.id
elif self.status == 'complete':
raise Exception('no action for completed workitems')
else:
raise Exception('no action for workitems in status %s' % self.status)
return url
def time_out(self, delay, unit='days'):
'''
return True if timeout reached
delay: nb units
unit: 'weeks' | 'days' | 'hours' ... (see timedelta)
'''
tdelta = timedelta(**{unit: delay})
now = datetime.now()
return (now > (self.date + tdelta))
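# Illustrative example (assumes the workitem was created 3 days ago):
# wi.time_out(2, unit='days')  -> True   (2-day delay already exceeded)
# wi.time_out(5, unit='days')  -> False  (5-day delay not yet reached)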
@allow_tags
def events_list(self):
'''provide html link to events for a workitem in admin change list.
@rtype: string
@return: html href link "../event/?workitem__id__exact=[self.id]&ot=asc&o=0"
'''
nbevt = self.events.count()
return '<a href=../event/?workitem__id__exact=%d&ot=asc&o=0>%d item(s)</a>' % (self.pk, nbevt)
class Meta:
permissions = (
("can_change_priority", "Can change priority"),
)
class Event(models.Model):
"""Event are changes that happens on workitems.
"""
date = models.DateTimeField(auto_now=True)
name = models.CharField(max_length=50)
workitem = models.ForeignKey(WorkItem, related_name='events')
def __unicode__(self):
return self.name
|
{
"content_hash": "02ca331002e25f4b7ef6b90641e6c6b1",
"timestamp": "",
"source": "github",
"line_count": 826,
"max_line_length": 117,
"avg_line_length": 41.26634382566586,
"alnum_prop": 0.5615795341195798,
"repo_name": "zen4ever/django-goflow",
"id": "df20e38e5238d8c4356936c71c4d1fbedc2f5532",
"size": "34136",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "goflow/runtime/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "40541"
},
{
"name": "Python",
"bytes": "160935"
},
{
"name": "Shell",
"bytes": "275"
}
],
"symlink_target": ""
}
|
"""
Command line tool to assign example tests to CI test jobs.
"""
# TODO: Need to handle running examples on different chips
import os
import sys
import re
import argparse
try:
from Utility.CIAssignTest import AssignTest
except ImportError:
test_fw_path = os.getenv("TEST_FW_PATH")
if test_fw_path:
sys.path.insert(0, test_fw_path)
from Utility.CIAssignTest import AssignTest
from Utility.CIAssignTest import Group
class ExampleGroup(Group):
SORT_KEYS = CI_JOB_MATCH_KEYS = ["env_tag", "chip"]
class CIExampleAssignTest(AssignTest):
CI_TEST_JOB_PATTERN = re.compile(r"^example_test_.+")
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("test_case",
help="test case folder or file")
parser.add_argument("ci_config_file",
help="gitlab ci config file")
parser.add_argument("output_path",
help="output path of config files")
args = parser.parse_args()
assign_test = CIExampleAssignTest(args.test_case, args.ci_config_file, case_group=ExampleGroup)
assign_test.assign_cases()
assign_test.output_configs(args.output_path)
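# Example invocation (paths are illustrative):
#   python CIAssignExampleTest.py examples/ .gitlab-ci.yml build/test_configs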
|
{
"content_hash": "d39e2e8e17fc62caf908341cf219526e",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 99,
"avg_line_length": 28.476190476190474,
"alnum_prop": 0.669732441471572,
"repo_name": "krzychb/rtd-test-bed",
"id": "bd6bdaf3b82d7fc7bd162e9605420d1dbb093aa8",
"size": "1801",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/tiny-test-fw/CIAssignExampleTest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "248929"
},
{
"name": "Batchfile",
"bytes": "9428"
},
{
"name": "C",
"bytes": "42611901"
},
{
"name": "C++",
"bytes": "10437923"
},
{
"name": "CMake",
"bytes": "316611"
},
{
"name": "CSS",
"bytes": "1340"
},
{
"name": "Dockerfile",
"bytes": "4319"
},
{
"name": "GDB",
"bytes": "2764"
},
{
"name": "Go",
"bytes": "146670"
},
{
"name": "HCL",
"bytes": "468"
},
{
"name": "HTML",
"bytes": "115431"
},
{
"name": "Inno Setup",
"bytes": "14977"
},
{
"name": "Lex",
"bytes": "7273"
},
{
"name": "M4",
"bytes": "189150"
},
{
"name": "Makefile",
"bytes": "439631"
},
{
"name": "Objective-C",
"bytes": "133538"
},
{
"name": "PHP",
"bytes": "498"
},
{
"name": "Pawn",
"bytes": "151052"
},
{
"name": "Perl",
"bytes": "141532"
},
{
"name": "Python",
"bytes": "1868534"
},
{
"name": "Roff",
"bytes": "102712"
},
{
"name": "Ruby",
"bytes": "206821"
},
{
"name": "Shell",
"bytes": "625528"
},
{
"name": "Smarty",
"bytes": "5972"
},
{
"name": "Tcl",
"bytes": "110"
},
{
"name": "TeX",
"bytes": "1961"
},
{
"name": "Visual Basic",
"bytes": "294"
},
{
"name": "XSLT",
"bytes": "80335"
},
{
"name": "Yacc",
"bytes": "15875"
}
],
"symlink_target": ""
}
|
from setuptools import setup, find_packages
setup(name='valid_model',
version='0.3.6',
description="Generic data modeling and validation",
long_description="""\
""",
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License'
],
keywords='',
author='Joshua Forman',
author_email='jforman@outbrain.com',
url='http://www.outbrain.com/',
license='',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=False,
install_requires=[
# -*- Extra requirements: -*-
],
entry_points="""
# -*- Entry points: -*-
""",
)
|
{
"content_hash": "aff1bcb2845641127f3d2cf5a6faebf0",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 72,
"avg_line_length": 28.307692307692307,
"alnum_prop": 0.5529891304347826,
"repo_name": "outbrain/valid_model",
"id": "bb7e9aaa1f7d6fe5d1f61c1eacfe33a29f4176eb",
"size": "736",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "37134"
}
],
"symlink_target": ""
}
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
dataset = pd.read_csv('data.csv')
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, -1].values
"""from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X,y,test_size = 0.2, random_state = 0)"""
# Fitting the Regression Model to the dataset
# Create your regressor here
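# A minimal sketch of this step, assuming scikit-learn's DecisionTreeRegressor
# as a placeholder (any estimator with the same fit/predict interface works):
# from sklearn.tree import DecisionTreeRegressor
# regressor = DecisionTreeRegressor(random_state=0)
# regressor.fit(X, y)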
# Predicting a new result
y_pred = regressor.predict([[6.5]])  # requires the regressor created above
plt.scatter(X, y, color='red')
|
{
"content_hash": "c505fc9a55522b19b194c42301c488aa",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 93,
"avg_line_length": 23.047619047619047,
"alnum_prop": 0.7169421487603306,
"repo_name": "jigargandhi/UdemyMachineLearning",
"id": "7c79610ed5df43c19be8ea5f4e95c5c2539f8bc6",
"size": "508",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Machine Learning A-Z Template Folder/Part 2 - Regression/Section 6 - Polynomial Regression/j_regression_template.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "157569"
},
{
"name": "R",
"bytes": "74375"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import os
import io
import re
import binascii
import pickle
from collections import defaultdict
from sortedcontainers import SortedSet
import logging
logging.basicConfig(
format='%(asctime)s [%(process)d] [%(levelname)s] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO)
class Patterns(object):
__slots__ = ('lengths', 'b_ints', 'e_ints', 'checksums')
def __init__(self):
self.lengths = SortedSet()
self.b_ints = SortedSet()
self.e_ints = SortedSet()
self.checksums = SortedSet()
class PhraseMatcher(object):
def __init__(self,
model_dir,
pattern_file=None,
vocab_file=None,
max_len=10,
tokenizer=lambda x: x.split()):
self.tokenizer = tokenizer
self.model_dir = model_dir
if not os.path.exists(model_dir):
os.makedirs(model_dir)
if pattern_file:
if vocab_file:
self._read_vocab(vocab_file)
else:
self._build_vocab(pattern_file)
self._compile(pattern_file, max_len=max_len)
else:
with open('{}/vocab.p'.format(self.model_dir), 'rb') as fin:
self.vocab = pickle.load(fin)
with open('{}/patterns.p'.format(self.model_dir), 'rb') as fin:
self.patterns = pickle.load(fin)
def _read_vocab(self, fname):
logging.info('Reading vocab file...')
wc = defaultdict(int)
for line in io.open(fname, 'r', encoding='utf-8'):
parts = self.tokenizer(line.lower().strip())
word = parts[0]
if word not in wc:
wc[word] = len(wc)
n_vocab = len(wc)
self.vocab = dict(wc)
with open('{}/vocab.p'.format(self.model_dir), 'wb') as fout:
pickle.dump(self.vocab, fout, -1)
logging.info('Vocab size: {}'.format(n_vocab))
def _build_vocab(self, fname):
logging.info('Start building vocab...')
wc = defaultdict(int)
for line in io.open(fname, 'r', encoding='utf-8'):
for word in self.tokenizer(line.lower().strip()):
wc[word] += 1
wc = sorted(wc.items(), key=lambda x: x[1])
wc = dict((v, k) for k, v in enumerate(reversed([k for k, v in wc])))
n_vocab = len(wc)
self.vocab = wc
with open('{}/vocab.p'.format(self.model_dir), 'wb') as fout:
pickle.dump(self.vocab, fout, -1)
logging.info('Vocab size: {}'.format(n_vocab))
def _compile(self, fname, max_len=10):
logging.info('Start compiling patterns...')
self.patterns = Patterns()
for i, pat in enumerate(io.open(fname, 'r', encoding='utf-8')):
if i % 100000 == 0:
logging.info('Processing input patterns: {}'.format(i))
p_arr = pat.strip().split()
p_len = len(p_arr)
if p_len > max_len:
continue
p_ints = [self.vocab.get(t, None) for t in p_arr]
if None in set(p_ints):
continue
p_c = self.crc32(' '.join(p_arr))
p_f = self.fletcher(p_ints)
self.patterns.lengths.add(p_len)
self.patterns.b_ints.add(p_ints[0])
self.patterns.e_ints.add(p_ints[-1])
self.patterns.checksums.add((p_c, p_f))
with open('{}/patterns.p'.format(self.model_dir), 'wb') as fout:
pickle.dump(self.patterns, fout, -1)
def crc32(self, text):
s = text.encode('utf-8')
return binascii.crc32(s) % (1 << 32)
def fletcher(self, arr):
sum1 = sum2 = 0
for v in arr:
sum1 = (sum1 + v) % 255
sum2 = (sum2 + sum1) % 255  # accumulate sum1 into sum2 (Fletcher checksum)
return (sum1 * 256) + sum2
def match(self, sentence, remove_subset=False):
tok = self.tokenizer(sentence.strip())
tok_ints = [self.vocab.get(t, None) for t in tok]
tok_len = len(tok_ints)
candidates = set()
for i, b_int in enumerate(tok_ints):
if b_int not in self.patterns.b_ints:
continue
for p_len in self.patterns.lengths:
j = i + p_len - 1
if j + 1 > tok_len:
continue
p_ints = tok_ints[i:j + 1]
if None in set(p_ints):
continue
e_int = tok_ints[j]
if e_int not in self.patterns.e_ints:
continue
p_c = self.crc32(' '.join(tok[i:j + 1]))
p_f = self.fletcher(p_ints)
if (p_c, p_f) in self.patterns.checksums:
candidates.add((i, j))
if remove_subset:
ranges = list(sorted(candidates, reverse=True))
for (i, j) in list(candidates):
for ii, jj in ranges:
if i == ii and j == jj:
continue
if ii <= i and j <= jj:
try:
candidates.remove((i, j))
except KeyError:
pass
for (i, j) in candidates:
yield tok[i:j + 1]
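# Usage sketch (file and directory names are illustrative):
# matcher = PhraseMatcher('model', pattern_file='phrases.txt')
# for phrase in matcher.match('some input sentence', remove_subset=True):
#     print(' '.join(phrase))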
|
{
"content_hash": "f65489477b93dcb80f6c3a5b1cf2d194",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 77,
"avg_line_length": 30.43103448275862,
"alnum_prop": 0.4985835694050991,
"repo_name": "geovedi/py-phrasematcher",
"id": "697db960300be6322cc8f29675408322154d8e37",
"size": "5320",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "phrasematcher.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5320"
}
],
"symlink_target": ""
}
|
from math import sqrt
from numba import cuda, float32, uint32, void
from numba.cuda import compile_ptx, compile_ptx_for_current_device
from numba.cuda.testing import skip_on_cudasim, unittest, CUDATestCase
@skip_on_cudasim('Compilation unsupported in the simulator')
class TestCompileToPTX(unittest.TestCase):
def test_global_kernel(self):
def f(r, x, y):
i = cuda.grid(1)
if i < len(r):
r[i] = x[i] + y[i]
args = (float32[:], float32[:], float32[:])
ptx, resty = compile_ptx(f, args)
# Kernels should not have a func_retval parameter
self.assertNotIn('func_retval', ptx)
# .visible .func is used to denote a device function
self.assertNotIn('.visible .func', ptx)
# .visible .entry would denote the presence of a global function
self.assertIn('.visible .entry', ptx)
# Return type for kernels should always be void
self.assertEqual(resty, void)
def test_device_function(self):
def add(x, y):
return x + y
args = (float32, float32)
ptx, resty = compile_ptx(add, args, device=True)
# Device functions take a func_retval parameter for storing the
# returned value in by reference
self.assertIn('func_retval', ptx)
# .visible .func is used to denote a device function
self.assertIn('.visible .func', ptx)
# .visible .entry would denote the presence of a global function
self.assertNotIn('.visible .entry', ptx)
# Inferred return type as expected?
self.assertEqual(resty, float32)
def test_fastmath(self):
def f(x, y, z, d):
return sqrt((x * y + z) / d)
args = (float32, float32, float32, float32)
ptx, resty = compile_ptx(f, args, device=True)
# Without fastmath, fma contraction is enabled by default, but ftz and
# approximate div / sqrt is not.
self.assertIn('fma.rn.f32', ptx)
self.assertIn('div.rn.f32', ptx)
self.assertIn('sqrt.rn.f32', ptx)
ptx, resty = compile_ptx(f, args, device=True, fastmath=True)
# With fastmath, ftz and approximate div / sqrt are enabled
self.assertIn('fma.rn.ftz.f32', ptx)
# "full" refers to a full-range approximate divide
self.assertIn('div.full.ftz.f32', ptx)
self.assertIn('sqrt.approx.ftz.f32', ptx)
def check_debug_info(self, ptx):
# A debug_info section should exist in the PTX. Whitespace varies
# between CUDA toolkit versions.
self.assertRegex(ptx, '\\.section\\s+\\.debug_info')
# A .file directive should be produced and include the name of the
# source. The path and whitespace may vary, so we accept anything
# ending in the filename of this module.
self.assertRegex(ptx, '\\.file.*test_compiler.py"')
def test_device_function_with_debug(self):
# See Issue #6719
def f():
pass
ptx, resty = compile_ptx(f, [], device=True, debug=True)
self.check_debug_info(ptx)
def test_kernel_with_debug(self):
# Inspired by (but not originally affected by) Issue #6719
def f():
pass
ptx, resty = compile_ptx(f, [], debug=True)
self.check_debug_info(ptx)
def check_line_info(self, ptx):
# A .file directive should be produced and include the name of the
# source. The path and whitespace may vary, so we accept anything
# ending in the filename of this module.
self.assertRegex(ptx, '\\.file.*test_compiler.py"')
def test_device_function_with_line_info(self):
def f():
pass
ptx, resty = compile_ptx(f, [], device=True, lineinfo=True)
self.check_line_info(ptx)
def test_kernel_with_line_info(self):
def f():
pass
ptx, resty = compile_ptx(f, [], lineinfo=True)
self.check_line_info(ptx)
@skip_on_cudasim('Compilation unsupported in the simulator')
class TestCompileToPTXForCurrentDevice(CUDATestCase):
def test_compile_ptx_for_current_device(self):
def add(x, y):
return x + y
args = (float32, float32)
ptx, resty = compile_ptx_for_current_device(add, args, device=True)
# Check we target the current device's compute capability, or the
# closest compute capability supported by the current toolkit.
device_cc = cuda.get_current_device().compute_capability
cc = cuda.cudadrv.nvvm.find_closest_arch(device_cc)
target = f'.target sm_{cc[0]}{cc[1]}'
self.assertIn(target, ptx)
@skip_on_cudasim('Compilation unsupported in the simulator')
class TestCompileOnlyTests(unittest.TestCase):
'''For tests where we can only check correctness by examining the compiler
output rather than observing the effects of execution.'''
def test_nanosleep(self):
def use_nanosleep(x):
# Sleep for a constant time
cuda.nanosleep(32)
# Sleep for a variable time
cuda.nanosleep(x)
ptx, resty = compile_ptx(use_nanosleep, (uint32,), cc=(7, 0))
nanosleep_count = 0
for line in ptx.split('\n'):
if 'nanosleep.u32' in line:
nanosleep_count += 1
expected = 2
self.assertEqual(expected, nanosleep_count,
(f'Got {nanosleep_count} nanosleep instructions, '
f'expected {expected}'))
if __name__ == '__main__':
unittest.main()
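# Usage sketch, mirroring test_device_function above (requires a CUDA-enabled
# numba installation):
# def add(x, y):
#     return x + y
# ptx, resty = compile_ptx(add, (float32, float32), device=True)
# assert '.visible .func' in ptx and resty == float32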
|
{
"content_hash": "7cd5890b4666af0be7d3d7f706b22198",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 78,
"avg_line_length": 35.980645161290326,
"alnum_prop": 0.6135915366684598,
"repo_name": "stonebig/numba",
"id": "0d229a103d49ce26c2c4e3d878cdaaee9d4ef2f0",
"size": "5577",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "numba/cuda/tests/cudapy/test_compiler.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "2212"
},
{
"name": "C",
"bytes": "228078"
},
{
"name": "C++",
"bytes": "18847"
},
{
"name": "Cuda",
"bytes": "214"
},
{
"name": "HTML",
"bytes": "98846"
},
{
"name": "PowerShell",
"bytes": "3153"
},
{
"name": "Python",
"bytes": "2965893"
},
{
"name": "Shell",
"bytes": "120"
}
],
"symlink_target": ""
}
|
from builtins import str
import fnmatch
import mimetypes
import os
import time
import uuid
from django.conf import settings
from django.core.exceptions import ValidationError
from django.db import models
from django.utils.translation import gettext_lazy as _
import math
from django_cradmin.utils import crhumanize
from django_cradmin.utils.deprecation_decorators import CradminDeprecatedSinceV4
CradminDeprecatedSinceV4(
message='Deprecated, and only available to enable migration of the data. '
'If you still need cradmin_temporaryfileuploadstore, please use the cradmin_legacy '
'library, which can be used along with django_cradmin>=4.0.0. '
'See the releasenotes for 4.0.0 for more details and migration guide.'
).show_warning(name=__name__)
class TemporaryFileCollectionQuerySet(models.query.QuerySet):
def filter_for_user(self, user):
return self.filter(user=user)
class TemporaryFileCollectionManager(models.Manager):
def get_queryset(self):
return TemporaryFileCollectionQuerySet(self.model, using=self._db)
def filter_for_user(self, user):
return self.get_queryset().filter_for_user(user)
def html_input_accept_match(accept, mimetype, filename):
filename = filename.lower()
for pattern in accept.split(','):
if '/' in pattern:
if mimetype and fnmatch.fnmatch(mimetype, pattern):
return True
elif pattern.startswith('.'):
if os.path.splitext(filename)[1] == pattern:
return True
else:
if fnmatch.fnmatch(filename, pattern):
return True
return False
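# Illustrative matches (accept strings and filenames are assumptions):
# html_input_accept_match('image/*,.pdf', 'image/png', 'photo.png') -> True (mimetype glob)
# html_input_accept_match('image/*,.pdf', 'application/pdf', 'doc.pdf') -> True (extension rule)
# html_input_accept_match('image/*,.pdf', 'text/plain', 'notes.txt') -> False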
def truncate_filename(filename, maxlength, ellipsis='...'):
if len(filename) <= maxlength:
return filename
elif maxlength < 12:
return filename[-maxlength:]
else:
max_length_noellipsis = maxlength - len(ellipsis)
startlength = int(math.floor(max_length_noellipsis / 2.0))
endlength = int(math.ceil(max_length_noellipsis / 2.0))
start = filename[0:startlength]
end = filename[-endlength:]
return u'{}{}{}'.format(start, ellipsis, end)
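# Illustrative example: a 29-character name truncated to 20 characters keeps
# both ends around the ellipsis:
# truncate_filename('averyveryverylongfilename.txt', 20) -> 'averyver...ename.txt'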
def _make_unique_filename(filename_set, wanted_filename, generated_filename, max_filename_length, ellipsis):
if generated_filename in filename_set:
generated_uuid = str(uuid.uuid4())
if max_filename_length:
filename = truncate_filename(
filename=wanted_filename,
maxlength=max_filename_length - len(generated_uuid) - 1,
ellipsis=ellipsis)
else:
filename = wanted_filename
generated_filename = u'{}-{}'.format(generated_uuid, filename)
return _make_unique_filename(
filename_set,
wanted_filename=wanted_filename,
generated_filename=generated_filename,
max_filename_length=max_filename_length,
ellipsis=ellipsis)
else:
return generated_filename
def make_unique_filename(filename_set, wanted_filename, max_filename_length=None, ellipsis='...'):
min_max_filename_length = TemporaryFileCollectionDeprecated.MAX_FILENAME_LENGTH_MINVALUE_WITH_UNIQUE_FILENAMES
if max_filename_length and max_filename_length < min_max_filename_length:
raise ValueError('make_unique_filename requires max_filename_length to be at least {}.'.format(min_max_filename_length))
else:
return _make_unique_filename(
filename_set=filename_set,
wanted_filename=wanted_filename,
generated_filename=wanted_filename,
max_filename_length=max_filename_length,
ellipsis=ellipsis)
class TemporaryFileCollectionDeprecated(models.Model):
"""
A collection of temporary files uploaded by a user.
This model is used by apps to store temporary files
uploaded through the temporary file upload API. The typical
workflow is:
1. Upload some files (a collection of files) file via the API.
2. Use the IDs returned by the API to POST a form that uses
file uploads.
3. In the code handling the form POST request, retrieve the temporary
file(s), move them into some form of persistent storage (perhaps
after manipulating the file in some way).
4. Delete the collection.
The ``accept``, ``max_filename_length`` and ``prevent_filename_duplicates``
attributes are set by the client. This means that you can not trust them
and have to perform error checking when you process temporary files. This
is not really a problem since:
1. You should have error checking in the views that
use TemporaryFileCollectionDeprecated.
2. If you have error checking in your views, the only thing
the user can do by manipulating the API call is to cause
themselves a less user-friendly experience - they will typically
get an error when they post the form containing the
hidden field with the collection-id instead of when they upload
a file.
"""
#: Minimum value of ``max_filename_length`` when ``unique_filenames`` is ``True``.
#: It is 45 because ``len(str(uuid.uuid4()))`` is 36, and we need some room over for
#: the file extension.
MAX_FILENAME_LENGTH_MINVALUE_WITH_UNIQUE_FILENAMES = 45
objects = TemporaryFileCollectionManager()
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
help_text='The user that owns this temporary file. Users should not '
"be allowed access to other users' temporary files.",
on_delete=models.CASCADE)
created_datetime = models.DateTimeField(auto_now_add=True)
minutes_to_live = models.PositiveIntegerField(
default=60,
help_text='The number of minutes the app requests that this '
'file collection should be kept before automatic removal. '
'This is used by automatic cleanup jobs to determine '
'what to delete. You should not rely on the file being '
'automatically deleted after this number of minutes, and '
'you should always delete files explicitly as part of the '
'file upload process.'
)
accept = models.TextField(
null=False, blank=True, default='',
help_text='An html input field accept attribute formatted string. '
'This is validated by the API on upload.')
max_filename_length = models.IntegerField(
null=True, blank=True, default=None,
help_text='If specified, we shorten filenames to maximum the specified length. '
'This is validated by the API on upload.')
unique_filenames = models.BooleanField(
null=False, default=False,
help_text='If this is True, we add random data when we '
'detect duplicate filenames. The duplicate prevention '
'algorithm handles max_filename_length. '
'This is validated by the API on upload.')
#: Max file size in bytes.
max_filesize_bytes = models.PositiveIntegerField(
null=True, default=None, blank=True)
#: If this is True, only a single file can be added to the collection.
#: This means that the last file added to the collection will be the
#: only file in the collection.
singlemode = models.BooleanField(
null=False, default=False,
help_text='If this is True, only a single file can be added to the '
'collection. This means that the last file added to the '
'collection will be the only file in the collection.')
def clear_files(self):
for temporaryfile in self.files.all():
temporaryfile.delete_object_and_file()
def clear_files_and_delete(self):
self.clear_files()
self.delete()
def is_supported_filetype(self, mimetype, filename):
if self.accept:
return html_input_accept_match(self.accept, mimetype, filename)
else:
return True
def get_filename_set(self):
return set(self.files.values_list('filename', flat=True))
def clean(self):
if self.max_filename_length and self.unique_filenames:
if self.max_filename_length < self.MAX_FILENAME_LENGTH_MINVALUE_WITH_UNIQUE_FILENAMES:
raise ValidationError('max_filename_length must be at least {} when unique_filenames '
'is True'.format(self.MAX_FILENAME_LENGTH_MINVALUE_WITH_UNIQUE_FILENAMES))
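# A rough sketch of the workflow described in the class docstring above,
# assuming a hypothetical form-handling view. `collection_id` would come from
# a hidden field in the posted form, and `move_to_persistent_storage` is a
# stand-in for app-specific code; neither is part of this module:
#
#     collection = TemporaryFileCollectionDeprecated.objects.get(
#         id=collection_id, user=request.user)
#     for temporaryfile in collection.files.all():
#         move_to_persistent_storage(temporaryfile.file)
#     collection.clear_files_and_delete()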
def temporary_file_upload_to(instance, filename):
filename, extension = os.path.splitext(filename)
if instance.collection_id is None:
raise AttributeError('temporary_file_upload_to() requires a TemporaryFileDeprecated with '
'a collection that has been saved to the database.')
return u'{directory}/{collectionid}/{uuid}_{timestamp}{extension}'.format(
directory=getattr(settings, 'CRADMIN_TEMPORARYFILEUPLOADSTORE_UPLOAD_DIRECTORY',
'cradmin_temporaryfileuploadstore'),
collectionid=instance.collection_id,
uuid=uuid.uuid4(),
timestamp=time.time(),
extension=extension)
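# For illustration: a file named "plan.pdf" uploaded to collection 42 would be
# stored under a path of roughly this shape (the uuid and timestamp vary, and
# the leading directory can be overridden via
# CRADMIN_TEMPORARYFILEUPLOADSTORE_UPLOAD_DIRECTORY):
#
#     cradmin_temporaryfileuploadstore/42/<uuid4>_<unix timestamp>.pdf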
def validate_max_file_size(max_filesize_bytes, fieldfile):
if fieldfile.size > max_filesize_bytes:
        raise ValidationError(_('Cannot upload files larger than %(max_filesize)s.') % {
'max_filesize': crhumanize.human_readable_filesize(max_filesize_bytes),
}, code='max_filesize_bytes_exceeded')
class TemporaryFileDeprecated(models.Model):
"""
A temporary file uploaded by a user.
"""
collection = models.ForeignKey(
TemporaryFileCollectionDeprecated,
on_delete=models.CASCADE,
related_name='files')
filename = models.TextField(db_index=True)
file = models.FileField(
upload_to=temporary_file_upload_to)
mimetype = models.TextField(null=False, blank=True, default='')
def delete_object_and_file(self):
self.file.delete()
self.delete()
def set_mimetype_from_filename(self):
self.mimetype = mimetypes.guess_type(self.filename)[0] or ''
def clean(self):
if not self.mimetype and self.filename:
self.set_mimetype_from_filename()
if self.collection:
if self.filename and self.collection.max_filename_length:
self.filename = truncate_filename(filename=self.filename,
maxlength=self.collection.max_filename_length)
if not self.collection.is_supported_filetype(self.mimetype, self.filename):
raise ValidationError(_('Unsupported filetype.'), code='unsupported_mimetype')
if self.collection.singlemode:
other_temporaryfiles_queryset = self.collection.files.all()
if self.id is not None:
other_temporaryfiles_queryset = other_temporaryfiles_queryset.exclude(id=self.id)
for temporaryfile in other_temporaryfiles_queryset:
temporaryfile.file.delete()
temporaryfile.delete()
if self.collection.max_filesize_bytes and self.file:
validate_max_file_size(max_filesize_bytes=self.collection.max_filesize_bytes,
fieldfile=self.file)
|
{
"content_hash": "0c5a486170a237a89226378c8dd296ae",
"timestamp": "",
"source": "github",
"line_count": 263,
"max_line_length": 114,
"avg_line_length": 43.27756653992395,
"alnum_prop": 0.6572658583728694,
"repo_name": "appressoas/django_cradmin",
"id": "a564402218fb8455df589b875c271df7b5c6e92d",
"size": "11382",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_cradmin/deprecated_apps/cradmin_temporaryfileuploadstore/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "192105"
},
{
"name": "JavaScript",
"bytes": "1951677"
},
{
"name": "Python",
"bytes": "771868"
},
{
"name": "SCSS",
"bytes": "679114"
}
],
"symlink_target": ""
}
|
from error import *
from tokens import *
from events import *
from nodes import *
from loader import *
from dumper import *
__version__ = '5.1'
try:
from cyaml import *
__with_libyaml__ = True
except ImportError:
__with_libyaml__ = False
#------------------------------------------------------------------------------
# Warnings control
#------------------------------------------------------------------------------
# 'Global' warnings state:
_warnings_enabled = {
'YAMLLoadWarning': True,
}
# Get or set global warnings' state
def warnings(settings=None):
if settings is None:
return _warnings_enabled
if type(settings) is dict:
for key in settings:
if key in _warnings_enabled:
_warnings_enabled[key] = settings[key]
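# Usage sketch for the getter/setter above; only keys already present in
# _warnings_enabled are honoured, and unknown keys are silently ignored:
#
#     yaml.warnings()                            # -> {'YAMLLoadWarning': True}
#     yaml.warnings({'YAMLLoadWarning': False})  # silence the load() warning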
# Warn when load() is called without Loader=...
class YAMLLoadWarning(RuntimeWarning):
pass
def load_warning(method):
if _warnings_enabled['YAMLLoadWarning'] is False:
return
import warnings
message = (
"calling yaml.%s() without Loader=... is deprecated, as the "
"default Loader is unsafe. Please read "
"https://msg.pyyaml.org/load for full details."
) % method
warnings.warn(message, YAMLLoadWarning, stacklevel=3)
#------------------------------------------------------------------------------
def scan(stream, Loader=Loader):
"""
Scan a YAML stream and produce scanning tokens.
"""
loader = Loader(stream)
try:
while loader.check_token():
yield loader.get_token()
finally:
loader.dispose()
def parse(stream, Loader=Loader):
"""
Parse a YAML stream and produce parsing events.
"""
loader = Loader(stream)
try:
while loader.check_event():
yield loader.get_event()
finally:
loader.dispose()
def compose(stream, Loader=Loader):
"""
Parse the first YAML document in a stream
and produce the corresponding representation tree.
"""
loader = Loader(stream)
try:
return loader.get_single_node()
finally:
loader.dispose()
def compose_all(stream, Loader=Loader):
"""
Parse all YAML documents in a stream
and produce corresponding representation trees.
"""
loader = Loader(stream)
try:
while loader.check_node():
yield loader.get_node()
finally:
loader.dispose()
def load(stream, Loader=None):
"""
Parse the first YAML document in a stream
and produce the corresponding Python object.
"""
if Loader is None:
load_warning('load')
Loader = FullLoader
loader = Loader(stream)
try:
return loader.get_single_data()
finally:
loader.dispose()
def load_all(stream, Loader=None):
"""
Parse all YAML documents in a stream
and produce corresponding Python objects.
"""
if Loader is None:
load_warning('load_all')
Loader = FullLoader
loader = Loader(stream)
try:
while loader.check_data():
yield loader.get_data()
finally:
loader.dispose()
def full_load(stream):
"""
Parse the first YAML document in a stream
and produce the corresponding Python object.
Resolve all tags except those known to be
unsafe on untrusted input.
"""
return load(stream, FullLoader)
def full_load_all(stream):
"""
Parse all YAML documents in a stream
and produce corresponding Python objects.
Resolve all tags except those known to be
unsafe on untrusted input.
"""
return load_all(stream, FullLoader)
def safe_load(stream):
"""
Parse the first YAML document in a stream
and produce the corresponding Python object.
Resolve only basic YAML tags. This is known
to be safe for untrusted input.
"""
return load(stream, SafeLoader)
def safe_load_all(stream):
"""
Parse all YAML documents in a stream
and produce corresponding Python objects.
Resolve only basic YAML tags. This is known
to be safe for untrusted input.
"""
return load_all(stream, SafeLoader)
def unsafe_load(stream):
"""
Parse the first YAML document in a stream
and produce the corresponding Python object.
Resolve all tags, even those known to be
unsafe on untrusted input.
"""
return load(stream, UnsafeLoader)
def unsafe_load_all(stream):
"""
Parse all YAML documents in a stream
and produce corresponding Python objects.
Resolve all tags, even those known to be
unsafe on untrusted input.
"""
return load_all(stream, UnsafeLoader)
def emit(events, stream=None, Dumper=Dumper,
canonical=None, indent=None, width=None,
allow_unicode=None, line_break=None):
"""
Emit YAML parsing events into a stream.
If stream is None, return the produced string instead.
"""
getvalue = None
if stream is None:
from StringIO import StringIO
stream = StringIO()
getvalue = stream.getvalue
dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
allow_unicode=allow_unicode, line_break=line_break)
try:
for event in events:
dumper.emit(event)
finally:
dumper.dispose()
if getvalue:
return getvalue()
def serialize_all(nodes, stream=None, Dumper=Dumper,
canonical=None, indent=None, width=None,
allow_unicode=None, line_break=None,
encoding='utf-8', explicit_start=None, explicit_end=None,
version=None, tags=None):
"""
Serialize a sequence of representation trees into a YAML stream.
If stream is None, return the produced string instead.
"""
getvalue = None
if stream is None:
if encoding is None:
from StringIO import StringIO
else:
from cStringIO import StringIO
stream = StringIO()
getvalue = stream.getvalue
dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
allow_unicode=allow_unicode, line_break=line_break,
encoding=encoding, version=version, tags=tags,
explicit_start=explicit_start, explicit_end=explicit_end)
try:
dumper.open()
for node in nodes:
dumper.serialize(node)
dumper.close()
finally:
dumper.dispose()
if getvalue:
return getvalue()
def serialize(node, stream=None, Dumper=Dumper, **kwds):
"""
Serialize a representation tree into a YAML stream.
If stream is None, return the produced string instead.
"""
return serialize_all([node], stream, Dumper=Dumper, **kwds)
def dump_all(documents, stream=None, Dumper=Dumper,
default_style=None, default_flow_style=False,
canonical=None, indent=None, width=None,
allow_unicode=None, line_break=None,
encoding='utf-8', explicit_start=None, explicit_end=None,
version=None, tags=None, sort_keys=True):
"""
Serialize a sequence of Python objects into a YAML stream.
If stream is None, return the produced string instead.
"""
getvalue = None
if stream is None:
if encoding is None:
from StringIO import StringIO
else:
from cStringIO import StringIO
stream = StringIO()
getvalue = stream.getvalue
dumper = Dumper(stream, default_style=default_style,
default_flow_style=default_flow_style,
canonical=canonical, indent=indent, width=width,
allow_unicode=allow_unicode, line_break=line_break,
encoding=encoding, version=version, tags=tags,
explicit_start=explicit_start, explicit_end=explicit_end, sort_keys=sort_keys)
try:
dumper.open()
for data in documents:
dumper.represent(data)
dumper.close()
finally:
dumper.dispose()
if getvalue:
return getvalue()
def dump(data, stream=None, Dumper=Dumper, **kwds):
"""
Serialize a Python object into a YAML stream.
If stream is None, return the produced string instead.
"""
return dump_all([data], stream, Dumper=Dumper, **kwds)
def safe_dump_all(documents, stream=None, **kwds):
"""
Serialize a sequence of Python objects into a YAML stream.
Produce only basic YAML tags.
If stream is None, return the produced string instead.
"""
return dump_all(documents, stream, Dumper=SafeDumper, **kwds)
def safe_dump(data, stream=None, **kwds):
"""
Serialize a Python object into a YAML stream.
Produce only basic YAML tags.
If stream is None, return the produced string instead.
"""
return dump_all([data], stream, Dumper=SafeDumper, **kwds)
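# A minimal round-trip sketch using the safe variants defined above; with
# stream=None, safe_dump returns the produced string:
#
#     text = safe_dump({'a': 1, 'b': [2, 3]})
#     data = safe_load(text)
#     assert data == {'a': 1, 'b': [2, 3]}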
def add_implicit_resolver(tag, regexp, first=None,
Loader=Loader, Dumper=Dumper):
"""
Add an implicit scalar detector.
If an implicit scalar value matches the given regexp,
the corresponding tag is assigned to the scalar.
first is a sequence of possible initial characters or None.
"""
Loader.add_implicit_resolver(tag, regexp, first)
Dumper.add_implicit_resolver(tag, regexp, first)
def add_path_resolver(tag, path, kind=None, Loader=Loader, Dumper=Dumper):
"""
Add a path based resolver for the given tag.
A path is a list of keys that forms a path
to a node in the representation tree.
Keys can be string values, integers, or None.
"""
Loader.add_path_resolver(tag, path, kind)
Dumper.add_path_resolver(tag, path, kind)
def add_constructor(tag, constructor, Loader=Loader):
"""
Add a constructor for the given tag.
Constructor is a function that accepts a Loader instance
and a node object and produces the corresponding Python object.
"""
Loader.add_constructor(tag, constructor)
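# A sketch of registering a constructor for a hypothetical "!point" tag.
# Note that add_constructor defaults to Loader, so SafeLoader would need the
# tag registered separately before safe_load could resolve it:
#
#     def point_constructor(loader, node):
#         x, y = loader.construct_sequence(node)
#         return (x, y)
#
#     add_constructor(u'!point', point_constructor)
#     load(u'!point [1, 2]', Loader)  # -> (1, 2)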
def add_multi_constructor(tag_prefix, multi_constructor, Loader=Loader):
"""
Add a multi-constructor for the given tag prefix.
Multi-constructor is called for a node if its tag starts with tag_prefix.
Multi-constructor accepts a Loader instance, a tag suffix,
and a node object and produces the corresponding Python object.
"""
Loader.add_multi_constructor(tag_prefix, multi_constructor)
def add_representer(data_type, representer, Dumper=Dumper):
"""
Add a representer for the given type.
Representer is a function accepting a Dumper instance
and an instance of the given data type
and producing the corresponding representation node.
"""
Dumper.add_representer(data_type, representer)
def add_multi_representer(data_type, multi_representer, Dumper=Dumper):
"""
Add a representer for the given type.
Multi-representer is a function accepting a Dumper instance
and an instance of the given data type or subtype
and producing the corresponding representation node.
"""
Dumper.add_multi_representer(data_type, multi_representer)
class YAMLObjectMetaclass(type):
"""
The metaclass for YAMLObject.
"""
def __init__(cls, name, bases, kwds):
super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds)
if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None:
cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml)
cls.yaml_dumper.add_representer(cls, cls.to_yaml)
class YAMLObject(object):
"""
An object that can dump itself to a YAML stream
and load itself from a YAML stream.
"""
__metaclass__ = YAMLObjectMetaclass
__slots__ = () # no direct instantiation, so allow immutable subclasses
yaml_loader = Loader
yaml_dumper = Dumper
yaml_tag = None
yaml_flow_style = None
def from_yaml(cls, loader, node):
"""
Convert a representation node to a Python object.
"""
return loader.construct_yaml_object(node, cls)
from_yaml = classmethod(from_yaml)
def to_yaml(cls, dumper, data):
"""
Convert a Python object to a representation node.
"""
return dumper.represent_yaml_object(cls.yaml_tag, data, cls,
flow_style=cls.yaml_flow_style)
to_yaml = classmethod(to_yaml)
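# A sketch of a YAMLObject subclass; setting yaml_tag makes the metaclass
# above register from_yaml/to_yaml on the configured loader and dumper:
#
#     class Monster(YAMLObject):
#         yaml_tag = u'!Monster'
#
#         def __init__(self, name, hp):
#             self.name = name
#             self.hp = hp
#
#     load(u'!Monster {name: Cave spider, hp: 16}', Loader)  # -> a Monster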
|
{
"content_hash": "64975ef39062bd6fae06f015b54c9edf",
"timestamp": "",
"source": "github",
"line_count": 405,
"max_line_length": 90,
"avg_line_length": 30.076543209876544,
"alnum_prop": 0.641737131598391,
"repo_name": "instinct-vfx/rez",
"id": "e7a419dd2b5b9aa65d92de28689e9f84bb80d3c5",
"size": "12182",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/rez/vendor/yaml/lib/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "13"
},
{
"name": "CMake",
"bytes": "61812"
},
{
"name": "Dockerfile",
"bytes": "3668"
},
{
"name": "PowerShell",
"bytes": "1390"
},
{
"name": "Python",
"bytes": "1950470"
},
{
"name": "Shell",
"bytes": "3185"
}
],
"symlink_target": ""
}
|
__author__ = 'Guorong Xu<g1xu@ucsd.edu>'
import os
import YamlFileMaker
from cfnCluster import ConnectionManager
from util import DesignFileLoader
workspace = "/shared/workspace/RNASeqPipeline"
data_dir = "/shared/workspace/data_archive/RNASeq"
## Run the full analysis: download, alignment, counting and differential expression calculation.
def run_analysis(ssh_client, workflow, project_name, analysis_steps,
s3_input_files_address, sample_list, group_list, s3_output_files_address):
yaml_file = project_name + ".yaml"
print "making the yaml file..."
YamlFileMaker.make_yaml_file(yaml_file, workflow, project_name, analysis_steps, s3_input_files_address,
sample_list, group_list, s3_output_files_address)
print "copying yaml file to remote master node..."
ConnectionManager.copy_file(ssh_client, yaml_file, workspace + "/" + workflow + "/yaml_examples")
## Remove the local yaml file
os.remove(yaml_file)
print "executing pipeline..."
ConnectionManager.execute_command(ssh_client, "sh " + workspace + "/run.sh "
+ workspace + "/" + workflow + "/yaml_examples/" + yaml_file)
## checking your processing status
def check_processing_status(ssh_client):
    print("checking processing status")
    ConnectionManager.execute_command(ssh_client, "cat " + workspace + "/nohup.out")
## checking your jobs status
def check_jobs_status(ssh_client):
print "checking jobs status"
ConnectionManager.execute_command(ssh_client, "qstat")
## checking your host status
def check_host_status(ssh_client):
print "checking qhost status"
ConnectionManager.execute_command(ssh_client, "qhost")
if __name__ == '__main__':
workflow = "star_htseq_workflow"
analysis_steps = ["fastqc"]
s3_input_files_address = "s3://ccbb-analysis/Guorong/jupyter-genomics/data_archive/test_data/ChiPSeq/RA2284"
s3_output_files_address = "s3://ccbb-analysis/Guorong/jupyter-genomics/data_archive/analysis_data/ChiPSeq"
project_name = "Sample_cDNA"
ssh_client = ""
design_file = "/Users/guorongxu/Desktop/workspace/projects/jupyter-genomics_bitbucket/data/awsCluster/rnaSeq_design_example.txt"
sample_list, group_list = DesignFileLoader.load_design_file(design_file)
run_analysis(ssh_client, workflow, project_name, analysis_steps,
s3_input_files_address, sample_list, group_list, s3_output_files_address)
|
{
"content_hash": "0a9d45166db8379262f9fb0215de4732",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 132,
"avg_line_length": 41.32203389830509,
"alnum_prop": 0.7063166529942576,
"repo_name": "ucsd-ccbb/jupyter-genomics",
"id": "3f3c4cd6b7de334cee51f6c81dd1e92c8bacf4d1",
"size": "2438",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/awsCluster/rnaSeq/RNAPipelineManager.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "254329"
},
{
"name": "Java",
"bytes": "278021"
},
{
"name": "Jupyter Notebook",
"bytes": "19771596"
},
{
"name": "Perl",
"bytes": "14052"
},
{
"name": "Python",
"bytes": "428899"
},
{
"name": "R",
"bytes": "6817"
},
{
"name": "Shell",
"bytes": "37476"
}
],
"symlink_target": ""
}
|
import pytest
from webdriver.error import NoSuchAlertException
from tests.support.sync import Poll
@pytest.fixture
def add_event_listeners(session):
"""Register listeners for tracked events on element."""
def add_event_listeners(element, tracked_events):
element.session.execute_script("""
let element = arguments[0];
let trackedEvents = arguments[1];
if (!("events" in window)) {
window.events = [];
}
for (var i = 0; i < trackedEvents.length; i++) {
element.addEventListener(trackedEvents[i], function (event) {
window.events.push(event.type);
});
}
""", args=(element, tracked_events))
return add_event_listeners
@pytest.fixture
def closed_frame(session, url):
"""Create a frame and remove it after switching to it.
    The removed frame will be kept selected, which allows testing for invalid
    browsing context references.
"""
original_handle = session.window_handle
new_handle = session.new_window()
session.window_handle = new_handle
session.url = url("/webdriver/tests/support/html/frames.html")
subframe = session.find.css("#sub-frame", all=False)
session.switch_frame(subframe)
deleteframe = session.find.css("#delete-frame", all=False)
session.switch_frame(deleteframe)
button = session.find.css("#remove-parent", all=False)
button.click()
yield
session.window.close()
assert new_handle not in session.handles, "Unable to close window {}".format(new_handle)
session.window_handle = original_handle
@pytest.fixture
def closed_window(session, inline):
"""Create a window and close it immediately.
    The window handle will be kept selected, which allows testing for invalid
    top-level browsing context references.
"""
original_handle = session.window_handle
new_handle = session.new_window()
session.window_handle = new_handle
session.url = inline("<input id='a' value='b'>")
element = session.find.css("input", all=False)
session.window.close()
assert new_handle not in session.handles, "Unable to close window {}".format(new_handle)
yield (original_handle, element)
session.window_handle = original_handle
@pytest.fixture
def create_cookie(session, url):
"""Create a cookie."""
def create_cookie(name, value, **kwargs):
if kwargs.get("path", None) is not None:
session.url = url(kwargs["path"])
session.set_cookie(name, value, **kwargs)
return session.cookies(name)
return create_cookie
@pytest.fixture
def create_dialog(session):
"""Create a dialog (one of "alert", "prompt", or "confirm").
    It also provides a way to validate that the dialog has been "handled"
    (either accepted or dismissed) by checking the value it returned.
"""
def create_dialog(dialog_type, text=None):
assert dialog_type in ("alert", "confirm", "prompt"), (
"Invalid dialog type: '%s'" % dialog_type)
if text is None:
text = ""
assert isinstance(text, str), "`text` parameter must be a string"
# Script completes itself when the user prompt has been opened.
# For prompt() dialogs, add a value for the 'default' argument,
# as some user agents (IE, for example) do not produce consistent
# values for the default.
session.execute_async_script("""
let dialog_type = arguments[0];
let text = arguments[1];
setTimeout(function() {
if (dialog_type == 'prompt') {
window.dialog_return_value = window[dialog_type](text, '');
} else {
window.dialog_return_value = window[dialog_type](text);
}
}, 0);
""", args=(dialog_type, text))
wait = Poll(
session,
timeout=15,
ignored_exceptions=NoSuchAlertException,
message="No user prompt with text '{}' detected".format(text))
wait.until(lambda s: s.alert.text == text)
return create_dialog
@pytest.fixture
def create_frame(session):
"""Create an `iframe` element.
The element will be inserted into the document of the current browsing
context. Return a reference to the newly-created element.
"""
def create_frame():
append = """
var frame = document.createElement('iframe');
document.body.appendChild(frame);
return frame;
"""
return session.execute_script(append)
return create_frame
@pytest.fixture
def stale_element(current_session, iframe, inline):
"""Create a stale element reference
The given document will be loaded in the top-level or child browsing context.
Before the requested element is returned it is removed from the document's DOM.
"""
def stale_element(doc, css_value, as_frame=False):
if as_frame:
current_session.url = inline(iframe(doc))
frame = current_session.find.css("iframe", all=False)
current_session.switch_frame(frame)
else:
current_session.url = inline(doc)
element = current_session.find.css(css_value, all=False)
current_session.execute_script("arguments[0].remove();", args=[element])
return element
return stale_element
|
{
"content_hash": "e5d7d297e39fb67764be22d927fd26c5",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 92,
"avg_line_length": 30.943181818181817,
"alnum_prop": 0.6296364304076386,
"repo_name": "chromium/chromium",
"id": "42a74ba41fbc4d4943650fc6cfa23e24561b679c",
"size": "5446",
"binary": false,
"copies": "13",
"ref": "refs/heads/main",
"path": "third_party/blink/web_tests/external/wpt/webdriver/tests/support/fixtures_http.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
"""
"""
__author__ = 'Alisue <lambdalisue@hashnote.net>'
import os
from maidenhair.loaders.base import unite_dataset as _unite_dataset
from maidenhair.compat import OrderedDict
def default_unite_function(data):
"""
    A default unite_function which receives `data` and returns the filename
    without middle extensions
>>> # [<filename>] is mimicking `data`
>>> default_unite_function(['./foo/foo.bar.hoge.piyo'])
'./foo/foo.piyo'
>>> default_unite_function(['./foo/foo.piyo'])
'./foo/foo.piyo'
>>> default_unite_function(['./foo/foo'])
'./foo/foo'
"""
# data[0] indicate the filename of the data
rootname, basename = os.path.split(data[0])
filename, ext = os.path.splitext(basename)
if '.' in filename:
filename = filename.rsplit('.')[0]
filename = os.path.join(rootname, filename + ext)
return filename
def unite_dataset(dataset, basecolumn, fn=None):
"""
Unite dataset via fn
Parameters
----------
dataset : list
A list of data
basecolumn : int
        A column index which will be respected when uniting the dataset
fn : function
        A function which receives :attr:`data` and returns a classification
        string. If it is None, a function which returns the first item of
        :attr:`data` will be used (see the ``with_filename`` parameter of
        the :func:`maidenhair.load` function).
Returns
-------
list
A united dataset
"""
# create default unite_fn
if fn is None:
fn = default_unite_function
# classify dataset via unite_fn
united_dataset = OrderedDict()
for data in dataset:
unite_name = fn(data)
if unite_name not in united_dataset:
united_dataset[unite_name] = []
united_dataset[unite_name].append(data[1:])
# unite dataset via maidenhair.loaders.base.unite_dataset
for name, dataset in united_dataset.items():
united_dataset[name] = _unite_dataset(dataset, basecolumn)[0]
# create new dataset (respect the order of the dataset)
dataset = []
for name, _dataset in united_dataset.items():
dataset.append([name] + _dataset)
return dataset
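# A sketch of how this works with the default unite function: two measurements
# of the same sample classify to the same name and are united into one entry
# (xs1/ys1/xs2/ys2 stand in for numpy column arrays):
#
#     dataset = [['./foo/sample.1.csv', xs1, ys1],
#                ['./foo/sample.2.csv', xs2, ys2]]
#     unite_dataset(dataset, basecolumn=0)
#     # -> [['./foo/sample.csv', <united columns>]]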
|
{
"content_hash": "ae3da3b13cf8c756a5c50a4a8bd91ee2",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 79,
"avg_line_length": 31.71014492753623,
"alnum_prop": 0.636654478976234,
"repo_name": "lambdalisue/maidenhair",
"id": "9223c5c9d7def9dc3b358518b36fb5ac6d435ebb",
"size": "2203",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/maidenhair/classification/unite.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "50558"
}
],
"symlink_target": ""
}
|
import requests, sys
import json
import os.path
import argparse
import seaborn as sns
import struct
def hex2rgb(rgb):
return struct.unpack('BBB', rgb.decode('hex'))
def rgb2hex(rgb):
return struct.pack('BBB',*rgb).encode('hex')
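# Round-trip sketch (Python 2 only, since these rely on str.encode/decode('hex')):
#
#     rgb2hex((31, 120, 180))  # -> '1f78b4'
#     hex2rgb('1f78b4')        # -> (31, 120, 180)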
# ------------------------------------------------------------------------
# TODO put into a sensible JSON file
# "human" "chicken" "zebrafish" "mouse"
speciesmap = {"homo_sapiens":9606, "gallus_gallus":9031, "danio_rerio":7955, "mus_musculus":10090}
keggmap = {"homo_sapiens":"hsa", "gallus_gallus":"gga", "danio_rerio":"dre", "mus_musculus":"mmu"}
# ------------------------------------------------------------------------
# TODO put into a JSON file
# make kegg color map
colors = {"nodata":"#ffffff","pos":"#1f78b4","neg":"#a6cee3","test":"#b2df8a", "de":"#33a02c", "E105":"#fdbf6f","E115":"#ff7f00","28hpf_AP":"#fb9a99","28hpf_DV":"#e31a1c","40hpf_AP":"#cab2d6","40hpf_DV":"#6a3d9a"}
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
parser = argparse.ArgumentParser(description='Convert and MAP IDs across species to one KEGG map')
parser.add_argument('-i', '--input', type=str, help='input json data file')
parser.add_argument('-o', '--outdir', type=str, help='output directory')
parser.add_argument('-p', '--pathway', type=str, help='pathway to map to')
args = parser.parse_args()
print("-"*60)
print("Running with these settings:")
print(args)
print("-"*60)
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
# RESTfull functions
def generic_json_request_handler(server, ext):
r = requests.get(server+ext, headers={ "Content-Type" : "application/json"})
if not r.ok:
r.raise_for_status()
sys.exit()
return(r.json())
def get_kegg_genes(pathway):
#if args.debug:
# print("Parsing pathway: "+pathway)
server="http://togows.org"
ext="/entry/kegg-pathway/"+pathway+"/genes.json"
return(generic_json_request_handler(server, ext)[0])
def get_ens_orthologues(ensid):
server = "https://rest.ensembl.org"
ext = "/homology/id/"+ensid+"?format=condensed;type=orthologues"
return(generic_json_request_handler(server, ext)['data'][0])
def map_ens_to_species(ensid, targettaxon):
server = "https://rest.ensembl.org"
ext = "/homology/id/"+ensid+"?format=condensed;type=orthologues;target_taxon="+targettaxon
return(generic_json_request_handler(server, ext)['data'][0])
def get_sym_orthologues(symbol, species):
server="https://rest.ensembl.org"
ext="/homology/symbol/"+species+"/"+symbol+"?format=condensed;type=orthologues"
return(generic_json_request_handler(server, ext)['data'][0])
def find_symbols(symbols, species):
server = "https://rest.ensembl.org"
ext = "/lookup/symbol/"+species
headers={ "Content-Type" : "application/json", "Accept" : "application/json"}
# requires slightly more formatting of the list
r = requests.post(server+ext, headers=headers, data='{ "symbols" : ["'+'", "'.join(symbols)+'" ] }')
#print('{ "symbols" : ["'+'", "'.join(symbols)+'" ] }')
if not r.ok:
r.raise_for_status()
sys.exit()
decoded = r.json()
return(decoded)
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
def parse_kegg_genes(species, jsondata):
decoded = {}
for kid in jsondata:
hgcn = jsondata[kid].split(";")[0]
decoded[hgcn] = {}
decoded[hgcn]["KEGG"]=keggmap[species]+":"+kid
mappings = find_symbols(decoded.keys(), species)
for gene in mappings:
decoded[gene]["mapping"] = mappings[gene]
decoded[gene]["orthologues"] = get_ens_orthologues(mappings[gene]['id'])
for hgcn in decoded:
# NOT found through lookup let's try something else
if hgcn not in mappings:
decoded[hgcn]["orthologues"] = get_sym_orthologues(hgcn, species)
decoded[hgcn]['id'] = decoded[hgcn]["orthologues"]['id']
# REPORT incomplete genes
for i in decoded:
        if 'orthologues' not in decoded[i]:
            print('[WARN] no orthologues found for: %s' % (i))
return(decoded)
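# Shape sketch of the mapping returned above (actual values depend on the
# live KEGG/Ensembl responses; 'hsa:7157' is used here as an illustrative id):
#
#     {'TP53': {'KEGG': 'hsa:7157',
#               'mapping': {...},       # Ensembl symbol lookup result
#               'orthologues': {...}}}  # Ensembl homology result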
# ------------------------------------------------------------------------
def fill_kegg_colors(genedata, fulldata, colors):
# INIT colors
keggcolors = {fulldata[i]["KEGG"]:{} for i in fulldata}
for species in genedata:
for condition in genedata[species]:
# print(species+"_"+condition)
# STORE DEFAULT VALUE
for keggid in keggcolors:
keggcolors[keggid][species+"_"+condition] = colors["nodata"]
# Retrieve ID mapping data
ofile = args.outdir+condition+"_mappings.json"
mappingdata={}
if not os.path.isfile(ofile):
#print(genedata[species][condition])
mappingdata = find_symbols(genedata[species][condition], species)
with open(ofile, 'w') as outfile:
json.dump(mappingdata, outfile)
else:
with open(ofile, 'r') as data_file:
mappingdata = json.load(data_file)
print("[INFO] Finished %s"%(species+"_"+condition))
# CHECK FOR GENE MATCHES
for agene in mappingdata:
# IF FULL SYMBOL MATCH
if agene in fulldata:
keggcolors[fulldata[agene]["KEGG"]][species+"_"+condition] = colors[condition]
continue
for i in fulldata:
if not 'orthologues' in fulldata[i]:
# SKIP IDs without ortholog information
continue
if mappingdata[agene]['id'] in [fulldata[i]["orthologues"]["homologies"][j]["id"] for j in range(0,len( fulldata[i]["orthologues"]["homologies"]))]:
keggcolors[fulldata[i]["KEGG"]][species+"_"+condition] = colors[condition]
return(keggcolors)
# ------------------------------------------------------------------------
def write_kegg_colors(keggcolors, outputfile):
    palette = sns.color_palette("Reds", 9).as_hex()
#['#fee5d8', '#fdcab5', '#fcab8f', '#fc8a6a', '#fb694a', '#f14432', '#d92523', '#bc141a', '#980c13']
# ADD the all column
for keggid in keggcolors:
keggcolors[keggid]["all"] = sum([j!=colors["nodata"] for i,j in keggcolors[keggid].items()])
for keggid in keggcolors:
if keggcolors[keggid]["all"] == 0:
keggcolors[keggid]["all"] = colors["nodata"]
else:
keggcolors[keggid]["all"] = pallette[keggcolors[keggid]["all"]-1]
conditions = [i for i in keggcolors[list(keggcolors)[0]]]
with open(outputfile, 'w') as outfile:
outfile.write("#hsa\t"+'\t'.join(conditions)+'\n')
for keggid in keggcolors:
outfile.write(keggid.split(":")[1]+'\t'+'\t'.join([str(j) for i,j in keggcolors[keggid].items()])+'\n')
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
# MAIN
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
fulldata = {}
pathwayfile = args.pathway+"_mappings.json"
if not os.path.isfile(os.path.join(os.sep, args.outdir, args.pathway, pathwayfile)):
# parse all info for genes in pathway
fulldata = parse_kegg_genes("homo_sapiens", get_kegg_genes(args.pathway))
# store in file
with open(os.path.join(os.sep, args.outdir, args.pathway, pathwayfile), 'w') as outfile:
json.dump(fulldata, outfile)
else:
# load data from pre-generated file
with open(os.path.join(os.sep, args.outdir, args.pathway, pathwayfile), 'r') as data_file:
fulldata = json.load(data_file)
# ------------------------------------------------------------------------
with open(args.input, 'r') as data_file:
genedata = json.load(data_file)
keggcolors = fill_kegg_colors(genedata, fulldata, colors)
write_kegg_colors(keggcolors, os.path.join(os.sep, args.outdir, args.pathway, args.pathway+"_all_conditions_colors.txt"))
# ------------------------------------------------------------------------
|
{
"content_hash": "037f3e6225021662aa27a13c97348318",
"timestamp": "",
"source": "github",
"line_count": 202,
"max_line_length": 213,
"avg_line_length": 42.24752475247525,
"alnum_prop": 0.5305835481603,
"repo_name": "jdeligt/Genetics",
"id": "79f20a5d4aa02d888e55012cc7640bc9066b4839",
"size": "8534",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "IDconverter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "40219"
},
{
"name": "R",
"bytes": "6421"
},
{
"name": "Shell",
"bytes": "560"
}
],
"symlink_target": ""
}
|
import time
from nova.image import glance
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.virt import driver
from nova.virt.powervm import operator
LOG = logging.getLogger(__name__)
powervm_opts = [
cfg.StrOpt('powervm_mgr_type',
default='ivm',
help='PowerVM manager type (ivm, hmc)'),
cfg.StrOpt('powervm_mgr',
default=None,
help='PowerVM manager host or ip'),
cfg.StrOpt('powervm_mgr_user',
default=None,
help='PowerVM manager user name'),
cfg.StrOpt('powervm_mgr_passwd',
default=None,
help='PowerVM manager user password'),
cfg.StrOpt('powervm_img_remote_path',
default=None,
help='PowerVM image remote path'),
cfg.StrOpt('powervm_img_local_path',
default=None,
help='Local directory to download glance images to'),
]
CONF = cfg.CONF
CONF.register_opts(powervm_opts)
class PowerVMDriver(driver.ComputeDriver):
"""PowerVM Implementation of Compute Driver."""
def __init__(self, virtapi):
super(PowerVMDriver, self).__init__(virtapi)
self._powervm = operator.PowerVMOperator()
@property
def host_state(self):
pass
def init_host(self, host):
"""Initialize anything that is necessary for the driver to function,
including catching up with currently running VM's on the given host."""
pass
def get_info(self, instance):
"""Get the current status of an instance."""
return self._powervm.get_info(instance['name'])
def get_num_instances(self):
return len(self.list_instances())
def instance_exists(self, instance_name):
return self._powervm.instance_exists(instance_name)
def list_instances(self):
return self._powervm.list_instances()
def get_host_stats(self, refresh=False):
"""Return currently known host stats."""
return self._powervm.get_host_stats(refresh=refresh)
def plug_vifs(self, instance, network_info):
pass
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
"""Create a new instance/VM/domain on powerVM."""
self._powervm.spawn(context, instance, image_meta['id'])
def destroy(self, instance, network_info, block_device_info=None,
destroy_disks=True):
"""Destroy (shutdown and delete) the specified instance."""
self._powervm.destroy(instance['name'], destroy_disks)
def reboot(self, instance, network_info, reboot_type,
block_device_info=None):
"""Reboot the specified instance.
:param instance: Instance object as returned by DB layer.
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
:param reboot_type: Either a HARD or SOFT reboot
"""
pass
def get_host_ip_addr(self):
"""
Retrieves the IP address of the dom0
"""
pass
def snapshot(self, context, instance, image_id):
"""Snapshots the specified instance.
:param context: security context
:param instance: Instance object as returned by DB layer.
:param image_id: Reference to a pre-created image that will
hold the snapshot.
"""
snapshot_start = time.time()
# get current image info
glance_service, old_image_id = glance.get_remote_image_service(
context, instance['image_ref'])
image_meta = glance_service.show(context, old_image_id)
img_props = image_meta['properties']
# build updated snapshot metadata
snapshot_meta = glance_service.show(context, image_id)
new_snapshot_meta = {'is_public': False,
'name': snapshot_meta['name'],
'status': 'active',
'properties': {'image_location': 'snapshot',
'image_state': 'available',
'owner_id': instance['project_id']
},
'disk_format': image_meta['disk_format'],
'container_format': image_meta['container_format']
}
if 'architecture' in image_meta['properties']:
arch = image_meta['properties']['architecture']
new_snapshot_meta['properties']['architecture'] = arch
# disk capture and glance upload
self._powervm.capture_image(context, instance, image_id,
new_snapshot_meta)
snapshot_time = time.time() - snapshot_start
inst_name = instance['name']
LOG.info(_("%(inst_name)s captured in %(snapshot_time)s seconds") %
locals())
def pause(self, instance):
"""Pause the specified instance."""
pass
def unpause(self, instance):
"""Unpause paused VM instance."""
pass
def suspend(self, instance):
"""suspend the specified instance."""
pass
def resume(self, instance, network_info, block_device_info=None):
"""resume the specified instance."""
pass
def power_off(self, instance):
"""Power off the specified instance."""
self._powervm.power_off(instance['name'])
def power_on(self, instance):
"""Power on the specified instance."""
self._powervm.power_on(instance['name'])
def get_available_resource(self, nodename):
"""Retrieve resource info."""
return self._powervm.get_available_resource()
def host_power_action(self, host, action):
"""Reboots, shuts down or powers up the host."""
pass
def legacy_nwinfo(self):
"""
Indicate if the driver requires the legacy network_info format.
"""
return False
def manage_image_cache(self, context, all_instances):
"""
Manage the driver's local image cache.
        Some drivers choose to cache images for instances on disk. This method
is an opportunity to do management of that cache which isn't directly
related to other calls into the driver. The prime example is to clean
the cache and remove images which are no longer of interest.
"""
pass
|
{
"content_hash": "f9cd181a4a17fa738bf0e5a588ca57e5",
"timestamp": "",
"source": "github",
"line_count": 190,
"max_line_length": 79,
"avg_line_length": 34.59473684210526,
"alnum_prop": 0.5887722501141032,
"repo_name": "maoy/zknova",
"id": "0ce31353501d118e4590a498f9b945d652e10abd",
"size": "7214",
"binary": false,
"copies": "1",
"ref": "refs/heads/zk-servicegroup",
"path": "nova/virt/powervm/driver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "7960822"
},
{
"name": "Shell",
"bytes": "16987"
}
],
"symlink_target": ""
}
|
import os
import os.path
from hashlib import sha1
import requests
URLS = {'test': 'https://raw.githubusercontent.com/python/cpython/master/Lib/test/test_ssl.py',
'code': 'https://raw.githubusercontent.com/python/cpython/master/Lib/ssl.py'}
DESTDIR = os.path.join(os.path.dirname(__file__), 'upstream')
if __name__ == '__main__':
for subject, url in URLS.items():
upstream_file = os.path.join(DESTDIR, os.path.basename(url))
with open(upstream_file, 'rb') as f:
cur_data = f.read()
cur_hash = sha1(cur_data).hexdigest()
dest_file = '{}.new'.format(upstream_file)
r = requests.get(url)
if sha1(r.content).hexdigest() != cur_hash:
print('New {} found: {}'.format(upstream_file, dest_file))
with open(dest_file, 'wb') as f:
f.write(r.content)
|
{
"content_hash": "9027101cc6714dda4942bdf4d9ec1c30",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 95,
"avg_line_length": 32.148148148148145,
"alnum_prop": 0.6071428571428571,
"repo_name": "imron/scalyr-agent-2",
"id": "1b20509c385478f3d6ffe07360beca0503fa36d7",
"size": "948",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scalyr_agent/third_party_tls/backports_ssl_match_hostname/maintainers/check-updates.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1297"
},
{
"name": "Dockerfile",
"bytes": "1461"
},
{
"name": "Python",
"bytes": "2093708"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
import symposion.speakers.models
class Migration(migrations.Migration):
dependencies = [
('speakers', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='speaker',
name='photo',
field=models.ImageField(upload_to=symposion.speakers.models.get_photo_path, blank=True),
),
]
|
{
"content_hash": "bd9ce311b1dd42cfa9347b428e7a83d8",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 100,
"avg_line_length": 23.736842105263158,
"alnum_prop": 0.6341463414634146,
"repo_name": "njl/pycon",
"id": "d2bc135f40703f96dc5ec1b9496e7fb22951d58a",
"size": "475",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "symposion/speakers/migrations/0002_auto_20151006_0952.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "99458"
},
{
"name": "HTML",
"bytes": "294494"
},
{
"name": "JavaScript",
"bytes": "741348"
},
{
"name": "Makefile",
"bytes": "457"
},
{
"name": "Python",
"bytes": "767135"
},
{
"name": "Shell",
"bytes": "1975"
}
],
"symlink_target": ""
}
|
import json
import mimetypes
from contextlib import contextmanager
from unittest import mock
from django.db import DEFAULT_DB_ALIAS
from django.test import TestCase, Client
from django.test.client import encode_multipart
from django.core.files import File
from django.contrib.auth.models import User
from binder.json import jsonloads
from .testapp.models import Animal, Zoo
from .utils import temp_imagefile
@contextmanager
def immediate_on_commit(using=None):
"""
Context manager executing transaction.on_commit() hooks immediately as
if the connection was in auto-commit mode. This is required when
using a subclass of django.test.TestCase as all tests are wrapped in
a transaction that never gets committed.
"""
immediate_using = DEFAULT_DB_ALIAS if using is None else using
def on_commit(func, using=None):
using = DEFAULT_DB_ALIAS if using is None else using
if using == immediate_using:
func()
with mock.patch('django.db.transaction.on_commit', side_effect=on_commit) as patch:
yield patch
class FileUploadTest(TestCase):
def setUp(self):
super().setUp()
u = User(username='testuser', is_active=True, is_superuser=True)
u.set_password('test')
u.save()
self.client = Client()
r = self.client.login(username='testuser', password='test')
self.assertTrue(r)
# Clean up uploaded files
def tearDown(self):
Zoo.objects.all().delete()
def test_get_model_with_file(self):
emmen = Zoo(name='Wildlands Adventure Zoo Emmen')
with temp_imagefile(100, 200, 'jpeg') as file:
emmen.floor_plan.save('plan.jpg', File(file), save=False)
emmen.save()
response = self.client.get('/zoo/%d/' % emmen.id)
self.assertEqual(response.status_code, 200)
result = jsonloads(response.content)
self.assertEqual(emmen.id, result['data']['id'])
        self.assertEqual(emmen.name, result['data']['name'])
self.assertEqual('/zoo/%d/floor_plan/' % emmen.id, result['data']['floor_plan'])
# This is a basic regression test for a bug due to the router
# singleton refactor, GET would crash if the model simply
# _contained_ a file attribute.
def test_get_related_model_with_file(self):
emmen = Zoo(name='Wildlands Adventure Zoo Emmen')
with temp_imagefile(100, 200, 'jpeg') as file:
emmen.floor_plan.save('plan.jpg', File(file), save=False)
emmen.save()
donald = Animal(name='Donald Duck', zoo=emmen)
donald.save()
response = self.client.get('/animal/%d/' % donald.id, data={'with': 'zoo'})
self.assertEqual(response.status_code, 200)
result = jsonloads(response.content)
self.assertEqual(donald.id, result['data']['id'])
self.assertEqual({'zoo': 'zoo'}, result['with_mapping'])
self.assertEqual({'zoo': 'animals'}, result['with_related_name_mapping'])
zoo = result['with']['zoo'][0]
self.assertEqual(emmen.id, zoo['id'])
        self.assertEqual(emmen.name, zoo['name'])
self.assertEqual('/zoo/%d/floor_plan/' % emmen.id, zoo['floor_plan'])
# Same as above, but in multi-put's code path
def test_multi_put_model_with_existing_file(self):
emmen = Zoo(name='Wildlands Adventure Zoo Emmen')
with temp_imagefile(100, 200, 'jpeg') as file:
emmen.floor_plan.save('plan.jpg', File(file), save=False)
emmen.save()
model_data = {
'data': [{
'id': emmen.id,
'name': 'Wildlands!',
}]
}
response = self.client.put('/zoo/', data=json.dumps(model_data), content_type='application/json')
self.assertEqual(response.status_code, 200)
def test_upload_to_file_field_stores_file(self):
emmen = Zoo(name='Wildlands Adventure Zoo Emmen')
emmen.save()
with temp_imagefile(100, 200, 'jpeg') as uploaded_file:
response = self.client.post('/zoo/%s/floor_plan/' % emmen.id, data={'file': uploaded_file})
self.assertEqual(response.status_code, 200)
emmen.refresh_from_db()
uploaded_file.seek(0)
self.assertTrue(emmen.floor_plan)
with emmen.floor_plan.file as current_file:
self.assertEqual(uploaded_file.read(), current_file.read())
# overwrite with new one
with temp_imagefile(10, 20, 'jpeg') as replacement_file:
response = self.client.post('/zoo/%s/floor_plan/' % emmen.id, data={'file': replacement_file})
self.assertEqual(response.status_code, 200)
emmen.refresh_from_db()
replacement_file.seek(0)
self.assertTrue(emmen.floor_plan)
with emmen.floor_plan.file as current_file:
self.assertEqual(replacement_file.read(), current_file.read())
def test_upload_triggers_file_field_validation_errors(self):
emmen = Zoo(name='Nowhere')
emmen.save()
with temp_imagefile(100, 200, 'jpeg') as uploaded_file:
response = self.client.post('/zoo/%s/floor_plan/' % emmen.id, data={'file': uploaded_file})
self.assertEqual(response.status_code, 400)
returned_data = jsonloads(response.content)
self.assertEqual(len(returned_data['errors']), 1)
self.assertEqual(len(returned_data['errors']['zoo']), 1)
self.assertSetEqual(set(['floor_plan', 'name']), set(returned_data['errors']['zoo'][str(emmen.id)].keys()))
self.assertEqual('no plan', returned_data['errors']['zoo'][str(emmen.id)]['floor_plan'][0]['code'])
self.assertEqual('nowhere', returned_data['errors']['zoo'][str(emmen.id)]['name'][0]['code'])
emmen.refresh_from_db()
self.assertFalse(emmen.floor_plan)
def test_upload_size_resized_png(self):
emmen = Zoo(name='Wildlands Adventure Zoo Emmen')
emmen.save()
with temp_imagefile(600, 600, 'png') as uploaded_file:
response = self.client.post('/zoo/%s/floor_plan/' % emmen.id, data={'file': uploaded_file})
self.assertEqual(response.status_code, 200)
emmen.refresh_from_db()
content_type = mimetypes.guess_type(emmen.floor_plan.path)[0]
self.assertEqual(content_type, 'image/jpeg')
self.assertEqual(emmen.floor_plan.width, 500)
self.assertEqual(emmen.floor_plan.height, 500)
def test_upload_size_resized_png_rgba(self):
emmen = Zoo(name='Wildlands Adventure Zoo Emmen')
emmen.save()
with temp_imagefile(600, 600, 'png', 'RGBA') as uploaded_file:
response = self.client.post('/zoo/%s/floor_plan/' % emmen.id, data={'file': uploaded_file})
self.assertEqual(response.status_code, 200)
emmen.refresh_from_db()
content_type = mimetypes.guess_type(emmen.floor_plan.path)[0]
self.assertEqual(content_type, 'image/jpeg')
self.assertEqual(emmen.floor_plan.width, 500)
self.assertEqual(emmen.floor_plan.height, 500)
def test_upload_size_resized_jpeg(self):
emmen = Zoo(name='Wildlands Adventure Zoo Emmen')
emmen.save()
with temp_imagefile(600, 600, 'jpeg') as uploaded_file:
response = self.client.post('/zoo/%s/floor_plan/' % emmen.id, data={'file': uploaded_file})
self.assertEqual(response.status_code, 200)
emmen.refresh_from_db()
content_type = mimetypes.guess_type(emmen.floor_plan.path)[0]
self.assertEqual(content_type, 'image/jpeg')
self.assertEqual(emmen.floor_plan.width, 500)
self.assertEqual(emmen.floor_plan.height, 500)
def test_upload_file_in_post(self):
with temp_imagefile(500, 500, 'jpeg') as uploaded_file:
response = self.client.post('/zoo/', data={
'data': json.dumps({
'name': 'Wildlands Adventure Zoo Emmen',
'floor_plan': None,
}),
'file:floor_plan': uploaded_file,
})
self.assertEqual(response.status_code, 200)
data = jsonloads(response.content)
emmen = Zoo.objects.get(pk=data['id'])
content_type = mimetypes.guess_type(emmen.floor_plan.path)[0]
self.assertEqual(content_type, 'image/jpeg')
self.assertEqual(emmen.floor_plan.width, 500)
self.assertEqual(emmen.floor_plan.height, 500)
def test_upload_file_in_multiput(self):
with temp_imagefile(500, 500, 'jpeg') as uploaded_file:
boundary = 'my-boundary'
content_type = 'multipart/form-data; boundary=' + boundary
data = encode_multipart(boundary, {
'data': json.dumps({
'data': [{
'id': -1,
'name': 'Wildlands Adventure Zoo Emmen',
'floor_plan': None,
}],
}),
'file:data.0.floor_plan': uploaded_file,
})
response = self.client.put('/zoo/', content_type=content_type, data=data)
self.assertEqual(response.status_code, 200)
data = jsonloads(response.content)
emmen = Zoo.objects.get(pk=dict(data['idmap']['zoo'])[-1])
content_type = mimetypes.guess_type(emmen.floor_plan.path)[0]
self.assertEqual(content_type, 'image/jpeg')
self.assertEqual(emmen.floor_plan.width, 500)
self.assertEqual(emmen.floor_plan.height, 500)
def test_upload_no_data(self):
boundary = 'my-boundary'
content_type = 'multipart/form-data; boundary=' + boundary
data = encode_multipart(boundary, {})
response = self.client.put('/zoo/', content_type=content_type, data=data)
self.assertEqual(response.status_code, 418)
data = jsonloads(response.content)
self.assertEqual(data['code'], 'RequestError')
self.assertEqual(data['message'], 'data field is required in multipart body')
def test_upload_invalid_data(self):
boundary = 'my-boundary'
content_type = 'multipart/form-data; boundary=' + boundary
data = encode_multipart(boundary, {
'data': 'not valid json',
})
response = self.client.put('/zoo/', content_type=content_type, data=data)
self.assertEqual(response.status_code, 418)
data = jsonloads(response.content)
self.assertEqual(data['code'], 'RequestError')
self.assertEqual(data['message'], 'JSON parse error: Expecting value: line 1 column 1 (char 0).')
def test_upload_non_existing_file_path(self):
with temp_imagefile(500, 500, 'jpeg') as uploaded_file:
boundary = 'my-boundary'
content_type = 'multipart/form-data; boundary=' + boundary
data = encode_multipart(boundary, {
'data': json.dumps({
'data': [{
'id': -1,
'name': 'Wildlands Adventure Zoo Emmen',
'floor_plan': None,
}],
}),
'file:data.1.floor_plan': uploaded_file,
})
response = self.client.put('/zoo/', content_type=content_type, data=data)
self.assertEqual(response.status_code, 418)
data = jsonloads(response.content)
self.assertEqual(data['code'], 'RequestError')
self.assertEqual(data['message'], 'unexpected key at path: data.1')
def test_upload_non_integer_key_at_list(self):
with temp_imagefile(500, 500, 'jpeg') as uploaded_file:
boundary = 'my-boundary'
content_type = 'multipart/form-data; boundary=' + boundary
data = encode_multipart(boundary, {
'data': json.dumps({
'data': [{
'id': -1,
'name': 'Wildlands Adventure Zoo Emmen',
'floor_plan': None,
}],
}),
'file:data.foo.floor_plan': uploaded_file,
})
response = self.client.put('/zoo/', content_type=content_type, data=data)
self.assertEqual(response.status_code, 418)
data = jsonloads(response.content)
self.assertEqual(data['code'], 'RequestError')
self.assertEqual(data['message'], 'expected integer key at path: data.foo')
def test_upload_not_null_at_path(self):
with temp_imagefile(500, 500, 'jpeg') as uploaded_file:
boundary = 'my-boundary'
content_type = 'multipart/form-data; boundary=' + boundary
data = encode_multipart(boundary, {
'data': json.dumps({
'data': [{
'id': -1,
'name': 'Wildlands Adventure Zoo Emmen',
'floor_plan': 'foo',
}],
}),
'file:data.0.floor_plan': uploaded_file,
})
response = self.client.put('/zoo/', content_type=content_type, data=data)
self.assertEqual(response.status_code, 418)
data = jsonloads(response.content)
self.assertEqual(data['code'], 'RequestError')
self.assertEqual(data['message'], 'expected null at path: data.0.floor_plan')
def test_upload_using_path(self):
with temp_imagefile(500, 500, 'jpeg') as file:
zoo1 = Zoo(name='Zoo')
zoo1.floor_plan.save('plan.jpg', File(file), save=False)
zoo1.save()
response = self.client.post(
'/zoo/',
content_type='application/json',
data={
'name': 'Zoo 2',
'floor_plan': f'/api/zoo/{zoo1.pk}/floor_plan/',
},
)
self.assertEqual(response.status_code, 200)
data = jsonloads(response.content)
zoo2 = Zoo.objects.get(pk=data['id'])
# Make sure they are the same
self.assertIsNotNone(zoo2.floor_plan.name)
with zoo1.floor_plan.open() as f1, zoo2.floor_plan.open() as f2:
zoo1_content = f1.read()
self.assertEqual(zoo1_content, f2.read())
# Make sure we leave the floor plan of zoo1 intact if we now update zoo2
with temp_imagefile(500, 500, 'jpeg') as uploaded_file:
boundary = 'my-boundary'
content_type = 'multipart/form-data; boundary=' + boundary
data = encode_multipart(boundary, {
'data': json.dumps({'floor_plan': None}),
'file:floor_plan': uploaded_file,
})
# We need the on commit hook to delete the old path
with immediate_on_commit():
response = self.client.put(f'/zoo/{zoo2.pk}/', content_type=content_type, data=data)
self.assertEqual(response.status_code, 200)
zoo1.refresh_from_db()
zoo2.refresh_from_db()
with zoo1.floor_plan.open() as f:
self.assertEqual(f.read(), zoo1_content)
|
{
"content_hash": "46f6e75c623ff5b30f2ebf532649795b",
"timestamp": "",
"source": "github",
"line_count": 369,
"max_line_length": 110,
"avg_line_length": 35.078590785907856,
"alnum_prop": 0.6910537700865266,
"repo_name": "CodeYellowBV/django-binder",
"id": "57d515af7f27f1f951faeef26628562f201d922e",
"size": "12944",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_file_uploads.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "85"
},
{
"name": "Python",
"bytes": "578095"
}
],
"symlink_target": ""
}
|
from tastypie.resources import ModelResource, ALL_WITH_RELATIONS, ALL
from django.conf import settings
from django.conf.urls import url
from django.http import HttpResponse
from tastypie import fields
from tastypie.exceptions import ImmediateHttpResponse
from cbh_core_model.models import Project, PinnedCustomField, CustomFieldConfig
from cbh_core_ws.serializers import CustomFieldXLSSerializer
from cbh_core_ws.resources import UserResource
from tastypie.authorization import Authorization
from cbh_core_ws.authorization import ProjectListAuthorization
from tastypie.authentication import SessionAuthentication
from tastypie.paginator import Paginator
import json
import copy
import time
from django.core.urlresolvers import reverse
from cbh_core_ws.serializers import CustomFieldsSerializer
from django.db.models import Prefetch
from cbh_core_ws.resources import ProjectTypeResource, \
CustomFieldConfigResource
from django.contrib.auth.models import User
def build_content_type(format, encoding='utf-8'):
"""
Appends character encoding to the provided format if not already present.
"""
if 'charset' in format:
return format
return '%s; charset=%s' % (format, encoding)
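# For illustration:
#
#     build_content_type('application/json')
#     # -> 'application/json; charset=utf-8'
#     build_content_type('application/json; charset=utf-8')
#     # -> returned unchanged, since 'charset' is already present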
class ChemRegDataPointProjectFieldResource(ModelResource):
"""Provides the schema information about a field that is required by front end apps"""
edit_form = fields.DictField(
null=True, blank=False, help_text=None)
edit_schema = fields.DictField(
null=True, blank=False, help_text=None)
class Meta:
queryset = PinnedCustomField.objects.all()
always_return_data = True
resource_name = 'cbh_chemreg_datapoint_fields'
include_resource_uri = True
allowed_methods = ['get', 'post', 'patch', 'put']
default_format = 'application/json'
authentication = SessionAuthentication()
authorization = Authorization()
level = None
        description = {'api_dispatch_detail': '''
Provides information about the data types present in the flexible schema of the datapoint table.
For each field a set of attributes is returned:
hide_form/schema - an angular schema form element that can be used to hide this column from view
edit_form/schema - an angular schema form element that can be used to edit this field,
assuming it is edited as part of a larger data form classification object
- To change the key of the json schema, change the get_namespace method
filter_form/schema - an angular schema form element that can be used to filter this field
exclude_form/schema - an angular schema form element that can be used to exclude values from this field
sort_form/schema - an angular schema form element that can be used to sort this field
Things still to be implemented:
actions form - would be used for mapping functions etc.
autocomplete urls
''',
                       'api_dispatch_list': '''
Provides information about the data types present in the flexible schema of the datapoint table.
For each field a set of attributes is returned:
hide_form/schema - an angular schema form element that can be used to hide this column from view
edit_form/schema - an angular schema form element that can be used to edit this field,
assuming it is edited as part of a larger data form classification object
- To change the key of the json schema, change the get_namespace method
filter_form/schema - an angular schema form element that can be used to filter this field
exclude_form/schema - an angular schema form element that can be used to exclude values from this field
sort_form/schema - an angular schema form element that can be used to sort this field
Things still to be implemented:
actions form - would be used for mapping functions etc.
autocomplete urls
'''
                       }
def is_authenticated(self, request):
"""
Handles checking if the user is authenticated and dealing with
unauthenticated users.
Mostly a hook, this uses class assigned to ``authentication`` from
``Resource._meta``.
"""
        # NOTE: authentication is short-circuited here; every request is
        # treated as authenticated regardless of the configured class.
        return True
def save(self, bundle, skip_errors=False):
if bundle.via_uri:
return bundle
self.is_valid(bundle)
if bundle.errors and not skip_errors:
raise ImmediateHttpResponse(response=self.error_response(bundle.request, bundle.errors))
# Check if they're authorized.
# if bundle.obj.pk:
# self.authorized_update_detail(self.get_object_list(bundle.request), bundle)
# else:
# self.authorized_create_detail(self.get_object_list(bundle.request), bundle)
# Save FKs just in case.
self.save_related(bundle)
# Save the main object.
obj_id = self.create_identifier(bundle.obj)
if obj_id not in bundle.objects_saved or bundle.obj._state.adding:
bundle.obj.save()
bundle.objects_saved.add(obj_id)
# Now pick up the M2M bits.
m2m_bundle = self.hydrate_m2m(bundle)
self.save_m2m(m2m_bundle)
return bundle
def get_schema(self, request, **kwargs):
"""
Returns a serialized form of the schema of the resource.
Calls ``build_schema`` to generate the data. This method only responds
to HTTP GET.
Should return a HttpResponse (200 OK).
"""
# self.method_check(request, allowed=['get'])
# self.is_authenticated(request)
# self.throttle_check(request)
# self.log_throttled_access(request)
# bundle = self.build_bundle(request=request)
# self.authorized_read_detail(self.get_object_list(bundle.request), bundle)
return self.create_response(request, self.build_schema())
def get_namespace(self, bundle):
'''
Hook to return the dotted path to this field based on the level and the name of the field
The level name is formatted in the dehydrate method of the DataFormConfigResource
'''
return "{level}.project_data.%s" % (bundle.obj.get_space_replaced_name)
def get_namespace_for_action_key(self, bundle, action_type):
return action_type
def dehydrate_edit_form(self, bundle):
''' Slightly different implementation of this '''
if bundle.request.GET.get("empty", False):
return {}
data = bundle.obj.field_values[1]
data["key"] = bundle.obj.name
if bundle.obj.UISELECTTAG in bundle.obj.field_type:
data['options'] = {'refreshDelay': 0,
'async': {'url': "%s" % reverse('api_get_list_elasticsearch',
kwargs={'resource_name': 'cbh_compound_batches',
'api_name': settings.WEBSERVICES_NAME})}
}
return {"form": [data]}
def dehydrate_edit_schema(self, bundle):
''' '''
if bundle.request.GET.get("empty", False):
return {}
return {"properties": {bundle.obj.name: bundle.obj.field_values[0]}}
def authorized_update_detail(self, object_list, bundle):
"""
Handles checking of permissions to see if the user has authorization
to PUT this resource.
"""
return True
def authorized_create_detail(self, object_list, bundle):
"""
Handles checking of permissions to see if the user has authorization
to PUT this resource.
"""
return True
class ChemRegCustomFieldConfigResource(ModelResource):
    '''Return only the project type and custom field config name, as returning the full field list would be too expensive.'''
data_type = fields.ForeignKey("cbh_core_ws.resources.DataTypeResource",
'data_type', readonly=True, null=True, blank=False, default=None, full=True)
project_data_fields = fields.ToManyField(ChemRegDataPointProjectFieldResource, lambda bundle: PinnedCustomField.objects.filter(
custom_field_config_id=bundle.obj.id
).select_related("custom_field_config__project"), readonly=True, null=True, blank=False, default=None, full=True)
created_by = fields.ForeignKey(
"cbh_core_ws.resources.UserResource", 'created_by')
class Meta:
object_class = CustomFieldConfig
queryset = CustomFieldConfig.objects.select_related(
"created_by", "data_type",)
excludes = ("schemaform")
include_resource_uri = False
resource_name = 'cbh_chemreg_custom_field_config'
authentication = SessionAuthentication()
authorization = Authorization()
include_resource_uri = True
default_format = 'application/json'
serializer = CustomFieldXLSSerializer()
# serializer = Serializer()
filtering = {"id": ALL}
allowed_methods = ['get', 'post', 'put', 'patch']
        description = {'api_dispatch_detail': '''
Provides data about a single level of a data form config.
data_type: A string to describe what "sort" of data this is (fields will generally be the same as other objects of this data type, but that is up to the curator)
project_data_fields:
The fields that are in this particular custom field config:
Provides information about the data types present in the flexible schema of the datapoint table.
For each field a set of attributes is returned:
hide_form/schema - an angular schema form element that can be used to hide this column from view
edit_form/schema - an angular schema form element that can be used to edit this field,
assuming it is edited as part of a larger data form classification object
- To change the key of the json schema, change the get_namespace method
filter_form/schema - an angular schema form element that can be used to filter this field
exclude_form/schema - an angular schema form element that can be used to exclude values from this field
sort_form/schema - an angular schema form element that can be used to sort this field
Things still to be implemented:
actions form - would be used for mapping functions etc.
autocomplete urls
''',
                       'api_dispatch_list': '''
Provides data about a single level of a data form config.
data_type: A string to describe what "sort" of data this is (fields will generally be the same as other objects of this data type, but that is up to the curator)
project_data_fields:
The fields that are in this particular custom field config:
Provides information about the data types present in the flexible schema of the datapoint table.
For each field a set of attributes is returned:
hide_form/schema - an angular schema form element that can be used to hide this column from view
edit_form/schema - an angular schema form element that can be used to edit this field,
assuming it is edited as part of a larger data form classification object
- To change the key of the json schema, change the get_namespace method
filter_form/schema - an angular schema form element that can be used to filter this field
exclude_form/schema - an angular schema form element that can be used to exclude values from this field
sort_form/schema - an angular schema form element that can be used to sort this field
Things still to be implemented:
actions form - would be used for mapping functions etc.
autocomplete urls
'''
                       }
def hydrate_created_by(self, bundle):
user = get_user_model().objects.get(pk=bundle.request.user.pk)
bundle.obj.created_by = user
return bundle
def get_schema(self, request, **kwargs):
"""
Returns a serialized form of the schema of the resource.
Calls ``build_schema`` to generate the data. This method only responds
to HTTP GET.
Should return a HttpResponse (200 OK).
"""
return self.create_response(request, self.build_schema())
def create_response(self, request, data, response_class=HttpResponse, **response_kwargs):
"""
Extracts the common "which-format/serialize/return-response" cycle.
Mostly a useful shortcut/hook.
"""
desired_format = self.determine_format(request)
serialized = self.serialize(request, data, desired_format)
rc = response_class(content=serialized, content_type=build_content_type(
desired_format), **response_kwargs)
if(desired_format == 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'):
rc['Content-Disposition'] = 'attachment; filename=project_data_explanation.xlsx'
return rc
class ChemregProjectResource( ModelResource):
project_type = fields.ForeignKey(
ProjectTypeResource, 'project_type', blank=False, null=False, full=True)
custom_field_config = fields.ForeignKey(ChemRegCustomFieldConfigResource,
'custom_field_config', blank=False, null=False, full=True)
valid_cache_get_keys = ['format', 'limit', 'project_key',
'schemaform']
class Meta:
queryset = Project.objects.all()
authentication = SessionAuthentication()
paginator_class = Paginator
allowed_methods = ['get']
resource_name = 'cbh_projects'
authorization = ProjectListAuthorization()
include_resource_uri = False
default_format = 'application/json'
# serializer = Serializer()
serializer = CustomFieldsSerializer()
filtering = {'project_key': ALL_WITH_RELATIONS}
    def get_object_list(self, request):
        return super(ChemregProjectResource, self).get_object_list(
            request
        ).prefetch_related(
            Prefetch('project_type')
        ).order_by('-modified')
def prepend_urls(self):
return [url(r"^(?P<resource_name>%s)/custom_fields/$"
% self._meta.resource_name,
self.wrap_view('get_custom_fields'),
name='get_custom_fields')]
    def get_custom_fields(self, request):
        return super(ChemregProjectResource, self).get_object_list(
            request
        ).prefetch_related(
            Prefetch('custom_field_config')
        )
def get_searchform(self, bundle):
'''Note that the form here is expected to have the UOx id as the first item'''
ur = UserResource()
uri = ur.get_resource_uri()
return {
'cf_form': [{
'htmlClass': 'col-sm-10',
'key': 'search_custom_fields__kv_any',
'disableSuccessState': True,
'feedback': False,
'options': {'refreshDelay': 0,
'async': {'url': reverse('api_get_elasticsearch_autocomplete',
kwargs={'resource_name': 'cbh_compound_batches',
'api_name': settings.WEBSERVICES_NAME})}},
}],
'cf_schema': {'required': [], 'type': 'object',
'properties': {'search_custom_fields__kv_any': {
'type': 'array',
'format': 'uiselect',
'items': [],
'placeholder': 'Filter project data',
'title': 'Project data values:',
}}},
'form': [
{
'key': 'related_molregno__chembl__chembl_id__in',
'title': '%s ID' % settings.ID_PREFIX,
'placeholder': 'Search multiple IDs',
'feedback': False,
'htmlClass': 'col-md-6 col-xs-6',
'options': {'refreshDelay': 0,
'async': {'url': reverse('api_get_elasticsearch_ids',
kwargs={'resource_name': 'cbh_compound_batches',
'api_name': settings.WEBSERVICES_NAME})}},
},
{
'key': 'creator_uri',
'htmlClass': 'col-md-6 col-xs-6',
'placeholder': 'Select users to search',
'feedback': False,
},
{
'key': 'project__project_key__in',
'placeholder': 'Select projects to search',
'htmlClass': 'col-md-6 col-xs-6',
'feedback': False,
'description': 'Search for projects in order to limit the choice of fields on show. Select a single project if you want to edit data.',
'disableSuccessState': True,
'validationMessage': {'default': 'Please select a project if you wish to edit data.'}
},
{
'key': 'multiple_batch_id',
'htmlClass': 'col-md-6 col-xs-6',
'disableSuccessState': True,
'feedback': False,
},
{
'key': 'dateStart',
'type': 'datepicker',
'minDate': '2004-01-01',
'htmlClass': 'col-md-6 col-xs-6',
'disableSuccessState': True,
'feedback': False,
'pickadate': {
'selectYears': True,
'selectMonths': True
},
},
{
'key': 'dateEnd',
'type': 'datepicker',
'minDate': '2004-01-01',
'htmlClass': 'col-md-6 col-xs-6',
'disableSuccessState': True,
'feedback': False,
'pickadate': {'selectYears': True,
'selectMonths': True},
},
{
'htmlClass': 'col-md-6 col-xs-6',
'disableSuccessState': True,
'feedback': False,
'key': 'functional_group',
},
{
'key': 'smiles',
'placeholder': 'Search SMILES or SMARTS string',
'append': 'today',
'feedback': False,
'htmlClass': 'col-md-6 col-xs-6',
'disableSuccessState': True,
},
{
'key': 'substruc',
'style': {'selected': 'btn-success',
'unselected': 'btn-default'},
'htmlClass': 'col-md-6 col-xs-6',
'type': 'radiobuttons',
'disableSuccessState': True,
'feedback': False,
'titleMap': [{'value': 'with_substructure',
'name': 'Substructure'},
{'value': 'flexmatch',
'name': 'Exact Match'}],
},
{
'htmlClass': 'col-md-6 col-xs-6',
'key': 'search_custom_fields__kv_any',
'disableSuccessState': True,
'help': 'Searching using this filter will bring back results that match an OR pattern within the same data category, with AND across data categories, i.e. results which contain this item within category a OR that item within category a AND that item within category b.',
'feedback': False,
'options': {'refreshDelay': 0,
'async': {'url': reverse('api_get_list_elasticsearch',
kwargs={'resource_name': 'cbh_compound_batches',
'api_name': settings.WEBSERVICES_NAME})},
},
},
{
'key': 'archived',
'style': {'selected': 'btn-success',
'unselected': 'btn-default'},
'htmlClass': 'col-md-6 col-xs-6',
'type': 'radiobuttons',
'disableSuccessState': True,
'feedback': False,
'titleMap': [
{'value': 'false',
'name': 'Normal mode'},
{'value': 'true',
'name': 'Archive mode'},]
},
],
'schema': {'required': [], 'type': 'object', 'properties': {
'related_molregno__chembl__chembl_id__in': {
'type': 'array',
'format': 'uiselect',
},
                'creator_uri': {
                    'type': 'array',
                    'format': 'uiselect',
                    'title': 'Compound batch created by',
                    'htmlClass': 'col-md-6 col-xs-6',
                    'placeholder': 'Search user who created the batch',
                    'options': {'searchDescriptions': False},
'items': sorted([
{'label': user.first_name + " " + user.last_name , "value" : uri + '/' + str(user.id) } if user.first_name else {'label': user.username , "value" : uri + '/' + str(user.id) }
for user in User.objects.exclude(pk=-1)
], key=lambda k: k['label'])
},
'multiple_batch_id': {'title': 'Upload ID',
'type': 'string'},
'project__project_key__in': {
'title': 'Project',
'type': 'array',
'format': 'uiselect',
'items': [{'label': p.obj.name,
'value': p.obj.project_key} for p in
bundle['objects']],
},
'functional_group': {
'title': 'Functional Group',
'type': 'string',
'format': 'uiselect',
'placeholder': 'Search chemical groups',
'options': {'searchDescriptions': False},
'default': '',
'copyValueTo': 'smiles',
'items': [{'label': 'None', 'value': ''}] + sorted([
{'label': 'Alkyl Carbon', 'value': '[CX4]'},
{'label': 'Allenic Carbon',
'value': '[$([CX2](=C)=C)]'},
{'label': 'Vinylic Carbon',
'value': '[$([CX3]=[CX3])]'},
{'label': 'Acetylenic Carbon',
'value': '[$([CX2]#C)]'},
{'label': 'Arene', 'value': 'c'},
{'label': 'Carbonyl group. Low specificity',
'value': '[CX3]=[OX1]'},
{'label': 'Carbonyl group',
'value': '[$([CX3]=[OX1]),$([CX3+]-[OX1-])]'},
{'label': 'Carbonyl with Carbon',
'value': '[CX3](=[OX1])C'},
{'label': 'Carbonyl with Nitrogen.',
'value': '[OX1]=CN'},
{'label': 'Carbonyl with Oxygen.',
'value': '[CX3](=[OX1])O'},
{'label': 'Acyl Halide',
'value': '[CX3](=[OX1])[F,Cl,Br,I]'},
{'label': 'Aldehyde', 'value': '[CX3H1](=O)[#6]'
},
{'label': 'Anhydride',
'value': '[CX3](=[OX1])[OX2][CX3](=[OX1])'},
{'label': 'Amide',
'value': '[NX3][CX3](=[OX1])[#6]'},
{'label': 'Amidinium',
'value': '[NX3][CX3]=[NX3+]'},
{'label': 'Carbamate.',
'value': '[NX3,NX4+][CX3](=[OX1])[OX2,OX1-]'},
{'label': 'Carbamic ester',
'value': '[NX3][CX3](=[OX1])[OX2H0]'},
{'label': 'Carbamic acid.',
'value': '[NX3,NX4+][CX3](=[OX1])[OX2H,OX1-]'
},
{'label': 'Carboxylate Ion.',
'value': '[CX3](=O)[O-]'},
{'label': 'Carbonic Acid or Carbonic Ester',
'value': '[CX3](=[OX1])(O)O'},
{'label': 'Carbonic Acid or Carbonic Acid-Ester', 'value': '[CX3](=[OX1])([OX2])[OX2H,OX1H0-1]'
},
{'label': 'Carbonic Ester (carbonic acid diester)',
'value': 'C[OX2][CX3](=[OX1])[OX2]C'},
{'label': 'Carboxylic acid',
'value': '[CX3](=O)[OX2H1]'},
{'label': 'Carboxylic acid or conjugate base.',
'value': '[CX3](=O)[OX1H0-,OX2H1]'},
{'label': 'Cyanamide',
'value': '[NX3][CX2]#[NX1]'},
{'label': 'Ester Also hits anhydrides',
'value': '[#6][CX3](=O)[OX2H0][#6]'},
{'label': 'Ketone', 'value': '[#6][CX3](=O)[#6]'
},
{'label': 'Ether', 'value': '[OD2]([#6])[#6]'},
{'label': 'Hydrogen Atom', 'value': '[H]'},
{'label': 'Not a Hydrogen Atom',
'value': '[!#1]'},
{'label': 'Proton', 'value': '[H+]'},
{'label': 'Mono-Hydrogenated Cation',
'value': '[+H]'},
{'label': 'Not Mono-Hydrogenated',
'value': '[!H] or [!H1]'},
{'label': 'Primary or secondary amine, not amide.',
'value': '[NX3;H2,H1;!$(NC=O)]'},
{'label': 'Enamine', 'value': '[NX3][CX3]=[CX3]'
},
{'label': 'Primary amine, not amide.',
'value': "[NX3;H2;!$(NC=[!#6]);!$(NC#[!#6])][#6] Not amide (C not double bonded to a hetero-atom), not ammonium ion (N must be 3-connected), not ammonia (N's H-count can't be 3), not cyanamide (C not triple bonded to a hetero-atom)"
},
{'label': 'Two primary or secondary amines',
'value': '[NX3;H2,H1;!$(NC=O)].[NX3;H2,H1;!$(NC=O)]'
},
{'label': 'Enamine or Aniline Nitrogen',
'value': '[NX3][$(C=C),$(cc)]'},
{'label': 'Azide group.',
'value': '[$(*-[NX2-]-[NX2+]#[NX1]),$(*-[NX2]=[NX2+]=[NX1-])]'
},
{'label': 'Azide ion.',
'value': '[$([NX1-]=[NX2+]=[NX1-]),$([NX1]#[NX2+]-[NX1-2])]'
},
{'label': 'Nitrogen.', 'value': '[#7]'},
{'label': 'Azo Nitrogen. Low specificity.',
'value': '[NX2]=N'},
{'label': 'Azo Nitrogen.diazene',
'value': '[NX2]=[NX2]'},
{'label': 'Azoxy Nitrogen.',
'value': '[$([NX2]=[NX3+]([O-])[#6]),$([NX2]=[NX3+0](=[O])[#6])]'
},
{'label': 'Diazo Nitrogen',
'value': '[$([#6]=[N+]=[N-]),$([#6-]-[N+]#[N])]'
},
{'label': 'Azole.',
'value': '[$([nr5]:[nr5,or5,sr5]),$([nr5]:[cr5]:[nr5,or5,sr5])]'
},
{'label': 'Hydrazine H2NNH2',
'value': '[NX3][NX3]'},
{'label': 'Hydrazone C=NNH2',
'value': '[NX3][NX2]=[*]'},
{'label': 'Substituted imine',
'value': '[CX3;$([C]([#6])[#6]),$([CH][#6])]=[NX2][#6]'
},
{'label': 'Substituted or un-substituted imine',
'value': '[$([CX3]([#6])[#6]),$([CX3H][#6])]=[$([NX2][#6]),$([NX2H])]'
},
{'label': 'Iminium', 'value': '[NX3+]=[CX3]'},
{'label': 'Unsubstituted dicarboximide',
'value': '[CX3](=[OX1])[NX3H][CX3](=[OX1])'},
{'label': 'Substituted dicarboximide',
'value': '[CX3](=[OX1])[NX3H0]([#6])[CX3](=[OX1])'
},
{'label': 'Dicarboxdiimide',
'value': '[CX3](=[OX1])[NX3H0]([NX3H0]([CX3](=[OX1]))[CX3](=[OX1]))[CX3](=[OX1])'
},
{'label': 'Nitrate group',
'value': '[$([NX3](=[OX1])(=[OX1])O),$([NX3+]([OX1-])(=[OX1])O)]'
},
{'label': 'Nitrate Anion',
'value': '[$([OX1]=[NX3](=[OX1])[OX1-]),$([OX1]=[NX3+]([OX1-])[OX1-])]'
},
{'label': 'Nitrile', 'value': '[NX1]#[CX2]'},
{'label': 'Isonitrile', 'value': '[CX1-]#[NX2+]'
},
{'label': 'Nitro group.',
'value': '[$([NX3](=O)=O),$([NX3+](=O)[O-])][!#8] Hits both forms.'
},
{'label': 'Two Nitro groups',
'value': '[$([NX3](=O)=O),$([NX3+](=O)[O-])][!#8].[$([NX3](=O)=O),$([NX3+](=O)[O-])][!#8]'
},
{'label': 'Nitroso-group',
'value': '[NX2]=[OX1]'},
{'label': 'N-Oxide',
'value': '[$([#7+][OX1-]),$([#7v5]=[OX1]);!$([#7](~[O])~[O]);!$([#7]=[#7])]'
},
{'label': 'Hydroxyl', 'value': '[OX2H]'},
{'label': 'Hydroxyl in Alcohol',
'value': '[#6][OX2H]'},
{'label': 'Hydroxyl in Carboxylic Acid',
'value': '[OX2H][CX3]=[OX1]'},
{'label': 'Hydroxyl in H-O-P-',
'value': '[OX2H]P'},
{'label': 'Enol', 'value': '[OX2H][#6X3]=[#6]'
},
{'label': 'Phenol', 'value': '[OX2H][cX3]:[c]'
},
{'label': 'Enol or Phenol',
'value': '[OX2H][$(C=C),$(cc)]'},
{'label': 'Hydroxyl_acidic',
'value': '[$([OH]-*=[!#6])]'},
{'label': 'Peroxide groups.',
'value': '[OX2,OX1-][OX2,OX1-]'},
{'label': 'Phosphoric_acid groups.',
'value': '[$(P(=[OX1])([$([OX2H]),$([OX1-]),$([OX2]P)])([$([OX2H]),$([OX1-]),$([OX2]P)])[$([OX2H]),$([OX1-]),$([OX2]P)]),$([P+]([OX1-])([$([OX2H]),$([OX1-]),$([OX2]P)])([$([OX2H]),$([OX1-]),$([OX2]P)])[$([OX2H]),$([OX1-]),$([OX2]P)])]'
},
{'label': 'Phosphoric_ester groups.',
'value': '[$(P(=[OX1])([OX2][#6])([$([OX2H]),$([OX1-]),$([OX2][#6])])[$([OX2H]),$([OX1-]),$([OX2][#6]),$([OX2]P)]),$([P+]([OX1-])([OX2][#6])([$([OX2H]),$([OX1-]),$([OX2][#6])])[$([OX2H]),$([OX1-]),$([OX2][#6]),$([OX2]P)])]'
},
{'label': 'Carbo-Thiocarboxylate',
'value': '[S-][CX3](=S)[#6]'},
{'label': 'Carbo-Thioester',
'value': 'S([#6])[CX3](=O)[#6]'},
{'label': 'Thio analog of carbonyl',
'value': '[#6X3](=[SX1])([!N])[!N]'},
{'label': 'Thiol, Sulfide or Disulfide Sulfur',
'value': '[SX2]'},
{'label': 'Thiol', 'value': '[#16X2H]'},
{'label': 'Sulfur with at-least one hydrogen.',
'value': '[#16!H0]'},
{'label': 'Thioamide',
'value': '[NX3][CX3]=[SX1]'},
{'label': 'Sulfide', 'value': '[#16X2H0]'},
{'label': 'Mono-sulfide',
'value': '[#16X2H0][!#16]'},
{'label': 'Di-sulfide',
'value': '[#16X2H0][#16X2H0]'},
{'label': 'Two Sulfides',
'value': '[#16X2H0][!#16].[#16X2H0][!#16]'},
{'label': 'Sulfinate',
'value': '[$([#16X3](=[OX1])[OX2H0]),$([#16X3+]([OX1-])[OX2H0])]'
},
{'label': 'Sulfinic Acid',
'value': '[$([#16X3](=[OX1])[OX2H,OX1H0-]),$([#16X3+]([OX1-])[OX2H,OX1H0-])]'
},
{'label': 'Sulfone. Low specificity.',
'value': '[$([#16X4](=[OX1])=[OX1]),$([#16X4+2]([OX1-])[OX1-])]'
},
{'label': 'Sulfone. High specificity.',
'value': '[$([#16X4](=[OX1])(=[OX1])([#6])[#6]),$([#16X4+2]([OX1-])([OX1-])([#6])[#6])]'
},
{'label': 'Sulfonic acid. High specificity.',
'value': '[$([#16X4](=[OX1])(=[OX1])([#6])[OX2H,OX1H0-]),$([#16X4+2]([OX1-])([OX1-])([#6])[OX2H,OX1H0-])]'
},
{'label': 'Sulfonate',
'value': '[$([#16X4](=[OX1])(=[OX1])([#6])[OX2H0]),$([#16X4+2]([OX1-])([OX1-])([#6])[OX2H0])]'
},
{'label': 'Sulfonamide.',
'value': '[$([#16X4]([NX3])(=[OX1])(=[OX1])[#6]),$([#16X4+2]([NX3])([OX1-])([OX1-])[#6])]'
},
{'label': 'Carbo-azosulfone',
'value': '[SX4](C)(C)(=O)=N'},
{'label': 'Sulfonamide',
'value': '[$([SX4](=[OX1])(=[OX1])([!O])[NX3]),$([SX4+2]([OX1-])([OX1-])([!O])[NX3])]'
},
{'label': 'Sulfoxide Low specificity.',
'value': '[$([#16X3]=[OX1]),$([#16X3+][OX1-])]'
},
{'label': 'Sulfoxide High specificity',
'value': '[$([#16X3](=[OX1])([#6])[#6]),$([#16X3+]([OX1-])([#6])[#6])]'
},
{'label': 'Sulfate',
'value': '[$([#16X4](=[OX1])(=[OX1])([OX2H,OX1H0-])[OX2][#6]),$([#16X4+2]([OX1-])([OX1-])([OX2H,OX1H0-])[OX2][#6])]'
},
{'label': 'Sulfuric acid ester (sulfate ester) Low specificity.',
'value': '[$([SX4](=O)(=O)(O)O),$([SX4+2]([O-])([O-])(O)O)]'
},
{'label': 'Sulfuric Acid Diester.',
'value': '[$([#16X4](=[OX1])(=[OX1])([OX2][#6])[OX2][#6]),$([#16X4](=[OX1])(=[OX1])([OX2][#6])[OX2][#6])]'
},
{'label': 'Sulfamate.',
'value': '[$([#16X4]([NX3])(=[OX1])(=[OX1])[OX2][#6]),$([#16X4+2]([NX3])([OX1-])([OX1-])[OX2][#6])]'
},
{'label': 'Sulfamic Acid.',
'value': '[$([#16X4]([NX3])(=[OX1])(=[OX1])[OX2H,OX1H0-]),$([#16X4+2]([NX3])([OX1-])([OX1-])[OX2H,OX1H0-])]'
},
{'label': 'Sulfenic acid.',
'value': '[#16X2][OX2H,OX1H0-]'},
{'label': 'Sulfenate.',
'value': '[#16X2][OX2H0]'},
{'label': 'Any carbon attached to any halogen',
'value': '[#6][F,Cl,Br,I]'},
{'label': 'Halogen', 'value': '[F,Cl,Br,I]'},
{'label': 'Sulfide', 'value': '[#16X2H0]'},
{'label': 'Mono-sulfide',
'value': '[#16X2H0][!#16]'},
{'label': 'Di-sulfide',
'value': '[#16X2H0][#16X2H0]'},
{'label': 'Hydrogen-bond acceptor',
'value': '[!$([#6,F,Cl,Br,I,o,s,nX3,#7v5,#15v5,#16v4,#16v6,*+1,*+2,*+3])]'
},
{'label': 'Hydrogen-bond donor.',
'value': '[!$([#6,H0,-,-2,-3])]'},
], key=lambda k: k['label']),
},
'dateStart': {
'title': 'Added after',
'type': 'string',
'format': 'date',
'style': {'margin-right': '30px;'},
},
'dateEnd': {'title': 'Added before', 'type': 'string',
'format': 'date'},
'smiles': {'title': 'SMILES or SMARTS',
'type': 'string'},
'substruc': {
'title': 'Structural search type',
'type': 'string',
'enum': ['with_substructure', 'flexmatch'],
'default': 'with_substructure',
},
'search_custom_fields__kv_any': {
'type': 'array',
'format': 'uiselect',
'items': [],
'placeholder': 'Choose column and value...',
'title': 'Filter by project data values:',
},
'archived': {
'title': 'Enable/disable archive mode',
'type': 'string',
'enum': ['true', 'false'],
'default': 'false',
},
}},
}
def alter_list_data_to_serialize(self, request, bundle):
'''Here we append a list of tags to the data of the GET request if the
search fields are required'''
userres = UserResource()
userbundle = userres.build_bundle(obj=request.user,
request=request)
userbundle = userres.full_dehydrate(userbundle)
bundle['user'] = userbundle.data
editor_projects = \
self._meta.authorization.editor_projects(request)
for bun in bundle['objects']:
bun.data['editor'] = bun.obj.id in editor_projects
if request.GET.get('schemaform', None):
searchfields = set([])
searchfield_items = []
for bun in bundle['objects']:
bun.data['editor'] = bun.obj.id in editor_projects
            bundle['searchform'] = self.get_searchform(bundle)
# if self.determine_format(request) \
# == 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet' \
# or request.GET.get('format') == 'xls':
# cfr_string = \
# self.get_object_list(request).filter(id=request.GET.get('project_key'
# ))[0].custom_field_config.schemaform
# cfr_json = json.loads(cfr_string)
# bundle['custom_field_config'] = cfr_json['form']
return bundle
def create_response(
self,
request,
data,
response_class=HttpResponse,
**response_kwargs ):
"""
Extracts the common "which-format/serialize/return-response" cycle.
Mostly a useful shortcut/hook.
"""
desired_format = self.determine_format(request)
serialized = self.serialize(request, data, desired_format)
rc = response_class(content=serialized,
content_type=build_content_type(desired_format),
**response_kwargs)
if desired_format \
== 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet':
rc['Content-Disposition'] = \
'attachment; filename=project_data_explanation.xlsx'
return rc
|
{
"content_hash": "7f438ef5a69b9a1cd0b356569c9cc040",
"timestamp": "",
"source": "github",
"line_count": 877,
"max_line_length": 290,
"avg_line_length": 47.66020524515393,
"alnum_prop": 0.4560266041437389,
"repo_name": "strets123/cbh_chembl_ws_extension",
"id": "41e6f8adc0e99580f6ce7ccbd2f27930bc9fc205",
"size": "41840",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cbh_chembl_ws_extension/projects.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Cucumber",
"bytes": "33048"
},
{
"name": "HTML",
"bytes": "9278"
},
{
"name": "Makefile",
"bytes": "1286"
},
{
"name": "Python",
"bytes": "192416"
}
],
"symlink_target": ""
}
|
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
'''
__author__ = 'Marko A. Rodriguez (http://markorodriguez.com)'
from gremlin_python.process.graph_traversal import GraphTraversalSource
from gremlin_python.process.traversal import TraversalStrategies
class Graph(object):
def __init__(self):
if self.__class__ not in TraversalStrategies.global_cache:
TraversalStrategies.global_cache[self.__class__] = TraversalStrategies()
def traversal(self):
return GraphTraversalSource(self, TraversalStrategies.global_cache[self.__class__])
def __repr__(self):
return "graph[empty]"
class Element(object):
def __init__(self, id, label):
self.id = id
self.label = label
def __eq__(self, other):
return isinstance(other, self.__class__) and self.id == other.id
def __hash__(self):
return hash(self.id)
class Vertex(Element):
def __init__(self, id, label="vertex"):
Element.__init__(self, id, label)
def __repr__(self):
return "v[" + str(self.id) + "]"
class Edge(Element):
def __init__(self, id, outV, label, inV):
Element.__init__(self, id, label)
self.outV = outV
self.inV = inV
def __repr__(self):
return "e[" + str(self.id) + "][" + str(self.outV.id) + "-" + self.label + "->" + str(self.inV.id) + "]"
class VertexProperty(Element):
def __init__(self, id, label, value):
Element.__init__(self, id, label)
self.value = value
self.key = self.label
def __repr__(self):
return "vp[" + str(self.label) + "->" + str(self.value)[0:20] + "]"
class Property(object):
def __init__(self, key, value):
self.key = key
self.value = value
def __repr__(self):
return "p[" + str(self.key) + "->" + str(self.value)[0:20] + "]"
def __eq__(self, other):
return isinstance(other, self.__class__) and self.key == other.key and self.value == other.value
def __hash__(self):
return hash(self.key) + hash(self.value)
class Path(object):
def __init__(self, labels, objects):
self.labels = labels
self.objects = objects
def __repr__(self):
return str(self.objects)
def __eq__(self, other):
return isinstance(other, self.__class__) and self.objects == other.objects and self.labels == other.labels
def __hash__(self):
return hash(str(self.objects)) + hash(str(self.labels))
def __getitem__(self, key):
if isinstance(key, str):
objects = []
for i, labels in enumerate(self.labels):
if key in labels:
objects.append(self.objects[i])
if 0 == len(objects):
raise KeyError("The step with label " + key + " does not exist")
return objects if len(objects) > 1 else objects[0]
elif isinstance(key, int):
return self.objects[key]
else:
raise TypeError("The path access key must be either a string label or integer index")
def __len__(self):
return len(self.objects)
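# A short usage sketch for Path (values are illustrative):
#
#     p = Path([set(["a"]), set(["b", "c"])], [1, 2])
#     p["a"]   # -> 1   (a single match returns the bare object)
#     p["c"]   # -> 2
#     p[1]     # -> 2   (integer access indexes the objects list)
#     p["x"]   # raises KeyError: the step with label x does not exist
#     len(p)   # -> 2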
|
{
"content_hash": "516c3359a9239501509afa6e315679cf",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 114,
"avg_line_length": 31.401639344262296,
"alnum_prop": 0.6188984599321325,
"repo_name": "samiunn/incubator-tinkerpop",
"id": "d77cde1daa2c10a157b07211340b917ffb8d4098",
"size": "3831",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "gremlin-python/src/main/jython/gremlin_python/structure/graph.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "4544"
},
{
"name": "Groovy",
"bytes": "316683"
},
{
"name": "Java",
"bytes": "5483560"
},
{
"name": "Python",
"bytes": "1481"
},
{
"name": "Shell",
"bytes": "15070"
}
],
"symlink_target": ""
}
|
import logging
import os
from urllib import parse
from wsgiref.util import FileWrapper
from django.http import HttpResponse, FileResponse, StreamingHttpResponse
from django.shortcuts import render
from django.conf import settings
from speech.models import Record
from speech.forms import FeedbackForm
from speech import config
def index(request):
    context = {
        # Newest records first; ordering in the database avoids materialising
        # the whole table just to reverse it in Python.
        'records': Record.objects.filter(error='').order_by('-id')[:config.AMOUNT_OF_RECORDS_TO_DISPLAY],
        'max_record_length': config.MAX_RECORD_LENGTH,
    }
    return render(request, 'speech/index.html', context=context)
def contacts(request):
def accepted(request):
return render(request, 'speech/accepted.html')
form = FeedbackForm()
if request.method == 'POST':
form = FeedbackForm(request.POST)
if form.is_valid():
form.save()
return accepted(request)
context = {
'form': form,
}
return render(request, 'speech/contacts.html', context=context)
def talk(request):
if request.method == 'GET' and 'text' in request.GET.keys():
message = parse.unquote(request.GET['text'])
record = Record(text=message[: config.MAX_RECORD_LENGTH])
try:
            if any(symbol not in config.ALLOWED_SYMBOLS for symbol in message):
raise ValueError('unexpected symbol')
if len(message) > config.MAX_RECORD_LENGTH:
raise ValueError('value is too long')
filename = os.path.join(settings.MEDIA_DIR, 'kadavr.mp3')
file = open(filename, 'rb')
response = FileResponse(file)
response['Content-Type'] = 'audio/vnd.wave'
response['Content-Disposition'] = 'attachment; filename=sound.wav'
response['Content-Length'] = os.path.getsize(filename)
record.save()
return response
        except Exception as error:  # Exception, not BaseException: don't swallow KeyboardInterrupt/SystemExit
            record.error = str(error)
            record.save()
            return error500(request)
else:
return error500(request)
def error(request, reason="", status=None):
context = {
'status': status,
}
return render(request, 'speech/error.html', context=context, status=status if status else 200)
def error400(*args, **kwargs):
return error(*args, **kwargs, status=400)
def error403(*args, **kwargs):
return error(*args, **kwargs, status=403)
def error404(*args, **kwargs):
return error(*args, **kwargs, status=404)
def error500(*args, **kwargs):
return error(*args, **kwargs, status=500)
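# The error* views above are shaped to be wired in as Django's error
# handlers; a minimal sketch of what the project urls.py might contain
# (hypothetical, not part of this file):
#
#     handler400 = 'speech.views.error400'
#     handler403 = 'speech.views.error403'
#     handler404 = 'speech.views.error404'
#     handler500 = 'speech.views.error500'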
|
{
"content_hash": "872375e5722b274453383a983de7208d",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 98,
"avg_line_length": 28.797752808988765,
"alnum_prop": 0.6379243074522044,
"repo_name": "Lvadislav/russian-speech-site",
"id": "0ba7e69515d59d4c6bf9fb80c9259b7751d28ec8",
"size": "2563",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "speech/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "172012"
},
{
"name": "HTML",
"bytes": "6577"
},
{
"name": "JavaScript",
"bytes": "309766"
},
{
"name": "Python",
"bytes": "24289"
}
],
"symlink_target": ""
}
|
import re
import os
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
import csv
from product_spiders.items import Product, ProductLoader
class VanCafeSpider(BaseSpider):
name = 'van-cafe.com'
allowed_domains = ['www.van-cafe.com']
start_urls = ('http://www.van-cafe.com/',)
def __init__(self, *args, **kwargs):
super(VanCafeSpider, self).__init__(*args, **kwargs)
def parse(self, response):
if not isinstance(response, HtmlResponse):
return
hxs = HtmlXPathSelector(response)
# categories
categories = hxs.select('//div[@id="midnav"]/div[@class="midnavlinks"]//a/@href').extract()
for url in categories:
url = urljoin_rfc(get_base_url(response), url)
yield Request(url)
# subcategories
subcategories = hxs.select('//td[not(child::a[not(child::img)])]/span[@class="cellheader"]//a/@href').extract()
for url in subcategories:
url = urljoin_rfc(get_base_url(response), url)
yield Request(url)
# pages
next_page = hxs.select('//span[@class="nextprev"]/a[contains(text(),"Next")]/@href').extract()
if next_page:
url = urljoin_rfc(get_base_url(response), next_page[0])
yield Request(url)
# products
for product in self.parse_product(response):
yield product
def parse_product(self, response):
if not isinstance(response, HtmlResponse):
return
hxs = HtmlXPathSelector(response)
# products
products = hxs.select('//div[@id="mtbody"]//table//table//a/img/../..')
for product in products:
product_loader = ProductLoader(item=Product(), selector=product)
#product_loader.add_xpath('name', './/span[@class="cellheader"]/a/text()')
            product_loader.add_xpath('price', './/span[@class="pricetext"]/text()',
                                     re=r'.*\$(.*[0-9])')
sku = product.select('.//span[@class="sku"]/text()').extract()
if not sku:
continue
sku = re.sub('[.\- ]', '', sku[0])
product_loader.add_value('sku', sku)
if sku:
product_loader.add_value('name', sku.lower())
else:
product_loader.add_xpath('name', './/span[@class="cellheader"]/a/text()')
url = product.select('.//span[@class="cellheader"]/a/@href').extract()
if not url:
continue
url = urljoin_rfc(get_base_url(response), url[0])
product_loader.add_value('url', url)
yield product_loader.load_item()
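# Run sketch (hypothetical invocation; depends on the surrounding Scrapy
# project configuration):
#
#     scrapy crawl van-cafe.com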
|
{
"content_hash": "9c50ed29005fb93a22ff80e565fe9928",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 119,
"avg_line_length": 35.6125,
"alnum_prop": 0.5752895752895753,
"repo_name": "0--key/lib",
"id": "9380595f48ca521584cdf876555260ede2d42b92",
"size": "2849",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "portfolio/Python/scrapy/vanparts/vancafe.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "28210"
},
{
"name": "Emacs Lisp",
"bytes": "76390"
},
{
"name": "HTML",
"bytes": "1136671"
},
{
"name": "JavaScript",
"bytes": "27718"
},
{
"name": "PHP",
"bytes": "378537"
},
{
"name": "Python",
"bytes": "1892998"
},
{
"name": "Shell",
"bytes": "4030"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='ReceiptLog',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('status', models.PositiveIntegerField(default=0, choices=[(0, b'Error'), (1, b'Completed')])),
('started', models.DateTimeField(help_text=b'The time the script started processing', auto_now=True)),
('run_time', models.PositiveIntegerField(help_text=b'Time the script took to complete in seconds', null=True, blank=True)),
('query_from', models.DateTimeField(null=True, blank=True)),
('query_to', models.DateTimeField(null=True, blank=True)),
('total_emails', models.PositiveIntegerField(default=0, help_text=b'Number of entries received from the API')),
('total_errors', models.PositiveIntegerField(default=0, help_text=b'Number of entries that were not processable')),
('total_failed', models.PositiveIntegerField(default=0, help_text=b'Number of entries recorded as failed by the PA')),
('total_success', models.PositiveIntegerField(default=0, help_text=b'Number of entries that the PA indicated were successfully processed')),
('status_detail', models.TextField(null=True, blank=True)),
],
options={
},
bases=(models.Model,),
),
]
|
{
"content_hash": "376fe190418a4b711193715ffaa0db04",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 156,
"avg_line_length": 52.41935483870968,
"alnum_prop": 0.6221538461538462,
"repo_name": "ministryofjustice/manchester_traffic_offences_pleas",
"id": "54d558f5ae111a94de535c88d6c1d1a19bc1fa83",
"size": "1649",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/receipt/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "867"
},
{
"name": "Gherkin",
"bytes": "10122"
},
{
"name": "HTML",
"bytes": "184454"
},
{
"name": "JavaScript",
"bytes": "52955"
},
{
"name": "Python",
"bytes": "792658"
},
{
"name": "SCSS",
"bytes": "43568"
},
{
"name": "Shell",
"bytes": "1766"
}
],
"symlink_target": ""
}
|
__author__ = 'wcong'
'''
Exceptions raised by the node module.
'''
|
{
"content_hash": "81e76e0a14cd9865f471facb6fe94bd3",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 22,
"avg_line_length": 10.6,
"alnum_prop": 0.5660377358490566,
"repo_name": "wcong/ants",
"id": "c30c4d8fa0ae27635fe7368d2ab49d8fe32e45a6",
"size": "69",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ants/node/exception.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "721345"
},
{
"name": "Shell",
"bytes": "1644"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from gocd_cli.command import BaseCommand
from gocd_cli.utils import get_settings
__all__ = ['Decrypt', 'Encrypt']
class BaseEncryptionCommand(object):
_encryption_module = None
_settings = None
@property
def settings(self):
if self._settings is None:
self._settings = get_settings()
return self._settings
@property
def encryption_module(self):
if self._encryption_module is None:
mod = self.settings.get('encryption_module') or 'gocd_cli.encryption.caesar'
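            # __import__ with a non-empty fromlist returns the leaf module
            # itself rather than the top-level package, which is what lets a
            # dotted module path from the settings file resolve correctly.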
self._encryption_module = __import__(mod, fromlist=('',))
return self._encryption_module
class Encrypt(BaseCommand, BaseEncryptionCommand):
usage = """One of either plaintext or key can be passed in
Flags:
plaintext: A string to encrypt
key: A configuration key from the settings file
"""
usage_summary = 'Encrypts the passed in plaintext or key to ciphertext'
def __init__(self, server, plaintext=None, key=None):
self.server = server
self._plaintext = plaintext
self._key = key
@property
def plaintext(self):
if self._key:
return self.settings.get(self._key)
else:
return self._plaintext
def label(self):
if self._key:
return '{0}_encrypted'.format(self._key.replace('_encrypted', ''))
else:
return 'Ciphertext'
def run(self):
ciphertext = self.encryption_module.encrypt(self.plaintext)
return self._return_value('{0}\n{1}'.format(
'encryption_module = {0}'.format(self.encryption_module.__name__),
'{0} = {1}'.format(self.label(), ciphertext)
), exit_code=0)
class Decrypt(BaseCommand, BaseEncryptionCommand):
usage = """One of either ciphertext or key can be passed in
Flags:
ciphertext: A string to decrypt
key: A configuration key from the settings file
"""
usage_summary = 'Decrypts the passed in ciphertext or key to plaintext'
def __init__(self, server, ciphertext=None, key=None):
self.server = server
self._ciphertext = ciphertext
self._key = key
@property
def ciphertext(self):
if self._key:
return self.settings.get(self._key)
else:
return self._ciphertext
def label(self):
if self._key:
return self._key.replace('_encrypted', '')
else:
return 'Plaintext'
def run(self):
plaintext = self.encryption_module.decrypt(self.ciphertext)
return self._return_value('{0}\n{1}'.format(
'encryption_module = {0}'.format(self.encryption_module.__name__),
'{0} = {1}'.format(self.label(), plaintext),
), exit_code=0)
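# A minimal usage sketch (illustrative names; `server` is whatever gocd_cli
# normally hands its commands):
#
#     cmd = Encrypt(server, plaintext='super-secret')
#     result = cmd.run()
#     # output, via BaseCommand._return_value, looks like:
#     #   encryption_module = gocd_cli.encryption.caesar
#     #   Ciphertext = <encrypted string>
#
# Passing key='db_password' instead reads the plaintext from the settings
# file and labels the output line 'db_password_encrypted'.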
|
{
"content_hash": "87df21f41fc3cd0e09b5e767a511fa13",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 88,
"avg_line_length": 29.520833333333332,
"alnum_prop": 0.6047988708539167,
"repo_name": "gaqzi/gocd-cli",
"id": "3d22f760c575763a9ccabb053a879899acea514f",
"size": "2834",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "gocd_cli/commands/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1095"
},
{
"name": "Python",
"bytes": "60409"
}
],
"symlink_target": ""
}
|
"""Tests for the worker."""
import time
# pylint: disable=unused-import,g-bad-import-order
from grr.lib import server_plugins
# pylint: enable=unused-import,g-bad-import-order
from grr.lib import aff4
from grr.lib import data_store
from grr.lib import flags
from grr.lib import flow
from grr.lib import flow_runner
from grr.lib import rdfvalue
from grr.lib import scheduler
from grr.lib import test_lib
from grr.lib import worker
# A global collector for test results
RESULTS = []
class WorkerSendingTestFlow(flow.GRRFlow):
"""Tests that sent messages are correctly collected."""
@flow.StateHandler(next_state="Incoming")
def Start(self):
for i in range(10):
self.CallClient("Test",
rdfvalue.DataBlob(string="test%s" % i),
data=str(i),
next_state="Incoming")
@flow.StateHandler(auth_required=False)
def Incoming(self, responses):
# Add a delay here to catch thread races.
time.sleep(0.2)
# We push the result into a global array so we can examine it
# better.
for response in responses:
RESULTS.append(response.string)
class WorkerSendingTestFlow2(WorkerSendingTestFlow):
"""Only send a single request."""
@flow.StateHandler(next_state="Incoming")
def Start(self):
i = 1
self.CallClient("Test",
rdfvalue.DataBlob(string="test%s" % i),
data=str(i),
next_state="Incoming")
class WorkerSendingWKTestFlow(flow.WellKnownFlow):
well_known_session_id = rdfvalue.SessionID(
"aff4:/flows/WorkerSendingWKTestFlow")
def ProcessMessage(self, message):
RESULTS.append(message)
class GrrWorkerTest(test_lib.FlowTestsBaseclass):
"""Tests the GRR Worker."""
def SendResponse(self, session_id, data, client_id=None, send_status=True):
if not isinstance(data, rdfvalue.RDFValue):
data = rdfvalue.DataBlob(string=data)
with flow_runner.FlowManager(token=self.token) as flow_manager:
flow_manager.QueueResponse(session_id, rdfvalue.GrrMessage(
source=client_id,
session_id=session_id,
payload=data,
request_id=1, response_id=1))
if send_status:
flow_manager.QueueResponse(session_id, rdfvalue.GrrMessage(
source=client_id,
session_id=session_id,
payload=rdfvalue.GrrStatus(
status=rdfvalue.GrrStatus.ReturnedStatus.OK),
request_id=1, response_id=2,
type=rdfvalue.GrrMessage.Type.STATUS))
# Signal on the worker queue that this flow is ready.
data_store.DB.Set(worker.DEFAULT_WORKER_QUEUE,
"task:%s" % session_id, "X", token=self.token)
def testProcessMessages(self):
"""Test processing of several inbound messages."""
worker_obj = worker.GRRWorker(worker.DEFAULT_WORKER_QUEUE,
run_cron=False, token=self.token)
# Create a couple of flows
flow_obj = self.FlowSetup("WorkerSendingTestFlow")
session_id_1 = flow_obj.session_id
flow_obj.Close()
flow_obj = self.FlowSetup("WorkerSendingTestFlow2")
session_id_2 = flow_obj.session_id
flow_obj.Close()
# Check that client queue has messages
tasks_on_client_queue = scheduler.SCHEDULER.Query(
self.client_id, 100, token=self.token)
# should have 10 requests from WorkerSendingTestFlow and 1 from
# SendingTestFlow2
self.assertEqual(len(tasks_on_client_queue), 11)
# Send each of the flows a repeated message
self.SendResponse(session_id_1, "Hello1")
self.SendResponse(session_id_2, "Hello2")
self.SendResponse(session_id_1, "Hello1")
self.SendResponse(session_id_2, "Hello2")
# Clear the results global
del RESULTS[:]
# Process all messages
worker_obj.RunOnce()
worker_obj.thread_pool.Join()
# Ensure both requests ran exactly once
RESULTS.sort()
self.assertEqual(2, len(RESULTS))
self.assertEqual("Hello1", RESULTS[0])
self.assertEqual("Hello2", RESULTS[1])
# Check that client queue is cleared - should have 2 less messages (since
# two were completed).
tasks_on_client_queue = scheduler.SCHEDULER.Query(
self.client_id, 100, token=self.token)
self.assertEqual(len(tasks_on_client_queue), 9)
# Ensure that processed requests are removed from state subject
self.assertEqual((None, 0), data_store.DB.Resolve(
flow_runner.FlowManager.FLOW_STATE_TEMPLATE % session_id_1,
flow_runner.FlowManager.FLOW_REQUEST_TEMPLATE % 1,
token=self.token))
flow_obj = aff4.FACTORY.Open(session_id_1, token=self.token)
self.assertTrue(flow_obj.state.context.state !=
rdfvalue.Flow.State.TERMINATED)
flow_obj = aff4.FACTORY.Open(session_id_2, token=self.token)
self.assertTrue(flow_obj.state.context.state ==
rdfvalue.Flow.State.TERMINATED)
def testProcessMessagesWellKnown(self):
worker_obj = worker.GRRWorker(worker.DEFAULT_WORKER_QUEUE,
run_cron=False, token=self.token)
# Send a message to a WellKnownFlow - ClientStatsAuto.
client_id = rdfvalue.ClientURN("C.1100110011001100")
self.SendResponse(rdfvalue.SessionID("aff4:/flows/W:Stats"),
data=rdfvalue.ClientStats(RSS_size=1234),
client_id=client_id,
send_status=False)
# Process all messages
worker_obj.RunOnce()
worker_obj.thread_pool.Join()
client = aff4.FACTORY.Open(client_id.Add("stats"), token=self.token)
stats = client.Get(client.Schema.STATS)
self.assertEqual(stats.RSS_size, 1234)
# Make sure no notifications have been sent.
user = aff4.FACTORY.Open("aff4:/users/%s" % self.token.username,
token=self.token)
notifications = user.Get(user.Schema.PENDING_NOTIFICATIONS)
self.assertIsNone(notifications)
def CheckNotificationsDisappear(self, session_id):
worker_obj = worker.GRRWorker(worker.DEFAULT_WORKER_QUEUE,
run_cron=False, token=self.token)
scheduler.SCHEDULER.NotifyQueue(session_id, token=self.token)
sessions = scheduler.SCHEDULER.GetSessionsFromQueue("aff4:/W",
token=self.token)
# Check the notification is there.
self.assertEqual(len(sessions), 1)
self.assertEqual(sessions[0], session_id)
# Process all messages
worker_obj.RunOnce()
worker_obj.thread_pool.Join()
sessions = scheduler.SCHEDULER.GetSessionsFromQueue("aff4:/W",
token=self.token)
# Check the notification is now gone.
self.assertEqual(len(sessions), 0)
def testWorkerDeletesNotificationsForBrokenObjects(self):
# Test notifications for objects that don't exist.
session_id = rdfvalue.SessionID("aff4:/flows/W:123456")
self.CheckNotificationsDisappear(session_id)
# Now check objects that are actually broken.
session_id = rdfvalue.SessionID("aff4:/flows/W:testobj")
obj = aff4.FACTORY.Create(session_id, "GRRFlow", token=self.token)
obj.Close()
# Overwrite the type of the object such that opening it will now fail.
data_store.DB.Set(session_id, "aff4:type", "DeprecatedClass",
token=self.token)
# Check it really does.
with self.assertRaises(aff4.InstanciationError):
aff4.FACTORY.Open(session_id, token=self.token)
self.CheckNotificationsDisappear(session_id)
def main(_):
test_lib.main()
if __name__ == "__main__":
flags.StartMain(main)
|
{
"content_hash": "1e172939535df0e7afe003ca4173d394",
"timestamp": "",
"source": "github",
"line_count": 224,
"max_line_length": 77,
"avg_line_length": 34.183035714285715,
"alnum_prop": 0.6610944234034217,
"repo_name": "MiniSEC/GRR_clone",
"id": "8a3b427de90ad77c7b3d15dab5012330ec7d2b57",
"size": "7729",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "worker/worker_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "227"
},
{
"name": "C++",
"bytes": "55093"
},
{
"name": "CSS",
"bytes": "153862"
},
{
"name": "JavaScript",
"bytes": "633797"
},
{
"name": "Python",
"bytes": "2863055"
},
{
"name": "Shell",
"bytes": "7959"
}
],
"symlink_target": ""
}
|
"""
The MIT License (MIT)
Copyright (c) 2013 Niko Skrypnik
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
"""
Object3D class
==============
"""
from kivy.properties import NumericProperty, ListProperty, ObjectProperty, \
AliasProperty
from kivy.graphics import Scale, Rotate, PushMatrix, PopMatrix, Translate, \
UpdateNormalMatrix
from kivy.graphics.instructions import InstructionGroup
from kivy.event import EventDispatcher
from kivy3.math.vectors import Vector3
class Object3D(EventDispatcher):
"""Base class for all 3D objects in rendered
3D world.
"""
def __init__(self, **kw):
super(Object3D, self).__init__(**kw)
self.name = kw.pop('name', '')
self.children = list()
self.parent = None
self._scale = Scale(1., 1., 1.)
self._position = Vector3(0, 0, 0)
self._rotation = Vector3(0, 0, 0)
self._position.set_change_cb(self.on_pos_changed)
self._rotation.set_change_cb(self.on_angle_change)
# general instructions
self._pop_matrix = PopMatrix()
self._push_matrix = PushMatrix()
self._translate = Translate(*self._position)
self._rotors = {
"x": Rotate(self._rotation.x, 1, 0, 0),
"y": Rotate(self._rotation.y, 0, 1, 0),
"z": Rotate(self._rotation.z, 0, 0, 1),
}
self._instructions = InstructionGroup()
def add(self, *objs):
for obj in objs:
self._add_child(obj)
def _add_child(self, obj):
self.children.append(obj)
obj.parent = self
def _set_position(self, val):
if isinstance(val, Vector3):
self._position = val
else:
self._position = Vector3(val)
self._position.set_change_cb(self.on_pos_changed)
def _get_position(self):
return self._position
position = AliasProperty(_get_position, _set_position)
pos = position # just shortcut
def _set_rotation(self, val):
if isinstance(val, Vector3):
self._rotation = val
else:
self._rotation = Vector3(val)
self._rotation.set_change_cb(self.on_angle_change)
self._rotors["x"].angle = self._rotation.x
self._rotors["y"].angle = self._rotation.y
self._rotors["z"].angle = self._rotation.z
def _get_rotation(self):
return self._rotation
rotation = AliasProperty(_get_rotation, _set_rotation)
rot = rotation
def _set_scale(self, val):
if isinstance(val, Scale):
self._scale = val
else:
self._scale = Scale(*val)
def _get_scale(self):
return self._scale
scale = AliasProperty(_get_scale, _set_scale)
def on_pos_changed(self, coord, v):
""" Some coordinate was changed """
self._translate.xyz = self._position
def on_angle_change(self, axis, angle):
self._rotors[axis].angle = angle
def as_instructions(self):
""" Get instructions set for renderer """
if not self._instructions.children:
self._instructions.add(self._push_matrix)
self._instructions.add(self._translate)
self._instructions.add(self.scale)
            for rot in self._rotors.values():  # values() works on Python 2 and 3; itervalues() is Python 2 only
self._instructions.add(rot)
self._instructions.add(UpdateNormalMatrix())
for instr in self.custom_instructions():
self._instructions.add(instr)
for child in self.get_children_instructions():
self._instructions.add(child)
self._instructions.add(self._pop_matrix)
return self._instructions
def custom_instructions(self):
""" Should be overriden in subclasses to provide some extra
instructions
"""
return []
def get_children_instructions(self):
for child in self.children:
yield child.as_instructions()
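# A small usage sketch (assuming a kivy3 renderer consumes the resulting
# instruction group; values are illustrative):
#
#     obj = Object3D()
#     obj.pos.x = 2.0       # Vector3 change callback updates the Translate
#     obj.rotation.y = 45   # change callback updates the matching Rotate
#     child = Object3D()
#     obj.add(child)        # child's instructions are nested inside obj's
#     ig = obj.as_instructions()   # PushMatrix ... PopMatrix group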
|
{
"content_hash": "6cb64ae256926c8e636aeab16cb6564d",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 77,
"avg_line_length": 33.446666666666665,
"alnum_prop": 0.6252740681682281,
"repo_name": "nskrypnik/kivy3",
"id": "de9080efeea943009f449bcecef390ff3e471e4b",
"size": "5017",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kivy3/core/object3d.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "GLSL",
"bytes": "1441"
},
{
"name": "Python",
"bytes": "55547"
}
],
"symlink_target": ""
}
|
'''
Performs the same check as in Test_02 verifying that zPoS forked blocks that stake a zerocoin which is spent on mainchain on an higher block are still accepted.
'''
from test_framework.authproxy import JSONRPCException
from fake_stake.base_test import NPCcoin_FakeStakeTest
from time import sleep
class zPoSFakeStakeAccepted(NPCcoin_FakeStakeTest):
def set_test_params(self):
''' Setup test environment
:param:
:return:
'''
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [['-staking=1', '-debug=net', '-znpcstake']] * self.num_nodes
def run_test(self):
self.description = "Performs the same check as in Test_02 verifying that zPoS forked blocks that stake a zerocoin which is spent on mainchain on an higher block are still accepted."
self.init_test()
DENOM_TO_USE = 1000 # zc denomination
INITAL_MINED_BLOCKS = 321
MORE_MINED_BLOCKS = 301
FORK_DEPTH = 75
self.NUM_BLOCKS = 2
# 1) Starting mining blocks
self.log.info("Mining %d blocks to get to zPOS activation...." % INITAL_MINED_BLOCKS)
self.node.generate(INITAL_MINED_BLOCKS)
sleep(2)
# 2) Collect the possible prevouts and mint zerocoins with those
self.log.info("Collecting all unspent coins which we generated from mining...")
balance = self.node.getbalance("*", 100)
self.log.info("Minting zerocoins...")
initial_mints = 0
while balance > DENOM_TO_USE:
try:
self.node.mintzerocoin(DENOM_TO_USE)
except JSONRPCException:
break
sleep(1)
initial_mints += 1
self.node.generate(1)
sleep(1)
if initial_mints % 5 == 0:
self.log.info("Minted %d coins" % initial_mints)
if initial_mints >= 20:
break
balance = self.node.getbalance("*", 100)
self.log.info("Minted %d coins in the %d-denom, remaining balance %d", initial_mints, DENOM_TO_USE, balance)
sleep(2)
# 3) mine more blocks
self.log.info("Mining %d more blocks ... and getting spendable zerocoins" % MORE_MINED_BLOCKS)
self.node.generate(MORE_MINED_BLOCKS)
sleep(2)
mints = self.node.listmintedzerocoins(True, True)
sleep(1)
mints_hashes = [x["serial hash"] for x in mints]
        # Not all of these mints are spendable yet, only a few of them.
self.log.info("Got %d confirmed mints" % len(mints_hashes))
# 4) Start mining again so that spends get confirmed in a block.
self.log.info("Mining 200 more blocks...")
self.node.generate(200)
sleep(2)
# 5) spend mints
self.log.info("Spending mints in block %d..." % self.node.getblockcount())
spends = 0
for mint in mints_hashes:
            # single-element list to pass to RPC spendzerocoinmints
            mint_arg = [mint]
try:
self.node.spendzerocoinmints(mint_arg)
sleep(1)
spends += 1
except JSONRPCException as e:
self.log.warning(str(e))
continue
sleep(1)
self.log.info("Successfully spent %d mints" % spends)
self.log.info("Mining 6 more blocks...")
self.node.generate(6)
sleep(2)
# 6) Collect some prevouts for random txes
self.log.info("Collecting inputs for txes...")
utxo_list = self.node.listunspent()
sleep(1)
# 7) Create valid forked zPoS blocks and send them
self.log.info("Creating stake zPoS blocks...")
err_msgs = self.test_spam("Fork", mints, spending_utxo_list=utxo_list, fZPoS=True, fRandomHeight=True, randomRange=FORK_DEPTH, randomRange2=50, fMustPass=True)
if not len(err_msgs) == 0:
self.log.error("result: " + " | ".join(err_msgs))
raise AssertionError("TEST FAILED")
self.log.info("%s PASSED" % self.__class__.__name__)
if __name__ == '__main__':
zPoSFakeStakeAccepted().main()
|
{
"content_hash": "6ab5bcf306f847d9ff03e0fb1ff0dcda",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 189,
"avg_line_length": 37.464285714285715,
"alnum_prop": 0.5955672068636797,
"repo_name": "npccoin/npccoin",
"id": "b10c9ed797670cd867c7d0a1b8fe15baa93772f2",
"size": "4401",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/functional/p2p_zpos_fakestake_accepted.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28453"
},
{
"name": "C",
"bytes": "1380148"
},
{
"name": "C++",
"bytes": "5979863"
},
{
"name": "CMake",
"bytes": "41802"
},
{
"name": "CSS",
"bytes": "44351"
},
{
"name": "HTML",
"bytes": "21860"
},
{
"name": "Java",
"bytes": "30291"
},
{
"name": "M4",
"bytes": "201247"
},
{
"name": "Makefile",
"bytes": "118299"
},
{
"name": "Objective-C",
"bytes": "2162"
},
{
"name": "Objective-C++",
"bytes": "6638"
},
{
"name": "Python",
"bytes": "982139"
},
{
"name": "QMake",
"bytes": "26270"
},
{
"name": "Shell",
"bytes": "55255"
}
],
"symlink_target": ""
}
|
import codecs
from invoke.vendor.six.moves.queue import Queue
from invoke.vendor.six.moves import zip_longest
from invoke.util import ExceptionHandlingThread
from pytest import skip
from fabric import Connection
_words = "/usr/share/dict/words"
def _worker(queue, cxn, start, num_words, count, expected):
tail = num_words - start
cmd = "tail -n {} {} | head -n {}".format(tail, _words, count)
stdout = cxn.run(cmd, hide=True).stdout
result = [x.strip() for x in stdout.splitlines()]
queue.put((cxn, result, expected))
class concurrency:
# TODO: still useful to use Group API here? Where does this responsibility
# fall between Group and Executor (e.g. phrasing this specifically as a
# generic subcase of Invoke level task parameterization)?
# TODO: spin up multiple temp SSHDs / Paramiko servers / ???
def setup(self):
cxn1 = Connection("localhost")
cxn2 = Connection("localhost")
cxn3 = Connection("localhost")
self.cxns = (cxn1, cxn2, cxn3)
def connections_objects_do_not_share_connection_state(self):
cxn1, cxn2, cxn3 = self.cxns
        for cxn in self.cxns:
            cxn.open()
# Prove no exterior connection caching, socket reuse, etc
# NOTE: would phrase these as chained 'is not' but pep8 linter is being
# stupid :(
assert cxn1 is not cxn2
assert cxn2 is not cxn3
assert cxn1.client is not cxn2.client
assert cxn2.client is not cxn3.client
ports = [x.transport.sock.getsockname()[1] for x in self.cxns]
        assert ports[0] != ports[1] != ports[2]
def manual_threading_works_okay(self):
# TODO: needs https://github.com/pyinvoke/invoke/issues/438 fixed
# before it will reliably pass
skip()
# Kind of silly but a nice base case for "how would someone thread this
# stuff; and are there any bizarre gotchas lurking in default
# config/context/connection state?"
# Specifically, cut up the local (usually 100k's long) words dict into
# per-thread chunks, then read those chunks via shell command, as a
# crummy "make sure each thread isn't polluting things like stored
# stdout" sanity test
queue = Queue()
# TODO: skip test on Windows or find suitable alternative file
with codecs.open(_words, encoding="utf-8") as fd:
data = [x.strip() for x in fd.readlines()]
threads = []
num_words = len(data)
        chunksize = len(data) // len(self.cxns)  # floor division: slice indices must be ints
for i, cxn in enumerate(self.cxns):
start = i * chunksize
            end = min(start + chunksize, num_words)  # cap each chunk; remainder words go unchecked
chunk = data[start:end]
kwargs = dict(
queue=queue,
cxn=cxn,
start=start,
num_words=num_words,
count=len(chunk),
expected=chunk,
)
thread = ExceptionHandlingThread(target=_worker, kwargs=kwargs)
threads.append(thread)
for t in threads:
t.start()
for t in threads:
t.join(5) # Kinda slow, but hey, maybe the test runner is hot
while not queue.empty():
cxn, result, expected = queue.get(block=False)
for resultword, expectedword in zip_longest(result, expected):
err = u"({2!r}, {3!r}->{4!r}) {0!r} != {1!r}".format(
resultword, expectedword, cxn, expected[0], expected[-1]
)
assert resultword == expectedword, err
|
{
"content_hash": "20668ef28d1e39bc86d3a3d3d99a3f30",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 79,
"avg_line_length": 39.824175824175825,
"alnum_prop": 0.6067880794701986,
"repo_name": "fabric/fabric",
"id": "99c80092eb29079ade4638ba98be419beb99d1dc",
"size": "3624",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "integration/concurrency.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "295293"
}
],
"symlink_target": ""
}
|
import nltk
import re
def __parse_hashtags(t):
'''
Get any hashtags from the hashtags entity.
'''
if t.get('entities') is not None:
return [h['text'].lower() for h in t['entities']['hashtags']]
else:
return []
def __parse_links(t):
'''
Get any links from the urls entity.
'''
if t.get('entities') is not None:
return [u['expanded_url'] for u in t['entities']['urls']]
else:
return []
def __parse_mentions(t):
'''
Get any mentions from the user_mentions entity.
'''
if t.get('entities') is not None:
return [m['screen_name'].lower() for m in t['entities']['user_mentions']]
else:
return []
def __parse_phrases(t, count):
    '''
    Build n-grams of the given length from the tweet text.
    '''
    ngrams = nltk.util.ngrams(t['text'].lower().split(), count)
    return [' '.join(ngram) for ngram in ngrams]
def __parse_time(timestamp):
'''
Get the timestamp for the tweet, remove the seconds and minutes, and
store the timestamp for analysis.
'''
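    # e.g. 'Mon Sep 01 14:37:05 +0000 2014' -> '14:00:00 +0000'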
m = re.search(r'(\d\d:\d\d:\d\d \+\d\d\d\d)', timestamp)
if m is not None:
return re.sub(r':\d\d:\d\d ', ':00:00 ', m.group(1))
else:
return None
def __parse_place(place):
'''
Capture the place id, country, and full_name.
'''
if place is not None:
return (place['id'], place['country'], place['full_name'])
else:
return None
def parse_tweets(tweets):
analysis = {'hashtags': [], 'mentions': [], 'links': [], 'phrase3': [],
'phrase4': [], 'phrase5': [], 'times': [], 'places': []}
for tweet in tweets:
# ht, mt, li = __parse_words(tweet)
analysis['hashtags'].extend(__parse_hashtags(tweet))
analysis['mentions'].extend(__parse_mentions(tweet))
analysis['links'].extend(__parse_links(tweet))
analysis['phrase3'].extend(__parse_phrases(tweet, 3))
analysis['phrase4'].extend(__parse_phrases(tweet, 4))
analysis['phrase5'].extend(__parse_phrases(tweet, 5))
time = __parse_time(tweet['created_at'])
if time is not None:
analysis['times'].append(time)
place = __parse_place(tweet['place'])
if place is not None:
analysis['places'].append(place)
return analysis
|
{
"content_hash": "a05cf4818bb114dc3f3ad85a688c5347",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 81,
"avg_line_length": 27.70731707317073,
"alnum_prop": 0.565580985915493,
"repo_name": "averagesecurityguy/twanalyze",
"id": "5e21690f15faddd2c1db6031d0631a005f5eb3d8",
"size": "2272",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "twanalyze/parse.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "19019"
}
],
"symlink_target": ""
}
|
PLAYER = ['/usr/bin/mpv']
# Respect default terminal colors in curses interface.
DEFAULT_TERMINAL_COLORS = False
|
{
"content_hash": "391c28213352c5d6b0a5b5f819cc2a1a",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 54,
"avg_line_length": 28.5,
"alnum_prop": 0.7543859649122807,
"repo_name": "hjalti/ruv-cli",
"id": "9270b1b66cd76e12545a94b874f9e11d24cc8e9e",
"size": "220",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ruv/default_config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "362"
},
{
"name": "Python",
"bytes": "26534"
}
],
"symlink_target": ""
}
|
hello~
|
{
"content_hash": "fb606e1aa1da4767b3d7fb7e72dd811f",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 6,
"avg_line_length": 7,
"alnum_prop": 0.7142857142857143,
"repo_name": "choznerol/c4lab-git-tutorial",
"id": "4ee95d23e713af43d99280fda42f70712d5aa78f",
"size": "7",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "exercise-1_from-pull-to-push/rouanshen.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "433"
}
],
"symlink_target": ""
}
|
"""
Project Honeypot Http:BL API Client
Example:
.. code:: python
import httpbl
ip_address = '127.10.20.5'
print('Querying {}'.format(ip_address))
bl = httpbl.HttpBL('my-key')
print(bl.query(ip_address))
"""
import socket
__version__ = '1.0.1'
DNSBL_SUFFIX = 'dnsbl.httpbl.org.'
# Visitor Types
SEARCH_ENGINE = 0
SUSPICIOUS = 1
HARVESTER = 2
COMMENT_SPAMMER = 4
# List of Search Engines, used to return the name of the search engine
SEARCH_ENGINES = ['Undocumented',
'AltaVista',
'Ask',
'Baidu',
'Excite',
'Google',
'Looksmart',
'Lycos',
'MSN',
'Yahoo',
'Cuil',
'InfoSeek',
'Miscellaneous']
# Text mappings for visitor types
DESCRIPTIONS = {COMMENT_SPAMMER: 'Comment Spammer',
HARVESTER: 'Harvester',
SEARCH_ENGINE: 'Search Engine',
SUSPICIOUS: 'Suspicious'}
class HttpBL(object):
"""Query the the Project Honeypot Http:BL API"""
def __init__(self, key):
"""Initialize the HttpBL object with your Project Honeypot Key
:param key: Project Honeypot Http:BL Key
:type key: str
"""
self.key = key
def query(self, ip_address):
"""Query the Project Honeypot Http:BL API for the given IP address
:param ip_address: IP address to query
:type ip_address: str
:rtype: dict
"""
try:
return self._decode_response(
socket.gethostbyname(self._build_query(ip_address)))
except socket.gaierror: # Not listed
return {
'days_since_last_activity': None,
'name': None,
'threat_score': 0,
'type': None
}
def _build_query(self, ip_address):
"""Returns the Http:BL query string to use
:param ip_address: IP address to query
:type ip_address: str
:returns: str
"""
return '{}.{}.{}'.format(
self.key, self._reverse_ip(ip_address), DNSBL_SUFFIX)
def _reverse_ip(self, ip_address):
"""Take an IP address in 127.0.0.1 format and return it as 1.0.0.127
:param ip_address: IP address to query
:type ip_address: str
:returns: str
"""
return '.'.join(ip_address.split('.')[::-1])
def _decode_response(self, ip_address):
"""Decodes a HttpBL response IP and return data structure of response
data.
:param ip_address: IP address to query
:type ip_address: str
:rtype: dict
:raises: ValueError
"""
# Reverse the IP, reassign the octets to integers
vt, ts, days, rc = [int(o) for o in ip_address.split('.')[::-1]]
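        # e.g. a lookup answer of 127.5.20.3 unpacks (after the reverse) to
        # rc=127 (valid), days=5, ts=20 and vt=3, i.e. SUSPICIOUS | HARVESTER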
# 127 reflects a valid query response, all others are errors
if rc != 127:
raise ValueError('Invalid Response Code: {}'.format(rc))
# Build a list of visitor types since one IP can be multiple
visitor_types = []
if vt & COMMENT_SPAMMER:
visitor_types.append(COMMENT_SPAMMER)
if vt & HARVESTER:
visitor_types.append(HARVESTER)
if vt & SUSPICIOUS:
visitor_types.append(SUSPICIOUS)
name = None
if not vt:
try:
name = SEARCH_ENGINES[ts]
except IndexError:
name = SEARCH_ENGINES[0]
# Return the response dictionary
return {'days_since_last_activity': days if vt else None,
'name': name,
'threat_score': ts if vt else None,
'type': visitor_types if vt else [SEARCH_ENGINE]}
|
{
"content_hash": "44f7245f8530c028f3fe6b20918b05da",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 77,
"avg_line_length": 27.22695035460993,
"alnum_prop": 0.5324303203959364,
"repo_name": "gmr/httpbl",
"id": "409eaf2f689b293e5f80b6b70c8d183083088a93",
"size": "3839",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "httpbl.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "6652"
}
],
"symlink_target": ""
}
|
import pybullet as p
p.connect(p.GUI)
cube = p.loadURDF("cube.urdf")
frequency = 240  # physics steps per second
timeStep = 1. / frequency
p.setGravity(0, 0, -9.8)
p.changeDynamics(cube, -1, linearDamping=0, angularDamping=0)
p.setPhysicsEngineParameter(fixedTimeStep=timeStep)
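# One simulated second of free fall (frequency steps of 1/frequency s); with
# gravity -9.8 m/s^2 the cube's z should drop by roughly g*t^2/2, about 4.9 m.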
for i in range(frequency):
p.stepSimulation()
pos, orn = p.getBasePositionAndOrientation(cube)
print(pos)
|
{
"content_hash": "08c86f96588d6fe9b8af0ac8ebb1cb1a",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 61,
"avg_line_length": 29.833333333333332,
"alnum_prop": 0.7653631284916201,
"repo_name": "MTASZTAKI/ApertusVR",
"id": "a706dd00eea1395f7b16e4d18ea1192e96f5c7c1",
"size": "358",
"binary": false,
"copies": "2",
"ref": "refs/heads/0.9",
"path": "plugins/physics/bulletPhysics/3rdParty/bullet3/examples/pybullet/examples/integrate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "7599"
},
{
"name": "C++",
"bytes": "1207412"
},
{
"name": "CMake",
"bytes": "165066"
},
{
"name": "CSS",
"bytes": "1816"
},
{
"name": "GLSL",
"bytes": "223507"
},
{
"name": "HLSL",
"bytes": "141879"
},
{
"name": "HTML",
"bytes": "34827"
},
{
"name": "JavaScript",
"bytes": "140550"
},
{
"name": "Python",
"bytes": "1370"
}
],
"symlink_target": ""
}
|
from django.conf.urls import patterns, url, include
urlpatterns = patterns('apps.reportes.views',
#reportes_consolidados
url(r'^reportes_consolidados/$', 'reportes_consolidados'),
url(r'^reportes_detallados/$', 'reportes_detallados'),
url(r'^reportes_transacciones/$', 'reportes_transacciones'),
url(r'^reportes/$', 'busqueda'),
url(r'^reportesT/$', 'transacciones'),
)
|
{
"content_hash": "743d69a391a823e29b5c73d670a99268",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 64,
"avg_line_length": 28.285714285714285,
"alnum_prop": 0.6919191919191919,
"repo_name": "AnthonyWainer/sisJuridico",
"id": "848f4c0647cf8585063cc1c0e03295f078592729",
"size": "396",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sisjuridico/apps/reportes/urls.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "143302"
},
{
"name": "HTML",
"bytes": "157349"
},
{
"name": "JavaScript",
"bytes": "350233"
},
{
"name": "Python",
"bytes": "79724"
},
{
"name": "Shell",
"bytes": "1512"
}
],
"symlink_target": ""
}
|
from odoo import models
class AccountInvoiceLine(models.Model):
_inherit = ['account.invoice.line']
def get_digital_purchases(self):
partner = self.env.user.partner_id
# Get paid invoices
purchases = self.sudo().search_read(
domain=[('invoice_id.state', '=', 'paid'), ('invoice_id.partner_id', '=', partner.id), ('product_id.product_tmpl_id.type', '=', 'digital')],
fields=['product_id'],
)
        # I only want product_ids, but search_read insists on giving me a list
        # of (product_id: <id>, name: <product code> <template_name> <attributes>)
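        # e.g. [{'id': 7, 'product_id': (42, '[A1] Ebook')}] maps to [42]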
return map(lambda x: x['product_id'][0], purchases)
|
{
"content_hash": "46499a29d70096e4431f59e91abc41f0",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 152,
"avg_line_length": 35.94736842105263,
"alnum_prop": 0.6046852122986823,
"repo_name": "vileopratama/vitech",
"id": "b6b28026ddeb99945d5d422b5ddee20c7e9d91f9",
"size": "783",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "src/addons/website_sale_digital/models/account_invoice.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "9611"
},
{
"name": "CSS",
"bytes": "2125999"
},
{
"name": "HTML",
"bytes": "252393"
},
{
"name": "Java",
"bytes": "1840167"
},
{
"name": "JavaScript",
"bytes": "6176224"
},
{
"name": "Makefile",
"bytes": "19072"
},
{
"name": "Mako",
"bytes": "7659"
},
{
"name": "NSIS",
"bytes": "16782"
},
{
"name": "Python",
"bytes": "9438805"
},
{
"name": "Ruby",
"bytes": "220"
},
{
"name": "Shell",
"bytes": "22312"
},
{
"name": "Vim script",
"bytes": "406"
},
{
"name": "XSLT",
"bytes": "11489"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class MetasrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(self, plotly_name="metasrc", parent_name="streamtube", **kwargs):
super(MetasrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs,
)
|
{
"content_hash": "f6a11d61e9bdf14f3d381bd8525fcedc",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 82,
"avg_line_length": 36.09090909090909,
"alnum_prop": 0.6221662468513854,
"repo_name": "plotly/plotly.py",
"id": "c886449d3bd9370ec8bcbf0a542c495fc65a1c66",
"size": "397",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/streamtube/_metasrc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
}
|
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "workshop.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
{
"content_hash": "e7a2ff46733b26287e7664e6d700ff5d",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 72,
"avg_line_length": 25.444444444444443,
"alnum_prop": 0.7117903930131004,
"repo_name": "solidit/votacao",
"id": "37563af854cda34da6e27a61ec259e3261e5ccab",
"size": "251",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9646"
}
],
"symlink_target": ""
}
|
from ssdb.client import StrictSSDB, SSDB
from ssdb.connection import (BlockingConnectionPool, ConnectionPool, Connection)
from ssdb.utils import SortedDict
from ssdb.exceptions import (AuthenticationError, ConnectionError,
BusyLoadingError, DataError, InvalidResponse,
PubSubError, SSDBError, ResponseError, WatchError)
__version__ = '0.0.3'
VERSION = tuple(map(int, __version__.split('.')))
__all__ = ['SSDB', 'StrictSSDB', 'ConnectionPool', 'BlockingConnectionPool',
'Connection', 'SSDBError', 'ConnectionError', 'ResponseError',
'AuthenticationError', 'InvalidResponse', 'DataError', 'PubSubError',
'WatchError', 'BusyLoadingError']
|
{
"content_hash": "a9d6e5cc16cd5cad920ad276ba4e491c",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 80,
"avg_line_length": 45.6875,
"alnum_prop": 0.6757865937072504,
"repo_name": "wrongwaycn/ssdb-py",
"id": "8995c9f1b1ceaab59e7bfbe0f3f0e80785bc777e",
"size": "745",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "ssdb/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "6766"
},
{
"name": "Python",
"bytes": "162629"
},
{
"name": "Shell",
"bytes": "6703"
}
],
"symlink_target": ""
}
|
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
def main():
drilldown_results = []
incident = demisto.incident()
if not incident:
raise ValueError("Error - demisto.incident() expected to return current incident "
"from context but returned None")
labels = incident.get('labels', [])
for label in labels:
if label.get('type') == 'successful_drilldown_enrichment':
is_successful = label.get('value')
if is_successful == 'false':
return CommandResults(readable_output='Drilldown enrichment failed.')
if label.get('type') == 'Drilldown':
try:
                drilldown_results = json.loads(label.get('value', '[]'))
except Exception as e:
raise ValueError(f'Drilldown is not in a valid JSON structure:\n{e}')
if not drilldown_results:
        return CommandResults(readable_output='Drilldown was not configured for this notable.')
    if isinstance(drilldown_results, list):
        events_arr = list(drilldown_results)
        markdown = tableToMarkdown("", events_arr, headers=events_arr[0].keys())
else:
markdown = tableToMarkdown("", drilldown_results)
return {'ContentsFormat': formats['markdown'], 'Type': entryTypes['note'], 'Contents': markdown}
if __name__ in ('__main__', '__builtin__', 'builtins'):
try:
return_results(main())
except Exception as e:
return_error(f'Got an error while parsing Splunk events: {e}', error=e)
|
{
"content_hash": "021d1fee5fc393bf59937fd3641e4c66",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 100,
"avg_line_length": 38.61904761904762,
"alnum_prop": 0.6171393341553637,
"repo_name": "VirusTotal/content",
"id": "703ff460cce1d45590a2dbb4a18f05808c4960de",
"size": "1622",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Packs/SplunkPy/Scripts/SplunkShowDrilldown/SplunkShowDrilldown.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "2146"
},
{
"name": "HTML",
"bytes": "205901"
},
{
"name": "JavaScript",
"bytes": "1584075"
},
{
"name": "PowerShell",
"bytes": "442288"
},
{
"name": "Python",
"bytes": "47594464"
},
{
"name": "Rich Text Format",
"bytes": "480911"
},
{
"name": "Shell",
"bytes": "108066"
},
{
"name": "YARA",
"bytes": "1185"
}
],
"symlink_target": ""
}
|
from flask import Blueprint, current_app
from flask_login import current_user, login_required
main = Blueprint('main', __name__)
public = Blueprint('public', __name__) # Admin login not required
from .views import (
agreements, communications, outcomes, search, service_updates,
services, suppliers, stats, users, buyers, admin_manager
)
from app.main import errors
@main.before_request
@login_required
def require_login():
if current_user.is_authenticated and not current_user.role.startswith('admin'):
return current_app.login_manager.unauthorized()
@main.after_request
def add_cache_control(response):
response.cache_control.no_cache = True
return response
|
{
"content_hash": "fe3e05d79d5031743b2eead9b1e39518",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 83,
"avg_line_length": 27.84,
"alnum_prop": 0.7413793103448276,
"repo_name": "alphagov/digitalmarketplace-admin-frontend",
"id": "1f814b870f8321d56940a7c9ba0c749b34807131",
"size": "696",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "app/main/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "45"
},
{
"name": "HTML",
"bytes": "106840"
},
{
"name": "JavaScript",
"bytes": "13307"
},
{
"name": "Makefile",
"bytes": "608"
},
{
"name": "Nix",
"bytes": "3768"
},
{
"name": "Python",
"bytes": "544967"
},
{
"name": "SCSS",
"bytes": "10644"
},
{
"name": "Shell",
"bytes": "378"
}
],
"symlink_target": ""
}
|
"""Compile reStructuredText to HTML, using Nikola architecture."""
from __future__ import unicode_literals, print_function
import io
import lxml.html
from pkg_resources import resource_filename
from mako.template import Template
from nikola.plugin_categories import Command
class CommandRst2Html(Command):
"""Compile reStructuredText to HTML, using Nikola architecture."""
name = "rst2html"
doc_usage = "infile"
doc_purpose = "compile reStructuredText to HTML files"
needs_config = False
def _execute(self, options, args):
"""Compile reStructuredText to standalone HTML files."""
compiler = self.site.plugin_manager.getPluginByName('rest', 'PageCompiler').plugin_object
if len(args) != 1:
print("This command takes only one argument (input file name).")
return 2
source = args[0]
with io.open(source, "r", encoding="utf8") as in_file:
data = in_file.read()
output, error_level, deps, shortcode_deps = compiler.compile_string(data, source, True)
rstcss_path = resource_filename('nikola', 'data/themes/base/assets/css/rst.css')
with io.open(rstcss_path, "r", encoding="utf8") as fh:
rstcss = fh.read()
template_path = resource_filename('nikola', 'plugins/command/rst2html/rst2html.tmpl')
template = Template(filename=template_path)
template_output = template.render(rstcss=rstcss, output=output)
parser = lxml.html.HTMLParser(remove_blank_text=True)
doc = lxml.html.document_fromstring(template_output, parser)
html = b'<!DOCTYPE html>\n' + lxml.html.tostring(doc, encoding='utf8', method='html', pretty_print=True)
print(html.decode('utf-8'))
if error_level < 3:
return 0
else:
return 1
|
{
"content_hash": "3b3137db3f6be890357036c84fc10c77",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 112,
"avg_line_length": 40.666666666666664,
"alnum_prop": 0.6601092896174864,
"repo_name": "andredias/nikola",
"id": "6e6aa3bd1d87bf2e923da15cb0d9526a0b762d73",
"size": "2971",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nikola/plugins/command/rst2html/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "18794"
},
{
"name": "JavaScript",
"bytes": "24667"
},
{
"name": "Python",
"bytes": "1169986"
},
{
"name": "Shell",
"bytes": "11393"
},
{
"name": "XSLT",
"bytes": "3619"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import json
import struct
import re
import base64
import httplib
import sys
settings = {}
class BitcoinRPC:
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def execute(self, obj):
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print("JSON-RPC: no response", file=sys.stderr)
return None
body = resp.read()
resp_obj = json.loads(body)
return resp_obj
@staticmethod
def build_request(idx, method, params):
obj = { 'version' : '1.1',
'method' : method,
'id' : idx }
if params is None:
obj['params'] = []
else:
obj['params'] = params
return obj
@staticmethod
def response_is_error(resp_obj):
return 'error' in resp_obj and resp_obj['error'] is not None
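# A request built by build_request() looks like
#   {'version': '1.1', 'method': 'getblockhash', 'id': 0, 'params': [1000]}
# and the daemon answers a batch with a single JSON array of results.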
def get_block_hashes(settings, max_blocks_per_call=10000):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpassword'])
height = settings['min_height']
while height < settings['max_height']+1:
num_blocks = min(settings['max_height']+1-height, max_blocks_per_call)
batch = []
for x in range(num_blocks):
batch.append(rpc.build_request(x, 'getblockhash', [height + x]))
reply = rpc.execute(batch)
for x,resp_obj in enumerate(reply):
if rpc.response_is_error(resp_obj):
print('JSON-RPC: error at height', height+x, ': ', resp_obj['error'], file=sys.stderr)
exit(1)
assert(resp_obj['id'] == x) # assume replies are in-sequence
print(resp_obj['result'])
height += num_blocks
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: linearize-hashes.py CONFIG-FILE")
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
        m = re.search(r'^\s*#', line)
if m:
continue
# parse key=value lines
        m = re.search(r'^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 7332
if 'min_height' not in settings:
settings['min_height'] = 0
if 'max_height' not in settings:
settings['max_height'] = 313000
if 'rpcuser' not in settings or 'rpcpassword' not in settings:
print("Missing username and/or password in cfg file", file=stderr)
sys.exit(1)
settings['port'] = int(settings['port'])
settings['min_height'] = int(settings['min_height'])
settings['max_height'] = int(settings['max_height'])
get_block_hashes(settings)
|
{
"content_hash": "b8687bc4a4e757601675e70857cb9737",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 90,
"avg_line_length": 26.365384615384617,
"alnum_prop": 0.6542669584245077,
"repo_name": "fullcoins/fullcoin",
"id": "b4e0dabd92c16dde007bab98c1ecf3bfbe75f9a3",
"size": "3036",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contrib/linearize/linearize-hashes.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "7639"
},
{
"name": "C",
"bytes": "320255"
},
{
"name": "C++",
"bytes": "3590528"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "HTML",
"bytes": "50621"
},
{
"name": "Java",
"bytes": "2100"
},
{
"name": "M4",
"bytes": "142321"
},
{
"name": "Makefile",
"bytes": "83376"
},
{
"name": "Objective-C",
"bytes": "3283"
},
{
"name": "Objective-C++",
"bytes": "7238"
},
{
"name": "Protocol Buffer",
"bytes": "2308"
},
{
"name": "Python",
"bytes": "221553"
},
{
"name": "QMake",
"bytes": "2019"
},
{
"name": "Roff",
"bytes": "18043"
},
{
"name": "Shell",
"bytes": "44913"
}
],
"symlink_target": ""
}
|
from requestbuilder import Arg
from requestbuilder.mixins import TabifyingMixin
from euca2ools.commands.ec2 import EC2Request
class DescribeInstanceTypes(EC2Request, TabifyingMixin):
DESCRIPTION = '[Eucalyptus only] Show information about instance types'
ARGS = [Arg('InstanceType', metavar='INSTANCETYPE', nargs='*',
help='limit results to specific instance types'),
Arg('--by-zone', dest='by_zone', action='store_true',
route_to=None,
help='show info for each availability zone separately'),
Arg('--show-capacity', dest='Availability', action='store_true',
help='show info about instance capacity')]
LIST_TAGS = ['instanceTypeDetails', 'availability']
def configure(self):
EC2Request.configure(self)
if self.args.get('by_zone', False):
self.params['Availability'] = True
def print_result(self, result):
vmtype_names = [] # Use a list since py2.6 lacks OrderedDict
vmtypes = {} # vmtype -> info and total capacity
zones = {} # zone -> vmtype -> info and zone capacity
for vmtype in result.get('instanceTypeDetails', []):
vmtype_names.append(vmtype['name'])
vmtypes[vmtype['name']] = {'cpu': vmtype.get('cpu'),
'memory': vmtype.get('memory'),
'disk': vmtype.get('disk'),
'available': 0,
'max': 0}
if self.params.get('Availability', False):
for zone in vmtype.get('availability', []):
available = int(zone.get('available', 0))
max_ = int(zone.get('max', 0))
vmtypes[vmtype['name']]['available'] += available
vmtypes[vmtype['name']]['max'] += max_
zones.setdefault(zone['zoneName'], {})
zones[zone['zoneName']][vmtype['name']] = {
'cpu': vmtype.get('cpu'),
'memory': vmtype.get('memory'),
'disk': vmtype.get('disk'),
'available': available,
'max': max_}
if self.args.get('by_zone'):
for zone, zone_vmtypes in sorted(zones.iteritems()):
print self.tabify(('AVAILABILITYZONE', zone))
self._print_vmtypes(zone_vmtypes, vmtype_names)
print
else:
self._print_vmtypes(vmtypes, vmtype_names)
def _print_vmtypes(self, vmtypes, vmtype_names):
# Fields and column headers
fields = {'name': 'Name',
'cpu': 'CPUs',
'memory': 'Memory (MiB)',
'disk': 'Disk (GiB)',
'used': 'Used',
'total': 'Total',
'used_pct': 'Used %'}
field_lengths = dict((field, len(header)) for field, header
in fields.iteritems())
vmtype_infos = []
for vmtype_name in vmtype_names:
total = int(vmtypes[vmtype_name].get('max', 0))
used = total - int(vmtypes[vmtype_name].get('available', 0))
if total != 0:
used_pct = '{0:.0%}'.format(float(used) / float(total))
else:
used_pct = ''
vmtype_info = {'name': vmtype_name,
'cpu': vmtypes[vmtype_name].get('cpu'),
'memory': vmtypes[vmtype_name].get('memory'),
'disk': vmtypes[vmtype_name].get('disk'),
'used': used,
'total': total,
'used_pct': used_pct}
vmtype_infos.append(vmtype_info)
for field in fields:
if len(str(vmtype_info[field])) > field_lengths[field]:
field_lengths[field] = len(str(vmtype_info[field]))
type_template = ('{{name:<{name}}} {{cpu:>{cpu}}} '
'{{memory:>{memory}}} {{disk:>{disk}}}')
if self.args.get('Availability', False):
type_template += (' {{used:>{used}}} / {{total:>{total}}} '
'{{used_pct:>{used_pct}}}')
type_template = type_template.format(**field_lengths)
print 'INSTANCETYPE\t', type_template.format(**fields)
for vmtype_info in vmtype_infos:
print 'INSTANCETYPE\t', type_template.format(**vmtype_info)
|
{
"content_hash": "c6fad5f14d1174de4e3e633e45a3b89f",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 76,
"avg_line_length": 48.1578947368421,
"alnum_prop": 0.4926775956284153,
"repo_name": "nagyistoce/euca2ools",
"id": "69c11a2dd43df4b07593a56410194e854d77944c",
"size": "5922",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "euca2ools/commands/ec2/describeinstancetypes.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "1230322"
},
{
"name": "Shell",
"bytes": "872"
}
],
"symlink_target": ""
}
|
from ..utils import Scraper
from .exams_helpers import *
from bs4 import BeautifulSoup
from collections import OrderedDict
from datetime import datetime
from pytz import timezone
import re
class UTMExams:
"""A scraper for UTM exams."""
host = 'https://m.utm.utoronto.ca/'
@staticmethod
def scrape(location='.'):
Scraper.logger.info('UTMExams initialized.')
depts = UTMExams.get_page_links('list_dept.php?type=2')
Scraper.logger.info('Got departments (1/3).')
courses = []
for dept in depts:
courses.extend(UTMExams.get_page_links(dept))
Scraper.logger.info('Got courses (2/3).')
exams = UTMExams.retrieve_exams(courses)
Scraper.logger.info('Got exams (3/3).')
for id_, doc in exams.items():
Scraper.save_json(doc, location, id_)
Scraper.logger.info('UTMExams completed.')
@staticmethod
def retrieve_exams(courses):
exams = OrderedDict()
for course in courses:
headers = {
'Referer': UTMExams.host
}
html = Scraper.get('%s%s' % (UTMExams.host, course),
headers=headers)
soup = BeautifulSoup(html, 'html.parser')
course_code = soup.find('div', class_='title').text.strip()
lecture_code = None
# some course names include lecture code (see CHI200Y5Y)
if ' ' in course_code:
course_code, lecture_code = course_code.split(' ')
data = [br.previous_sibling.string.strip()
for br in soup.find('div', class_='info').find_all('br')]
date = data[0].split(': ')[1]
exam_id, course_id = get_course_id(course_code, date)
period = get_period(date)
if not exam_id or not period:
continue
start = convert_time(data[1].split(': ')[1])
end = convert_time(data[2].split(': ')[1])
duration = end - start
sections = [UTMExams.parse_sections(room.split(': ')[1])
for room in [x for x in data[3:] if 'Room:' in x]]
# append lecture code to section range if it exists
for i in range(len(sections)):
sections[i]['lecture'] = lecture_code or ''
doc = OrderedDict([
('id', exam_id),
('course_id', course_id),
('course_code', course_code),
('campus', 'UTM'),
('period', period),
('date', date),
('start_time', start),
('end_time', end),
('duration', duration),
('sections', [])
])
if exam_id not in exams:
exams[exam_id] = doc
for section in sections:
exams[exam_id]['sections'].append(OrderedDict([
('lecture_code', section['lecture']),
('exam_section', section['section']),
('location', section['room'])
]))
return exams
@staticmethod
def get_page_links(endpoint):
headers = {
'Referer': UTMExams.host
}
html = Scraper.get('%s%s' % (UTMExams.host, endpoint),
headers=headers)
soup = BeautifulSoup(html, 'html.parser')
return [li.find('a')['href']
for li in soup.find('ul', class_='link').find_all('li')]
@staticmethod
def parse_sections(room):
section = ''
if '(' in room:
room, section = [x.strip()
for x in re.sub('[()]', ' ', room).split(' ')]
return {'section': section, 'room': room}
|
{
"content_hash": "ecba1cca45d13e8296e909b3a683636c",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 77,
"avg_line_length": 32.20338983050848,
"alnum_prop": 0.5036842105263157,
"repo_name": "cobalt-uoft/uoft-scrapers",
"id": "e42e5450112d45c170a687e43dcb57008ce5d0b7",
"size": "3800",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "uoftscrapers/scrapers/exams/utm.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "106873"
}
],
"symlink_target": ""
}
|
from flask import Flask
from flask import render_template
from config import DB
from db_layer import MyDataBaseLayer
#####################################################################
DB_Conn = None  # database connection, set by run_webpanel()
app = Flask(__name__)
@app.route("/")
def app_root():
global DB_Conn
return render_template("root.html",name=123)
def run_webpanel(dbobj):
global DB_Conn
DB_Conn = dbobj
app.run()
|
{
"content_hash": "18d070b149a9f793800e3e65a56ad4cc",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 69,
"avg_line_length": 20.35,
"alnum_prop": 0.5675675675675675,
"repo_name": "thomasvincent/utilities",
"id": "0c27888289f5607f38cc1af1960a8581f95cbdd8",
"size": "407",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "RSS-IRC-Daemon/webpanel.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "9845"
},
{
"name": "CSS",
"bytes": "19867"
},
{
"name": "Java",
"bytes": "7272"
},
{
"name": "JavaScript",
"bytes": "10581"
},
{
"name": "Python",
"bytes": "2411624"
},
{
"name": "Shell",
"bytes": "7495"
}
],
"symlink_target": ""
}
|
class Inventory:
def __init__(self):
self.slots = []
def add_item(self, item):
self.slots.append(item)
class SortedInventory(Inventory):
def add_item(self, item):
super().add_item(item)
self.slots.sort()
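# Minimal usage sketch:
#   inv = SortedInventory()
#   inv.add_item('sword')
#   inv.add_item('apple')
#   inv.slots  # -> ['apple', 'sword'], re-sorted on every insert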
|
{
"content_hash": "d23c00a6d5ef3617d56e21b72d19400c",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 33,
"avg_line_length": 19.071428571428573,
"alnum_prop": 0.550561797752809,
"repo_name": "CaseyNord/Treehouse",
"id": "7cc0f9de6ef499e7ae53b19025583f8cd24a7738",
"size": "292",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Object Oriented Python/inventory.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "33012"
},
{
"name": "CSS",
"bytes": "46954"
},
{
"name": "HTML",
"bytes": "78185"
},
{
"name": "JavaScript",
"bytes": "3217811"
},
{
"name": "Python",
"bytes": "133602"
}
],
"symlink_target": ""
}
|
"""
Implement slices and various slice computations.
"""
import itertools
from llvmlite import ir
from numba.six.moves import zip_longest
from numba import cgutils, types, typing
from .imputils import (lower_builtin, lower_getattr,
iternext_impl, impl_ret_borrowed,
impl_ret_new_ref, impl_ret_untracked)
def fix_index(builder, idx, size):
"""
Fix negative index by adding *size* to it. Positive
indices are left untouched.
"""
is_negative = builder.icmp_signed('<', idx, ir.Constant(size.type, 0))
wrapped_index = builder.add(idx, size)
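    # e.g. for size == 10, idx == -2 becomes 8 while idx == 3 is unchanged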
return builder.select(is_negative, wrapped_index, idx)
def fix_slice(builder, slice, size):
"""
Fix *slice* start and stop to be valid (inclusive and exclusive, resp)
indexing bounds for a sequence of the given *size*.
"""
# See PySlice_GetIndicesEx()
zero = ir.Constant(size.type, 0)
minus_one = ir.Constant(size.type, -1)
def fix_bound(bound_name, lower_repl, upper_repl):
bound = getattr(slice, bound_name)
bound = fix_index(builder, bound, size)
# Store value
setattr(slice, bound_name, bound)
# Still negative? => clamp to lower_repl
underflow = builder.icmp_signed('<', bound, zero)
with builder.if_then(underflow, likely=False):
setattr(slice, bound_name, lower_repl)
# Greater than size? => clamp to upper_repl
overflow = builder.icmp_signed('>=', bound, size)
with builder.if_then(overflow, likely=False):
setattr(slice, bound_name, upper_repl)
with builder.if_else(cgutils.is_neg_int(builder, slice.step)) as (if_neg_step, if_pos_step):
with if_pos_step:
# < 0 => 0; >= size => size
fix_bound('start', zero, size)
fix_bound('stop', zero, size)
with if_neg_step:
# < 0 => -1; >= size => size - 1
lower = minus_one
upper = builder.add(size, minus_one)
fix_bound('start', lower, upper)
fix_bound('stop', lower, upper)
def get_slice_length(builder, slicestruct):
"""
Given a slice, compute the number of indices it spans, i.e. the
number of iterations that for_range_slice() will execute.
Pseudo-code:
assert step != 0
if step > 0:
if stop <= start:
return 0
else:
return (stop - start - 1) // step + 1
else:
if stop >= start:
return 0
else:
return (stop - start + 1) // step + 1
(see PySlice_GetIndicesEx() in CPython)
"""
start = slicestruct.start
stop = slicestruct.stop
step = slicestruct.step
one = ir.Constant(start.type, 1)
zero = ir.Constant(start.type, 0)
is_step_negative = cgutils.is_neg_int(builder, step)
delta = builder.sub(stop, start)
# Nominal case
pos_dividend = builder.sub(delta, one)
neg_dividend = builder.add(delta, one)
dividend = builder.select(is_step_negative, neg_dividend, pos_dividend)
nominal_length = builder.add(one, builder.sdiv(dividend, step))
# Catch zero length
is_zero_length = builder.select(is_step_negative,
builder.icmp_signed('>=', delta, zero),
builder.icmp_signed('<=', delta, zero))
# Clamp to 0 if is_zero_length
return builder.select(is_zero_length, zero, nominal_length)
def get_slice_bounds(builder, slicestruct):
"""
Return the [lower, upper) indexing bounds of a slice.
"""
start = slicestruct.start
stop = slicestruct.stop
zero = start.type(0)
one = start.type(1)
# This is a bit pessimal, e.g. it will return [1, 5) instead
# of [1, 4) for `1:5:2`
is_step_negative = builder.icmp_signed('<', slicestruct.step, zero)
lower = builder.select(is_step_negative,
builder.add(stop, one), start)
upper = builder.select(is_step_negative,
builder.add(start, one), stop)
return lower, upper
def fix_stride(builder, slice, stride):
"""
Fix the given stride for the slice's step.
"""
return builder.mul(slice.step, stride)
def guard_invalid_slice(context, builder, typ, slicestruct):
"""
Guard against *slicestruct* having a zero step (and raise ValueError).
"""
if typ.has_step:
cgutils.guard_null(context, builder, slicestruct.step,
(ValueError, "slice step cannot be zero"))
def get_defaults(context):
"""
Get the default values for a slice's members:
(start for positive step, start for negative step,
stop for positive step, stop for negative step, step)
"""
maxint = (1 << (context.address_size - 1)) - 1
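    # e.g. on a 64-bit target: (0, 2**63 - 1, 2**63 - 1, -2**63, 1),
    # mirroring CPython's defaults for omitted slice members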
return (0, maxint, maxint, - maxint - 1, 1)
#---------------------------------------------------------------------------
# The slice structure
@lower_builtin(slice, types.VarArg(types.Any))
def slice_constructor_impl(context, builder, sig, args):
default_start_pos, default_start_neg, default_stop_pos, default_stop_neg, default_step = \
[context.get_constant(types.intp, x) for x in get_defaults(context)]
# Fetch non-None arguments
slice_args = [None] * 3
for i, (ty, val) in enumerate(zip(sig.args, args)):
if ty is types.none:
slice_args[i] = None
else:
slice_args[i] = val
# Fill omitted arguments
def get_arg_value(i, default):
val = slice_args[i]
if val is None:
return default
else:
return val
step = get_arg_value(2, default_step)
is_step_negative = builder.icmp_signed('<', step,
context.get_constant(types.intp, 0))
default_stop = builder.select(is_step_negative,
default_stop_neg, default_stop_pos)
default_start = builder.select(is_step_negative,
default_start_neg, default_start_pos)
stop = get_arg_value(1, default_stop)
start = get_arg_value(0, default_start)
ty = sig.return_type
sli = context.make_helper(builder, sig.return_type)
sli.start = start
sli.stop = stop
sli.step = step
res = sli._getvalue()
return impl_ret_untracked(context, builder, sig.return_type, res)
@lower_getattr(types.SliceType, "start")
def slice_start_impl(context, builder, typ, value):
sli = context.make_helper(builder, typ, value)
return sli.start
@lower_getattr(types.SliceType, "stop")
def slice_stop_impl(context, builder, typ, value):
sli = context.make_helper(builder, typ, value)
return sli.stop
@lower_getattr(types.SliceType, "step")
def slice_step_impl(context, builder, typ, value):
if typ.has_step:
sli = context.make_helper(builder, typ, value)
return sli.step
else:
return context.get_constant(types.intp, 1)
|
{
"content_hash": "e2c0efb901deb0fc7744bb050d7fa40a",
"timestamp": "",
"source": "github",
"line_count": 209,
"max_line_length": 96,
"avg_line_length": 33.5311004784689,
"alnum_prop": 0.5971746575342466,
"repo_name": "jriehl/numba",
"id": "cc81883a1dbad992eb84503147a88f05fcab4fba",
"size": "7008",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "numba/targets/slicing.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "7023"
},
{
"name": "C",
"bytes": "657637"
},
{
"name": "C++",
"bytes": "49158"
},
{
"name": "Cuda",
"bytes": "214"
},
{
"name": "GDB",
"bytes": "101"
},
{
"name": "HTML",
"bytes": "3464"
},
{
"name": "Jupyter Notebook",
"bytes": "110326"
},
{
"name": "Python",
"bytes": "6611899"
},
{
"name": "Shell",
"bytes": "7290"
}
],
"symlink_target": ""
}
|
""" Functions for handling the execution of a pipeline graph
"""
import logging
from dask import delayed
logger = logging.getLogger(__name__)
def delay_pipeline(pipeline, pipe):
""" Return a ``dask.delayed`` pipeline ready to execute
Args:
pipeline (list[Task]): A list of curried ``Task`` ready to be
run using data from ``pipe``. This list may be constructed as the
output of :ref:`setup_pipeline`, for example.
pipe (dict): Dictionary storing ``data`` and ``record`` information.
Returns:
dask.delayed: A delayed pipeline ready to be executed
"""
_pipeline = delayed(pipeline[0].curry())(pipe)
for task in pipeline[1:]:
_pipeline = delayed(task.curry())(_pipeline)
return _pipeline
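# A minimal usage sketch (hypothetical Task objects assumed):
#
#   ready = delay_pipeline([task_a, task_b], {'data': {}, 'record': {}})
#   result = ready.compute()  # runs task_a, then task_b on its output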
|
{
"content_hash": "faf67ebd5a0d665708fba8240e26fbd0",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 77,
"avg_line_length": 29.846153846153847,
"alnum_prop": 0.6481958762886598,
"repo_name": "c11/yatsm",
"id": "027515a48789ee0e16575886a79119e41f4871d1",
"size": "776",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "yatsm/pipeline/_exec.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "72629"
},
{
"name": "Python",
"bytes": "371164"
},
{
"name": "Shell",
"bytes": "2391"
}
],
"symlink_target": ""
}
|
# (c) 2009-2014 Martin Wendt and contributors; see WsgiDAV https://github.com/mar10/wsgidav
# Original PyFileServer (c) 2005 Ho Chun Wei.
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
"""
WSGI middleware that handles GET requests on collections to display directories.
See `Developers info`_ for more information about the WsgiDAV architecture.
.. _`Developers info`: http://wsgidav.readthedocs.org/en/latest/develop.html
"""
from wsgidav.dav_error import DAVError, HTTP_OK, HTTP_MEDIATYPE_NOT_SUPPORTED
from wsgidav.version import __version__
from middleware import BaseMiddleware
import os
import sys
import urllib
import util
__docformat__ = "reStructuredText"
msOfficeTypeToExtMap = {
"excel": ("xls", "xlt", "xlm", "xlsm", "xlsx", "xltm", "xltx"),
"powerpoint": ("pps", "ppt", "pptm", "pptx", "potm", "potx", "ppsm", "ppsx"),
"word": ("doc", "dot", "docm", "docx", "dotm", "dotx"),
"visio": ("vsd", "vsdm", "vsdx", "vstm", "vstx"),
}
msOfficeExtToTypeMap = {}
for t, el in msOfficeTypeToExtMap.iteritems():
for e in el:
msOfficeExtToTypeMap[e] = t
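# e.g. msOfficeExtToTypeMap["docx"] == "word"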
PAGE_CSS = """\
img { border: 0; padding: 0 2px; vertical-align: text-bottom; }
th, td { padding: 2px 20px 2px 2px; }
th { text-align: left; }
th.right { text-align: right; }
td { font-family: monospace; vertical-align: bottom; white-space: pre; }
td.right { text-align: right; }
table { border: 0; }
a.symlink { font-style: italic; }
p.trailer { font-size: smaller; }
"""
PAGE_SCRIPT = """\
function onLoad() {
// console.log("loaded.");
}
/* Event delegation handler for clicks on a-tags with class 'msoffice'. */
function onClickTable(event) {
var target = event.target || event.srcElement,
href = target.href;
if( href && target.className === "msoffice" ){
if( openWithSharePointPlugin(href) ){
// prevent default processing
return false;
}
}
}
function openWithSharePointPlugin(url) {
var res = false,
control = null,
isFF = false;
// Get the most recent version of the SharePoint plugin
if( window.ActiveXObject ){
try {
control = new ActiveXObject("SharePoint.OpenDocuments.3"); // Office 2007
} catch(e) {
try {
control = new ActiveXObject("SharePoint.OpenDocuments.2"); // Office 2003
} catch(e2) {
try {
control = new ActiveXObject("SharePoint.OpenDocuments.1"); // Office 2000/XP
} catch(e3) {
window.console && console.warn("Could not create ActiveXObject('SharePoint.OpenDocuments'). Check your browsers security settings.");
return false;
}
}
}
if( !control ){
window.console && console.warn("Cannot instantiate the required ActiveX control to open the document. This is most likely because you do not have Office installed or you have an older version of Office.");
}
} else {
window.console && console.log("Non-IE: using FFWinPlugin Plug-in...");
control = document.getElementById("winFirefoxPlugin");
isFF = true;
}
try {
// window.console && console.log("SharePoint.OpenDocuments.EditDocument('" + url + "')...");
res = control.EditDocument(url);
// window.console && console.log("SharePoint.OpenDocuments.EditDocument('" + url + "')... res = ", res);
if( !res ){
window.console && console.warn("SharePoint.OpenDocuments.EditDocument('" + url + "') returned false.");
}
} catch (e){
window.console && console.warn("SharePoint.OpenDocuments.EditDocument('" + url + "') failed.", e);
}
return res;
}
"""
def escapeName(name):
"""
Returns the given text with ampersands, quotes and angle brackets encoded
for use in HTML.
"""
return name.replace('&', '&').replace('<', '<').replace('>', '>').replace('"', '"').replace("'", ''')
class WsgiDavDirBrowser(BaseMiddleware):
"""WSGI middleware that handles GET requests on collections to display directories."""
def __init__(self, application, config):
self._application = application
self._verbose = 2
def __call__(self, environ, start_response):
path = environ["PATH_INFO"]
davres = None
if environ["wsgidav.provider"]:
davres = environ["wsgidav.provider"].getResourceInst(path, environ)
if environ["REQUEST_METHOD"] in ("GET", "HEAD") and davres and davres.isCollection:
# if "mozilla" not in environ.get("HTTP_USER_AGENT").lower():
# # issue 14: Nautilus sends GET on collections
# # http://code.google.com/p/wsgidav/issues/detail?id=14
# util.status("Directory browsing disabled for agent '%s'" % environ.get("HTTP_USER_AGENT"))
# self._fail(HTTP_NOT_IMPLEMENTED)
# return self._application(environ, start_response)
if util.getContentLength(environ) != 0:
self._fail(HTTP_MEDIATYPE_NOT_SUPPORTED,
"The server does not handle any body content.")
if environ["REQUEST_METHOD"] == "HEAD":
return util.sendStatusResponse(environ, start_response, HTTP_OK)
# Support DAV mount (http://www.ietf.org/rfc/rfc4709.txt)
dirConfig = environ["wsgidav.config"].get("dir_browser", {})
if dirConfig.get("davmount") and "davmount" in environ.get("QUERY_STRING"):
# collectionUrl = davres.getHref()
collectionUrl = util.makeCompleteUrl(environ)
collectionUrl = collectionUrl.split("?")[0]
res = """
<dm:mount xmlns:dm="http://purl.org/NET/webdav/mount">
<dm:url>%s</dm:url>
</dm:mount>""" % (collectionUrl)
# TODO: support <dm:open>%s</dm:open>
start_response("200 OK", [("Content-Type", "application/davmount+xml"),
("Content-Length", str(len(res))),
("Cache-Control", "private"),
("Date", util.getRfc1123Time()),
])
return [ res ]
# Profile calls
# if True:
# from cProfile import Profile
# profile = Profile()
# profile.runcall(self._listDirectory, environ, start_response)
# # sort: 0:"calls",1:"time", 2: "cumulative"
# profile.print_stats(sort=2)
return self._listDirectory(davres, environ, start_response)
return self._application(environ, start_response)
@staticmethod
def isSuitable(config):
return config.get("dir_browser") and config["dir_browser"].get("enable", True)
def _fail(self, value, contextinfo=None, srcexception=None, errcondition=None):
"""Wrapper to raise (and log) DAVError."""
e = DAVError(value, contextinfo, srcexception, errcondition)
if self._verbose >= 2:
print >>sys.stdout, "Raising DAVError %s" % e.getUserInfo()
raise e
def _listDirectory(self, davres, environ, start_response):
"""
@see: http://www.webdav.org/specs/rfc4918.html#rfc.section.9.4
"""
assert davres.isCollection
dirConfig = environ["wsgidav.config"].get("dir_browser", {})
displaypath = urllib.unquote(davres.getHref())
isReadOnly = environ["wsgidav.provider"].isReadOnly()
trailer = dirConfig.get("response_trailer")
if trailer:
trailer = trailer.replace("${version}",
"<a href='https://github.com/mar10/wsgidav/'>WsgiDAV/%s</a>" % __version__)
trailer = trailer.replace("${time}", util.getRfc1123Time())
else:
trailer = ("Seafile WebDAV Server, based on <a href='https://github.com/mar10/wsgidav/'>WsgiDAV/%s</a> - %s"
% (__version__, util.getRfc1123Time()))
html = []
html.append("<!DOCTYPE HTML PUBLIC '-//W3C//DTD HTML 4.01//EN' 'http://www.w3.org/TR/html4/strict.dtd'>");
html.append("<html>")
html.append("<head>")
html.append("<meta http-equiv='Content-Type' content='text/html; charset=UTF-8'>")
html.append("<meta name='generator' content='WsgiDAV %s'>" % __version__)
html.append("<title>WsgiDAV - Index of %s </title>" % displaypath)
html.append("<script type='text/javascript'>%s</script>" % PAGE_SCRIPT)
html.append("<style type='text/css'>%s</style>" % PAGE_CSS)
# Special CSS to enable MS Internet Explorer behaviour
if dirConfig.get("ms_mount"):
html.append("<style type='text/css'> A {behavior: url(#default#AnchorClick);} </style>")
if dirConfig.get("ms_sharepoint_plugin"):
html.append("<object id='winFirefoxPlugin' type='application/x-sharepoint' width='0' height='0' style=''visibility: hidden;'></object>")
html.append("</head>")
html.append("<body onload='onLoad()'>")
# Title
html.append("<h1>Index of %s</h1>" % displaypath)
# Add DAV-Mount link and Web-Folder link
links = []
if dirConfig.get("davmount"):
links.append("<a title='Open this folder in a WebDAV client.' href='%s?davmount'>Mount</a>" % util.makeCompleteUrl(environ))
if dirConfig.get("ms_mount"):
links.append("<a title='Open as Web Folder (requires Microsoft Internet Explorer)' href='' FOLDER='%s'>Open as Web Folder</a>" % util.makeCompleteUrl(environ))
# html.append("<a href='' FOLDER='%ssetup.py'>Open setup.py as WebDAV</a>" % util.makeCompleteUrl(environ))
if links:
html.append("<p>%s</p>" % " – ".join(links))
html.append("<hr>")
# Listing
html.append("<table onclick='return onClickTable(event)'>")
html.append("<thead>")
html.append("<tr><th>Name</th> <th>Type</th> <th class='right'>Size</th> <th class='right'>Last modified</th> </tr>")
html.append("</thead>")
html.append("<tbody>")
if davres.path in ("", "/"):
html.append("<tr><td>Top level share</td> <td></td> <td></td> <td></td> </tr>")
else:
parentUrl = util.getUriParent(davres.getHref())
html.append("<tr><td><a href='" + parentUrl + "'>Parent Directory</a></td> <td></td> <td></td> <td></td> </tr>")
# Ask collection for member info list
dirInfoList = davres.getDirectoryInfo()
if dirInfoList is None:
# No pre-build info: traverse members
dirInfoList = []
childList = davres.getDescendants(depth="1", addSelf=False)
for res in childList:
di = res.getDisplayInfo()
href = res.getHref()
infoDict = {"href": href,
"class": "",
"displayName": escapeName(res.getDisplayName()),
"lastModified": res.getLastModified(),
"isCollection": res.isCollection,
"contentLength": res.getContentLength(),
"displayType": di.get("type"),
"displayTypeComment": di.get("typeComment"),
}
if not isReadOnly and not res.isCollection:
ext = os.path.splitext(href)[1].lstrip(".").lower()
officeType = msOfficeExtToTypeMap.get(ext)
if officeType:
# print "OT", officeType
# print "OT", dirConfig
if dirConfig.get("ms_sharepoint_plugin"):
infoDict["class"] = "msoffice"
elif dirConfig.get("ms_sharepoint_urls"):
infoDict["href"] = "ms-%s:ofe|u|%s" % (officeType, href)
dirInfoList.append(infoDict)
#
for infoDict in dirInfoList:
lastModified = infoDict.get("lastModified")
if lastModified is None:
infoDict["strModified"] = ""
else:
infoDict["strModified"] = util.getRfc1123Time(lastModified)
infoDict["strSize"] = "-"
if not infoDict.get("isCollection"):
contentLength = infoDict.get("contentLength")
if contentLength is not None:
infoDict["strSize"] = util.byteNumberString(contentLength)
html.append("""\
<tr><td><a href="%(href)s" class="%(class)s">%(displayName)s</a></td>
<td>%(displayType)s</td>
<td class='right'>%(strSize)s</td>
<td class='right'>%(strModified)s</td></tr>""" % infoDict)
html.append("</tbody>")
html.append("</table>")
html.append("<hr>")
if "http_authenticator.username" in environ:
if environ.get("http_authenticator.username"):
html.append("<p>Authenticated user: '%s', realm: '%s'.</p>"
% (environ.get("http_authenticator.username"),
environ.get("http_authenticator.realm")))
# else:
# html.append("<p>Anonymous</p>")
if trailer:
html.append("<p class='trailer'>%s</p>" % trailer)
html.append("</body></html>")
body = "\n".join(html)
start_response("200 OK", [("Content-Type", "text/html"),
("Content-Length", str(len(body))),
("Date", util.getRfc1123Time()),
])
return [ body ]
|
{
"content_hash": "a7ce351fe4ebf806fec1c9dcfd79e1af",
"timestamp": "",
"source": "github",
"line_count": 333,
"max_line_length": 217,
"avg_line_length": 43.42042042042042,
"alnum_prop": 0.5363441455149042,
"repo_name": "saukrIppl/seahub",
"id": "8b20fdf75faabb2dbfaccffde0da6cd51364dcb3",
"size": "14459",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "thirdpart/wsgidav/dir_browser.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "329387"
},
{
"name": "CoffeeScript",
"bytes": "21"
},
{
"name": "HTML",
"bytes": "722728"
},
{
"name": "Java",
"bytes": "307193"
},
{
"name": "JavaScript",
"bytes": "7293422"
},
{
"name": "Makefile",
"bytes": "1097"
},
{
"name": "PLpgSQL",
"bytes": "19598"
},
{
"name": "Python",
"bytes": "9050702"
},
{
"name": "Shell",
"bytes": "9695"
}
],
"symlink_target": ""
}
|
from django.conf.urls import url
from .views import *
urlpatterns = [
# Add bin data
    url(r'^add_bin_data/', add_bin_data, name="add_bin_data"),
]
|
{
"content_hash": "993ccbe543ff35edc1d0e57196eccb87",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 63,
"avg_line_length": 16.3,
"alnum_prop": 0.6196319018404908,
"repo_name": "bath-hacker/binny",
"id": "41c9c420f1efdcede623bf44627e217a8fe27499",
"size": "163",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "binny/collector/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "774"
},
{
"name": "HTML",
"bytes": "7174"
},
{
"name": "JavaScript",
"bytes": "7071"
},
{
"name": "Python",
"bytes": "21331"
}
],
"symlink_target": ""
}
|
from __future__ import division
import sqlite3
from bisect import bisect_left
import plotly.plotly as py
from plotly.graph_objs import Scatter, Figure, Layout, Data, YAxis, XAxis
from feemodel.util import DataSample
from feemodel.app.predict import PVALS_DBFILE
from feemodeldata.plotting.plotrrd import BASEDIR
def get_waits(dbfile=PVALS_DBFILE):
db = None
try:
db = sqlite3.connect(dbfile)
txs = db.execute("select feerate, waittime from txs").fetchall()
blockheights = db.execute("select blockheight from txs").fetchall()
blockheights = [tx[0] for tx in blockheights]
return txs, min(blockheights), max(blockheights)
finally:
if db is not None:
db.close()
def get_txgroups(txs, feerates=(10000, 15000, 20000, 50000)):
"""Sort the txs by feerate."""
txs.sort()
txfeerates, _dum = zip(*txs)
idxs = [bisect_left(txfeerates, feerate) for feerate in feerates]
idxs.insert(0, 0)
print("idxs are {}.".format(idxs))
txgroups = [txs[idxs[i]:idxs[i+1]] for i in range(len(idxs)-1)]
return txgroups
def get_traces(txgroups):
traces = []
for txgroup in txgroups:
feerates, waits = zip(*txgroup)
minfeerate = min(feerates)
maxfeerate = max(feerates)
waitdata = DataSample(waits)
percentilepts = [i / 100 for i in range(1, 99)]
percentiles = [waitdata.get_percentile(p) for p in percentilepts]
percentilepts.insert(0, 0)
percentiles.insert(0, 0)
trace = Scatter(
x=percentiles,
y=percentilepts,
name="{} <= feerate <= {}".format(minfeerate, maxfeerate)
)
traces.append(trace)
return traces
def plotwaits(traces, minheight, maxheight, basedir=BASEDIR):
title = ("Empirical CDF of waittimes from blocks {}-{}".
format(minheight, maxheight))
data = Data(traces)
layout = Layout(
title=title,
yaxis=YAxis(
title="Empirical CDF",
range=[0, 1]
),
xaxis=XAxis(
title="Wait time (s)",
rangemode="tozero",
type="log"
),
hovermode="closest"
)
fig = Figure(data=data, layout=layout)
basedir = basedir if basedir.endswith('/') else basedir + '/'
filename = basedir + "waits_cdf"
return py.plot(fig, filename=filename, auto_open=False)
def main(basedir=BASEDIR):
txs, minheight, maxheight = get_waits(PVALS_DBFILE)
print("Got {} txs.".format(len(txs)))
txgroups = get_txgroups(txs)
print("Got txgroups.")
traces = get_traces(txgroups)
print("Got traces.")
url = plotwaits(traces, minheight, maxheight, basedir=basedir)
print(url)
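if __name__ == '__main__':
    # Assumed entry point when this module is run directly.
    main()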
|
{
"content_hash": "1c596269e2a34b06ee227280bb4bfddc",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 75,
"avg_line_length": 29.24468085106383,
"alnum_prop": 0.6213168424881775,
"repo_name": "bitcoinfees/bitcoin-feemodel-data",
"id": "b7e6129db622711592b894cfa7f14f8bbe198a09",
"size": "2749",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "feemodeldata/plotting/plotwaits.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "55502"
}
],
"symlink_target": ""
}
|
from fractions import gcd
# Smallest positive number evenly divisible by 1..20 is lcm(1, ..., 20),
# folded pairwise via lcm(a, b) = a * b // gcd(a, b).
print reduce(lambda x, y: x * y // gcd(x, y), range(1, 21))
|
{
"content_hash": "203539f102e0af4a397c7eb3955bc4ae",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 104,
"avg_line_length": 65.5,
"alnum_prop": 0.6641221374045801,
"repo_name": "jacksarick/My-Code",
"id": "80854cf317daac3e56a6aa407a89d10b3bbcf763",
"size": "242",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/python challenges/euler/005_smallest_multiple.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "37910"
},
{
"name": "C",
"bytes": "5513"
},
{
"name": "COBOL",
"bytes": "1951"
},
{
"name": "CSS",
"bytes": "556"
},
{
"name": "Common Lisp",
"bytes": "499"
},
{
"name": "Elixir",
"bytes": "1272"
},
{
"name": "Erlang",
"bytes": "138"
},
{
"name": "HTML",
"bytes": "36533"
},
{
"name": "Haskell",
"bytes": "719"
},
{
"name": "Java",
"bytes": "5551"
},
{
"name": "JavaScript",
"bytes": "23535"
},
{
"name": "Lua",
"bytes": "2625"
},
{
"name": "NetLogo",
"bytes": "18923"
},
{
"name": "PHP",
"bytes": "191"
},
{
"name": "Python",
"bytes": "146043"
},
{
"name": "Racket",
"bytes": "11152"
},
{
"name": "Rust",
"bytes": "264"
},
{
"name": "Tcl",
"bytes": "762"
}
],
"symlink_target": ""
}
|
import unittest
from base64 import b64encode
from airflow import configuration
from airflow import models
from airflow.contrib.operators.ssh_operator import SSHOperator
from airflow.models import DAG, TaskInstance
from airflow.settings import Session
from airflow.utils import timezone
from airflow.utils.timezone import datetime
TEST_DAG_ID = 'unit_tests'
DEFAULT_DATE = datetime(2017, 1, 1)
def reset(dag_id=TEST_DAG_ID):
session = Session()
tis = session.query(models.TaskInstance).filter_by(dag_id=dag_id)
tis.delete()
session.commit()
session.close()
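# Clear any leftover task instances before the tests below create new ones.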
reset()
class SSHOperatorTest(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
from airflow.contrib.hooks.ssh_hook import SSHHook
hook = SSHHook(ssh_conn_id='ssh_default')
hook.no_host_key_check = True
args = {
'owner': 'airflow',
'start_date': DEFAULT_DATE,
'provide_context': True
}
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_once', default_args=args)
dag.schedule_interval = '@once'
self.hook = hook
self.dag = dag
def test_hook_created_correctly(self):
TIMEOUT = 20
SSH_ID = "ssh_default"
task = SSHOperator(
task_id="test",
command="echo -n airflow",
dag=self.dag,
timeout=TIMEOUT,
ssh_conn_id="ssh_default"
)
self.assertIsNotNone(task)
task.execute(None)
self.assertEqual(TIMEOUT, task.ssh_hook.timeout)
self.assertEqual(SSH_ID, task.ssh_hook.ssh_conn_id)
def test_json_command_execution(self):
configuration.conf.set("core", "enable_xcom_pickling", "False")
task = SSHOperator(
task_id="test",
ssh_hook=self.hook,
command="echo -n airflow",
do_xcom_push=True,
dag=self.dag,
)
self.assertIsNotNone(task)
ti = TaskInstance(
task=task, execution_date=timezone.utcnow())
ti.run()
self.assertIsNotNone(ti.duration)
self.assertEqual(ti.xcom_pull(task_ids='test', key='return_value'),
b64encode(b'airflow').decode('utf-8'))
def test_pickle_command_execution(self):
configuration.conf.set("core", "enable_xcom_pickling", "True")
task = SSHOperator(
task_id="test",
ssh_hook=self.hook,
command="echo -n airflow",
do_xcom_push=True,
dag=self.dag,
)
self.assertIsNotNone(task)
ti = TaskInstance(
task=task, execution_date=timezone.utcnow())
ti.run()
self.assertIsNotNone(ti.duration)
self.assertEqual(ti.xcom_pull(task_ids='test', key='return_value'), b'airflow')
def test_command_execution_with_env(self):
configuration.conf.set("core", "enable_xcom_pickling", "True")
task = SSHOperator(
task_id="test",
ssh_hook=self.hook,
command="echo -n airflow",
do_xcom_push=True,
dag=self.dag,
)
self.assertIsNotNone(task)
ti = TaskInstance(
task=task, execution_date=timezone.utcnow())
ti.run()
self.assertIsNotNone(ti.duration)
self.assertEqual(ti.xcom_pull(task_ids='test', key='return_value'), b'airflow')
def test_no_output_command(self):
configuration.conf.set("core", "enable_xcom_pickling", "True")
task = SSHOperator(
task_id="test",
ssh_hook=self.hook,
command="sleep 1",
do_xcom_push=True,
dag=self.dag,
)
self.assertIsNotNone(task)
ti = TaskInstance(
task=task, execution_date=timezone.utcnow())
ti.run()
self.assertIsNotNone(ti.duration)
self.assertEqual(ti.xcom_pull(task_ids='test', key='return_value'), b'')
def test_arg_checking(self):
import os
from airflow.exceptions import AirflowException
conn_id = "conn_id_for_testing"
TIMEOUT = 5
os.environ['AIRFLOW_CONN_' + conn_id.upper()] = "ssh://test_id@localhost"
# Exception should be raised if neither ssh_hook nor ssh_conn_id is provided
with self.assertRaisesRegex(AirflowException,
"Cannot operate without ssh_hook or ssh_conn_id."):
task_0 = SSHOperator(task_id="test", command="echo -n airflow",
timeout=TIMEOUT, dag=self.dag)
task_0.execute(None)
# if ssh_hook is invalid/not provided, use ssh_conn_id to create SSHHook
task_1 = SSHOperator(
task_id="test_1",
ssh_hook="string_rather_than_SSHHook", # invalid ssh_hook
ssh_conn_id=conn_id,
command="echo -n airflow",
timeout=TIMEOUT,
dag=self.dag
)
try:
task_1.execute(None)
except Exception:
pass
self.assertEqual(task_1.ssh_hook.ssh_conn_id, conn_id)
task_2 = SSHOperator(
task_id="test_2",
ssh_conn_id=conn_id, # no ssh_hook provided
command="echo -n airflow",
timeout=TIMEOUT,
dag=self.dag
)
try:
task_2.execute(None)
except Exception:
pass
self.assertEqual(task_2.ssh_hook.ssh_conn_id, conn_id)
# if both valid ssh_hook and ssh_conn_id are provided, ignore ssh_conn_id
task_3 = SSHOperator(
task_id="test_3",
ssh_hook=self.hook,
ssh_conn_id=conn_id,
command="echo -n airflow",
timeout=TIMEOUT,
dag=self.dag
)
try:
task_3.execute(None)
except Exception:
pass
self.assertEqual(task_3.ssh_hook.ssh_conn_id, self.hook.ssh_conn_id)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "1406bcfdc022672480299619370ab5bb",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 87,
"avg_line_length": 31.333333333333332,
"alnum_prop": 0.569813829787234,
"repo_name": "r39132/airflow",
"id": "3cf12cab6c612eab34bba060f2f95b84490890ca",
"size": "6828",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/contrib/operators/test_ssh_operator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "12126"
},
{
"name": "Dockerfile",
"bytes": "4111"
},
{
"name": "HTML",
"bytes": "128531"
},
{
"name": "JavaScript",
"bytes": "22118"
},
{
"name": "Mako",
"bytes": "1284"
},
{
"name": "Python",
"bytes": "5928206"
},
{
"name": "Shell",
"bytes": "41869"
}
],
"symlink_target": ""
}
|
def create_model(project_id, dataset_id, display_name):
"""Create a model."""
# [START automl_vision_object_detection_create_model]
from google.cloud import automl
# TODO(developer): Uncomment and set the following variables
# project_id = "YOUR_PROJECT_ID"
# dataset_id = "YOUR_DATASET_ID"
# display_name = "your_models_display_name"
client = automl.AutoMlClient()
# A resource that represents Google Cloud Platform location.
project_location = f"projects/{project_id}/locations/us-central1"
# Leave model unset to use the default base model provided by Google
# train_budget_milli_node_hours: The actual train_cost will be equal or
# less than this value.
# https://cloud.google.com/automl/docs/reference/rpc/google.cloud.automl.v1#imageobjectdetectionmodelmetadata
metadata = automl.ImageObjectDetectionModelMetadata(
train_budget_milli_node_hours=24000
)
model = automl.Model(
display_name=display_name,
dataset_id=dataset_id,
image_object_detection_model_metadata=metadata,
)
# Create a model with the model metadata in the region.
response = client.create_model(parent=project_location, model=model)
print("Training operation name: {}".format(response.operation.name))
print("Training started...")
# [END automl_vision_object_detection_create_model]
return response
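# Illustrative call (hypothetical project/dataset IDs; training is billed):
#
#   op = create_model("my-project", "IOD1234567890123456789", "my_detector")
#   op.result()  # block until the long-running training operation finishes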
|
{
"content_hash": "aa1fcaa822d106dcfa35f7563c39a994",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 113,
"avg_line_length": 41.3235294117647,
"alnum_prop": 0.7103202846975089,
"repo_name": "GoogleCloudPlatform/python-docs-samples",
"id": "d00c0a669b8cb228d0658b4dfa2c2f419a110645",
"size": "1980",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "automl/snippets/vision_object_detection_create_model.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "8008"
},
{
"name": "Dockerfile",
"bytes": "62031"
},
{
"name": "HTML",
"bytes": "69878"
},
{
"name": "JavaScript",
"bytes": "26494"
},
{
"name": "Jinja",
"bytes": "1892"
},
{
"name": "Jupyter Notebook",
"bytes": "47951698"
},
{
"name": "Makefile",
"bytes": "932"
},
{
"name": "Procfile",
"bytes": "138"
},
{
"name": "PureBasic",
"bytes": "11115"
},
{
"name": "Python",
"bytes": "5323502"
},
{
"name": "Shell",
"bytes": "78261"
}
],
"symlink_target": ""
}
|
from .se import Lower_SE
from .sma import Lower_SMA
|
{
"content_hash": "50d1b259f0f5bb20df1442259fd416a6",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 26,
"avg_line_length": 26,
"alnum_prop": 0.7692307692307693,
"repo_name": "ljwolf/spvcm",
"id": "899bccd21c2a9f94735151c115790d1e30562ab5",
"size": "52",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "spvcm/lower_level/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1282235"
},
{
"name": "Jupyter Notebook",
"bytes": "2152740"
},
{
"name": "Python",
"bytes": "286595"
}
],
"symlink_target": ""
}
|
from pyface.tasks.api import Editor, Task, TaskPane
from traits.api import Bool, Instance, Property, Str, cached_property
# Local imports.
from pyface.tasks.action.listening_action import ListeningAction
class TaskAction(ListeningAction):
""" An Action that makes a callback to a Task.
Note that this is a convenience class. Actions associated with a Task need
not inherit TaskAction, although they must, of course, inherit Action.
"""
#### ListeningAction interface ############################################
object = Property(depends_on='task')
#### TaskAction interface #################################################
# The Task with which the action is associated. Set by the framework.
task = Instance(Task)
###########################################################################
# Protected interface.
###########################################################################
def _get_object(self):
return self.task
def destroy(self):
# Disconnect listeners to task and dependent properties.
self.task = None
super(TaskAction, self).destroy()
class TaskWindowAction(TaskAction):
""" An Action that makes a callback to a Task's window.
"""
#### ListeningAction interface ############################################
object = Property(depends_on='task.window')
###########################################################################
# Protected interface.
###########################################################################
def _get_object(self):
if self.task:
return self.task.window
return None
class CentralPaneAction(TaskAction):
""" An Action that makes a callback to a Task's central pane.
"""
#### ListeningAction interface ############################################
object = Property(depends_on='central_pane')
#### CentralPaneAction interface ##########################################
# The central pane with which the action is associated.
central_pane = Property(Instance(TaskPane), depends_on='task')
###########################################################################
# Protected interface.
###########################################################################
@cached_property
def _get_central_pane(self):
if self.task:
return self.task.window.get_central_pane(self.task)
return None
def _get_object(self):
return self.central_pane
class DockPaneAction(TaskAction):
""" An Action the makes a callback to one of a Task's dock panes.
"""
#### ListeningAction interface ############################################
object = Property(depends_on='dock_pane')
#### DockPaneAction interface #############################################
# The dock pane with which the action is associated. Set by the framework.
dock_pane = Property(Instance(TaskPane), depends_on='task')
# The ID of the dock pane with which the action is associated.
dock_pane_id = Str
###########################################################################
# Protected interface.
###########################################################################
@cached_property
def _get_dock_pane(self):
if self.task:
return self.task.window.get_dock_pane(self.dock_pane_id, self.task)
return None
def _get_object(self):
return self.dock_pane
class EditorAction(CentralPaneAction):
""" An action that makes a callback to the active editor in an editor pane.
"""
#### ListeningAction interface ############################################
object = Property(depends_on='active_editor')
#### EditorAction interface ###############################################
# The active editor in the central pane with which the action is associated.
active_editor = Property(Instance(Editor),
depends_on='central_pane.active_editor')
###########################################################################
# Protected interface.
###########################################################################
@cached_property
def _get_active_editor(self):
if self.central_pane is not None:
return self.central_pane.active_editor
return None
def _get_object(self):
return self.active_editor
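# Illustrative usage inside a Task's menu bar definition (the 'save' method
# name is hypothetical):
#
#   TaskAction(name='Save', method='save', accelerator='Ctrl+S')
#
# The 'method' string is resolved against the associated object (here, the
# task) when the action is triggered.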
|
{
"content_hash": "8420edf1e74b391ece1daa0928bdb15b",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 80,
"avg_line_length": 32.76642335766423,
"alnum_prop": 0.4847404767208732,
"repo_name": "geggo/pyface",
"id": "66d7f5d9db6945c7f3e3e060b57789355e6b1978",
"size": "4518",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyface/tasks/action/task_action.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "896"
},
{
"name": "Python",
"bytes": "2246684"
},
{
"name": "Shell",
"bytes": "940"
}
],
"symlink_target": ""
}
|
from django.conf.urls import patterns, url
from log import views
urlpatterns = patterns('',
url(r'^entry/$', views.EntryListAPIView.as_view(), name='log_entry_list'),
url(r'^entry/id/$', views.EntryCreateAPIView.as_view(), name='log_entry_id'),
# Use optional ending slash since Backbone models don't use it by default.
url(r'^entry/id/(?P<id>[0-9]+)(/?)$', views.EntryDetailAPIView.as_view()),
)
|
{
"content_hash": "3363538d8a7f318d8e113539e312808c",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 81,
"avg_line_length": 37.81818181818182,
"alnum_prop": 0.6778846153846154,
"repo_name": "thachhoang/log",
"id": "53142e044af11e71de5552e08159a50c67557f38",
"size": "416",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "log/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7036"
},
{
"name": "HTML",
"bytes": "5367"
},
{
"name": "JavaScript",
"bytes": "15471"
},
{
"name": "Python",
"bytes": "22702"
}
],
"symlink_target": ""
}
|
import os
import sys
from .server import app
def main():
"""The application command-line entry point."""
# Get contextual information
project_directory = os.path.abspath(os.path.normpath('.'))
relationships_filename = 'relationships.json'
# Set default config values
app.config.setdefault('PROJECT_DIRECTORY', project_directory)
app.config.setdefault('WORKSPACE_SEND_FILE_OPTIONS', {})
app.config.setdefault('SUPPORTED_IMAGE_EXTENSIONS', [])
app.config.setdefault('RELATIONSHIPS_FILENAME', relationships_filename)
# Set dependent default config values
relationships_path = os.path.join(app.config['PROJECT_DIRECTORY'], app.config['RELATIONSHIPS_FILENAME'])
app.config.setdefault('RELATIONSHIPS_PATH', relationships_path)
# TODO: command-line processing
args = sys.argv[1:]
port = int(args[0]) if len(args) > 0 else None
# Set overridden config values
if port is not None:
app.config['PORT'] = port
# Run the server, using the specified workspace path
app.run(app.config['HOST'], app.config['PORT'], debug=True, use_reloader=app.config['DEBUG_MOCKDOWN'])
|
{
"content_hash": "b88ace0cb2bd366dde28faf4382f5f96",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 108,
"avg_line_length": 35.60606060606061,
"alnum_prop": 0.6885106382978723,
"repo_name": "joeyespo/mockdown",
"id": "e86519206e2c91cf582726b2850e6949648f8b68",
"size": "1175",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mockdown/command.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6391"
},
{
"name": "Python",
"bytes": "8753"
}
],
"symlink_target": ""
}
|
"""Sending email
This program uses :py:mod:`smtplib` to send emails.
Notes:
* ``smtp_info`` file has each item on a separate line.
* Email address used is specially created for this chapter.
    * Use :func:`input` for the password to avoid storing it in an unencrypted file.
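    * Example ``smtp_info`` layout (illustrative values only)::

        bot@example.com
        app-password
        smtp.example.com
        465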
"""
def main():
# Connecting to an SMTP Server
import smtplib
with open('smtp_info') as config:
email, password, server, port = config.read().splitlines()
smtp_obj = smtplib.SMTP_SSL(server, port) # Using port 465
print(type(smtp_obj))
# Sending the SMTP "Hello" Message
print(smtp_obj.ehlo())
# Logging in to the SMTP Server
print(smtp_obj.login(email, password))
# Sending an Email
unsent = smtp_obj.sendmail(email, 'contact.me@JoseALerma.com',
'Subject: Bot test.\nDear Myself,\nAlways remember that with great '
'power comes great responsibility.\nRegards,\nYou')
print(unsent.keys())
# Disconnecting from the SMTP Server
print(smtp_obj.quit())
if __name__ == '__main__':
main()
|
{
"content_hash": "f96e43fd56f35e2fd3cc770b6ff1d33c",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 99,
"avg_line_length": 27.575,
"alnum_prop": 0.6300997280145059,
"repo_name": "JoseALermaIII/python-tutorials",
"id": "b50cdb66bb6d53c8ebf3b696f247d21b2c36efce",
"size": "1103",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pythontutorials/books/AutomateTheBoringStuff/Ch16/P1_sendingEmail.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "326"
},
{
"name": "Python",
"bytes": "2020260"
}
],
"symlink_target": ""
}
|
'''
salt.serializers.yamlex
~~~~~~~~~~~~~~~~~~~~~~~~~~
YAMLEX is a format that allows for things like sls files to be
more intuitive.
It's an extension of YAML that implements all the salt magic:
    - it implies omap for anything dict-like.
    - it implies that string-like data are str, not unicode
- ...
For example, the file `states.sls` has this contents:
.. code-block:: yaml
foo:
bar: 42
baz: [1, 2, 3]
The file can be parsed into Python like this
.. code-block:: python
from salt.serializers import yamlex
with open('state.sls', 'r') as stream:
obj = yamlex.deserialize(stream)
Check that ``obj`` is an OrderedDict
.. code-block:: python
from salt.utils.odict import OrderedDict
assert isinstance(obj, dict)
assert isinstance(obj, OrderedDict)
    yamlex objects' `__repr__` and `__str__` methods render a readable YAML
    string, so they are template friendly.
.. code-block:: python
print '{0}'.format(obj)
returns:
::
{foo: {bar: 42, baz: [1, 2, 3]}}
and they are still valid YAML:
.. code-block:: python
from salt.serializers import yaml
yml_obj = yaml.deserialize(str(obj))
assert yml_obj == obj
yamlex implements also custom tags:
!aggregate
        this tag allows aggregation of structures.
For example:
.. code-block:: yaml
placeholder: !aggregate foo
placeholder: !aggregate bar
placeholder: !aggregate baz
is rendered as
.. code-block:: yaml
placeholder: [foo, bar, baz]
!reset
        this tag discards the value computed so far.
.. code-block:: yaml
placeholder: {!aggregate foo: {foo: 42}}
placeholder: {!aggregate foo: {bar: null}}
!reset placeholder: {!aggregate foo: {baz: inga}}
is roughly equivalent to
.. code-block:: yaml
placeholder: {!aggregate foo: {baz: inga}}
    A document is de facto an aggregate mapping.
'''
# pylint: disable=invalid-name,no-member,missing-docstring,no-self-use
# pylint: disable=too-few-public-methods,too-many-public-methods
# Import python libs
from __future__ import absolute_import
import logging
import datetime
from copy import copy
# Import Salt Libs
from salt.serializers import DeserializationError, SerializationError
from salt.utils.aggregation import aggregate, Map, Sequence
from salt.utils.odict import OrderedDict
# Import 3rd-party libs
import yaml
from yaml.nodes import MappingNode
from yaml.constructor import ConstructorError
from yaml.scanner import ScannerError
import salt.ext.six as six
__all__ = ['deserialize', 'serialize', 'available']
log = logging.getLogger(__name__)
available = True
# prefer C bindings over python when available
# CSafeDumper causes test failures under python3
BaseLoader = getattr(yaml, 'CSafeLoader', yaml.SafeLoader)
BaseDumper = yaml.SafeDumper if six.PY3 else getattr(yaml, 'CSafeDumper', yaml.SafeDumper)
ERROR_MAP = {
("found character '\\t' "
"that cannot start any token"): 'Illegal tab character'
}
def deserialize(stream_or_string, **options):
'''
    Deserialize any string or stream-like object into a Python data structure.
:param stream_or_string: stream or string to deserialize.
:param options: options given to lower yaml module.
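    A rough sketch of the expected aggregation behavior (hypothetical
    document; repeated keys merge):

    .. code-block:: python

        >>> deserialize('foo: !aggregate 1\nfoo: !aggregate 2')
        {foo: [1, 2]}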
'''
options.setdefault('Loader', Loader)
try:
return yaml.load(stream_or_string, **options)
except ScannerError as error:
err_type = ERROR_MAP.get(error.problem, 'Unknown yaml render error')
line_num = error.problem_mark.line + 1
raise DeserializationError(err_type,
line_num,
error.problem_mark.buffer)
except ConstructorError as error:
raise DeserializationError(error)
except Exception as error:
raise DeserializationError(error)
def serialize(obj, **options):
'''
Serialize Python data to YAML.
:param obj: the data structure to serialize
:param options: options given to lower yaml module.
'''
options.setdefault('Dumper', Dumper)
try:
response = yaml.dump(obj, **options)
if response.endswith('\n...\n'):
return response[:-5]
if response.endswith('\n'):
return response[:-1]
return response
except Exception as error:
raise SerializationError(error)
class Loader(BaseLoader): # pylint: disable=W0232
'''
Create a custom YAML loader that uses the custom constructor. This allows
for the YAML loading defaults to be manipulated based on needs within salt
    to make things like sls files more intuitive.
'''
DEFAULT_SCALAR_TAG = 'tag:yaml.org,2002:str'
DEFAULT_SEQUENCE_TAG = 'tag:yaml.org,2002:seq'
DEFAULT_MAPPING_TAG = 'tag:yaml.org,2002:omap'
def compose_document(self):
node = BaseLoader.compose_document(self)
node.tag = '!aggregate'
return node
def construct_yaml_omap(self, node):
'''
Build the SLSMap
'''
sls_map = SLSMap()
if not isinstance(node, MappingNode):
raise ConstructorError(
None,
None,
'expected a mapping node, but found {0}'.format(node.id),
node.start_mark)
self.flatten_mapping(node)
for key_node, value_node in node.value:
            # The !reset instruction applies within the document only:
            # it discards the previously decoded value for this key.
            reset = key_node.tag == u'!reset'
            # The !aggregate tag applies only to values, not keys,
            # so warn when it is used on a key.
if key_node.tag == u'!aggregate':
log.warning('!aggregate applies on values only, not on keys')
value_node.tag = key_node.tag
key_node.tag = self.resolve_sls_tag(key_node)[0]
key = self.construct_object(key_node, deep=False)
try:
hash(key)
except TypeError:
err = ('While constructing a mapping {0} found unacceptable '
'key {1}').format(node.start_mark, key_node.start_mark)
raise ConstructorError(err)
value = self.construct_object(value_node, deep=False)
if key in sls_map and not reset:
value = merge_recursive(sls_map[key], value)
sls_map[key] = value
return sls_map
def construct_sls_str(self, node):
'''
Build the SLSString.
'''
# Ensure obj is str, not py2 unicode or py3 bytes
obj = self.construct_scalar(node)
if six.PY2:
obj = obj.encode('utf-8')
return SLSString(obj)
def construct_sls_int(self, node):
'''
        Verify integers and pass them in correctly if they are declared
as octal
'''
if node.value == '0':
pass
elif node.value.startswith('0') \
and not node.value.startswith(('0b', '0x')):
node.value = node.value.lstrip('0')
# If value was all zeros, node.value would have been reduced to
# an empty string. Change it to '0'.
if node.value == '':
node.value = '0'
return int(node.value)
def construct_sls_aggregate(self, node):
try:
tag, deep = self.resolve_sls_tag(node)
        except Exception:
            raise ConstructorError('unable to build aggregate')
node = copy(node)
node.tag = tag
obj = self.construct_object(node, deep)
if obj is None:
return AggregatedSequence()
elif tag == self.DEFAULT_MAPPING_TAG:
return AggregatedMap(obj)
elif tag == self.DEFAULT_SEQUENCE_TAG:
return AggregatedSequence(obj)
return AggregatedSequence([obj])
def construct_sls_reset(self, node):
try:
tag, deep = self.resolve_sls_tag(node)
        except Exception:
raise ConstructorError('unable to build reset')
node = copy(node)
node.tag = tag
return self.construct_object(node, deep)
def resolve_sls_tag(self, node):
if isinstance(node, yaml.nodes.ScalarNode):
# search implicit tag
tag = self.resolve(yaml.nodes.ScalarNode, node.value, [True, True])
deep = False
elif isinstance(node, yaml.nodes.SequenceNode):
tag = self.DEFAULT_SEQUENCE_TAG
deep = True
elif isinstance(node, yaml.nodes.MappingNode):
tag = self.DEFAULT_MAPPING_TAG
deep = True
else:
raise ConstructorError('unable to resolve tag')
return tag, deep
Loader.add_constructor('!aggregate', Loader.construct_sls_aggregate) # custom type
Loader.add_constructor('!reset', Loader.construct_sls_reset) # custom type
Loader.add_constructor('tag:yaml.org,2002:omap', Loader.construct_yaml_omap) # our overwrite
Loader.add_constructor('tag:yaml.org,2002:str', Loader.construct_sls_str) # our overwrite
Loader.add_constructor('tag:yaml.org,2002:int', Loader.construct_sls_int) # our overwrite
Loader.add_multi_constructor('tag:yaml.org,2002:null', Loader.construct_yaml_null)
Loader.add_multi_constructor('tag:yaml.org,2002:bool', Loader.construct_yaml_bool)
Loader.add_multi_constructor('tag:yaml.org,2002:float', Loader.construct_yaml_float)
Loader.add_multi_constructor('tag:yaml.org,2002:binary', Loader.construct_yaml_binary)
Loader.add_multi_constructor('tag:yaml.org,2002:timestamp', Loader.construct_yaml_timestamp)
Loader.add_multi_constructor('tag:yaml.org,2002:pairs', Loader.construct_yaml_pairs)
Loader.add_multi_constructor('tag:yaml.org,2002:set', Loader.construct_yaml_set)
Loader.add_multi_constructor('tag:yaml.org,2002:seq', Loader.construct_yaml_seq)
Loader.add_multi_constructor('tag:yaml.org,2002:map', Loader.construct_yaml_map)
Loader.add_multi_constructor(None, Loader.construct_undefined)
class SLSMap(OrderedDict):
'''
Ensures that dict str() and repr() are YAML friendly.
.. code-block:: python
>>> mapping = OrderedDict([('a', 'b'), ('c', None)])
>>> print mapping
OrderedDict([('a', 'b'), ('c', None)])
>>> sls_map = SLSMap(mapping)
>>> print sls_map.__str__()
{a: b, c: null}
'''
def __str__(self):
return serialize(self, default_flow_style=True)
def __repr__(self, _repr_running=None):
return serialize(self, default_flow_style=True)
class SLSString(str):
'''
Ensures that str str() and repr() are YAML friendly.
.. code-block:: python
>>> scalar = str('foo')
>>> print 'foo'
foo
>>> sls_scalar = SLSString(scalar)
>>> print sls_scalar
"foo"
'''
def __str__(self):
return serialize(self, default_style='"')
def __repr__(self):
return serialize(self, default_style='"')
class AggregatedMap(SLSMap, Map):
pass
class AggregatedSequence(Sequence):
pass
class Dumper(BaseDumper): # pylint: disable=W0232
'''
sls dumper.
'''
def represent_odict(self, data):
return self.represent_mapping('tag:yaml.org,2002:map', list(data.items()))
Dumper.add_multi_representer(type(None), Dumper.represent_none)
if six.PY2:
Dumper.add_multi_representer(six.binary_type, Dumper.represent_str)
Dumper.add_multi_representer(six.text_type, Dumper.represent_unicode)
Dumper.add_multi_representer(long, Dumper.represent_long) # pylint: disable=incompatible-py3-code
else:
Dumper.add_multi_representer(six.binary_type, Dumper.represent_binary)
Dumper.add_multi_representer(six.text_type, Dumper.represent_str)
Dumper.add_multi_representer(bool, Dumper.represent_bool)
Dumper.add_multi_representer(int, Dumper.represent_int)
Dumper.add_multi_representer(float, Dumper.represent_float)
Dumper.add_multi_representer(list, Dumper.represent_list)
Dumper.add_multi_representer(tuple, Dumper.represent_list)
Dumper.add_multi_representer(dict, Dumper.represent_odict) # make every dict like obj to be represented as a map
Dumper.add_multi_representer(set, Dumper.represent_set)
Dumper.add_multi_representer(datetime.date, Dumper.represent_date)
Dumper.add_multi_representer(datetime.datetime, Dumper.represent_datetime)
Dumper.add_multi_representer(None, Dumper.represent_undefined)
def merge_recursive(obj_a, obj_b, level=False):
'''
Merge obj_b into obj_a.
'''
return aggregate(obj_a, obj_b, level,
map_class=AggregatedMap,
sequence_class=AggregatedSequence)
|
{
"content_hash": "9fdf163e5ae7264d695a5bc8fbc1080d",
"timestamp": "",
"source": "github",
"line_count": 414,
"max_line_length": 113,
"avg_line_length": 31.096618357487923,
"alnum_prop": 0.632204443063539,
"repo_name": "stephane-martin/salt-debian-packaging",
"id": "65fbf58e4859aeb8ae38d2feaa8a9f6681124fe2",
"size": "12898",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "salt-2016.3.3/salt/serializers/yamlex.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "13798"
},
{
"name": "C",
"bytes": "986"
},
{
"name": "Groff",
"bytes": "13634346"
},
{
"name": "HTML",
"bytes": "39558"
},
{
"name": "Makefile",
"bytes": "20902"
},
{
"name": "NSIS",
"bytes": "22316"
},
{
"name": "PowerShell",
"bytes": "38719"
},
{
"name": "Python",
"bytes": "40857506"
},
{
"name": "SaltStack",
"bytes": "58278"
},
{
"name": "Scheme",
"bytes": "1790"
},
{
"name": "Shell",
"bytes": "829927"
},
{
"name": "Tcl",
"bytes": "6532"
},
{
"name": "TeX",
"bytes": "11632"
}
],
"symlink_target": ""
}
|
import os, requests, json
from typing import Any
from urllib.parse import urljoin
from igf_data.utils.dbutils import read_json_data
from igf_data.utils.fileutils import check_file_path
def get_request(
url: str,
headers: Any = None,
verify: bool = False,
jsonify: bool = True) \
-> Any:
try:
res = \
requests.get(
url=url,
headers=headers,
verify=verify)
if res.status_code != 200:
raise ValueError(
f"Failed get request, got status: {res.status_code}")
if jsonify:
res = res.json()
return res
except Exception as e:
raise ValueError(e)
def post_request(
url: str,
data: Any,
headers: Any = None,
verify: bool = False,
file_attachment: Any = None,
jsonify: bool = True) \
-> Any:
try:
files = None
if file_attachment is not None:
files = {
'file': (
          os.path.basename(file_attachment),  # report the base name, not the full path
open(file_attachment, 'rb'), 'application/json')}
res = \
requests.post(
url=url,
data=data,
headers=headers,
verify=verify,
files=files)
if res.status_code != 200:
raise ValueError(
f"Failed post request, got status: {res.status_code}")
if jsonify:
res = res.json()
return res
  except Exception:
raise
def get_login_token(
portal_config_file: str,
verify: bool = False,
url_suffix: str = '/api/v1/security/login') \
-> Any:
try:
portal_config = read_json_data(portal_config_file)
if isinstance(portal_config, list):
portal_config = portal_config[0]
base_url = portal_config.get('base_url')
login_data = portal_config.get('login_data')
if login_data is None:
raise KeyError("Missing logging info")
if isinstance(login_data, dict):
login_data = json.dumps(login_data)
if base_url is None:
raise KeyError("Missing base url")
url = urljoin(base_url, url_suffix)
json_res = \
post_request(
url=url,
data=login_data,
headers={"Content-Type": "application/json"},
verify=verify)
token = json_res.get('access_token')
return token
except Exception as e:
raise ValueError(
f"Failed to get token from portal, error: {e}")
def get_data_from_portal(
portal_config_file: str,
url_suffix: str,
verify: bool = False,
jsonify: bool = True,
request_mode: str = 'get') \
-> Any:
try:
check_file_path(portal_config_file)
portal_config = read_json_data(portal_config_file)
if isinstance(portal_config, list):
portal_config = portal_config[0]
base_url = portal_config.get('base_url')
if base_url is None:
raise KeyError("Missing base url")
url = urljoin(base_url, url_suffix)
token = \
get_login_token(
portal_config_file=portal_config_file,
verify=verify)
if request_mode == 'get':
res = \
get_request(
url=url,
headers={"accept": "application/json", "Authorization": f"Bearer {token}"},
verify=verify,
jsonify=jsonify)
elif request_mode == 'post':
res = \
post_request(
url=url,
data=None,
headers={"accept": "application/json", "Authorization": f"Bearer {token}"},
verify=verify,
jsonify=jsonify)
else:
raise ValueError(
f"Unsupported request mode: {request_mode}")
return res
except Exception as e:
raise ValueError(e)
def upload_files_to_portal(
portal_config_file: str,
file_path: str,
url_suffix: str,
verify: bool = False,
jsonify: bool = True) \
-> Any:
try:
check_file_path(file_path)
portal_config = read_json_data(portal_config_file)
if isinstance(portal_config, list):
portal_config = portal_config[0]
base_url = portal_config.get('base_url')
if base_url is None:
raise KeyError("Missing base url")
url = urljoin(base_url, url_suffix)
token = \
get_login_token(
portal_config_file=portal_config_file,
verify=verify)
res = \
post_request(
url=url,
data=None,
headers={"accept": "application/json", "Authorization": f"Bearer {token}"},
file_attachment=file_path,
verify=verify,
jsonify=jsonify)
return res
except Exception as e:
raise ValueError(
f"Failed to upload file {file_path} to portal, error: {e}")
|
{
"content_hash": "0c7a63cbe99680019e0c377e80af9554",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 85,
"avg_line_length": 27.581818181818182,
"alnum_prop": 0.5910788837618106,
"repo_name": "imperial-genomics-facility/data-management-python",
"id": "f5b2157bd3c48df0b953274117619a46f17631cf",
"size": "4551",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "igf_portal/api_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2900"
},
{
"name": "HTML",
"bytes": "77727"
},
{
"name": "JavaScript",
"bytes": "1074"
},
{
"name": "Jinja",
"bytes": "399"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "2553178"
},
{
"name": "R",
"bytes": "376"
},
{
"name": "Shell",
"bytes": "536"
}
],
"symlink_target": ""
}
|
"""Admin application."""
import importlib
import pkgutil
from flask_admin import Admin
from flask_admin.base import AdminIndexView, MenuLink
from flask_admin.contrib.sqla import ModelView
from flask_login import current_user
from pygotham import factory, filters
__all__ = ('create_app',)
class HomeView(AdminIndexView):
"""Only show the admin to authenticated admin users."""
def is_accessible(self):
return current_user.has_role('admin')
def create_app(settings_override=None):
"""Return the PyGotham admin application.
:param settings_override: a ``dict`` of settings to override.
"""
app = factory.create_app(__name__, __path__, settings_override)
app.jinja_env.filters['rst'] = filters.rst_to_html
# Because the admin is being wrapped inside an app, the url needs to
# be overridden to use / instead of the default of /admin/. One of
# the side effects of doing this is that the static assets won't
# serve correctly without overriding static_url_path as well.
admin = Admin(
app, name='PyGotham',
static_url_path='/admin',
index_view=HomeView(endpoint='', url='/'),
)
# Iterate through all the modules of the current package. For each
# module, check the public API for any instances of types that can
# be added to the Flask-Admin menu and register them.
for _, name, _ in pkgutil.iter_modules(__path__):
module = importlib.import_module('{}.{}'.format(__name__, name))
for attr in dir(module):
view = getattr(module, attr)
if isinstance(view, ModelView):
admin.add_view(view)
elif isinstance(view, MenuLink):
admin.add_link(view)
return app
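# Illustrative usage (hypothetical settings override):
#
#   app = create_app({'DEBUG': True})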
|
{
"content_hash": "93382e7a4026f6cc02a83f0150459739",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 72,
"avg_line_length": 31.375,
"alnum_prop": 0.6607854297097325,
"repo_name": "djds23/pygotham-1",
"id": "8800c5b43d8264be55d4b04ab9a3347f5793d4d1",
"size": "1757",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pygotham/admin/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "56139"
},
{
"name": "HTML",
"bytes": "36124"
},
{
"name": "JavaScript",
"bytes": "116"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "109394"
},
{
"name": "Ruby",
"bytes": "1526"
},
{
"name": "Shell",
"bytes": "129"
}
],
"symlink_target": ""
}
|
from distutils.core import setup
setup(name='NIDAQtools',
version='0.1',
description='High-level tools for working with NI DAQmx devices',
author='Jaap Kokorian',
author_email='jkokorian@gmail.com',
url='',
packages=['nidaqtools'],
)
|
{
"content_hash": "23693817982bc86e8d93692755fa1b88",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 71,
"avg_line_length": 27.5,
"alnum_prop": 0.64,
"repo_name": "jkokorian/pydaqtools",
"id": "7476f9830e4e8a97a5abfcf846c5588f97bb555f",
"size": "298",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4564"
}
],
"symlink_target": ""
}
|
import RPi.GPIO as GPIO
import time
import os
import uinput
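# Watch GPIO pin 24 (BCM numbering, pulled high); when the button pulls the
# pin low, emit an ENTER keypress through a virtual uinput keyboard device.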
#print('Button Pressed')
GPIO.setmode(GPIO.BCM)
GPIO.setup(24, GPIO.IN, pull_up_down=GPIO.PUD_UP)
while True:
input_state = GPIO.input(24)
if input_state == False:
#print('Button Pressed') #<- for debugging only
with uinput.Device([uinput.KEY_ENTER]) as device:
time.sleep(1)
device.emit_combo([uinput.KEY_ENTER])
time.sleep(10)
|
{
"content_hash": "91ca220c2d6d323e139500920bf37965",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 57,
"avg_line_length": 24.61111111111111,
"alnum_prop": 0.654627539503386,
"repo_name": "andreknieriem/photobooth",
"id": "c7edef341e69e2ea6c3040cebf0369cd5b85655e",
"size": "512",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "button.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4351"
},
{
"name": "Dockerfile",
"bytes": "1985"
},
{
"name": "JavaScript",
"bytes": "97524"
},
{
"name": "PHP",
"bytes": "98558"
},
{
"name": "Python",
"bytes": "512"
},
{
"name": "SCSS",
"bytes": "21227"
},
{
"name": "Shell",
"bytes": "9657"
}
],
"symlink_target": ""
}
|
my_list = ["hello"]  # quote the literal; avoid shadowing the built-in list
print(my_list)
|
{
"content_hash": "17c335bef12d4780c13cbc90eb036927",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 27,
"avg_line_length": 27,
"alnum_prop": 0.6666666666666666,
"repo_name": "DataScience-SacState/HackDavis",
"id": "f103b1cc975820d476575667b0ad7e1120160096",
"size": "27",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/dc48b5bd-ab72-44ea-b455-4acd3762254c.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "111"
},
{
"name": "HTML",
"bytes": "10306"
},
{
"name": "Java",
"bytes": "364"
},
{
"name": "JavaScript",
"bytes": "20595"
},
{
"name": "Python",
"bytes": "43"
},
{
"name": "Rebol",
"bytes": "15"
}
],
"symlink_target": ""
}
|
from oslo_config import cfg
from tempest.common import credentials_factory as common_creds
from tempest.common import tempest_fixtures as fixtures
from tempest import config
from tempest.lib import auth
from tempest.lib import exceptions as lib_exc
from tempest.lib.services.identity.v2 import token_client as v2_client
from tempest.lib.services.identity.v3 import token_client as v3_client
from tempest.tests import fake_config
from tempest.tests.lib import base
from tempest.tests.lib import fake_identity
class ConfiguredV2CredentialsTests(base.TestCase):
attributes = {
'username': 'fake_username',
'password': 'fake_password',
'tenant_name': 'fake_tenant_name'
}
identity_response = fake_identity._fake_v2_response
credentials_class = auth.KeystoneV2Credentials
tokenclient_class = v2_client.TokenClient
identity_version = 'v2'
def setUp(self):
super(ConfiguredV2CredentialsTests, self).setUp()
self.useFixture(fake_config.ConfigFixture())
self.stubs.Set(config, 'TempestConfigPrivate', fake_config.FakePrivate)
self.stubs.Set(self.tokenclient_class, 'raw_request',
self.identity_response)
def _get_credentials(self, attributes=None):
if attributes is None:
attributes = self.attributes
return self.credentials_class(**attributes)
def _check(self, credentials, credentials_class, filled):
# Check the right version of credentials has been returned
self.assertIsInstance(credentials, credentials_class)
# Check the id attributes are filled in
attributes = [x for x in credentials.ATTRIBUTES if (
'_id' in x and x != 'domain_id')]
for attr in attributes:
if filled:
self.assertIsNotNone(getattr(credentials, attr))
else:
self.assertIsNone(getattr(credentials, attr))
def _verify_credentials(self, credentials_class, filled=True,
identity_version=None):
for ctype in common_creds.CREDENTIAL_TYPES:
if identity_version is None:
creds = common_creds.get_configured_credentials(
credential_type=ctype, fill_in=filled)
else:
creds = common_creds.get_configured_credentials(
credential_type=ctype, fill_in=filled,
identity_version=identity_version)
self._check(creds, credentials_class, filled)
def test_create(self):
creds = self._get_credentials()
self.assertEqual(self.attributes, creds._initial)
def test_create_invalid_attr(self):
self.assertRaises(lib_exc.InvalidCredentials,
self._get_credentials,
attributes=dict(invalid='fake'))
def test_get_configured_credentials(self):
self.useFixture(fixtures.LockFixture('auth_version'))
self._verify_credentials(credentials_class=self.credentials_class)
def test_get_configured_credentials_unfilled(self):
self.useFixture(fixtures.LockFixture('auth_version'))
self._verify_credentials(credentials_class=self.credentials_class,
filled=False)
def test_get_configured_credentials_version(self):
# version specified and not loaded from config
self.useFixture(fixtures.LockFixture('auth_version'))
self._verify_credentials(credentials_class=self.credentials_class,
identity_version=self.identity_version)
def test_is_valid(self):
creds = self._get_credentials()
self.assertTrue(creds.is_valid())
class ConfiguredV3CredentialsTests(ConfiguredV2CredentialsTests):
attributes = {
'username': 'fake_username',
'password': 'fake_password',
'project_name': 'fake_project_name',
'user_domain_name': 'fake_domain_name'
}
credentials_class = auth.KeystoneV3Credentials
identity_response = fake_identity._fake_v3_response
tokenclient_class = v3_client.V3TokenClient
identity_version = 'v3'
def setUp(self):
super(ConfiguredV3CredentialsTests, self).setUp()
# Additional config items reset by cfg fixture after each test
cfg.CONF.set_default('auth_version', 'v3', group='identity')
# Identity group items
for prefix in ['', 'alt_', 'admin_']:
if prefix == 'admin_':
group = 'auth'
else:
group = 'identity'
cfg.CONF.set_default(prefix + 'domain_name', 'fake_domain_name',
group=group)
|
{
"content_hash": "d56395af276d745ee9d0381b313fb846",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 79,
"avg_line_length": 40.422413793103445,
"alnum_prop": 0.6464064832586905,
"repo_name": "HybridF5/tempest",
"id": "3c104b2e484ceea5d3e8113d09830c87ce1be94c",
"size": "5323",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tempest/tests/common/test_configured_creds.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3636851"
},
{
"name": "Shell",
"bytes": "8175"
}
],
"symlink_target": ""
}
|
from extended_choices import Choices
LOG_LEVELS = Choices(
# constant, db_id, friendly string
# Indicates the importance of the event.
# Events with level >= HIGH are denormalised on the
# case as well and 'mark' the progress of a case
# indirectly
("HIGH", 29, "HIGH"),
("MODERATE", 21, "MODERATE"),
("MINOR", 11, "MINOR"),
)
LOG_TYPES = Choices(
# constant, db_id, friendly string
("OUTCOME", "outcome", "outcome"), # codes that CLA understands and uses. E.g. CLSP
("SYSTEM", "system", "system"), # system codes, somethimes shown to users as well. E.g. CASE_CREATED
("EVENT", "event", "event"),
)
LOG_ROLES = Choices(
# constant, db_id, friendly string
# Not currently used.
("OPERATOR", "operator", "operator"),
("SPECIALIST", "specialist", "special"),
)
|
{
"content_hash": "09dbbcb557bac3e0067cf6e45f6b3c03",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 105,
"avg_line_length": 31.807692307692307,
"alnum_prop": 0.6299879081015719,
"repo_name": "ministryofjustice/cla_backend",
"id": "64d7fc9cfd18be11082184f72134d93ec08118a5",
"size": "827",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cla_backend/apps/cla_eventlog/constants.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "45941"
},
{
"name": "Dockerfile",
"bytes": "1272"
},
{
"name": "HTML",
"bytes": "14794"
},
{
"name": "JavaScript",
"bytes": "2762"
},
{
"name": "Mustache",
"bytes": "3607"
},
{
"name": "Python",
"bytes": "1577558"
},
{
"name": "Shell",
"bytes": "11204"
},
{
"name": "Smarty",
"bytes": "283906"
}
],
"symlink_target": ""
}
|
"""
luigi.hadoop has moved to :py:mod:`luigi.contrib.hadoop`
"""
# Delete this file any time after 28 July 2015
import warnings
from luigi.contrib.hadoop import *
warnings.warn("luigi.hadoop module has been moved to luigi.contrib.hadoop",
DeprecationWarning)
|
{
"content_hash": "9b828a4975a84598626e0d9556ba2fd0",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 75,
"avg_line_length": 25.09090909090909,
"alnum_prop": 0.7282608695652174,
"repo_name": "17zuoye/luigi",
"id": "6fb4c90c3150cab1acdef27e91eeeacd74824bc4",
"size": "879",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "luigi/hadoop.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "19120"
},
{
"name": "JavaScript",
"bytes": "41929"
},
{
"name": "Python",
"bytes": "1001737"
},
{
"name": "Shell",
"bytes": "2297"
}
],
"symlink_target": ""
}
|
"""Allows to configure custom shell commands to turn a value for a sensor."""
from collections.abc import Mapping
from datetime import timedelta
import json
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity
from homeassistant.const import (
CONF_COMMAND,
CONF_NAME,
CONF_UNIT_OF_MEASUREMENT,
CONF_VALUE_TEMPLATE,
STATE_UNKNOWN,
)
from homeassistant.exceptions import TemplateError
from homeassistant.helpers import template
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.reload import setup_reload_service
from . import check_output_or_log
from .const import CONF_COMMAND_TIMEOUT, DEFAULT_TIMEOUT, DOMAIN, PLATFORMS
_LOGGER = logging.getLogger(__name__)
CONF_JSON_ATTRIBUTES = "json_attributes"
DEFAULT_NAME = "Command Sensor"
SCAN_INTERVAL = timedelta(seconds=60)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_COMMAND): cv.string,
vol.Optional(CONF_COMMAND_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
vol.Optional(CONF_JSON_ATTRIBUTES): cv.ensure_list_csv,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string,
vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
}
)
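# Illustrative configuration.yaml entry (command, name, and unit are
# example values):
#
#   sensor:
#     - platform: command_line
#       name: CPU Temperature
#       command: "cat /sys/class/thermal/thermal_zone0/temp"
#       unit_of_measurement: "m°C"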
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Command Sensor."""
setup_reload_service(hass, DOMAIN, PLATFORMS)
name = config.get(CONF_NAME)
command = config.get(CONF_COMMAND)
unit = config.get(CONF_UNIT_OF_MEASUREMENT)
value_template = config.get(CONF_VALUE_TEMPLATE)
command_timeout = config.get(CONF_COMMAND_TIMEOUT)
if value_template is not None:
value_template.hass = hass
json_attributes = config.get(CONF_JSON_ATTRIBUTES)
data = CommandSensorData(hass, command, command_timeout)
add_entities(
[CommandSensor(hass, data, name, unit, value_template, json_attributes)], True
)
class CommandSensor(SensorEntity):
"""Representation of a sensor that is using shell commands."""
def __init__(
self, hass, data, name, unit_of_measurement, value_template, json_attributes
):
"""Initialize the sensor."""
self._hass = hass
self.data = data
self._attributes = None
self._json_attributes = json_attributes
self._name = name
self._state = None
self._unit_of_measurement = unit_of_measurement
self._value_template = value_template
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return self._unit_of_measurement
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def extra_state_attributes(self):
"""Return the state attributes."""
return self._attributes
def update(self):
"""Get the latest data and updates the state."""
self.data.update()
value = self.data.value
if self._json_attributes:
self._attributes = {}
if value:
try:
json_dict = json.loads(value)
if isinstance(json_dict, Mapping):
self._attributes = {
k: json_dict[k]
for k in self._json_attributes
if k in json_dict
}
else:
_LOGGER.warning("JSON result was not a dictionary")
except ValueError:
_LOGGER.warning("Unable to parse output as JSON: %s", value)
else:
_LOGGER.warning("Empty reply found when expecting JSON data")
        if value is None:
            # No output from the command; report the state as unknown.
            self._state = STATE_UNKNOWN
elif self._value_template is not None:
self._state = self._value_template.render_with_possible_json_value(
value, STATE_UNKNOWN
)
else:
self._state = value
class CommandSensorData:
"""The class for handling the data retrieval."""
def __init__(self, hass, command, command_timeout):
"""Initialize the data object."""
self.value = None
self.hass = hass
self.command = command
self.timeout = command_timeout
def update(self):
"""Get the latest data with a shell command."""
command = self.command
if " " not in command:
prog = command
args = None
args_compiled = None
else:
prog, args = command.split(" ", 1)
args_compiled = template.Template(args, self.hass)
if args_compiled:
try:
args_to_render = {"arguments": args}
rendered_args = args_compiled.render(args_to_render)
except TemplateError as ex:
_LOGGER.exception("Error rendering command template: %s", ex)
return
else:
rendered_args = None
        if rendered_args == args:
            # No template was used; run the command as-is.
            pass
        else:
            # A template was used; rebuild the shell command string.
            command = f"{prog} {rendered_args}"
_LOGGER.debug("Running command: %s", command)
self.value = check_output_or_log(command, self.timeout)
|
{
"content_hash": "d893c7eb4afff876635ef96abf9d0249",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 86,
"avg_line_length": 32,
"alnum_prop": 0.6061422413793104,
"repo_name": "kennedyshead/home-assistant",
"id": "10c5a16f60b37f9fe451577e16a51f86a925bcdd",
"size": "5568",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/command_line/sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1795"
},
{
"name": "Python",
"bytes": "33970989"
},
{
"name": "Shell",
"bytes": "4900"
}
],
"symlink_target": ""
}
|
import sys,os
import pickle
import socket
class ClientArgv(object):
def __init__(self,argvs):
self.argvs = argvs
self.argvs_parser()
self.handle()
def handle(self):
self.connect()
        # Receive and print the welcome banner
server_data = self.client_socket.recv(1024)
print server_data
if self.auther():
self.comm_argv()
    # Parse the command-line arguments
def argvs_parser(self):
argv_list = ['-s','-p']
if len(self.argvs) < 5:
self.help()
sys.exit()
for i in argv_list:
if i not in self.argvs:
                sys.exit('Argument %s is missing, please try again' % i)
try:
self.host = self.argvs[self.argvs.index('-s')+1]
self.port = int(self.argvs[self.argvs.index('-p')+1])
except (ValueError,IndexError) as e:
self.help()
sys.exit()
    # help message
def help(self):
print '''
MyFTP Client command argv
-s :Server Host Address IP or Domain
-p :Server Port
'''
def comm_help(self):
print '''
get [file] :Download file
put [file] :Upload file
cd [path] :change dir path
rm [path] :delete file
exit :exit Ftp system
'''
    # connect to the server socket
def connect(self):
try:
self.client_socket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
self.client_socket.connect((self.host,self.port))
except socket.error as e:
            sys.exit('failed to connect to server')
    # user authentication
def auther(self):
auther_count = 0
while auther_count < 3:
user_name = raw_input('Please input username:')
if len(user_name) == 0:continue
user_pass = raw_input('Please input passwd:')
if len(user_pass) == 0:continue
data = pickle.dumps({'user_name':user_name,'user_pass':user_pass})
self.client_socket.send(data)
server_data = self.client_socket.recv(1024)
if server_data == '200':
return True
else:
                print '%s: user name or password error, please try again' % server_data
auther_count += 1
else:
            sys.exit('Too many failed login attempts')
    # command dispatch
def comm_argv(self):
while True:
self.command = raw_input('>>>')
if len(self.command.split()) == 0:continue
if hasattr(self,self.command.split()[0]):
func = getattr(self,self.command.split()[0])
func()
else:
self.comm_help()
    # download a file
def get(self):
comm_list = self.command.split()
if len(comm_list) < 2:
self.comm_help()
sys.exit()
self.client_socket.send(self.command)
status_coding = self.client_socket.recv(1024)
if status_coding == '203':
            print 'File not found'
else:
self.client_socket.send('start')
file_size = int(self.client_socket.recv(1024))
self.client_socket.send('ok')
file_data = 0
with open(comm_list[1],'wb') as file_write:
while file_data != file_size:
data = self.client_socket.recv(2048)
file_write.write(data)
file_data += len(data)
            print '%s transfer OK' % comm_list[1]
self.client_socket.send('ok')
    # upload a file
def put(self):
comm_list = self.command.split()
if len(comm_list) < 2:
self.comm_help()
sys.exit()
        # send the command
self.client_socket.send(self.command)
        # wait for the server to acknowledge the command
self.client_socket.recv(1024)
if not os.path.isfile(comm_list[1]):
            print 'File not found'
else:
file_size = str(os.path.getsize(comm_list[1]))
self.client_socket.send(file_size)
self.client_socket.recv(100)
file_data = 0
with open(comm_list[1],'rb') as file_read:
while file_data != int(file_size):
data = file_read.read(2048)
file_data += len(data)
self.client_socket.sendall(data)
self.client_socket.recv(1024)
    # list the remote directory
def ls(self):
self.client_socket.send(self.command)
file_number = int(self.client_socket.recv(1024))
self.client_socket.send('OK')
for i in range(file_number):
self.client_socket.send('ok')
file_name = self.client_socket.recv(1024)
print file_name
def rm(self):
self.client_socket.send(self.command)
rm_data = self.client_socket.recv(1024)
print rm_data
    # change the working directory
    def cd(self):
        comm_list = self.command.split()
        if len(comm_list) < 2:
            self.comm_help()
            sys.exit()
        # mirror rm(): send the command and print the server's reply
        self.client_socket.send(self.command)
        cd_data = self.client_socket.recv(1024)
        print cd_data
    # exit the FTP client
def exit(self):
sys.exit('Exiting')
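# Standalone sketch (hypothetical helper, not used above) of the size-counted
# receive loop in get(): TCP is a byte stream, so the client must keep calling
# recv() and count bytes until the announced file size has arrived, instead of
# relying on a single recv() call.
def recv_exactly(sock, total_size, chunk=2048):
    received = 0
    parts = []
    while received < total_size:
        part = sock.recv(min(chunk, total_size - received))
        if not part:
            break  # connection closed early
        parts.append(part)
        received += len(part)
    return ''.join(parts)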
|
{
"content_hash": "589541e3f59ff119dd3fcf565662a7d1",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 82,
"avg_line_length": 34.16891891891892,
"alnum_prop": 0.5159185287719992,
"repo_name": "AlanProject/day08",
"id": "b38b0a30bf29b4f5b5388261580ce69c0b056762",
"size": "5250",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MyFTP_Client/modules/main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "12234"
}
],
"symlink_target": ""
}
|
from azure.identity import DefaultAzureCredential
from azure.mgmt.streamanalytics import StreamAnalyticsManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-streamanalytics
# USAGE
python create_an_event_hub_output_with_json_serialization.py
    Before running the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = StreamAnalyticsManagementClient(
credential=DefaultAzureCredential(),
subscription_id="56b5e0a9-b645-407d-99b0-c64f86013e3d",
)
response = client.outputs.create_or_replace(
resource_group_name="sjrg6912",
job_name="sj3310",
output_name="output5195",
output={
"properties": {
"datasource": {
"properties": {
"eventHubName": "sdkeventhub",
"partitionKey": "partitionKey",
"serviceBusNamespace": "sdktest",
"sharedAccessPolicyKey": "sharedAccessPolicyKey=",
"sharedAccessPolicyName": "RootManageSharedAccessKey",
},
"type": "Microsoft.ServiceBus/EventHub",
},
"serialization": {"properties": {"encoding": "UTF8", "format": "Array"}, "type": "Json"},
"watermarkSettings": {
"maxWatermarkDifferenceAcrossPartitions": "16:14:30",
"watermarkMode": "SendCurrentPartitionWatermark",
},
}
},
)
print(response)
# x-ms-original-file: specification/streamanalytics/resource-manager/Microsoft.StreamAnalytics/preview/2021-10-01-preview/examples/Output_Create_EventHub_JSON.json
if __name__ == "__main__":
main()
|
{
"content_hash": "e6aa00704a749ca509d27c41b9526635",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 163,
"avg_line_length": 39.301886792452834,
"alnum_prop": 0.6159385501680269,
"repo_name": "Azure/azure-sdk-for-python",
"id": "94e7738d29a20c0ec23d8b37dc6363a2ab75c0f4",
"size": "2551",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/streamanalytics/azure-mgmt-streamanalytics/generated_samples/create_an_event_hub_output_with_json_serialization.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
from angr.errors import SimEngineError, SimMemoryError
from angr.analyses.bindiff import differing_constants
from angr.analyses.bindiff import UnmatchedStatementsException
from angr import Analysis, register_analysis
from . import chain_builder
from . import gadget_analyzer
from . import common
import pickle
import inspect
import logging
import progressbar
from .errors import RopException
from .rop_gadget import RopGadget, StackPivot
from multiprocessing import Pool
l = logging.getLogger('angrop.rop')
_global_gadget_analyzer = None
# global initializer for multiprocessing
def _set_global_gadget_analyzer(rop_gadget_analyzer):
global _global_gadget_analyzer
_global_gadget_analyzer = rop_gadget_analyzer
def run_worker(addr):
return _global_gadget_analyzer.analyze_gadget(addr)
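# Passing the analyzer through the pool initializer avoids re-pickling it for
# every work item; each worker process receives one copy at pool start-up.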
# todo what if we have mov eax, [rsp+0x20]; ret (cache would need to know where it is or at least a min/max)
# todo what if we have pop eax; mov ebx, eax; need to encode that we cannot set them to different values
class ROP(Analysis):
"""
This class is a semantic aware rop gadget finder
It is a work in progress, so don't be surprised if something doesn't quite work
After calling find_gadgets(), find_gadgets_single_threaded() or load_gadgets(),
self.gadgets, self.stack_pivots, and self._duplicates is populated.
Additionally, all public methods from ChainBuilder are copied into ROP.
"""
    def __init__(self, only_check_near_rets=True, max_block_size=20, max_sym_mem_accesses=4, fast_mode=None):
        """
        Initializes the rop gadget finder
        :param only_check_near_rets: if True, skip blocks that are not near rets
        :param max_block_size: limits the size of blocks considered; longer blocks are less likely to be good
                               rop gadgets, so we limit the size we consider
        :param max_sym_mem_accesses: the maximum number of symbolic memory accesses allowed in a gadget
        :param fast_mode: if set to True, sets options to run fast; if set to False, sets options to find more
                          gadgets; if set to None, makes a decision based on the size of the binary
        :return:
        """
# params
self._max_block_size = max_block_size
self._only_check_near_rets = only_check_near_rets
self._max_sym_mem_accesses = max_sym_mem_accesses
a = self.project.arch
self._sp_reg = a.register_names[a.sp_offset]
self._ip_reg = a.register_names[a.ip_offset]
self._base_pointer = a.register_names[a.bp_offset]
# get list of multipurpose registers
self._reg_list = a.default_symbolic_registers
# prune the register list of the instruction pointer and the stack pointer
self._reg_list = [r for r in self._reg_list if r not in (self._sp_reg, self._ip_reg)]
# get ret locations
self._ret_locations = self._get_ret_locations()
# list of RopGadget's
self.gadgets = []
self.stack_pivots = []
self._duplicates = []
# RopChain settings
self.badbytes = []
self.roparg_filler = None
num_to_check = len(list(self._addresses_to_check()))
# fast mode
if fast_mode is None:
if num_to_check > 20000:
fast_mode = True
l.warning("Enabling fast mode for large binary")
else:
fast_mode = False
self._fast_mode = fast_mode
if self._fast_mode:
self._max_block_size = 12
self._max_sym_mem_accesses = 1
num_to_check = len(list(self._addresses_to_check()))
l.info("There are %d addresses within %d bytes of a ret",
num_to_check, self._max_block_size)
# gadget analyzer
self._gadget_analyzer = gadget_analyzer.GadgetAnalyzer(self.project, self._reg_list, self._max_block_size,
self._fast_mode, self._max_sym_mem_accesses)
# chain builder
self._chain_builder = None
# silence annoying loggers
logging.getLogger('angr.engines.vex.ccall').setLevel(logging.CRITICAL)
logging.getLogger('angr.engines.vex.expressions.ccall').setLevel(logging.CRITICAL)
logging.getLogger('angr.engines.vex.irop').setLevel(logging.CRITICAL)
logging.getLogger('angr.state_plugins.symbolic_memory').setLevel(logging.CRITICAL)
logging.getLogger('pyvex.lifting.libvex').setLevel(logging.CRITICAL)
logging.getLogger('angr.procedures.cgc.deallocate').setLevel(logging.CRITICAL)
def find_gadgets(self, processes=4, show_progress=True):
"""
Finds all the gadgets in the binary by calling analyze_gadget on every address near a ret.
Saves gadgets in self.gadgets
Saves stack pivots in self.stack_pivots
:param processes: number of processes to use
"""
self.gadgets = []
pool = Pool(processes=processes, initializer=_set_global_gadget_analyzer, initargs=(self._gadget_analyzer,))
it = pool.imap_unordered(run_worker, self._addresses_to_check_with_caching(show_progress), chunksize=5)
for gadget in it:
if gadget is not None:
if isinstance(gadget, RopGadget):
self.gadgets.append(gadget)
elif isinstance(gadget, StackPivot):
self.stack_pivots.append(gadget)
pool.close()
# fix up gadgets from cache
for g in self.gadgets:
if g.addr in self._cache:
dups = {g.addr}
for addr in self._cache[g.addr]:
dups.add(addr)
g_copy = g.copy()
g_copy.addr = addr
self.gadgets.append(g_copy)
self._duplicates.append(dups)
self.gadgets = sorted(self.gadgets, key=lambda x: x.addr)
self._reload_chain_funcs()
def find_gadgets_single_threaded(self, show_progress=True):
"""
Finds all the gadgets in the binary by calling analyze_gadget on every address near a ret
Saves gadgets in self.gadgets
Saves stack pivots in self.stack_pivots
"""
self.gadgets = []
_set_global_gadget_analyzer(self._gadget_analyzer)
        for addr in self._addresses_to_check_with_caching(show_progress):
gadget = _global_gadget_analyzer.analyze_gadget(addr)
if gadget is not None:
if isinstance(gadget, RopGadget):
self.gadgets.append(gadget)
elif isinstance(gadget, StackPivot):
self.stack_pivots.append(gadget)
# fix up gadgets from cache
for g in self.gadgets:
if g.addr in self._cache:
dups = {g.addr}
for addr in self._cache[g.addr]:
dups.add(addr)
g_copy = g.copy()
g_copy.addr = addr
self.gadgets.append(g_copy)
self._duplicates.append(dups)
self.gadgets = sorted(self.gadgets, key=lambda x: x.addr)
self._reload_chain_funcs()
def save_gadgets(self, path):
"""
Saves gadgets in a file.
:param path: A path for a file where the gadgets are stored
"""
with open(path, "wb") as f:
pickle.dump(self._get_cache_tuple(), f)
def load_gadgets(self, path):
"""
Loads gadgets from a file.
:param path: A path for a file where the gadgets are loaded
"""
        with open(path, "rb") as f:
            cache_tuple = pickle.load(f)
self._load_cache_tuple(cache_tuple)
def set_badbytes(self, badbytes):
"""
Define badbytes which should not appear in the generated ropchain.
:param badbytes: a list of 8 bit integers
"""
if not isinstance(badbytes, list):
print("Require a list, e.g: [0x00, 0x09]")
return
self.badbytes = badbytes
if len(self.gadgets) > 0:
self.chain_builder._set_badbytes(self.badbytes)
def set_roparg_filler(self, roparg_filler):
"""
Define rop gadget filler argument. These will be used if the rop chain needs to pop
useless registers.
If roparg_filler is None, symbolic values will be used and the concrete values will
be whatever the constraint solver chooses (usually 0).
:param roparg_filler: A integer which is used when popping useless register or None.
"""
if not isinstance(roparg_filler, (int, type(None))):
print("Require an integer, e.g: 0x41414141 or None")
return
self.roparg_filler = roparg_filler
if len(self.gadgets) > 0:
self.chain_builder._set_roparg_filler(self.roparg_filler)
def get_badbytes(self):
"""
Returns list of badbytes.
:returns the list of badbytes
"""
return self.badbytes
def _get_cache_tuple(self):
return self.gadgets, self.stack_pivots, self._duplicates
def _load_cache_tuple(self, cache_tuple):
self.gadgets, self.stack_pivots, self._duplicates = cache_tuple
self._reload_chain_funcs()
def _reload_chain_funcs(self):
for f_name, f in inspect.getmembers(self.chain_builder, predicate=inspect.ismethod):
if f_name.startswith("_"):
continue
setattr(self, f_name, f)
@property
def chain_builder(self):
if self._chain_builder is not None:
return self._chain_builder
elif len(self.gadgets) > 0:
self._chain_builder = chain_builder.ChainBuilder(self.project, self.gadgets, self._duplicates,
self._reg_list, self._base_pointer, self.badbytes,
self.roparg_filler)
return self._chain_builder
else:
raise Exception("No gadgets available, call find_gadgets() or load_gadgets() if you haven't already.")
def _block_has_ip_relative(self, addr, bl):
"""
Checks if a block has any ip relative instructions
"""
string = bl.bytes
test_addr = 0x41414140 + addr % 0x10
bl2 = self.project.factory.block(test_addr, byte_string=string)
try:
diff_constants = differing_constants(bl, bl2)
except UnmatchedStatementsException:
return True
# check if it changes if we move it
bl_end = addr + bl.size
bl2_end = test_addr + bl2.size
filtered_diffs = []
for d in diff_constants:
if d.value_a < addr or d.value_a >= bl_end or \
d.value_b < test_addr or d.value_b >= bl2_end:
filtered_diffs.append(d)
return len(filtered_diffs) > 0
def _addresses_to_check_with_caching(self, show_progress=True):
num_addrs = len(list(self._addresses_to_check()))
widgets = ['ROP: ', progressbar.Percentage(), ' ',
progressbar.Bar(marker=progressbar.RotatingMarker()),
' ', progressbar.ETA(), ' ', progressbar.FileTransferSpeed()]
progress = progressbar.ProgressBar(widgets=widgets, maxval=num_addrs)
if show_progress:
progress.start()
self._cache = dict()
seen = dict()
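        # seen maps block bytes -> the first address where those bytes occurred;
        # _cache maps that first address -> every later duplicate address, so each
        # unique byte sequence is only analyzed once.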
for i, a in enumerate(self._addresses_to_check()):
if show_progress:
progress.update(i)
try:
bl = self.project.factory.block(a)
if bl.size > self._max_block_size:
continue
block_data = bl.bytes
except (SimEngineError, SimMemoryError):
continue
if block_data in seen:
self._cache[seen[block_data]].add(a)
continue
else:
if self._is_jumpkind_valid(bl.vex.jumpkind) and \
len(bl.vex.constant_jump_targets) == 0 and \
not self._block_has_ip_relative(a, bl):
seen[block_data] = a
self._cache[a] = set()
yield a
if show_progress:
progress.finish()
def _addresses_to_check(self):
"""
:return: all the addresses to check
"""
if self._only_check_near_rets:
# align block size
alignment = self.project.arch.instruction_alignment
block_size = (self._max_block_size & ((1 << self.project.arch.bits) - alignment)) + alignment
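            # e.g. with max_block_size=20 and alignment=4 this is (20 & ~3) + 4 == 24:
            # round down to the alignment, then add one extra alignment unit so the
            # ret instruction itself is included in the scanned range.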
slices = [(addr-block_size, addr) for addr in self._ret_locations]
current_addr = 0
for st, _ in slices:
current_addr = max(current_addr, st)
end_addr = st + block_size + alignment
for i in range(current_addr, end_addr, alignment):
segment = self.project.loader.main_object.find_segment_containing(i)
if segment is not None and segment.is_executable:
yield i
current_addr = max(current_addr, end_addr)
else:
for segment in self.project.loader.main_object.segments:
if segment.is_executable:
l.debug("Analyzing segment with address range: 0x%x, 0x%x" % (segment.min_addr, segment.max_addr))
for addr in range(segment.min_addr, segment.max_addr):
yield addr
def _get_ret_locations(self):
"""
:return: all the locations in the binary with a ret instruction
"""
try:
return self._get_ret_locations_by_string()
except RopException:
pass
addrs = []
seen = set()
for segment in self.project.loader.main_object.segments:
if segment.is_executable:
num_bytes = segment.max_addr-segment.min_addr
alignment = self.project.arch.instruction_alignment
# hack for arm thumb
if self.project.arch.linux_name == "aarch64" or self.project.arch.linux_name == "arm":
alignment = 1
# iterate through the code looking for rets
for addr in range(segment.min_addr, segment.min_addr + num_bytes, alignment):
                    # don't recheck addresses we've seen before
if addr in seen:
continue
try:
block = self.project.factory.block(addr)
                        # if it has a ret, get the return address
if block.vex.jumpkind.startswith("Ijk_Ret"):
ret_addr = block.instruction_addrs[-1]
# hack for mips pipelining
if self.project.arch.linux_name.startswith("mips"):
ret_addr = block.instruction_addrs[-2]
if ret_addr not in seen:
addrs.append(ret_addr)
# save the addresses in the block
seen.update(block.instruction_addrs)
except (SimEngineError, SimMemoryError):
pass
return sorted(addrs)
def _get_ret_locations_by_string(self):
"""
uses a string filter to find the return instructions
:return: all the locations in the binary with a ret instruction
"""
if self.project.arch.linux_name == "x86_64" or self.project.arch.linux_name == "i386":
ret_instructions = {b"\xc2", b"\xc3", b"\xca", b"\xcb"}
else:
raise RopException("Only have ret strings for i386 and x86_64")
addrs = []
try:
for segment in self.project.loader.main_object.segments:
if segment.is_executable:
num_bytes = segment.max_addr-segment.min_addr
read_bytes = self.project.loader.memory.load(segment.min_addr, num_bytes)
for ret_instruction in ret_instructions:
for loc in common.str_find_all(read_bytes, ret_instruction):
addrs.append(loc + segment.min_addr)
except KeyError:
l.warning("Key error with segment analysis")
# try reading from state
state = self.project.factory.entry_state()
for segment in self.project.loader.main_object.segments:
if segment.is_executable:
num_bytes = segment.max_addr - segment.min_addr
read_bytes = state.solver.eval(state.memory.load(segment.min_addr, num_bytes), cast_to=bytes)
for ret_instruction in ret_instructions:
for loc in common.str_find_all(read_bytes, ret_instruction):
addrs.append(loc + segment.min_addr)
return sorted(addrs)
@staticmethod
def _is_jumpkind_valid(jk):
        return jk in {'Ijk_Boring', 'Ijk_Call', 'Ijk_Ret'}
register_analysis(ROP, 'ROP')
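# Hypothetical usage sketch (assumes an angr project; shown as comments only):
#   import angr, angrop  # importing angrop registers the ROP analysis
#   p = angr.Project("/bin/true")
#   rop = p.analyses.ROP()
#   rop.find_gadgets_single_threaded()
#   chain = rop.set_regs(rax=0x3b)  # set_regs is one of the ChainBuilder
#                                   # methods copied onto ROP by _reload_chain_funcs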
|
{
"content_hash": "68ed560c3fa9de6b4eb8304f72eb22ac",
"timestamp": "",
"source": "github",
"line_count": 422,
"max_line_length": 118,
"avg_line_length": 40.822274881516584,
"alnum_prop": 0.5757822023567656,
"repo_name": "salls/angrop",
"id": "b0076fad48db250094723a4558f77acecab3cbe8",
"size": "17227",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "angrop/rop.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "128435"
}
],
"symlink_target": ""
}
|
'''Wrapper for http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h
Generated by tools/gengl.py.
Do not modify this file.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: gengl.py 601 2007-02-04 05:36:59Z Alex.Holkner $'
from ctypes import *
from pyglet.gl.lib import link_WGL as _link_function
from pyglet.gl.lib import c_ptrdiff_t, c_void
# BEGIN GENERATED CONTENT (do not edit below this line)
# This content is generated by tools/gengl.py.
# Wrapper for http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h
# H (C:\cygwin\home\Alex\pyglet\tools\wgl.h:7)
# H (C:\cygwin\home\Alex\pyglet\tools\wgl.h:7)
WIN32_LEAN_AND_MEAN = 1 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:40
GLAPI = 0 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:51
WGL_WGLEXT_VERSION = 6 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:59
# ARB_buffer_region (http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:61)
WGL_FRONT_COLOR_BUFFER_BIT_ARB = 1 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:62
WGL_BACK_COLOR_BUFFER_BIT_ARB = 2 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:63
WGL_DEPTH_BUFFER_BIT_ARB = 4 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:64
WGL_STENCIL_BUFFER_BIT_ARB = 8 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:65
# ARB_multisample (http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:68)
WGL_SAMPLE_BUFFERS_ARB = 8257 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:69
WGL_SAMPLES_ARB = 8258 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:70
# ARB_extensions_string (http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:73)
# ARB_pixel_format (http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:76)
WGL_NUMBER_PIXEL_FORMATS_ARB = 8192 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:77
WGL_DRAW_TO_WINDOW_ARB = 8193 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:78
WGL_DRAW_TO_BITMAP_ARB = 8194 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:79
WGL_ACCELERATION_ARB = 8195 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:80
WGL_NEED_PALETTE_ARB = 8196 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:81
WGL_NEED_SYSTEM_PALETTE_ARB = 8197 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:82
WGL_SWAP_LAYER_BUFFERS_ARB = 8198 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:83
WGL_SWAP_METHOD_ARB = 8199 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:84
WGL_NUMBER_OVERLAYS_ARB = 8200 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:85
WGL_NUMBER_UNDERLAYS_ARB = 8201 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:86
WGL_TRANSPARENT_ARB = 8202 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:87
WGL_TRANSPARENT_RED_VALUE_ARB = 8247 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:88
WGL_TRANSPARENT_GREEN_VALUE_ARB = 8248 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:89
WGL_TRANSPARENT_BLUE_VALUE_ARB = 8249 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:90
WGL_TRANSPARENT_ALPHA_VALUE_ARB = 8250 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:91
WGL_TRANSPARENT_INDEX_VALUE_ARB = 8251 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:92
WGL_SHARE_DEPTH_ARB = 8204 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:93
WGL_SHARE_STENCIL_ARB = 8205 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:94
WGL_SHARE_ACCUM_ARB = 8206 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:95
WGL_SUPPORT_GDI_ARB = 8207 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:96
WGL_SUPPORT_OPENGL_ARB = 8208 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:97
WGL_DOUBLE_BUFFER_ARB = 8209 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:98
WGL_STEREO_ARB = 8210 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:99
WGL_PIXEL_TYPE_ARB = 8211 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:100
WGL_COLOR_BITS_ARB = 8212 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:101
WGL_RED_BITS_ARB = 8213 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:102
WGL_RED_SHIFT_ARB = 8214 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:103
WGL_GREEN_BITS_ARB = 8215 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:104
WGL_GREEN_SHIFT_ARB = 8216 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:105
WGL_BLUE_BITS_ARB = 8217 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:106
WGL_BLUE_SHIFT_ARB = 8218 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:107
WGL_ALPHA_BITS_ARB = 8219 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:108
WGL_ALPHA_SHIFT_ARB = 8220 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:109
WGL_ACCUM_BITS_ARB = 8221 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:110
WGL_ACCUM_RED_BITS_ARB = 8222 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:111
WGL_ACCUM_GREEN_BITS_ARB = 8223 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:112
WGL_ACCUM_BLUE_BITS_ARB = 8224 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:113
WGL_ACCUM_ALPHA_BITS_ARB = 8225 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:114
WGL_DEPTH_BITS_ARB = 8226 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:115
WGL_STENCIL_BITS_ARB = 8227 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:116
WGL_AUX_BUFFERS_ARB = 8228 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:117
WGL_NO_ACCELERATION_ARB = 8229 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:118
WGL_GENERIC_ACCELERATION_ARB = 8230 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:119
WGL_FULL_ACCELERATION_ARB = 8231 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:120
WGL_SWAP_EXCHANGE_ARB = 8232 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:121
WGL_SWAP_COPY_ARB = 8233 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:122
WGL_SWAP_UNDEFINED_ARB = 8234 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:123
WGL_TYPE_RGBA_ARB = 8235 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:124
WGL_TYPE_COLORINDEX_ARB = 8236 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:125
# ARB_make_current_read (http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:128)
ERROR_INVALID_PIXEL_TYPE_ARB = 8259 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:129
ERROR_INCOMPATIBLE_DEVICE_CONTEXTS_ARB = 8276 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:130
# ARB_pbuffer (http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:133)
WGL_DRAW_TO_PBUFFER_ARB = 8237 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:134
WGL_MAX_PBUFFER_PIXELS_ARB = 8238 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:135
WGL_MAX_PBUFFER_WIDTH_ARB = 8239 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:136
WGL_MAX_PBUFFER_HEIGHT_ARB = 8240 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:137
WGL_PBUFFER_LARGEST_ARB = 8243 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:138
WGL_PBUFFER_WIDTH_ARB = 8244 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:139
WGL_PBUFFER_HEIGHT_ARB = 8245 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:140
WGL_PBUFFER_LOST_ARB = 8246 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:141
# ARB_render_texture (http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:144)
WGL_BIND_TO_TEXTURE_RGB_ARB = 8304 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:145
WGL_BIND_TO_TEXTURE_RGBA_ARB = 8305 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:146
WGL_TEXTURE_FORMAT_ARB = 8306 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:147
WGL_TEXTURE_TARGET_ARB = 8307 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:148
WGL_MIPMAP_TEXTURE_ARB = 8308 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:149
WGL_TEXTURE_RGB_ARB = 8309 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:150
WGL_TEXTURE_RGBA_ARB = 8310 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:151
WGL_NO_TEXTURE_ARB = 8311 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:152
WGL_TEXTURE_CUBE_MAP_ARB = 8312 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:153
WGL_TEXTURE_1D_ARB = 8313 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:154
WGL_TEXTURE_2D_ARB = 8314 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:155
WGL_MIPMAP_LEVEL_ARB = 8315 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:156
WGL_CUBE_MAP_FACE_ARB = 8316 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:157
WGL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB = 8317 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:158
WGL_TEXTURE_CUBE_MAP_NEGATIVE_X_ARB = 8318 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:159
WGL_TEXTURE_CUBE_MAP_POSITIVE_Y_ARB = 8319 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:160
WGL_TEXTURE_CUBE_MAP_NEGATIVE_Y_ARB = 8320 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:161
WGL_TEXTURE_CUBE_MAP_POSITIVE_Z_ARB = 8321 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:162
WGL_TEXTURE_CUBE_MAP_NEGATIVE_Z_ARB = 8322 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:163
WGL_FRONT_LEFT_ARB = 8323 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:164
WGL_FRONT_RIGHT_ARB = 8324 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:165
WGL_BACK_LEFT_ARB = 8325 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:166
WGL_BACK_RIGHT_ARB = 8326 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:167
WGL_AUX0_ARB = 8327 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:168
WGL_AUX1_ARB = 8328 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:169
WGL_AUX2_ARB = 8329 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:170
WGL_AUX3_ARB = 8330 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:171
WGL_AUX4_ARB = 8331 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:172
WGL_AUX5_ARB = 8332 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:173
WGL_AUX6_ARB = 8333 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:174
WGL_AUX7_ARB = 8334 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:175
WGL_AUX8_ARB = 8335 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:176
WGL_AUX9_ARB = 8336 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:177
# ARB_pixel_format_float (http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:180)
WGL_TYPE_RGBA_FLOAT_ARB = 8608 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:181
# EXT_make_current_read (http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:184)
ERROR_INVALID_PIXEL_TYPE_EXT = 8259 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:185
# EXT_pixel_format (http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:188)
WGL_NUMBER_PIXEL_FORMATS_EXT = 8192 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:189
WGL_DRAW_TO_WINDOW_EXT = 8193 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:190
WGL_DRAW_TO_BITMAP_EXT = 8194 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:191
WGL_ACCELERATION_EXT = 8195 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:192
WGL_NEED_PALETTE_EXT = 8196 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:193
WGL_NEED_SYSTEM_PALETTE_EXT = 8197 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:194
WGL_SWAP_LAYER_BUFFERS_EXT = 8198 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:195
WGL_SWAP_METHOD_EXT = 8199 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:196
WGL_NUMBER_OVERLAYS_EXT = 8200 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:197
WGL_NUMBER_UNDERLAYS_EXT = 8201 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:198
WGL_TRANSPARENT_EXT = 8202 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:199
WGL_TRANSPARENT_VALUE_EXT = 8203 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:200
WGL_SHARE_DEPTH_EXT = 8204 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:201
WGL_SHARE_STENCIL_EXT = 8205 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:202
WGL_SHARE_ACCUM_EXT = 8206 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:203
WGL_SUPPORT_GDI_EXT = 8207 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:204
WGL_SUPPORT_OPENGL_EXT = 8208 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:205
WGL_DOUBLE_BUFFER_EXT = 8209 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:206
WGL_STEREO_EXT = 8210 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:207
WGL_PIXEL_TYPE_EXT = 8211 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:208
WGL_COLOR_BITS_EXT = 8212 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:209
WGL_RED_BITS_EXT = 8213 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:210
WGL_RED_SHIFT_EXT = 8214 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:211
WGL_GREEN_BITS_EXT = 8215 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:212
WGL_GREEN_SHIFT_EXT = 8216 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:213
WGL_BLUE_BITS_EXT = 8217 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:214
WGL_BLUE_SHIFT_EXT = 8218 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:215
WGL_ALPHA_BITS_EXT = 8219 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:216
WGL_ALPHA_SHIFT_EXT = 8220 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:217
WGL_ACCUM_BITS_EXT = 8221 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:218
WGL_ACCUM_RED_BITS_EXT = 8222 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:219
WGL_ACCUM_GREEN_BITS_EXT = 8223 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:220
WGL_ACCUM_BLUE_BITS_EXT = 8224 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:221
WGL_ACCUM_ALPHA_BITS_EXT = 8225 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:222
WGL_DEPTH_BITS_EXT = 8226 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:223
WGL_STENCIL_BITS_EXT = 8227 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:224
WGL_AUX_BUFFERS_EXT = 8228 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:225
WGL_NO_ACCELERATION_EXT = 8229 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:226
WGL_GENERIC_ACCELERATION_EXT = 8230 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:227
WGL_FULL_ACCELERATION_EXT = 8231 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:228
WGL_SWAP_EXCHANGE_EXT = 8232 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:229
WGL_SWAP_COPY_EXT = 8233 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:230
WGL_SWAP_UNDEFINED_EXT = 8234 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:231
WGL_TYPE_RGBA_EXT = 8235 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:232
WGL_TYPE_COLORINDEX_EXT = 8236 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:233
# EXT_pbuffer (http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:236)
WGL_DRAW_TO_PBUFFER_EXT = 8237 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:237
WGL_MAX_PBUFFER_PIXELS_EXT = 8238 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:238
WGL_MAX_PBUFFER_WIDTH_EXT = 8239 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:239
WGL_MAX_PBUFFER_HEIGHT_EXT = 8240 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:240
WGL_OPTIMAL_PBUFFER_WIDTH_EXT = 8241 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:241
WGL_OPTIMAL_PBUFFER_HEIGHT_EXT = 8242 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:242
WGL_PBUFFER_LARGEST_EXT = 8243 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:243
WGL_PBUFFER_WIDTH_EXT = 8244 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:244
WGL_PBUFFER_HEIGHT_EXT = 8245 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:245
# EXT_depth_float (http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:248)
WGL_DEPTH_FLOAT_EXT = 8256 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:249
# 3DFX_multisample (http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:252)
WGL_SAMPLE_BUFFERS_3DFX = 8288 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:253
WGL_SAMPLES_3DFX = 8289 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:254
# EXT_multisample (http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:257)
WGL_SAMPLE_BUFFERS_EXT = 8257 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:258
WGL_SAMPLES_EXT = 8258 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:259
# I3D_digital_video_control (http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:262)
WGL_DIGITAL_VIDEO_CURSOR_ALPHA_FRAMEBUFFER_I3D = 8272 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:263
WGL_DIGITAL_VIDEO_CURSOR_ALPHA_VALUE_I3D = 8273 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:264
WGL_DIGITAL_VIDEO_CURSOR_INCLUDED_I3D = 8274 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:265
WGL_DIGITAL_VIDEO_GAMMA_CORRECTED_I3D = 8275 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:266
# I3D_gamma (http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:269)
WGL_GAMMA_TABLE_SIZE_I3D = 8270 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:270
WGL_GAMMA_EXCLUDE_DESKTOP_I3D = 8271 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:271
# I3D_genlock (http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:274)
WGL_GENLOCK_SOURCE_MULTIVIEW_I3D = 8260 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:275
WGL_GENLOCK_SOURCE_EXTENAL_SYNC_I3D = 8261 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:276
WGL_GENLOCK_SOURCE_EXTENAL_FIELD_I3D = 8262 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:277
WGL_GENLOCK_SOURCE_EXTENAL_TTL_I3D = 8263 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:278
WGL_GENLOCK_SOURCE_DIGITAL_SYNC_I3D = 8264 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:279
WGL_GENLOCK_SOURCE_DIGITAL_FIELD_I3D = 8265 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:280
WGL_GENLOCK_SOURCE_EDGE_FALLING_I3D = 8266 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:281
WGL_GENLOCK_SOURCE_EDGE_RISING_I3D = 8267 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:282
WGL_GENLOCK_SOURCE_EDGE_BOTH_I3D = 8268 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:283
# I3D_image_buffer (http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:286)
WGL_IMAGE_BUFFER_MIN_ACCESS_I3D = 1 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:287
WGL_IMAGE_BUFFER_LOCK_I3D = 2 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:288
# I3D_swap_frame_lock (http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:291)
# NV_render_depth_texture (http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:294)
WGL_BIND_TO_TEXTURE_DEPTH_NV = 8355 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:295
WGL_BIND_TO_TEXTURE_RECTANGLE_DEPTH_NV = 8356 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:296
WGL_DEPTH_TEXTURE_FORMAT_NV = 8357 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:297
WGL_TEXTURE_DEPTH_COMPONENT_NV = 8358 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:298
WGL_DEPTH_COMPONENT_NV = 8359 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:299
# NV_render_texture_rectangle (http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:302)
WGL_BIND_TO_TEXTURE_RECTANGLE_RGB_NV = 8352 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:303
WGL_BIND_TO_TEXTURE_RECTANGLE_RGBA_NV = 8353 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:304
WGL_TEXTURE_RECTANGLE_NV = 8354 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:305
# ATI_pixel_format_float (http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:308)
WGL_TYPE_RGBA_FLOAT_ATI = 8608 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:309
# NV_float_buffer (http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:312)
WGL_FLOAT_COMPONENTS_NV = 8368 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:313
WGL_BIND_TO_TEXTURE_RECTANGLE_FLOAT_R_NV = 8369 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:314
WGL_BIND_TO_TEXTURE_RECTANGLE_FLOAT_RG_NV = 8370 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:315
WGL_BIND_TO_TEXTURE_RECTANGLE_FLOAT_RGB_NV = 8371 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:316
WGL_BIND_TO_TEXTURE_RECTANGLE_FLOAT_RGBA_NV = 8372 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:317
WGL_TEXTURE_FLOAT_R_NV = 8373 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:318
WGL_TEXTURE_FLOAT_RG_NV = 8374 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:319
WGL_TEXTURE_FLOAT_RGB_NV = 8375 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:320
WGL_TEXTURE_FLOAT_RGBA_NV = 8376 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:321
# ARB_pbuffer (http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:327)
HANDLE = POINTER(None) # C:\cygwin\home\Alex\pyglet\tools\wgl.h:58
HPBUFFERARB = HANDLE # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:328
# EXT_pbuffer (http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:330)
HPBUFFEREXT = HANDLE # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:331
# ARB_buffer_region (http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:334)
WGL_ARB_buffer_region = 1 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:335
HDC = HANDLE # C:\cygwin\home\Alex\pyglet\tools\wgl.h:61
UINT = c_uint # C:\cygwin\home\Alex\pyglet\tools\wgl.h:50
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:337
wglCreateBufferRegionARB = _link_function('wglCreateBufferRegionARB', HANDLE, [HDC, c_int, UINT], 'ARB_buffer_region')
VOID = None # C:\cygwin\home\Alex\pyglet\tools\wgl.h:45
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:338
wglDeleteBufferRegionARB = _link_function('wglDeleteBufferRegionARB', VOID, [HANDLE], 'ARB_buffer_region')
BOOL = c_long # C:\cygwin\home\Alex\pyglet\tools\wgl.h:52
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:339
wglSaveBufferRegionARB = _link_function('wglSaveBufferRegionARB', BOOL, [HANDLE, c_int, c_int, c_int, c_int], 'ARB_buffer_region')
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:340
wglRestoreBufferRegionARB = _link_function('wglRestoreBufferRegionARB', BOOL, [HANDLE, c_int, c_int, c_int, c_int, c_int, c_int], 'ARB_buffer_region')
PFNWGLCREATEBUFFERREGIONARBPROC = CFUNCTYPE(HANDLE, HDC, c_int, UINT) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:342
PFNWGLDELETEBUFFERREGIONARBPROC = CFUNCTYPE(VOID, HANDLE) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:343
PFNWGLSAVEBUFFERREGIONARBPROC = CFUNCTYPE(BOOL, HANDLE, c_int, c_int, c_int, c_int) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:344
PFNWGLRESTOREBUFFERREGIONARBPROC = CFUNCTYPE(BOOL, HANDLE, c_int, c_int, c_int, c_int, c_int, c_int) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:345
# ARB_multisample (http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:348)
WGL_ARB_multisample = 1 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:349
# ARB_extensions_string (http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:352)
WGL_ARB_extensions_string = 1 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:353
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:355
wglGetExtensionsStringARB = _link_function('wglGetExtensionsStringARB', c_char_p, [HDC], 'ARB_extensions_string')
PFNWGLGETEXTENSIONSSTRINGARBPROC = CFUNCTYPE(c_char_p, HDC) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:357
# ARB_pixel_format (http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:360)
WGL_ARB_pixel_format = 1 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:361
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:363
wglGetPixelFormatAttribivARB = _link_function('wglGetPixelFormatAttribivARB', BOOL, [HDC, c_int, c_int, UINT, POINTER(c_int), POINTER(c_int)], 'ARB_pixel_format')
FLOAT = c_float # C:\cygwin\home\Alex\pyglet\tools\wgl.h:55
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:364
wglGetPixelFormatAttribfvARB = _link_function('wglGetPixelFormatAttribfvARB', BOOL, [HDC, c_int, c_int, UINT, POINTER(c_int), POINTER(FLOAT)], 'ARB_pixel_format')
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:365
wglChoosePixelFormatARB = _link_function('wglChoosePixelFormatARB', BOOL, [HDC, POINTER(c_int), POINTER(FLOAT), UINT, POINTER(c_int), POINTER(UINT)], 'ARB_pixel_format')
PFNWGLGETPIXELFORMATATTRIBIVARBPROC = CFUNCTYPE(BOOL, HDC, c_int, c_int, UINT, POINTER(c_int), POINTER(c_int)) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:367
PFNWGLGETPIXELFORMATATTRIBFVARBPROC = CFUNCTYPE(BOOL, HDC, c_int, c_int, UINT, POINTER(c_int), POINTER(FLOAT)) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:368
PFNWGLCHOOSEPIXELFORMATARBPROC = CFUNCTYPE(BOOL, HDC, POINTER(c_int), POINTER(FLOAT), UINT, POINTER(c_int), POINTER(UINT)) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:369
# ARB_make_current_read (http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:372)
WGL_ARB_make_current_read = 1 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:373
HGLRC = HANDLE # C:\cygwin\home\Alex\pyglet\tools\wgl.h:60
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:375
wglMakeContextCurrentARB = _link_function('wglMakeContextCurrentARB', BOOL, [HDC, HDC, HGLRC], 'ARB_make_current_read')
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:376
wglGetCurrentReadDCARB = _link_function('wglGetCurrentReadDCARB', HDC, [], 'ARB_make_current_read')
PFNWGLMAKECONTEXTCURRENTARBPROC = CFUNCTYPE(BOOL, HDC, HDC, HGLRC) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:378
PFNWGLGETCURRENTREADDCARBPROC = CFUNCTYPE(HDC) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:379
# ARB_pbuffer (http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:382)
WGL_ARB_pbuffer = 1 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:383
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:385
wglCreatePbufferARB = _link_function('wglCreatePbufferARB', HPBUFFERARB, [HDC, c_int, c_int, c_int, POINTER(c_int)], 'ARB_pbuffer')
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:386
wglGetPbufferDCARB = _link_function('wglGetPbufferDCARB', HDC, [HPBUFFERARB], 'ARB_pbuffer')
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:387
wglReleasePbufferDCARB = _link_function('wglReleasePbufferDCARB', c_int, [HPBUFFERARB, HDC], 'ARB_pbuffer')
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:388
wglDestroyPbufferARB = _link_function('wglDestroyPbufferARB', BOOL, [HPBUFFERARB], 'ARB_pbuffer')
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:389
wglQueryPbufferARB = _link_function('wglQueryPbufferARB', BOOL, [HPBUFFERARB, c_int, POINTER(c_int)], 'ARB_pbuffer')
PFNWGLCREATEPBUFFERARBPROC = CFUNCTYPE(HPBUFFERARB, HDC, c_int, c_int, c_int, POINTER(c_int)) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:391
PFNWGLGETPBUFFERDCARBPROC = CFUNCTYPE(HDC, HPBUFFERARB) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:392
PFNWGLRELEASEPBUFFERDCARBPROC = CFUNCTYPE(c_int, HPBUFFERARB, HDC) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:393
PFNWGLDESTROYPBUFFERARBPROC = CFUNCTYPE(BOOL, HPBUFFERARB) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:394
PFNWGLQUERYPBUFFERARBPROC = CFUNCTYPE(BOOL, HPBUFFERARB, c_int, POINTER(c_int)) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:395
# ARB_render_texture (http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:398)
WGL_ARB_render_texture = 1 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:399
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:401
wglBindTexImageARB = _link_function('wglBindTexImageARB', BOOL, [HPBUFFERARB, c_int], 'ARB_render_texture')
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:402
wglReleaseTexImageARB = _link_function('wglReleaseTexImageARB', BOOL, [HPBUFFERARB, c_int], 'ARB_render_texture')
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:403
wglSetPbufferAttribARB = _link_function('wglSetPbufferAttribARB', BOOL, [HPBUFFERARB, POINTER(c_int)], 'ARB_render_texture')
PFNWGLBINDTEXIMAGEARBPROC = CFUNCTYPE(BOOL, HPBUFFERARB, c_int) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:405
PFNWGLRELEASETEXIMAGEARBPROC = CFUNCTYPE(BOOL, HPBUFFERARB, c_int) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:406
PFNWGLSETPBUFFERATTRIBARBPROC = CFUNCTYPE(BOOL, HPBUFFERARB, POINTER(c_int)) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:407
# ARB_pixel_format_float (http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:410)
WGL_ARB_pixel_format_float = 1 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:411
# EXT_display_color_table (http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:414)
WGL_EXT_display_color_table = 1 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:415
GLboolean = c_ubyte # C:\cygwin\home\Alex\pyglet\tools\wgl.h:18
GLushort = c_ushort # C:\cygwin\home\Alex\pyglet\tools\wgl.h:25
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:417
wglCreateDisplayColorTableEXT = _link_function('wglCreateDisplayColorTableEXT', GLboolean, [GLushort], 'EXT_display_color_table')
GLuint = c_uint # C:\cygwin\home\Alex\pyglet\tools\wgl.h:26
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:418
wglLoadDisplayColorTableEXT = _link_function('wglLoadDisplayColorTableEXT', GLboolean, [POINTER(GLushort), GLuint], 'EXT_display_color_table')
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:419
wglBindDisplayColorTableEXT = _link_function('wglBindDisplayColorTableEXT', GLboolean, [GLushort], 'EXT_display_color_table')
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:420
wglDestroyDisplayColorTableEXT = _link_function('wglDestroyDisplayColorTableEXT', VOID, [GLushort], 'EXT_display_color_table')
PFNWGLCREATEDISPLAYCOLORTABLEEXTPROC = CFUNCTYPE(GLboolean, GLushort) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:422
PFNWGLLOADDISPLAYCOLORTABLEEXTPROC = CFUNCTYPE(GLboolean, POINTER(GLushort), GLuint) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:423
PFNWGLBINDDISPLAYCOLORTABLEEXTPROC = CFUNCTYPE(GLboolean, GLushort) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:424
PFNWGLDESTROYDISPLAYCOLORTABLEEXTPROC = CFUNCTYPE(VOID, GLushort) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:425
# EXT_extensions_string (http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:428)
WGL_EXT_extensions_string = 1 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:429
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:431
wglGetExtensionsStringEXT = _link_function('wglGetExtensionsStringEXT', c_char_p, [], 'EXT_extensions_string')
PFNWGLGETEXTENSIONSSTRINGEXTPROC = CFUNCTYPE(c_char_p) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:433
# EXT_make_current_read (http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:436)
WGL_EXT_make_current_read = 1 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:437
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:439
wglMakeContextCurrentEXT = _link_function('wglMakeContextCurrentEXT', BOOL, [HDC, HDC, HGLRC], 'EXT_make_current_read')
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:440
wglGetCurrentReadDCEXT = _link_function('wglGetCurrentReadDCEXT', HDC, [], 'EXT_make_current_read')
PFNWGLMAKECONTEXTCURRENTEXTPROC = CFUNCTYPE(BOOL, HDC, HDC, HGLRC) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:442
PFNWGLGETCURRENTREADDCEXTPROC = CFUNCTYPE(HDC) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:443
# EXT_pbuffer (http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:446)
WGL_EXT_pbuffer = 1 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:447
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:449
wglCreatePbufferEXT = _link_function('wglCreatePbufferEXT', HPBUFFEREXT, [HDC, c_int, c_int, c_int, POINTER(c_int)], 'EXT_pbuffer')
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:450
wglGetPbufferDCEXT = _link_function('wglGetPbufferDCEXT', HDC, [HPBUFFEREXT], 'EXT_pbuffer')
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:451
wglReleasePbufferDCEXT = _link_function('wglReleasePbufferDCEXT', c_int, [HPBUFFEREXT, HDC], 'EXT_pbuffer')
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:452
wglDestroyPbufferEXT = _link_function('wglDestroyPbufferEXT', BOOL, [HPBUFFEREXT], 'EXT_pbuffer')
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:453
wglQueryPbufferEXT = _link_function('wglQueryPbufferEXT', BOOL, [HPBUFFEREXT, c_int, POINTER(c_int)], 'EXT_pbuffer')
PFNWGLCREATEPBUFFEREXTPROC = CFUNCTYPE(HPBUFFEREXT, HDC, c_int, c_int, c_int, POINTER(c_int)) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:455
PFNWGLGETPBUFFERDCEXTPROC = CFUNCTYPE(HDC, HPBUFFEREXT) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:456
PFNWGLRELEASEPBUFFERDCEXTPROC = CFUNCTYPE(c_int, HPBUFFEREXT, HDC) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:457
PFNWGLDESTROYPBUFFEREXTPROC = CFUNCTYPE(BOOL, HPBUFFEREXT) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:458
PFNWGLQUERYPBUFFEREXTPROC = CFUNCTYPE(BOOL, HPBUFFEREXT, c_int, POINTER(c_int)) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:459
# EXT_pixel_format (http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:462)
WGL_EXT_pixel_format = 1 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:463
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:465
wglGetPixelFormatAttribivEXT = _link_function('wglGetPixelFormatAttribivEXT', BOOL, [HDC, c_int, c_int, UINT, POINTER(c_int), POINTER(c_int)], 'EXT_pixel_format')
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:466
wglGetPixelFormatAttribfvEXT = _link_function('wglGetPixelFormatAttribfvEXT', BOOL, [HDC, c_int, c_int, UINT, POINTER(c_int), POINTER(FLOAT)], 'EXT_pixel_format')
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:467
wglChoosePixelFormatEXT = _link_function('wglChoosePixelFormatEXT', BOOL, [HDC, POINTER(c_int), POINTER(FLOAT), UINT, POINTER(c_int), POINTER(UINT)], 'EXT_pixel_format')
PFNWGLGETPIXELFORMATATTRIBIVEXTPROC = CFUNCTYPE(BOOL, HDC, c_int, c_int, UINT, POINTER(c_int), POINTER(c_int)) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:469
PFNWGLGETPIXELFORMATATTRIBFVEXTPROC = CFUNCTYPE(BOOL, HDC, c_int, c_int, UINT, POINTER(c_int), POINTER(FLOAT)) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:470
PFNWGLCHOOSEPIXELFORMATEXTPROC = CFUNCTYPE(BOOL, HDC, POINTER(c_int), POINTER(FLOAT), UINT, POINTER(c_int), POINTER(UINT)) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:471
# EXT_swap_control (http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:474)
WGL_EXT_swap_control = 1 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:475
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:477
wglSwapIntervalEXT = _link_function('wglSwapIntervalEXT', BOOL, [c_int], 'EXT_swap_control')
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:478
wglGetSwapIntervalEXT = _link_function('wglGetSwapIntervalEXT', c_int, [], 'EXT_swap_control')
PFNWGLSWAPINTERVALEXTPROC = CFUNCTYPE(BOOL, c_int) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:480
PFNWGLGETSWAPINTERVALEXTPROC = CFUNCTYPE(c_int) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:481
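# Usage sketch (hypothetical; a WGL context must be current on Windows before
# these calls succeed):
#   wglSwapIntervalEXT(1)  # sync buffer swaps to the vertical retrace (vsync on)
#   wglSwapIntervalEXT(0)  # swap immediately (vsync off)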
# EXT_depth_float (http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:484)
WGL_EXT_depth_float = 1 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:485
# NV_vertex_array_range (http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:488)
WGL_NV_vertex_array_range = 1 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:489
GLsizei = c_int # C:\cygwin\home\Alex\pyglet\tools\wgl.h:23
GLfloat = c_float # C:\cygwin\home\Alex\pyglet\tools\wgl.h:27
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:491
wglAllocateMemoryNV = _link_function('wglAllocateMemoryNV', POINTER(c_void), [GLsizei, GLfloat, GLfloat, GLfloat], 'NV_vertex_array_range')
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:492
wglFreeMemoryNV = _link_function('wglFreeMemoryNV', None, [POINTER(None)], 'NV_vertex_array_range')
PFNWGLALLOCATEMEMORYNVPROC = CFUNCTYPE(POINTER(c_void), GLsizei, GLfloat, GLfloat, GLfloat) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:494
PFNWGLFREEMEMORYNVPROC = CFUNCTYPE(None, POINTER(None)) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:495
# 3DFX_multisample (http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:498)
WGL_3DFX_multisample = 1 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:499
# EXT_multisample (http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:502)
WGL_EXT_multisample = 1 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:503
# OML_sync_control (http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:506)
WGL_OML_sync_control = 1 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:507
INT64 = c_longlong # C:\cygwin\home\Alex\pyglet\tools\wgl.h:42
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:509
wglGetSyncValuesOML = _link_function('wglGetSyncValuesOML', BOOL, [HDC, POINTER(INT64), POINTER(INT64), POINTER(INT64)], 'OML_sync_control')
INT32 = c_int # C:\cygwin\home\Alex\pyglet\tools\wgl.h:35
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:510
wglGetMscRateOML = _link_function('wglGetMscRateOML', BOOL, [HDC, POINTER(INT32), POINTER(INT32)], 'OML_sync_control')
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:511
wglSwapBuffersMscOML = _link_function('wglSwapBuffersMscOML', INT64, [HDC, INT64, INT64, INT64], 'OML_sync_control')
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:512
wglSwapLayerBuffersMscOML = _link_function('wglSwapLayerBuffersMscOML', INT64, [HDC, c_int, INT64, INT64, INT64], 'OML_sync_control')
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:513
wglWaitForMscOML = _link_function('wglWaitForMscOML', BOOL, [HDC, INT64, INT64, INT64, POINTER(INT64), POINTER(INT64), POINTER(INT64)], 'OML_sync_control')
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:514
wglWaitForSbcOML = _link_function('wglWaitForSbcOML', BOOL, [HDC, INT64, POINTER(INT64), POINTER(INT64), POINTER(INT64)], 'OML_sync_control')
PFNWGLGETSYNCVALUESOMLPROC = CFUNCTYPE(BOOL, HDC, POINTER(INT64), POINTER(INT64), POINTER(INT64)) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:516
PFNWGLGETMSCRATEOMLPROC = CFUNCTYPE(BOOL, HDC, POINTER(INT32), POINTER(INT32)) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:517
PFNWGLSWAPBUFFERSMSCOMLPROC = CFUNCTYPE(INT64, HDC, INT64, INT64, INT64) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:518
PFNWGLSWAPLAYERBUFFERSMSCOMLPROC = CFUNCTYPE(INT64, HDC, c_int, INT64, INT64, INT64) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:519
PFNWGLWAITFORMSCOMLPROC = CFUNCTYPE(BOOL, HDC, INT64, INT64, INT64, POINTER(INT64), POINTER(INT64), POINTER(INT64)) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:520
PFNWGLWAITFORSBCOMLPROC = CFUNCTYPE(BOOL, HDC, INT64, POINTER(INT64), POINTER(INT64), POINTER(INT64)) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:521
# I3D_digital_video_control (http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:524)
WGL_I3D_digital_video_control = 1 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:525
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:527
wglGetDigitalVideoParametersI3D = _link_function('wglGetDigitalVideoParametersI3D', BOOL, [HDC, c_int, POINTER(c_int)], 'I3D_digital_video_control')
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:528
wglSetDigitalVideoParametersI3D = _link_function('wglSetDigitalVideoParametersI3D', BOOL, [HDC, c_int, POINTER(c_int)], 'I3D_digital_video_control')
PFNWGLGETDIGITALVIDEOPARAMETERSI3DPROC = CFUNCTYPE(BOOL, HDC, c_int, POINTER(c_int)) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:530
PFNWGLSETDIGITALVIDEOPARAMETERSI3DPROC = CFUNCTYPE(BOOL, HDC, c_int, POINTER(c_int)) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:531
# I3D_gamma (http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:534)
WGL_I3D_gamma = 1 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:535
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:537
wglGetGammaTableParametersI3D = _link_function('wglGetGammaTableParametersI3D', BOOL, [HDC, c_int, POINTER(c_int)], 'I3D_gamma')
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:538
wglSetGammaTableParametersI3D = _link_function('wglSetGammaTableParametersI3D', BOOL, [HDC, c_int, POINTER(c_int)], 'I3D_gamma')
USHORT = c_ushort # C:\cygwin\home\Alex\pyglet\tools\wgl.h:49
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:539
wglGetGammaTableI3D = _link_function('wglGetGammaTableI3D', BOOL, [HDC, c_int, POINTER(USHORT), POINTER(USHORT), POINTER(USHORT)], 'I3D_gamma')
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:540
wglSetGammaTableI3D = _link_function('wglSetGammaTableI3D', BOOL, [HDC, c_int, POINTER(USHORT), POINTER(USHORT), POINTER(USHORT)], 'I3D_gamma')
PFNWGLGETGAMMATABLEPARAMETERSI3DPROC = CFUNCTYPE(BOOL, HDC, c_int, POINTER(c_int)) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:542
PFNWGLSETGAMMATABLEPARAMETERSI3DPROC = CFUNCTYPE(BOOL, HDC, c_int, POINTER(c_int)) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:543
PFNWGLGETGAMMATABLEI3DPROC = CFUNCTYPE(BOOL, HDC, c_int, POINTER(USHORT), POINTER(USHORT), POINTER(USHORT)) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:544
PFNWGLSETGAMMATABLEI3DPROC = CFUNCTYPE(BOOL, HDC, c_int, POINTER(USHORT), POINTER(USHORT), POINTER(USHORT)) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:545
# I3D_genlock (http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:548)
WGL_I3D_genlock = 1 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:549
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:551
wglEnableGenlockI3D = _link_function('wglEnableGenlockI3D', BOOL, [HDC], 'I3D_genlock')
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:552
wglDisableGenlockI3D = _link_function('wglDisableGenlockI3D', BOOL, [HDC], 'I3D_genlock')
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:553
wglIsEnabledGenlockI3D = _link_function('wglIsEnabledGenlockI3D', BOOL, [HDC, POINTER(BOOL)], 'I3D_genlock')
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:554
wglGenlockSourceI3D = _link_function('wglGenlockSourceI3D', BOOL, [HDC, UINT], 'I3D_genlock')
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:555
wglGetGenlockSourceI3D = _link_function('wglGetGenlockSourceI3D', BOOL, [HDC, POINTER(UINT)], 'I3D_genlock')
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:556
wglGenlockSourceEdgeI3D = _link_function('wglGenlockSourceEdgeI3D', BOOL, [HDC, UINT], 'I3D_genlock')
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:557
wglGetGenlockSourceEdgeI3D = _link_function('wglGetGenlockSourceEdgeI3D', BOOL, [HDC, POINTER(UINT)], 'I3D_genlock')
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:558
wglGenlockSampleRateI3D = _link_function('wglGenlockSampleRateI3D', BOOL, [HDC, UINT], 'I3D_genlock')
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:559
wglGetGenlockSampleRateI3D = _link_function('wglGetGenlockSampleRateI3D', BOOL, [HDC, POINTER(UINT)], 'I3D_genlock')
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:560
wglGenlockSourceDelayI3D = _link_function('wglGenlockSourceDelayI3D', BOOL, [HDC, UINT], 'I3D_genlock')
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:561
wglGetGenlockSourceDelayI3D = _link_function('wglGetGenlockSourceDelayI3D', BOOL, [HDC, POINTER(UINT)], 'I3D_genlock')
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:562
wglQueryGenlockMaxSourceDelayI3D = _link_function('wglQueryGenlockMaxSourceDelayI3D', BOOL, [HDC, POINTER(UINT), POINTER(UINT)], 'I3D_genlock')
PFNWGLENABLEGENLOCKI3DPROC = CFUNCTYPE(BOOL, HDC) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:564
PFNWGLDISABLEGENLOCKI3DPROC = CFUNCTYPE(BOOL, HDC) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:565
PFNWGLISENABLEDGENLOCKI3DPROC = CFUNCTYPE(BOOL, HDC, POINTER(BOOL)) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:566
PFNWGLGENLOCKSOURCEI3DPROC = CFUNCTYPE(BOOL, HDC, UINT) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:567
PFNWGLGETGENLOCKSOURCEI3DPROC = CFUNCTYPE(BOOL, HDC, POINTER(UINT)) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:568
PFNWGLGENLOCKSOURCEEDGEI3DPROC = CFUNCTYPE(BOOL, HDC, UINT) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:569
PFNWGLGETGENLOCKSOURCEEDGEI3DPROC = CFUNCTYPE(BOOL, HDC, POINTER(UINT)) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:570
PFNWGLGENLOCKSAMPLERATEI3DPROC = CFUNCTYPE(BOOL, HDC, UINT) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:571
PFNWGLGETGENLOCKSAMPLERATEI3DPROC = CFUNCTYPE(BOOL, HDC, POINTER(UINT)) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:572
PFNWGLGENLOCKSOURCEDELAYI3DPROC = CFUNCTYPE(BOOL, HDC, UINT) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:573
PFNWGLGETGENLOCKSOURCEDELAYI3DPROC = CFUNCTYPE(BOOL, HDC, POINTER(UINT)) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:574
PFNWGLQUERYGENLOCKMAXSOURCEDELAYI3DPROC = CFUNCTYPE(BOOL, HDC, POINTER(UINT), POINTER(UINT)) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:575
# I3D_image_buffer (http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:578)
WGL_I3D_image_buffer = 1 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:579
LPVOID = POINTER(None) # C:\cygwin\home\Alex\pyglet\tools\wgl.h:45
DWORD = c_ulong # C:\cygwin\home\Alex\pyglet\tools\wgl.h:54
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:581
wglCreateImageBufferI3D = _link_function('wglCreateImageBufferI3D', LPVOID, [HDC, DWORD, UINT], 'I3D_image_buffer')
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:582
wglDestroyImageBufferI3D = _link_function('wglDestroyImageBufferI3D', BOOL, [HDC, LPVOID], 'I3D_image_buffer')
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:583
wglAssociateImageBufferEventsI3D = _link_function('wglAssociateImageBufferEventsI3D', BOOL, [HDC, POINTER(HANDLE), POINTER(LPVOID), POINTER(DWORD), UINT], 'I3D_image_buffer')
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:584
wglReleaseImageBufferEventsI3D = _link_function('wglReleaseImageBufferEventsI3D', BOOL, [HDC, POINTER(LPVOID), UINT], 'I3D_image_buffer')
PFNWGLCREATEIMAGEBUFFERI3DPROC = CFUNCTYPE(LPVOID, HDC, DWORD, UINT) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:586
PFNWGLDESTROYIMAGEBUFFERI3DPROC = CFUNCTYPE(BOOL, HDC, LPVOID) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:587
PFNWGLASSOCIATEIMAGEBUFFEREVENTSI3DPROC = CFUNCTYPE(BOOL, HDC, POINTER(HANDLE), POINTER(LPVOID), POINTER(DWORD), UINT) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:588
PFNWGLRELEASEIMAGEBUFFEREVENTSI3DPROC = CFUNCTYPE(BOOL, HDC, POINTER(LPVOID), UINT) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:589
# I3D_swap_frame_lock (http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:592)
WGL_I3D_swap_frame_lock = 1 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:593
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:595
wglEnableFrameLockI3D = _link_function('wglEnableFrameLockI3D', BOOL, [], 'I3D_swap_frame_lock')
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:596
wglDisableFrameLockI3D = _link_function('wglDisableFrameLockI3D', BOOL, [], 'I3D_swap_frame_lock')
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:597
wglIsEnabledFrameLockI3D = _link_function('wglIsEnabledFrameLockI3D', BOOL, [POINTER(BOOL)], 'I3D_swap_frame_lock')
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:598
wglQueryFrameLockMasterI3D = _link_function('wglQueryFrameLockMasterI3D', BOOL, [POINTER(BOOL)], 'I3D_swap_frame_lock')
PFNWGLENABLEFRAMELOCKI3DPROC = CFUNCTYPE(BOOL) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:600
PFNWGLDISABLEFRAMELOCKI3DPROC = CFUNCTYPE(BOOL) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:601
PFNWGLISENABLEDFRAMELOCKI3DPROC = CFUNCTYPE(BOOL, POINTER(BOOL)) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:602
PFNWGLQUERYFRAMELOCKMASTERI3DPROC = CFUNCTYPE(BOOL, POINTER(BOOL)) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:603
# I3D_swap_frame_usage (http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:606)
WGL_I3D_swap_frame_usage = 1 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:607
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:609
wglGetFrameUsageI3D = _link_function('wglGetFrameUsageI3D', BOOL, [POINTER(c_float)], 'I3D_swap_frame_usage')
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:610
wglBeginFrameTrackingI3D = _link_function('wglBeginFrameTrackingI3D', BOOL, [], 'I3D_swap_frame_usage')
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:611
wglEndFrameTrackingI3D = _link_function('wglEndFrameTrackingI3D', BOOL, [], 'I3D_swap_frame_usage')
# http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:612
wglQueryFrameTrackingI3D = _link_function('wglQueryFrameTrackingI3D', BOOL, [POINTER(DWORD), POINTER(DWORD), POINTER(c_float)], 'I3D_swap_frame_usage')
PFNWGLGETFRAMEUSAGEI3DPROC = CFUNCTYPE(BOOL, POINTER(c_float)) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:614
PFNWGLBEGINFRAMETRACKINGI3DPROC = CFUNCTYPE(BOOL) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:615
PFNWGLENDFRAMETRACKINGI3DPROC = CFUNCTYPE(BOOL) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:616
PFNWGLQUERYFRAMETRACKINGI3DPROC = CFUNCTYPE(BOOL, POINTER(DWORD), POINTER(DWORD), POINTER(c_float)) # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:617
# ATI_pixel_format_float (http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:620)
WGL_ATI_pixel_format_float = 1 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:621
# NV_float_buffer (http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:624)
WGL_NV_float_buffer = 1 # http://oss.sgi.com/projects/ogl-sample/ABI/wglext.h:625
__all__ = ['WIN32_LEAN_AND_MEAN', 'GLAPI', 'WGL_WGLEXT_VERSION',
'WGL_FRONT_COLOR_BUFFER_BIT_ARB', 'WGL_BACK_COLOR_BUFFER_BIT_ARB',
'WGL_DEPTH_BUFFER_BIT_ARB', 'WGL_STENCIL_BUFFER_BIT_ARB',
'WGL_SAMPLE_BUFFERS_ARB', 'WGL_SAMPLES_ARB', 'WGL_NUMBER_PIXEL_FORMATS_ARB',
'WGL_DRAW_TO_WINDOW_ARB', 'WGL_DRAW_TO_BITMAP_ARB', 'WGL_ACCELERATION_ARB',
'WGL_NEED_PALETTE_ARB', 'WGL_NEED_SYSTEM_PALETTE_ARB',
'WGL_SWAP_LAYER_BUFFERS_ARB', 'WGL_SWAP_METHOD_ARB',
'WGL_NUMBER_OVERLAYS_ARB', 'WGL_NUMBER_UNDERLAYS_ARB', 'WGL_TRANSPARENT_ARB',
'WGL_TRANSPARENT_RED_VALUE_ARB', 'WGL_TRANSPARENT_GREEN_VALUE_ARB',
'WGL_TRANSPARENT_BLUE_VALUE_ARB', 'WGL_TRANSPARENT_ALPHA_VALUE_ARB',
'WGL_TRANSPARENT_INDEX_VALUE_ARB', 'WGL_SHARE_DEPTH_ARB',
'WGL_SHARE_STENCIL_ARB', 'WGL_SHARE_ACCUM_ARB', 'WGL_SUPPORT_GDI_ARB',
'WGL_SUPPORT_OPENGL_ARB', 'WGL_DOUBLE_BUFFER_ARB', 'WGL_STEREO_ARB',
'WGL_PIXEL_TYPE_ARB', 'WGL_COLOR_BITS_ARB', 'WGL_RED_BITS_ARB',
'WGL_RED_SHIFT_ARB', 'WGL_GREEN_BITS_ARB', 'WGL_GREEN_SHIFT_ARB',
'WGL_BLUE_BITS_ARB', 'WGL_BLUE_SHIFT_ARB', 'WGL_ALPHA_BITS_ARB',
'WGL_ALPHA_SHIFT_ARB', 'WGL_ACCUM_BITS_ARB', 'WGL_ACCUM_RED_BITS_ARB',
'WGL_ACCUM_GREEN_BITS_ARB', 'WGL_ACCUM_BLUE_BITS_ARB',
'WGL_ACCUM_ALPHA_BITS_ARB', 'WGL_DEPTH_BITS_ARB', 'WGL_STENCIL_BITS_ARB',
'WGL_AUX_BUFFERS_ARB', 'WGL_NO_ACCELERATION_ARB',
'WGL_GENERIC_ACCELERATION_ARB', 'WGL_FULL_ACCELERATION_ARB',
'WGL_SWAP_EXCHANGE_ARB', 'WGL_SWAP_COPY_ARB', 'WGL_SWAP_UNDEFINED_ARB',
'WGL_TYPE_RGBA_ARB', 'WGL_TYPE_COLORINDEX_ARB',
'ERROR_INVALID_PIXEL_TYPE_ARB', 'ERROR_INCOMPATIBLE_DEVICE_CONTEXTS_ARB',
'WGL_DRAW_TO_PBUFFER_ARB', 'WGL_MAX_PBUFFER_PIXELS_ARB',
'WGL_MAX_PBUFFER_WIDTH_ARB', 'WGL_MAX_PBUFFER_HEIGHT_ARB',
'WGL_PBUFFER_LARGEST_ARB', 'WGL_PBUFFER_WIDTH_ARB', 'WGL_PBUFFER_HEIGHT_ARB',
'WGL_PBUFFER_LOST_ARB', 'WGL_BIND_TO_TEXTURE_RGB_ARB',
'WGL_BIND_TO_TEXTURE_RGBA_ARB', 'WGL_TEXTURE_FORMAT_ARB',
'WGL_TEXTURE_TARGET_ARB', 'WGL_MIPMAP_TEXTURE_ARB', 'WGL_TEXTURE_RGB_ARB',
'WGL_TEXTURE_RGBA_ARB', 'WGL_NO_TEXTURE_ARB', 'WGL_TEXTURE_CUBE_MAP_ARB',
'WGL_TEXTURE_1D_ARB', 'WGL_TEXTURE_2D_ARB', 'WGL_MIPMAP_LEVEL_ARB',
'WGL_CUBE_MAP_FACE_ARB', 'WGL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB',
'WGL_TEXTURE_CUBE_MAP_NEGATIVE_X_ARB', 'WGL_TEXTURE_CUBE_MAP_POSITIVE_Y_ARB',
'WGL_TEXTURE_CUBE_MAP_NEGATIVE_Y_ARB', 'WGL_TEXTURE_CUBE_MAP_POSITIVE_Z_ARB',
'WGL_TEXTURE_CUBE_MAP_NEGATIVE_Z_ARB', 'WGL_FRONT_LEFT_ARB',
'WGL_FRONT_RIGHT_ARB', 'WGL_BACK_LEFT_ARB', 'WGL_BACK_RIGHT_ARB',
'WGL_AUX0_ARB', 'WGL_AUX1_ARB', 'WGL_AUX2_ARB', 'WGL_AUX3_ARB',
'WGL_AUX4_ARB', 'WGL_AUX5_ARB', 'WGL_AUX6_ARB', 'WGL_AUX7_ARB',
'WGL_AUX8_ARB', 'WGL_AUX9_ARB', 'WGL_TYPE_RGBA_FLOAT_ARB',
'ERROR_INVALID_PIXEL_TYPE_EXT', 'WGL_NUMBER_PIXEL_FORMATS_EXT',
'WGL_DRAW_TO_WINDOW_EXT', 'WGL_DRAW_TO_BITMAP_EXT', 'WGL_ACCELERATION_EXT',
'WGL_NEED_PALETTE_EXT', 'WGL_NEED_SYSTEM_PALETTE_EXT',
'WGL_SWAP_LAYER_BUFFERS_EXT', 'WGL_SWAP_METHOD_EXT',
'WGL_NUMBER_OVERLAYS_EXT', 'WGL_NUMBER_UNDERLAYS_EXT', 'WGL_TRANSPARENT_EXT',
'WGL_TRANSPARENT_VALUE_EXT', 'WGL_SHARE_DEPTH_EXT', 'WGL_SHARE_STENCIL_EXT',
'WGL_SHARE_ACCUM_EXT', 'WGL_SUPPORT_GDI_EXT', 'WGL_SUPPORT_OPENGL_EXT',
'WGL_DOUBLE_BUFFER_EXT', 'WGL_STEREO_EXT', 'WGL_PIXEL_TYPE_EXT',
'WGL_COLOR_BITS_EXT', 'WGL_RED_BITS_EXT', 'WGL_RED_SHIFT_EXT',
'WGL_GREEN_BITS_EXT', 'WGL_GREEN_SHIFT_EXT', 'WGL_BLUE_BITS_EXT',
'WGL_BLUE_SHIFT_EXT', 'WGL_ALPHA_BITS_EXT', 'WGL_ALPHA_SHIFT_EXT',
'WGL_ACCUM_BITS_EXT', 'WGL_ACCUM_RED_BITS_EXT', 'WGL_ACCUM_GREEN_BITS_EXT',
'WGL_ACCUM_BLUE_BITS_EXT', 'WGL_ACCUM_ALPHA_BITS_EXT', 'WGL_DEPTH_BITS_EXT',
'WGL_STENCIL_BITS_EXT', 'WGL_AUX_BUFFERS_EXT', 'WGL_NO_ACCELERATION_EXT',
'WGL_GENERIC_ACCELERATION_EXT', 'WGL_FULL_ACCELERATION_EXT',
'WGL_SWAP_EXCHANGE_EXT', 'WGL_SWAP_COPY_EXT', 'WGL_SWAP_UNDEFINED_EXT',
'WGL_TYPE_RGBA_EXT', 'WGL_TYPE_COLORINDEX_EXT', 'WGL_DRAW_TO_PBUFFER_EXT',
'WGL_MAX_PBUFFER_PIXELS_EXT', 'WGL_MAX_PBUFFER_WIDTH_EXT',
'WGL_MAX_PBUFFER_HEIGHT_EXT', 'WGL_OPTIMAL_PBUFFER_WIDTH_EXT',
'WGL_OPTIMAL_PBUFFER_HEIGHT_EXT', 'WGL_PBUFFER_LARGEST_EXT',
'WGL_PBUFFER_WIDTH_EXT', 'WGL_PBUFFER_HEIGHT_EXT', 'WGL_DEPTH_FLOAT_EXT',
'WGL_SAMPLE_BUFFERS_3DFX', 'WGL_SAMPLES_3DFX', 'WGL_SAMPLE_BUFFERS_EXT',
'WGL_SAMPLES_EXT', 'WGL_DIGITAL_VIDEO_CURSOR_ALPHA_FRAMEBUFFER_I3D',
'WGL_DIGITAL_VIDEO_CURSOR_ALPHA_VALUE_I3D',
'WGL_DIGITAL_VIDEO_CURSOR_INCLUDED_I3D',
'WGL_DIGITAL_VIDEO_GAMMA_CORRECTED_I3D', 'WGL_GAMMA_TABLE_SIZE_I3D',
'WGL_GAMMA_EXCLUDE_DESKTOP_I3D', 'WGL_GENLOCK_SOURCE_MULTIVIEW_I3D',
'WGL_GENLOCK_SOURCE_EXTENAL_SYNC_I3D', 'WGL_GENLOCK_SOURCE_EXTENAL_FIELD_I3D',
'WGL_GENLOCK_SOURCE_EXTENAL_TTL_I3D', 'WGL_GENLOCK_SOURCE_DIGITAL_SYNC_I3D',
'WGL_GENLOCK_SOURCE_DIGITAL_FIELD_I3D', 'WGL_GENLOCK_SOURCE_EDGE_FALLING_I3D',
'WGL_GENLOCK_SOURCE_EDGE_RISING_I3D', 'WGL_GENLOCK_SOURCE_EDGE_BOTH_I3D',
'WGL_IMAGE_BUFFER_MIN_ACCESS_I3D', 'WGL_IMAGE_BUFFER_LOCK_I3D',
'WGL_BIND_TO_TEXTURE_DEPTH_NV', 'WGL_BIND_TO_TEXTURE_RECTANGLE_DEPTH_NV',
'WGL_DEPTH_TEXTURE_FORMAT_NV', 'WGL_TEXTURE_DEPTH_COMPONENT_NV',
'WGL_DEPTH_COMPONENT_NV', 'WGL_BIND_TO_TEXTURE_RECTANGLE_RGB_NV',
'WGL_BIND_TO_TEXTURE_RECTANGLE_RGBA_NV', 'WGL_TEXTURE_RECTANGLE_NV',
'WGL_TYPE_RGBA_FLOAT_ATI', 'WGL_FLOAT_COMPONENTS_NV',
'WGL_BIND_TO_TEXTURE_RECTANGLE_FLOAT_R_NV',
'WGL_BIND_TO_TEXTURE_RECTANGLE_FLOAT_RG_NV',
'WGL_BIND_TO_TEXTURE_RECTANGLE_FLOAT_RGB_NV',
'WGL_BIND_TO_TEXTURE_RECTANGLE_FLOAT_RGBA_NV', 'WGL_TEXTURE_FLOAT_R_NV',
'WGL_TEXTURE_FLOAT_RG_NV', 'WGL_TEXTURE_FLOAT_RGB_NV',
'WGL_TEXTURE_FLOAT_RGBA_NV', 'HPBUFFERARB', 'HPBUFFEREXT',
'WGL_ARB_buffer_region', 'wglCreateBufferRegionARB',
'wglDeleteBufferRegionARB', 'wglSaveBufferRegionARB',
'wglRestoreBufferRegionARB', 'PFNWGLCREATEBUFFERREGIONARBPROC',
'PFNWGLDELETEBUFFERREGIONARBPROC', 'PFNWGLSAVEBUFFERREGIONARBPROC',
'PFNWGLRESTOREBUFFERREGIONARBPROC', 'WGL_ARB_multisample',
'WGL_ARB_extensions_string', 'wglGetExtensionsStringARB',
'PFNWGLGETEXTENSIONSSTRINGARBPROC', 'WGL_ARB_pixel_format',
'wglGetPixelFormatAttribivARB', 'wglGetPixelFormatAttribfvARB',
'wglChoosePixelFormatARB', 'PFNWGLGETPIXELFORMATATTRIBIVARBPROC',
'PFNWGLGETPIXELFORMATATTRIBFVARBPROC', 'PFNWGLCHOOSEPIXELFORMATARBPROC',
'WGL_ARB_make_current_read', 'wglMakeContextCurrentARB',
'wglGetCurrentReadDCARB', 'PFNWGLMAKECONTEXTCURRENTARBPROC',
'PFNWGLGETCURRENTREADDCARBPROC', 'WGL_ARB_pbuffer', 'wglCreatePbufferARB',
'wglGetPbufferDCARB', 'wglReleasePbufferDCARB', 'wglDestroyPbufferARB',
'wglQueryPbufferARB', 'PFNWGLCREATEPBUFFERARBPROC',
'PFNWGLGETPBUFFERDCARBPROC', 'PFNWGLRELEASEPBUFFERDCARBPROC',
'PFNWGLDESTROYPBUFFERARBPROC', 'PFNWGLQUERYPBUFFERARBPROC',
'WGL_ARB_render_texture', 'wglBindTexImageARB', 'wglReleaseTexImageARB',
'wglSetPbufferAttribARB', 'PFNWGLBINDTEXIMAGEARBPROC',
'PFNWGLRELEASETEXIMAGEARBPROC', 'PFNWGLSETPBUFFERATTRIBARBPROC',
'WGL_ARB_pixel_format_float', 'WGL_EXT_display_color_table',
'wglCreateDisplayColorTableEXT', 'wglLoadDisplayColorTableEXT',
'wglBindDisplayColorTableEXT', 'wglDestroyDisplayColorTableEXT',
'PFNWGLCREATEDISPLAYCOLORTABLEEXTPROC', 'PFNWGLLOADDISPLAYCOLORTABLEEXTPROC',
'PFNWGLBINDDISPLAYCOLORTABLEEXTPROC', 'PFNWGLDESTROYDISPLAYCOLORTABLEEXTPROC',
'WGL_EXT_extensions_string', 'wglGetExtensionsStringEXT',
'PFNWGLGETEXTENSIONSSTRINGEXTPROC', 'WGL_EXT_make_current_read',
'wglMakeContextCurrentEXT', 'wglGetCurrentReadDCEXT',
'PFNWGLMAKECONTEXTCURRENTEXTPROC', 'PFNWGLGETCURRENTREADDCEXTPROC',
'WGL_EXT_pbuffer', 'wglCreatePbufferEXT', 'wglGetPbufferDCEXT',
'wglReleasePbufferDCEXT', 'wglDestroyPbufferEXT', 'wglQueryPbufferEXT',
'PFNWGLCREATEPBUFFEREXTPROC', 'PFNWGLGETPBUFFERDCEXTPROC',
'PFNWGLRELEASEPBUFFERDCEXTPROC', 'PFNWGLDESTROYPBUFFEREXTPROC',
'PFNWGLQUERYPBUFFEREXTPROC', 'WGL_EXT_pixel_format',
'wglGetPixelFormatAttribivEXT', 'wglGetPixelFormatAttribfvEXT',
'wglChoosePixelFormatEXT', 'PFNWGLGETPIXELFORMATATTRIBIVEXTPROC',
'PFNWGLGETPIXELFORMATATTRIBFVEXTPROC', 'PFNWGLCHOOSEPIXELFORMATEXTPROC',
'WGL_EXT_swap_control', 'wglSwapIntervalEXT', 'wglGetSwapIntervalEXT',
'PFNWGLSWAPINTERVALEXTPROC', 'PFNWGLGETSWAPINTERVALEXTPROC',
'WGL_EXT_depth_float', 'WGL_NV_vertex_array_range', 'wglAllocateMemoryNV',
'wglFreeMemoryNV', 'PFNWGLALLOCATEMEMORYNVPROC', 'PFNWGLFREEMEMORYNVPROC',
'WGL_3DFX_multisample', 'WGL_EXT_multisample', 'WGL_OML_sync_control',
'wglGetSyncValuesOML', 'wglGetMscRateOML', 'wglSwapBuffersMscOML',
'wglSwapLayerBuffersMscOML', 'wglWaitForMscOML', 'wglWaitForSbcOML',
'PFNWGLGETSYNCVALUESOMLPROC', 'PFNWGLGETMSCRATEOMLPROC',
'PFNWGLSWAPBUFFERSMSCOMLPROC', 'PFNWGLSWAPLAYERBUFFERSMSCOMLPROC',
'PFNWGLWAITFORMSCOMLPROC', 'PFNWGLWAITFORSBCOMLPROC',
'WGL_I3D_digital_video_control', 'wglGetDigitalVideoParametersI3D',
'wglSetDigitalVideoParametersI3D', 'PFNWGLGETDIGITALVIDEOPARAMETERSI3DPROC',
'PFNWGLSETDIGITALVIDEOPARAMETERSI3DPROC', 'WGL_I3D_gamma',
'wglGetGammaTableParametersI3D', 'wglSetGammaTableParametersI3D',
'wglGetGammaTableI3D', 'wglSetGammaTableI3D',
'PFNWGLGETGAMMATABLEPARAMETERSI3DPROC',
'PFNWGLSETGAMMATABLEPARAMETERSI3DPROC', 'PFNWGLGETGAMMATABLEI3DPROC',
'PFNWGLSETGAMMATABLEI3DPROC', 'WGL_I3D_genlock', 'wglEnableGenlockI3D',
'wglDisableGenlockI3D', 'wglIsEnabledGenlockI3D', 'wglGenlockSourceI3D',
'wglGetGenlockSourceI3D', 'wglGenlockSourceEdgeI3D',
'wglGetGenlockSourceEdgeI3D', 'wglGenlockSampleRateI3D',
'wglGetGenlockSampleRateI3D', 'wglGenlockSourceDelayI3D',
'wglGetGenlockSourceDelayI3D', 'wglQueryGenlockMaxSourceDelayI3D',
'PFNWGLENABLEGENLOCKI3DPROC', 'PFNWGLDISABLEGENLOCKI3DPROC',
'PFNWGLISENABLEDGENLOCKI3DPROC', 'PFNWGLGENLOCKSOURCEI3DPROC',
'PFNWGLGETGENLOCKSOURCEI3DPROC', 'PFNWGLGENLOCKSOURCEEDGEI3DPROC',
'PFNWGLGETGENLOCKSOURCEEDGEI3DPROC', 'PFNWGLGENLOCKSAMPLERATEI3DPROC',
'PFNWGLGETGENLOCKSAMPLERATEI3DPROC', 'PFNWGLGENLOCKSOURCEDELAYI3DPROC',
'PFNWGLGETGENLOCKSOURCEDELAYI3DPROC',
'PFNWGLQUERYGENLOCKMAXSOURCEDELAYI3DPROC', 'WGL_I3D_image_buffer',
'wglCreateImageBufferI3D', 'wglDestroyImageBufferI3D',
'wglAssociateImageBufferEventsI3D', 'wglReleaseImageBufferEventsI3D',
'PFNWGLCREATEIMAGEBUFFERI3DPROC', 'PFNWGLDESTROYIMAGEBUFFERI3DPROC',
'PFNWGLASSOCIATEIMAGEBUFFEREVENTSI3DPROC',
'PFNWGLRELEASEIMAGEBUFFEREVENTSI3DPROC', 'WGL_I3D_swap_frame_lock',
'wglEnableFrameLockI3D', 'wglDisableFrameLockI3D', 'wglIsEnabledFrameLockI3D',
'wglQueryFrameLockMasterI3D', 'PFNWGLENABLEFRAMELOCKI3DPROC',
'PFNWGLDISABLEFRAMELOCKI3DPROC', 'PFNWGLISENABLEDFRAMELOCKI3DPROC',
'PFNWGLQUERYFRAMELOCKMASTERI3DPROC', 'WGL_I3D_swap_frame_usage',
'wglGetFrameUsageI3D', 'wglBeginFrameTrackingI3D', 'wglEndFrameTrackingI3D',
'wglQueryFrameTrackingI3D', 'PFNWGLGETFRAMEUSAGEI3DPROC',
'PFNWGLBEGINFRAMETRACKINGI3DPROC', 'PFNWGLENDFRAMETRACKINGI3DPROC',
'PFNWGLQUERYFRAMETRACKINGI3DPROC', 'WGL_ATI_pixel_format_float',
'WGL_NV_float_buffer']
# END GENERATED CONTENT (do not edit above this line)
|
{
"content_hash": "8b892c55e96883b56eda50f0aa0810e5",
"timestamp": "",
"source": "github",
"line_count": 778,
"max_line_length": 181,
"avg_line_length": 76.66066838046272,
"alnum_prop": 0.7566815331477817,
"repo_name": "mattpap/sympy-polys",
"id": "b715ec8e1017a2f8cf233c8c58b0b876277ebbfc",
"size": "61360",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "sympy/thirdparty/pyglet/pyglet/gl/wglext_arb.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "8476904"
},
{
"name": "Scheme",
"bytes": "125"
}
],
"symlink_target": ""
}
|
from sklearn import svm
from sklearn import datasets
# Fit a support-vector classifier on the full iris dataset.
clf = svm.SVC()
iris = datasets.load_iris()
X, y = iris.data, iris.target
clf.fit(X, y)
# Serialize the fitted model to a byte string.
import pickle
s = pickle.dumps(clf)
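# Round-trip sketch: restore the classifier from the pickled bytes and reuse
# it (the predicted value shown is indicative, e.g. [0] for the first sample).
clf2 = pickle.loads(s)
print(clf2.predict(X[:1]))  # e.g. [0], the class of the first iris sample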
|
{
"content_hash": "b7741401cf99d7d71e9cee995b4d16e3",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 29,
"avg_line_length": 16.363636363636363,
"alnum_prop": 0.7166666666666667,
"repo_name": "suresh/notes",
"id": "f7b6cc291f5b84211bb633b93189e58f5fab9ca0",
"size": "180",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/svm_model_persist.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "3920639"
},
{
"name": "Python",
"bytes": "23454"
}
],
"symlink_target": ""
}
|
import argparse
import time
import math
import torch
import torch.nn as nn
from torch.autograd import Variable
import data
import model
parser = argparse.ArgumentParser(description='PyTorch PennTreeBank RNN/LSTM Language Model')
parser.add_argument('--data', type=str, default='./data/penn',
help='location of the data corpus')
parser.add_argument('--model', type=str, default='LSTM',
help='type of recurrent net (RNN_TANH, RNN_RELU, LSTM, GRU)')
parser.add_argument('--emsize', type=int, default=200,
help='size of word embeddings')
parser.add_argument('--nhid', type=int, default=200,
help='number of hidden units per layer')
parser.add_argument('--nlayers', type=int, default=2,
help='number of layers')
parser.add_argument('--lr', type=float, default=20,
help='initial learning rate')
parser.add_argument('--clip', type=float, default=0.25,
help='gradient clipping')
parser.add_argument('--epochs', type=int, default=40,
help='upper epoch limit')
parser.add_argument('--batch_size', type=int, default=20, metavar='N',
help='batch size')
parser.add_argument('--bptt', type=int, default=35,
help='sequence length')
parser.add_argument('--dropout', type=float, default=0.2,
help='dropout applied to layers (0 = no dropout)')
parser.add_argument('--tied', action='store_true',
help='tie the word embedding and softmax weights')
parser.add_argument('--seed', type=int, default=1111,
help='random seed')
parser.add_argument('--cuda', action='store_true',
help='use CUDA')
parser.add_argument('--log-interval', type=int, default=200, metavar='N',
help='report interval')
parser.add_argument('--save', type=str, default='model.pt',
help='path to save the final model')
args = parser.parse_args()
# Set the random seed manually for reproducibility.
torch.manual_seed(args.seed)
if torch.cuda.is_available():
if not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
else:
torch.cuda.manual_seed(args.seed)
###############################################################################
# Load data
###############################################################################
corpus = data.Corpus(args.data)
def batchify(data, bsz):
# Work out how cleanly we can divide the dataset into bsz parts.
nbatch = data.size(0) // bsz
# Trim off any extra elements that wouldn't cleanly fit (remainders).
data = data.narrow(0, 0, nbatch * bsz)
# Evenly divide the data across the bsz batches.
data = data.view(bsz, -1).t().contiguous()
if args.cuda:
data = data.cuda()
return data
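# Shape sketch (illustrative numbers, not from the original script): for a
# corpus of 26 tokens and bsz=4, nbatch is 6, the last 2 tokens are trimmed,
# and batchify returns a (6, 4) tensor whose column i is a contiguous slice
# of the corpus.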
eval_batch_size = 10
train_data = batchify(corpus.train, args.batch_size)
val_data = batchify(corpus.valid, eval_batch_size)
test_data = batchify(corpus.test, eval_batch_size)
###############################################################################
# Build the model
###############################################################################
ntokens = len(corpus.dictionary)
model = model.RNNModel(args.model, ntokens, args.emsize, args.nhid, args.nlayers, args.dropout, args.tied)
if args.cuda:
model.cuda()
criterion = nn.CrossEntropyLoss()
###############################################################################
# Training code
###############################################################################
def repackage_hidden(h):
"""Wraps hidden states in new Variables, to detach them from their history."""
if type(h) == Variable:
return Variable(h.data)
else:
return tuple(repackage_hidden(v) for v in h)
def get_batch(source, i, evaluation=False):
seq_len = min(args.bptt, len(source) - 1 - i)
data = Variable(source[i:i+seq_len], volatile=evaluation)
target = Variable(source[i+1:i+1+seq_len].view(-1))
return data, target
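# Offset sketch: for i=0 and seq_len=35, data is source[0:35] and target is
# source[1:36] flattened, so every position is trained to predict the token
# that follows it in the same column.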
def evaluate(data_source):
# Turn on evaluation mode which disables dropout.
model.eval()
total_loss = 0
ntokens = len(corpus.dictionary)
hidden = model.init_hidden(eval_batch_size)
for i in range(0, data_source.size(0) - 1, args.bptt):
data, targets = get_batch(data_source, i, evaluation=True)
output, hidden = model(data, hidden)
output_flat = output.view(-1, ntokens)
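        # Weight the batch-mean loss by the batch length so the division by
        # len(data_source) below yields an average per-time-step loss.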
total_loss += len(data) * criterion(output_flat, targets).data
hidden = repackage_hidden(hidden)
return total_loss[0] / len(data_source)
def train():
# Turn on training mode which enables dropout.
model.train()
total_loss = 0
start_time = time.time()
ntokens = len(corpus.dictionary)
hidden = model.init_hidden(args.batch_size)
for batch, i in enumerate(range(0, train_data.size(0) - 1, args.bptt)):
data, targets = get_batch(train_data, i)
# Starting each batch, we detach the hidden state from how it was previously produced.
# If we didn't, the model would try backpropagating all the way to start of the dataset.
hidden = repackage_hidden(hidden)
model.zero_grad()
output, hidden = model(data, hidden)
loss = criterion(output.view(-1, ntokens), targets)
loss.backward()
# `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.
torch.nn.utils.clip_grad_norm(model.parameters(), args.clip)
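        # Manual SGD update: p <- p - lr * grad (the script uses no torch.optim
        # optimizer, which is why `lr` is annealed by hand below).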
for p in model.parameters():
p.data.add_(-lr, p.grad.data)
total_loss += loss.data
if batch % args.log_interval == 0 and batch > 0:
cur_loss = total_loss[0] / args.log_interval
elapsed = time.time() - start_time
print('| epoch {:3d} | {:5d}/{:5d} batches | lr {:02.2f} | ms/batch {:5.2f} | '
'loss {:5.2f} | ppl {:8.2f}'.format(
epoch, batch, len(train_data) // args.bptt, lr,
elapsed * 1000 / args.log_interval, cur_loss, math.exp(cur_loss)))
total_loss = 0
start_time = time.time()
# Loop over epochs.
lr = args.lr
best_val_loss = None
# At any point you can hit Ctrl + C to break out of training early.
try:
for epoch in range(1, args.epochs+1):
epoch_start_time = time.time()
train()
val_loss = evaluate(val_data)
print('-' * 89)
print('| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | '
'valid ppl {:8.2f}'.format(epoch, (time.time() - epoch_start_time),
val_loss, math.exp(val_loss)))
print('-' * 89)
# Save the model if the validation loss is the best we've seen so far.
if not best_val_loss or val_loss < best_val_loss:
with open(args.save, 'wb') as f:
torch.save(model, f)
best_val_loss = val_loss
else:
# Anneal the learning rate if no improvement has been seen in the validation dataset.
lr /= 4.0
except KeyboardInterrupt:
print('-' * 89)
print('Exiting from training early')
# Load the best saved model.
with open(args.save, 'rb') as f:
model = torch.load(f)
# Run on test data.
test_loss = evaluate(test_data)
print('=' * 89)
print('| End of training | test loss {:5.2f} | test ppl {:8.2f}'.format(
test_loss, math.exp(test_loss)))
print('=' * 89)
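# Example invocation (the defaults above assume a PTB-style corpus at
# ./data/penn):
#   python main.py --cuda --epochs 6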
|
{
"content_hash": "ff13826dfe6817f423d9bc97fb4e7a4d",
"timestamp": "",
"source": "github",
"line_count": 191,
"max_line_length": 106,
"avg_line_length": 39.39267015706806,
"alnum_prop": 0.578548644338118,
"repo_name": "edgarriba/examples",
"id": "88ed6007ef61688a0b47fa7764d314107787b60c",
"size": "7524",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "word_language_model/main.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "92473"
}
],
"symlink_target": ""
}
|
import couchdb
import json
class Handle_DB(object):
    def __init__(self, url):
        self.server = couchdb.Server(url)
        try:
            # Create the 'tweets' database on first use ...
            self.db = self.server.create('tweets')
        except couchdb.http.PreconditionFailed:
            # ... or open it if it already exists.
            self.db = self.server['tweets']
def add_tweet(self, id, tweet, BOW, sentiment, city, label):
#print ('add doc')
try:
self.db[json.dumps(id)] = {'tweet':tweet,'BOW':json.dumps(BOW),'sentiment':sentiment,'city':city,'label':label}
except couchdb.http.ResourceConflict:
pass
#print ('couchdb.http.ResourceConflict')
    def querySentiment(self, key):
        # For sentiment, the key should be -1, 0 or 1.
map_func = '''function(doc){
emit(doc.sentiment,1);
}'''
reduce_func = '_count'
result = self.db.query(map_func,reduce_func)
count = result[key:key]
for row in count:
value = row.value
return value
    def query_class(self, key):
map_func = '''function(doc){
emit(doc.label,1);
}'''
reduce_func = '_count'
result = self.db.query(map_func,reduce_func)
count = result[key:key]
for row in count:
value = row.value
return value
def total_length(self):
return len(self.db)
    def update_label(self, doc_id, label):
doc = self.db[doc_id]
doc['label'] = label
self.db.save(doc)
def get_db(self):
return self.db
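# Minimal usage sketch (hypothetical server URL and document values, not part
# of the original module):
#   db = Handle_DB('http://127.0.0.1:5984/')
#   db.add_tweet(1, 'some tweet text', {'some': 1}, 1, 'Melbourne', 'sports')
#   print(db.querySentiment(1))   # number of stored tweets with sentiment 1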
|
{
"content_hash": "7f31d1ccfb8bbe131d3c0885b08480cb",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 123,
"avg_line_length": 29.140350877192983,
"alnum_prop": 0.48946417820590005,
"repo_name": "Le0nHardt/Cloud-Project",
"id": "775a5000fa46b1c48b5e283ff6234e4b2c0822a2",
"size": "1661",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ProcessingAndCouchDB/handle_db.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "45516"
},
{
"name": "HTML",
"bytes": "41619"
},
{
"name": "JavaScript",
"bytes": "115628"
},
{
"name": "Python",
"bytes": "33287"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, unicode_literals
import argparse
import os
import sys
from .options import project
class BaseCommand(object):
"""A CLI command.
"""
name = None
description = None
default_arguments = [project]
arguments = []
def __init__(self, parser=None):
if not parser:
parser = argparse.ArgumentParser(
prog=os.path.basename(sys.argv[0]),
description="Base argument parser for passa"
)
self.parser = parser
self.add_arguments()
@classmethod
def build_parser(cls):
parser = argparse.ArgumentParser(
prog="passa {}".format(cls.name),
description=cls.description,
)
return cls(parser)
@classmethod
def run_parser(cls):
parser = cls.build_parser()
parser()
def __call__(self, argv=None):
options = self.parser.parse_args(argv)
result = self.main(options)
if result is not None:
sys.exit(result)
def add_default_arguments(self):
for arg in self.default_arguments:
arg.add_to_parser(self.parser)
def add_arguments(self):
self.add_default_arguments()
for arg in self.arguments:
arg.add_to_parser(self.parser)
def main(self, options):
return self.run(options)
def run(self, options):
raise NotImplementedError
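# Minimal subclass sketch (hypothetical command, not part of passa): the
# extension points are the `name`/`description`/`arguments` attributes and
# the `run` hook.
#   class ExampleCommand(BaseCommand):
#       name = "example"
#       description = "Print the parsed options."
#       def run(self, options):
#           print(options)   # argparse.Namespace with the parsed arguments
#           return 0
#   ExampleCommand.run_parser()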
|
{
"content_hash": "1acaced61c2f11b3ef107d5a75384f69",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 60,
"avg_line_length": 24.559322033898304,
"alnum_prop": 0.5914423740510697,
"repo_name": "kennethreitz/pipenv",
"id": "0ca48682ed8c7e42d1db37c541a5f7cfaba77320",
"size": "1473",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pipenv/vendor/passa/cli/_base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "202"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Python",
"bytes": "2588085"
},
{
"name": "Roff",
"bytes": "40754"
}
],
"symlink_target": ""
}
|
"""Python configuration for recording multiple incorrect submissions issue."""
from __future__ import annotations
from extensions.issues import base
class MultipleIncorrectSubmissions(base.BaseExplorationIssueSpec):
"""Issue that's recorded when the learner answers multiple times incorrectly
in the same card and quits the exploration.
"""
_customization_arg_specs = [{
'name': 'state_name',
'description': 'State name',
'schema': {
'type': 'unicode',
},
'default_value': ''
}, {
'name': 'num_times_answered_incorrectly',
'description': 'Number of times incorrect answers were submitted',
'schema': {
'type': 'int',
},
'default_value': 0
}]
|
{
"content_hash": "ea2a9cf47b7ea7bd845ba2d090a28c39",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 80,
"avg_line_length": 28.703703703703702,
"alnum_prop": 0.6103225806451613,
"repo_name": "brianrodri/oppia",
"id": "501aec6129e31bb7dae85c266739c37e5789f468",
"size": "1397",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "extensions/issues/MultipleIncorrectSubmissions/MultipleIncorrectSubmissions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "487903"
},
{
"name": "HTML",
"bytes": "1748056"
},
{
"name": "JavaScript",
"bytes": "1176446"
},
{
"name": "PEG.js",
"bytes": "71377"
},
{
"name": "Python",
"bytes": "14169091"
},
{
"name": "Shell",
"bytes": "2239"
},
{
"name": "TypeScript",
"bytes": "13316709"
}
],
"symlink_target": ""
}
|
"""TOPI Testing Util functions.
Used to verify the correctness of operators in TOPI .
"""
from __future__ import absolute_import as _abs
from .conv1d_ncw_python import conv1d_ncw_python, group_conv1d_ncw_python
from .conv2d_hwcn_python import conv2d_hwcn_python
from .conv2d_nchw_python import conv2d_nchw_python
from .conv2d_nhwc_python import conv2d_nhwc_python
from .conv3d_ncdhw_python import conv3d_ncdhw_python
from .conv3d_ndhwc_python import conv3d_ndhwc_python
from .conv3d_transpose_ncdhw_python import conv3d_transpose_ncdhw_python
from .conv2d_transpose_python import conv2d_transpose_nchw_python, conv2d_transpose_nhwc_python
from .conv1d_transpose_ncw_python import conv1d_transpose_ncw_python
from .correlation_nchw_python import correlation_nchw_python
from .deformable_conv2d_python import deformable_conv2d_nchw_python, deformable_conv2d_nhwc_python
from .depthwise_conv2d_python import (
depthwise_conv2d_python_nchw,
depthwise_conv2d_python_nhwc,
depthwise_conv2d_python_nchwc,
)
from .dilate_python import dilate_python
from .softmax_python import softmax_python, log_softmax_python
from .resize_python import resize1d_python, resize2d_python, resize3d_python
from .reorg_python import reorg_python
from .roi_align_python import roi_align_nchw_python, roi_align_nhwc_python
from .roi_pool_python import roi_pool_nchw_python
from .lrn_python import lrn_python
from .l2_normalize_python import l2_normalize_python
from .gather_python import gather_python
from .gather_nd_python import gather_nd_python
from .strided_slice_python import strided_slice_python, strided_set_python
from .batch_matmul import batch_matmul
from .batch_norm import batch_norm
from .slice_axis_python import slice_axis_python
from .sequence_mask_python import sequence_mask
from .poolnd_python import poolnd_python
from .pool_grad_python import pool_grad_nchw
from .one_hot import one_hot
from .depth_to_space import depth_to_space_python
from .space_to_depth import space_to_depth_python
from .crop_and_resize_python import crop_and_resize_python
from .common import (
compare_numpy_tvm,
get_injective_schedule,
get_reduce_schedule,
get_broadcast_schedule,
get_elemwise_schedule,
get_conv2d_nchw_implement,
dispatch,
)
from .adaptive_pool_python import adaptive_pool
from .grid_sample_python import affine_grid_python, grid_sample_nchw_python
from .matrix_set_diag import matrix_set_diag
from .space_to_batch_nd import space_to_batch_nd_python
from .batch_to_space_nd import batch_to_space_nd_python
from .nll_loss import nll_loss
from .dense import dense
from .searchsorted import searchsorted_ref
from .conv2d_backcward_weight_python import conv2d_backward_weight_python
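# Usage sketch (illustrative shapes; the references operate on plain numpy
# arrays):
#   import numpy as np
#   a = np.random.uniform(size=(1, 3, 8, 8)).astype("float32")
#   w = np.random.uniform(size=(4, 3, 3, 3)).astype("float32")
#   out = conv2d_nchw_python(a, w, stride=1, padding=0)   # shape (1, 4, 6, 6)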
|
{
"content_hash": "6a256eb4a034092afac2f5818ef8bb6f",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 98,
"avg_line_length": 44.40983606557377,
"alnum_prop": 0.8028792912513842,
"repo_name": "dmlc/tvm",
"id": "c3d222cfd1209eba25793f243cc800e73e7d4569",
"size": "3495",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "python/tvm/topi/testing/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "6112"
},
{
"name": "C",
"bytes": "92947"
},
{
"name": "C++",
"bytes": "5765945"
},
{
"name": "CMake",
"bytes": "74045"
},
{
"name": "Go",
"bytes": "112384"
},
{
"name": "HTML",
"bytes": "8625"
},
{
"name": "Java",
"bytes": "171101"
},
{
"name": "JavaScript",
"bytes": "49803"
},
{
"name": "Makefile",
"bytes": "55807"
},
{
"name": "Objective-C",
"bytes": "15241"
},
{
"name": "Objective-C++",
"bytes": "46673"
},
{
"name": "Python",
"bytes": "7183810"
},
{
"name": "Rust",
"bytes": "181961"
},
{
"name": "Scala",
"bytes": "202148"
},
{
"name": "Shell",
"bytes": "97271"
},
{
"name": "Tcl",
"bytes": "53645"
},
{
"name": "Verilog",
"bytes": "30605"
}
],
"symlink_target": ""
}
|
from LogReader import LogReader
import math
import json
import pickle
class DataCondenser:
def __init__(self):
self.rdrLog = LogReader()
def condenseData(self, strPath):
dataOwl = None
log = self.rdrLog.loadLog(strPath)
dataOwl = log.getOwlData()
self.tti = dataOwl["task-tree-individuals"]
owlMeta = dataOwl["metadata"]
owlAnnot = dataOwl["annotation"]
if owlMeta:
            result = {"Toplevel": self.condenseNodes("", owlMeta.subActions())}
with open("out.json", "wb") as f:
json.dump(result, f)
with open("generalized_model.pkl", "wb") as f:
pickle.dump({"model" : result,
"parameters" : owlAnnot.annotatedParameterTypes()},
f, pickle.HIGHEST_PROTOCOL)
else:
print "No meta data in file!"
def condenseNodes(self, strParentNode, arrNodes, nLevel = 0):
arrTypes = {}
arrIndividuals = {}
for strNode in arrNodes:
owlNode = self.tti[strNode]
ident = owlNode.taskContext()#.type()
failures = owlNode.failures()
failure = ""
if len(failures) > 0:
failure = self.tti[failures[0]].type()
result = self.condenseNodes(strNode, owlNode.subActions(), nLevel + 1)
if not ident in arrTypes:
arrTypes[ident] = result
else:
arrTypes[ident] = self.unifyResults(arrTypes[ident], result)
arrTypes[ident]["individuals"][strNode] = {"parameters" : owlNode.annotatedParameters(True),
"parent" : strParentNode,
"failure" : failure}
return {"subTypes" : arrTypes,
"individuals" : {}}
def unifyResults(self, res1, res2):
resparams = {}
if len(res1["individuals"]) > 0:
resparams = res1["individuals"]
if len(res2["individuals"]) > 0:
resparams = dict(resparams.items() + res2["individuals"].items())
unified = {"subTypes" : {},
"individuals" : resparams}
for ressub1 in res1["subTypes"]:
if ressub1 in res2["subTypes"]:
unified["subTypes"][ressub1] = self.unifyResults(res1["subTypes"][ressub1],
res2["subTypes"][ressub1])
else:
unified["subTypes"][ressub1] = res1["subTypes"][ressub1]
for ressub2 in res2["subTypes"]:
if not ressub2 in res1["subTypes"]:
unified["subTypes"][ressub2] = res2["subTypes"][ressub2]
return unified
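# Minimal usage sketch (hypothetical log path, not part of the original
# script): condensing one recorded experiment writes out.json and
# generalized_model.pkl into the working directory.
#   DataCondenser().condenseData("/path/to/experiment-log")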
|
{
"content_hash": "0a737cb531bd52d276d909c7e0561fbf",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 104,
"avg_line_length": 36.109756097560975,
"alnum_prop": 0.49037487335359675,
"repo_name": "code-iai/semrec",
"id": "f519bb82ca50f2dd3ca0e3a689441f33915112d6",
"size": "2961",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scripts/bstools/Beliefstate Tools/DataCondenser.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "2038"
},
{
"name": "C++",
"bytes": "415001"
},
{
"name": "CMake",
"bytes": "2624"
},
{
"name": "Gnuplot",
"bytes": "1306"
},
{
"name": "Python",
"bytes": "118308"
},
{
"name": "Shell",
"bytes": "3909"
}
],
"symlink_target": ""
}
|
"""
sentry.web.urls
~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
__all__ = ('urlpatterns',)
from django.conf.urls import include, patterns, url
from django.conf import settings
import sentry.web.frontend.projects.keys
import sentry.web.frontend.projects.plugins
import sentry.web.frontend.projects.quotas
import sentry.web.frontend.projects.rules
import sentry.web.frontend.projects.tags
from sentry.web import api
from sentry.web.frontend import (
accounts, generic, groups, events, admin
)
from sentry.web.frontend.admin_queue import AdminQueueView
from sentry.web.frontend.accept_organization_invite import AcceptOrganizationInviteView
from sentry.web.frontend.auth_link_identity import AuthLinkIdentityView
from sentry.web.frontend.auth_login import AuthLoginView
from sentry.web.frontend.auth_logout import AuthLogoutView
from sentry.web.frontend.auth_organization_login import AuthOrganizationLoginView
from sentry.web.frontend.auth_provider_login import AuthProviderLoginView
from sentry.web.frontend.error_page_embed import ErrorPageEmbedView
from sentry.web.frontend.home import HomeView
from sentry.web.frontend.mailgun_inbound_webhook import MailgunInboundWebhookView
from sentry.web.frontend.organization_api_keys import OrganizationApiKeysView
from sentry.web.frontend.organization_api_key_settings import OrganizationApiKeySettingsView
from sentry.web.frontend.organization_audit_log import OrganizationAuditLogView
from sentry.web.frontend.organization_auth_settings import OrganizationAuthSettingsView
from sentry.web.frontend.organization_members import OrganizationMembersView
from sentry.web.frontend.organization_member_settings import OrganizationMemberSettingsView
from sentry.web.frontend.organization_settings import OrganizationSettingsView
from sentry.web.frontend.create_organization import CreateOrganizationView
from sentry.web.frontend.create_organization_member import CreateOrganizationMemberView
from sentry.web.frontend.create_project import CreateProjectView
from sentry.web.frontend.create_team import CreateTeamView
from sentry.web.frontend.project_issue_tracking import ProjectIssueTrackingView
from sentry.web.frontend.project_notifications import ProjectNotificationsView
from sentry.web.frontend.project_release_tracking import ProjectReleaseTrackingView
from sentry.web.frontend.project_settings import ProjectSettingsView
from sentry.web.frontend.react_page import GenericReactPageView, ReactPageView
from sentry.web.frontend.release_webhook import ReleaseWebhookView
from sentry.web.frontend.remove_organization import RemoveOrganizationView
from sentry.web.frontend.remove_project import RemoveProjectView
from sentry.web.frontend.remove_team import RemoveTeamView
from sentry.web.frontend.team_settings import TeamSettingsView
def init_all_applications():
"""
Forces import of all applications to ensure code is registered.
"""
from django.db.models import get_apps, get_models
for app in get_apps():
try:
get_models(app)
except Exception:
continue
init_all_applications()
urlpatterns = patterns('')
if settings.DEBUG:
import sentry.web.frontend.debug.mail
from sentry.web.frontend.debug.debug_trigger_error import DebugTriggerErrorView
from sentry.web.frontend.debug.debug_error_embed import DebugErrorPageEmbedView
from sentry.web.frontend.debug.debug_new_release_email import DebugNewReleaseEmailView
urlpatterns += patterns('',
url(r'^debug/mail/new-event/$',
sentry.web.frontend.debug.mail.new_event),
url(r'^debug/mail/new-note/$',
sentry.web.frontend.debug.mail.new_note),
url(r'^debug/mail/new-release/$',
DebugNewReleaseEmailView.as_view()),
url(r'^debug/mail/request-access/$',
sentry.web.frontend.debug.mail.request_access),
url(r'^debug/mail/access-approved/$',
sentry.web.frontend.debug.mail.access_approved),
url(r'^debug/embed/error-page/$',
DebugErrorPageEmbedView.as_view()),
url(r'^debug/trigger-error/$',
DebugTriggerErrorView.as_view()),
)
urlpatterns += patterns('',
# Store endpoints first since they are the most active
url(r'^api/store/$', api.StoreView.as_view(),
name='sentry-api-store'),
url(r'^api/(?P<project_id>[\w_-]+)/store/$', api.StoreView.as_view(),
name='sentry-api-store'),
url(r'^_static/(?P<module>[^/]+)/(?P<path>.*)$', generic.static_media,
name='sentry-media'),
url(r'^templates/(?P<path>.*)$', generic.partial_static_media,
name='sentry-partial-media'),
# API
url(r'^api/0/', include('sentry.api.urls')),
url(r'^api/hooks/mailgun/inbound/', MailgunInboundWebhookView.as_view(),
name='sentry-mailgun-inbound-hook'),
url(r'^api/hooks/release/(?P<plugin_id>[^/]+)/(?P<project_id>[^/]+)/(?P<signature>[^/]+)/', ReleaseWebhookView.as_view(),
name='sentry-release-hook'),
url(r'^api/embed/error-page/$', ErrorPageEmbedView.as_view(),
name='sentry-error-page-embed'),
url(r'^api/', include('sentry.api.help_urls')),
# Auth
url(r'^auth/link/(?P<organization_slug>[^/]+)/$', AuthLinkIdentityView.as_view(),
name='sentry-auth-link-identity'),
url(r'^auth/login/$', AuthLoginView.as_view(),
name='sentry-login'),
url(r'^auth/login/(?P<organization_slug>[^/]+)/$', AuthOrganizationLoginView.as_view(),
name='sentry-auth-organization'),
url(r'^auth/sso/$', AuthProviderLoginView.as_view(),
name='sentry-auth-sso'),
url(r'^auth/logout/$', AuthLogoutView.as_view(),
name='sentry-logout'),
# Account
url(r'^login-redirect/$', accounts.login_redirect,
name='sentry-login-redirect'),
url(r'^register/$', AuthLoginView.as_view(),
name='sentry-register'),
url(r'^account/sudo/$', 'sudo.views.sudo',
{'template_name': 'sentry/account/sudo.html'},
name='sentry-sudo'),
url(r'^account/recover/$', accounts.recover,
name='sentry-account-recover'),
url(r'^account/recover/confirm/(?P<user_id>[\d]+)/(?P<hash>[0-9a-zA-Z]+)/$', accounts.recover_confirm,
name='sentry-account-recover-confirm'),
url(r'^account/settings/$', accounts.settings,
name='sentry-account-settings'),
url(r'^account/settings/appearance/$', accounts.appearance_settings,
name='sentry-account-settings-appearance'),
url(r'^account/settings/identities/$', accounts.list_identities,
name='sentry-account-settings-identities'),
url(r'^account/settings/notifications/$', accounts.notification_settings,
name='sentry-account-settings-notifications'),
url(r'^account/settings/social/', include('social_auth.urls')),
# Admin
url(r'^manage/$', admin.overview,
name='sentry-admin-overview'),
url(r'^manage/queue/$', AdminQueueView.as_view(),
name='sentry-admin-queue'),
url(r'^manage/status/environment/$', admin.status_env,
name='sentry-admin-status'),
url(r'^manage/status/packages/$', admin.status_packages,
name='sentry-admin-packages-status'),
url(r'^manage/status/mail/$', admin.status_mail,
name='sentry-admin-mail-status'),
# Admin - Teams
url(r'^manage/teams/$', admin.manage_teams,
name='sentry-admin-teams'),
# Admin - Projects
url(r'^manage/projects/$', admin.manage_projects,
name='sentry-admin-projects'),
# Admin - Users
url(r'^manage/users/$', admin.manage_users,
name='sentry-admin-users'),
url(r'^manage/users/new/$', admin.create_new_user,
name='sentry-admin-new-user'),
url(r'^manage/users/(?P<user_id>\d+)/$', admin.edit_user,
name='sentry-admin-edit-user'),
url(r'^manage/users/(?P<user_id>\d+)/remove/$', admin.remove_user,
name='sentry-admin-remove-user'),
url(r'^manage/users/(?P<user_id>\d+)/projects/$', admin.list_user_projects,
name='sentry-admin-list-user-projects'),
# Admin - Plugins
url(r'^manage/plugins/(?P<slug>[\w_-]+)/$', admin.configure_plugin,
name='sentry-admin-configure-plugin'),
# Organizations
url(r'^(?P<organization_slug>[\w_-]+)/$', ReactPageView.as_view(),
name='sentry-organization-home'),
url(r'^organizations/new/$', CreateOrganizationView.as_view(),
name='sentry-create-organization'),
url(r'^organizations/(?P<organization_slug>[\w_-]+)/api-keys/$', OrganizationApiKeysView.as_view(),
name='sentry-organization-api-keys'),
url(r'^organizations/(?P<organization_slug>[\w_-]+)/api-keys/(?P<key_id>[\w_-]+)$', OrganizationApiKeySettingsView.as_view(),
name='sentry-organization-api-key-settings'),
url(r'^organizations/(?P<organization_slug>[\w_-]+)/auth/$', OrganizationAuthSettingsView.as_view(),
name='sentry-organization-auth-settings'),
url(r'^organizations/(?P<organization_slug>[\w_-]+)/audit-log/$', OrganizationAuditLogView.as_view(),
name='sentry-organization-audit-log'),
url(r'^organizations/(?P<organization_slug>[\w_-]+)/members/$', OrganizationMembersView.as_view(),
name='sentry-organization-members'),
url(r'^organizations/(?P<organization_slug>[\w_-]+)/members/new/$', CreateOrganizationMemberView.as_view(),
name='sentry-create-organization-member'),
url(r'^organizations/(?P<organization_slug>[\w_-]+)/members/(?P<member_id>\d+)/$', OrganizationMemberSettingsView.as_view(),
name='sentry-organization-member-settings'),
url(r'^organizations/(?P<organization_slug>[\w_-]+)/stats/$', ReactPageView.as_view(),
name='sentry-organization-stats'),
url(r'^organizations/(?P<organization_slug>[\w_-]+)/settings/$', OrganizationSettingsView.as_view(),
name='sentry-organization-settings'),
url(r'^organizations/(?P<organization_slug>[\w_-]+)/teams/(?P<team_slug>[\w_-]+)/settings/$', TeamSettingsView.as_view(),
name='sentry-manage-team'),
url(r'^organizations/(?P<organization_slug>[\w_-]+)/teams/(?P<team_slug>[\w_-]+)/remove/$', RemoveTeamView.as_view(),
name='sentry-remove-team'),
url(r'^organizations/(?P<organization_slug>[\w_-]+)/teams/new/$', CreateTeamView.as_view(),
name='sentry-create-team'),
url(r'^organizations/(?P<organization_slug>[\w_-]+)/projects/new/$', CreateProjectView.as_view(),
name='sentry-create-project'),
url(r'^organizations/(?P<organization_slug>[\w_-]+)/remove/$', RemoveOrganizationView.as_view(),
name='sentry-remove-organization'),
url(r'^accept/(?P<member_id>\d+)/(?P<token>\w+)/$', AcceptOrganizationInviteView.as_view(),
name='sentry-accept-invite'),
# Settings - Projects
url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_slug>[\w_-]+)/settings/$',
ProjectSettingsView.as_view(),
name='sentry-manage-project'),
url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_slug>[\w_-]+)/settings/issue-tracking/$',
ProjectIssueTrackingView.as_view(),
name='sentry-project-issue-tracking'),
url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_slug>[\w_-]+)/settings/release-tracking/$',
ProjectReleaseTrackingView.as_view(),
name='sentry-project-release-tracking'),
url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/settings/keys/$',
sentry.web.frontend.projects.keys.manage_project_keys,
name='sentry-manage-project-keys'),
url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/settings/keys/new/$',
sentry.web.frontend.projects.keys.new_project_key,
name='sentry-new-project-key'),
url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/settings/keys/(?P<key_id>\d+)/edit/$',
sentry.web.frontend.projects.keys.edit_project_key,
name='sentry-edit-project-key'),
url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/settings/keys/(?P<key_id>\d+)/remove/$',
sentry.web.frontend.projects.keys.remove_project_key,
name='sentry-remove-project-key'),
url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/settings/keys/(?P<key_id>\d+)/disable/$',
sentry.web.frontend.projects.keys.disable_project_key,
name='sentry-disable-project-key'),
url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/settings/keys/(?P<key_id>\d+)/enable/$',
sentry.web.frontend.projects.keys.enable_project_key,
name='sentry-enable-project-key'),
url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/settings/plugins/$',
sentry.web.frontend.projects.plugins.manage_plugins,
name='sentry-manage-project-plugins'),
url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/settings/plugins/(?P<slug>[\w_-]+)/$',
sentry.web.frontend.projects.plugins.configure_project_plugin,
name='sentry-configure-project-plugin'),
url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/settings/plugins/(?P<slug>[\w_-]+)/reset/$',
sentry.web.frontend.projects.plugins.reset_project_plugin,
name='sentry-reset-project-plugin'),
url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/settings/plugins/(?P<slug>[\w_-]+)/disable/$',
sentry.web.frontend.projects.plugins.disable_project_plugin,
name='sentry-disable-project-plugin'),
url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/settings/plugins/(?P<slug>[\w_-]+)/enable/$',
sentry.web.frontend.projects.plugins.enable_project_plugin,
name='sentry-enable-project-plugin'),
url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_slug>[\w_-]+)/settings/remove/$',
RemoveProjectView.as_view(),
name='sentry-remove-project'),
url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/settings/tags/$',
sentry.web.frontend.projects.tags.manage_project_tags,
name='sentry-manage-project-tags'),
url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/settings/quotas/$',
sentry.web.frontend.projects.quotas.manage_project_quotas,
name='sentry-manage-project-quotas'),
url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_slug>[\w_-]+)/settings/notifications/$',
ProjectNotificationsView.as_view(),
name='sentry-project-notifications'),
url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/settings/rules/$',
sentry.web.frontend.projects.rules.list_rules,
name='sentry-project-rules'),
url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/settings/rules/(?P<rule_id>\d+)/edit/$',
sentry.web.frontend.projects.rules.create_or_edit_rule,
name='sentry-edit-project-rule'),
url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/settings/rules/(?P<rule_id>\d+)/remove/$',
sentry.web.frontend.projects.rules.remove_rule,
name='sentry-remove-project-rule'),
url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/settings/rules/new/$',
sentry.web.frontend.projects.rules.create_or_edit_rule,
name='sentry-new-project-rule'),
# Generic
url(r'^$', HomeView.as_view(),
name='sentry'),
# crossdomain.xml
url(r'^crossdomain\.xml$', api.crossdomain_xml_index,
name='sentry-api-crossdomain-xml-index'),
url(r'^api/(?P<project_id>[\w_-]+)/crossdomain\.xml$', api.crossdomain_xml,
name='sentry-api-crossdomain-xml'),
# Generic API
url(r'^api/(?P<organization_slug>[\w_-]+)/(?P<team_slug>[\w_-]+)/groups/trends/$', api.get_group_trends,
name='sentry-api-groups-trends'),
url(r'^api/(?P<organization_slug>[\w_-]+)/(?P<team_slug>[\w_-]+)/groups/newest/$', api.get_new_groups,
name='sentry-api-groups-new'),
url(r'^api/(?P<organization_slug>[\w_-]+)/(?P<team_slug>[\w_-]+)/groups/resolved/$', api.get_resolved_groups,
name='sentry-api-groups-resolved'),
url(r'^api/(?P<organization_slug>[\w_-]+)/(?P<team_slug>[\w_-]+)/stats/$', api.get_stats,
name='sentry-api-stats'),
url(r'^api/(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/tags/search/$', api.search_tags,
name='sentry-api-search-tags'),
url(r'^api/(?P<organization_slug>[\w_-]+)/users/search/$', api.search_users,
name='sentry-api-search-users'),
url(r'^api/(?P<organization_slug>[\w_-]+)/projects/search/$', api.search_projects,
name='sentry-api-search-projects'),
url(r'^share/group/(?P<share_id>[\w_-]+)/$', GenericReactPageView.as_view(auth_required=False),
name='sentry-group-shared'),
# TV dashboard
url(r'^(?P<organization_slug>[\w_-]+)/teams/(?P<team_slug>[\w_-]+)/wall/$', groups.wall_display,
name='sentry-wall'),
url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/group/(?P<group_id>\d+)/$', ReactPageView.as_view(),
name='sentry-group'),
url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/group/(?P<group_id>\d+)/activity/$', ReactPageView.as_view(),
name='sentry-group-activity'),
url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/group/(?P<group_id>\d+)/events/$', ReactPageView.as_view(),
name='sentry-group-events'),
url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/group/(?P<group_id>\d+)/events/(?P<event_id>\d+)/$', ReactPageView.as_view(),
name='sentry-group-event'),
url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/group/(?P<group_id>\d+)/events/(?P<event_id>\d+)/replay/$', events.replay_event,
name='sentry-replay'),
url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/group/(?P<group_id>\d+)/events/(?P<event_id_or_latest>(\d+|latest))/json/$', groups.group_event_details_json,
name='sentry-group-event-json'),
url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/group/(?P<group_id>\d+)/actions/(?P<slug>[\w_-]+)/', groups.group_plugin_action,
name='sentry-group-plugin-action'),
url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/group/(?P<group_id>\d+)/tags/$', ReactPageView.as_view(),
name='sentry-group-tags'),
url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/group/(?P<group_id>\d+)/tags/(?P<tag_name>[^/]+)/$', ReactPageView.as_view(),
name='sentry-group-tag-details'),
url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/releases/$', ReactPageView.as_view(),
name='sentry-releases'),
url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/releases/(?P<version>[^\/]+)/$', ReactPageView.as_view(),
name='sentry-release-details'),
url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/releases/(?P<version>[^\/]+)/all-events/$', ReactPageView.as_view(),
name='sentry-release-details-all-events'),
url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/releases/(?P<version>[^\/]+)/artifacts/$', ReactPageView.as_view(),
name='sentry-release-details-artifacts'),
url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/dashboard/$', ReactPageView.as_view(),
name='sentry-dashboard'),
url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/events/$', ReactPageView.as_view(),
name='sentry-events'),
url(r'^(?P<organization_slug>[\w_-]+)/(?P<project_id>[\w_-]+)/$', ReactPageView.as_view(),
name='sentry-stream'),
# Legacy
url(r'', ReactPageView.as_view()),
)
|
{
"content_hash": "5fe59ec8d3ceaa06bcf8873b07afa0cb",
"timestamp": "",
"source": "github",
"line_count": 372,
"max_line_length": 175,
"avg_line_length": 52.674731182795696,
"alnum_prop": 0.6506761929063537,
"repo_name": "fuziontech/sentry",
"id": "bf2476c0d89947d4d31d59aac1a0ed4644a508bd",
"size": "19595",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/sentry/web/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "154635"
},
{
"name": "HTML",
"bytes": "203411"
},
{
"name": "JavaScript",
"bytes": "363845"
},
{
"name": "Makefile",
"bytes": "2718"
},
{
"name": "Python",
"bytes": "6299848"
}
],
"symlink_target": ""
}
|
from sys import (oar, baz)
oar
|
{
"content_hash": "1c0161098b47d4ded43549c55d5e0057",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 26,
"avg_line_length": 10.333333333333334,
"alnum_prop": 0.7096774193548387,
"repo_name": "int19h/PTVS",
"id": "77b4ee7927465395fe0cfee677a6b0126789b2d0",
"size": "31",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "Python/Tests/TestData/RemoveImport/FromImportParens1.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP.NET",
"bytes": "109"
},
{
"name": "Batchfile",
"bytes": "7975"
},
{
"name": "C",
"bytes": "21444"
},
{
"name": "C#",
"bytes": "11297254"
},
{
"name": "C++",
"bytes": "175131"
},
{
"name": "CSS",
"bytes": "4109"
},
{
"name": "HTML",
"bytes": "213660"
},
{
"name": "JavaScript",
"bytes": "44401"
},
{
"name": "PowerShell",
"bytes": "18157"
},
{
"name": "Pug",
"bytes": "2807"
},
{
"name": "Python",
"bytes": "620501"
},
{
"name": "Rich Text Format",
"bytes": "260880"
},
{
"name": "Smarty",
"bytes": "3663"
},
{
"name": "Tcl",
"bytes": "24968"
},
{
"name": "Vim Snippet",
"bytes": "17303"
}
],
"symlink_target": ""
}
|
"""
Title: Text generation with a miniature GPT
Author: [Apoorv Nandan](https://twitter.com/NandanApoorv)
Date created: 2020/05/29
Last modified: 2020/05/29
Description: Implement a miniature version of GPT and train it to generate text.
"""
"""
## Introduction
This example demonstrates how to implement an autoregressive language model
using a miniature version of the GPT model.
The model consists of a single Transformer block with causal masking
in its attention layer.
We use the text from the IMDB sentiment classification dataset for training
and generate new movie reviews for a given prompt.
When using this script with your own dataset, make sure it has at least
1 million words.
This example should be run with `tf-nightly>=2.3.0-dev20200531` or
with TensorFlow 2.3 or higher.
**References:**
- [GPT](https://www.semanticscholar.org/paper/Improving-Language-Understanding-by-Generative-Radford/cd18800a0fe0b668a1cc19f2ec95b5003d0a5035)
- [GPT-2](https://www.semanticscholar.org/paper/Language-Models-are-Unsupervised-Multitask-Learners-Radford-Wu/9405cc0d6169988371b2755e573cc28650d14dfe)
- [GPT-3](https://arxiv.org/abs/2005.14165)
"""
"""
## Setup
"""
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.layers import TextVectorization
import numpy as np
import os
import re
import string
import random
"""
## Implement a Transformer block as a layer
"""
def causal_attention_mask(batch_size, n_dest, n_src, dtype):
"""
    Mask the upper half of the dot-product matrix in self-attention.
    This prevents the flow of information from future tokens to the current token.
    The mask has 1's in the lower triangle, counting from the lower right corner.
"""
i = tf.range(n_dest)[:, None]
j = tf.range(n_src)
m = i >= j - n_src + n_dest
mask = tf.cast(m, dtype)
mask = tf.reshape(mask, [1, n_dest, n_src])
mult = tf.concat(
[tf.expand_dims(batch_size, -1), tf.constant([1, 1], dtype=tf.int32)], 0
)
return tf.tile(mask, mult)
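"""
A quick sanity check of the mask (a sketch, reusing the `tf` import above):
for a 4-token sequence, position `i` may only attend to positions `<= i`.
"""

demo_mask = causal_attention_mask(1, 4, 4, tf.int32)
print(demo_mask[0].numpy())
# [[1 0 0 0]
#  [1 1 0 0]
#  [1 1 1 0]
#  [1 1 1 1]]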
class TransformerBlock(layers.Layer):
def __init__(self, embed_dim, num_heads, ff_dim, rate=0.1):
super(TransformerBlock, self).__init__()
self.att = layers.MultiHeadAttention(num_heads, embed_dim)
self.ffn = keras.Sequential(
[
layers.Dense(ff_dim, activation="relu"),
layers.Dense(embed_dim),
]
)
self.layernorm1 = layers.LayerNormalization(epsilon=1e-6)
self.layernorm2 = layers.LayerNormalization(epsilon=1e-6)
self.dropout1 = layers.Dropout(rate)
self.dropout2 = layers.Dropout(rate)
def call(self, inputs):
input_shape = tf.shape(inputs)
batch_size = input_shape[0]
seq_len = input_shape[1]
causal_mask = causal_attention_mask(batch_size, seq_len, seq_len, tf.bool)
attention_output = self.att(inputs, inputs, attention_mask=causal_mask)
attention_output = self.dropout1(attention_output)
out1 = self.layernorm1(inputs + attention_output)
ffn_output = self.ffn(out1)
ffn_output = self.dropout2(ffn_output)
return self.layernorm2(out1 + ffn_output)
"""
## Implement an embedding layer
Create two separate embedding layers: one for tokens and one for token
positions (indices).
"""
class TokenAndPositionEmbedding(layers.Layer):
def __init__(self, maxlen, vocab_size, embed_dim):
super(TokenAndPositionEmbedding, self).__init__()
self.token_emb = layers.Embedding(input_dim=vocab_size, output_dim=embed_dim)
self.pos_emb = layers.Embedding(input_dim=maxlen, output_dim=embed_dim)
def call(self, x):
maxlen = tf.shape(x)[-1]
positions = tf.range(start=0, limit=maxlen, delta=1)
positions = self.pos_emb(positions)
x = self.token_emb(x)
return x + positions
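"""
Shape check (a sketch with toy sizes, unrelated to the settings used below):
the layer maps integer token ids of shape `(batch, seq_len)` to embeddings of
shape `(batch, seq_len, embed_dim)`.
"""

demo_emb = TokenAndPositionEmbedding(maxlen=8, vocab_size=50, embed_dim=4)
print(demo_emb(tf.constant([[3, 1, 4, 1, 5]])).shape)  # (1, 5, 4)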
"""
## Implement the miniature GPT model
"""
vocab_size = 20000 # Only consider the top 20k words
maxlen = 80 # Max sequence size
embed_dim = 256 # Embedding size for each token
num_heads = 2 # Number of attention heads
feed_forward_dim = 256 # Hidden layer size in feed forward network inside transformer
def create_model():
inputs = layers.Input(shape=(maxlen,), dtype=tf.int32)
embedding_layer = TokenAndPositionEmbedding(maxlen, vocab_size, embed_dim)
x = embedding_layer(inputs)
transformer_block = TransformerBlock(embed_dim, num_heads, feed_forward_dim)
x = transformer_block(x)
outputs = layers.Dense(vocab_size)(x)
model = keras.Model(inputs=inputs, outputs=[outputs, x])
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
model.compile(
"adam",
loss=[loss_fn, None],
    )  # No loss is attached to the second output (the transformer block's activations)
return model
"""
## Prepare the data for word-level language modelling
Download the IMDB dataset and combine training and validation sets for a text
generation task.
"""
"""shell
curl -O https://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz
tar -xf aclImdb_v1.tar.gz
"""
batch_size = 128
# The dataset contains each review in a separate text file
# The text files are present in four different folders
# Create a list of all files
filenames = []
directories = [
"aclImdb/train/pos",
"aclImdb/train/neg",
"aclImdb/test/pos",
"aclImdb/test/neg",
]
for directory in directories:
    for f in os.listdir(directory):
        filenames.append(os.path.join(directory, f))
print(f"{len(filenames)} files")
# Create a dataset from text files
random.shuffle(filenames)
text_ds = tf.data.TextLineDataset(filenames)
text_ds = text_ds.shuffle(buffer_size=256)
text_ds = text_ds.batch(batch_size)
def custom_standardization(input_string):
"""Remove html line-break tags and handle punctuation"""
lowercased = tf.strings.lower(input_string)
stripped_html = tf.strings.regex_replace(lowercased, "<br />", " ")
return tf.strings.regex_replace(stripped_html, f"([{string.punctuation}])", r" \1")
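"""
What the standardization does (a sketch on a made-up review snippet):
lowercase, drop the `<br />` tags, and put a space before each punctuation
mark so that it becomes its own token.
"""

print(custom_standardization(tf.constant("Great movie!<br />Loved it.")))
# tf.Tensor(b'great movie ! loved it .', shape=(), dtype=string)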
# Create a vectorization layer and adapt it to the text
vectorize_layer = TextVectorization(
standardize=custom_standardization,
max_tokens=vocab_size - 1,
output_mode="int",
output_sequence_length=maxlen + 1,
)
vectorize_layer.adapt(text_ds)
vocab = vectorize_layer.get_vocabulary() # To get words back from token indices
def prepare_lm_inputs_labels(text):
"""
    Shift word sequences by 1 position so that the target for position (i) is
    the word at position (i+1). The model will use all words up to position (i)
    to predict the next word.
"""
text = tf.expand_dims(text, -1)
tokenized_sentences = vectorize_layer(text)
x = tokenized_sentences[:, :-1]
y = tokenized_sentences[:, 1:]
return x, y
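"""
The shift in miniature (a sketch with made-up token ids): for a tokenized
window `[12, 7, 45, 3]`, the model is fed `[12, 7, 45]` and trained to
predict `[7, 45, 3]`.
"""

demo_tokens = tf.constant([[12, 7, 45, 3]])
print(demo_tokens[:, :-1].numpy(), demo_tokens[:, 1:].numpy())
# [[12  7 45]] [[ 7 45  3]]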
text_ds = text_ds.map(prepare_lm_inputs_labels, num_parallel_calls=tf.data.AUTOTUNE)
text_ds = text_ds.prefetch(tf.data.AUTOTUNE)
"""
## Implement a Keras callback for generating text
"""
class TextGenerator(keras.callbacks.Callback):
"""A callback to generate text from a trained model.
1. Feed some starting prompt to the model
2. Predict probabilities for the next token
3. Sample the next token and add it to the next input
Arguments:
max_tokens: Integer, the number of tokens to be generated after prompt.
start_tokens: List of integers, the token indices for the starting prompt.
index_to_word: List of strings, obtained from the TextVectorization layer.
top_k: Integer, sample from the `top_k` token predictions.
print_every: Integer, print after this many epochs.
"""
def __init__(
self, max_tokens, start_tokens, index_to_word, top_k=10, print_every=1
):
self.max_tokens = max_tokens
self.start_tokens = start_tokens
self.index_to_word = index_to_word
self.print_every = print_every
self.k = top_k
def sample_from(self, logits):
logits, indices = tf.math.top_k(logits, k=self.k, sorted=True)
indices = np.asarray(indices).astype("int32")
preds = keras.activations.softmax(tf.expand_dims(logits, 0))[0]
preds = np.asarray(preds).astype("float32")
return np.random.choice(indices, p=preds)
def detokenize(self, number):
return self.index_to_word[number]
def on_epoch_end(self, epoch, logs=None):
start_tokens = [_ for _ in self.start_tokens]
if (epoch + 1) % self.print_every != 0:
return
num_tokens_generated = 0
tokens_generated = []
while num_tokens_generated <= self.max_tokens:
pad_len = maxlen - len(start_tokens)
sample_index = len(start_tokens) - 1
if pad_len < 0:
x = start_tokens[:maxlen]
sample_index = maxlen - 1
elif pad_len > 0:
x = start_tokens + [0] * pad_len
else:
x = start_tokens
x = np.array([x])
y, _ = self.model.predict(x)
sample_token = self.sample_from(y[0][sample_index])
tokens_generated.append(sample_token)
start_tokens.append(sample_token)
num_tokens_generated = len(tokens_generated)
txt = " ".join(
[self.detokenize(_) for _ in self.start_tokens + tokens_generated]
)
print(f"generated text:\n{txt}\n")
# Tokenize starting prompt
word_to_index = {}
for index, word in enumerate(vocab):
word_to_index[word] = index
start_prompt = "this movie is"
start_tokens = [word_to_index.get(_, 1) for _ in start_prompt.split()]
num_tokens_generated = 40
text_gen_callback = TextGenerator(num_tokens_generated, start_tokens, vocab)
"""
## Train the model
Note: This code should preferably be run on GPU.
"""
model = create_model()
model.fit(text_ds, verbose=2, epochs=25, callbacks=[text_gen_callback])
|
{
"content_hash": "9f5bc7fe20ff39eb3510a3a05a750553",
"timestamp": "",
"source": "github",
"line_count": 299,
"max_line_length": 152,
"avg_line_length": 33.130434782608695,
"alnum_prop": 0.6723198061780739,
"repo_name": "keras-team/keras-io",
"id": "7d68291637252304d3801e0861cd44b6db2f3cbd",
"size": "9906",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/generative/text_generation_with_miniature_gpt.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "15929"
},
{
"name": "Dockerfile",
"bytes": "188"
},
{
"name": "HTML",
"bytes": "21968"
},
{
"name": "Jupyter Notebook",
"bytes": "718942"
},
{
"name": "Makefile",
"bytes": "193"
},
{
"name": "Python",
"bytes": "680865"
}
],
"symlink_target": ""
}
|
import sys
import re
all_features = ['OSPL']
ignore_regex = ['<\?xml .*\?>',
'<splice_meta_config version=.*>',
'<!--xmlns:xsi=.*-->',
'<serviceMapping>',
'<element name=.* command=.*>',
'</serviceMapping>',
'</splice_meta_config>' ]
class ReMatcher(object):
def __init__(self, patterns):
self._patterns = []
for p in patterns:
self._patterns.append(re.compile(p, re.IGNORECASE))
def match(self, string):
for p in self._patterns:
if bool(p.match(string)):
return True
return False
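# A quick illustration of the matcher (hypothetical input lines):
#   ReMatcher(ignore_regex).match('<serviceMapping>')               -> True
#   ReMatcher(ignore_regex).match('<Element name="x" command="y">') -> True (patterns are case-insensitive)
#   ReMatcher(ignore_regex).match('<someRealElement/>')             -> False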
def extract (in_file, out_file, version, service, features):
matcher = ReMatcher(ignore_regex)
state = []
in_scope = True
cmd_line = False
fr = open (in_file, 'r')
fw = open (out_file, 'w')
fw.write ("<dummyElement>\n")
for line in fr:
        ignore_line = matcher.match(line.strip())
if not ignore_line:
line = line.replace ('VERSION', version)
line = line.replace ('LC_SNAME', service.lower ())
line = line.replace ('SNAME', service)
            fw.write (line)
fw.write ("</dummyElement>")
fr.close ()
fw.close ()
def usage ():
sys.stdout.write ('usage: extract.py meta_config_file (osplconf)')
for f in all_features: sys.stdout.write (' [' + f + ']')
sys.stdout.write ('\n')
sys.exit (1)
def main (argv):
features = ['OSPL']
version = 'COMMERCIAL'
service = 'DDSI2E'
try:
meta = argv[0]
config = argv[1]
except: usage ()
extract (meta, config + '.xml', version, service, features)
if __name__ == "__main__":
main (sys.argv[1:])
|
{
"content_hash": "5faad54407d017d048bb6d7335768601",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 68,
"avg_line_length": 26.203125,
"alnum_prop": 0.5491949910554562,
"repo_name": "osrf/opensplice",
"id": "03f6a696c6df9c4417b398da34b0ff159fa5e1bd",
"size": "1696",
"binary": false,
"copies": "2",
"ref": "refs/heads/osrf-6.9.0",
"path": "build/docs/extract.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Awk",
"bytes": "16400"
},
{
"name": "Batchfile",
"bytes": "192174"
},
{
"name": "C",
"bytes": "19618578"
},
{
"name": "C#",
"bytes": "2428591"
},
{
"name": "C++",
"bytes": "8036199"
},
{
"name": "CMake",
"bytes": "35186"
},
{
"name": "CSS",
"bytes": "41427"
},
{
"name": "HTML",
"bytes": "457045"
},
{
"name": "Java",
"bytes": "5184488"
},
{
"name": "JavaScript",
"bytes": "540355"
},
{
"name": "LLVM",
"bytes": "13059"
},
{
"name": "Lex",
"bytes": "51476"
},
{
"name": "Makefile",
"bytes": "513684"
},
{
"name": "Objective-C",
"bytes": "38424"
},
{
"name": "Perl",
"bytes": "164028"
},
{
"name": "Python",
"bytes": "915683"
},
{
"name": "Shell",
"bytes": "363583"
},
{
"name": "TeX",
"bytes": "8134"
},
{
"name": "Visual Basic",
"bytes": "290"
},
{
"name": "Yacc",
"bytes": "202848"
}
],
"symlink_target": ""
}
|
"""Extract what lint expects for cmap from our data."""
import argparse
import sys
from nototools import lint_config
from nototools import noto_data
from nototools import opentype_data
from nototools import unicode_data
from nototools import cmap_data
_PHASE_TWO_SCRIPTS = """
Arab, Aran, Armi, Armn, Avst, Bali, Bamu, Batk, Beng, Brah, Bugi, Buhd, Cans,
Cari, Cham, Cher, Copt, Cprt, Deva, Dsrt, Egyp, Ethi, Geor, Glag, Goth, Gujr,
Guru, Hano, Hans, Hant, Hebr, Ital, Java, Jpan, Kali, Khar, Khmr, Knda, Kore,
Kthi, LGC, Lana, Laoo, Lepc, Limb, Linb, Lisu, Lyci, Lydi, Mand, Mlym, Mong,
Mtei, Mymr, Nkoo, Ogam, Olck, Orkh, Orya, Osma, Phag, Phli, Phnx, Prti, Qaae,
Rjng, Runr, Samr, Sarb, Saur, Shaw, Sinh, Sund, Sylo, Syrc, Tagb, Tale, Talu,
Taml, Tavt, Telu, Tfng, Tglg, Thaa, Thai, Tibt, Ugar, Vaii, Xpeo, Xsux, Yiii,
Zsym
"""
def _code_range_to_set(code_range):
"""Converts a code range output by _parse_code_ranges to a set."""
characters = set()
for first, last, _ in code_range:
characters.update(range(first, last+1))
return characters
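# Worked example (illustrative tuple; the third element of each range is ignored):
#   _code_range_to_set([(0x41, 0x43, None)]) == {0x41, 0x42, 0x43}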
def _symbol_set():
"""Returns set of characters that should be supported in Noto Symbols."""
ranges = unicode_data._parse_code_ranges(noto_data.SYMBOL_RANGES_TXT)
return _code_range_to_set(ranges)
def _math_set():
"""Returns set of characters that should be supported in Noto Math."""
ranges = unicode_data._parse_code_ranges(noto_data.MATH_RANGES_TXT)
return _code_range_to_set(ranges)
def _cjk_set():
"""Returns set of characters that will be provided in CJK fonts."""
ranges = unicode_data._parse_code_ranges(noto_data.CJK_RANGES_TXT)
return _code_range_to_set(ranges)
def _emoji_pua_set():
"""Returns the legacy PUA characters required for Android emoji."""
return lint_config.parse_int_ranges('FE4E5-FE4EE FE82C FE82E-FE837')
def _get_script_required(
script, unicode_version, noto_phase, unicode_only=False, verbose=False):
needed_chars = set()
if script == 'Zsye': # Emoji
# TODO: Check emoji coverage
if not unicode_only:
needed_chars = _emoji_pua_set() # legacy PUA for android emoji
elif script == 'Zmth': # Math
if not unicode_only:
needed_chars = _math_set()
elif script == 'Zsym': # Symbols
if not unicode_only:
needed_chars = _symbol_set()
elif script == 'LGC':
needed_chars = (
unicode_data.defined_characters(scr='Latn', version=unicode_version)
| unicode_data.defined_characters(scr='Grek', version=unicode_version)
| unicode_data.defined_characters(scr='Cyrl', version=unicode_version))
if not unicode_only:
needed_chars -= _symbol_set()
needed_chars -= _cjk_set()
elif script == "Aran":
if unicode_only:
needed_chars = unicode_data.defined_characters(
scr='Arab', version=unicode_version)
else:
needed_chars = noto_data.urdu_set()
elif script in ['Hans', 'Hant', 'Jpan', 'Kore']:
needed_chars = _cjk_set()
else:
needed_chars = unicode_data.defined_characters(
scr=script, version=unicode_version)
if not unicode_only:
needed_chars -= _symbol_set()
if not unicode_only:
needed_chars |= noto_data.get_extra_characters_needed(script, noto_phase)
try:
needed_chars |= set(opentype_data.SPECIAL_CHARACTERS_NEEDED[script])
except KeyError:
pass
needed_chars -= noto_data.get_characters_not_needed(script, noto_phase)
if not unicode_only:
needed_chars |= set([0, 0xd, 0x20])
if verbose:
print >> sys.stderr, script,
needed_chars &= unicode_data.defined_characters(version=unicode_version)
return needed_chars
def _required_unicode_version(noto_font, noto_phase):
if noto_font.family != 'Noto': # e.g. Arimo, Cousine, Tinos
return 8.0
if noto_phase == 2:
return 6.0
return 9.0
def _compute_required_chars(noto_font, noto_phase):
unicode_version = _required_unicode_version(noto_font, noto_phase)
needed_chars = _get_script_required(
noto_font.script, unicode_version, noto_phase)
return frozenset(needed_chars)
_REQUIRED_CACHE = {}
def get_required_chars(noto_font, phase):
# Required characters must only depend on family, script, variant, and phase
key = '_'.join(filter(None, [
noto_font.family, noto_font.script, noto_font.variant, str(phase)]))
result = _REQUIRED_CACHE.get(key, None)
if not result:
result = _compute_required_chars(noto_font, phase)
_REQUIRED_CACHE[key] = result
return result
def _check_scripts(scripts):
# TODO(dougfelt): something realer
bad_scripts = []
for script in scripts:
if script[0] < 'A' or script[0] > 'Z':
bad_scripts.append(script)
if bad_scripts:
print 'bad scripts: %s' % ', '.join(bad_scripts)
raise ValueError('bad scripts')
return set(scripts)
def get_cmap_data(scripts, unicode_version, noto_phase, unicode_only, verbose):
metadata = cmap_data.create_metadata('lint_cmap_reqs', [
('unicode_version', unicode_version),
('phase', noto_phase),
('unicode_only', unicode_only)])
tabledata = cmap_data.create_table_from_map({
script : _get_script_required(
script, unicode_version, noto_phase, unicode_only, verbose)
for script in sorted(scripts)
})
return cmap_data.CmapData(metadata, tabledata)
def main():
DEFAULT_UNICODE_VERSION = 9.0
parser = argparse.ArgumentParser()
parser.add_argument(
'--scripts', help='list of pseudo-script codes, empty for all '
'phase 2 scripts', metavar='code', nargs='*')
parser.add_argument(
'--unicode_version', help='version of unicode to use (default %s)' %
DEFAULT_UNICODE_VERSION, metavar='version', type=float,
default=DEFAULT_UNICODE_VERSION)
parser.add_argument(
'--unicode_only', help='only use unicode data, not noto-specific data',
action='store_true')
parser.add_argument(
'-p', '--phase', help='noto phase (default 3)',
metavar='phase', type=int, default=3)
parser.add_argument(
'--outfile', help='write to output file, otherwise to stdout',
metavar='fname', nargs='?', const='-default-')
parser.add_argument(
'--verbose', help='log to stderr as each script is complete',
action='store_true')
args = parser.parse_args()
if not args.scripts:
scripts = set(s.strip() for s in _PHASE_TWO_SCRIPTS.split(','))
else:
scripts = _check_scripts(args.scripts)
cmapdata = get_cmap_data(
scripts, args.unicode_version, args.phase, args.unicode_only,
args.verbose)
if args.outfile:
if args.outfile == '-default-':
args.outfile = 'lint_cmap_%s.xml' % args.unicode_version
print >> sys.stderr, 'writing %s' % args.outfile
cmap_data.write_cmap_data_file(cmapdata, args.outfile, pretty=True)
else:
print cmap_data.write_cmap_data(cmapdata, pretty=True)
if __name__ == "__main__":
main()
|
{
"content_hash": "8866267ba97be60de2ede955a88f861c",
"timestamp": "",
"source": "github",
"line_count": 204,
"max_line_length": 79,
"avg_line_length": 33.80392156862745,
"alnum_prop": 0.6690835266821346,
"repo_name": "dougfelt/nototools",
"id": "93b3b2b0ad66c34b7e852abfdfeaf233eaf97b73",
"size": "7517",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nototools/lint_cmap_reqs.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "665"
},
{
"name": "HTML",
"bytes": "620"
},
{
"name": "Makefile",
"bytes": "3666"
},
{
"name": "Python",
"bytes": "1158086"
},
{
"name": "Shell",
"bytes": "14586"
}
],
"symlink_target": ""
}
|
from django.conf.urls import url
from personas.views import Personas,EditarContrato,CrearContrato,EditarPersona, ReportePersonasExcel, Bienvenida, DetalleContrato,\
ModificarContrato,ReporteEDPExcel,ReporteODCExcel,ficha,DetalleEdp,ModificarEdp,BorrarEdp,ModificarOdc,DetalleOdc,BorrarOdc,ReporteFiniquito,\
crear_docODC,crear_docCtto,crear_docEDP, Crear_Personalproy, Crear_Personalctta,ReporteITEMExcel,ReporteCommitmentItem,ReporteCttoWSheet,Crear_AdminProy,\
ReportePlanSoleSource,ReporteListaCttos
from . import models
from . import views
urlpatterns = [
#url(r'^',include('seguridad.urls',namespace='seguridad')),
url(r'^$',views.Bienvenida.as_view(), name="bienvenida"),
url(r'^crear_persona/$',views.CrearContrato.as_view(), name="crear_contrato"),
url(r'^crear_persona/busqueda_ajax/$',views.BusquedaAjaxView.as_view(), name="Busqueda_ajax"),
url(r'^crear_contratista/$',views.CrearContratista.as_view(), name="crear_contratista"),
url(r'^crear_docCtto/(?P<id_ctto>\d+)/$',views.crear_docCtto.as_view(), name="crear_docCtto"),
url(r'^crear_personalproy/$',views.Crear_Personalproy.as_view(), name="crear_personalproy"),
url(r'^crear_personalctta/$',views.Crear_Personalctta.as_view(), name="crear_personalctta"),
url(r'^Crear_AdminProy/$',views.Crear_AdminProy.as_view(), name="Crear_AdminProy"),
url(r'^personas/$',Personas.as_view(), name="personas"),
url(r'^editar_contrato/(?P<id_ctto>[^/]+)$',views.EditarContrato, name="EditarContrato"),
url(r'^ficha/$',ficha.as_view(), name="ficha"),
#url(r'^detail/(?P<dni>[-\w]+)/$',views.EditarPersona.as_view(), name="editar_persona"),
url(r'^detail/(?P<id_Persona>\d+)/$',views.EditarPersona.as_view(), name="editar_persona"),
url(r'^reporte_personas_excel/$',ReportePersonasExcel.as_view(), name="reporte_personas_excel"),
url(r'^reporte_edp_excel/$',ReporteEDPExcel.as_view(), name="reporte_edp_excel"),
url(r'^reporte_odc_excel/$',ReporteODCExcel.as_view(), name="reporte_odc_excel"),
url(r'^reporte_item_excel/$',ReporteITEMExcel.as_view(), name="reporte_item_excel"),
url(r'^reporte_commitment_item/$',ReporteCommitmentItem.as_view(), name="reporte_commitment_item"),
url(r'^reporte_plan_solesource/$',ReportePlanSoleSource.as_view(), name="reporte_plan_solesource"),
url(r'^detalle_persona/(?P<pk>\d+)/$', DetalleContrato.as_view(), name="detalle_contrato"),
url(r'^modificar_persona/(?P<pk>\d+)/$',ModificarContrato.as_view(), name="modificar_contrato"),
url(r'^WS_contrato/(?P<id_ctto>\d+)/$',ReporteCttoWSheet.as_view(), name="WS_contrato"),
url(r'^Lista_contrato/$',ReporteListaCttos.as_view(), name="Lista_contrato"),
url(r'^crear_edp/(?P<id_ctto>\d+)/$',views.CrearEdp.as_view(), name="crear_edp"),
url(r'^detalle_edp/(?P<pk>\d+)/$', DetalleEdp.as_view(), name="detalle_edp"),
url(r'^modificar_edp/(?P<pk>\d+)/$',ModificarEdp.as_view(), name="modificar_edp"),
url(r'^borrar_edp/(?P<pk>\d+)/$',BorrarEdp.as_view(), name="borrar_edp"),
url(r'^crear_docEDP/(?P<pk>\d+)/$',views.crear_docEDP.as_view(), name="crear_docEDP"),
url(r'^crear_odc/(?P<id_ctto>\d+)/$',views.CrearOdc.as_view(), name="crear_odc"),
url(r'^detalle_odc/(?P<pk>\d+)/$', DetalleOdc.as_view(), name="detalle_odc"),
url(r'^modificar_odc/(?P<pk>\d+)/$',ModificarOdc.as_view(), name="modificar_odc"),
url(r'^borrar_odc/(?P<pk>\d+)/$',BorrarOdc.as_view(), name="borrar_odc"),
url(r'^crear_docODC/(?P<id_odc>\d+)/$',views.crear_docODC.as_view(), name="crear_docODC"),
url(r'^reporte_finiquito/(?P<id_ctto>\d+)/$',ReporteFiniquito.as_view(), name="reporte_finiquito"),
url(r'^polls/$', views.upload, name='uplink'),
url(r'^polls/import/', views.import_data, name="import"),
url(r'^polls/import_EDP/', views.import_EDP_ODC, name="import_EDP_ODC"),
url(r'^polls/export/(.*)', views.export_data, name="export"),
url(r'^polls/import_sheet/', views.import_sheet, name="import_sheet"),
url(r'^ctto/export/', views.export_r5, name="export_r5"),
url(r'^prueba/',views.prueba, name="prueba"),
]
|
{
"content_hash": "804f667a9758fb6b331dd90801031c29",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 154,
"avg_line_length": 54.733333333333334,
"alnum_prop": 0.6803897685749086,
"repo_name": "Ykharo/tutorial_P3_4",
"id": "888e3bc8a6269467ce87a54c5bf8c65ff8bcddae",
"size": "4105",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "personas/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "994832"
},
{
"name": "HTML",
"bytes": "2256197"
},
{
"name": "JavaScript",
"bytes": "464559"
},
{
"name": "Python",
"bytes": "231736"
}
],
"symlink_target": ""
}
|
class DitError(Exception):
pass
class ArgumentError(DitError):
pass
class NoTaskSpecifiedError(DitError):
pass
class SubprocessError(Exception):
pass
def maybe_raise_unrecognized_argument(argv):
if len(argv) > 0:
raise ArgumentError("Unrecognized argument: %s" % argv[0])
|
{
"content_hash": "548b7fb035e97d638496fc65b8b0a87e",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 66,
"avg_line_length": 16.210526315789473,
"alnum_prop": 0.7045454545454546,
"repo_name": "filipelbc/dit",
"id": "b16b0b81ba1436a058616e8384d04329b3dd0d34",
"size": "334",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dit/exceptions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1333"
},
{
"name": "Python",
"bytes": "73187"
},
{
"name": "Shell",
"bytes": "9862"
}
],
"symlink_target": ""
}
|
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts.py [comm]\n";
for_comm = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
raw_syscalls__sys_enter(**locals())
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
print "%-40s %10d\n" % (syscall_name(id), val),
|
{
"content_hash": "5094d2765923297e81b3becd84627ec8",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 77,
"avg_line_length": 25.017543859649123,
"alnum_prop": 0.6129032258064516,
"repo_name": "KristFoundation/Programs",
"id": "e66a7730aeb5de966b25d9f6401188d0db7c305b",
"size": "1700",
"binary": false,
"copies": "1949",
"ref": "refs/heads/master",
"path": "luaide/tools/perf/scripts/python/syscall-counts.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "10201036"
},
{
"name": "Awk",
"bytes": "30879"
},
{
"name": "C",
"bytes": "539626448"
},
{
"name": "C++",
"bytes": "3413466"
},
{
"name": "Clojure",
"bytes": "1570"
},
{
"name": "Cucumber",
"bytes": "4809"
},
{
"name": "Groff",
"bytes": "46837"
},
{
"name": "Lex",
"bytes": "55541"
},
{
"name": "Lua",
"bytes": "59745"
},
{
"name": "Makefile",
"bytes": "1601043"
},
{
"name": "Objective-C",
"bytes": "521706"
},
{
"name": "Perl",
"bytes": "730609"
},
{
"name": "Perl6",
"bytes": "3783"
},
{
"name": "Python",
"bytes": "296036"
},
{
"name": "Shell",
"bytes": "357961"
},
{
"name": "SourcePawn",
"bytes": "4687"
},
{
"name": "UnrealScript",
"bytes": "12797"
},
{
"name": "XS",
"bytes": "1239"
},
{
"name": "Yacc",
"bytes": "115572"
}
],
"symlink_target": ""
}
|
"""Hook for Google Drive service"""
from typing import IO, Any, Optional, Sequence, Union
from googleapiclient.discovery import Resource, build
from googleapiclient.http import HttpRequest, MediaFileUpload
from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
class GoogleDriveHook(GoogleBaseHook):
"""
Hook for the Google Drive APIs.
:param api_version: API version used (for example v3).
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account.
"""
_conn = None # type: Optional[Resource]
def __init__(
self,
api_version: str = "v3",
gcp_conn_id: str = "google_cloud_default",
delegate_to: Optional[str] = None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
) -> None:
super().__init__(
gcp_conn_id=gcp_conn_id,
delegate_to=delegate_to,
impersonation_chain=impersonation_chain,
)
self.api_version = api_version
def get_conn(self) -> Any:
"""
Retrieves the connection to Google Drive.
:return: Google Drive services object.
"""
if not self._conn:
http_authorized = self._authorize()
self._conn = build("drive", self.api_version, http=http_authorized, cache_discovery=False)
return self._conn
def _ensure_folders_exists(self, path: str) -> str:
service = self.get_conn()
current_parent = "root"
folders = path.split("/")
depth = 0
# First tries to enter directories
for current_folder in folders:
self.log.debug("Looking for %s directory with %s parent", current_folder, current_parent)
conditions = [
"mimeType = 'application/vnd.google-apps.folder'",
f"name='{current_folder}'",
f"'{current_parent}' in parents",
]
result = (
service.files()
.list(q=" and ".join(conditions), spaces="drive", fields="files(id, name)")
.execute(num_retries=self.num_retries)
)
files = result.get("files", [])
if not files:
self.log.info("Not found %s directory", current_folder)
# If the directory does not exist, break loops
break
depth += 1
current_parent = files[0].get("id")
# Check if there are directories to process
if depth != len(folders):
# Create missing directories
for current_folder in folders[depth:]:
file_metadata = {
"name": current_folder,
"mimeType": "application/vnd.google-apps.folder",
"parents": [current_parent],
}
file = (
service.files()
.create(body=file_metadata, fields="id")
.execute(num_retries=self.num_retries)
)
self.log.info("Created %s directory", current_folder)
current_parent = file.get("id")
# Return the ID of the last directory
return current_parent
def get_media_request(self, file_id: str) -> HttpRequest:
"""
Returns a get_media http request to a Google Drive object.
:param file_id: The Google Drive file id
:return: request
:rtype: HttpRequest
"""
service = self.get_conn()
request = service.files().get_media(fileId=file_id)
return request
def exists(self, folder_id: str, file_name: str, drive_id: Optional[str] = None):
"""
Checks to see if a file exists within a Google Drive folder
:param folder_id: The id of the Google Drive folder in which the file resides
:param file_name: The name of a file in Google Drive
:param drive_id: Optional. The id of the shared Google Drive in which the file resides.
:return: True if the file exists, False otherwise
:rtype: bool
"""
return bool(self.get_file_id(folder_id=folder_id, file_name=file_name, drive_id=drive_id))
def get_file_id(self, folder_id: str, file_name: str, drive_id: Optional[str] = None):
"""
Returns the file id of a Google Drive file
:param folder_id: The id of the Google Drive folder in which the file resides
:param file_name: The name of a file in Google Drive
:param drive_id: Optional. The id of the shared Google Drive in which the file resides.
:return: Google Drive file id if the file exists, otherwise None
:rtype: str if file exists else None
"""
query = f"name = '{file_name}'"
if folder_id:
query += f" and parents in '{folder_id}'"
service = self.get_conn()
if drive_id:
files = (
service.files()
.list(
q=query,
spaces="drive",
fields="files(id, mimeType)",
orderBy="modifiedTime desc",
driveId=drive_id,
includeItemsFromAllDrives=True,
supportsAllDrives=True,
corpora="drive",
)
.execute(num_retries=self.num_retries)
)
else:
files = (
service.files()
.list(q=query, spaces="drive", fields="files(id, mimeType)", orderBy="modifiedTime desc")
.execute(num_retries=self.num_retries)
)
file_metadata = {}
if files['files']:
file_metadata = {"id": files['files'][0]['id'], "mime_type": files['files'][0]['mimeType']}
return file_metadata
def upload_file(
self,
local_location: str,
remote_location: str,
chunk_size: int = 100 * 1024 * 1024,
resumable: bool = False,
) -> str:
"""
Uploads a file that is available locally to a Google Drive service.
:param local_location: The path where the file is available.
        :param remote_location: The path where the file will be sent
:param chunk_size: File will be uploaded in chunks of this many bytes. Only
used if resumable=True. Pass in a value of -1 if the file is to be
uploaded as a single chunk. Note that Google App Engine has a 5MB limit
on request size, so you should never set your chunk size larger than 5MB,
or to -1.
:param resumable: True if this is a resumable upload. False means upload
in a single request.
:return: File ID
:rtype: str
"""
service = self.get_conn()
directory_path, _, file_name = remote_location.rpartition("/")
if directory_path:
parent = self._ensure_folders_exists(directory_path)
else:
parent = "root"
file_metadata = {"name": file_name, "parents": [parent]}
media = MediaFileUpload(local_location, chunksize=chunk_size, resumable=resumable)
file = (
service.files()
.create(body=file_metadata, media_body=media, fields="id", supportsAllDrives=True)
.execute(num_retries=self.num_retries)
)
self.log.info("File %s uploaded to gdrive://%s.", local_location, remote_location)
return file.get("id")
def download_file(self, file_id: str, file_handle: IO, chunk_size: int = 100 * 1024 * 1024):
"""
Download a file from Google Drive.
:param file_id: the id of the file
:param file_handle: file handle used to write the content to
:param chunk_size: File will be downloaded in chunks of this many bytes.
"""
request = self.get_media_request(file_id=file_id)
self.download_content_from_request(file_handle=file_handle, request=request, chunk_size=chunk_size)
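# Usage sketch (illustrative, not part of the hook): assumes an Airflow
# deployment with a configured "google_cloud_default" connection; the local
# path, Drive path and impersonated account below are hypothetical.
#
#   hook = GoogleDriveHook(api_version="v3", impersonation_chain="sa@project.iam.gserviceaccount.com")
#   file_id = hook.upload_file(
#       local_location="/tmp/report.csv",
#       remote_location="reports/2021/report.csv",
#   )
#   if hook.exists(folder_id="root", file_name="report.csv"):
#       with open("/tmp/copy.csv", "wb") as handle:
#           hook.download_file(file_id=file_id, file_handle=handle)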
|
{
"content_hash": "2a25839b6382b54eb542675407422e61",
"timestamp": "",
"source": "github",
"line_count": 214,
"max_line_length": 107,
"avg_line_length": 41.41121495327103,
"alnum_prop": 0.5854208982171067,
"repo_name": "lyft/incubator-airflow",
"id": "94390503aae67b1849dcd69c949e50ec9ca30646",
"size": "9649",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "airflow/providers/google/suite/hooks/drive.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13715"
},
{
"name": "Dockerfile",
"bytes": "17280"
},
{
"name": "HTML",
"bytes": "161328"
},
{
"name": "JavaScript",
"bytes": "25360"
},
{
"name": "Jinja",
"bytes": "8565"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "10019710"
},
{
"name": "Shell",
"bytes": "220780"
}
],
"symlink_target": ""
}
|
import time
import praw
import re
import urllib2
import signal, sys
# This string is sent by praw to reddit in accordance to the API rules
user_agent = ("REDDIT Bot v1.4 by /u/USERNAME")
r = praw.Reddit(user_agent=user_agent)
# Fill in the bot's username and password here
username = "USERNAME"
password = "PASSWORD"
r.login(username, password)
# Fill in the subreddit(s) here. Multisubs are done with + (e.g. MagicTCG+EDH)
subreddit = r.get_subreddit('INSERT_SUBREDDITS')
# This loads the already parsed comments from a backup text file
already_done = []
with open('magictcg_done.txt', 'r') as f:
for i in f:
already_done.append(i.replace("\n", ""))
# Function that does all the magic
def bot_comments():
ids = []
sub_comments = subreddit.get_comments()
for comment in sub_comments:
ids.append(comment.id)
        # Skip comments that were already handled or that the bot itself posted (its signature contains [[CARDNAME]])
if comment.id not in already_done and not str(comment.author) == username:
            # Regex magic that finds the text enclosed in [[ ]]
cards = re.findall("\[\[([^\[\]]*)\]\]", comment.body)
reply = ""
# Because a comment can only have a max length, limit to only the first 30 requests
if len(cards) > 30: cards = cards[0:30]
# Set removes any duplicates
for i in set(cards):
print i
i = i.split('/')[0]
                # Converts obscure characters like AE to URL-safe text
j = urllib2.quote(i.encode('utf-8'))
# Checks if a card exists
card_id = card_check(i, j)
if card_id:
# Builds the post
reply += "[%s](http://gatherer.wizards.com/Handlers/Image.ashx?multiverseid=%s&type=card&.jpg)" % (i, card_id)
reply += " - "
reply += "[Gatherer](http://gatherer.wizards.com/Pages/Card/Details.aspx?name=%s)" % j
reply += ", [MagicCards](http://magiccards.info/query?q=!%s)" % j
reply += "\n\n"
# If a post was built before, complete it and post it to reddit
if reply:
reply += "^^Questions? ^^Message ^^/u/CREATOR ^^- ^^Call ^^cards ^^with ^^[[CARDNAME]] ^^- ^^Format: ^^Image ^^- ^^URL ^^to ^^Gatherer"
# Possible advice text to advice using "AutocardAnywhere" instead
#reply += "\n\n^^^Try ^^^the ^^^browser ^^^plugin ^^^'AutocardAnywhere' ^^^instead ^^^of ^^^the ^^^bot: ^^^Personal ^^^card-links!"
# Posting might fail (too long, ban, reddit down etc), so cancel the post and print the error
try:
comment.reply(reply)
except Exception,e: print str(e)
# Add the post to the list of parsed comments
already_done.append(comment.id)
    # Finally, return the list of parsed comments (separate from already_done)
return ids
# This function is nearly the same as comment parsing, except it takes submissions (should be combined later)
def bot_submissions():
sub_ids = []
sub_subs = subreddit.get_new(limit=5)
for submission in sub_subs:
sub_ids.append(submission.id)
if submission.id not in already_done:
cards = re.findall("\[\[([^\[\]]*)\]\]", submission.selftext)
reply = ""
if len(cards) > 30: cards = cards[0:30]
for i in set(cards):
print i
i = i.split('/')[0]
j = urllib2.quote(i.encode('utf-8'))
card_id = card_check(i, j)
if card_id:
reply += "[%s](http://gatherer.wizards.com/Handlers/Image.ashx?multiverseid=%s&type=card&.jpg)" % (i, card_id)
reply += " - "
reply += "[Gatherer](http://gatherer.wizards.com/Pages/Card/Details.aspx?name=%s)" % j
reply += ", [MagicCards](http://magiccards.info/query?q=!%s)" % j
reply += "\n\n"
if reply:
reply += "^^Questions? ^^Message ^^/u/xslicer ^^- ^^Call ^^cards ^^with ^^[[CARDNAME]] ^^- ^^Format: ^^Image ^^- ^^URL ^^to ^^Gatherer"
try:
submission.add_comment(reply)
except Exception,e: print str(e)
already_done.append(submission.id)
return sub_ids
# Function that checks if the requested card exists and returns the card id (the card id is unnecessary
# for linking, since Gatherer will also link the image by its name, but it is still useful
# for checking that the card exists).
def card_check(card, enc_card):
try:
# Opens the Gatherer page and looks for the card ID with Regex - Replaces & because it breaks URLs
page = urllib2.urlopen("http://gatherer.wizards.com/Pages/Card/Details.aspx?name=%s" % enc_card.replace("&", "%26")).read()
return re.search("multiverseid=([0-9]*)", page).group(1)
except AttributeError:
print "ERROR"
return False
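# Usage sketch (network-dependent; the card name is illustrative):
#   card_check('Lightning Bolt', urllib2.quote('Lightning Bolt'))
#   returns the card's multiverse id as a string, or False when Gatherer has no match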
# Function that backs up current parsed comments
def write_done():
with open("magictcg_done.txt", "w") as f:
for i in already_done:
f.write(str(i) + '\n')
# Function that is called when ctrl-c is pressed. It backups the current parsed comments into a backup file and then quits.
def signal_handler(signal, frame):
write_done()
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
# Infinite loop that calls the function. The function outputs the post-ID's of all parsed comments.
# The ID's of parsed comments is compared with the already parsed comments so the list stays clean
# and memory is not increased. It sleeps for 15 seconds to wait for new posts.
while True:
ids = bot_comments()
time.sleep(5)
sub_ids = bot_submissions()
new_done = []
# Checks for both comments and submissions
for i in already_done:
if i in ids:
new_done.append(i)
if i in sub_ids:
new_done.append(i)
already_done = new_done[:]
# Back up the parsed comments to a file
write_done()
time.sleep(10)
|
{
"content_hash": "f7e78366c1d58230f8fd91caede39291",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 151,
"avg_line_length": 45.29197080291971,
"alnum_prop": 0.5866236905721193,
"repo_name": "XSlicer/RedditMTGBot",
"id": "130140aa828ef217c3422adc901c837234023e27",
"size": "6295",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "redditbot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6295"
},
{
"name": "Shell",
"bytes": "135"
}
],
"symlink_target": ""
}
|
"""
Presence analyzer.
"""
from .main import APP
from . import views
|
{
"content_hash": "ffd4dc94e2bfe3862c6bbfbae29e55f0",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 21,
"avg_line_length": 11.666666666666666,
"alnum_prop": 0.6857142857142857,
"repo_name": "stxnext-kindergarten/presence-analyzer-gderdak",
"id": "c60a4dfa84f49b326aad699948a14aa61086e4fa",
"size": "94",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/presence_analyzer/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "990"
},
{
"name": "HTML",
"bytes": "9776"
},
{
"name": "Python",
"bytes": "31643"
}
],
"symlink_target": ""
}
|
import os
from django.contrib.auth.decorators import login_required
from django.core.exceptions import ValidationError
from django.http.response import HttpResponse
from django.shortcuts import render, redirect
from WhatManager2 import whatimg
import bibliotik.settings
import WhatManager2.settings
from books import settings, utils, what_upload
from books.models import BookUploadForm, BookUpload
@login_required
def uploads(request):
books = BookUpload.objects.defer(*BookUpload.binary_fields).order_by('-added').all()
data = {
'books': books
}
return render(request, 'books/uploads.html', data)
@login_required
def new_upload(request):
errors = []
if request.method == 'POST':
if 'ebook' in request.FILES and 'opf' in request.FILES and 'cover' in request.FILES:
if os.path.splitext(request.FILES['ebook'].name)[1] not in ['.azw3', '.epub', '.pdf']:
errors.append('What is this ebook extension?')
elif os.path.splitext(request.FILES['opf'].name)[1] != '.opf':
errors.append('What is this OPF?')
elif os.path.splitext(request.FILES['cover'].name)[1] not in ['.jpg', '.jpeg']:
errors.append('What is this cover?')
else:
book_upload = BookUpload()
book_upload.book_data = request.FILES['ebook']
book_upload.opf_data = request.FILES['opf'].read().decode('utf-8')
book_upload.cover_data = request.FILES['cover'].read()
book_upload.format = os.path.splitext(request.FILES['ebook'].name)[1][1:].upper()
book_upload.save()
return redirect('books.views.edit_upload', book_upload.id)
else:
errors.append('Please upload the ebook, the OPF and the cover')
data = {
'errors': errors
}
return render(request, 'books/new_upload.html', data)
@login_required
def edit_upload(request, upload_id):
book_upload = BookUpload.objects.defer(*BookUpload.binary_fields).extra(
select={
'has_bibliotik_torrent': '`bibliotik_torrent_file` IS NOT NULL '
'AND OCTET_LENGTH(`bibliotik_torrent_file`) != 0',
'has_what_torrent': '`what_torrent_file` IS NOT NULL '
'AND OCTET_LENGTH(`what_torrent_file`) != 0',
}).get(id=upload_id)
if book_upload.title is None:
book_upload.populate_from_opf()
book_upload.save()
if request.method == 'POST':
form = BookUploadForm(request.POST, instance=book_upload)
if form.is_valid():
form.save()
else:
form = BookUploadForm(instance=book_upload)
try:
book_upload.full_clean()
book_upload.is_valid = True
except ValidationError:
book_upload.is_valid = False
data = {
'book': book_upload,
'form': form
}
return render(request, 'books/edit_upload.html', data)
@login_required
def upload_cover(request, upload_id):
book_upload = BookUpload.objects.only('cover_data').get(id=upload_id)
return HttpResponse(book_upload.cover_data, content_type='image/jpeg')
@login_required
def upload_cover_upload(request, upload_id):
book_upload = BookUpload.objects.only('cover_url', 'cover_data').get(id=upload_id)
if not book_upload.cover_url:
whatimg_url = whatimg.upload_image_from_memory(book_upload.cover_data)
book_upload.cover_url = whatimg_url
book_upload.save()
return redirect('books.views.edit_upload', upload_id)
@login_required
def upload_generate_torrents(request, upload_id):
book_upload = BookUpload.objects.get(id=upload_id)
target_temp_filename = book_upload.book_data.storage.path(book_upload.book_data)
torrent_temp_filename = os.path.join(settings.UPLOAD_TEMP_DIR,
os.path.splitext(book_upload.target_filename)[
0] + '.torrent')
if book_upload.bibliotik_torrent_file is None:
utils.call_mktorrent(target_temp_filename,
torrent_temp_filename,
bibliotik.settings.BIBLIOTIK_ANNOUNCE,
book_upload.target_filename)
with open(torrent_temp_filename, 'rb') as file:
book_upload.bibliotik_torrent_file = file.read()
book_upload.save()
os.remove(torrent_temp_filename)
if book_upload.what_torrent_file is None:
utils.call_mktorrent(target_temp_filename,
torrent_temp_filename,
WhatManager2.settings.WHAT_ANNOUNCE,
book_upload.target_filename)
with open(torrent_temp_filename, 'rb') as file:
book_upload.what_torrent_file = file.read()
book_upload.save()
os.remove(torrent_temp_filename)
return redirect('books.views.edit_upload', upload_id)
@login_required
def upload_to_what(request, upload_id):
book_upload = BookUpload.objects.get(id=upload_id)
what_upload.upload_to_what(request, book_upload)
return redirect(request.GET['return'])
@login_required
def skip_what(request, upload_id):
book_upload = BookUpload.objects.defer(*BookUpload.binary_fields).get(id=upload_id)
book_upload.what_torrent_id = 0
book_upload.save()
return redirect('books.views.edit_upload', upload_id)
@login_required
def skip_bibliotik(request, upload_id):
book_upload = BookUpload.objects.defer(*BookUpload.binary_fields).get(id=upload_id)
book_upload.bibliotik_torrent_id = 0
book_upload.save()
return redirect('books.views.edit_upload', upload_id)
|
{
"content_hash": "977ebd36cc94e3de5593d6a04255c564",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 98,
"avg_line_length": 37.73026315789474,
"alnum_prop": 0.6284219703574542,
"repo_name": "davols/WhatManager2",
"id": "72193895d3173eaa4b6ba57ad7aef83e37ae66f7",
"size": "5735",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "books/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "202604"
},
{
"name": "JavaScript",
"bytes": "711008"
},
{
"name": "Python",
"bytes": "310036"
},
{
"name": "Shell",
"bytes": "953"
}
],
"symlink_target": ""
}
|
import pickle
man = []
other = []
try:
with open('sketch.txt') as data:
for each_line in data:
try:
(role, line_spoken) = each_line.split(':',1)
line_spoken = line_spoken.strip()
                if role == 'Man':
                    man.append(line_spoken)
                elif role == 'Other Man':
                    other.append(line_spoken)
except ValueError:
pass
except IOError as err:
print('File Error:' + str(err))
print(man)
print(other)
try:
with open('man_file.pickle',mode='wb') as man_file, open('other_file.pickle', mode='wb') as other_file:
pickle.dump(man,man_file)
pickle.dump(other, other_file)
except pickle.PickleError as ex:
print('Pickle Error:' + str(ex))
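# Round-trip check (a sketch): reload the pickled lists and confirm they match
try:
    with open('man_file.pickle', mode='rb') as man_file, open('other_file.pickle', mode='rb') as other_file:
        print(pickle.load(man_file) == man)
        print(pickle.load(other_file) == other)
except (IOError, pickle.PickleError) as ex:
    print('Reload Error: ' + str(ex))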
|
{
"content_hash": "1d45f2d8b3c394dd6f3d0f9723a6f4e6",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 107,
"avg_line_length": 29.392857142857142,
"alnum_prop": 0.5188335358444714,
"repo_name": "tdean1995/HFPythonSandbox",
"id": "30062583a2a464eca1d287f671aad915ba113864",
"size": "823",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ch4/ch4_1.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "5893"
},
{
"name": "Python",
"bytes": "103816"
}
],
"symlink_target": ""
}
|
"""
Created on Tue Nov 03 13:06:56 2015
@author: Eunice
"""
if __name__ == '__main__':
import sys
sys.path.append("..")
from pyalgotrade import bar
from pyalgotrade import plotter
    # The modules above are for testing only
from pyalgotrade.broker.fillstrategy import DefaultStrategy
from pyalgotrade.broker.backtesting import TradePercentage
from pyalgotrade import strategy
from pyalgotrade.technical import ma
from pyalgotrade.technical import cross
class thrSMA(strategy.BacktestingStrategy):
def __init__(self, feed, instrument, short_l, mid_l, long_l, up_cum):
strategy.BacktestingStrategy.__init__(self, feed)
self.__instrument = instrument
self.getBroker().setFillStrategy(DefaultStrategy(None))
self.getBroker().setCommission(TradePercentage(0.001))
self.__position = None
self.__prices = feed[instrument].getPriceDataSeries()
self.__malength1 = int(short_l)
self.__malength2 = int(mid_l)
self.__malength3 = int(long_l)
self.__circ = int(up_cum)
self.__ma1 = ma.SMA(self.__prices, self.__malength1)
self.__ma2 = ma.SMA(self.__prices, self.__malength2)
self.__ma3 = ma.SMA(self.__prices, self.__malength3)
def getPrice(self):
return self.__prices
def getSMA(self):
return self.__ma1,self.__ma2, self.__ma3
def onEnterCanceled(self, position):
self.__position = None
def onEnterOK(self):
pass
def onExitOk(self, position):
self.__position = None
#self.info("long close")
def onExitCanceled(self, position):
self.__position.exitMarket()
def buyCon1(self):
if cross.cross_above(self.__ma1, self.__ma2) > 0:
return True
def buyCon2(self):
m1 = 0
m2 = 0
for i in range(self.__circ):
if self.__ma1[-i-1] > self.__ma3[-i-1]:
m1 += 1
if self.__ma2[-i-1] > self.__ma3[-i-1]:
m2 += 1
if m1 >= self.__circ and m2 >= self.__circ:
return True
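    # Worked example (illustrative): with up_cum == 3, buyCon2 returns True only
    # when both the short and mid SMAs have stayed above the long SMA on each of
    # the last 3 bars, i.e. a confirmation filter on top of the crossover signal.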
def sellCon1(self):
if cross.cross_below(self.__ma1, self.__ma2) > 0:
return True
def onBars(self, bars):
# If a position was not opened, check if we should enter a long position.
        if self.__ma2[-1] is None:
return
if self.__position is not None:
if not self.__position.exitActive() and cross.cross_below(self.__ma1, self.__ma2) > 0:
self.__position.exitMarket()
#self.info("sell %s" % (bars.getDateTime()))
if self.__position is None:
if self.buyCon1() and self.buyCon2():
shares = int(self.getBroker().getCash() * 0.2 / bars[self.__instrument].getPrice())
self.__position = self.enterLong(self.__instrument, shares)
print bars[self.__instrument].getDateTime(), bars[self.__instrument].getPrice()
#self.info("buy %s" % (bars.getDateTime()))
if __name__ == "__main__":
strat = thrSMA
instrument = '000001'
market = 'SZ'
fromDate = '20140101'
toDate ='20160101'
frequency = bar.Frequency.DAY
paras = [2, 20, 60, 10]
plot = True
    #############################################path set ############################
import sys,os
if frequency == bar.Frequency.MINUTE:
path = os.path.abspath(os.path.join(os.path.dirname(__file__),os.pardir,os.pardir,os.pardir,'histdata','min'))
elif frequency == bar.Frequency.DAY:
path = os.path.abspath(os.path.join(os.path.dirname(__file__),os.pardir,os.pardir,os.pardir,'histdata','day'))
filepath = path +os.sep + instrument + market + ".csv"
    #############################################don't change ############################
from pyalgotrade.barfeed.csvfeed import GenericBarFeed
barfeed = GenericBarFeed(frequency)
barfeed.setDateTimeFormat('%Y-%m-%d %H:%M:%S')
barfeed.addBarsFromCSV(instrument, filepath)
strat = strat(barfeed, instrument, *paras)
if plot:
plt = plotter.StrategyPlotter(strat, True, True, True)
strat.run()
if plot:
plt.plot()
|
{
"content_hash": "f7cffadcdd101bd512f805c048791853",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 126,
"avg_line_length": 27.049689440993788,
"alnum_prop": 0.5524684270952928,
"repo_name": "UpSea/midProjects",
"id": "d181c6fb490be41346a5036c5bbc8d8537f0ca89",
"size": "4395",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PyAlgoTradeCN/01_SamplesFromPyAlgoTradeCN/stratlib/thrSMA.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "13285"
},
{
"name": "Batchfile",
"bytes": "2076"
},
{
"name": "C",
"bytes": "195139"
},
{
"name": "C++",
"bytes": "56306"
},
{
"name": "Makefile",
"bytes": "13644"
},
{
"name": "Objective-C",
"bytes": "1051"
},
{
"name": "Python",
"bytes": "625753"
},
{
"name": "R",
"bytes": "15618"
}
],
"symlink_target": ""
}
|
import os
import scipy.misc
from model import DCGAN
from utils import pp
import tensorflow as tf
flags = tf.app.flags
flags.DEFINE_integer("epoch", 10, "epoch to train [25]")
flags.DEFINE_float("learning_rate", 0.0002, "Learning rate of for adam [0.0002]")
flags.DEFINE_float("beta1", 0.5, "Momentum term of adam [0.5]")
flags.DEFINE_integer("batch_size", 10, "The size of batch images [64]")
flags.DEFINE_integer("image_size", 156, "The size of image to use (will be center cropped) [108]")
flags.DEFINE_integer("ratio", 4, "The ratio rate by which the images is to be resized")
flags.DEFINE_string("dataset", "For_NN", "The name of dataset [celebA, mnist]")
flags.DEFINE_string("checkpoint_dir", "checkpoint", "Directory name to save the checkpoints [checkpoint]")
flags.DEFINE_string("sample_dir", "samples", "Directory name to save the image samples [samples]")
flags.DEFINE_boolean("is_train", True, "True for training, False for testing [False]")
flags.DEFINE_boolean("is_crop", True, "True for training, False for testing [False]")
FLAGS = flags.FLAGS
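# Any of the flags above can be overridden on the command line when the script
# is launched via tf.app.run(), e.g. (illustrative values):
#     python main.py --dataset For_NN --epoch 25 --is_train True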
def main(_):
pp.pprint(flags.FLAGS.__flags)
if not os.path.exists(FLAGS.checkpoint_dir):
os.makedirs(FLAGS.checkpoint_dir)
if not os.path.exists(FLAGS.sample_dir):
os.makedirs(FLAGS.sample_dir)
with tf.Session() as sess:
dcgan = DCGAN(sess, image_size=FLAGS.image_size, is_crop=FLAGS.is_crop,
batch_size=FLAGS.batch_size, dataset_name=FLAGS.dataset, checkpoint_dir=FLAGS.checkpoint_dir,
ratio=FLAGS.ratio)
if FLAGS.is_train:
dcgan.train(FLAGS)
else:
dcgan.check(FLAGS)
if __name__ == '__main__':
tf.app.run()
|
{
"content_hash": "0290e41d65808e3bac5f8cf718e96370",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 106,
"avg_line_length": 40.11904761904762,
"alnum_prop": 0.686646884272997,
"repo_name": "pascal220/ESPCN_OCT",
"id": "7a0c4d44a9d8f4c3ce1efa9905db89e00f6c8c0b",
"size": "1685",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "subpixel_oct/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "19462"
},
{
"name": "HTML",
"bytes": "28424"
},
{
"name": "JavaScript",
"bytes": "93566"
},
{
"name": "Jupyter Notebook",
"bytes": "56315"
},
{
"name": "Python",
"bytes": "22750"
}
],
"symlink_target": ""
}
|
from datacanvas.dataset import DataSet
def test_text_file():
url = 'file://test_output_text_file.bin'
content_write = 'test_text_file'
o = DataSet(url=url, format='text')
o.put_raw(content_write)
i = DataSet(url=url, format='text')
content_read = i.get_raw()
assert content_read == content_write
def test_binary_file():
url = 'file://test_output_binary_file.bin'
content_write = b'test_binary_file'
o = DataSet(url=url, format='binary')
o.put_raw(content_write)
i = DataSet(url=url, format='binary')
content_read = i.get_raw()
assert content_read == content_write
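# These plain-assert tests are collected by pytest, e.g.:
#     pytest tests/dataset/scheme/test_file.py
# Note: both tests leave their scratch files behind; an os.remove() teardown
# (assuming the file:// URLs map to paths in the working directory) would keep
# the workspace clean.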
|
{
"content_hash": "08acf7b7dd551162be65ef2ca1bea037",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 46,
"avg_line_length": 29.714285714285715,
"alnum_prop": 0.6522435897435898,
"repo_name": "DataCanvasIO/pyDataCanvas",
"id": "5b156577c236d27424431e8588d722f18ed3aed2",
"size": "649",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/dataset/scheme/test_file.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "126175"
}
],
"symlink_target": ""
}
|
from point import Point
from sensor import Sensor
from device import Device
from rule import Rule
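# Declaring __all__ makes the re-exported surface explicit; the list below is
# an assumption inferred from the imports above.
__all__ = ['Point', 'Sensor', 'Device', 'Rule']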
|
{
"content_hash": "91ae14b12674dc02334309c111ca5bac",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 25,
"avg_line_length": 24.5,
"alnum_prop": 0.8367346938775511,
"repo_name": "TempoIQ/tempoiq-python",
"id": "ba83f02ea9fa054f61bb393fb97e4fbbc3616968",
"size": "98",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tempoiq/protocol/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "34"
},
{
"name": "Python",
"bytes": "167200"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import abc
import copy
import random
from builtins import map
from builtins import object
from builtins import range
from opentuner.search import technique
from .technique import SearchTechnique
class GlobalEvolutionaryTechnique(SearchTechnique):
def __init__(self,
mutation_rate=0.1,
crossover_rate=0.0,
must_mutate_count=1,
crossover_strength=0.1,
*pargs, **kwargs):
super(GlobalEvolutionaryTechnique, self).__init__(*pargs, **kwargs)
self.mutation_rate = mutation_rate
self.crossover_rate = crossover_rate
self.must_mutate_count = must_mutate_count
self.crossover_strength = crossover_strength
@classmethod
def get_hyper_parameters(cls):
return ['mutation_rate', 'crossover_rate', 'must_mutate_count', 'crossover_strength']
def desired_configuration(self):
"""
return a (cfg, priority) that we should test,
through random mutation and crossover
"""
# TODO: set limit value
parents = self.selection()
parents = list(map(copy.deepcopy, parents))
parent_hashes = list(map(self.manipulator.hash_config, parents))
if len(parents) > 1:
cfg = self.crossover(parents)
else:
cfg = parents[0]
for z in range(10): # retries
self.mutation(cfg)
if self.manipulator.hash_config(cfg) in parent_hashes:
continue # try again
return cfg
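  # mutation() always mutates `must_mutate_count` randomly chosen parameters,
  # then mutates each remaining parameter independently with probability
  # `mutation_rate`.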
def mutation(self, cfg):
"""
mutate cfg in place
"""
params = self.manipulator.parameters(cfg)
random.shuffle(params)
for param in params[:self.must_mutate_count]:
self.mutate_param(cfg, param)
for param in params[self.must_mutate_count:]:
if random.random() < self.mutation_rate:
self.mutate_param(cfg, param)
def mutate_param(self, cfg, param):
"""
mutate single parameter of cfg in place
"""
param.op1_randomize(cfg)
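  # Uniform-style crossover: copy a random fraction (`crossover_strength`) of
  # the parameters from the second parent into a copy of the first.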
def crossover(self, cfgs):
    cfg1, cfg2 = cfgs
new = self.manipulator.copy(cfg1)
params = self.manipulator.parameters(cfg1)
random.shuffle(params)
d = int(self.crossover_strength * len(params))
for param in params[:d]:
param.set_value(new, param.get_value(cfg2))
return new
def selection(self):
"""return a list of parent configurations to use"""
if random.random() < self.crossover_rate:
return [self.select(),
self.select()]
else:
return [self.select()]
@abc.abstractmethod
def select(self):
"""return a single random parent configuration"""
return None
class GreedySelectionMixin(object):
"""
EvolutionaryTechnique mixin for greedily selecting the best known
configuration
"""
def select(self):
"""return a single random parent configuration"""
if (self.driver.best_result is not None and
self.driver.best_result.state == 'OK'):
return self.driver.best_result.configuration.data
else:
return self.manipulator.random()
class NormalMutationMixin(object):
"""
Mutate primitive parameters according to normal distribution
"""
def __init__(self, sigma=0.1, *pargs, **kwargs):
super(NormalMutationMixin, self).__init__(*pargs, **kwargs)
self.sigma = sigma
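  # Primitive (numeric) parameters receive a Gaussian perturbation with
  # standard deviation `sigma`; other parameter types fall back to a randomly
  # chosen manipulator.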
def mutate_param(self, cfg, param):
"""
mutate single parameter of cfg in place
"""
if param.is_primitive():
param.op1_normal_mutation(cfg, self.sigma)
else:
random.choice(param.manipulators(cfg))(cfg)
class UniformGreedyMutation(GreedySelectionMixin, GlobalEvolutionaryTechnique):
pass
class NormalGreedyMutation(NormalMutationMixin, GreedySelectionMixin, GlobalEvolutionaryTechnique):
pass
technique.register(NormalGreedyMutation(crossover_rate=0.5, crossover_strength=0.2, name='GGA'))
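# Other variants can be registered the same way; for example (name chosen here
# purely for illustration):
#   technique.register(UniformGreedyMutation(mutation_rate=0.2, name='UGA'))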
|
{
"content_hash": "2244d598d35afa64d98582870fafd9b1",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 99,
"avg_line_length": 30.364963503649633,
"alnum_prop": 0.6161057692307692,
"repo_name": "jansel/opentuner",
"id": "a7ea2357437b2b8fe29f3221c65b49bbc218fae2",
"size": "4160",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "opentuner/search/globalGA.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "203"
},
{
"name": "Gnuplot",
"bytes": "255"
},
{
"name": "HTML",
"bytes": "1211"
},
{
"name": "Python",
"bytes": "320828"
}
],
"symlink_target": ""
}
|