text stringlengths 0 1.05M | meta dict |
|---|---|
"""ARTICLE MODELS
Article
"""
from django.db import models
from ckeditor.fields import RichTextField
from django.utils import timezone
from django.utils.text import slugify
class Article(models.Model):
    """A single article post.

    Fields:
        post_id: primary key, auto-incrementing.
        title: article title, max length 250.
        body: rich-text (ckeditor) body.
        published: publication timestamp (supplied by the caller).
        modified: last-modified timestamp, refreshed on every save.
        tags: many-to-many link to ``sitedata.Tag``.
        slug: unique URL slug derived from the title (max length 50).
        headerimage: URL of an image displayed at the top of the post.
    """

    # basic post
    post_id = models.AutoField(
        primary_key=True,
        verbose_name='Post #',
    )
    title = models.CharField(
        max_length=250,
        verbose_name='Article Title'
    )
    body = RichTextField()
    published = models.DateTimeField(
        verbose_name='Published',
    )
    # additional stuff
    modified = models.DateTimeField(
        verbose_name='Last Modified',
    )
    tags = models.ManyToManyField(
        'sitedata.Tag',
        verbose_name='Tags',
    )
    slug = models.SlugField(
        max_length=50,
        unique=True
    )
    headerimage = models.URLField(
        max_length=200,
        blank=True,
        verbose_name='Header Image',
    )

    def __str__(self):
        """Return the article title as the string representation."""
        return self.title

    class Meta:
        """Verbose names for articles."""
        verbose_name = 'Article post'
        verbose_name_plural = 'Article posts'

    def save(self, *args, **kwargs):
        """Save the article, generating a unique slug on first save.

        On creation (no primary key yet) a slug is derived from the
        title; if other articles already use the same title, a numeric
        suffix is appended to keep the slug unique. The ``modified``
        timestamp is refreshed on every save.

        Args:
            *args: positional arguments forwarded to ``Model.save``.
            **kwargs: keyword arguments forwarded to ``Model.save``.

        Returns:
            The result of ``models.Model.save``.
        """
        if not self.post_id:
            # NOTE(review): ``created`` is not a declared field on this
            # model, so this only sets a transient attribute — possibly
            # ``published`` was intended; kept as-is to preserve behavior.
            self.created = timezone.now()
            duplicates = Article.objects.filter(title=self.title).count()
            if duplicates > 0:
                # Articles with this title already exist -> make the slug
                # unique by appending the duplicate count. Truncate the
                # title by the length of the suffix (digits plus '-') so
                # the result still fits the 50-character slug field.
                # (Previously truncated by the duplicate COUNT, which
                # over-truncated and, for large counts, could produce a
                # slug longer than max_length.)
                suffix = str(duplicates)
                self.slug = slugify(
                    self.title[:49 - len(suffix)] + '-' + suffix)
            else:
                self.slug = slugify(self.title[:50])
        self.modified = timezone.now()
        return super(Article, self).save(*args, **kwargs)
| {
"repo_name": "efueger/harshp.com",
"path": "articles/models.py",
"copies": "1",
"size": "2537",
"license": "mit",
"hash": -4947090653001015000,
"line_mean": 22.4907407407,
"line_max": 75,
"alpha_frac": 0.5530153725,
"autogenerated": false,
"ratio": 4.51423487544484,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 108
} |
'''This module contains the data model for Articles in blog sources.'''
import codecs
import datetime
import os
import lpbm.models.configmodel as cm_module
import lpbm.tools as ltools
from lpbm.lib.slugify import SLUG_CHARS_DISPLAY as _SLUG_CHARS_DISPLAY
from lpbm.lib.slugify import slugify as _slugify
class ArticleSameIdError(Exception):
    '''Raised when two distinct articles declare the same id.'''

    def __init__(self, art1, art2):
        super(ArticleSameIdError, self).__init__(self)
        self.art1 = art1
        self.art2 = art2

    def __str__(self):
        # Report both conflicting articles by title.
        titles = (self.art1.title, self.art2.title)
        return 'Articles `%s\' and `%s\' have the same id defined.' % titles
# Line prefix that separates an article's title block from its markdown
# body in the on-disk file format.
_TITLE_SEPARATOR = '=='
# Serialization format for dates stored in article configuration files.
_FRMT_DATE_CONF = '%Y-%m-%dT%H:%M:%S'
def translate_to_jekyll_markdown(contents):
    '''Convert lpbm-flavored markdown into Jekyll-flavored markdown.

    Code blocks marked by 4-space indentation with an optional ``:::lang``
    hint are rewritten as fenced ``` blocks; bare ``:::lpbm`` directives
    are dropped.
    '''
    out = []
    inside_code = False
    for raw in contents.splitlines():
        stripped = raw.lstrip()
        if stripped.startswith(':::lpbm'):
            # Pure lpbm directive: carries no information for Jekyll.
            continue
        if stripped.startswith(':::'):
            # Open a fence, carrying over the language hint after ':::'.
            raw = '```' + stripped[3:]
            inside_code = True
        elif inside_code:
            if stripped and not raw.startswith('    '):
                # First non-blank, non-indented line ends the code block.
                # If a blank line directly precedes it, keep that blank
                # line after the closing fence.
                had_blank = bool(out) and out[-1] == ''
                if had_blank:
                    out.pop()
                out.append('```')
                if had_blank:
                    out.append('')
                inside_code = False
            elif raw.startswith('    '):
                # Strip the 4-space code indentation inside the fence.
                raw = raw[4:]
        out.append(raw)
    return '\n'.join(out) + '\n'
class Article(cm_module.Model):
    '''
    The actual model. Articles are divided in two files: the actual article
    (written in markdown syntax) and a configuration file containing
    information for blog generation.
    '''

    title = cm_module.field('title', required=True)
    published = cm_module.opt_bool('general', 'published', default=False)
    _date = cm_module.opt('general', 'date')
    _authors = cm_module.opt('general', 'authors', default='')
    _categories = cm_module.opt('general', 'categories', default='')

    def __init__(self, mod, mods, filename=None):
        '''Load an article from *filename*, or start an empty one.'''
        super().__init__(mod, mods)
        self.title, self._content = '', ''
        try:
            # Strip the '.markdown' extension (9 characters) to get the
            # base filename; ``path`` keeps the full path.
            self.filename, self.path = filename[:-9], filename
        except TypeError:
            # filename is None: the article is being created from scratch.
            self.filename, self.path = '', ''
        # Read the file: title lines come before the '==' separator,
        # everything after it is the markdown content.
        # FIX: the file handle was previously never closed; use a context
        # manager (TypeError from a None filename is still caught below).
        try:
            with codecs.open(filename, 'r', 'utf-8') as f:
                line = f.readline()
                while line:
                    if line.startswith(_TITLE_SEPARATOR):
                        line = f.readline()
                        break
                    self.title += line[:-1]
                    line = f.readline()
                while line:
                    self._content += line
                    line = f.readline()
        except (IOError, TypeError):
            pass
        # Model configuration.
        self.cm = cm_module.ConfigModel(self._config_filename())
        # Interactive fields.
        self._interactive_fields = ['title']
        if self.id is None:
            self._interactive_fields += ['filename']
        self._interactive_fields += ['authors', 'categories']
        # Authors and categories arrive as comma-separated strings.
        self.authors = self._authors
        self.categories = self._categories
        # If creating the article, set date to now.
        if self._date is None:
            self.date = datetime.datetime.now()

    def __str__(self):
        # ``id`` is passed but unused by the format string; kept for
        # backward compatibility.
        return '"{title}" by {authors} [{published}]'.format(
            id=self.id,
            title=self.title,
            authors=self.mod._get_author_verbose(self.authors),
            published=('published' if self.published else 'draft'),
        )

    def __lt__(self, other):
        '''Articles are sorted by date, then by id.'''
        return (self.date, self.id) < (other.date, other.id)

    def save(self):
        '''Write the markdown file; configuration is saved automatically.'''
        with codecs.open(self._markdown_filename(), 'w', 'utf-8') as f:
            # First the title, underlined with '=' characters.
            f.write(self.title + '\n')
            f.write(len(self.title) * '=' + '\n')
            # And finally we have the content.
            f.write(self._content)
        # Saving special fields configuration.
        self._authors = ', '.join(list(self._authors_set))
        self._categories = ', '.join(list(self._categories_set))
        # Finally saving everything.
        super().save()

    def interactive_filename(self):
        '''Prompt for a slug-like filename, validating it interactively.'''
        def is_valid(value):
            # The filename must itself be a valid slug ...
            if _slugify(value) != value:
                print('This is not a valid slug ({}).'.format(_SLUG_CHARS_DISPLAY))
                return False
            # ... and must not collide with an existing article.
            path = os.path.join('articles', value + '.markdown')
            if os.path.exists(os.path.normpath(path)):
                print('Article with this filename already exists.')
                return False
            return True
        default = _slugify(self.title)
        self.filename = ltools.input_default(
            'Filename', default, required=True, is_valid=is_valid)
        # Several paths have to be reset.
        self.filename = os.path.join('articles', self.filename)
        self.path = os.path.normpath(self.filename + '.markdown')
        self.cm.filename = self._config_filename()

    def interactive_authors(self):
        '''Prompt for the comma-separated list of authors.'''
        self.mods['authors'].opt_list(short=True)
        self.authors = ltools.input_default(
            'Please list authors (comma separated)', self._authors,
            required=True, is_valid=self.mods['authors'].is_valid_list,
        )

    def interactive_categories(self):
        '''Prompt for the comma-separated list of categories.'''
        self.mods['categories'].opt_list(short=True)
        self.categories = ltools.input_default(
            'Please list categories (comma separated)', self._categories,
            required=True, is_valid=self.mods['categories'].is_valid_list,
        )

    def delete(self):
        '''Delete the article and mark it unpublished.'''
        super().delete()
        self.published = False

    @property
    def authors(self):
        '''Returns the list of author ids (as integers).'''
        return [int(a) for a in list(self._authors_set)]

    @authors.setter
    def authors(self, authors):
        '''
        Takes a string of comma-separated authors and adds them to authors of
        the article.
        '''
        self._authors_set = set(ltools.split_on_comma(authors))

    @property
    def categories(self):
        '''Returns the list of category ids (as integers).'''
        return [int(c) for c in list(self._categories_set)]

    @categories.setter
    def categories(self, value):
        '''Takes a string of comma-separated category ids.'''
        self._categories_set = set(ltools.split_on_comma(value))

    @property
    def date(self):
        '''Translates date string in configuration to a timestamp (getter).

        Falls back to the epoch when the stored value cannot be parsed.
        '''
        try:
            return datetime.datetime.strptime(self._date, _FRMT_DATE_CONF)
        except ValueError:
            return datetime.datetime.fromtimestamp(0)

    @date.setter
    def date(self, value):
        '''Translates a date as a string in the right format (setter).'''
        if value is None:
            self._date = None
        else:
            self._date = value.strftime(_FRMT_DATE_CONF)

    @property
    def content(self):
        '''Raw markdown content of the article.'''
        return self._content

    def _config_filename(self):
        '''Returns the filename with config's extension.'''
        # FIX: restore the '{filename}' replacement field — the literal
        # string previously made ``format`` a no-op, so the config path
        # never included the article's filename.
        return '{filename}.cfg'.format(filename=self.filename)

    def _markdown_filename(self):
        '''Returns the filename with markdown's extension.'''
        return '{filename}.markdown'.format(filename=self.filename)

    def html_filename(self):
        '''Returns the filename of the HTML file for that article.'''
        filename = os.path.basename(self.filename)
        return '%d-%s.html' % (self.id, filename)

    def jekyll_url(self):
        '''URL of the article under Jekyll's date-based layout.'''
        dt = self.date.strftime('%Y/%m/%d')
        filename = os.path.basename(self.filename)
        return '%s/%s.html' % (dt, filename)

    def jekyll_markdown_filename(self):
        '''Jekyll post filename (date-prefixed markdown file).'''
        dt = self.date.strftime('%Y-%m-%d')
        filename = os.path.basename(self.filename)
        return '%s-%s.md' % (dt, filename)

    def url(self):
        '''The direct link to the article.'''
        return os.path.join('/', 'articles', self.html_filename())

    def publish(self):
        '''
        Set everything needed to publish an article (published flag and date).
        '''
        self.published = True
        self.date = datetime.datetime.now()

    @property
    def jekyll_content(self):
        '''Article content translated to Jekyll-flavored markdown.'''
        return translate_to_jekyll_markdown(self._content)
| {
"repo_name": "fmichea/lpbm",
"path": "lpbm/models/articles.py",
"copies": "1",
"size": "8616",
"license": "bsd-3-clause",
"hash": -7671744794293129000,
"line_mean": 31.8854961832,
"line_max": 83,
"alpha_frac": 0.5775301764,
"autogenerated": false,
"ratio": 4.054588235294117,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5132118411694118,
"avg_score": null,
"num_lines": null
} |
"""articles_column
Revision ID: 49f02f2c2ea
Revises: 171e70161dd
Create Date: 2016-03-19 22:38:51.402128
"""
# revision identifiers, used by Alembic.
revision = '49f02f2c2ea'
down_revision = '171e70161dd'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Replace the singular 'article'/'article_column' tables with plural
    'articles'/'article_columns', adding article_column_id and index
    columns to articles.

    NOTE(review): data in the old tables is dropped, not migrated.
    """
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('article_columns',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('title', sa.String(length=64), nullable=True),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_table('articles',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('title', sa.String(length=64), nullable=True),
        sa.Column('body', sa.Text(), nullable=True),
        sa.Column('timestamp', sa.DateTime(), nullable=True),
        sa.Column('author_id', sa.Integer(), nullable=True),
        sa.Column('article_column_id', sa.Integer(), nullable=True),
        sa.Column('index', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(['article_column_id'], ['article_columns.id'], ),
        sa.ForeignKeyConstraint(['author_id'], ['users.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_articles_timestamp'), 'articles', ['timestamp'], unique=False)
    # Drop the superseded singular tables.
    op.drop_table('article_column')
    op.drop_table('article')
    ### end Alembic commands ###
def downgrade():
    """Reverse of upgrade(): restore the singular 'article' and
    'article_column' tables and drop the plural ones.

    NOTE(review): data in the plural tables is dropped, not migrated back.
    """
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('article',
        sa.Column('id', sa.INTEGER(), nullable=False),
        sa.Column('title', sa.VARCHAR(length=64), nullable=True),
        sa.Column('body', sa.TEXT(), nullable=True),
        sa.Column('timestamp', sa.DATETIME(), nullable=True),
        sa.Column('author_id', sa.INTEGER(), nullable=True),
        sa.Column('index', sa.INTEGER(), nullable=True),
        sa.ForeignKeyConstraint(['author_id'], ['users.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_table('article_column',
        sa.Column('id', sa.INTEGER(), nullable=False),
        sa.Column('title', sa.VARCHAR(length=64), nullable=True),
        sa.PrimaryKeyConstraint('id')
    )
    op.drop_index(op.f('ix_articles_timestamp'), table_name='articles')
    op.drop_table('articles')
    op.drop_table('article_columns')
    ### end Alembic commands ###
| {
"repo_name": "XiaochenCui/algorithm_submit",
"path": "migrations/versions/49f02f2c2ea_articles_column.py",
"copies": "1",
"size": "2229",
"license": "mit",
"hash": 9076071015691817000,
"line_mean": 34.9516129032,
"line_max": 91,
"alpha_frac": 0.6603858232,
"autogenerated": false,
"ratio": 3.439814814814815,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4600200638014815,
"avg_score": null,
"num_lines": null
} |
"""articles
Revision ID: 2223792e165
Revises: 13d42a02406
Create Date: 2016-03-19 21:58:27.811507
"""
# revision identifiers, used by Alembic.
revision = '2223792e165'
down_revision = '13d42a02406'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the 'article' table (with an author foreign key) and an
    index on its timestamp column."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('article',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('title', sa.String(length=64), nullable=True),
        sa.Column('body', sa.Text(), nullable=True),
        sa.Column('timestamp', sa.DateTime(), nullable=True),
        sa.Column('author_id', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(['author_id'], ['users.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_article_timestamp'), 'article', ['timestamp'], unique=False)
    ### end Alembic commands ###
def downgrade():
    """Reverse of upgrade(): drop the 'article' table and its index."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_article_timestamp'), table_name='article')
    op.drop_table('article')
    ### end Alembic commands ###
| {
"repo_name": "XiaochenCui/algorithm_submit",
"path": "migrations/versions/2223792e165_articles.py",
"copies": "1",
"size": "1075",
"license": "mit",
"hash": -9171602199706998000,
"line_mean": 28.8611111111,
"line_max": 89,
"alpha_frac": 0.6688372093,
"autogenerated": false,
"ratio": 3.4126984126984126,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45815356219984127,
"avg_score": null,
"num_lines": null
} |
'''Articulated "skeleton" class and associated helper functions.'''
import logging
import numpy as np
import ode
from . import parser
from . import physics
def pid(kp=0., ki=0., kd=0., smooth=0.1):
    r'''Create a callable that implements a PID controller.

    A PID controller turns a stream of error measurements
    :math:`e(0) \dots e(t)` into a control signal

    .. math::
        u(t) = kp * e(t) + ki * \int_{s=0}^t e(s) ds + kd * \frac{de(s)}{ds}(t)

    i.e. a weighted sum of the current error (P), the accumulated error
    (I), and the smoothed instantaneous derivative of the error (D).

    Parameters
    ----------
    kp : float
        Weight of the proportional term.
    ki : float
        Weight of the integral term.
    kd : float
        Weight of the derivative term.
    smooth : float in [0, 1]
        Exponential-average factor for the derivative: 1 keeps only the
        historic derivative, 0 keeps only the newest one. Default 0.1.

    Returns
    -------
    controller : callable (float, float) -> float
        Function taking an error measurement and the time delta since the
        previous measurement, returning the control signal.
    '''
    # Controller memory: previous error (p), error integral (i), and
    # smoothed derivative (d).
    memory = {'p': 0, 'i': 0, 'd': 0}

    def control(error, dt=1):
        derivative = (error - memory['p']) / dt
        memory['d'] = smooth * memory['d'] + (1 - smooth) * derivative
        memory['i'] += error * dt
        memory['p'] = error
        return kp * memory['p'] + ki * memory['i'] + kd * memory['d']

    return control
def as_flat_array(iterables):
    '''Given a sequence of sequences, return a flat numpy array.

    Parameters
    ----------
    iterables : sequence of sequence of number
        A sequence of tuples or lists containing numbers. Typically these
        come from something that represents each joint in a skeleton,
        like angle.

    Returns
    -------
    ndarray :
        An array of flattened data from each of the source iterables.
    '''
    # Flatten one level with a nested comprehension, then hand off to numpy.
    return np.array([value for chunk in iterables for value in chunk])
class Skeleton:
    '''A skeleton is a group of rigid bodies connected with articulated joints.

    Commonly, the skeleton is used to represent an articulated body that is
    capable of mimicking the motion of the human body.

    Most often, skeletons are configured by parsing information from a text
    file of some sort. See :class:`pagoda.parser.BodyParser` for more
    information about the format of the text file. Skeletons can also be
    loaded from text files in ASF format; see :class:`pagoda.parser.AsfParser`
    for more information.

    Parameters
    ----------
    world : :class:`pagoda.physics.World`
        A world object that holds bodies and joints for physics simulation.

    Attributes
    ----------
    bodies : list of :class:`pagoda.physics.Body`
        A list of the rigid bodies that comprise this skeleton.
    joints : list of :class:`pagoda.physics.Joint`
        A list of the joints that connect bodies in this skeleton.
    '''

    def __init__(self, world):
        '''Create an empty skeleton attached to the given physics world.'''
        self.world = world
        # Joints created while loading are grouped so they can be managed
        # together by ODE.
        self.jointgroup = ode.JointGroup()
        self.bodies = []
        self.joints = []

    def load(self, source, **kwargs):
        '''Load a skeleton definition from a file.

        Dispatches on the file extension: '.asf' files go through
        :meth:`load_asf`, everything else through :meth:`load_skel`.

        Parameters
        ----------
        source : str or file
            A filename or file-like object that contains text information
            describing a skeleton. See :class:`pagoda.parser.Parser` for more
            information about the format of the text file.
        '''
        if hasattr(source, 'endswith') and source.lower().endswith('.asf'):
            self.load_asf(source, **kwargs)
        else:
            self.load_skel(source, **kwargs)

    def load_skel(self, source, **kwargs):
        '''Load a skeleton definition from a text file.

        Parameters
        ----------
        source : str or file
            A filename or file-like object that contains text information
            describing a skeleton. See :class:`pagoda.parser.BodyParser` for
            more information about the format of the text file.
        '''
        logging.info('%s: parsing skeleton configuration', source)
        if hasattr(source, 'read'):
            p = parser.parse(source, self.world, self.jointgroup, **kwargs)
        else:
            with open(source) as handle:
                p = parser.parse(handle, self.world, self.jointgroup, **kwargs)
        self.bodies = p.bodies
        self.joints = p.joints
        # P-only controllers tuned to nearly cancel the error in one
        # world timestep.
        self.set_pid_params(kp=0.999 / self.world.dt)

    def load_asf(self, source, **kwargs):
        '''Load a skeleton definition from an ASF text file.

        Parameters
        ----------
        source : str or file
            A filename or file-like object that contains text information
            describing a skeleton, in ASF format.
        '''
        if hasattr(source, 'read'):
            p = parser.parse_asf(source, self.world, self.jointgroup, **kwargs)
        else:
            with open(source) as handle:
                p = parser.parse_asf(handle, self.world, self.jointgroup, **kwargs)
        self.bodies = p.bodies
        self.joints = p.joints
        self.set_pid_params(kp=0.999 / self.world.dt)

    def set_pid_params(self, *args, **kwargs):
        '''Set PID parameters for all joints in the skeleton.

        Parameters for this method are passed directly to the `pid`
        constructor. Also resets each joint's target angles (one
        controller per articulated degree of freedom).
        '''
        for joint in self.joints:
            joint.target_angles = [None] * joint.ADOF
            joint.controllers = [pid(*args, **kwargs) for i in range(joint.ADOF)]

    @property
    def color(self):
        '''Color of the skeleton (read from the first body; default red).'''
        return getattr(self.bodies[0], 'color', (1, 0, 0, 1))

    @color.setter
    def color(self, color):
        # Apply the same color to every body.
        for b in self.bodies:
            b.color = color

    @property
    def num_dofs(self):
        '''Return the number of degrees of freedom in the skeleton.'''
        return sum(j.ADOF for j in self.joints)

    @property
    def joint_angles(self):
        '''Get a list of all current joint angles in the skeleton.'''
        return as_flat_array(j.angles for j in self.joints)

    @property
    def joint_velocities(self):
        '''Get a list of all current joint velocities in the skeleton.'''
        return as_flat_array(j.velocities for j in self.joints)

    @property
    def joint_torques(self):
        '''Get a list of all current joint torques in the skeleton.

        Torques are read from each joint's angular-motor feedback (or from
        the joint itself if it has no separate ``amotor``).
        '''
        return as_flat_array(getattr(j, 'amotor', j).feedback[-1][:j.ADOF]
                             for j in self.joints)

    @property
    def body_positions(self):
        '''Get a list of all current body positions in the skeleton.'''
        return as_flat_array(b.position for b in self.bodies)

    @property
    def body_rotations(self):
        '''Get a list of all current body rotations (quaternions).'''
        return as_flat_array(b.quaternion for b in self.bodies)

    @property
    def body_linear_velocities(self):
        '''Get a list of all current body velocities in the skeleton.'''
        return as_flat_array(b.linear_velocity for b in self.bodies)

    @property
    def body_angular_velocities(self):
        '''Get a list of all current body angular velocities in the skeleton.'''
        return as_flat_array(b.angular_velocity for b in self.bodies)

    @property
    def cfm(self):
        '''Constraint-force-mixing value (read from the first joint).'''
        return self.joints[0].cfm

    @cfm.setter
    def cfm(self, cfm):
        # Apply the same CFM value to every joint.
        for joint in self.joints:
            joint.cfm = cfm

    @property
    def erp(self):
        '''Error-reduction-parameter value (read from the first joint).'''
        return self.joints[0].erp

    @erp.setter
    def erp(self, erp):
        # Apply the same ERP value to every joint.
        for joint in self.joints:
            joint.erp = erp

    def indices_for_joint(self, name):
        '''Get a list of the indices for a specific joint.

        Parameters
        ----------
        name : str
            The name of the joint to look up.

        Returns
        -------
        list of int :
            A list of the index values for quantities related to the named
            joint. Often useful for getting, say, the angles for a specific
            joint in the skeleton. Empty if no joint has that name.
        '''
        j = 0
        for joint in self.joints:
            if joint.name == name:
                return list(range(j, j + joint.ADOF))
            j += joint.ADOF
        return []

    def indices_for_body(self, name, step=3):
        '''Get a list of the indices for a specific body.

        Parameters
        ----------
        name : str
            The name of the body to look up.
        step : int, optional
            The number of numbers for each body. Defaults to 3, should be set
            to 4 for body rotation (since quaternions have 4 values).

        Returns
        -------
        list of int :
            A list of the index values for quantities related to the named
            body. Empty if no body has that name.
        '''
        for j, body in enumerate(self.bodies):
            if body.name == name:
                return list(range(j * step, (j + 1) * step))
        return []

    def joint_distances(self):
        '''Get the current joint separations for the skeleton.

        Returns
        -------
        distances : list of float
            A list expressing the separation between the two anchor points
            of each joint (as the squared Euclidean distance). These
            quantities describe how "exploded" the bodies in the skeleton
            are; a value of 0 indicates that the constraints are perfectly
            satisfied for that joint.
        '''
        return [((np.array(j.anchor) - j.anchor2) ** 2).sum() for j in self.joints]

    def get_body_states(self):
        '''Return a list of the states of all bodies in the skeleton.'''
        return [b.state for b in self.bodies]

    def set_body_states(self, states):
        '''Set the states of all bodies in the skeleton.

        Bodies are looked up by name in the world, so the order of
        ``states`` does not need to match ``self.bodies``.
        '''
        for state in states:
            self.world.get_body(state.name).state = state

    def set_joint_velocities(self, target=0):
        '''Set the target velocity for all joints in the skeleton.

        Often the target is set to 0 to cancel out any desired joint rotation.

        Parameters
        ----------
        target : float, optional
            The target velocity for all joints in the skeleton. Defaults to 0.
        '''
        for joint in self.joints:
            joint.velocities = target

    def enable_motors(self, max_force):
        '''Enable the joint motors in this skeleton.

        This method sets the maximum force that can be applied by each joint
        to attain the desired target velocities. It also enables torque
        feedback for all joint motors (or disables it when max_force is 0).

        Parameters
        ----------
        max_force : float
            The maximum force that each joint is allowed to apply to attain
            its target velocity.
        '''
        for joint in self.joints:
            amotor = getattr(joint, 'amotor', joint)
            amotor.max_forces = max_force
            if max_force > 0:
                amotor.enable_feedback()
            else:
                amotor.disable_feedback()

    def disable_motors(self):
        '''Disable joint motors in this skeleton.

        This method sets to 0 the maximum force that joint motors are allowed
        to apply, in addition to disabling torque feedback.
        '''
        self.enable_motors(0)

    def set_target_angles(self, angles):
        '''Move each joint toward a target angle.

        This method uses a PID controller to set a target angular velocity
        for each degree of freedom in the skeleton, based on the difference
        between the current and the target angle for the respective DOF.

        PID parameters are by default set to achieve a tiny bit less than
        complete convergence in one time step, using only the P term (i.e.,
        the P coefficient is set to 1 - delta, while I and D coefficients are
        set to 0). PID parameters can be updated by calling the
        `set_pid_params` method.

        Parameters
        ----------
        angles : list of float
            A list of the target angles for every joint in the skeleton.
        '''
        j = 0
        for joint in self.joints:
            # Feed each DOF's angle error through its PID controller to
            # obtain a target velocity for that DOF.
            velocities = [
                ctrl(tgt - cur, self.world.dt) for cur, tgt, ctrl in
                zip(joint.angles, angles[j:j+joint.ADOF], joint.controllers)]
            joint.velocities = velocities
            j += joint.ADOF

    def add_torques(self, torques):
        '''Add torques for each degree of freedom in the skeleton.

        Parameters
        ----------
        torques : list of float
            A list of the torques to add to each degree of freedom in the
            skeleton.
        '''
        j = 0
        for joint in self.joints:
            # Pad with zeros up to 3 values, as expected by add_torques.
            joint.add_torques(
                list(torques[j:j+joint.ADOF]) + [0] * (3 - joint.ADOF))
            j += joint.ADOF
| {
"repo_name": "EmbodiedCognition/pagoda",
"path": "pagoda/skeleton.py",
"copies": "1",
"size": "13358",
"license": "mit",
"hash": 9160017352299570000,
"line_mean": 33.5167958656,
"line_max": 83,
"alpha_frac": 0.6011378949,
"autogenerated": false,
"ratio": 4.1717676452217365,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00041171335300354504,
"num_lines": 387
} |
"""Artifactory purger module."""
import base64
import datetime
import logging
import certifi
import party
# pylint: disable=redefined-builtin
from requests.exceptions import (BaseHTTPError, ConnectionError, HTTPError, InvalidURL, RequestException)
from ..credentials import load_credentials
LOG = logging.getLogger(__name__)
class Artifactory:
"""Artifactory purger class."""
def __init__(self, repo_name=None):
self.repo_name = repo_name
self.credentials = load_credentials()
self.base_url = self.credentials['artifactory_url']
self.artifactory = party.Party()
if not self.base_url.endswith('/api'):
self.api_url = '/'.join([self.base_url, 'api'])
else:
self.api_url = self.base_url
self.artifactory.artifactory_url = self.api_url
self.artifactory.username = self.credentials['artifactory_username']
self.artifactory.password = base64.encodebytes(bytes(self.credentials['artifactory_password'], 'utf-8'))
self.artifactory.certbundle = certifi.where()
def repos(self, repo_type='local'):
"""
Return a dictionary of repos with basic info about each.
Args:
repo_type (str): Type of repository to list. (local/virtual/cache/any)
Returns:
repos (dict): Dictionary of repos.
"""
repos = {}
raw_data = self.artifactory.get('storageinfo')
data = raw_data.json()
LOG.debug('Storage info data: %s', data)
for repo in data["repositoriesSummaryList"]:
if repo['repoKey'] == "TOTAL":
continue
if repo['repoType'].lower() != repo_type and repo_type != 'any':
LOG.debug("Skipping repo %s, not of type %s", repo['repoKey'], repo_type)
continue
repos[repo['repoKey']] = repo
return repos
def purge(self, dry_run, artifacts):
""" Purge artifacts from the specified repo.
Args:
dry_run (bool): Dry run mode True/False
artifacts (list): Artifacts.
Returns:
purged (int): Count purged.
"""
purged = 0
mode = 'DRYRUN' if dry_run else 'LIVE'
LOG.info('Running mode: %s', mode)
artifacts = sorted(artifacts, key=lambda k: k['path'])
for artifact in artifacts:
artifact_path = '{}/{}/{}'.format(self.repo_name, artifact['path'], artifact['name'])
LOG.info('%s purge %s', mode, artifact_path)
full_artifact_url = '{}/{}'.format(self.base_url, artifact_path)
if dry_run:
purged += 1
else:
try:
self.artifactory.query_artifactory(full_artifact_url, query_type='delete')
purged += 1
except (BaseHTTPError, HTTPError, InvalidURL, RequestException, ConnectionError) as error:
LOG.error(str(error))
return purged
def move_artifacts(self, artifacts=None, dest_repository=None):
"""Moves a list of artifacts to dest_repository.
Args:
artifacts (list): List of artifacts to move.
dest_repository (str): The name of the destination repo.
"""
base_endpoint = "move/{}".format(self.repo_name)
dest_prefix = "?to=/{}".format(dest_repository)
artifacts = sorted(artifacts, key=lambda k: k['path'])
for artifact in artifacts:
move_url = "{0}/{1}/{2}{3}/{1}/{2}".format(base_endpoint, artifact['path'], artifact['name'], dest_prefix)
LOG.info("Moving %s to repository %s", artifact['name'], dest_repository)
request = self.artifactory.post(move_url)
if not request.ok:
LOG.warning("error moving artifact %s: %s", artifact['name'], request.text)
return True
# pylint: disable-msg=too-many-arguments
def filter(self, terms=None, depth=3, sort=None, offset=0, limit=0, fields=None, item_type="folder"):
"""Get a subset of artifacts from the specified repo.
This looks at the project level, but actually need to iterate lower at project level
This method does not use pagination. It assumes that this utility
will be called on a repo sufficiently frequently that removing just
the default n items will be enough.
Args:
terms (list): an array of jql snippets that will be ANDed together
depth (int, optional): how far down the folder hierarchy to look
fields (list): Fields
sort (dict): How to sort Artifactory results
offset (int): how many items from the beginning of the list should be skipped (optional)
limit (int): the maximum number of entries to return (optional)
item_type (str): The item type to search for (file/folder/any).
Returns:
list: List of artifacts returned from query
"""
if sort is None:
sort = {}
if fields is None:
fields = []
if terms is None:
terms = []
terms.append({"path": {"$nmatch": "*/repodata"}}) # ignore all repodata. In future make configurable
terms.append({"repo": {"$eq": self.repo_name}})
terms.append({"type": {"$eq": item_type}})
if depth:
terms.append({"depth": {"$eq": depth}})
aql = {"$and": terms}
LOG.debug("AQL: %s", aql)
response = self.artifactory.find_by_aql(
fields=fields, criteria=aql, order_and_fields=sort, offset_records=offset, num_records=limit)
results = response['results']
return results
def get_artifact_properties(self, artifact):
"""Given an artifact, queries for properties from artifact URL
Args:
artifact (dict): Dictionary of artifact info. Needs artifact['name'] and ['path'].
Returns:
dict: Dictionary of all properties on specific artifact
"""
artifact_url = "{0}/{1}/{2}/{3}".format(self.base_url, self.repo_name, artifact['path'], artifact['name'])
LOG.debug("Getting properties for %s", artifact_url)
self.artifactory.get_properties(artifact_url)
return self.artifactory.properties # pylint: disable=no-member
def get_all_repo_artifacts(self, depth=None, item_type='file', with_properties=True):
"""returns all artifacts in a repo with metadata
Args:
depth (int): How far down Artifactory folder to look. None will go to bottom of folder.
item_type (str): The item type to search for (file/folder/any).
with_properties (bool): Include artifact properties or not.
Returns:
list: List of all artifacts in a repository.
"""
LOG.info("Searching for all artifacts in %s.", self.repo_name)
if with_properties:
fields = ['stat', 'property.*']
else:
fields = []
artifacts = self.filter(item_type=item_type, depth=depth, fields=fields)
return artifacts
def time_based_retention(self, keep_days=None, time_field='created', item_type='file', extra_aql=None):
"""Retains artifacts based on number of days since creation.
extra_aql example: [{"@deployed": {"$match": "dev"}}, {"@deployed": {"$nmatch": "prod"}}]
This would search for artifacts that were created after <keep_days> with
property "deployed" equal to dev and not equal to prod.
Args:
keep_days (int): Number of days to keep an artifact.
time_field (str): The field of time to look at (created, modified, stat.downloaded).
item_type (str): The item type to search for (file/folder/any).
extra_aql (list). List of extra AQL terms to apply to search
Return:
list: List of artifacts matching retention policy
"""
if extra_aql is None:
extra_aql = []
now = datetime.datetime.now()
before = now - datetime.timedelta(days=keep_days)
created_before = before.strftime("%Y-%m-%dT%H:%M:%SZ")
aql_terms = [{time_field: {"$lt": created_before}}]
aql_terms.extend(extra_aql)
purgeable_artifacts = self.filter(item_type=item_type, depth=None, terms=aql_terms)
return purgeable_artifacts
def count_based_retention(self,
                          retention_count=None,
                          project_depth=2,
                          artifact_depth=3,
                          item_type='folder',
                          extra_aql=None):
    """Find every artifact except the ``retention_count`` most recent.

    Args:
        retention_count (int): Number of artifacts to keep per project.
        project_depth (int): How far down the Artifactory folder hierarchy
            to look for projects.
        artifact_depth (int): How far down the Artifactory folder hierarchy
            to look for specific artifacts.
        item_type (str): The item type to search for (file/folder/any).
        extra_aql (list): List of extra AQL terms to apply to search.

    Returns:
        list: All artifacts eligible for deletion.
    """
    matches = []
    LOG.info("Searching for purgable artifacts with count based retention in %s.", self.repo_name)

    for project in self.filter(depth=project_depth):
        LOG.debug("Processing artifacts for project %s", project)

        # Projects at the repository root report '.' as their path.
        if project['path'] == '.':
            project_path = '{name}'.format(name=project["name"])
        else:
            project_path = '{path}/{name}'.format(path=project["path"],
                                                  name=project["name"])

        aql_terms = [{"path": project_path}]
        if extra_aql:
            aql_terms.extend(extra_aql)

        # Sort newest-first and skip the first `retention_count` entries,
        # leaving only the artifacts past the retention window.
        matches.extend(
            self.filter(
                offset=retention_count,
                item_type=item_type,
                depth=artifact_depth,
                terms=aql_terms,
                sort={"$desc": ["created"]}))

    return matches
| {
"repo_name": "gogoair/lavatory",
"path": "src/lavatory/utils/artifactory.py",
"copies": "1",
"size": "10148",
"license": "apache-2.0",
"hash": 9156925701365416000,
"line_mean": 39.592,
"line_max": 118,
"alpha_frac": 0.5832676389,
"autogenerated": false,
"ratio": 4.240702047638947,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0011363202649275058,
"num_lines": 250
} |
# Default extension for Gherkin artifact files.
ARTIFACTS_EXTENSION = '.feature'

# Gherkin keywords used as regex-support strings when parsing artifacts.
FEATURE = 'feature'
SCENARIO = 'scenario'
ACTOR = 'as_a'
VALUE_PROPOSITION = 'so_that'
OBJECTIVE = 'i_wanna'
CONTEXT = 'given'
ACTION = 'when'
EXPECTED_BEHAVIOR = 'then'
LANGUAGE = 'language'

# Default filename for exports.
DEFAULT_FILENAME = 'default'

# Alert and error messages shown to the user.
# (Grammar fixed: messages previously read "Dont have".)
NO_VALID_ARTIFACTS_MSG = 'You must set a path with valid artifacts'
INVALID_PATH_MSG = 'You must set a valid path'
NO_VALID_PROPOSITION_MSG = 'No value proposition found'
NO_SCENARIOS_MSG = 'This feature does not have scenarios'
NO_STEPS_MSG = 'This scenario does not have steps yet'
NO_VALUE_MSG = 'No value proposition is listed'
INVALID_FEATURE_MSG = 'You must have one valid feature/Check your feature language'

# Domain-of-application tags recognized in artifacts.
SUPPORT = 'support'
NOT_IMPLEMENTED = 'nao_imp'
CORE_FEATURE = 'core'

# Graph labels for plot.ly charts.
OVERALL_TESTS_IMPLEMENTATION_LABEL = 'Overall Features (Implemented x Not Implemented)'
CORE_TESTS_IMPLEMENTATION_LABEL = 'Core Features (Implemented x Not Implemented)'
SCENARIOS_IMPLEMENTATION_LABEL = 'Scenarios (Implemented x Not Implemented)'
CORE_SCENARIOS_IMPLEMENTATION_LABEL = 'Core Scenarios (Implemented x Not Implemented)'
IMPLEMENTED_LABEL = 'Implemented'
NOT_IMPLEMENTED_LABEL = 'Not implemented'
| {
"repo_name": "yurireeis/bddocs",
"path": "bddocs/config/settings.py",
"copies": "1",
"size": "1317",
"license": "mit",
"hash": 6608844391406785000,
"line_mean": 33.6578947368,
"line_max": 87,
"alpha_frac": 0.7661351557,
"autogenerated": false,
"ratio": 3.2761194029850746,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4542254558685075,
"avg_score": null,
"num_lines": null
} |
"""Artificial Adversarial Searches"""
# Author: Lucas David -- <ld492@drexel.edu>
# License: MIT (c) 2016
import abc
import random
import time
import numpy as np
import six
from . import base
@six.add_metaclass(abc.ABCMeta)
class Adversarial(base.SearchBase):
    """Base class for adversarial (game-tree) searches.

    Parameters
    ----------
    time_limit : float (default=np.inf)
        Maximum wall-clock time, in seconds, allowed for a performance.
        By default, the search has unbounded time to decide.

    depth_limit : float (default=np.inf)
        Maximum depth, in hops, explored along a single branch.
        By default, branches are followed until they die out.

    dispose : bool (default=False)
        Always dispose memory after a movement.

    Attributes
    ----------
    started_at : long
        Timestamp at which the performance started;
        ``time.time() - started_at`` approximates how much time has
        passed since `perform` was called.
    """

    MAXIMIZE, MINIMIZE = (0, 1)

    def __init__(self, agent, root=None, time_limit=np.inf,
                 depth_limit=np.inf, dispose=False):
        super(Adversarial, self).__init__(agent=agent, root=root)

        self.dispose = dispose
        self.depth_limit = depth_limit
        self.time_limit = time_limit
        self.started_at = None
class Random(Adversarial):
    """Adversarial search that simply picks a legal move at random."""

    def __init__(self, agent, root=None,
                 time_limit=np.inf, depth_limit=np.inf,
                 dispose=False, random_generator=None):
        super(Random, self).__init__(
            agent=agent, root=root, time_limit=time_limit,
            depth_limit=depth_limit, dispose=dispose)
        # Fall back to a fresh generator when none was supplied.
        self.random_generator = random_generator or random.Random()

    def search(self):
        self.started_at = time.time()

        start = self.root or self.agent.last_state.random()
        options = self.agent.predict(start)

        if options:
            self.solution_candidate_ = self.random_generator.choice(options)
        else:
            self.solution_candidate_ = None

        return self
class MinMax(Adversarial):
    """Min-Max Adversarial Search.

    Notes
    -----
    Exhausting every branch is rarely feasible, so `MinMax` trusts the
    agent's utility function to evaluate states anywhere in the
    derivation tree, not only at terminal positions.
    """

    def search(self):
        self.started_at = time.time()
        self.solution_candidate_ = None

        best = -np.inf
        for child in self.agent.predict(self.root):
            score = self._selection_policy(child)
            if score > best:
                best = score
                self.solution_candidate_ = child

        return self

    def should_maximize_at(self, depth):
        # Even depths (counting the root as 0) are maximizing levels.
        return depth % 2 == self.MAXIMIZE

    def _selection_policy(self, state, depth=1):
        """Min-Max selection policy."""
        children = self.agent.predict(state)

        # Evaluate as a leaf on depth/time exhaustion or a dead branch.
        if (depth > self.depth_limit or not children or
                time.time() - self.started_at > self.time_limit):
            return self.agent.utility(state)

        chooser = max if self.should_maximize_at(depth) else min
        return chooser(self._selection_policy(child, depth + 1)
                       for child in children)
class AlphaBeta(MinMax):
    """Alpha Beta Pruning Adversarial Search.

    Min-Max search with alpha-beta pruning, an optimization strategy for
    branch cutting.
    """

    def _selection_policy(self, state, depth=1, a=-np.inf, b=np.inf):
        """Alpha Beta Pruning Policy.

        `a` (alpha) is the best score the maximizer can already
        guarantee; `b` (beta) the best the minimizer can guarantee.
        Root's children are evaluated at depth 1.
        """
        children = self.agent.predict(state)

        # Leaf evaluation: depth limit reached, dead branch, or timeout.
        if (depth > self.depth_limit or not children or
                time.time() - self.started_at > self.time_limit):
            return self.agent.utility(state)

        # Maximizing levels accumulate with max from -inf; minimizing
        # levels with min from +inf.
        v, interest = ((-np.inf, max)
                       if self.should_maximize_at(depth)
                       else (np.inf, min))

        for c in children:
            v = interest(v, self._selection_policy(c, depth + 1, a, b))

            if self.should_maximize_at(depth):
                a = interest(a, v)
            else:
                b = interest(b, v)

            # Prune: the window is empty, the opponent will never allow
            # this branch to be reached.
            if b <= a: break

        return v
| {
"repo_name": "lucasdavid/artificial",
"path": "artificial/searches/adversarial.py",
"copies": "1",
"size": "4290",
"license": "mit",
"hash": -720069562844128600,
"line_mean": 28.5862068966,
"line_max": 74,
"alpha_frac": 0.593006993,
"autogenerated": false,
"ratio": 3.885869565217391,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4978876558217391,
"avg_score": null,
"num_lines": null
} |
"""Artificial Agents Base"""
# Author: Lucas David -- <ld492@drexel.edu>
# License: MIT (c) 2016
import abc
import six
from sklearn.utils import check_random_state
from ..base import Environment
@six.add_metaclass(abc.ABCMeta)
class AgentBase(object):
    """Base template shared by every agent.

    Defines the basic contract common to all agents.

    Arguments
    ---------
    environment : Environment
        The environment upon which the agent will act.

    actions : list-like (optional)
        The actions available to this agent. Kept only as a reminder for
        `predict` implementations.
    """

    def __init__(self, environment, actions=None, random_state=None):
        self.environment = environment
        self.actions = actions
        self.random_state = check_random_state(random_state)
        self.last_state = None
        self.last_known_state = None

    def perceive(self):
        """Read the environment's current state and remember it."""
        state = self.environment.current_state
        self.last_state = state
        if state:
            # Only non-empty perceptions update the known-state record.
            self.last_known_state = state
        return self

    def act(self):
        """Choose an action to perform over the world and return its code."""
        raise NotImplementedError
| {
"repo_name": "lucasdavid/artificial",
"path": "artificial/agents/base.py",
"copies": "1",
"size": "1303",
"license": "mit",
"hash": -135948974581874860,
"line_mean": 24.0576923077,
"line_max": 69,
"alpha_frac": 0.650038373,
"autogenerated": false,
"ratio": 4.23051948051948,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 52
} |
"""Artificial Agents Test"""
# Author: Lucas David -- <ld492@drexel.edu>
# License: MIT (c) 2016
import warnings
from unittest import TestCase
from artificial import base, agents
from artificial.base import State
from artificial.searches import fringe
class _S(State):
    @property
    def is_goal(self):
        """The goal is reached when the wrapped value equals 10."""
        return self.data == 10

    def h(self):
        """Heuristic: distance of the current value from the goal, 10."""
        return abs(10 - self.data)
class _TEnv(base.Environment):
    def update(self):
        """No-op: this test environment never changes on its own."""
class _GBA(agents.GoalBasedAgent):
    def predict(self, state):
        """Expand a numeric state into decrement, increment and no-op children."""
        moves = ((-1, 0), (1, 1), (0, 2))
        return [_S(state.data + delta, action=code, parent=state)
                for delta, code in moves]
class TableDrivenAgentTest(TestCase):
    """Tests for agents.TableDrivenAgent (percept-history lookup table)."""

    def setUp(self):
        self.env = _TEnv(_S(0))

    def test_sanity(self):
        # NOTE(review): keys here are raw hashes, while test_act uses
        # str(hash(...)) keys -- only test_act actually exercises table
        # lookups, so confirm which key form TableDrivenAgent expects.
        action_map = {
            hash(0): 1,
            hash(1): 10,
            hash(10): 11,
            hash(11): 100,
            hash(100): 101,
        }
        tda = agents.TableDrivenAgent(action_map, self.env,
                                      action_map.values())
        self.assertIsNotNone(tda)

    def test_perceive(self):
        action_map = {
            hash(0): 1,
            hash(1): 10,
            hash(10): 11,
            hash(11): 100,
            hash(100): 101,
        }
        tda = agents.TableDrivenAgent(action_map, self.env,
                                      action_map.values())

        # First perception: percepts is the stringified hash of the
        # initial state.
        tda.perceive()
        self.assertEqual(tda.percepts, str(hash(_S(0))))

        # Percepts accumulate as a concatenated history of state hashes.
        self.env.current_state = _S(100)
        tda.perceive()
        self.assertEqual(tda.percepts,
                         str(hash(_S(0))) + str(hash(_S(100))))

    def test_act(self):
        action_map = {
            str(hash(0)): 1,
            str(hash(1)): 10,
            str(hash(10)): 11,
            str(hash(11)): 100,
            str(hash(100)): 101,
        }
        tda = agents.TableDrivenAgent(action_map, self.env,
                                      action_map.values())

        tda.perceive()
        self.assertEqual(tda.percepts, str(hash(_S(0))))
        # The single-state history maps to action 1.
        action = tda.act()
        self.assertEqual(action, 1)

        self.env.current_state = _S(100)
        tda.perceive()
        self.assertEqual(tda.percepts,
                         str(hash(_S(0))) + str(hash(_S(100))))

        # State "hash(0)+hash(100)" is not described on table, so the
        # agent cannot choose an action.
        tda.perceive()
        action = tda.act()
        self.assertIsNone(action)
class SimpleReflexAgentTest(TestCase):
    """Tests for agents.SimpleReflexAgent (state -> action rules)."""

    def setUp(self):
        self.env = _TEnv(_S(0))

    def test_sanity(self):
        mapping = {_S(0): 1, _S(1): 2, _S(2): 1}
        agent = agents.SimpleReflexAgent(mapping, self.env,
                                         mapping.values())
        self.assertIsNotNone(agent)

    def test_act(self):
        mapping = {_S(0): 1, _S(1): 2, _S(2): 1}
        agent = agents.SimpleReflexAgent(mapping, self.env,
                                         mapping.values())

        # A rule exists for the current state _S(0): action 1.
        self.assertEqual(agent.perceive().act(), 1)

        # No rule matches _S(3), so no action is chosen.
        self.env.current_state = _S(3)
        self.assertIsNone(agent.perceive().act())
class _RA(agents.ResponderAgent, _GBA):
    """Responder agent that borrows `predict` from _GBA."""
class ResponderAgentTest(TestCase):
    """Tests for agents.ResponderAgent through the _RA fixture."""

    def setUp(self):
        self.env = _TEnv(_S(0))

    def test_sanity(self):
        responder = _RA(search=fringe.BreadthFirst, environment=self.env)
        self.assertIsNotNone(responder)

    def test_act(self):
        responder = _RA(search=fringe.BreadthFirst, environment=self.env)

        # From the initial state, acting yields the goal state (data=10).
        answer = responder.perceive().act()
        self.assertTrue(isinstance(answer, _S))
        self.assertEqual(answer.data, 10)

        # The same holds after moving the environment to a new state.
        self.env.current_state = _S(3)
        answer = responder.perceive().act()
        self.assertTrue(isinstance(answer, _S))
        self.assertEqual(answer.data, 10)
class ModelBasedAgentTest(TestCase):
    """Tests for agents.ModelBasedAgent (state inference from a model)."""

    def setUp(self):
        self.env = _TEnv(_S(0))

    def test_infer_state(self):
        class _TestModelBasedAgent(agents.ModelBasedAgent,
                                   agents.SimpleReflexAgent):
            def predict(self, state):
                a = self.rules[state] if state in self.rules else None
                children = []

                if a:
                    # An state has a single action associated =>
                    # takes to a single state. Although this guy is
                    # very limited, this is just a test.
                    children.append(_S(a, action=a))
                return children

        rules = {_S(0): 1, _S(1): 2, _S(2): 1}
        sra = _TestModelBasedAgent(rules, self.env,
                                   rules.values())
        action = sra.perceive().act()
        self.assertEqual(sra.last_state, _S(0))
        self.assertEqual(sra.last_action, 1)
        self.assertEqual(sra.last_action, action)

        # Suddenly, the environment becomes undefined!
        self.env.current_state = None
        sra.perceive()

        # The last state known is 0.
        self.assertEqual(sra.last_known_state, _S(0))

        # The last state is a guess of what would
        # happen if action 1 were taken.
        self.assertEqual(sra.last_state, _S(1, action=1))

    def test_undefined_action_warning(self):
        class _TestModelBasedAgent(agents.ModelBasedAgent,
                                   agents.SimpleReflexAgent):
            def predict(self, state):
                a = self.rules[state] if state in self.rules else None
                children = []
                if a:
                    # Child built WITHOUT an `action` attribute --
                    # presumably what triggers the warning below; confirm
                    # against ModelBasedAgent's inference code.
                    children.append(_S(a))
                return children

        rules = {_S(0): 1, _S(1): 2, _S(2): 1}
        sra = _TestModelBasedAgent(rules, self.env,
                                   rules.values())
        sra.perceive().act()

        # Suddenly, the environment becomes undefined!
        self.env.current_state = None

        # Inferring the next state is expected to emit exactly one
        # UserWarning.
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            sra.perceive()
            self.assertEqual(len(w), 1)
            self.assertTrue(issubclass(w[-1].category, UserWarning))
class GoalBasedAgentTest(TestCase):
    """Tests for agents.GoalBasedAgent through the _GBA fixture."""

    def setUp(self):
        self.env = _TEnv(_S(0))

    def test_sanity(self):
        gba = _GBA(fringe.BreadthFirst,
                   environment=self.env,
                   actions=[0, 1, 2])
        self.assertIsNotNone(gba)

    def test_act(self):
        gba = _GBA(fringe.BreadthFirst,
                   environment=self.env,
                   actions=[0, 1, 2])
        # Incrementing (action 1) is always the right move toward the
        # goal value 10.
        for _ in range(10):
            self.assertEqual(gba.perceive().act(), 1)

        # Test verbosity.
        # NOTE(review): this block is identical to the one above -- no
        # verbosity flag is actually passed. Confirm the intent or
        # remove the duplication.
        gba = _GBA(fringe.BreadthFirst,
                   environment=self.env,
                   actions=[0, 1, 2])
        for _ in range(10):
            self.assertEqual(gba.perceive().act(), 1)

    def test_is_agent_goal(self):
        gba = _GBA(fringe.BreadthFirst,
                   environment=self.env,
                   actions=[0, 1, 2])
        self.assertFalse(gba.is_goal(_S(0)))
        self.assertTrue(gba.is_goal(_S(10)))
| {
"repo_name": "lucasdavid/artificial",
"path": "artificial/agents/tests/agents_test.py",
"copies": "1",
"size": "7158",
"license": "mit",
"hash": 1081405976653463000,
"line_mean": 27.2924901186,
"line_max": 70,
"alpha_frac": 0.5217937972,
"autogenerated": false,
"ratio": 3.6992248062015505,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9720631096620181,
"avg_score": 0.00007750135627373479,
"num_lines": 253
} |
"""Artificial Environment"""
# Author: Lucas David -- <ld492@drexel.edu>
# License: MIT (c) 2016
import abc
import logging
import six
logger = logging.getLogger('artificial')
@six.add_metaclass(abc.ABCMeta)
class Environment(object):
    """Environment Base Class.

    Defines how agents and states intertwine, modeling a problem domain
    into the computer. You must subclass `Environment` for every
    different problem faced.

    Parameters
    ----------
    initial_state : State-like object, default=None
        Initial state of the environment.

    Attributes
    ----------
    state_class_ : State subclass
        A link to the current domain's State specification, for reference
        purposes. Useful for domain-specific methods that create random
        instances of a state with `...state_class_.random()`.

    current_state : State-like object
        The model that represents the current state of the world.

    agents : list of Agent-like objects
        All agents currently inserted into the domain.
    """

    # Class-wide singleton holding the running environment (see `current`).
    _instance = None

    state_class_ = None

    def __init__(self, initial_state=None):
        self.current_state = self.initial_state = initial_state
        self.agents = []
        Environment._instance = self

    def build(self):
        """Build the environment, if necessary."""
        return self

    def dispose(self):
        """Release state and agents, clearing the singleton if it is `self`."""
        self.current_state = None
        self.agents = []

        if self is Environment._instance:
            Environment._instance = None

        return self

    def __del__(self):
        self.dispose()

    @classmethod
    def current(cls):
        """Return the environment currently running.

        Raises
        ------
        RuntimeError
            If no environment is currently running.
        """
        if Environment._instance is None:
            # Bug fix: the message previously read 'no %s is not
            # currently running' -- a double negative.
            raise RuntimeError('no %s is currently running'
                               % Environment.__name__)
        return Environment._instance

    @abc.abstractmethod
    def update(self):
        """Update the environment, should be overridden to reflect the
        changes in the real world.
        """

    def finished(self):
        # Truthy only when a current state exists and it is a goal state.
        return self.current_state and self.current_state.is_goal

    def live(self, n_cycles=100):
        """Make the environment alive!

        Bring the Environment to life, and run it through `n_cycles`
        cycles.

        Parameters
        ----------
        n_cycles : int, default=100
            The number of cycles in which `env.update` will be called.
        """
        self.build()
        logger.info('initial state: %s', str(self.current_state))

        try:
            cycle = 0

            while cycle < n_cycles and not self.finished():
                self.update()
                cycle += 1
                # Lazy %-args instead of eager '%' formatting, so the
                # string is only built if the record is emitted.
                logger.info('#%i: {%s}', cycle, str(self.current_state))
        except KeyboardInterrupt:
            logger.info('canceled by user.')
        finally:
            logger.info('final state: %s', str(self.current_state))
| {
"repo_name": "lucasdavid/artificial",
"path": "artificial/base/environment.py",
"copies": "1",
"size": "2911",
"license": "mit",
"hash": 8222140090343056000,
"line_mean": 24.9910714286,
"line_max": 78,
"alpha_frac": 0.6035726554,
"autogenerated": false,
"ratio": 4.555555555555555,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5659128210955555,
"avg_score": null,
"num_lines": null
} |
"""Artificial Fringe Searches"""
# Author: Lucas David -- <ld492@drexel.edu>
# License: MIT (c) 2016
import abc
import six
from . import base
from .. import agents
from ..utils import PriorityQueue
@six.add_metaclass(abc.ABCMeta)
class FringeBase(base.SearchBase):
    """Base class for fringe-driven searches.

    Searchers derived from this class keep a *fringe*: the frontier of
    states waiting to be expanded. The fringe is a `list` by default,
    but subclasses may override it with other structures, such as `sets`
    or `PriorityQueues`.

    Attributes
    ----------
    fringe_ : list
        States currently on the search frontier.
    """

    def __init__(self, agent, root=None):
        super(FringeBase, self).__init__(agent=agent, root=root)
        self.fringe_ = list(self.space_) if self.space_ else []

    def restart(self, root):
        super(FringeBase, self).restart(root=root)
        self.fringe_ = list(self.space_)
        return self

    def search(self):
        while self.fringe_:
            current = self.extract()

            if current is None:
                continue
            if current.is_goal:
                self.solution_candidate_ = current
                break

            self.expand(current)

        return self

    @abc.abstractmethod
    def extract(self):
        """Fringe extraction policy: remove and return the next state."""

    @abc.abstractmethod
    def expand(self, state):
        """Fringe expansion policy: push `state`'s children onto the fringe.

        Parameters
        ----------
        state : (State)
            State that should be expanded.
        """
class BreadthFirst(FringeBase):
    """Breadth First Search.

    Pops states from the front of the fringe and appends expanded
    children to the back (FIFO order). Complete and minimal.
    """

    def extract(self):
        return self.fringe_.pop(0)

    def expand(self, state):
        fresh = [child for child in self.agent.predict(state)
                 if child not in self.space_]
        self.space_.update(fresh)
        self.fringe_.extend(fresh)
class UniformCost(FringeBase):
    """Uniform Cost Search.

    Uses a `PriorityQueue` as fringe, adding and removing states
    according to the path cost required to reach them. This search is
    complete, minimal and optimal.
    """

    def __init__(self, agent, root=None):
        super(UniformCost, self).__init__(agent=agent, root=root)

        # Path cost `g` must be computable, hence the agent requirement.
        assert isinstance(agent, agents.UtilityBasedAgent), \
            'Uniform Cost Search requires an utility based agent.'

        self.fringe_ = PriorityQueue()
        if self.root:
            self.fringe_.add(self.root)

    def restart(self, root):
        super(UniformCost, self).restart(root=root)
        self.fringe_ = PriorityQueue()
        self.fringe_.add(self.root)
        return self

    def extract(self):
        # Pops the cheapest (lowest g) state first.
        return self.fringe_.pop()

    def expand(self, state):
        self.space_.add(state)

        for child in self.agent.predict(state):
            # `self.fringe_[child][0]` is presumably the stored priority
            # (the child's g when it was enqueued) -- confirm against
            # PriorityQueue's __getitem__.
            if (child not in self.space_ and
                    (child not in self.fringe_ or
                     child.g < self.fringe_[child][0])):
                # Expanded nodes were already optimally reached.
                # Just ignore these instances instance.
                # This is either a new state or its costs is smaller than
                # the instance found in the fringe, being a shorter path.
                # Relax edge (thank you for this, Dijkstra).
                self.fringe_.add(child, priority=child.g)
class GreedyBestFirst(UniformCost):
    """Greedy Best First Search.

    Like Uniform Cost, keeps a `PriorityQueue` fringe, but orders it
    purely by the heuristic value of each state. Not complete, optimal
    or minimal, yet it usually reaches a solution quickly while
    expanding few states.
    """

    def expand(self, state):
        self.space_.add(state)

        for child in self.agent.predict(state):
            already_known = child in self.space_ or child in self.fringe_
            if not already_known:
                # First arrival wins: recurrent states are likely to
                # share the same heuristic value, and the earliest one
                # is the closest to the root in hops.
                self.fringe_.add(child, priority=child.h())
class AStar(UniformCost):
    """A Star (A*) Search.

    Orders the fringe by `f = g + h`: the path cost so far combined with
    the heuristic estimate, merging Uniform Cost and Greedy Best First.
    Complete, minimal and optimal provided the heuristic is admissible
    and consistent.
    """

    def expand(self, state):
        self.space_.add(state)

        for child in self.agent.predict(state):
            if child in self.space_:
                continue
            # Enqueue new states, or relax ones already in the fringe
            # when a cheaper f-value was found.
            if child not in self.fringe_ or child.f() < self.fringe_[child][0]:
                self.fringe_.add(child, priority=child.f())
class DepthFirst(FringeBase):
    """Depth First Search.

    Parameters
    ----------
    prevent_cycles : [False|'branch'|'tree'] (default=False)
        Prevent cyclical searches.
        Options are:

        False : classic Depth First Search. Algorithm will NOT keep
        tab on repetitions and cycles may occur.

        'branch' : repetitions in current branch will not be allowed.
        Requires :math:`O(2d)` memory, as references to predecessors and
        a set of states in the current path are kept.

        'tree' : no repetitions are allowed. This option requires
        :math:`O(b^d + d)`, being no better than Breadth-First search
        in terms of memory requirement.
        It can still perform better, though, given a problem domain
        where solutions are "far" from the root and minimizing the
        number of hops to the solution is not necessary (something
        which is guaranteed by `BreadthFirst`).

    limit : [None|int] (default=None)
        If a positive integer, executes Limited Depth First Search
        up to a limit and shortcuts branches that violate this limit.

        If no solution candidate is found before the limit,
        `DepthLimited` won't be able to properly answer the environment
        with a action list.
        Obviously, this search is not complete, minimal, or optimal.

        If None, no limit is imposed and original Depth First algorithm
        is executed.
    """

    def __init__(self, agent, root=None, prevent_cycles=False, limit=None):
        super(DepthFirst, self).__init__(agent=agent, root=root)

        self.prevent_cycles = prevent_cycles
        self.limit = limit
        # Last state returned by `extract`; used to detect branch switches.
        self.last_expanded = None

    def extract(self):
        previous = self.last_expanded
        # LIFO: the fringe head is the deepest unexplored state.
        current = self.fringe_.pop(0)
        common = current.parent

        self.last_expanded = current

        if self.prevent_cycles == 'branch':
            # Remove other branches from the search space.
            if previous and common and previous != common:
                # We switched branches, perform removal.
                # Walk from the abandoned tip back up to the common
                # ancestor, dropping each state from the seen-set.
                while previous and previous != common:
                    self.space_.remove(previous)
                    previous = previous.parent

        if self.limit is None:
            return current

        # Checks if current depth violates limit constraint.
        # Depth is recomputed by walking the parent chain.
        d = 0
        p = current.parent
        while p:
            p, d = p.parent, d + 1

        # Returning None makes `search` skip this state (over-limit).
        if d <= self.limit:
            return current

    def expand(self, state):
        children = self.agent.predict(state)

        if self.prevent_cycles:
            # Both 'branch' and 'tree' modes filter already-seen states.
            children = [s for s in children if s not in self.space_]
            self.space_.update(children)

        # Prepending keeps the newest children at the head (depth-first).
        self.fringe_ = children + self.fringe_
class IterativeDeepening(base.SearchBase):
    """Iterative Deepening Search.

    Runs `DepthFirst` once per value in `iterations`, passing that value
    as the depth limit. Minimal whenever `iterations` covers the left
    side of the Natural set (i.e., 1, 2, 3, 4, ...), although not
    complete nor necessarily optimal.

    Parameters
    ----------
    prevent_cycles : [False|'branch'|'tree'] (default=False)
        Cycle-prevention policy forwarded to the inner `DepthFirst`
        search; see that class for the meaning of each option.

    iterations : [array-like|range] (default=range(10))
        Sequence of depth limits passed to `DepthFirst`, in order.
    """

    def __init__(self, agent, root=None, prevent_cycles=False,
                 iterations=range(10)):
        super(IterativeDeepening, self).__init__(agent=agent, root=root)

        self.iterations = iterations
        self.depth_limited = DepthFirst(agent=agent, root=root,
                                        prevent_cycles=prevent_cycles)

    def search(self):
        for limit in self.iterations:
            self.depth_limited.limit = limit
            candidate = (self.depth_limited
                         .restart(root=self.root)
                         .search()
                         .solution_candidate_)
            self.solution_candidate_ = candidate

            # Stop as soon as any depth limit yields a solution.
            if candidate:
                break

        return self
| {
"repo_name": "lucasdavid/artificial",
"path": "artificial/searches/fringe.py",
"copies": "1",
"size": "10008",
"license": "mit",
"hash": 531827173218150100,
"line_mean": 31.1800643087,
"line_max": 79,
"alpha_frac": 0.60931255,
"autogenerated": false,
"ratio": 4.326848249027237,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5436160799027236,
"avg_score": null,
"num_lines": null
} |
"""Artificial Genetic Algorithm Tests"""
# Author: Lucas David -- <ld492@drexel.edu>
# License: MIT (c) 2016
import string
import time
from unittest import TestCase
import numpy as np
from artificial import base, agents
from artificial.searches import genetic
from nose_parameterized import parameterized
random_state = np.random.RandomState(0)
# Classes for Hello World spelling problem.
class _S(base.GeneticState):
    """GA state for the 'hello world' spelling problem."""

    # Target phrase the GA must evolve toward.
    expected = 'hello world'
    # Gene alphabet: lowercase ascii letters plus space.
    alphabet = list(string.ascii_lowercase + ' ')

    def h(self):
        """Heuristic: number of positions differing from `expected`."""
        return sum((1 if self.data[i] != self.expected[i] else 0
                    for i in range(min(len(self.data), len(self.expected)))))

    def cross(self, other):
        """Single-point crossover: prefix from `self`, suffix from `other`."""
        cross_point = random_state.randint(0, len(_S.expected))
        return _S(self.data[:cross_point] + other.data[cross_point:])

    def mutate(self, factor, probability):
        # We ignore factor, as we are dealing with binary genes.
        # Each position mutates independently with `probability`.
        m = random_state.rand(len(self.data)) < probability

        if np.any(m):
            d = np.array(list(self.data))
            d[m] = random_state.choice(self.alphabet, size=m.sum())
            self.data = ''.join(d)

        return self

    @classmethod
    def random(cls):
        """Build a state from a uniformly random string of the right length."""
        return cls(''.join(random_state.choice(cls.alphabet,
                                               size=len(cls.expected))))

    @property
    def is_goal(self):
        return self.data == _S.expected
class _A(agents.UtilityBasedAgent):
    def predict(self, state):
        """Intentionally predicts nothing (returns None)."""
class _E(base.Environment):
    state_class_ = _S

    def update(self):
        """Intentionally a no-op."""
# Classes for a simple numerical optimization.
class _S2(base.GeneticState):
    """GA state for a simple numerical optimization.

    Each state wraps ten binary genes; the goal is the all-ones vector.
    """

    @classmethod
    def random(cls):
        """Build a state with ten uniformly random binary genes."""
        return _S2([random_state.randint(0, 2) for _ in range(10)])

    def mutate(self, factor, probability):
        """Flip each gene independently with the given probability."""
        m = random_state.rand(len(self.data)) < probability

        if m.any():
            data = np.array(self.data)
            data[m] = 1 - data[m]
            self.data = data.tolist()

        return self

    def cross(self, other):
        """Single-point crossover: prefix from `self`, suffix from `other`.

        Bug fix: the suffix previously came from `self.data`, so
        crossover just cloned `self` and ignored `other` entirely
        (compare `_S.cross` above).
        """
        cross_point = random_state.randint(0, len(self.data))
        return _S2(self.data[:cross_point] + other.data[cross_point:])

    def h(self):
        """Heuristic: negative count of 1-genes (lower is better)."""
        return -sum(1 if i == 1 else 0 for i in self.data)

    @property
    def is_goal(self):
        # Goal: every element is 1.
        return sum(self.data) == len(self.data)
class _E2(base.Environment):
    state_class_ = _S2

    def update(self):
        """Intentionally a no-op."""
class GeneticAlgorithmTest(TestCase):
    """Tests for searches.genetic.GeneticAlgorithm."""

    def setUp(self):
        # Start from an arbitrary string; the GA must evolve it into
        # 'hello world' (see _S.expected).
        self.env = _E(_S('UkDmEmaPCvK'))
        self.agent = _A(search=genetic.GeneticAlgorithm,
                        environment=self.env,
                        actions=None)
        self.random_state = np.random.RandomState(0)

    def test_sanity(self):
        ga = genetic.GeneticAlgorithm(self.agent,
                                      random_state=self.random_state)
        self.assertIsNotNone(ga)

    def test_generate_population(self):
        # Default population size is expected to be 1000.
        ga = genetic.GeneticAlgorithm(self.agent, max_evolution_cycles=1,
                                      random_state=self.random_state)
        ga.search()
        self.assertEqual(ga.population_size_, 1000)

        # Assert that the arrays necessary for the search were disposed.
        self.assertIsNone(ga.population_)
        self.assertIsNone(ga.selected_)
        self.assertIsNone(ga.offspring_)

        # An explicit population_size must be honored.
        ga = genetic.GeneticAlgorithm(self.agent, population_size=20,
                                      max_evolution_cycles=1,
                                      random_state=self.random_state)
        ga.search()
        self.assertEqual(ga.population_size_, 20)

        # With a duration cap instead of a size, the population is
        # expected to grow past 100.
        ga = genetic.GeneticAlgorithm(self.agent,
                                      max_evolution_cycles=10,
                                      max_evolution_duration=1,
                                      n_jobs=1,
                                      random_state=self.random_state)
        ga.search()
        self.assertGreater(ga.population_size_, 100)

    @parameterized.expand([
        'random', 'tournament', 'roulette', 'gattaca'
    ])
    def test_select_for_breeding(self, method):
        # Every supported breeding-selection method must yield exactly
        # n_selected individuals.
        ga = genetic.GeneticAlgorithm(self.agent,
                                      n_selected=20,
                                      breeding_selection=method,
                                      max_evolution_cycles=1)
        (ga.search_start().generate_population().cycle_start()
         .select_for_breeding())
        self.assertEqual(len(ga.selected_), 20)

    def test_breed(self):
        ga = genetic.GeneticAlgorithm(self.agent,
                                      population_size=100, n_selected=100)
        (ga.search_start().generate_population().cycle_start()
         .select_for_breeding().breed())
        self.assertEqual(len(ga.population_), 100)
        # Selected individuals are paired, so offspring is half the
        # selection size.
        self.assertEqual(len(ga.offspring_), 50)

    @parameterized.expand([
        (dict(mutation_probability=.2), dict(population_size_=1000,
                                             n_selected_=1000)),
    ])
    def test_search(self, params, expected):
        ga = genetic.GeneticAlgorithm(self.agent,
                                      random_state=self.random_state, **params)
        solution = ga.search().solution_candidate_

        # Attributes were set as expected.
        for key, value in expected.items():
            self.assertEqual(getattr(ga, key), value)

        # Assert clean-up was made.
        self.assertIsNone(ga.offspring_)
        self.assertIsNone(ga.selected_)

        # Assert it eventually finds a solution.
        self.assertIsNotNone(solution)
        self.assertEqual(solution.data, 'hello world')

    @parameterized.expand([
        ({
             'natural_selection': 'elitism',
             'max_evolution_duration': 5,
             'mutation_probability': .2
         }, 5.5),
        ({
             'natural_selection': 'random',
             'max_evolution_duration': 5,
             'mutation_probability': .2
         }, 5.5),
        ({
             'max_evolution_duration': 5,
             'mutation_probability': .2,
             'n_jobs': 4,
             'debug': True
         }, 5.5)
    ])
    def test_search_duration_constraint(self, params, acceptable_elapsed):
        ga = genetic.GeneticAlgorithm(self.agent,
                                      random_state=self.random_state,
                                      **params)
        elapsed = time.time()
        ga.search()
        elapsed = time.time() - elapsed

        # Assert that the duration constraint was respected
        # (half a second of slack over max_evolution_duration).
        self.assertLess(elapsed, acceptable_elapsed)
        self.assertIsNotNone(ga.solution_candidate_)

    def test_preemption_by_genetic_similarity(self):
        expected_variability = .4

        a = _A(search=genetic.GeneticAlgorithm, environment=_E2())
        # mutation_probability=0 lets the population converge, which
        # should trigger the similarity-based preemption.
        ga = genetic.GeneticAlgorithm(
            a, max_evolution_duration=60,
            min_genetic_similarity=expected_variability,
            population_size=50,
            mutation_probability=0,
            random_state=self.random_state,
            debug=True).search()

        # Assert that the last population's variability is smaller
        # than the `min_genetic_similarity` parameter passed.
        self.assertLessEqual(ga.variability_, expected_variability)
        self.assertIsNotNone(ga.solution_candidate_)
        self.assertGreaterEqual(a.utility(ga.solution_candidate_), 7)

    def test_genetic_similarity_raises_error(self):
        # Calling genetic_similarity outside a running search is an error.
        ga = genetic.GeneticAlgorithm(
            self.agent, mutation_factor=.5, mutation_probability=1,
            max_evolution_duration=4, min_genetic_similarity=.5,
            random_state=self.random_state)

        with self.assertRaises(RuntimeError):
            ga.genetic_similarity()

    @parameterized.expand([
        (dict(n_selected='all'),),
        (dict(breeding_selection='rand0m'),),
        (dict(natural_selection='steady_state'),),
        (dict(population_size=100, breeding_selection='tournament',
              tournament_size=200),),
        (dict(population_size=.5, max_evolution_cycles=1),),
    ])
    def test_raises_value_errors(self, params):
        print(params)
        # Assert raises ValueError when parameters are incorrect.
        with self.assertRaises(ValueError):
            genetic.GeneticAlgorithm(self.agent,
                                     random_state=self.random_state,
                                     **params).search()
| {
"repo_name": "lucasdavid/artificial",
"path": "artificial/searches/tests/genetic_test.py",
"copies": "1",
"size": "8606",
"license": "mit",
"hash": 7883062666656292,
"line_mean": 32.486381323,
"line_max": 79,
"alpha_frac": 0.5712293749,
"autogenerated": false,
"ratio": 4.072882158069096,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5144111532969097,
"avg_score": null,
"num_lines": null
} |
# Artificial Intelligence Homework
# Project: Birds V formation simulation
#
# By: Ryan Gilera
# Date: 16/02/14
#
#import simpleguitk as simplegui
import SimpleGUICS2Pygame.simpleguics2pygame as simplegui
import random
import math
#Global Variables
HEIGHT = 600
WIDTH = 900
DISTANCE_BET_BIRDS = 20
SPEED_LIMIT = 20
RADIUS = 120
v_key = 12
birds = [] #A list that will hold the birds clas members
formation_circle = False
formation_v = True
max_birds = 0
###########################################################
class Bird:
    """A single bird in the flock simulation.

    Holds position/velocity state plus the formation-bookkeeping flags
    that the global alignment logic (alignbird) reads and writes.
    """

    def __init__(self, bird_pos, radius, bird_velocity, tag, speed_limiter, in_formation):
        self.radius = radius                # wall-bounce radius
        self.bird_pos = bird_pos            # current [x, y] position
        self.bird_vel = bird_velocity       # current [dx, dy] velocity
        self.tag = tag                      # True once locked into a formation slot
        self.speed_limiter = speed_limiter  # divisor that throttles approach speed
        self.in_formation = in_formation    # True while flying as part of a formation
        self.new_pos = [0, 0]               # target slot assigned by the formation logic
        self.fixed_tag = False              # True while accelerating towards new_pos

    def update_new_pos(self, x, y):
        """Record the formation slot this bird should fly towards."""
        self.new_pos[0] = x
        self.new_pos[1] = y

    def draw_bird(self, canvas):
        """Advance the bird by its velocity and draw the matching sprite."""
        #update bird
        self.bird_pos[0] += self.bird_vel[0]
        self.bird_pos[1] += self.bird_vel[1]
        # Face the sprite in the direction of horizontal travel
        # (imageL/imageR are module-level sprites, 134x127 px).
        if self.bird_vel[0] < 0:
            canvas.draw_image(imageL, (134/2, 127/2), (134, 127), self.bird_pos, (134/2, 127/2))
        else:
            canvas.draw_image(imageR, (134/2, 127/2), (134, 127), self.bird_pos, (134/2, 127/2))

    def bounce(self):
        """Reflect the velocity component when the bird hits a canvas edge."""
        if self.bird_pos[0] > WIDTH - self.radius:
            self.bird_vel[0] = -self.bird_vel[0]
        if self.bird_pos[1] > HEIGHT - self.radius:
            self.bird_vel[1] = -self.bird_vel[1]
        if self.bird_pos[0] < self.radius:
            self.bird_vel[0] = -self.bird_vel[0]
        if self.bird_pos[1] < self.radius:
            self.bird_vel[1] = -self.bird_vel[1]
def button_cplus():
    """Grow the circle-formation radius by 5 pixels."""
    global RADIUS
    RADIUS += 5

def button_cminus():
    """Shrink the circle-formation radius by 5 pixels."""
    global RADIUS
    RADIUS -= 5

def button_vplus():
    """Widen the V formation by increasing the spacing multiplier."""
    global v_key
    v_key += 1

def button_vminus():
    """Tighten the V formation by decreasing the spacing multiplier."""
    global v_key
    v_key -= 1
# Randomizer for a bird's initial spawn velocity (each component ends up at least 2 in magnitude)
def spawnPOS():
    """Return a random [dx, dy] starting velocity.

    Each component is nudged by random steps until it escapes the
    dead zone (-2, 2), so both components end up with magnitude >= 2.
    """
    velocity = [0, 0]
    while -2 < velocity[0] < 2:
        velocity[0] += random.randrange(-5, 5)
    while -2 < velocity[1] < 2:
        velocity[1] += random.randrange(-5, 5)
    return velocity
#Magic starts here :) Spawn new bird in each click
def mouseclick(pos):
    """Spawn one new bird at the clicked position, capped at nine birds."""
    global birds, max_birds
    if max_birds >= 9:
        return
    birds.append(Bird([pos[0], pos[1]], 10, spawnPOS(), False,
                      SPEED_LIMIT, False))
    max_birds += 1
#button handler function for circle formation
def button_handler_c():
    """Switch the flock into circle formation (no-op if already active)."""
    global formation_circle, formation_v
    if formation_circle:
        return
    formation_circle = True
    formation_v = False
    for bird in birds:
        # Reset formation bookkeeping so every bird re-approaches its slot.
        bird.tag = False
        bird.speed_limiter = SPEED_LIMIT
        # Nudge stalled birds so they can actually fly to their slot.
        if bird.bird_vel[0] == 0:
            bird.bird_vel[0] += random.randrange(2, 5)
            bird.bird_vel[1] += random.randrange(2, 5)
#button handler function for V formation
def button_handler_v():
    """Switch the flock into V formation (no-op if already active)."""
    global formation_circle, formation_v
    if formation_v:
        return
    formation_circle = False
    formation_v = True
    for bird in birds:
        # Reset formation bookkeeping so every bird re-approaches its slot.
        bird.tag = False
        bird.speed_limiter = SPEED_LIMIT
        # Nudge stalled birds so they can actually fly to their slot.
        if bird.bird_vel[0] == 0:
            bird.bird_vel[0] += random.randrange(2, 5)
            bird.bird_vel[1] += random.randrange(2, 5)
#drawing handler function. Automatically runs itself 60 times per second (to produce animation like movement)
def draw_handler(canvas):
    """Frame callback: paint the background and titles, then every bird."""
    # Background image.
    canvas.draw_image(bg, (900 / 2, 600 / 2), (900, 600), (900/2, 600/2), (900, 600))
    # Title "F LAPPYBIRD S".
    canvas.draw_text("F", (81, 36), 28, "Navy", "sans-serif")
    canvas.draw_text("LAPPYBIRD", (98, 30), 18, "Navy", "sans-serif")
    canvas.draw_text("S", (182, 36), 28, "Navy", "sans-serif")
    # Subtitle is drawn three times (red twice, black last) for a layered look.
    for colour in ("Red", "Red", "Black"):
        canvas.draw_text("Flight Formation Simulation", (21, 50), 16, colour, "monospace")
    canvas.draw_text("By: Ryan Gilera", (91, 80), 12, "Black", "monospace")
    # Move, draw and wall-bounce each bird, then re-align the flock.
    for bird in birds:
        bird.draw_bird(canvas)
        bird.bounce()
    alignbird()
#the main part of the program. It updates birds positions and velocity
def alignbird():
    """Steer every follower bird (index > 0) towards its formation slot.

    birds[0] acts as the leader; the updateC*/birdLeft/birdRight helpers
    publish the current target slot through the module-level ``new_pos``
    (and, for V formation, ``ref_vel``) globals.
    """
    key = 0
    for j, bola in enumerate(birds):
        if j > 0:
            # Straight-line distance from this bird to the leader.
            distance = math.sqrt(
                math.pow((birds[j].bird_pos[0] - birds[0].bird_pos[0]), 2) +
                math.pow((birds[j].bird_pos[1] - birds[0].bird_pos[1]), 2))
            #updates birds positions for formation V
            if formation_v == True and formation_circle == False:
                # Odd indices go on the left wing, even on the right;
                # `key` grows so pairs sit progressively further back.
                if ((j % 2) == 1):
                    key += v_key
                    birdLeft(key)
                    birds[j].update_new_pos(new_pos[0], new_pos[1])
                else:
                    birdRight(key)
                    birds[j].update_new_pos(new_pos[0], new_pos[1])
            # Updates birds positions for the circle formation: each index
            # gets a fixed compass slot around the leader.
            if formation_circle == True and formation_v == False:
                if j == 1:
                    updateCTopPos()
                    birds[j].update_new_pos(new_pos[0], new_pos[1])
                if j == 2:
                    updateCBottomPos()
                    birds[j].update_new_pos(new_pos[0], new_pos[1])
                if j == 3:
                    updateCLeftPos()
                    birds[j].update_new_pos(new_pos[0], new_pos[1])
                if j == 4:
                    updateCRightPos()
                    birds[j].update_new_pos(new_pos[0], new_pos[1])
                if j == 5:
                    updateCTopRight()
                    birds[j].update_new_pos(new_pos[0], new_pos[1])
                if j == 6:
                    updateCTopLeft()
                    birds[j].update_new_pos(new_pos[0], new_pos[1])
                if j == 7:
                    updateCBottomRight()
                    birds[j].update_new_pos(new_pos[0], new_pos[1])
                if j == 8:
                    updateCBottomLeft()
                    birds[j].update_new_pos(new_pos[0], new_pos[1])
            # Distance from this bird to its assigned slot.
            new_pos_distance = math.sqrt(math.pow((birds[j].bird_pos[0] - new_pos[0]), 2) +
                                         math.pow((birds[j].bird_pos[1] - new_pos[1]), 2))
            #If birds are in position, maintain velocity so that it will be fix on that new position
            if birds[j].tag == True:
                birds[j].in_formation = True
                birds[j].bird_vel[0] = (birds[j].new_pos[0] - birds[j].bird_pos[0])
                birds[j].bird_vel[1] = (birds[j].new_pos[1] - birds[j].bird_pos[1])
            else:
                #Else sets the birds into accelaration mode
                # NOTE(review): `ref_vel` is only assigned by birdLeft/
                # birdRight (V formation); in circle mode this reads a
                # stale or unset global -- confirm intended.
                if (distance <= (math.hypot(ref_vel[0], ref_vel[1]) + 50)):
                    birds[j].fixed_tag = True
                #Accelerates the birds so that they can move to the new_pos
                #This is always true if the birds are still far from the new_pos
                if birds[j].fixed_tag == True or ((birds[j].in_formation == True) and birds[j].tag == False):
                    if new_pos_distance > 0:
                        birds[j].bird_vel[0] = (birds[j].new_pos[0] - birds[j].bird_pos[0])/birds[j].speed_limiter
                        birds[j].bird_vel[1] = (birds[j].new_pos[1] - birds[j].bird_pos[1])/birds[j].speed_limiter
                        #This updates and increases the acceleration if necessary to reach new_pos
                        if (math.hypot(birds[j].bird_vel[0], birds[j].bird_vel[1]) <= math.hypot(birds[0].bird_vel[0], birds[0].bird_vel[1]) and
                                birds[j].speed_limiter > 1):
                            birds[j].speed_limiter -= 1
                        else:
                            #if all seems in position, tag it to True
                            birds[j].tag = True
                else:
                    #if all seems in position, tag it to True
                    birds[j].tag = True
                    birds[j].fixed_tag = False
def birdLeft(multiplier):
    """Publish the slot on the leader's left flank, `multiplier` spacings back,
    through the module-level ``new_pos``/``pre_pos``/``ref_vel`` globals."""
    global ref_vel, pre_pos, new_pos, formation_pos, v_mid_length
    leader = birds[0]
    ref_vel = [leader.bird_vel[0] * multiplier, leader.bird_vel[1] * multiplier]
    pre_pos = [leader.bird_pos[0] - ref_vel[0], leader.bird_pos[1] - ref_vel[1]]
    # Rotate the trailing offset 90 degrees to push the slot off-axis.
    new_pos = [pre_pos[0] + ref_vel[1], pre_pos[1] - ref_vel[0]]

def birdRight(multiplier):
    """Publish the slot on the leader's right flank, `multiplier` spacings back,
    through the module-level ``new_pos``/``pre_pos``/``ref_vel`` globals."""
    global ref_vel, pre_pos, new_pos, v_mid_length
    leader = birds[0]
    ref_vel = [leader.bird_vel[0] * multiplier, leader.bird_vel[1] * multiplier]
    pre_pos = [leader.bird_pos[0] - ref_vel[0], leader.bird_pos[1] - ref_vel[1]]
    # Rotate the trailing offset the opposite way from birdLeft.
    new_pos = [pre_pos[0] - ref_vel[1], pre_pos[1] + ref_vel[0]]
def updateCTopPos():
    """Slot directly above the leader at distance RADIUS."""
    global new_pos
    new_pos = [birds[0].bird_pos[0], birds[0].bird_pos[1] - RADIUS]

def updateCBottomPos():
    """Slot directly below the leader at distance RADIUS."""
    global new_pos
    new_pos = [birds[0].bird_pos[0], birds[0].bird_pos[1] + RADIUS]

def updateCLeftPos():
    """Slot directly left of the leader at distance RADIUS."""
    global new_pos
    new_pos = [birds[0].bird_pos[0] - RADIUS, birds[0].bird_pos[1]]

def updateCRightPos():
    """Slot directly right of the leader at distance RADIUS."""
    global new_pos
    new_pos = [birds[0].bird_pos[0] + RADIUS, birds[0].bird_pos[1]]

def updateCTopRight():
    """Slot at 45 degrees up-right of the leader on the RADIUS circle."""
    global new_pos
    new_pos = [birds[0].bird_pos[0] + (RADIUS * (math.cos(math.radians(45)))),
               birds[0].bird_pos[1] - (RADIUS * (math.sin(math.radians(45))))]

def updateCTopLeft():
    """Slot at 45 degrees up-left of the leader on the RADIUS circle."""
    global new_pos
    new_pos = [birds[0].bird_pos[0] - (RADIUS * (math.cos(math.radians(45)))),
               birds[0].bird_pos[1] - (RADIUS * (math.sin(math.radians(45))))]

def updateCBottomRight():
    """Slot at 45 degrees down-right of the leader on the RADIUS circle."""
    global new_pos
    new_pos = [birds[0].bird_pos[0] + (RADIUS * (math.cos(math.radians(45)))),
               birds[0].bird_pos[1] + (RADIUS * (math.sin(math.radians(45))))]

def updateCBottomLeft():
    """Slot at 45 degrees down-left of the leader on the RADIUS circle."""
    global new_pos
    new_pos = [birds[0].bird_pos[0] - (RADIUS * (math.cos(math.radians(45)))),
               birds[0].bird_pos[1] + (RADIUS * (math.sin(math.radians(45))))]
# Build the GUI frame and wire up the input/draw handlers.
frame=simplegui.create_frame("Birds Formation Simulation",WIDTH,HEIGHT)
frame.set_mouseclick_handler(mouseclick)
frame.set_draw_handler(draw_handler)
# Formation selection controls (blank labels are used as spacers).
labelA = frame.add_label('Formation Control')
button1 = frame.add_button('Circle Formation', button_handler_c)
label1 = frame.add_label(' ')
button2 = frame.add_button('V Formation', button_handler_v)
label2 = frame.add_label(' ')
label3 = frame.add_label(' ')
label4 = frame.add_label(' ')
# Circle radius +/- controls (button width 25px).
labelA = frame.add_label('Circle Diameter Control')
buttonA = frame.add_button('+', button_cplus, 25)
label5 = frame.add_label(' ')
buttonB = frame.add_button('-', button_cminus, 25)
label6 = frame.add_label(' ')
label7 = frame.add_label(' ')
# V spacing +/- controls.
labelB = frame.add_label('V Distance Control')
buttonC = frame.add_button('+', button_vplus, 25)
label8 = frame.add_label(' ')
buttonD = frame.add_button('-', button_vminus, 25)
#images
# NOTE(review): absolute X:/ drive paths are machine-specific -- confirm
# the asset locations before running elsewhere.
imageR = simplegui.load_image('file:///X:/GIT_ROOT/cluster-bird-formation-sim/images/right_bird.gif')
imageL = simplegui.load_image('file:///X:/GIT_ROOT/cluster-bird-formation-sim/images/left_bird.gif')
bg = simplegui.load_image('file:///X:/GIT_ROOT/cluster-bird-formation-sim/images/bg.png')
frame.start()
| {
"repo_name": "Daytron/cluster-bird-formation-sim",
"path": "source/birds_sim.py",
"copies": "1",
"size": "11904",
"license": "mit",
"hash": 6803502209951331000,
"line_mean": 33.2068965517,
"line_max": 139,
"alpha_frac": 0.5443548387,
"autogenerated": false,
"ratio": 3.042944785276074,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8936022641123089,
"avg_score": 0.030255396570596817,
"num_lines": 348
} |
"""Play a game using the minimax algorithm."""
"""
Mancala game v1.0
Lyndon While, 3 April 2014
The program prompts the user for the game size X, then it plays a
game of Mancala between two random players and creates an SVG file
MancalaGameX.svg
that shows the history of the board during the game. Each board is
annotated with the next move made, and the final board is annotated
with the average branching factor during the game.
Software to run the program is available from python.org. Get Python 3.
The size of the displayed boards and the fonts used can be controlled
by changing the variable size below.
The colours used in the display can be controlled by changing the
variable colours.
Please report any bugs on help3001. Unless they're embarrassing ones. :-)
"""
import random
import copy
from collections import namedtuple
import pprint
import colorama
import tabulate
#a board position is a 2x7 list with the stores at the ends
#a player is 0 or 1, and is used to index the board
#a move is a list of indices into a board
#------------------------------------------------------------- This is the display code
size = 5 #controls the board-size and fonts - don't change anything else
side = size * 5
housefont = size * 2
storefont = size * 3
colours = [(0, 0, 0), (255,150,150), (215,215,0)]
# black pink green-yellow
def mkhouse(k, p, x, r):
    """Build the SVG polygon + label for house `k` of player `p`.

    `x` is the seed count displayed inside the house; `r` is the move
    number, which places the board on the page (boards are laid out in
    columns of ten: r // 10 selects the column, r % 10 the row).

    Returns (colour, polygon-point list, (tx, ty, text, fontsize)).
    """
    h = side * (3.4 + k + r // 10 * 9)
    v = side * (1.5 - p + r % 10 * 3)
    return (colours[p + 1], [(h, v), (h + side, v), (h + side, v + side), (h, v + side), (h, v)],
            #text placement and font
            (h + side / 2 - len(str(x)) * side / 8, v + 2 * side / 3, str(x), housefont))
def mkstore(p, x, r):
    """Build the SVG polygon + label for player `p`'s store.

    Stores are double-height rectangles at either end of the board; `x`
    is the seed count shown and `r` the move number (same page layout
    as mkhouse).  Returns (colour, polygon-point list, text tuple).
    """
    h = side * (9.4 - 7 * p + r // 10 * 9)
    v = side * (0.5 + r % 10 * 3)
    return (colours[p + 1], [(h, v), (h + side, v), (h + side, v + 2 * side), (h, v + 2 * side), (h, v)],
            #text placement and font
            (h + side / 2 - len(str(x)) * side / 5, v + 6 * side / 5, str(x), storefont))
def writeColor(c):
    """Return the SVG ``rgb(r,g,b)`` string for the colour triple *c*."""
    return "rgb(%s,%s,%s)" % c
def writeText(t):
    """Return an SVG <text> element for (x, y, text, fontsize) tuple *t*."""
    (h, v, z, s) = t
    return ('<text x="%s" y="%s" font-family="Verdana" font-size="%s"'
            ' fill="black">%s</text>\n') % (h, v, s, z)
def writePolygons(f, ps):
    """Write every (colour, points, text) triple in *ps* to SVG file *f*."""
    for colour, points, label in ps:
        coords = "".join(["".join([str(x), ",", str(y), " "]) for (x, y) in points])
        f.write("<polygon points=\"")
        f.write(coords)
        # Fill with the triple's colour, outline with the border colour.
        f.write("\" style=\"fill:")
        f.write(writeColor(colour))
        f.write(";stroke:")
        f.write(writeColor(colours[0]))
        f.write(";stroke-width:3\"/>\n")
        f.write(writeText(label))
def mancalaDisplay(b, m, r, f):
    """Append board `b` after move number `r` to SVG file `f`.

    `m` is the move (list of house indices) that produced this board;
    it is rendered next to the board.  Odd move numbers belong to green,
    even ones to pink.
    """
    if r % 2 == 1: t = "green"
    else: t = "pink"
    # Turn labels are only written down the first column of boards.
    if r < 10:
        f.write(writeText((size, side * (1.0 + r % 10 * 3), t + "'s", housefont)))
        f.write(writeText((size, side * (1.5 + r % 10 * 3), "move", housefont)))
    #display the move
    f.write(writeText((size + side * (2 + (r - 1) // 10 * 9), side * (3 + (r - 1) % 10 * 3), "".join([str(k + 1) for k in m]), housefont)))
    # 12 houses (6 per player, mirrored via 5*p + k*(1-2*p)) plus 2 stores.
    writePolygons(f, [mkhouse(k, p, b[p][5 * p + k * (1 - 2 * p)], r) for p in range(2) for k in range(6)] +
                     [mkstore( p, b[p][6], r) for p in range(2)])
#------------------------------------------------------------- This is the game mechanics code
def moves(b, p):
    """Return the list of legal moves for player `p` on board `b`.

    A move is a list of house indices.  When a sowing's final seed lands
    in p's own store ((seeds + house) % 13 == 6), the player moves
    again, so such moves recurse into every follow-up possibility.
    """
    zs = []
    #for each non-empty house on p's side
    for m in [h for h in range(6) if b[p][h] > 0]:
        #if the final seed will be sown in p's store
        if (b[p][m] + m) % 13 == 6:
            #copy b, make move m, and check for recursive possibilities
            c = copy.deepcopy(b)
            move(c, p, [m])
            ms = moves(c, p)
            if ms == []:
                zs += [[m]]
            else:
                # Prefix m onto each continuation.
                zs += [[m] + n for n in ms]
        else:
            zs += [[m]]
    return zs
def move(b, p, ms):
    """Apply move `ms` (a list of house indices) for player `p`, mutating `b`.

    After each sowing, if the last seed landed in one of p's previously
    empty houses and the opposite house holds seeds, both are captured
    into p's store.
    """
    for m in ms:
        x = b[p][m]
        b[p][m] = 0
        (capturePossible, z) = sow(b, p, m + 1, 6, x)
        #if the last seed was sown in an empty house on p's side, with seeds opposite
        # (b[p][z] == 1 means the house held 0 before this sowing)
        if capturePossible and b[p][z] == 1 and b[1 - p][5 - z] > 0:
            b[p][6] += b[p][z] + b[1 - p][5 - z]
            b[p][z] = 0
            b[1 - p][5 - z] = 0
def sow(b, p, m, y, x):
    """Sow `x` seeds for player `p` starting from house `m`, with limit `y`.

    The limit excludes the opponent's store: on p's own side y == 6
    (store included), on the opponent's side y == 5.  Each pass drops
    one seed per house up to the limit, then flips to the other side.

    Returns (possibleCapture, lastHouseSown).  possibleCapture is True
    when sowing ended on p's own row (y flipped back to 5).  Note that
    `z` deliberately leaks out of the last for-loop iteration.
    """
    while x > 0:
        for z in range(m, min(y + 1, m + x)):
            b[p][z] += 1
        x -= y + 1 - m
        p = 1 - p
        m = 0
        y = 11 - y
    return (y == 5, z)
def render(board):
    """Render the board as an ASCII table and print it.

    Uses a parenthesised print call so the statement is valid under both
    Python 2 (where it prints the expression) and Python 3.
    """
    print(tabulate.tabulate(board))
def evaluate(board):
    """Evaluate `board` from player 0's perspective (higher is better).

    The base score is player 0's total seeds minus player 1's.  On top
    of that, each "capture setup" -- an empty house facing a non-empty
    opposing house that some sowing can finish in -- shifts the score by
    +/-20.  This heuristic is intentionally simple.
    """
    boardScore = sum(board[0]) - sum(board[1])
    for house_index in range(0, 6):
        if board[0][house_index] == 0 and board[1][house_index] != 0:
            # Can one of our playable (non-empty) houses finish a sowing
            # exactly here?  A sowing from house i with n seeds ends at
            # (i + n) % 13.  Fixes the original precedence bug
            # `i + n % 13`, which diverges once a house holds 13+ seeds,
            # and skips empty houses that cannot be played at all.
            for i in range(0, 6):
                if board[0][i] > 0 and (i + board[0][i]) % 13 == house_index:
                    boardScore += 20
        elif board[0][house_index] != 0 and board[1][house_index] == 0:
            # Symmetrically, penalise capture setups the opponent can reach.
            for i in range(0, 6):
                if board[1][i] > 0 and (i + board[1][i]) % 13 == house_index:
                    boardScore -= 20
    return boardScore
# A candidate move paired with its evaluated score.
# NOTE(review): the typename "AvailableMove" differs from the binding name
# ScoredMove; this only affects repr() output, but consider unifying.
ScoredMove = namedtuple("AvailableMove", "score move")
# Index into a score-sorted (ascending) move list: player 0 takes the first
# (lowest-scored) entry, player 1 the last.  NOTE(review): evaluate() is
# positive-good for player 0, so confirm this orientation is intended.
PLAYER_MOVE_INDEX = [0, -1]
# Console colours per player for the game transcript.
PLAYER_COLORS = [colorama.Fore.RED, colorama.Fore.GREEN]
def moveScore(nextMove, board, player):
    """Score `nextMove` for `player` by evaluating the resulting board."""
    hypothetical = copy.deepcopy(board)
    move(hypothetical, player, nextMove)
    return evaluate(hypothetical)
def pickMoveForPlayer(player, board, available_moves, scoreFunction):
    """Rank `available_moves` by score and pick `player`'s move.

    Returns (ascending score-sorted ScoredMove list, chosen move); the
    pick for each player is dictated by PLAYER_MOVE_INDEX.
    """
    ranked = sorted(
        (ScoredMove(score=scoreFunction(candidate, board, player),
                    move=candidate)
         for candidate in available_moves),
        key=lambda scored: scored.score)
    return (ranked, ranked[PLAYER_MOVE_INDEX[player]].move)
def minimax(board, player, depth):
    """Play `depth` plies of greedy self-play from `board`, then evaluate.

    NOTE(review): despite the name, this is not a true minimax: each ply
    simply follows pickMoveForPlayer's single greedy choice for the side
    to move instead of propagating min/max values back up the tree.
    Confirm whether a full minimax was intended.
    """
    available_moves = moves(board, player)
    if depth != 0 and len(available_moves) != 0:
        best_move = pickMoveForPlayer(player, board, available_moves, moveScore)[1]
        nextBoard = copy.deepcopy(board)
        move(nextBoard, player, best_move)
        return minimax(nextBoard, 1 - player, depth - 1)
    # Leaf (depth exhausted or no moves): static evaluation.
    return evaluate(board)
def minimaxScoreFunction(depth):
    """Build a score function that looks `depth` plies ahead via minimax()."""
    def lookahead(nextMove, board, player):
        projected = copy.deepcopy(board)
        move(projected, player, nextMove)
        return minimax(projected, 1 - player, depth)
    return lookahead
def mancala(n):
    """Play one Mancala game with `n` starting seeds per small house.

    Player 0 is driven by a 5-ply look-ahead search; player 1 is a human
    prompted on the console.  Every position is appended to the SVG file
    MancalaGame<n>.svg, and the final board is annotated with the
    average branching factor.  (Python 2 print statements throughout.)
    """
    #start with n seeds in each small house
    b = [[n] * 6 + [0] for p in [0, 1]]
    #open the SVG file
    f = open("".join(["MancalaGame", str(n), ".svg"]), 'w')
    f.write("<svg xmlns=\"http://www.w3.org/2000/svg\">\n")
    mancalaDisplay(b, [], 0, f)
    r = 1               # move number
    current_player = 0
    tm = 0              # running total of branching factors
    #while both players have seeds in their small houses
    while all ([sum(b[p][:6]) > 0 for p in [0, 1]]):
        ms = moves(b, current_player)
        print colorama.Fore.WHITE
        render(b)
        if current_player == 0:
            # Machine player: choose via the 5-ply look-ahead score.
            score_moves, best_move = pickMoveForPlayer(current_player, b, ms, minimaxScoreFunction(5))
            m = best_move
            print PLAYER_COLORS[current_player] + pprint.PrettyPrinter().pformat([(i, mv) for i, mv in enumerate(score_moves)])
            print PLAYER_COLORS[current_player] + repr(best_move)
        else:
            # Human player: show the options and read an index.
            # NOTE(review): the guard `choice > len(ms)` accepts
            # choice == len(ms) (IndexError) and negative indices --
            # confirm `>=` and a lower bound were intended.
            print "Chose between \n", pprint.PrettyPrinter().pformat([(i, mv) for i, mv in enumerate(ms)])
            choice = 999999999
            while choice > len(ms):
                choice = int(input("What move? "))
            m = ms[choice]
        move(b, current_player, m)
        mancalaDisplay(b, m, r, f)
        r += 1
        current_player = 1 - current_player
        tm += len(ms)
    #move the remaining seeds to the stores
    for p in [0, 1]:
        for k in [0, 1, 2, 3, 4, 5]:
            b[p][6] += b[p][k]
            b[p][k] = 0
    # Final board annotated with the average branching factor.
    # NOTE(review): under Python 2, tm / (r - 1) is integer division --
    # confirm whether a float average was intended before rounding.
    mancalaDisplay(b, [round(tm / (r - 1), 2)], r, f)
    f.write("</svg>\n")
    f.close()
def main():
    """Prompt for a game size and play a Mancala game of that size."""
    mancala(int(input("What size game? ")))


# Run directly on import/execution; removed stray extraction residue that
# was fused onto this line.
main()
"repo_name": "smspillaz/artificial-intelligence",
"path": "artificialintelligence/minimax.py",
"copies": "1",
"size": "8999",
"license": "mit",
"hash": 4949056909668496000,
"line_mean": 32.9622641509,
"line_max": 139,
"alpha_frac": 0.5479497722,
"autogenerated": false,
"ratio": 3.2300789662598706,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42780287384598703,
"avg_score": null,
"num_lines": null
} |
#Artificial Intelligence Module for Pygame. Developed by Shreyas Iyer, 2015.
'''
------------------------------------------------------------------------------------------------------------------------
Visit www.github.com/djeof-1 for further updates.
About --> AI module instantiates an AI object, with basic attributes for the
x-coordinate, y-ordinate and the width, height. The attributes can be changed in order to implement a sprite in place of a static image.
------------------------------------------------------------------------------------------------------------------------
How to use --> In order to be able to use the module, import this particular module as 'import ai'. Refer 'ai_example.py' Python file (In the same repository), for learning how to use this module.
(NOTE: This module must be within the directory of the project you are creating.).
--> Create an object of AI class, and manipulate its attributes. The rest functions take care of themselves accordingly on the basis of the position of the player.
------------------------------------------------------------------------------------------------------------------------
'''
#LICENSE: The following module has been licensed under MIT License.
#Dependencies
import random
import pygame
from OpenGL.GL import *
from OpenGL.GLU import *
#End of dependencies
class AI:
    """A rectangular enemy bot for Pygame: patrols randomly, chases the
    player when it comes within view range, searches the player's last
    known position after losing sight, and can alert other bots."""

    def __init__(self,xpos,ypos,w,h,color,canCallBackup,flag):
        self.x = xpos
        self.y = ypos
        self.width = w
        self.height = h
        self.rangeRadius = w*5 #This is the radial range, within which the player will be detected.
        self.dx = 0 #Change in the x & y coordinates of the AI object.
        self.dy = 0
        self.stopTime = 5;   # patrol pause interval (in curTime units)
        self.curTime = 1;    # internal clock driving the patrol cycle
        self.viewDirection = 1    # horizontal patrol direction (+1/-1)
        self.viewDirection2 = 1   # vertical patrol direction (+1/-1)
        self.alarmedState = False  # True once the player has been spotted
        self.color = color
        self.callBackup = canCallBackup  # whether this bot may alert others
        self.lastSeenPosX = 0  # player's last known position
        self.lastSeenPosY = 0
        self.Flag = flag

    def detectEnemy(self,player_x,player_y): #Returns true when the player is within the view circle of the AI bot.
        # Squared-distance test against the squared view radius (avoids sqrt).
        if (int(player_x-self.x)*int(player_x-self.x) + int(player_y-self.y)*int(player_y-self.y) <= self.rangeRadius*self.rangeRadius): #If the player is within the enemy's view circle, then he detects the player's motion.
            self.alarmedState = True
            return True
        else:
            return False

    def AIStop(self): #Function, where the AI bot has to stop moving, and then choose a new direction.
        self.curTime += 0.04
        # Every fourth tick, reset the clock and pick fresh patrol directions.
        if (int(self.curTime)%4 == 0):
            self.curTime = 1
            self.viewDirection = random.choice([-1,1])
            self.viewDirection2 = random.choice([-1,1])

    def AIMotion(self,player_x,player_y,enemydx,enemydy,color,ObjList): #The main AI controller function. Controls AI's movement.
        if self.detectEnemy(player_x,player_y):
            # Chase mode: turn red and steer towards the player, kept
            # slower than the player so escape remains possible.
            self.color = (255,0,0)
            if (enemydx < 0 and player_x < self.x):
                self.dx = (enemydx/2.2) - 0.75 #Making the enemy slower than the player, so that the player can escape.
            elif (enemydx < 0 and player_x > self.x):
                self.dx = -(enemydx/2.2) - 0.75
            elif (enemydx > 0 and player_x < self.x):
                self.dx = -(enemydx/2.2) - 0.75
            elif (enemydx == 0 and player_x < self.x):
                self.dx = -1.5
            else:
                self.dx = (enemydx/2.2) + 0.75
            if (enemydy < 0 and player_y < self.y):
                self.dy = (enemydy/2.2) - 0.75 #Making the enemy slower than the player, so that the player can escape.
            elif (enemydy < 0 and player_y > self.y):
                # NOTE(review): uses +0.75 where the dx analogue uses -0.75
                # -- confirm whether this asymmetry is intended.
                self.dy = -(enemydy/2.2) + 0.75
            elif (enemydy > 0 and player_y < self.y):
                self.dy = -(enemydy/2.2) - 0.75
            elif (enemydy == 0 and player_y < self.y):
                self.dy = -1.5
            else:
                self.dy = (enemydy/2.2) + 0.75
            # Remember where (and roughly how fast) the player was last seen.
            self.lastSeenPosX = player_x
            self.lastSeenPosY = player_y
            self.lastSeenDx = enemydx/2.2
            self.lastSeenDy = enemydy/2.2
            if (self.callBackup):
                self.CallBackup(ObjList, player_x,player_y,enemydx,enemydy)
        # Note: detectEnemy is re-run here (it also sets alarmedState).
        if (not self.detectEnemy(player_x,player_y) and int(self.curTime) % self.stopTime!=0):
            if (self.alarmedState == True):
                # Lost sight of the player: head for the last known position.
                self.AISearch()
            else:
                # Patrol mode: wander in the current view directions.
                self.color = color
                self.dx = self.viewDirection*random.randrange(0,2)
                self.curTime += 0.06
                self.dy = self.viewDirection2*random.randrange(0,2)
        elif (not self.detectEnemy(player_x,player_y) and int(self.curTime)%self.stopTime == 0):
            # Pause the patrol and pick a fresh direction.
            self.dx = 0
            self.dy = 0
            self.AIStop()
        self.x += self.dx
        self.y += self.dy

    def AISearch(self):
        # Move along the player's last observed heading while still far
        # (>= 10px on both axes) from the last seen position.
        if (abs(self.x - self.lastSeenPosX)>= 10 and abs(self.y - self.lastSeenPosY) >= 10):
            self.color = (255,155,0)
            self.x += self.lastSeenDx
            self.y += self.lastSeenDy
            self.curTime += 0.06
            # NOTE(review): clearing the alarm here ends the search after a
            # single step -- confirm it shouldn't clear only once the last
            # seen position has actually been reached.
            self.alarmedState = False

    def CallBackup(self,AIObjList,player_x,player_y,enemydx,enemydy):
        # Alert every bot in the list and drive it towards the player.
        # NOTE(review): a backup bot whose own callBackup flag is set will
        # call CallBackup again from AIMotion -- possible unbounded
        # recursion; confirm the intended guard.
        for ai in AIObjList:
            self.Flag = 1
            ai.AIMotion(player_x,player_y,enemydx,enemydy,(255,0,0),AIObjList)

    def render(self,window,player_x,player_y,enemydx,enemydy,color,objList):
        """Advance the bot one tick and draw it as a rectangle on `window`."""
        self.AIMotion(player_x,player_y,enemydx,enemydy,color,objList)
        pygame.draw.rect(window, self.color, (self.x, self.y, self.width, self.height))
| {
"repo_name": "djeof-1/Pygame-modules",
"path": "ai.py",
"copies": "1",
"size": "5019",
"license": "mit",
"hash": 2950641978660193000,
"line_mean": 37.9069767442,
"line_max": 217,
"alpha_frac": 0.6359832636,
"autogenerated": false,
"ratio": 2.9368051492100644,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40727884128100644,
"avg_score": null,
"num_lines": null
} |
"""Finds a minimum spanning tree for a sparse matrix."""
from collections import namedtuple
class DisjointSet(object):
    """A collection of disjoint subsets of elements.

    Exposes find(), which returns the index of the subset an element
    lives in, and merge(a, b), which fuses the two subsets containing
    a and b (the merged subset is appended after the others)."""

    def __init__(self, starting_elements):
        """Create one singleton subset per element of *starting_elements*."""
        super(DisjointSet, self).__init__()
        self._sets = [frozenset([element]) for element in starting_elements]

    def find(self, element):
        """Return the index of the subset which contains *element*."""
        for index, subset in enumerate(self._sets):
            if element in subset:
                return index
        raise KeyError("Unable to find element {0} in sets".format(element))

    def merge(self, element_a, element_b):
        """Merge the two subsets containing *element_a* and *element_b*.

        Raises ValueError if both already live in the same subset."""
        index_a = self.find(element_a)
        index_b = self.find(element_b)
        if index_a == index_b:
            raise ValueError("Element {0} is in the same"
                             " set as {1}".format(element_a, element_b))
        combined = self._sets[index_a] | self._sets[index_b]
        dropped = [index_a, index_b]
        self._sets = [s for i, s in enumerate(self._sets) if i not in dropped]
        self._sets.append(combined)
Edge = namedtuple("Edge", "vertex_one vertex_two weight")
def minimum_spanning_tree(undirected_graph):
    """Compute a minimum spanning tree of `undirected_graph` via Kruskal.

    `undirected_graph` is a square compressed-sparse-row style matrix:
    entry [i][j] is the weight of the connection between nodes i and j,
    with 0 meaning "no connection".  Edges are collected once each
    (skipping the reverse duplicate of any edge already gathered),
    sorted by weight, and an edge is accepted whenever its endpoints lie
    in different disjoint sets; merging the sets on acceptance prevents
    cycles.  The result is the tree that connects all the nodes together
    with minimum total weight, returned in the same sparse-matrix form.
    """
    node_count = len(undirected_graph)

    # Sanity check: the matrix must be square.
    for row in undirected_graph:
        assert len(row) == len(undirected_graph[0])
        assert len(row) == len(undirected_graph)

    edges = []
    for source in range(0, node_count):
        for target in range(0, node_count):
            weight = undirected_graph[source][target]
            if weight == 0:
                continue
            # Skip the mirror of an edge we already collected.
            if Edge(target, source, weight) not in edges:
                edges.append(Edge(vertex_one=source,
                                  vertex_two=target,
                                  weight=weight))

    node_sets = DisjointSet(range(0, node_count))
    accepted = []

    # Greedily take the lightest edge joining two different components.
    for edge in sorted(edges, key=lambda e: e.weight):
        if (node_sets.find(edge.vertex_one) !=
                node_sets.find(edge.vertex_two)):
            node_sets.merge(edge.vertex_one, edge.vertex_two)
            accepted.append(edge)

    # Re-encode the accepted edges as a sparse matrix.
    result = [[0] * node_count for _ in range(0, node_count)]
    for edge in accepted:
        result[edge.vertex_one][edge.vertex_two] = edge.weight
    return result
| {
"repo_name": "smspillaz/artificial-intelligence",
"path": "artificialintelligence/mst.py",
"copies": "1",
"size": "4847",
"license": "mit",
"hash": -3644912785757762600,
"line_mean": 34.1231884058,
"line_max": 79,
"alpha_frac": 0.584897875,
"autogenerated": false,
"ratio": 3.742857142857143,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9767615384922657,
"avg_score": 0.012027926586897102,
"num_lines": 138
} |
"""Loader module."""
from artificialintelligence.mst import minimum_spanning_tree
from collections import namedtuple
import math
MSTNode = namedtuple("MSTNode", "which distance")
TSPNode = namedtuple("TSPNode", "x y")
def _depth_first_search_recurse(current_node, mst_array, traversal, recursion):
    """Expand all the subnodes of this node.

    The sparse MST stores each edge in only one orientation, so both the
    row and the column of `current_node` are scanned for unvisited
    neighbours; they are then visited in ascending order of edge weight.
    `traversal` accumulates already-visited node indices along the path.
    """
    nodes_to_expand = []
    path_part = []
    # Outgoing edges: row of current_node.
    for i in range(0, len(mst_array[current_node])):
        if mst_array[current_node][i] != 0 and i not in traversal:
            nodes_to_expand.append(MSTNode(i,
                                           mst_array[current_node][i]))
    # Incoming edges: column of current_node.
    for i in range(0, len(mst_array)):
        if mst_array[i][current_node] != 0 and i not in traversal:
            nodes_to_expand.append(MSTNode(i,
                                           mst_array[i][current_node]))
    # NOTE(review): frozenset de-duplicates, but nodes tied on `distance`
    # are then visited in set-iteration order, which is not a guaranteed
    # ordering -- confirm ties are acceptable to break arbitrarily.
    for node in sorted(frozenset(nodes_to_expand), key=lambda n: n.distance):
        path_part.append(node)
        path_part += _depth_first_search_recurse(node.which,
                                                 mst_array,
                                                 traversal + [node.which],
                                                 recursion + 1)
    return path_part
def depth_first_search(current_node, mst_array):
    """Depth-first traversal of a sparse-matrix minimum spanning tree.

    Returns the visit order as a list of MSTNodes, beginning with the
    root itself at distance zero.
    """
    root = MSTNode(current_node, 0)
    tail = _depth_first_search_recurse(current_node,
                                       mst_array,
                                       [current_node],
                                       0)
    return [root] + tail
def _find_tsp_path_from_distances(undirected_graph, start_index):
    """Approximate a travelling-salesman path over a distance matrix.

    `undirected_graph` is a square CSR-style matrix where entry [i][j]
    (or its mirror [j][i]) gives the distance between nodes i and j and
    a zero in both means there is no connection.  For example::

        [
            [ 0, 8, 0, 3 ],
            [ 8, 0, 2, 5 ],
            [ 0, 2, 0, 6 ],
            [ 3, 5, 6, 0 ]
        ]

    connects node 0 to node 1 by 8 and to node 3 by 3, but not to
    node 2.  The path is obtained by building a minimum spanning tree
    and walking it depth-first from `start_index`; the visit order is
    returned as a list of MSTNodes.
    """
    # The distance matrix must be square.
    for row in undirected_graph:
        assert len(row) == len(undirected_graph[0])

    tree = minimum_spanning_tree(undirected_graph)
    return depth_first_search(start_index, tree)
def find_tsp_path(cities, start_index):
    """Find a travelling-salesman path through `cities`.

    Builds an N x N sparse-matrix undirected graph of the Euclidean
    distances between every pair of cities (each city only needs `x` and
    `y` attributes; the diagonal stays 0, i.e. "unreachable from
    itself").  For cities (0, 3), (0, 1), (1, 1) the matrix is::

        [
            [ 0,           2, hypot(1, 2) ],
            [ 2,           0,           1 ],
            [ hypot(1, 2), 1,           0 ]
        ]

    The distances are fed to _find_tsp_path_from_distances, and the
    resulting MSTNode path is mapped back to the cities themselves, with
    the starting city appended to close the tour.  Uses math.hypot,
    which is clearer and numerically better behaved than
    sqrt(pow(dx, 2) + pow(dy, 2)).
    """
    distance_undirected_graph = []
    for row_index, target_city in enumerate(cities):
        row_graph = []
        for col_index, reference_city in enumerate(cities):
            if col_index == row_index:
                row_graph.append(0)
            else:
                row_graph.append(math.hypot(target_city.x - reference_city.x,
                                            target_city.y - reference_city.y))
        distance_undirected_graph.append(row_graph)

    # Sanity check: the matrix built above must be square.
    for row in distance_undirected_graph:
        assert len(row) == len(distance_undirected_graph)
        assert len(row) == len(distance_undirected_graph[0])

    mst_path = _find_tsp_path_from_distances(distance_undirected_graph,
                                             start_index)
    # Map node indices back to the city objects and close the tour.
    return [cities[node.which] for node in mst_path] + [cities[start_index]]
"repo_name": "smspillaz/artificial-intelligence",
"path": "artificialintelligence/tsp_mst.py",
"copies": "1",
"size": "5830",
"license": "mit",
"hash": 3930108494755539000,
"line_mean": 34.3393939394,
"line_max": 83,
"alpha_frac": 0.5638078902,
"autogenerated": false,
"ratio": 3.6392009987515603,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9698681598408717,
"avg_score": 0.0008654581085686565,
"num_lines": 165
} |
"""Artificial Local Searches"""
# Author: Lucas David -- <ld492@drexel.edu>
# License: MIT (c) 2016
import abc
import logging
import multiprocessing
import threading
import six
from . import base
from .. import agents
logger = logging.getLogger('artificial')
@six.add_metaclass(abc.ABCMeta)
class Local(base.SearchBase):
    """Base Local Search.

    Base class for HillClimbing and LocalBeam searches.

    Parameters
    ----------
    strategy : ('classic'|'steepest-ascent')
        Defines the climbing policy.
        Options are:

        --- 'classic' : first child that improves utility is chosen.

        --- 'steepest-ascent' : child that provides greatest
            utility improvement is chosen.

    restart_limit : int
        Define maximum number of random-restarts.

        If `1`, classic HillClimbing is performed and no restarts occur.
        If limit is passed and it's greater than 1, the agent will restart
        `i` times before returning a solution.
    """

    def __init__(self, agent, root=None,
                 strategy='steepest-ascent', restart_limit=1):
        super(Local, self).__init__(agent=agent, root=root)

        # Local searches climb on utility, so a plain goal-based agent
        # is not enough: it must expose an `utility` method.
        assert isinstance(agent, agents.UtilityBasedAgent), \
            'Local searches require an utility based agent.'

        self.strategy = strategy
        self.restart_limit = restart_limit
class HillClimbing(Local):
    """Hill Climbing Search.

    Perform Hill Climbing search according to a designated strategy
    ('classic' or 'steepest-ascent'), with optional random restarts.
    """

    def search(self):
        """Climb from `self.root` (or random states on restart).

        Keeps the best state seen across all restart attempts in
        `solution_candidate_` and returns `self`.
        """
        self.solution_candidate_ = self.root
        strategy_is_classic = self.strategy == 'classic'

        current = self.root
        iteration, limit = 0, self.restart_limit or 1
        # Hoisted: `utility` is called for every child in the inner loop.
        utility = self.agent.utility

        for iteration in range(limit):
            logger.info('hill-climbing (attempt p#%i)', iteration)
            stalled = False

            if current is None:
                # Random restart: draw a fresh starting state.
                current = self.agent.environment.state_class_.random()

            while not stalled:
                children = self.agent.predict(current)
                stalled = True

                for child in children:
                    if utility(child) > utility(current):
                        current = child
                        stalled = False

                        # Classic strategy always takes the first
                        # child that improves utility.
                        if strategy_is_classic: break

            if (not self.solution_candidate_ or
                    utility(current) > utility(self.solution_candidate_)):
                # We've just found a better solution!
                logger.info('solution candidate found: %s', current)
                self.solution_candidate_ = current

            # Force random restart.
            current = None

        return self
class LocalBeam(Local):
    """Local Beam.

    Runs `k` independent hill climbers on separate threads, sharing the
    best solution found under a lock.

    Parameters
    ----------
    k : ['auto'|int] (default='auto')
        The number of beams to keep track of. If value is `auto`,
        then the number of beams is inferred from the number of processors
        available.

    strategy : ('classic'|'steepest-ascent')
        Defines the climbing policy.
        Options are:

        --- 'classic' : first child that improves utility is chosen.

        --- 'steepest-ascent' : child that provides greatest
            utility improvement is chosen.

    restart_limit : int
        Define maximum number of random-restarts.

        If `1`, classic HillClimbing is performed and no restarts occur.
        If limit is passed and it's greater than 1, the agent will restart
        `i` times before returning a solution.
    """

    class Beam(threading.Thread):
        # One beam: a HillClimbing run executing on its own thread,
        # publishing results back to the owning LocalBeam (`manager`).
        def __init__(self, manager):
            super(LocalBeam.Beam, self).__init__()
            self.manager = manager
            self.hill_climber = HillClimbing(agent=manager.agent,
                                             strategy=manager.strategy)

        def run(self):
            it, limit = 0, self.manager.restart_limit or 1

            while it < limit:
                it += 1
                state = self.hill_climber.search().solution_candidate_

                # Publish under the manager's lock so only the best
                # candidate across all beams is kept.
                with self.manager._solution_update_lock:
                    if (not self.manager.solution_candidate_ or
                            self.manager.agent.utility(state) >
                            self.manager.agent.utility(
                                self.manager.solution_candidate_)):
                        self.manager.solution_candidate_ = state

    def __init__(self, agent, root=None, k='auto',
                 strategy='steepest-ascent', restart_limit=1):
        super(LocalBeam, self).__init__(agent=agent,
                                        root=root,
                                        strategy=strategy,
                                        restart_limit=restart_limit)
        self.k = k
        self.beams = None
        self._solution_update_lock = threading.Lock()

    def restart(self, root):
        super(LocalBeam, self).restart(root=root)
        # Beams are per-search; discard any from a previous run.
        self.beams = None

        return self

    def search(self):
        """Spawn `k` beams, wait for all of them, and return `self`.

        Raises ValueError when `k` is neither 'auto' nor an int.
        """
        self.solution_candidate_ = self.solution_path_ = None

        if self.k == 'auto':
            k = multiprocessing.cpu_count()
        elif isinstance(self.k, int):
            k = self.k
        else:
            raise ValueError('Unknown value for k (%s)' % str(self.k))

        self.beams = [self.Beam(self) for _ in range(k)]

        for beam in self.beams: beam.start()
        for beam in self.beams: beam.join()

        return self
| {
"repo_name": "lucasdavid/artificial",
"path": "artificial/searches/local.py",
"copies": "1",
"size": "5653",
"license": "mit",
"hash": 8607761631317621000,
"line_mean": 29.722826087,
"line_max": 74,
"alpha_frac": 0.5611179904,
"autogenerated": false,
"ratio": 4.437205651491365,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5498323641891365,
"avg_score": null,
"num_lines": null
} |
"""Artificial Local Searches Test"""
# Author: Lucas David -- <ld492@drexel.edu>
# License: MIT (c) 2016
from unittest import TestCase
import numpy as np
from artificial import agents, base
from artificial.searches.local import HillClimbing, LocalBeam
random_state = np.random.RandomState(0)
class _TState(base.State):
    # Test state: the goal is to reach data == 100.
    @property
    def is_goal(self):
        return self.data == 100

    def h(self):
        # Heuristic: absolute distance to the goal value.
        return abs(self.data - 100)

    @classmethod
    def random(cls):
        # Deterministic across runs: draws from the module-level seeded RNG.
        return cls(random_state.randint(-1000, 1000))


class _TestEnvironment(base.Environment):
    # Minimal environment stub: only supplies the state class.
    state_class_ = _TState

    def update(self):
        pass


class _UtilityTestAgent(agents.UtilityBasedAgent):
    def predict(self, state):
        # Children span data-5 .. data+4; `action` encodes the direction
        # (0: decrease, 2: unchanged, 1: increase).
        children = [_TState(data=state.data + i - 5,
                            action=0 if i < 5 else 2 if i == 5 else 1)
                    for i in range(10)]
        return children
class HillClimbingTest(TestCase):
    """HillClimbing search tests over the toy reach-100 problem."""

    def setUp(self):
        self.env = _TestEnvironment(_TState(0))
        self.agent = _UtilityTestAgent(HillClimbing, self.env, actions=None)

    def test_sanity(self):
        # A None agent must be rejected by the search's assertion.
        with self.assertRaises(AssertionError):
            HillClimbing(agent=None, root=_TState(10))

        s = HillClimbing(agent=self.agent, root=_TState(10))
        self.assertIsNotNone(s)

    def test_search(self):
        s = (HillClimbing(agent=self.agent)
             .restart(_TState(0))
             .search())
        self.assertTrue(s.solution_candidate_.is_goal,
                        str(s.solution_candidate_))
        self.assertEqual(s.solution_candidate_.data, 100)

    def test_classic_strategy(self):
        s = (HillClimbing(agent=self.agent, strategy='classic')
             .restart(_TState(0))
             .search())
        self.assertTrue(s.solution_candidate_.is_goal,
                        str(s.solution_candidate_))
        self.assertEqual(s.solution_candidate_.data, 100)

    def test_random_restart(self):
        s = (HillClimbing(agent=self.agent, restart_limit=2)
             .restart(_TState(0))
             .search())
        self.assertTrue(s.solution_candidate_.is_goal,
                        str(s.solution_candidate_))
        self.assertEqual(s.solution_candidate_.data, 100)
class LocalBeamTest(TestCase):
    """LocalBeam search tests over the toy reach-100 problem."""

    def setUp(self):
        self.env = _TestEnvironment(_TState(0))
        self.agent = _UtilityTestAgent(LocalBeam, self.env, actions=None)

    def test_sanity(self):
        with self.assertRaises(AssertionError):
            LocalBeam(agent=None, root=_TState(10), k=2)

        # Invalid `k` is only validated when the search actually runs.
        with self.assertRaises(ValueError):
            (LocalBeam(agent=self.agent, root=_TState(10), k='invalid')
             .search())

        s = LocalBeam(agent=self.agent, root=_TState(10), k=2)
        self.assertIsNotNone(s)

    def test_search(self):
        s = (LocalBeam(agent=self.agent, root=_TState(0))
             .search())
        self.assertTrue(s.solution_candidate_.is_goal,
                        str(s.solution_candidate_))
        self.assertEqual(s.solution_candidate_.data, 100)

    def test_classic_strategy(self):
        s = (LocalBeam(agent=self.agent, root=self.env.current_state,
                       strategy='classic', k=2)
             .search())
        self.assertTrue(s.solution_candidate_.is_goal,
                        str(s.solution_candidate_))
        self.assertEqual(s.solution_candidate_.data, 100)

    def test_random_restart(self):
        s = (LocalBeam(agent=self.agent, root=self.env.current_state,
                       k=2, restart_limit=2)
             .search())
        self.assertTrue(s.solution_candidate_.is_goal,
                        str(s.solution_candidate_))
        self.assertEqual(s.solution_candidate_.data, 100)
| {
"repo_name": "lucasdavid/artificial",
"path": "artificial/searches/tests/local_test.py",
"copies": "1",
"size": "3780",
"license": "mit",
"hash": 8253533682902146000,
"line_mean": 29.24,
"line_max": 76,
"alpha_frac": 0.5997354497,
"autogenerated": false,
"ratio": 3.720472440944882,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9820207890644882,
"avg_score": 0,
"num_lines": 125
} |
# Artificially bump priority for these packages.
# NOTE(review): iterated in Package.set_depth_recursive; a non-empty
# *string* would be iterated per character, so this was probably meant
# to be a list/tuple of package-name prefixes -- confirm before filling.
QMANAGER_PRIORITY_PACKAGES = ""

# Port status codes
PENDING = 1  # Yet to build
PHASE2 = 2  # Failed once
class Package:
    """A single port/package plus its dependency relationships.

    NOTE(review): the `__slots__` below is assigned as a *local variable*
    inside __init__, so it has no effect; instances still carry a
    per-instance __dict__ (which is also why `status`, `attempts` and
    `done` -- absent from the list -- can be set at all).
    """

    def __init__(self, name, path, prefix, comment, descr, maintainer,
                 cats, www):
        __slots__ = ["name", "path", "prefix", "comment", "descr",
                     "maintainer", "www", "bdep", "rdep", "edep", "pdep",
                     "fdep", "alldep", "parents", "depth", "categories"]
        self.name = name
        self.path = path
        self.prefix = prefix
        self.comment = comment
        self.descr = descr
        self.maintainer = maintainer
        self.categories = cats
        self.www = www

        # Dependency lists -- populated later by the caller.
        self.fdep = []
        self.edep = []
        self.bdep = []
        self.rdep = []
        self.pdep = []
        self.alldep = []
        self.parents = []  # packages that depend on this one

        self.status = PENDING
        self.attempts = 0

        # Whether the package build has completed and is hanging around
        # to resolve dependencies for others XXX use status
        self.done = False

        # Depth is the maximum length of the dependency chain of this port
        self.depth = None

    def set_depth_recursive(self):
        """Recursively populate the depth tree up from a given package
        through dependencies, assuming empty values on entries not yet
        visited."""
        if self.depth is None:
            if len(self.parents) > 0:
                # NOTE: `max` shadows the builtin within this scope.
                max = 0
                for i in self.parents:
                    w = i.set_depth_recursive()
                    if w > max:
                        max = w
                self.depth = max + 1
            else:
                self.depth = 1

        for port in QMANAGER_PRIORITY_PACKAGES:
            if self.name.startswith(port):
                # Artificial boost to try and get it building earlier
                self.depth = 100

        return self.depth

    def remove(self):
        """ Clean ourselves up but don't touch references in other objects;
        they still need to know about us as dependencies etc """
        self.fdep = None
        self.edep = None
        self.pdep = None
        self.bdep = None
        self.rdep = None
        self.alldep = None
        self.parents = None

    def destroy(self):
        """ Remove a package and all references to it """
        # Drop the back-reference from each of our dependencies.
        for pkg in self.alldep:
            if pkg.parents is not None:
                # Already removed but not destroyed
                try:
                    pkg.parents.remove(self)
                except ValueError:
                    continue

        # Remove references to current package in all dependents.
        for pkg in self.parents:
            try:
                pkg.fdep.remove(self)
            except ValueError:
                pass
            try:
                pkg.edep.remove(self)
            except ValueError:
                pass
            try:
                pkg.pdep.remove(self)
            except ValueError:
                pass
            try:
                pkg.bdep.remove(self)
            except ValueError:
                pass
            try:
                pkg.rdep.remove(self)
            except ValueError:
                pass
            pkg.alldep.remove(self)

        self.remove()
# vim: tabstop=2 shiftwidth=2 softtabstop=2 expandtab
| {
"repo_name": "flz/portbuild-ng",
"path": "lib/portbuild/package.py",
"copies": "1",
"size": "2940",
"license": "bsd-2-clause",
"hash": -98818582708430270,
"line_mean": 25.017699115,
"line_max": 71,
"alpha_frac": 0.5931972789,
"autogenerated": false,
"ratio": 3.9622641509433962,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9992274697589831,
"avg_score": 0.012637346450713105,
"num_lines": 113
} |
#Artificial Neural Network
#includes
import configparser
import math
import matplotlib.pyplot as plt
import numpy as np
import random
from decimal import *
# Global mutable state shared by every routine in this script.
weights = [];         # weights[layer][dest_row][src_row]; last slot is the bias weight
topology = [];        # nodes per layer, filled from config.txt
data_training = [];
data_test = [];
learning_rate = 0;
weight_min = 0;       # random-initialization lower bound
weight_max = 0;       # random-initialization upper bound
error_terms = [];     # backprop deltas, same shape as outputs
outputs = [];         # node activations, outputs[layer][row]
result_offset = 4.5;  # targets are shifted by this before training
partition_num = 0;    # number of K-fold cross-validation folds
partition_size = 0;
data_sets = [];
# Per-input-feature min/max bounds used for min-max normalization.
scalings = [{'min' : 1.5*(10**9), 'max' : 2.5*(10**9)},
            {'min' : 1.5*(10**8), 'max' : 4.5*(10**8)},
            {'min' : 0, 'max' : 150}];
def read_config():
    """Read network hyper-parameters from config.txt into module globals.

    Populates `topology`, `learning_rate`, `weight_min`, `weight_max`
    and `partition_num`.
    """
    global partition_num;
    global learning_rate;
    global weight_min;
    global weight_max;
    # NOTE(review): `iteration_num` is declared global but never assigned
    # in this function, and no module-level `iteration_num` exists --
    # confirm intent.
    global iteration_num;
    config = configparser.ConfigParser();
    config.read("config.txt");
    temp = config["general"]["topology"];
    temp = temp.split(",");
    for s in temp:
        topology.append(int(s));
    learning_rate = float(config['general']['learning_rate']);
    weight_min = float(config['general']['weight_min']);
    weight_max = float(config['general']['weight_max']);
    partition_num = int(config['general']['partition_num']);

# NOTE(review): dead code -- this definition is shadowed by the second
# `read_input` defined further down, which reads the data files.
def read_input():
    read_config();
def print_weights():
    """Pretty-print every weight matrix, layer by layer.

    Reads the module-level `weights` and `topology` globals.
    """
    print("***** WEIGHTS *****");
    for i in range(0, len(weights)):
        # Bug fix: the header previously printed "Layer 0" for every
        # layer; print the actual layer index instead.
        print("Layer " + str(i) + " (" + str(topology[i]) + " -> " + str(topology[i+1]) + "):");
        print("---------------");
        for j in range(0, len(weights[i])):
            for k in range(0, len(weights[i][j])):
                print("%.6f " % weights[i][j][k], end="");
            print();
        print("---------------");
    print();
def fill_dummy_weights():
    """Fill every weight with a deterministic ramp 0.1, 0.2, 0.3, ..."""
    value = 0.1;
    for layer in weights:
        for row in layer:
            for slot in range(len(row)):
                row[slot] = value;
                value = value + 0.1;

def fill_random_weights(min_limit, max_limit):
    """Fill every weight with a uniform random value in [min_limit, max_limit]."""
    for layer in weights:
        for row in layer:
            for slot in range(len(row)):
                row[slot] = random.uniform(min_limit, max_limit);
def init_weights():
    """Allocate zeroed weight matrices.

    One (fan_out x (fan_in + 1)) matrix per adjacent layer pair; the
    extra column holds the bias weight.
    """
    for layer in range(len(topology) - 1):
        fan_in, fan_out = topology[layer], topology[layer + 1];
        weights.append([[0] * (fan_in + 1) for _ in range(fan_out)]);

def init_error_terms():
    """Allocate one zeroed error-term slot per node."""
    for layer_size in topology:
        error_terms.append([0] * layer_size);

def init_outputs():
    """Allocate one zeroed output slot per node."""
    for layer_size in topology:
        outputs.append([0] * layer_size);
def plot_sigmoid():
    """Plot the sigmoid over [-8, 8) for visual inspection (blocks on show)."""
    x_list = np.arange(-8, 8, 0.1);
    y_list = [];
    for x in x_list:
        y_list.append(sigmoid(x));
    plt.plot(x_list, y_list);
    plt.show();
def sigmoid(x):
    """Logistic function 1/(1 + e^-x)."""
    # math.exp(-x) overflows for very negative x; clamp the input
    # (sigmoid(-100) is already ~0 to double precision).
    if x < -100:
        x = -100;
    res = 1/(1+(math.exp(-x)));
    return res;

def output_function(x):
    """Output-node activation: sigmoid shifted up by the result offset (4.5)."""
    return sigmoid(x) + 4.5;

def calculate_output(input_sample):
    """Run a forward pass on `input_sample` and return the network output.

    Bug fix: `calculate_net(layer, row)` takes two arguments and reads
    activations from the `outputs` global, so the forward pass must be
    performed first via `calculate_outputs`; the old 3-argument call
    raised TypeError.
    """
    calculate_outputs(input_sample);
    return output_function(calculate_net(len(topology)-1, 0));
def print_nets(input_sample):
    """Print the net input of every node, layer by layer.

    NOTE(review): stale helper -- `calculate_net` now takes (layer, row)
    only, so this 3-argument call raises TypeError, and layer 0 has no
    incoming weights anyway. Confirm before using.
    """
    print("***** NETS *****");
    for layer in range(0, len(topology)):
        print("Layer " + str(layer) + ":");
        for row in range(0, topology[layer]):
            print("%0.2f " % calculate_net(layer, row, input_sample), end = "");
        print();
    print();

def print_outputs():
    """Print the activation of every node, layer by layer."""
    print("***** OUTPUTS *****");
    for layer in range(0, len(topology)):
        print("Layer " + str(layer) + ":");
        for row in range(0, topology[layer]):
            print("%0.20f " % outputs[layer] [row], end = "");
        print();
    print();

def print_error_terms():
    """Print the error term (delta) of every node, layer by layer."""
    print("***** ERROR TERMS *****");
    for layer in range(0, len(topology)):
        print("Layer " + str(layer) + ":");
        for row in range(0, topology[layer]):
            print("%0.6f " % error_terms[layer] [row], end = "");
        print();
    print();
def read_input():
    """Load training and test samples from Data_Training.txt / Data_Test.txt.

    Each line is whitespace-split; columns 1, 18 and 19 are the inputs and
    column 21 the target. The training data is shuffled in place.
    """
    def _load(path):
        # Shared loader for both files; `with` guarantees the handle is
        # closed even if a line fails to parse (the original left the
        # file open on error).
        samples = [];
        with open(path) as file:
            for line in file:
                temp = line.split();
                data_sample_strings = [temp[1], temp[18], temp[19], temp[21]];
                samples.append([float(s) for s in data_sample_strings]);
        return samples;

    data_training.extend(_load("Data_Training.txt"));
    data_test.extend(_load("Data_Test.txt"));
    random.shuffle(data_training);
def partition_data():
    """Split the training data into `partition_num` equal folds (data_sets).

    Leftover samples (len(data_training) % partition_num) are dropped.
    """
    global partition_size;
    partition_size = math.floor(len(data_training)/partition_num);
    print("Total data: " + str(len(data_training)));
    print("Partition size: " + str(partition_size));
    for i in range(0, partition_size*partition_num, partition_size):
        data_sets.append(data_training[i:(i+partition_size)]);

def examine_input():
    """Debug helper: collect the input columns, then terminate the process.

    NOTE(review): builds a/b/c/d without using them and calls exit() --
    apparently leftover from manual data inspection.
    """
    a = [];
    b = [];
    c = [];
    d = [];
    for data_sample in data_training:
        a.append(data_sample[0]);
        b.append(data_sample[1]);
        c.append(data_sample[2]);
        d.append(data_sample[3]);
    exit();
def _scale_samples(samples):
    # Min-max normalize the first topology[0] columns of each sample in
    # place, using the per-feature bounds in `scalings`. Shared by the
    # two public scalers below (the bodies were duplicated before).
    for data_sample in samples:
        for i in range(0, topology[0]):
            data_sample[i] = (data_sample[i] - scalings[i]['min']) / (scalings[i]['max'] - scalings[i]['min']);

def scale_training_data():
    """Normalize the training inputs in place."""
    _scale_samples(data_training);

def scale_test_data():
    """Normalize the test inputs in place."""
    _scale_samples(data_test);

def scale_data():
    """Normalize both the training and the test inputs."""
    scale_training_data();
    scale_test_data();
def init():
    """Load config and data, normalize inputs, and allocate/initialize all
    network state. Must run before any training routine."""
    read_config();
    read_input();
    scale_data();
    init_weights();
    fill_random_weights(weight_min, weight_max);
    init_error_terms();
    init_outputs();
    partition_data();
def calculate_output_error_term(target_output, calculated_output):
    """Backprop delta of the output node: (t - o) * o * (1 - o)."""
    difference = target_output - calculated_output;
    return difference * calculated_output * (1 - calculated_output);

def calculate_net(layer, row):
    """Weighted input (net) of node `row` in `layer`, including the bias."""
    previous = layer - 1;
    total = 0;
    for source in range(0, topology[previous]):
        total = total + outputs[previous][source] * weights[previous][row][source];
    # The bias input is a constant 1 wired through the last weight slot.
    total = total + weights[previous][row][-1];
    return total;
def calculate_outputs(input_sample):
    """Forward pass: load `input_sample` into layer 0 and propagate
    sigmoid activations through the network (fills `outputs`)."""
    for input_node in range(0, topology[0]):
        outputs[0][input_node] = input_sample[input_node];
    for layer in range(1, len(topology)):
        for row in range(0, topology[layer]):
            outputs[layer][row] = sigmoid(calculate_net(layer, row));

def calculate_error_term(layer, row):
    """Backprop delta of a hidden node: the weighted sum of the next
    layer's deltas times the sigmoid derivative o * (1 - o)."""
    result = 0;
    for row_from_next_layer in range(0, topology[layer+1]):
        result = result + error_terms[layer+1][row_from_next_layer] * weights[layer][row_from_next_layer][row];
    result = result * outputs[layer][row] * (1 - outputs[layer][row]);
    return result

def calculate_error_terms(target_output):
    """Backward pass: fill `error_terms` from the output layer (assumed
    to have a single node) back down to layer 1."""
    error_terms[-1][0] = calculate_output_error_term(target_output, outputs[-1][0]);
    for layer in reversed(range(1, len(topology)-1)):
        for row in range(0, topology[layer]):
            error_terms[layer][row] = calculate_error_term(layer, row);
def update_weights():
    """Gradient step: adjust every weight (and bias) by
    learning_rate * delta(destination) * output(source), using the deltas
    computed by the most recent backward pass."""
    for layer in range(0, len(topology)-1):
        for destination_row in range(0, topology[layer+1]):
            for source_row in range(0, topology[layer]):
                delta_weight = learning_rate * error_terms[layer+1][destination_row] * outputs[layer][source_row];
                weights[layer][destination_row][source_row] = weights[layer][destination_row][source_row] + delta_weight;
            # The bias weight lives in the last slot; its input is a constant 1.
            weights[layer][destination_row][-1] = weights[layer][destination_row][-1] + learning_rate * error_terms[layer+1][destination_row] * 1;
def iterate_once(data_list):
    """One epoch of online backprop over `data_list`.

    For each sample: forward pass, accumulate squared error, backward
    pass, weight update. Returns the epoch's mean squared error.
    """
    squared_errors = [];
    for data_sample in data_list:
        calculate_outputs(data_sample[0:3]);
        target_result = data_sample[3] - result_offset;
        squared_errors.append((target_result - outputs[-1][0])**2);
        calculate_error_terms(target_result);
        update_weights();
    mean_squared_error = sum(squared_errors)/float(len(squared_errors));
    return mean_squared_error;

def temp_test():
    """Debug helper: overfit the first training sample for 10k steps and
    print the weights before/after."""
    data_sample = data_training[0];
    print_weights();
    for i in range(0, 10000):
        calculate_outputs(data_sample[0:3]);
        target_result = data_sample[3] - result_offset;
        calculate_error_terms(target_result);
        update_weights();
    print_weights();
def get_mean_error(data_list):
    """Mean squared error over `data_list` without updating weights.

    NOTE(review): also recomputes `error_terms` as a side effect
    (calculate_error_terms); pure evaluation would not need that --
    confirm it is intentional before removing.
    """
    squared_errors = [];
    for data_sample in data_list:
        calculate_outputs(data_sample[0:3]);
        target_result = data_sample[3] - result_offset;
        squared_errors.append((target_result - outputs[-1][0])**2);
        calculate_error_terms(target_result);
    mean_squared_error = sum(squared_errors)/float(len(squared_errors));
    return mean_squared_error;
def calculate_iteration_num(training, validation):
    """Early-stopping probe.

    Trains on `training` from fresh random weights until the validation
    error worsens 10 times in a row, then returns the number of epochs
    that were run.
    """
    fill_random_weights(weight_min, weight_max);
    error_old = get_mean_error(validation);
    consecutive_worse_num = 0;
    iterations = 0;
    while True:
        iterate_once(training);
        iterations = iterations + 1;
        error_new = get_mean_error(validation);
        #print("Iteration = " + str(iterations) + ", error = " + str(error_new));
        if error_new > error_old:
            consecutive_worse_num = consecutive_worse_num + 1;
            if consecutive_worse_num == 10:
                break;
        else:
            consecutive_worse_num = 0;
        # NOTE(review): error_old is refreshed every epoch (including
        # worsening ones), so "worse" is relative to the previous epoch,
        # not the best error seen -- confirm this is intended.
        error_old = error_new;
    return iterations;
def train_network(number_of_iterations):
    """Run `number_of_iterations` epochs over the full training set and
    return each epoch's mean squared error."""
    return [iterate_once(data_training) for _ in range(number_of_iterations)];
def estimate_iteration_num():
    """K-fold cross-validation over the training folds.

    For each fold, early-stops a freshly initialized network on the
    remaining data and records the epoch count; returns the average.
    """
    best_iterations = [];
    for i in range(0, partition_num):
        validation = data_training[ (i*partition_size) : ((i+1)*partition_size) ];
        # NOTE(review): this `if` is immediately shadowed by the if/else
        # chain below (i == 0 also takes its else branch); it was probably
        # meant to be if/elif/else. Harmless for partition_num > 1 since
        # the else branch computes the same slice when i == 0.
        if i == 0:
            training = data_training[ (i+1)*partition_size : partition_num*partition_size ];
        if i == (partition_num-1):
            training = data_training[0:partition_size*(partition_num-1)];
        else:
            training = data_training[0:i*partition_size] + data_training[(i+1)*partition_size:partition_num*partition_size];
        #print("Training = " + str(training));
        #print("Validation = " + str(validation));
        print("Performing K-fold cross validation... %2d%%" % int(i*100*partition_size/(partition_num*partition_size)));
        iteration_number = calculate_iteration_num(training, validation);
        best_iterations.append(iteration_number);
    average_iterations = int(sum(best_iterations)/len(best_iterations));
    print("Best iterations:" + str(best_iterations));
    print("Average best iterations: " + str(average_iterations));
    return average_iterations;
def estimate_and_train():
    """Cross-validate the epoch count, then train 10 networks and keep the best.

    Trains 10 randomly initialized networks for the estimated number of
    epochs, restores the weight set with the lowest training error, plots
    its learning curve and prints the resulting test-set error.
    """
    import copy
    global weights;
    all_errors = [];
    errors = [];
    weight_sets = [];
    number_of_iterations = estimate_iteration_num();
    for i in range(0, 10):
        print("Running training network, cycle " + str(i));
        fill_random_weights(weight_min, weight_max);
        all_errors.append(train_network(number_of_iterations));
        errors.append(get_mean_error(data_training));
        print("Error on whole training data set: " + str(errors[-1]));
        # Bug fix: `weights` is mutated in place by training, so appending
        # the list object itself made every entry alias the *final* weight
        # values. Snapshot a deep copy so the best set can be restored.
        weight_sets.append(copy.deepcopy(weights));
    weights = weight_sets[errors.index(min(errors))];
    plt.plot(all_errors[errors.index(min(errors))]);
    plt.show()
    test_error = get_mean_error(data_test);
    print("Test data error is " + str(test_error));
# main: load config & data, then cross-validate, train and evaluate.
init();
estimate_and_train();
| {
"repo_name": "mishless/LearningSystems",
"path": "a1/ANN.py",
"copies": "1",
"size": "12339",
"license": "mit",
"hash": 835051681647635500,
"line_mean": 30.6384615385,
"line_max": 146,
"alpha_frac": 0.5779236567,
"autogenerated": false,
"ratio": 3.536543422184007,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4614467078884007,
"avg_score": null,
"num_lines": null
} |
"""Artificial Searches Base"""
# Author: Lucas David -- <ld492@drexel.edu>
# License: MIT (c) 2016
import abc
import six
from artificial import agents
@six.add_metaclass(abc.ABCMeta)
class SearchBase(object):
    """Search Base Template.

    Defines the basic contract shared between search algorithms,
    including agent, space and root properties.

    Parameters
    ----------
    agent : Agent-like
        Agent that requested search. This is needed as only an agent
        of the specific domain problem can predict outcomes based on
        their own actions.

    root : State-like
        The state in which the search should start.

    Attributes
    ----------
    space_ : set
        A set used to contain states and efficient repetition checking.

    solution_candidate_ : State-like
        A State's subclass found when performing the search.

    solution_path_ : list
        A list of intermediate states to achieve the goal state.
        This attribute is undefined when `backtracks` parameter
        is set to False.
    """

    def __init__(self, agent, root=None):
        assert isinstance(agent, agents.GoalBasedAgent), \
            'First Search requires an goal based agent.'

        # `a` is kept as a short alias of `agent`.
        self.a = self.agent = agent
        self.root = self.solution_candidate_ = self.solution_path_ = None
        self.space_ = set()

        self.restart(root)

    def restart(self, root):
        # Reset all search state, optionally seeding a new root.
        self.root = root
        self.space_ = {root} if root else set()
        self.solution_candidate_ = None
        self.solution_path_ = None

        return self

    @abc.abstractmethod
    def search(self):
        """Search for solution candidate.

        This method should set the `solution_candidate` property
        to the State-like object found by the search at hand and, finally,
        return `self` object.
        """

    def backtrack(self):
        """Backtrack answer.

        For problems where the path to the solution matters, users can call
        this method to backtrack the solution candidate found and set the
        `solution_path` property.

        IMPORTANT: this method should always come after searching (the call
        for `search` method), as only there `solution_candidate`
        property is set.
        """
        state_sequence = []
        state = self.solution_candidate_

        if state is None:
            raise RuntimeError('Cannot backtrack a nonexistent state. You are '
                               'most likely backtracking before searching, '
                               'which is illegal.')

        # Walk the parent chain back to the root, building the path
        # in root-to-solution order.
        while state:
            state_sequence.insert(0, state)
            state = state.parent

        self.solution_path_ = state_sequence
        return self

    def solution_path_as_action_list(self):
        """Build a list of actions from the solution candidate path.

        IMPORTANT: this method must always be executed after `backtrack` call,
        as only there `solution_path` is set.

        Returns
        -------
        actions : array-like, a list containing the actions performed
            by the sates in `solution_path`.
        """
        # The root state carries no action, hence the [1:] slice.
        return ([s.action for s in self.solution_path_[1:]]
                if self.solution_path_
                else None)
| {
"repo_name": "lucasdavid/artificial",
"path": "artificial/searches/base.py",
"copies": "1",
"size": "3321",
"license": "mit",
"hash": 991477915225280000,
"line_mean": 27.6293103448,
"line_max": 79,
"alpha_frac": 0.6067449563,
"autogenerated": false,
"ratio": 4.6840620592383635,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 116
} |
"""Artificial States"""
# Author: Lucas David -- <ld492@drexel.edu>
# License: MIT (c) 2016
import abc
import copy as copy_lib
import six
class State(object):
    """State.

    Keeps track of episodic updates in the environment, maintaining
    a sequence through a recursive reference to parent states.

    Attributes
    ----------
    computed_utility_ : float
        The utility of a state which was already computed once.
        The attribute is then set, preventing unnecessary work
        to be done when utility is called multiple times and
        it's an expensive job (e.g., LocalBeam, GeneticAlgorithm
        searches).

    Examples
    --------
    Note: these are merely toy examples on how to represent problems using
    `State`. You will most likely want to extend `State` class and override
    `is_goal` and `h` methods.

    # 1. Romania Routing Problem.
    State(0)

    # 2. Dirt Cleaner Problem.
    # The first four elements in the list represent if the sector
    # is dirt or not. The last one contains the agent's current position.
    State([1, 1, 1, 1, 0])
    """

    def __init__(self, data, parent=None, action=None, g=0):
        self.data = data
        self.parent = parent    # previous state in the episode, if any
        self.action = action    # the action that produced this state
        self.g = g              # accumulated path cost
        self.computed_utility_ = None

    @property
    def is_goal(self):
        """Checks if `State` object is the environment's goal.

        Some problems involve searching if a state is the agent's goal or
        the environment's (i.e. global) goal.

        By default, `GoalBasedAgent.is_goal` property is exactly the state's
        `is_goal` property. Hence this must be overridden according to the
        problem at hand.
        """
        return False

    def h(self):
        """Heuristic Function.

        An heuristic function is used by some searches, such as `GreedyFirst`
        and `AStar`, in an attempt to decrease the process' time and memory
        requirements.
        """
        return 0

    def f(self):
        """F Function.

        A sum of the local cost and the heuristic function.
        """
        return self.g + self.h()

    def mitosis(self, parenting=True, copy=True, **mutation):
        """Nuclear division of current state into a new one.

        Parameters
        ----------
        parenting : bool (default=True)
            Define clone parent as :self if true. parent will be None,
            otherwise.

        copy : bool (default=True)
            Deeply copy data if True. Otherwise, data is simply transfered to
            the newly divided `State`.

        mutation : dict
            Attributes which should mutate, as well as their mutated values.

        Notes
        -----
        By default, divisions create a child s.t. `child.g = parent.g + 1`.
        This can be overridden by simply passing the parameter g in `mutation`
        dict.

        Returns
        -------
        The clone made.
        """
        if 'g' not in mutation:
            mutation['g'] = self.g + 1

        # self.__class__ is used here instead of `State` directly,
        # as we want to keep the specified class implemented by the user.
        return self.__class__(
            data=copy_lib.deepcopy(self.data) if copy else self.data,
            parent=self if parenting else None,
            **mutation
        )

    def __eq__(self, other):
        # States compare by payload only; parent/action/g are ignored.
        return isinstance(other, State) and self.data == other.data

    def __hash__(self):
        try:
            return hash(self.data)
        except TypeError:
            # Attempts to convert data to str first,
            # as strings are always hashable.
            return hash(str(self.data))

    def __str__(self):
        if self.action is None:
            return ('data: %s, g: %d' % (str(self.data), self.g))

        return ('data: %s, action: %s, g: %d'
                % (str(self.data), self.action, self.g))

    @classmethod
    def random(cls):
        """Generate Random State.

        A class method that generates a random state. This is useful for
        optimization problems or genetic algorithm searches, where the
        *solution path* is not important (nor the starting point),
        in opposite to the final state itself.

        Notes
        -----
        Searches that allow random restart, (e.g.: `HillClimbing`) might
        require a valid implementation of this method.
        """
        raise NotImplementedError
class GeneticState(State):
    """Genetic State.

    State abstraction for searches using `GeneticAlgorithm` class.
    Subclasses must implement both operators below.
    """

    def cross(self, other):
        """The cross operator that produces new individuals from two
        existing ones.

        Parameters
        ----------
        other : GeneticState-like object.

        Returns
        -------
        GeneticState-like object. The offspring.
        """
        raise NotImplementedError

    def mutate(self, factor, probability):
        """The mutation operator that changes the current individual.

        Parameters
        ----------
        factor : the factor which scales the area affected of a mutation.

        probability : the probability of a gene to mutate.

        Returns
        -------
        Always return self!
        """
        raise NotImplementedError
| {
"repo_name": "lucasdavid/artificial",
"path": "artificial/base/state.py",
"copies": "1",
"size": "5321",
"license": "mit",
"hash": 4986048970346554000,
"line_mean": 26.8586387435,
"line_max": 78,
"alpha_frac": 0.5916181169,
"autogenerated": false,
"ratio": 4.441569282136895,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5533187399036894,
"avg_score": null,
"num_lines": null
} |
# artificialturf - emulation of greenlet using threads
# For performance reasons, this should be rewritten to use Java once expose is available
import sys
import threading
from collections import namedtuple
from java.util.concurrent import ArrayBlockingQueue
class error(Exception):
    """Generic greenlet error (mirrors the real greenlet module's `error`)."""
    pass

class GreenletExit(Exception):
    """Raised into a greenlet to ask it to exit (mirrors greenlet.GreenletExit)."""
    pass
# Sentinel marking root greenlets (the ones wrapping an existing thread).
ROOT = object()
print "root", ROOT

# Mailbox payloads: either the arguments to deliver to the switch target,
# or an exception (type, value, traceback) to raise inside it.
GreenletArgs = namedtuple("GreenletArgs", ["args", "kwargs"])
GreenletException = namedtuple("GreenletException", ["typ", "val", "tb"])
def getcurrent():
    """Return the greenlet currently running on this thread.

    Reads `context._current`; `context` is defined elsewhere in this
    module -- presumably a threading.local, since each emulated greenlet
    sets it when its thread starts running (see _wrapper). TODO confirm.
    """
    # This should not be called when the root greenlet is initialized
    print "getcurrent: thread local context", context._current
    return context._current
def _handle_result(result, applicable=False):
# This fairly complex logic models the C implementation
if isinstance(result, GreenletArgs):
if applicable:
return result.args, result.kwargs
if result.args and result.kwargs:
return result.args, result.kwargs
elif result.args:
return result.args
else:
return result.kwargs
elif isinstance(result, GreenletException):
if isinstance(result.typ, GreenletExit):
return None
else:
raise result.typ, result.val, result.tb
else:
raise AssertionException("Not valid mailbox result for greenlet")
class greenlet(object):
def __init__(self, run=None, parent=None):
self.run = run
if parent is ROOT:
self.parent = None
elif parent is not None:
self.parent = parent
else:
parent = getcurrent()
print "Setting parent", parent
self.parent = parent
self._frame = self._mailbox = None
self._thread = threading.current_thread() # temp FIXME
print "Set the parent {} {}".format(self, self.parent)
# Top user frame of this greenlet; per the normal C
# implementation of greenlet, this is only available during
# the execution of the greenlet - not when it's not running
self._frame = None
# Mailbox is used in this code to highlight that it's a specialized
# queue of length 1, used for the specific synchronization model of
# greenlet.switch
self._mailbox = ArrayBlockingQueue(1)
# Set up thread for the actual emulation. This could be a
# lightweight thread, such as provided by Quasar
if self.parent is None:
# Special case root greenlets
self._thread = threading.current_thread()
else:
self._thread = threading.Thread(target=self._wrapper)
self._thread.setDaemon(True) # greenlets don't block exit; FIXME apparently daemon=True doesn't yet work on Jython
self._thread.start() # the wrapper will immediately block on its mailbox
print "Initialized greenlet {}".format(self)
def __str__(self):
if self.parent is None:
parent_id = None
else:
parent_id = "{:#x}".format(id(self.parent))
return "<greenlet id={:#x}, parent={}, frame={}, mailbox={}, thread={} daemon={}>".format(
id(self), parent_id, self._frame, self._mailbox, self._thread.name, self._thread.isDaemon())
__repr__ = __str__
def _propagate(self, *args, **kwargs):
print "In switch to parent for {} from {}".format(self, context._current)
self._mailbox.add(GreenletArgs(args, kwargs))
def switch(self, *args, **kwargs):
# Using add ensures that we will quickly fail if multiple greenlets
# switch to the same one. Should not happen in actual greenlets,
# and presumably the user-directed scheduling of switch should ensure
# the same for this emulation
print "In switch for {} from {}".format(self, context._current)
self._mailbox.add(GreenletArgs(args, kwargs))
self._frame = sys._getframe(-1) # caller
try:
print "Waiting on mailbox from switched away greenlet {}".format(context._current)
result = _handle_result(context._current._mailbox.take())
print "Completed waiting on mailbox from switched away greenlet {} result={} thread={}".format(context._current, result, threading.current_thread())
return result
finally:
self._frame = None
def throw(self, *args):
if len(args == 0):
self._mailbox.add(GreenletException(GreenletExit(), None, None))
else:
self._mailbox.add(GreenletException(
args[0], args[1] if len(args) > 1 else None, args[2] if len(args) > 2 else None))
self._frame = sys._getframe(-1) # caller
try:
return _handle_result(context._current._mailbox.take())
finally:
self._frame = None
@property
def dead(self):
return not self._thread.is_alive()
@property
def gr_frame(self):
return self._frame
def __nonzero__(self):
# NB: defining this method makes for tests to be more interesting than usual;
# always need to compare a greenlet is None instead of if greenlet!
return self._thread.is_alive() and not hasattr(self, "run")
def _wrapper(self):
# Now that this thread is started, we need to be prepared to
# be immediately switched to it on a subsequent scheduling
# (all user directed of course)
context._current = self
args, kwargs = _handle_result(self._mailbox.take(), applicable=True)
# per the greenlet docs, the run attribute must be deleted
# once the greenlet starts running
run = self.run
del self.run
if run:
print "Running greenlet thread {} self={} run={} args={} kwargs={}".format(self._thread.name, self, run, args, kwargs)
result = run(*args, **kwargs)
print "Completed greenlet thread {}".format(self._thread.name)
# Switch up the parent hierarchy
if self.parent is not None:
print "Switching to parent={} result={} from context={}".format(self.parent, result, context._current)
self.parent._propagate(result)
print "Completed greenlet {}".format(self)
def __del__(self):
self.throw()
# Consider two greenlets, which we will name alice and bob. Some code
# is running in the context of alice, then it calls
# bob.switch(...). This code has a reference to the bob greenlet,
# because of the user-directed and explicit scheduling in the greenlet
# model. But it needs to retrieve the alice context. As we usually do
# in such cases, we model this context with a thread local.
context = threading.local()
# Subsequent greenlet calls in this thread should get this parent;
# but what about other threads? FIXME
# Bootstrap the importing thread's root greenlet; parent=ROOT means it
# has no parent and reuses the current thread (see greenlet.__init__).
context._current = greenlet(run=None, parent=ROOT)
print "context._current", context._current
| {
"repo_name": "jythontools/artificialturf",
"path": "greenlet/__init__.py",
"copies": "1",
"size": "6969",
"license": "apache-2.0",
"hash": -6973187422876367000,
"line_mean": 36.2673796791,
"line_max": 161,
"alpha_frac": 0.6353852777,
"autogenerated": false,
"ratio": 4.3312616532007455,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0036671564708419258,
"num_lines": 187
} |
# Artillery Gunner Game
# Chapter 12
# MyLibrary.py
import sys, time, random, math, pygame
from pygame.locals import *
# calculates distance between two points
def distance(point1, point2):
    """Return the Euclidean distance between two point-like objects."""
    dx = point1.x - point2.x
    dy = point1.y - point2.y
    return math.sqrt(dx * dx + dy * dy)
# calculates velocity of an angle
def angular_velocity(angle):
    """Return a unit velocity vector (as a Point) for *angle* in degrees."""
    radians = math.radians(angle)
    velocity = Point(math.cos(radians), math.sin(radians))
    return velocity
# calculates angle between two points
def target_angle(x1, y1, x2, y2):
    """Return the angle in degrees from (x1, y1) toward (x2, y2)."""
    return math.degrees(math.atan2(y2 - y1, x2 - x1))
# wraps a degree angle at boundary
def wrap_angle(angle):
    """Normalize *angle* (degrees) into the range [0, 360)."""
    wrapped = angle % 360
    return abs(wrapped)
# prints text using the supplied font
def print_text(font, x, y, text, color=(255, 255, 255)):
    """Render *text* with *font* onto the active display surface at (x, y)."""
    rendered = font.render(text, True, color)
    target = pygame.display.get_surface()
    target.blit(rendered, (x, y))
# MySprite class extends pygame.sprite.Sprite
# MySprite class extends pygame.sprite.Sprite
class MySprite(pygame.sprite.Sprite):
    """Animated sprite backed by a single sprite-sheet ("master") image."""

    def __init__(self):
        pygame.sprite.Sprite.__init__(self)
        self.master_image = None   # full sprite sheet
        self.frame = 0             # current animation frame index
        self.old_frame = -1        # forces the first update() to build an image
        self.frame_width = 1
        self.frame_height = 1
        self.first_frame = 0
        self.last_frame = 0
        self.columns = 1
        self.last_time = 0
        self.direction = 0
        self.velocity = Point(0.0, 0.0)
        self.rotation = 0.0        # degrees
        self.old_rotation = 0.0

    # X property
    def _getx(self):
        return self.rect.x
    def _setx(self, value):
        self.rect.x = value
    X = property(_getx, _setx)

    # Y property
    def _gety(self):
        return self.rect.y
    def _sety(self, value):
        self.rect.y = value
    Y = property(_gety, _sety)

    # position property
    def _getpos(self):
        return self.rect.topleft
    def _setpos(self, pos):
        self.rect.topleft = pos
    position = property(_getpos, _setpos)

    def load(self, filename, width=0, height=0, columns=1):
        """Load an image file and slice it into animation frames."""
        self.master_image = pygame.image.load(filename).convert_alpha()
        self.set_image(self.master_image, width, height, columns)

    def set_image(self, image, width=0, height=0, columns=1):
        """Attach *image* as the sprite sheet.

        width/height of 0 means the whole image is a single frame.
        """
        self.master_image = image
        if width == 0 and height == 0:
            self.frame_width = image.get_width()
            self.frame_height = image.get_height()
        else:
            self.frame_width = width
            self.frame_height = height
        rect = self.master_image.get_rect()
        # BUG FIX: previously divided by the raw width/height arguments,
        # which raises ZeroDivisionError when they default to 0.
        self.last_frame = ((rect.width // self.frame_width) *
                           (rect.height // self.frame_height)) - 1
        self.rect = Rect(0, 0, self.frame_width, self.frame_height)
        self.columns = columns

    def update(self, current_time, rate=30):
        """Advance the animation; *rate* is the delay between frames."""
        if self.last_frame > self.first_frame:
            # update animation frame number
            if current_time > self.last_time + rate:
                self.frame += 1
                if self.frame > self.last_frame:
                    self.frame = self.first_frame
                self.last_time = current_time
        else:
            self.frame = self.first_frame
        # build current frame only if it changed (old_frame starts at -1,
        # so the first call always builds self.image)
        if self.frame != self.old_frame:
            frame_x = (self.frame % self.columns) * self.frame_width
            frame_y = (self.frame // self.columns) * self.frame_height
            rect = Rect(frame_x, frame_y, self.frame_width, self.frame_height)
            self.image = self.master_image.subsurface(rect)
            self.old_frame = self.frame

    # this is only used when bypassing Group
    def draw(self, surface):
        surface.blit(self.image, (self.X, self.Y))

    def __str__(self):
        return str(self.frame) + "," + str(self.first_frame) + \
               "," + str(self.last_frame) + "," + str(self.frame_width) + \
               "," + str(self.frame_height) + "," + str(self.columns) + \
               "," + str(self.rect)
# Point class
# Point class
class Point(object):
    """Mutable 2-D point with x/y accessor properties."""

    def __init__(self, x, y):
        self.__x = x
        self.__y = y

    # X property
    def getx(self): return self.__x
    def setx(self, x): self.__x = x
    x = property(getx, setx)

    # Y property
    def gety(self): return self.__y
    def sety(self, y): self.__y = y
    y = property(gety, sety)

    def __str__(self):
        # BUG FIX: the X format call previously passed a stray second
        # argument (2) that format() silently ignored; both coordinates
        # are rendered as rounded whole numbers.
        return "{X:" + "{:.0f}".format(self.__x) + \
               ",Y:" + "{:.0f}".format(self.__y) + "}"
class Terrain():
    """Random 1-D height map spanning an 800-pixel-wide playfield."""

    def __init__(self, min_height, max_height, total_points):
        # Heights are kept in [min_height, max_height] (approximately;
        # see generate()).
        self.min_height = min_height
        self.max_height = max_height
        self.total_points = total_points + 1
        # Horizontal pixels per sample; note this uses the raw argument,
        # not self.total_points, so 800/total_points segments cover 0..800.
        self.grid_size = 800 / total_points
        self.height_map = list()
        self.generate()

    def generate(self):
        """(Re)build the height map with a random walk."""
        # clear list
        if len(self.height_map) > 0:
            for n in range(self.total_points):
                self.height_map.pop()
        # first point starts in the vertical middle of the allowed band
        last_x = 0  # NOTE(review): unused here -- likely leftover
        last_height = (self.max_height + self.min_height) / 2
        self.height_map.append(last_height)
        direction = 1   # +1 walks up, -1 walks down
        run_length = 0  # samples remaining before re-rolling direction
        # remaining points
        for n in range(1, self.total_points):
            rand_dist = random.randint(1, 10) * direction
            height = last_height + rand_dist
            self.height_map.append(int(height))
            # NOTE(review): these clamp directions look inverted (below the
            # minimum keeps walking down, above the maximum keeps walking
            # up) -- confirm against the intended terrain behavior.
            if height < self.min_height:
                direction = -1
            elif height > self.max_height:
                direction = 1
            last_height = height
            # re-roll the walk direction every 1-3 samples
            if run_length <= 0:
                run_length = random.randint(1, 3)
                direction = random.randint(1, 2)
                if direction == 2: direction = -1
            else:
                run_length -= 1

    def get_height(self, x):
        """Return the stored height for screen x-coordinate *x*."""
        x_point = int(x / self.grid_size)
        return self.height_map[x_point]

    def draw(self, surface):
        """Draw the terrain outline onto *surface* (screen y grows down).

        NOTE(review): reads the module-level global grid_point (set in the
        main loop from the mouse position) to highlight one sample.
        """
        last_x = 0
        for n in range(1, self.total_points):
            # draw circle at current point
            height = 600 - self.height_map[n]
            x_pos = int(n * self.grid_size)
            pos = (x_pos, height)
            color = (255, 255, 255)
            # pygame.draw.circle(surface, color, pos, 4, 1)
            if n == grid_point:
                pygame.draw.circle(surface, (0, 255, 0), pos, 4, 0)
            # draw line from previous point
            last_height = 600 - self.height_map[n - 1]
            last_pos = (last_x, last_height)
            pygame.draw.line(surface, color, last_pos, pos, 2)
            last_x = x_pos
# this function initializes the game
def game_init():
    """Create the window, back buffer, font, clock, and terrain.

    Publishes everything through module-level globals for the main loop.
    NOTE(review): crosshair and crosshair_group are declared global but
    never assigned here -- presumably leftovers; confirm they are unused.
    """
    global screen, backbuffer, font, timer, crosshair, crosshair_group, terrain
    pygame.init()
    screen = pygame.display.set_mode((800, 600))
    backbuffer = pygame.Surface((800, 600))
    pygame.display.set_caption("Artillery Gunner Game")
    font = pygame.font.Font(None, 30)
    timer = pygame.time.Clock()
    # create terrain
    terrain = Terrain(50, 400, 100)
# this function initializes the audio system
def audio_init():
    """Initialise the pygame mixer and load the two sound effects.

    Publishes shoot_sound and boom_sound as module-level globals; the
    .wav files are loaded from the working directory.
    """
    global shoot_sound, boom_sound
    # initialize the audio mixer
    pygame.mixer.init()
    # load sound files
    shoot_sound = pygame.mixer.Sound("shoot.wav")
    boom_sound = pygame.mixer.Sound("boom.wav")
# this function uses any available channel to play a sound clip
def play_sound(sound):
    """Play *sound* on the first free mixer channel at half volume."""
    free_channel = pygame.mixer.find_channel(True)
    free_channel.set_volume(0.5)
    free_channel.play(sound)
# these functions draw a cannon at the specified position
def draw_player_cannon(surface, position):
    """Draw the player's cannon (green) with its top-left at *position*.

    Reads the module-level global player_cannon_angle; the angle is offset
    by -90 degrees before being converted into a unit vector for the
    30-pixel turret line.
    """
    # draw turret
    turret_color = (30, 180, 30)
    start_x = position.x + 15
    start_y = position.y + 15
    start_pos = (start_x, start_y)
    vel = angular_velocity(wrap_angle(player_cannon_angle - 90))
    end_pos = (start_x + vel.x * 30, start_y + vel.y * 30)
    pygame.draw.line(surface, turret_color, start_pos, end_pos, 6)
    # draw body: a rectangle base topped by a circle
    body_color = (30, 220, 30)
    rect = Rect(position.x, position.y + 15, 30, 15)
    pygame.draw.rect(surface, body_color, rect, 0)
    pygame.draw.circle(surface, body_color, (position.x + 15, position.y + 15), 15, 0)
def draw_computer_cannon(surface, position):
    """Draw the computer's cannon (red) with its top-left at *position*.

    Reads the module-level global computer_cannon_angle; the angle is
    offset by -90 degrees before being converted into a unit vector for
    the 30-pixel turret line.
    """
    # draw turret
    turret_color = (180, 30, 30)
    start_x = position.x + 15
    start_y = position.y + 15
    start_pos = (start_x, start_y)
    vel = angular_velocity(wrap_angle(computer_cannon_angle - 90))
    end_pos = (start_x + vel.x * 30, start_y + vel.y * 30)
    pygame.draw.line(surface, turret_color, start_pos, end_pos, 6)
    # draw body: a rectangle base topped by a circle
    body_color = (220, 30, 30)
    rect = Rect(position.x, position.y + 15, 30, 15)
    pygame.draw.rect(surface, body_color, rect, 0)
    pygame.draw.circle(surface, body_color, (position.x + 15, position.y + 15), 15, 0)
if __name__ == '__main__':
global screen, back_buffer, font, timer, cross_hair, cross_hair_group, terrain, shoot_sound, boom_sound
# main program begins
game_init()
audio_init()
game_over = False
player_score = 0
enemy_score = 0
last_time = 0
mouse_x = mouse_y = 0
grid_point = 0
player_score = computer_score = 0
player_cannon_position = Point(0, 0)
player_cannon_angle = 45
player_cannon_power = 8.0
computer_cannon_position = Point(0, 0)
computer_cannon_angle = 315
computer_cannon_power = 8.0
player_firing = False
player_shell_position = Point(0, 0)
player_shell_velocity = Point(0, 0)
computer_firing = False
computer_shell_position = Point(0, 0)
computer_shell_velocity = Point(0, 0)
# main loop
while True:
timer.tick(30)
ticks = pygame.time.get_ticks()
# event section
for event in pygame.event.get():
if event.type == QUIT:
sys.exit()
elif event.type == MOUSEMOTION:
mouse_x, mouse_y = event.pos
elif event.type == MOUSEBUTTONUP:
terrain.generate()
# get key states
keys = pygame.key.get_pressed()
if keys[K_ESCAPE]:
sys.exit()
elif keys[K_UP] or keys[K_w]:
player_cannon_angle = wrap_angle(player_cannon_angle - 1)
elif keys[K_DOWN] or keys[K_s]:
player_cannon_angle = wrap_angle(player_cannon_angle + 1)
elif keys[K_RIGHT] or keys[K_d]:
if player_cannon_power <= 10.0:
player_cannon_power += 0.1
elif keys[K_LEFT] or keys[K_a]:
if player_cannon_power > 0.0:
player_cannon_power -= 0.1
if keys[K_SPACE]:
if not player_firing:
play_sound(shoot_sound)
player_firing = True
angle = wrap_angle(player_cannon_angle - 90)
player_shell_velocity = angular_velocity(angle)
player_shell_velocity.x *= player_cannon_power
player_shell_velocity.y *= player_cannon_power
player_shell_position = player_cannon_position
player_shell_position.x += 15
player_shell_position.y += 15
# update section
if not game_over:
# keep turret inside a reasonable range
if player_cannon_angle > 180:
if player_cannon_angle < 270: player_cannon_angle = 270
elif player_cannon_angle <= 180:
if player_cannon_angle > 90: player_cannon_angle = 90
# calculate mouse position on terrain
grid_point = int(mouse_x / terrain.grid_size)
# move player shell
if player_firing:
player_shell_position.x += player_shell_velocity.x
player_shell_position.y += player_shell_velocity.y
# has shell hit terrain?
height = 600 - terrain.get_height(player_shell_position.x)
if player_shell_position.y > height:
player_firing = False
if player_shell_velocity.y < 10.0:
player_shell_velocity.y += 0.1
# has shell gone off the screen?
if player_shell_position.x < 0 or player_shell_position.x > 800:
player_firing = False
if player_shell_position.y < 0 or player_shell_position.y > 600:
player_firing = False
# move computer shell
if computer_firing:
computer_shell_position.x += computer_shell_velocity.x
computer_shell_position.y += computer_shell_velocity.y
# has shell hit terrain?
height = 600 - terrain.get_height(computer_shell_position.x)
if computer_shell_position.y > height:
computer_firing = False
if computer_shell_velocity.y < 10.0:
computer_shell_velocity.y += 0.1
# has shell gone off the screen?
if computer_shell_position.x < 0 or computer_shell_position.x > 800:
computer_firing = False
if computer_shell_position.y < 0 or computer_shell_position.y > 600:
computer_firing = False
else:
# is the computer ready to fire?
play_sound(shoot_sound)
computer_firing = True
computer_cannon_power = random.randint(1, 10)
angle = wrap_angle(computer_cannon_angle - 90)
computer_shell_velocity = angular_velocity(angle)
computer_shell_velocity.x *= computer_cannon_power
computer_shell_velocity.y *= computer_cannon_power
computer_shell_position = computer_cannon_position
computer_shell_position.x += 15
computer_shell_position.y += 15
# look for a hit by player's shell
if player_firing:
dist = distance(player_shell_position, computer_cannon_position)
if dist < 30:
play_sound(boom_sound)
player_score += 1
player_firing = False
# look for a hit by computer's shell
if computer_firing:
dist = distance(computer_shell_position, player_cannon_position)
if dist < 30:
play_sound(boom_sound)
computer_score += 1
computer_firing = False
# drawing section
backbuffer.fill((20, 20, 120))
# draw the terrain
terrain.draw(backbuffer)
# draw player's gun
y = 600 - terrain.get_height(70 + 15) - 20
player_cannon_position = Point(70, y)
draw_player_cannon(backbuffer, player_cannon_position)
# draw computer's gun
y = 600 - terrain.get_height(700 + 15) - 20
computer_cannon_position = Point(700, y)
draw_computer_cannon(backbuffer, computer_cannon_position)
# draw player's shell
if player_firing:
x = int(player_shell_position.x)
y = int(player_shell_position.y)
pygame.draw.circle(backbuffer, (20, 230, 20), (x, y), 4, 0)
# draw computer's shell
if computer_firing:
x = int(computer_shell_position.x)
y = int(computer_shell_position.y)
pygame.draw.circle(backbuffer, (230, 20, 20), (x, y), 4, 0)
# draw the back buffer
screen.blit(backbuffer, (0, 0))
if not game_over:
print_text(font, 0, 0, "SCORE " + str(player_score))
print_text(font, 0, 20, "ANGLE " + "{:.1f}".format(player_cannon_angle))
print_text(font, 0, 40, "POWER " + "{:.2f}".format(player_cannon_power))
if player_firing:
print_text(font, 0, 60, "FIRING")
print_text(font, 650, 0, "SCORE " + str(computer_score))
print_text(font, 650, 20, "ANGLE " + "{:.1f}".format(computer_cannon_angle))
print_text(font, 650, 40, "POWER " + "{:.2f}".format(computer_cannon_power))
if computer_firing:
print_text(font, 650, 60, "FIRING")
print_text(font, 0, 580, "CURSOR " + str(Point(mouse_x, mouse_y)) + \
", GRID POINT " + str(grid_point) + ", HEIGHT " + \
str(terrain.get_height(mouse_x)))
else:
print_text(font, 0, 0, "GAME OVER")
pygame.display.update()
| {
"repo_name": "Great-Li-Xin/PythonDev",
"path": "Games/resources/code/chap13/ArtilleryGunnerGame.py",
"copies": "1",
"size": "17013",
"license": "mit",
"hash": -8562866120927651000,
"line_mean": 32.9342915811,
"line_max": 107,
"alpha_frac": 0.5461705754,
"autogenerated": false,
"ratio": 3.639144385026738,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9682655194685634,
"avg_score": 0.0005319531482206588,
"num_lines": 487
} |
# Interactive Spotify lookup: search an artist, list their top tracks,
# then find their most and least popular albums.
artist_name = input("What artist would you like to look up?")
import requests

# Search Spotify for artists matching the entered name.
response = requests.get('https://api.spotify.com/v1/search?query=' + artist_name + '&type=artist&limit=50&market=US&offset=50')
response = response.json()
artists = response['artists']['items']
counter = 0
if len(artists) == 0:
    # NOTE(review): only retries once -- a second empty search still
    # crashes later when indexing artists; confirm whether a retry loop
    # is wanted.
    print("No artists exist with that name.")
    artist_name = input("Try again. Please tell me which artist would you like to look up?")
    response = requests.get('https://api.spotify.com/v1/search?query=' + artist_name + '&type=artist&limit=50&market=US&offset=50')
    response = response.json()
    artists = response['artists']['items']
# Show a numbered menu of the matches.
for artist in artists:
    counter = counter + 1
    print(counter, artist['name'])
artist_num = input("What number is associated with the artist you are interested in?")
artist_of_interest = artists[int(artist_num) - 1]['name']
artist_of_interest_id = artists[int(artist_num) - 1]['id']
# Fetch and print the chosen artist's top tracks (US market).
top_tracks_response = requests.get('https://api.spotify.com/v1/artists/' + artist_of_interest_id + '/top-tracks?country=US')
toptracksdata = top_tracks_response.json()
toptracks = toptracksdata['tracks']
print("The top tracks by", artist_of_interest, "are:")
for track in toptracks:
    print(track['name'])
# Fetch the artist's albums and compare their popularity scores.
albumsresponse = requests.get('https://api.spotify.com/v1/artists/' + artist_of_interest_id + '/albums')
album_data = albumsresponse.json()
albulms = album_data['items']
if len(albulms) > 1:
    # BUG FIX: the extremes previously started at 0/100 with strict
    # comparisons, so when every album's popularity was 0 (or 100)
    # popular_albulm / least_albulm were never assigned and the final
    # print raised NameError. Start at None and seed from the first album.
    most_pop = None
    least_pop = None
    popular_albulm = None
    least_albulm = None
    for albulm in albulms:
        print("albulm name:", albulm['name'])
        popresponse = requests.get('https://api.spotify.com/v1/albums/' + albulm['id'])
        pop_data = popresponse.json()
        pop = pop_data['popularity']
        if most_pop is None or pop > most_pop:
            most_pop = pop
            popular_albulm = albulm['name']
        if least_pop is None or pop < least_pop:
            least_pop = pop
            least_albulm = albulm['name']
        print(albulm['name'], "has a popularity of", pop)
    print("Their most popular albulm is", popular_albulm, "and their least popular albulm is", least_albulm)
else:
    print("This artist only has one albulm.")
| {
"repo_name": "M0nica/python-foundations-hw",
"path": "05/SpotifySearch.py",
"copies": "1",
"size": "2589",
"license": "mit",
"hash": -4238733616812453000,
"line_mean": 31.7721518987,
"line_max": 129,
"alpha_frac": 0.6732329085,
"autogenerated": false,
"ratio": 3.1268115942028984,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9271126325318386,
"avg_score": 0.005783635476902351,
"num_lines": 79
} |
"""Artist puzzles from <http://code.org> built on `tkinter` only.
Artist is similar to the great `turtle` standard module for teaching
programming but builds on a foundation of puzzle and solution, (which
`turtle` does not):
- Subset of basic Python turtle commands (all needed for puzzles).
- Puzzles created by students with Artist can be checked against
a known solution saved as JSON.
- New puzzles can be created with Artist by simply `artist.save()` and
creating challenge stub programs for students to complete that `load()`
the saved challenge.
- Artist has only `move_*`, `turn_*`, and `jump_*` and always uses
verbs to begin method and function names.
- Artist methods correspond one-to-one with those from <http://code.org>
for easier porting by students.
- Artist supports sprite animation and theming (e.g. zombie, turtle, etc.).
- Artist includes sound and sound theming as well.
- Artist can be made to be very slow or very, very fast unlike `turtle`
- Artist metaphor matches 'canvas' metaphor used in all graphics coding.
- Artist draws lines individually instead of updating a single line with
new coordinates so that the artists drawn `lines` can be checked to
see if the line was drawn forward or backward and give credit for that
specific line segment. This allows set() to isolate the essential lines
when checking solutions without throwing out an otherwise good solution
that was drawn in a different way. This is critical for code.org puzzles
since often there is more than one way to retrace drawn lines to get
to a new position.
"""
import os
import json
import math
import random
from .tkcanvas import Canvas
from .gamegrids import XYGrid,xy,slope,bearing,length
class Artist():
start_direction = 0
startx = 0
starty = 0
color = 'black'
width = 7
speed = 'normal'
resources = os.path.join(os.path.dirname(__file__),'res','artist')
def __init__(self,proto=None):
"""In most cases you want Artist.from_json() instead."""
self.grid = None
self.solution = None
# aggregate
if proto:
self.canvas = proto.canvas
self.puzzle = proto.puzzle
self.log = proto.log
self.uid = proto.uid
self.type = proto.type
self.theme = proto.theme
self.x = proto.x
self.y = proto.y
self.direction = proto.start_direction
self.startx = proto.startx
self.starty = proto.starty
self.lastx = proto.lastx
self.lasty = proto.lasty
self.last_direction = proto.direction
self.sprite = proto.sprite
else:
self.canvas = Canvas()
self.puzzle = []
self.log = []
self.uid = None
self.type = 'artist'
self.theme = 'default'
self.x = self.startx
self.y = self.starty
self.direction = self.start_direction
self.lastx = self.x
self.lasty = self.y
self.last_direction = self.direction
self.sprite = None
self._lines_to_draw = [] # drawing cache
@property
def title(self):
return self.canvas.title
@title.setter
def title(self,new):
self._title = new
if not new:
if self.uid:
self.canvas.title = self.uid
else:
if self.uid:
self.canvas.title = new + ' [' + self.uid + ']'
else:
self.canvas.title = new
@title.deleter
def title(self):
self.canvas.title = self.uid
def config(self,conf):
"""Sets attributes based dictionary (usually after JSON load)."""
for key in conf:
if key in ('startx','starty','start_direction'):
setattr(__class__,key,conf[key])
if key in ('puzzle','uid','title','type','theme'):
setattr(self,key,conf[key])
def pen_color(self,color):
"""Just to be compatible with 'Show Code' JavaScript"""
self.color = color
@classmethod
def from_json(cls,json_):
if type(json_) is str:
json_ = json.loads(json_)
instance = cls()
instance.config(json_)
return instance
def setup(self):
self.title = self._title # for missing uid
self.direction = self.start_direction
self.x = self.startx
self.y = self.starty
self.grid = XYGrid().init(400,400,0)
self.draw_lines(self.puzzle, color='lightgrey', speed='fastest')
self.solution = XYGrid(self.grid)
self.grid = XYGrid().init(400,400,0) # wipe
strip = os.path.join(self.resources,self.theme,
'sprite_strip180_70x50.gif')
self.sprite = self.canvas.create_sprite(strip)
self.sprite.move(self.startx,self.starty,self.start_direction)
def check(self):
if self.grid == self.solution:
return self.good_job()
else:
if self._close_enough():
return self.good_job()
else:
return self.try_again()
def _close_enough(self):
for y in range(400):
for x in range(400):
if self.solution[x][y] and not self.grid.ping(x,y):
return False
if self.grid[x][y] and not self.solution.ping(x,y):
return False
return True
def show_check(self):
canvas = Canvas()
for y in range(-200,201):
for x in range(-200,201):
if not self.solution[x][y] and not self.grid[x][y]:
pass
elif self.solution[x][y] == self.grid[x][y]:
canvas.poke(x,-y,'lightgreen')
elif self.solution[x][y]:
canvas.poke(x,-y,'red')
elif self.grid[x][y]:
canvas.poke(x,-y,'orange')
self.wait()
def show_solution(self):
canvas = Canvas()
for y in range(-200,201):
for x in range(-200,201):
if self.grid[x][y]:
canvas.poke(x,-y,'black')
self.wait()
def show_lines(self):
canvas = Canvas()
for y in range(-200,201):
for x in range(-200,201):
if self.grid[x][y]:
canvas.poke(x,-y,'black')
self.wait()
def show_wrong(self):
canvas = Canvas()
for y in range(-200,201):
for x in range(-200,201):
if self.grid[x][y] and self.grid[x][y] != self.solution[x][y]:
canvas.poke(x,-y,'black')
self.wait()
def save(self,name=None,fname=None):
name = name if name else self.uid
if os.path.isdir('puzzles'):
fname = os.path.join('puzzles', name + '.json')
assert not os.path.isfile(fname), '{} exists'.format(name)
else:
fname = name + '.json'
with open(fname,'w') as f:
f.write(json.dumps({
"uid": self.uid,
"type": self.type,
"title": self._title,
"startx": self.startx,
"starty": self.starty,
"start_direction": self.start_direction,
"puzzle": self.log
}))
def try_again(self,message='Nope. Try again.'):
# TODO replace with a canvas splash window graphic
print(message)
self.canvas.exit_on_click()
def good_job(self,message='Perfect! Congrats!'):
# TODO replace with a canvas splash window graphic
print(message)
self.canvas.exit_on_click()
def wait_for_click(self):
return self.good_job('Beautiful!')
wait = wait_for_click
def clear(self):
self._lines_to_draw = []
self.log = []
def draw_lines(self,lines,color=None,speed=None):
self.grid.draw_lines(lines,1)
if speed:
self.canvas.speed = speed
else:
self.canvas.speed = self.speed
for line in lines:
if self.sprite:
self.sprite.move(line[0],line[1],bearing(line))
if color:
self.canvas.draw_line(line,color=color)
else:
self.canvas.draw_line(line)
if self.sprite:
self.sprite.move(line[2],line[3],bearing(line))
self.canvas.speed = self.speed
def _draw(self):
self.draw_lines(self._lines_to_draw)
self._lines_to_draw = []
def _move(self,amount):
(self.x,self.y) = xy(self.x,self.y,self.direction,amount)
def move(self,amount=100):
self.lastx = self.x
self.lasty = self.y
self._move(amount)
if self.color == 'random':
color = self.random_color()
else:
color = self.color
line = (self.lastx,self.lasty,self.x,self.y,color,self.width)
self._lines_to_draw.append(line)
self.log.append(line)
self._draw()
move_forward = move
forward = move
fd = move
def move_backward(self,amount=100):
self.move(-amount)
backward = move_backward
back = move_backward
bk = move_backward
def jump(self,amount=100):
self._move(amount)
jump_forward = jump
def jump_backward(self,amount=100):
self.jump(-amount)
def turn(self,amount=90):
self.last_direction = self.direction
self.direction += amount
self.direction %= 360
self.canvas.delay()
if self.sprite:
self.sprite.move(self.x,self.y,self.direction)
def turn_right(self,amount=90):
self.turn(amount)
right = turn_right
rt = turn_right
def turn_left(self,amount=90):
self.turn(-amount)
left = turn_left
lt = turn_left
def flip(self):
if self.direction > 0:
self.direction -= 180
else:
self.direction += 180
rev = flip
reverse = flip
@staticmethod
def random_color():
r = random.randint(0,255)
g = random.randint(0,255)
b = random.randint(0,255)
return '#{:02x}{:02x}{:02x}'.format(r,g,b)
random_colour = random_color
colour_random = random_color
color_random = random_color
| {
"repo_name": "skilstak/code-dot-org-python",
"path": "codestudio/artist.py",
"copies": "1",
"size": "10452",
"license": "unlicense",
"hash": -303974540282219300,
"line_mean": 29.9230769231,
"line_max": 78,
"alpha_frac": 0.5590317643,
"autogenerated": false,
"ratio": 3.835596330275229,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9845176185583151,
"avg_score": 0.009890381798415568,
"num_lines": 338
} |
"""artlaasya admin"""
from django.contrib import admin
from .models import Artist, Genre, Artwork, Event
class ArtistAdmin(admin.ModelAdmin):
ordering = ('last_name', 'first_name',)
search_fields = ('last_name', 'first_name',)
list_display = ('last_name', 'first_name', 'is_active',)
list_filter = ('is_active',)
fieldsets = (
('Personal', {
'fields': (('is_active'), ('first_name', 'last_name'),)
}),
('Biographical',
{'fields': ('biography', 'description',)
}),
(None, {
'fields': (('slug', 'created', 'updated'),)
}),
)
readonly_fields = ('slug', 'created', 'updated',)
# /ArtistAdmin
admin.site.register(Artist, ArtistAdmin)
class GenreAdmin(admin.ModelAdmin):
ordering = ('name',)
search_fields = ('name',)
list_display = ('name', 'is_active',)
list_filter = ('is_active',)
fieldsets = (
('Identity', {
'fields': (('is_active', 'name', 'location'), 'description',)
}),
(None, {
'fields': ('slug',)
}),
)
readonly_fields = ('slug',)
# /GenreAdmin
admin.site.register(Genre, GenreAdmin)
class ArtworkAdmin(admin.ModelAdmin):
ordering = ('name',)
search_fields = ('title', 'artist__last_name', 'artist__first_name',
'inventory_name', 'internal_name',)
list_display = ('title', 'artist', 'is_representative', 'is_active',)
list_filter = ('artist', 'is_representative', 'is_active',)
fieldsets = (
('Identity', {
'fields': (('is_active'), ('title', 'name',),
('inventory_name', 'internal_name'),
('artist', 'year', 'is_representative'),)
}),
('Physical Attributes', {
'fields': (('image_height', 'image_width', 'measurement_units'),
('height_metric', 'width_metric', 'metric_units'),
('height_imperial', 'width_imperial', 'imperial_units'),
('genre', 'medium_description', 'style_class'),
'description',)
}),
('Acquisition', {
'fields': (('price', 'status'), ('is_price_displayed'),
'alternative_pricing_message',)
}),
('Image Upload', {
'fields': (('uploaded_image', 'create_deepzoom'),)
}),
(None, {
'fields': (('slug', 'created', 'updated'),)
}),
)
readonly_fields = ('name', 'height_metric', 'width_metric',
'metric_units', 'height_imperial', 'width_imperial',
'imperial_units', 'slug', 'created', 'updated',)
# /ArtworkAdmin
admin.site.register(Artwork, ArtworkAdmin)
class EventAdmin(admin.ModelAdmin):
ordering = ('-created',)
search_fields = ('title', 'start_date',)
list_display = ('title', 'start_date', 'is_admission', 'is_active',)
list_filter = ('is_admission', 'is_active',)
fieldsets = (
('Identity', {
'fields': (('is_active'), ('title', 'image'),)
}),
('Admissions', {
'fields': (('total_seats', 'is_admission', 'admission_price'),)
}),
('Details', {
'fields': ('type', ('start_date', 'end_date'), 'location',
'details',)
}),
(None, {
'fields': (('slug', 'created', 'updated'),)
}),
)
readonly_fields = ('slug', 'created', 'updated',)
# /EventAdmin
admin.site.register(Event, EventAdmin)
#EOF - artlaasya admin | {
"repo_name": "davidjcox/artlaasya",
"path": "artlaasya/admin.py",
"copies": "1",
"size": "3713",
"license": "bsd-3-clause",
"hash": -1538519036197071400,
"line_mean": 30.5877192982,
"line_max": 80,
"alpha_frac": 0.4874764342,
"autogenerated": false,
"ratio": 3.8717413972888424,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48592178314888423,
"avg_score": null,
"num_lines": null
} |
"""artlaasya managers"""
from django.db import models
from django.utils import timezone
from datetime import timedelta
class ArtistQuerySet(models.QuerySet):
    """Chainable filters for Artist querysets."""

    # NOTE(review): DATE is evaluated once at import time, so recent()
    # means "within DAYS of process start", not of query time -- confirm
    # this is intended.
    DAYS = 30
    DATE = timezone.now() - timedelta(days=DAYS)

    def related_artwork(self):
        # NOTE(review): verify 'artwork_artist' is a forward relation on
        # Artist; select_related() does not accept reverse FK names.
        return self.select_related('artwork_artist')

    def active(self):
        return self.filter(is_active=True)

    def recent(self):
        # Artists created within the last DAYS days (see DATE note above).
        return self.filter(created__gte=self.DATE)

    def orderly(self):
        return self.order_by('last_name', 'first_name')

    def distinctly(self):
        # DISTINCT ON (last_name, first_name) -- PostgreSQL-only feature.
        return self.distinct('last_name', 'first_name')
# /ArtistQuerySet
class ArtistManager(models.Manager):
    """Manager exposing `ArtistQuerySet` helpers (bound as `Artist.artists`)."""

    def get_queryset(self):
        """Bind an `ArtistQuerySet` to this manager's database."""
        return ArtistQuerySet(self.model, using=self._db)

    def related_artwork(self):
        """Proxy to `ArtistQuerySet.related_artwork`.

        Fixed: the original called the pre-Django-1.6 `get_query_set()`,
        which is not defined here, so this method raised AttributeError.
        """
        return self.get_queryset().related_artwork()

    def active(self):
        return self.get_queryset().active()

    def recent(self):
        return self.get_queryset().recent()

    def orderly(self):
        return self.get_queryset().orderly()

    def distinctly(self):
        return self.get_queryset().distinctly()
# /ArtistManager
class GenreQuerySet(models.QuerySet):
    """Chainable queryset helpers for `Genre`."""

    def contemporary(self):
        # Exactly the genre named "Contemporary".
        return self.filter(name="Contemporary")

    def traditional(self):
        # Every genre except "Contemporary".
        return self.exclude(name="Contemporary")
# /GenreQuerySet
class GenreManager(models.Manager):
    """Manager exposing `GenreQuerySet` helpers (bound as `Genre.genres`)."""

    def get_queryset(self):
        # Bind a GenreQuerySet to this manager's database.
        return GenreQuerySet(self.model, using=self._db)

    def contemporary(self):
        return self.get_queryset().contemporary()

    def traditional(self):
        return self.get_queryset().traditional()
# /GenreManager
class ArtworkQuerySet(models.QuerySet):
    """Chainable queryset helpers for `Artwork`, proxied by `ArtworkManager`."""

    # Number of days within which an artwork counts as "recent".
    DAYS = 30
    # Retained for backward compatibility only: evaluated once at import
    # time, so it went stale in long-running processes.  `recent()` no
    # longer uses it.
    DATE = timezone.now() - timedelta(days=DAYS)

    def active(self):
        """Only artworks flagged active."""
        return self.filter(is_active=True)

    def recent(self):
        """Artworks created within the last `DAYS` days.

        Fixed: the cutoff is computed at call time instead of using the
        import-time class attribute `DATE`.
        """
        return self.filter(
            created__gte=timezone.now() - timedelta(days=self.DAYS))

    def representative(self):
        """Only each artist's designated representative artwork."""
        return self.filter(is_representative=True)

    def orderly(self):
        """Alphabetical by artist last name, then first name."""
        return self.order_by('artist__last_name', 'artist__first_name')

    def distinctly(self):
        """One row per artist name (PostgreSQL DISTINCT ON)."""
        return self.distinct('artist__last_name', 'artist__first_name')

    def contemporary(self):
        """Artworks in the genre named "Contemporary"."""
        return self.filter(genre__name="Contemporary")

    def traditional(self):
        """Artworks in any genre other than "Contemporary"."""
        return self.exclude(genre__name="Contemporary")
# /ArtworkQuerySet
class ArtworkManager(models.Manager):
    """Manager exposing `ArtworkQuerySet` helpers (bound as `Artwork.artworks`)."""

    def get_queryset(self):
        # Bind an ArtworkQuerySet to this manager's database.
        return ArtworkQuerySet(self.model, using=self._db)

    def active(self):
        return self.get_queryset().active()

    def recent(self):
        return self.get_queryset().recent()

    def representative(self):
        return self.get_queryset().representative()

    def orderly(self):
        return self.get_queryset().orderly()

    def distinctly(self):
        return self.get_queryset().distinctly()

    def contemporary(self):
        return self.get_queryset().contemporary()

    def traditional(self):
        return self.get_queryset().traditional()
# /ArtworkManager
class EventQuerySet(models.QuerySet):
    """Chainable queryset helpers for `Event`."""

    def active(self):
        # Only events flagged active.
        return self.filter(is_active=True)
# /EventQuerySet
class EventManager(models.Manager):
    """Manager exposing `EventQuerySet` helpers (bound as `Event.events`)."""

    def get_queryset(self):
        # Bind an EventQuerySet to this manager's database.
        return EventQuerySet(self.model, using=self._db)

    def active(self):
        return self.get_queryset().active()
# /EventManager
#EOF - artlaasya managers | {
"repo_name": "davidjcox/artlaasya",
"path": "artlaasya/managers.py",
"copies": "1",
"size": "3674",
"license": "bsd-3-clause",
"hash": 8833735151300755000,
"line_mean": 22.5066666667,
"line_max": 71,
"alpha_frac": 0.6121393576,
"autogenerated": false,
"ratio": 4.100446428571429,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5212585786171429,
"avg_score": null,
"num_lines": null
} |
"""artlaasya menu_tags"""
from django import template
from artlaasya.models import Artist, Artwork, Genre
register = template.Library()
@register.assignment_tag
def get_genres_menu():
    '''
    Retrieves 'name', and 'slug' of Traditional genres.
    '''
    traditional_genres = Genre.genres.traditional()
    return traditional_genres.values_list('name', 'slug')
#end get_genres_menu
@register.assignment_tag(takes_context=True)
def get_sidebar_NEW_menu(context):
    '''
    Retrieves 'first_name', 'last_name', and 'slug' of Artists
    categorized as NEW.
    '''
    recent_active_artists = Artist.artists.recent().active()
    return recent_active_artists.values('slug', 'first_name', 'last_name')
#end get_sidebar_NEW_menu
@register.assignment_tag
def get_sidebar_CONT_menu():
    '''
    Retrieves 'first_name', 'last_name', and 'slug' of Artists
    categorized as CONTEMPORARY.

    Fixed for consistency: every sibling sidebar tag de-duplicates with the
    manager's `distinctly()` (DISTINCT ON the artist-name ordering columns);
    this one used plain `distinct()`, which relied on column coincidence to
    give one row per artist.
    '''
    return Artwork.artworks.contemporary(
                           ).orderly(
                           ).distinctly(
                           ).active(
                           ).values('artist__slug',
                                    'artist__first_name',
                                    'artist__last_name')
#end get_sidebar_CONT_menu
@register.assignment_tag
def get_sidebar_TRAD_menu():
    '''
    Retrieves 'first_name', 'last_name', and 'slug' of Artists
    categorized as TRADITIONAL.

    Returns a list of ((genre_name, genre_slug), artists_queryset) pairs.
    '''
    genre_pairs = Genre.genres.traditional().values_list('name', 'slug')
    artists_per_genre = [
        Artwork.artworks.filter(genre__name=genre_name
                       ).orderly(
                       ).distinctly(
                       ).active(
                       ).values('artist__slug',
                                'artist__first_name',
                                'artist__last_name')
        for genre_name, _genre_slug in genre_pairs
    ]
    return list(zip(genre_pairs, artists_per_genre))
#end get_sidebar_TRAD_menu
@register.assignment_tag
def get_sidebar_ALL_menu():
    '''
    Retrieves 'first_name', 'last_name', and 'slug' of Artists categorized as ALL.
    '''
    active_artists = Artist.artists.active()
    return active_artists.values('slug', 'first_name', 'last_name')
#end get_sidebar_ALL_menu
#EOF - menu_tags | {
"repo_name": "davidjcox/artlaasya",
"path": "artlaasya/templatetags/menu_tags.py",
"copies": "1",
"size": "2708",
"license": "bsd-3-clause",
"hash": 7675933790760690000,
"line_mean": 29.511627907,
"line_max": 82,
"alpha_frac": 0.4538404727,
"autogenerated": false,
"ratio": 4.179012345679013,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5132852818379012,
"avg_score": null,
"num_lines": null
} |
"""artlaasya models"""
from django.db import models
from django.conf import settings
from django.core.urlresolvers import reverse
import os
import string
from datetime import date
import deepzoom.models as deepzoom_models
import artlaasya.managers as artlaasya_managers
from artlaasya.mixins import ModelDiffMixin
class ArtistRatchet(models.Model):
    """
    Provides a numerical suffix for appending to an `Artist` slug so that
    uniqueness is ensured in the event more than one artist shares the same
    name. The same could be accomplished using the primary key, but this is
    more uniform and creates more aesthetically pleasing URLs.

    The suffix only ratchets, so CRUD operations are guaranteed to produce
    slugs that cannot be mistakenly re-applied to different artists.
    """
    class Meta:
        app_label = settings.APP_LABEL
        ordering = ['name']

    # Default manager under an explicit name.
    ratchets = models.Manager()

    # Lower-cased artist name this counter belongs to.
    name = models.CharField(max_length=61,
                            unique=True,
                            default="artistratchet_name",
                            editable=False)
    # Monotonically increasing counter appended to the slug.
    suffix = models.PositiveIntegerField(default=0,
                                         editable=False)
# /ArtistRatchet
class Artist(ModelDiffMixin, models.Model):
    """
    Represents an artist.

    Composed with `ModelDiffMixin` mixin which tracks field changes.
    Uses `ArtistManager` manager class which provides friendlier manager name
    and a set of common querysets for artists.
    Provides optional uploadable `biography` PDF file.
    """
    class Meta:
        app_label = settings.APP_LABEL
        get_latest_by = 'created'
        ordering = ['last_name', 'first_name']

    artists = artlaasya_managers.ArtistManager()

    def get_sluggified_filename(instance, filename):
        """Rename an uploaded file after the instance slug, keeping its extension."""
        _extension = os.path.splitext(filename)[1]
        return instance.slug + _extension

    def get_biography_filepath(instance, filename):
        """Build the storage path for an uploaded biography PDF."""
        _filename = instance.get_sluggified_filename(filename)
        return os.path.join(settings.DEFAULT_ARTIST_BIOGRAPHY_ROOT, _filename)

    first_name = models.CharField(max_length=30,
                                  help_text="Max 30 characters.")
    last_name = models.CharField(max_length=30,
                                 help_text="Max 30 characters.")
    # Constructed by the `slugify__artist` pre-save signal.
    slug = models.SlugField(max_length=65,
                            unique=True,
                            editable=False,
                            help_text="(system-constructed)")
    is_active = models.BooleanField(default=True)
    description = models.TextField(blank=True,
                                   help_text="Unlimited characters.")
    biography = models.FileField(upload_to=get_biography_filepath,
                                 blank=True,
                                 help_text="PDF-formatted file.")
    created = models.DateTimeField(auto_now_add=True,
                                   editable=False)
    updated = models.DateTimeField(auto_now=True,
                                   editable=False)

    def get_absolute_url(self):
        return reverse('v_artist',
                       kwargs={'artist_name': self.slug})

    def __unicode__(self):
        # Python 2 text representation.  Fixed: the original referenced
        # `six.u(...)` but `six` is never imported, raising NameError.
        return u'%s %s' % (self.first_name, self.last_name)

    def __str__(self):
        return '%s %s' % (self.first_name, self.last_name)
# /Artist
class Genre(ModelDiffMixin, models.Model):
    """
    Represents an artwork genre.

    Composed with `ModelDiffMixin` mixin which tracks field changes.
    """
    class Meta:
        app_label = settings.APP_LABEL
        get_latest_by = 'created'
        ordering = ['name']

    genres = artlaasya_managers.GenreManager()

    name = models.CharField(max_length=60,
                            unique=True,
                            help_text="Max 60 characters.")
    # Constructed by the `slugify__genre` pre-save signal.
    slug = models.SlugField(max_length=65,
                            editable=False)
    is_active = models.BooleanField(default=True)
    location = models.CharField(max_length=100,
                                blank=True,
                                help_text="Max 100 characters.")
    description = models.TextField(blank=True,
                                   help_text="Unlimited characters.")

    def __unicode__(self):
        # Fixed: the original referenced `six.u(...)` but `six` is never
        # imported, raising NameError.
        return u'%s' % (self.name)

    def __str__(self):
        return '%s' % (self.name)
# /Genre
class ArtworkRatchet(models.Model):
    """
    Provides a numerical suffix for appending to an Artwork slug so that
    uniqueness is ensured in the event more than one artwork shares the same
    title. The same could be accomplished using the primary key, but this is
    more uniform and creates more aesthetically pleasing URLs.

    The suffix only ratchets, so CRUD operations are guaranteed to produce
    slugs that cannot be mistakenly re-applied to different artworks.
    """
    class Meta:
        app_label = settings.APP_LABEL
        ordering = ['title']

    # Default manager under an explicit name.
    ratchets = models.Manager()

    # Lower-cased artwork title this counter belongs to.
    title = models.CharField(max_length=100,
                             unique=True,
                             default="artworkratchet_title",
                             editable=False)
    # Monotonically increasing counter appended to the slug.
    suffix = models.PositiveIntegerField(default=0,
                                         editable=False)
# /ArtworkRatchet
class Artwork(deepzoom_models.UploadedImage):
    """
    Represents an artwork.

    Composed with `ModelDiffMixin` mixin which tracks field changes.
    Uses `ArtworkManager` manager class which provides friendlier manager name
    and a set of common querysets for artworks.
    Links to an artist.
    Provides the means to designate an artwork representative for the artist.
    Provides the means to display a replacement pricing message in place of
    a price, if needed.
    """
    class Meta:
        app_label = settings.APP_LABEL
        get_latest_by = 'created'
        ordering = ['artist__last_name', 'artist__first_name', 'name', 'title']

    artworks = artlaasya_managers.ArtworkManager()

    # Closed vocabularies for style, measurement units, and sale status.
    STYLE_CHOICES = (
        ('TRAD', 'Traditional'),
        ('ABST', 'Abstract'),
        ('FIGU', 'Figurative'),
        ('SEMA', 'Semi-Abstract'),
        ('SEMF', 'Semi-Figurative'),
    )
    UNIT_CHOICES = (
        ('C', 'cm'),
        ('I', 'in'),
    )
    STATUS_CHOICES = (
        ('AVAL', 'Available'),
        ('SOLD', 'Sold'),
    )

    title = models.CharField(max_length=100,
                             help_text="Max 100 characters.")
    is_active = models.BooleanField(default=True)
    inventory_name = models.CharField(max_length=100,
                                      unique=True,
                                      help_text="Max 100 characters.")
    internal_name = models.CharField(max_length=100,
                                     unique=True,
                                     help_text="Max 100 characters.")
    artist = models.ForeignKey(Artist,
                               related_name='artworks_authored')
    year = models.CharField(max_length=4,
                            blank=True,
                            help_text="Max 4 characters.")
    # At most one representative artwork per artist (enforced by the
    # `ensure_artwork_uniquely_representative` post-save signal).
    is_representative = models.BooleanField(default=False)
    genre = models.ForeignKey(Genre,
                              related_name='artworks_included')
    style_class = models.CharField(max_length=4,
                                   choices=STYLE_CHOICES,
                                   default='TRAD')
    medium_description = models.CharField(max_length=100,
                                          help_text="Max 100 characters.")
    description = models.TextField(blank=True,
                                   help_text="Unlimited characters.")
    # Dimensions as entered, in `measurement_units`; the metric/imperial
    # pairs below are derived by the `calculate_artwork_dimensions` signal.
    image_height = models.DecimalField(max_digits=10,
                                       decimal_places=2,
                                       blank=True,
                                       null=True,
                                       help_text="Two digits after decimal \
                                                  point.")
    image_width = models.DecimalField(max_digits=10,
                                      decimal_places=2,
                                      blank=True,
                                      null=True,
                                      help_text="Two digits after decimal \
                                                 point.")
    measurement_units = models.CharField(max_length=2,
                                         choices=UNIT_CHOICES,
                                         default='I',
                                         blank=True)
    height_metric = models.DecimalField(max_digits=10,
                                        decimal_places=2,
                                        blank=True,
                                        null=True,
                                        editable=False,
                                        help_text="(system-calculated)")
    width_metric = models.DecimalField(max_digits=10,
                                       decimal_places=2,
                                       blank=True,
                                       null=True,
                                       editable=False,
                                       help_text="(system-calculated)")
    metric_units = models.CharField(max_length=2,
                                    choices=UNIT_CHOICES,
                                    default='C',
                                    blank=True,
                                    editable=False,
                                    help_text="(system-assigned)")
    height_imperial = models.DecimalField(max_digits=10,
                                          decimal_places=2,
                                          blank=True,
                                          null=True,
                                          editable=False,
                                          help_text="(system-calculated)")
    width_imperial = models.DecimalField(max_digits=10,
                                         decimal_places=2,
                                         blank=True,
                                         null=True,
                                         editable=False,
                                         help_text="(system-calculated)")
    imperial_units = models.CharField(max_length=2,
                                      choices=UNIT_CHOICES,
                                      default='I',
                                      blank=True,
                                      editable=False,
                                      help_text="(system-assigned)")
    price = models.PositiveIntegerField(help_text="Displayed only if price is \
                                                   toggled to be displayed.")
    is_price_displayed = models.BooleanField(default=True)
    alternative_pricing_message = models.CharField(max_length=100,
                                                   default="Please inquire",
                                                   blank=True,
                                                   help_text="Max 100 characters.\
                                                              Displayed only if price \
                                                              is toggled NOT to be \
                                                              displayed.")
    status = models.CharField(max_length=4,
                              choices=STATUS_CHOICES,
                              default='AVAL')

    def get_absolute_url(self):
        return reverse('v_artwork',
                       kwargs={'artist_name': self.artist.slug,
                               'artwork_title': self.slug})

    def __unicode__(self):
        # Fixed: the original referenced `six.u(...)` but `six` is never
        # imported, raising NameError.
        return u'%s' % (self.title)

    def __str__(self):
        return '%s' % (self.title)
# /Artwork
class EventRatchet(models.Model):
    """
    Provides a numerical suffix for appending to an Event slug so that
    uniqueness is ensured in the event more than one event shares the same
    title. The same could be accomplished using the primary key, but this is
    more uniform and creates more aesthetically pleasing URLs.

    The suffix only ratchets, so CRUD operations are guaranteed to produce
    slugs that cannot be mistakenly re-applied to different events.
    """
    class Meta:
        app_label = settings.APP_LABEL
        ordering = ['title']

    # Default manager under an explicit name.
    ratchets = models.Manager()

    # Lower-cased event title this counter belongs to.
    title = models.CharField(max_length=128,
                             unique=True,
                             default="eventratchet_title",
                             editable=False)
    # Monotonically increasing counter appended to the slug.
    suffix = models.PositiveIntegerField(default=0,
                                         editable=False)
# /EventRatchet
class Event(ModelDiffMixin, models.Model):
    """
    Represents a gallery event.

    Composed with `ModelDiffMixin` mixin which tracks field changes.
    Uses `EventManager` manager class which provides friendlier manager name
    and a set of common querysets for events.
    Links to an artist.
    Provides a display image.
    Provides start/end dates and times.
    """
    class Meta:
        app_label = settings.APP_LABEL
        get_latest_by = 'created'
        ordering = ['-start_date']

    events = artlaasya_managers.EventManager()

    def get_sluggified_filename(instance, filename):
        """Rename an uploaded file after the instance slug, keeping its extension."""
        _extension = os.path.splitext(filename)[1]
        return instance.slug + _extension

    def get_event_image_filepath(instance, filename):
        """Build the storage path for an uploaded event image."""
        _filename = instance.get_sluggified_filename(filename)
        return os.path.join(settings._DEFAULT_EVENT_IMAGE_ROOT, _filename)

    title = models.CharField(max_length=128,
                             unique=True,
                             help_text="Max 128 characters.")
    is_active = models.BooleanField(verbose_name="is event still active",
                                    default=True)
    # Constructed by the `slugify__event` pre-save signal.
    slug = models.SlugField(max_length=132,
                            editable=False,
                            help_text="(system-constructed)")
    type = models.CharField(max_length=128,
                            help_text="Max 128 characters.")
    # NOTE(review): `null=True` has no effect on ManyToManyField (Django
    # warns about it); left in place to avoid a schema-state change here.
    artist = models.ManyToManyField(Artist,
                                    related_name='events_presented',
                                    blank=True,
                                    null=True)
    image = models.ImageField(upload_to=get_event_image_filepath,
                              help_text="Image will be automatically resized.")
    total_seats = models.PositiveIntegerField(blank=True,
                                              null=True,
                                              help_text="Total seats allotted.")
    is_admission = models.BooleanField(default=False)
    admission_price = models.PositiveIntegerField(blank=True,
                                                  null=True,
                                                  help_text="Displayed only if \
                                                             admission is charged.")
    # Fixed: `default=date.today()` was evaluated ONCE at import time, so
    # every new event defaulted to the server start-up date.  Passing the
    # callable makes Django evaluate it per save.
    start_date = models.DateField(default=date.today,
                                  help_text="Set to date of event.")
    end_date = models.DateField(default=date.today,
                                help_text="Set to start date if one-day event.")
    time = models.CharField(max_length=20,
                            default='6:00 pm - 9:00 pm',
                            help_text="Max 20 characters.")
    location = models.CharField(max_length=128,
                                help_text="Max 128 characters.")
    details = models.TextField(max_length=1024,
                               help_text="Unlimited characters.")
    created = models.DateTimeField(auto_now_add=True,
                                   editable=False)
    updated = models.DateTimeField(auto_now=True,
                                   editable=False)

    def get_absolute_url(self):
        return reverse('v_events', kwargs={
            'event_title': self.slug})

    def __unicode__(self):
        # Fixed: the original referenced `six.u(...)` but `six` is never
        # imported, raising NameError.
        return u'%s' % (self.title)

    def __str__(self):
        return '%s' % (self.title)
# /Event
#EOF - artlaasya models
| {
"repo_name": "davidjcox/artlaasya",
"path": "artlaasya/models.py",
"copies": "1",
"size": "17288",
"license": "bsd-3-clause",
"hash": -3962760480845819000,
"line_mean": 33.7929606625,
"line_max": 82,
"alpha_frac": 0.4822420176,
"autogenerated": false,
"ratio": 5.049065420560748,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.04500097558562631,
"num_lines": 483
} |
'''artlaasya signals'''
from django.dispatch import receiver
from django.db.models.signals import pre_save, post_save, pre_delete
# Prefer the modern location of `slugify` (django.utils.text); fall back to
# the legacy template-filter location for older Django versions.
try:
    from django.utils.text import slugify
except ImportError:
    try:
        from django.template.defaultfilters import slugify
    except ImportError:
        # Fixed: the original added a trailing bare `except:` that also
        # swallowed SystemExit/KeyboardInterrupt; only ImportError is the
        # expected failure here.
        print("Unable to import `slugify`.")
from decimal import Decimal
from artlaasya.utils import is_django_version_greater_than, delete_uploaded_file
from artlaasya.models import (Artist,
ArtistRatchet,
Genre,
Artwork,
ArtworkRatchet,
Event,
EventRatchet)
DJANGO_SAVE_UPDATEABLE = is_django_version_greater_than(1, 4)
@receiver(pre_save, sender=Artist)
def slugify__artist(sender, instance, slugify=slugify, **kwargs):
    """
    Manages the uniquely numbered suffix for `name`.

    Artist [`first_name` + `last_name` + suffix] --> `slug`.
    """
    renamed = ('first_name' in instance.changed_fields or
               'last_name' in instance.changed_fields)
    if renamed or not instance.slug:
        lowered_name = instance.__str__().lower()
        # Ratchet the per-name counter so shared names still get unique slugs.
        ratchet, _unused = ArtistRatchet.ratchets.get_or_create(name=lowered_name)
        ratchet.suffix += 1
        ratchet.save()
        numbered_suffix = str(ratchet.suffix).zfill(3)
        instance.slug = slugify('-'.join([lowered_name, numbered_suffix]))
@receiver(post_save, sender=Artist)
def deactivate_artworks_of_inactive_artist(sender, instance, created, **kwargs):
    """
    Ensures that all artworks of an artist are deactivated when artist is
    deactivated.
    """
    # Only react when `is_active` actually changed on this save and is now False.
    is_active_field_changed = ('is_active' in instance.changed_fields)
    if (is_active_field_changed and not instance.is_active):
        for _artwork in instance.artworks_authored.all():
            if _artwork.is_active:
                _artwork.is_active = False
                # `update_fields` (Django >= 1.5) narrows the UPDATE; older
                # versions fall back to a full-row save.
                if DJANGO_SAVE_UPDATEABLE:
                    _artwork.save(update_fields=['is_active'])
                else:
                    _artwork.save()
@receiver(pre_save, sender=Artist, dispatch_uid="d__a_b")
def delete__artist_biography(sender, instance, **kwargs):
    """
    If file already exists, but new file uploaded, delete existing file.
    """
    biography_field_changed = ('biography' in instance.changed_fields)
    if biography_field_changed:
        # ModelDiffMixin's diff appears to be (old, new); index 0 is the
        # previous file -- TODO confirm against the mixin.
        previous_file = instance.get_field_diff('biography')[0]
        if previous_file:
            delete_uploaded_file(previous_file.path)
@receiver(pre_delete, sender=Artist, dispatch_uid="d__a")
def delete__artist(sender, instance, **kwargs):
    """
    Deletes `biography` uploaded file when Artist is deleted.
    """
    biography_file = instance.biography
    if biography_file:
        delete_uploaded_file(biography_file.path)
@receiver(pre_save, sender=Genre)
def slugify__genre(sender, instance, slugify=slugify, **kwargs):
    """
    Manages the slugifying of `name`.

    Genre [`name`] --> `slug`.
    """
    needs_slug = ('name' in instance.changed_fields) or not instance.slug
    if needs_slug:
        instance.slug = slugify(instance.__str__().lower())
@receiver(pre_save, sender=Artwork)
def name_slugify__artwork(sender, instance, slugify=slugify, **kwargs):
    """
    Manages the uniquely numbered suffix for `title`.

    Artwork [`title` + suffix] --> `name' --> `slug`.
    UploadedImage provides `name` and `slug`.
    """
    title_field_changed = ('title' in instance.changed_fields)
    if (title_field_changed or not instance.name):
        _title = instance.title.lower()
        # Ratchet the per-title counter so reused titles still get unique names.
        _ratchet, _created = ArtworkRatchet.ratchets.get_or_create(title=_title)
        _incremented_suffix = _ratchet.suffix + 1
        _ratchet.suffix = _incremented_suffix
        _ratchet.save()
        # Zero-pad to three digits, e.g. "my-title-004".
        _suffix = str.zfill(str(_incremented_suffix), 3)
        # `name` keeps the original casing; the slug is lower-cased by slugify.
        instance.name = '-'.join([instance.title, _suffix])
        instance.slug = slugify(instance.name)
@receiver(pre_save, sender=Artwork)
def calculate_artwork_dimensions(sender, instance, **kwargs):
    """
    Calculates artwork measurements in other measurement system.
    """
    dimension_fields_changed = ('image_height' in instance.changed_fields or
                                'image_width' in instance.changed_fields or
                                'measurement_units' in instance.changed_fields)
    # NOTE(review): `and` binds tighter than `or`, so this reads as
    # "fields changed OR (height missing AND width missing)".  If either
    # dimension is None here, the Decimal multiplications below raise
    # TypeError -- presumably both dimensions are always entered together;
    # confirm.
    if (dimension_fields_changed or
        not instance.image_height and not instance.image_width):
        if instance.measurement_units == 'I':
            # Entered in inches: copy imperial, derive metric (1 in = 2.54 cm).
            instance.height_imperial = instance.image_height
            instance.width_imperial = instance.image_width
            instance.imperial_units = 'I'
            instance.height_metric = round((Decimal(2.54) * instance.image_height), 2)
            instance.width_metric = round((Decimal(2.54) * instance.image_width), 2)
            instance.metric_units = 'C'
        elif instance.measurement_units == 'C':
            # Entered in centimetres: copy metric, derive imperial (1 cm ~ 0.394 in).
            instance.height_metric = instance.image_height
            instance.width_metric = instance.image_width
            instance.metric_units = 'C'
            instance.height_imperial = round((Decimal(0.394) * instance.image_height), 2)
            instance.width_imperial = round((Decimal(0.394) * instance.image_width), 2)
            instance.imperial_units = 'I'
@receiver(post_save, sender=Artwork)
def ensure_artwork_uniquely_representative(sender, instance, created, **kwargs):
    """
    Ensures that only one artwork is representative for any one artist.
    """
    if instance.is_representative:
        # All other artworks by the same artist lose the flag.
        _artworks = Artwork.artworks.filter(artist__slug=instance.artist.slug
                                   ).exclude(slug=instance.slug)
        for _artwork in _artworks:
            if _artwork.is_representative:
                _artwork.is_representative = False
                # `update_fields` (Django >= 1.5) narrows the UPDATE.
                if DJANGO_SAVE_UPDATEABLE:
                    _artwork.save(update_fields=['is_representative'])
                else:
                    _artwork.save()
@receiver(pre_save, sender=Event)
def slugify__event(sender, instance, slugify=slugify, **kwargs):
    """
    Manages the uniquely numbered suffix for `title`.

    Event [`title` + suffix] --> `slug`.

    Fixed: the guard tested `not instance.title` instead of
    `not instance.slug` (compare `slugify__artist` / `name_slugify__artwork`),
    so an event whose title never changed could be saved without a slug.
    """
    title_field_changed = ('title' in instance.changed_fields)
    if (title_field_changed or not instance.slug):
        _title = instance.title.lower()
        # Ratchet the per-title counter so reused titles still get unique slugs.
        _ratchet, _created = EventRatchet.ratchets.get_or_create(title=_title)
        _incremented_suffix = _ratchet.suffix + 1
        _ratchet.suffix = _incremented_suffix
        _ratchet.save()
        _suffix = str.zfill(str(_incremented_suffix), 3)
        instance.slug = slugify('-'.join([_title, _suffix]))
@receiver(pre_save, sender=Event, dispatch_uid="d__e_i")
def delete__event_image(sender, instance, **kwargs):
    """
    If image already exists, but new image uploaded, deletes existing image file.
    """
    image_field_changed = ('image' in instance.changed_fields)
    if image_field_changed:
        # ModelDiffMixin's diff appears to be (old, new); index 0 is the
        # previous image -- TODO confirm against the mixin.
        previous_image = instance.get_field_diff('image')[0]
        if previous_image:
            delete_uploaded_file(previous_image.path)
@receiver(pre_delete, sender=Event, dispatch_uid="d__e")
def delete__event(sender, instance, **kwargs):
    """
    Deletes `image` uploaded file when Event is deleted.
    """
    image_path = instance.image.path
    delete_uploaded_file(image_path)
#EOF - artlaasya signals
| {
"repo_name": "davidjcox/artlaasya",
"path": "artlaasya/signals.py",
"copies": "1",
"size": "7852",
"license": "bsd-3-clause",
"hash": 7641381968289807000,
"line_mean": 35.9323671498,
"line_max": 89,
"alpha_frac": 0.6093988793,
"autogenerated": false,
"ratio": 3.8414872798434443,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4950886159143444,
"avg_score": null,
"num_lines": null
} |
"""artlaasya views"""
import operator
import os.path
import re
from random import shuffle

from django.conf import settings
from django.contrib.sitemaps import Sitemap
from django.core.context_processors import csrf
from django.db.models import Q
from django.http import Http404
from django.shortcuts import get_object_or_404, get_list_or_404, render_to_response
from django.template import RequestContext
from django.views.generic import TemplateView

from artlaasya.models import Artist, Genre, Artwork, Event
class BaseSitemap(Sitemap):
    """
    Base class for other Sitemap classes.

    Shared defaults: weekly change frequency, URL from the object's
    `get_absolute_url`, last-modified from `updated` (falling back to
    `created` when `updated` is falsy).
    """
    changefreq = "weekly"

    def location(self, obj):
        return obj.get_absolute_url()

    def lastmod(self, obj):
        return obj.updated or obj.created
# /BaseSitemap
class StaticSitemap(Sitemap):
    """
    Sitemap class for static pages.

    Static pages never change, so `changefreq` is "never" and there is no
    last-modified timestamp.
    """
    priority = 0.5
    changefreq = "never"
    lastmod = None

    def items(self):
        # Hard-coded URL paths of the static pages.
        return ["/contact",
                "/termsofuse",
                "/privacy",
                "/termsofsale", ]

    def location(self, obj):
        # Items are already URL paths.
        return obj
# /StaticSitemap
class ArtistSitemap(BaseSitemap):
    """
    Sitemap for Artists.

    Lists all active artists.
    """
    priority = 0.75

    def items(self):
        return Artist.artists.active()
# /ArtistSitemap
class ArtworkSitemap(BaseSitemap):
    """
    Sitemap for Artworks.

    Lists all active artworks at top priority.
    """
    priority = 1.0

    def items(self):
        return Artwork.artworks.active()
# /ArtworkSitemap
class EventSitemap(BaseSitemap):
    """
    Sitemap for Events.

    Lists all active events; inherits priority defaults from BaseSitemap.
    """
    def items(self):
        return Event.events.active()
# /EventSitemap
def normalize_query(query_string,
                    findterms=re.compile(r'"([^"]+)"|(\S+)').findall,
                    normspace=re.compile(r'\s{2,}').sub):
    """Split a raw query string into individual search terms.

    Double-quoted spans become a single term; runs of whitespace inside a
    term are collapsed to one space.
    """
    terms = []
    for quoted, bare in findterms(query_string):
        term = (quoted or bare).strip()
        terms.append(normspace(' ', term))
    return terms
# /normalize_query
def get_query(query_string, search_fields):
    """
    Compound query compiler.

    Builds a `Q` requiring EVERY normalized term in `query_string` to match
    at least one of `search_fields` (case-insensitive substring match).
    Returns None for an empty query string.

    Fixed: the original only ever kept the first term's sub-query -- terms
    after the first were silently dropped because the AND-combining branch
    was missing.
    """
    query = None  # Query to search for every search term
    terms = normalize_query(query_string)
    for term in terms:
        or_query = None  # Query to search for a given term in each field
        for field_name in search_fields:
            q = Q(**{"%s__icontains" % field_name: term})
            if or_query is None:
                or_query = q
            else:
                or_query = or_query | q
        if query is None:
            query = or_query
        else:
            query = query & or_query
    return query
# /get_query
def merge_lists(list1, list2):
    """
    Alternative list merge function to `zip_longest()`. It does not extend
    shorter list with values unlike `zip_longest()` which extends with `None`.

    Interleaves the two lists element-by-element, then appends whatever is
    left over from the longer one.
    """
    shorter = min(len(list1), len(list2))
    merged = []
    for first_item, second_item in zip(list1, list2):
        merged.append(first_item)
        merged.append(second_item)
    merged.extend(list1[shorter:])
    merged.extend(list2[shorter:])
    return merged
# /merge_lists
#===============================================================================
def home(request):
    """
    Renders the home page with every active, per-artist representative
    artwork (only one artwork can be representative per artist).

    Raises Http404 when no such artwork exists.
    """
    representative_artworks = get_list_or_404(
        Artwork.artworks.representative().active())
    return render_to_response('t_home.html',
                              {'artworks': representative_artworks},
                              context_instance=RequestContext(request))
# /home
def artist(request, artist_name=None):
    """
    Renders one active artist together with all of that artist's active
    artworks; 404s when either lookup comes up empty.
    """
    selected_artist = get_object_or_404(Artist.artists.active(),
                                        slug=artist_name)
    artist_artworks = get_list_or_404(Artwork.artworks.active(),
                                      artist=selected_artist)
    return render_to_response('t_artist.html',
                              {'artist': selected_artist,
                               'artworks': artist_artworks},
                              context_instance=RequestContext(request))
# /artist
def artists(request, artist_genre=None):
    """
    Returns either only new, only contemporary, only traditional, or all
    artworks that are representative for each artist and that are active.

    New artists are those added since the interval of time defined in the
    `recent` queryset of the `ArtistManager` manager.

    Fixed: an unrecognized `artist_genre` now raises Http404; previously it
    fell through every branch and crashed with UnboundLocalError.
    """
    if (artist_genre == 'new'):
        _artworks = get_list_or_404(Artwork.artworks.recent(
                                                    ).representative(
                                                    ).active(
                                                    ).orderly(),
                                    artist__is_active=True)
    elif (artist_genre == 'all'):
        _artworks = get_list_or_404(Artwork.artworks.representative(
                                                    ).active(
                                                    ).orderly(),
                                    artist__is_active=True)
    elif (artist_genre == 'contemporary'):
        _artworks = get_list_or_404(Artwork.artworks.contemporary(
                                                    ).representative(
                                                    ).active(
                                                    ).orderly(),
                                    artist__is_active=True)
    elif (artist_genre == 'traditional'):
        _artworks = get_list_or_404(Artwork.artworks.traditional(
                                                    ).representative(
                                                    ).active(
                                                    ).orderly(),
                                    artist__is_active=True)
    else:
        raise Http404("Unknown artist genre: %s" % artist_genre)
    return render_to_response('t_artists.html',
                              {'artworks': _artworks},
                              context_instance=RequestContext(request))
# /artists
def artwork(request, artist_name=None, artwork_title=None):
    """
    Renders the specified active artwork plus every other artwork by the
    same artist that is active.
    """
    selected = get_object_or_404(Artwork.artworks.active(),
                                 slug=artwork_title)
    others = Artwork.artworks.filter(
        artist__slug=artist_name).exclude(slug=artwork_title).active()
    return render_to_response('t_artwork.html',
                              {'other_artworks': others,
                               'selected_artwork': selected},
                              context_instance=RequestContext(request))
# /artwork
def artworks(request, artwork_genre=None):
    """
    Returns either only new, only contemporary, only traditional, or all
    artworks for each artist that are active.

    New artists are those added since the interval of time defined in the
    `recent` queryset of the `ArtworkManager` manager.

    Requirements dictated that artists within a genre must be ordered randomly
    to ensure equal promotion placement for each artist over page views.
    Requirements dictated that when all artwork genres are listed, the genres
    must be interleaved into a listing order that alternates between
    "contemporary" and "traditional" artworks.

    Since requirements dictated that art genre could only be linked to artwork,
    and not artist, two queries are needed instead of one for artists with a
    `select_related()`. The first to locate all artists of a particular genre,
    and the second to retrieve all artworks for each artist.
    Artists are retrieved per genre, then shuffled, then merged if all or new,
    and finally artworks are retrieved for the artists.
    It is slower than the more straightforward solution of linking genre to
    artist too, but is compliant with requirements...
    """
    # NOTE(review): an `artwork_genre` other than 'new'/'all'/'contemporary'/
    # 'traditional' leaves `_artworks` unbound and raises UnboundLocalError
    # at the render call -- presumably the URLconf restricts the value; confirm.
    if (artwork_genre == 'new'):
        #Find any NEW contemporary artists.
        _contemporary_new = list(Artwork.artworks.contemporary(
                                                 ).recent(
                                                 ).active(
                                                 ).orderly(
                                                 ).distinctly(
                                                 ).values_list('artist__slug',
                                                               flat=True))
        #Find any NEW traditional artists.
        _traditional_new = list(Artwork.artworks.traditional(
                                                ).recent(
                                                ).active(
                                                ).orderly(
                                                ).distinctly(
                                                ).values_list('artist__slug',
                                                              flat=True))
        #Shuffle the lists.
        shuffle(_contemporary_new)
        shuffle(_traditional_new)
        #Merge the two lists together in alternating order.
        _alternating_new = merge_lists(_contemporary_new, _traditional_new)
        #Gather all artworks for each artist.
        _artworks = []
        for _artist in _alternating_new:
            _artworks.extend(Artwork.artworks.filter(artist__slug=_artist))
    else:
        #Find ALL contemporary artists.
        _contemporary_all = list(Artwork.artworks.contemporary(
                                                 ).active(
                                                 ).orderly(
                                                 ).distinctly(
                                                 ).values_list('artist__slug',
                                                               flat=True))
        #Find ALL traditional artists.
        _traditional_all = list(Artwork.artworks.traditional(
                                                ).active(
                                                ).orderly(
                                                ).distinctly(
                                                ).values_list('artist__slug',
                                                              flat=True))
        #Shuffle the lists.
        shuffle(_contemporary_all)
        shuffle(_traditional_all)
        if (artwork_genre == 'all'):
            #If ALL, merge the two shuffled lists together in alternating order.
            _alternating_all = merge_lists(_contemporary_all, _traditional_all)
            #Gather all artworks for each artist.
            _artworks = []
            for _artist in _alternating_all:
                _artworks.extend(Artwork.artworks.filter(artist__slug=_artist))
        elif (artwork_genre == 'contemporary'):
            #If ONLY contemporary, gather only the contemporary artworks.
            _artworks = []
            for _artist in _contemporary_all:
                _artworks.extend(Artwork.artworks.filter(artist__slug=_artist))
        elif (artwork_genre == 'traditional'):
            #If ONLY traditional, gather only the traditional artworks.
            _artworks = []
            for _artist in _traditional_all:
                _artworks.extend(Artwork.artworks.filter(artist__slug=_artist))
    return render_to_response('t_artworks.html',
                              {'artworks': _artworks},
                              context_instance=RequestContext(request))
# /artworks
def learn(request, artwork_genre=None):
    """
    Renders the "learn" page for one art genre looked up by slug;
    404s when the genre does not exist.
    """
    genre_object = get_object_or_404(Genre, slug=artwork_genre)
    return render_to_response('t_learn.html',
                              {'genre': genre_object},
                              context_instance=RequestContext(request))
# /learn
def event(request, event_title=None):
    """
    Renders one active event looked up by slug; 404s otherwise.
    """
    selected_event = get_object_or_404(Event.events.active(),
                                       slug=event_title)
    return render_to_response('t_event.html',
                              {'event': selected_event},
                              context_instance=RequestContext(request))
#end event
def events(request):
    """
    Renders all active events; 404s when none exist.
    """
    active_events = get_list_or_404(Event.events.active())
    return render_to_response('t_events.html',
                              {'events': active_events},
                              context_instance=RequestContext(request))
#end events
class search(TemplateView):
    """
    Returns a simple search template.

    Class-based view; rendering is entirely handled by TemplateView.
    """
    template_name = "search.html"
# /search
def searching(request):
    """Run a keyword search over artworks and artists.

    Reads the ``q`` GET parameter; when it is absent or blank, the
    results page is rendered with empty result sets.
    """
    query_string = ''
    artworks_found = None
    artists_found = None
    raw_query = request.GET.get('q', '')
    if raw_query.strip():
        query_string = request.GET['q']
        artwork_fields = ['title',
                          'genre__name',
                          'medium_description',
                          'description',]
        artwork_query = get_query(query_string, artwork_fields)
        artworks_found = Artwork.artworks.filter(artwork_query).active().orderly()
        artist_fields = ['first_name',
                         'last_name',]
        artist_query = get_query(query_string, artist_fields)
        artists_found = Artist.artists.filter(artist_query).active().orderly()
    context = {'query_string': query_string,
               'artworks_found': artworks_found,
               'artists_found': artists_found}
    return render_to_response('t_search_results.html',
                              context,
                              context_instance=RequestContext(request))
# /searching
#EOF - artlaasya views
| {
"repo_name": "davidjcox/artlaasya",
"path": "artlaasya/views.py",
"copies": "1",
"size": "13818",
"license": "bsd-3-clause",
"hash": 154104018505726720,
"line_mean": 34.7979274611,
"line_max": 85,
"alpha_frac": 0.5141843972,
"autogenerated": false,
"ratio": 4.646267652992602,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5660452050192603,
"avg_score": null,
"num_lines": null
} |
'Artmaking, a tiny demonstration game for Curveship.'
__author__ = 'Nick Montfort'
__copyright__ = 'Copyright 2011 Nick Montfort'
__license__ = 'ISC'
__version__ = '0.5.0.0'
from item_model import Actor, Room, Thing
from action_model import Modify, Sense
import can
import when
# Discourse-level configuration consumed by the Curveship engine:
# story metadata plus the narrative "spin" (who is commanded,
# focalized, and narrated -- here, all the artist).
discourse = {
    'metadata': {
        'title': 'Artmaking',
        'headline': 'A very simple example',
        'people': [('by', 'Nick Montfort')],
        'prologue': 'Settle for nothing less than an artistic breakthrough.'},
    'spin':
    {'commanded': '@artist', 'focalizer': '@artist', 'narratee': '@artist'}}
# Fired once at startup: the artist looks around the studio.
initial_actions = [Sense('ogle', '@artist', direct='@studio', modality='sight')]
class Art(Thing):
    '@sculpture is the only instance.'
    def react(self, world, basis):
        'Win the game when smashed.'
        # Only respond when this very item is kicked or struck.
        actions = []
        if (basis.verb in ['kick', 'strike'] and basis.direct == str(self)):
            # Flip the 'intact' feature off; final=True ends the game.
            damage = Modify('puncture', basis.agent, direct=str(self),
                            feature='intact', new=False)
            damage.after = """finally, a worthy contribution to the art world
            ... victory!"""
            damage.final = True
            actions = [damage]
        return actions
# World model: the artist, her studio, a box, and the sculpture inside it.
items = [
    Actor('@artist in @studio',
          article='the',
          called='artist',
          gender='female',
          allowed=can.possess_any_item,
          # The artist refuses to leave until the game is over.
          refuses=[('LEAVE way=(north|out)', when.always,
                    '[@artist/s] [have/v] work to do')]),
    Room('@studio',
         article='the',
         called='studio',
         exits={},
         sight='a bare studio space with a single exit, to the north'),
    Thing('@box in @studio',
          article='a',
          called='box',
          open=False,
          allowed=can.contain_and_support_things,
          sight='the medium-sized parcel [is/1/v] [open/@box/a]'),
    Art('@sculpture in @box',
        article='a',
        called='sculpture',
        intact=True,
        sight='a sculpture of a mountain, made to order in China')]
| {
"repo_name": "lucidbard/curveship",
"path": "fiction/artmaking.py",
"copies": "3",
"size": "2031",
"license": "isc",
"hash": -7257453192477348000,
"line_mean": 29.7727272727,
"line_max": 80,
"alpha_frac": 0.5672082718,
"autogenerated": false,
"ratio": 3.538327526132404,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.02527215595495772,
"num_lines": 66
} |
# Art-Net protocol for Pimoroni Unicorn Hat
# Open Pixel Control protocol for Pimoroni Unicorn Hat
# License: MIT
# NOTE: Python 2 source (tuple parameter unpacking, print statements).
import unicornhat as unicorn
from twisted.internet import protocol, endpoints
from twisted.internet.protocol import DatagramProtocol
from twisted.internet import reactor
# Adjust the LED brightness as needed.
unicorn.brightness(0.5)
class ArtNet(DatagramProtocol):
    # NOTE(review): Python 2 only -- tuple parameter unpacking in the
    # signature below was removed in Python 3 (PEP 3113).
    def datagramReceived(self, data, (host, port)):
        """Parse one Art-Net UDP packet and paint it onto the 8x8 hat.

        Only ArtDMX packets (opcode 0x5000, protocol version >= 14) are
        handled; the DMX payload is consumed as RGB triples in row-major
        order.  NOTE(review): rgb_length is trusted without checking it
        against len(data), so a truncated packet would raise IndexError
        -- confirm whether senders are trusted.
        """
        if ((len(data) > 18) and (data[0:8] == "Art-Net\x00")):
            rawbytes = map(ord, data)
            # Opcode is little-endian; protocol version is big-endian.
            opcode = rawbytes[8] + (rawbytes[9] << 8)
            protocolVersion = (rawbytes[10] << 8) + rawbytes[11]
            if ((opcode == 0x5000) and (protocolVersion >= 14)):
                sequence = rawbytes[12]
                physical = rawbytes[13]
                sub_net = (rawbytes[14] & 0xF0) >> 4
                universe = rawbytes[14] & 0x0F
                net = rawbytes[15]
                rgb_length = (rawbytes[16] << 8) + rawbytes[17]
                #print "seq %d phy %d sub_net %d uni %d net %d len %d" % \
                #(sequence, physical, sub_net, universe, net, rgb_length)
                # DMX payload starts at byte 18: walk it as RGB triples,
                # filling the matrix left-to-right, top-to-bottom.
                idx = 18
                x = 0
                y = 0
                while ((idx < (rgb_length+18)) and (y < 8)):
                    r = rawbytes[idx]
                    idx += 1
                    g = rawbytes[idx]
                    idx += 1
                    b = rawbytes[idx]
                    idx += 1
                    unicorn.set_pixel(x, y, r, g, b)
                    x += 1
                    if (x > 7):
                        x = 0
                        y += 1
                unicorn.show()
class OPC(protocol.Protocol):
    # Parse Open Pixel Control protocol. See http://openpixelcontrol.org/.
    # Parser state lives on the *class*, so it is shared by every
    # connection -- acceptable for a single client driving one hat.
    MAX_LEDS = 64
    parseState = 0
    pktChannel = 0
    pktCommand = 0
    pktLength = 0
    pixelCount = 0
    pixelLimit = 0
    def dataReceived(self, data):
        """Run received TCP bytes through the OPC packet state machine.

        States: 0 = channel byte, 1 = command byte, 2/3 = length
        high/low bytes, 4 = copy pixel data to the hat, 5 = discard
        overflow beyond MAX_LEDS.  NOTE(review): x/y restart at (0, 0)
        on every dataReceived call, so a pixel payload split across TCP
        segments repaints from the origin -- confirm that clients send
        whole packets per segment.
        """
        rawbytes = map(ord, data)
        #print "len(rawbytes) %d" % len(rawbytes)
        #print rawbytes
        i = 0
        while (i < len(rawbytes)):
            #print "parseState %d i %d" % (OPC.parseState, i)
            if (OPC.parseState == 0): # get OPC.pktChannel
                OPC.pktChannel = rawbytes[i]
                i += 1
                OPC.parseState += 1
            elif (OPC.parseState == 1): # get OPC.pktCommand
                OPC.pktCommand = rawbytes[i]
                i += 1
                OPC.parseState += 1
            elif (OPC.parseState == 2): # get OPC.pktLength.highbyte
                OPC.pktLength = rawbytes[i] << 8
                i += 1
                OPC.parseState += 1
            elif (OPC.parseState == 3): # get OPC.pktLength.lowbyte
                OPC.pktLength |= rawbytes[i]
                i += 1
                OPC.parseState += 1
                OPC.pixelCount = 0
                # Clamp the number of bytes we will paint to one full hat.
                OPC.pixelLimit = min(3*OPC.MAX_LEDS, OPC.pktLength)
                #print "OPC.pktChannel %d OPC.pktCommand %d OPC.pktLength %d OPC.pixelLimit %d" % \
                #    (OPC.pktChannel, OPC.pktCommand, OPC.pktLength, OPC.pixelLimit)
                if (OPC.pktLength > 3*OPC.MAX_LEDS):
                    print "Received pixel packet exeeds size of buffer! Data discarded."
                if (OPC.pixelLimit == 0):
                    OPC.parseState = 0
            elif (OPC.parseState == 4):
                copyBytes = min(OPC.pixelLimit - OPC.pixelCount, len(rawbytes) - i)
                if (copyBytes > 0):
                    OPC.pixelCount += copyBytes
                    #print "OPC.pixelLimit %d OPC.pixelCount %d copyBytes %d" % \
                    #    (OPC.pixelLimit, OPC.pixelCount, copyBytes)
                    # Command 0 = "set pixel colors"; only channels 0/1 map
                    # onto this hat.
                    if ((OPC.pktCommand == 0) and (OPC.pktChannel <= 1)):
                        x = 0
                        y = 0
                        iLimit = i + copyBytes
                        while ((i < iLimit) and (y < 8)):
                            #print "i %d" % (i)
                            r = rawbytes[i]
                            i += 1
                            g = rawbytes[i]
                            i += 1
                            b = rawbytes[i]
                            i += 1
                            unicorn.set_pixel(x, y, r, g, b)
                            #print "x %d y %d r %d g %d b %d" % (x,y,r,g,b)
                            x += 1
                            if (x > 7):
                                x = 0
                                y += 1
                        if (OPC.pixelCount >= OPC.pixelLimit):
                            unicorn.show()
                    else:
                        # Unsupported command/channel: just skip the bytes.
                        i += copyBytes
                    if (OPC.pixelCount == OPC.pktLength):
                        OPC.parseState = 0
                    else:
                        # More pixel bytes than the hat holds: discard them.
                        OPC.parseState += 1
            elif (OPC.parseState == 5):
                discardBytes = min(OPC.pktLength - OPC.pixelLimit, len(rawbytes) - i)
                #print "discardBytes %d" % (discardBytes)
                OPC.pixelCount += discardBytes
                i += discardBytes
                if (OPC.pixelCount >= OPC.pktLength):
                    OPC.parseState = 0
            else:
                print "Invalid OPC.parseState %d" % (OPC.parseState)
class OPCFactory(protocol.Factory):
    """Twisted factory producing one OPC protocol instance per TCP client."""
    def buildProtocol(self, addr):
        return OPC()
# Art-Net uses UDP port 6454; OPC servers conventionally listen on TCP 7890.
reactor.listenUDP(6454, ArtNet())
endpoints.serverFromString(reactor, "tcp:7890").listen(OPCFactory())
reactor.run()
| {
"repo_name": "bbx10/artnet-unicorn-hat",
"path": "artnet-server.py",
"copies": "1",
"size": "5540",
"license": "mit",
"hash": 1174678060999263500,
"line_mean": 39.7352941176,
"line_max": 99,
"alpha_frac": 0.4510830325,
"autogenerated": false,
"ratio": 3.9014084507042255,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48524914832042254,
"avg_score": null,
"num_lines": null
} |
"""Artnotizen organizes all dated files in a given directory in year/month/day
folders, compiles are markdown files to HTML, and constructs an easy to navigate
single-page index in index.html."""
from collections import OrderedDict
from datetime import datetime
import os
import pipes
import re
import shlex
import subprocess
import sys
import time
import urllib
import urlparse
from jinja2 import Environment, PackageLoader, FileSystemLoader
# Paths generated by this tool itself (excluded when organizing notes).
_GENFILES_REGEXP = re.compile(r"(index\.html$)|(lib/.*)$")
# Dated filenames: a YYYY[MM[DD]] prefix.
_DATE_REGEXP = re.compile(r"^(\d{4})(\d{2})?(\d{2})?.*")
# Dated directory layout: YYYY/[MM/[DD/]].
_DATE_PATH_REGEXP = re.compile(r"(\d{4})/(\d{2})?/?(\d{2})?/?.*")
# CDN libraries mirrored locally by download_libraries().
_LIBRARY_URLS = [
    "http://ajax.googleapis.com/ajax/libs/jquery/1.10.2/jquery.min.js",
    "http://ajax.googleapis.com/ajax/libs/jqueryui/1.10.3/jquery-ui.min.js",
]
def is_hidden(path):
    """Check whether given path is hidden.

    Only works for POSIX, checks for basename starting with '.'.

    Args:
        path: path to check for hidden status, absolute or relative

    Returns:
        Boolean hidden status
    """
    # startswith() also handles an empty basename (e.g. the filesystem
    # root, whose abspath has no final component); indexing with [0]
    # would raise IndexError there.
    return os.path.basename(os.path.abspath(path)).startswith(".")
def listfiles(path, hidden=False):
    """Recursively collect every file below *path* (like ``find -type f``).

    Args:
        path: directory to search for files
        hidden: when False (the default), hidden files and the contents
            of hidden directories are skipped

    Returns:
        Flat list of file paths, each prefixed with *path*
    """
    collected = []
    for root, subdirs, filenames in os.walk(path, topdown=True):
        if not hidden:
            if is_hidden(root):
                # Prune the walk: clearing the directory list in place
                # stops os.walk from descending into this hidden subtree.
                subdirs[:] = []
                continue
            filenames = [name for name in filenames if not is_hidden(name)]
        collected += [os.path.join(root, name) for name in filenames]
    return collected
def organize_notes(directory):
    """Organize notes into year/month/day folders in the given directory.

    Filenames are preserved, and only files matching ^\\d{4}(\\d{2})?(\\d{2})?.*
    are affected.

    Args:
        directory: string to current working directory

    Returns:
        A list of all matching dated notes and a list of other files
    """
    all_files = listfiles(directory)
    notes = [f for f in all_files
             if _DATE_REGEXP.match(os.path.basename(f))]
    # Paths from listfiles() already include the directory prefix, so they
    # are used as-is (joining directory on again would duplicate it), and
    # the generated-files pattern is matched against the path relative to
    # directory so its anchors can actually match.
    others = [f for f in all_files
              if f not in notes
              and not _GENFILES_REGEXP.match(os.path.relpath(f, directory))]
    out = []
    for note in notes:
        year, month, day = _DATE_REGEXP.match(os.path.basename(note)).groups("")
        note_dir = os.path.join(directory, year, month, day)
        dst = os.path.join(note_dir, os.path.basename(note))
        if note != dst:
            # Handles directory creation and file move
            os.renames(note, dst)
        out.append(dst)
    return out, others
def wait_for_all(running, delay, callback=None):
    """Poll all processes in running at interval delay until all are complete.

    WARNING: This function modifies running in-place. Any further processing of
    its processes should be handled using the callback argument.

    Args:
        running: dictionary with subprocess.Popen values.
        delay: polling interval in seconds.
        callback: optional function of (key, proc) to be called on the
            completion of proc if proc has a return code of 0.

    Returns:
        None on completion of all processes.
    """
    # NOTE: Python 2 code (dict.iteritems, print-chevron below).
    while running:
        for key, proc in running.iteritems():
            retcode = proc.poll()
            if retcode is not None:
                if retcode != 0:
                    print >> sys.stderr, "{} returned with value {}".format(
                        key, retcode)
                elif callback is not None:
                    callback(key, proc)
                # Deleting during iteration is only safe because we break
                # out of the loop immediately afterwards.
                del running[key]
                break
        else:
            # No process finished this pass; sleep before polling again.
            time.sleep(delay)
            continue
def compile_markdown(files, markdown_ext, markdown_cmd, delay=0.1):
    """Select and compile markdown files from files to HTML.

    Args:
        files: list of files to filter and compile
        markdown_ext: file extension for markdown files
        markdown_cmd: command to compile markdown to html. Must write to
            stdout.
        delay: polling delay for launched compilation processes.

    Returns:
        A list of the same length as files with the markdown filenames replaced
        by the corresponding html files.
    """
    md_files = [f for f in files if os.path.splitext(f)[1] == markdown_ext]
    out = files[:]
    cmd_args = shlex.split(markdown_cmd)
    running = {}
    for mkd in md_files:
        html_filename = os.path.splitext(mkd)[0] + ".html"
        with open(html_filename, "wb") as outfile:
            # Popen receives an argument *list*, which bypasses the shell,
            # so the filename must NOT be shell-quoted: quote characters
            # added by pipes.quote would become part of the filename itself.
            args = cmd_args + [mkd]
            running[" ".join(args)] = subprocess.Popen(args, stdout=outfile)
        out[out.index(mkd)] = html_filename
    # Poll compilation processes until all complete
    wait_for_all(running, delay)
    return out
class _Note(object):
"""_Note stores information regarding a particular note.
Attributes:
name: String name of the note, often a filename
path: String path to the note relative to index.html
"""
def __init__(self, path, name):
self.name = name
self.path = path
class _Group(object):
"""_Group stores groups of notes, possibly with child groups.
Attributes:
key: A key for sorting
identifier: Separate identifier for HTML elements
notes: A list of _Note's
children: A dictionary of child _Group's
"""
def __init__(self, key="", identifier=None, notes=None, children=None):
self.key = key
if identifier is None:
identifier = key
if notes is None:
notes = []
if children is None:
children = {}
self.identifier = identifier
self.notes = notes
self.children = children
def sort(self):
"""Sort children by key and notes by name.
Converts children to an OrderedDict."""
self.children = OrderedDict(sorted(self.children.items(),
key=lambda t: t[1].key))
self.notes.sort(key=lambda x: x.name)
def index_data(notes, directory, depth):
    """Extract index data from list of paths to note files.

    Args:
        notes: iterable of note file paths
        directory: base directory, used to relativize paths
        depth: grouping granularity, one of "year", "month", or "week"

    Returns:
        OrderedDict of group name -> _Group, newest group first, with
        undated notes collected under "Other notes".
    """
    groups = {"Other notes": _Group("zzzz", "other")}
    for note in notes:
        info = _Note(name=os.path.basename(note),
                     path=os.path.relpath(note, directory))
        match = _DATE_PATH_REGEXP.search(note)
        if not match:
            groups["Other notes"].notes.append(info)
            continue
        # Default unmatched groups to "" (not None) so the month/day
        # comparisons below work for partial paths such as "2014/".
        year, month, day = match.groups("")
        if year not in groups:
            groups[year] = _Group(year)
        if depth == "year" or month == "" or (
                day == "" and depth == "week"):
            groups[year].notes.append(info)
            continue
        if depth == "month":
            # Only the month number is needed for the heading, so a
            # missing day component is fine here.
            nice_month = datetime.strptime(month, "%m").strftime("%B")
            if nice_month not in groups[year].children:
                groups[year].children[nice_month] = _Group(month, year + month)
            groups[year].children[nice_month].notes.append(info)
            continue
        if depth == "week":
            # day is guaranteed non-empty here by the guard above, so the
            # full date can be parsed safely.
            date = datetime.strptime(year + month + day, "%Y%m%d")
            week = str(date.isocalendar()[1])
            if week not in groups[year].children:
                groups[year].children[week] = _Group(int(week), year + week)
            groups[year].children[week].notes.append(info)
            continue
    groups = OrderedDict(sorted(groups.items(), key=lambda t: t[1].key,
                                reverse=True))
    for key in groups:
        groups[key].sort()
    return groups
def download_libraries(library_urls, directory):
    """Download libraries from CDN as needed

    Downloads libraries provided in library_urls to {directory}/lib. Does not
    overwrite existing libraries with the same filenames.

    Returns a list of library paths.  NOTE(review): the returned paths
    include *directory* as a prefix rather than being relative to it,
    despite the original wording -- confirm caller expectations.
    """
    # NOTE: Python 2 code (urlparse module / urllib.urlretrieve).
    lib_dir = os.path.join(directory, "lib")
    if not os.path.exists(lib_dir):
        os.makedirs(lib_dir)
    libraries = []
    for url in library_urls:
        # Index 2 of the parse result is the URL's path component.
        filename = os.path.basename(urlparse.urlparse(url)[2])
        out = os.path.join(lib_dir, filename)
        libraries.append(out)
        if not os.path.exists(out):
            urllib.urlretrieve(url, out)
    return libraries
def build_index(notes, others, directory, template_path, depth):
    """Build HTML index of notes.

    Notes in year/[month/[day/]] folders are placed under appropriate headings.
    Other notes are organized in lexicographic order.
    """
    # Prefer an on-disk template directory; otherwise fall back to the
    # templates bundled with the artnotizen package.
    if os.path.exists(template_path):
        env = Environment(loader=FileSystemLoader(template_path))
    else:
        env = Environment(loader=PackageLoader("artnotizen"))
    libraries = download_libraries(_LIBRARY_URLS, directory)
    env.globals = {
        "notes": index_data(set(notes + others), directory, depth),
        "libraries": libraries,
    }
    template = env.get_template("index.html")
    # NOTE: Python 2 print-chevron syntax.
    with open(os.path.join(directory, "index.html"), "wb") as indexfile:
        print >> indexfile, template.render()
| {
"repo_name": "awblocker/artnotizen",
"path": "src/lib.py",
"copies": "1",
"size": "9213",
"license": "apache-2.0",
"hash": -5894505601621335000,
"line_mean": 34.98828125,
"line_max": 80,
"alpha_frac": 0.6116357321,
"autogenerated": false,
"ratio": 4.042562527424309,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5154198259524309,
"avg_score": null,
"num_lines": null
} |
"""A RT problem with two distinct modes: short wave length on the
left and long wavelenght on the right. This allows one to see
how the growth rate depends on wavenumber.
"""
from __future__ import print_function
import numpy as np
import sys
import mesh.patch as patch
from util import msg
def init_data(my_data, rp):
    """Set up the initial conditions for the two-mode RT problem."""
    msg.bold("initializing the rt problem...")

    # bail out early if we were not handed the expected patch type
    if not isinstance(my_data, patch.CellCenterData2d):
        print("ERROR: patch invalid in rt2.py")
        print(my_data.__class__)
        sys.exit()

    # conserved-variable views into the patch
    dens = my_data.get_var("density")
    xmom = my_data.get_var("x-momentum")
    ymom = my_data.get_var("y-momentum")
    ener = my_data.get_var("energy")

    # runtime parameters
    gamma = rp.get_param("eos.gamma")
    grav = rp.get_param("compressible.grav")

    dens1 = rp.get_param("rt2.dens1")
    dens2 = rp.get_param("rt2.dens2")
    p0 = rp.get_param("rt2.p0")
    amp = rp.get_param("rt2.amp")
    sigma = rp.get_param("rt2.sigma")

    # start from a quiescent state; ener holds rho*eint + 0.5*rho*v**2,
    # where eint is the specific internal energy (erg/g)
    xmom[:, :] = 0.0
    ymom[:, :] = 0.0
    dens[:, :] = 0.0

    # wavenumbers for the short (left) and long (right) perturbations
    freq_left = 18
    freq_right = 3

    myg = my_data.grid
    ycenter = 0.5*(myg.ymin + myg.ymax)

    # hydrostatic stratification in y: light fluid below, dense above
    p = myg.scratch_array()
    for j in range(myg.jlo, myg.jhi + 1):
        if myg.y[j] < ycenter:
            dens[:, j] = dens1
            p[:, j] = p0 + dens1*grav*myg.y[j]
        else:
            dens[:, j] = dens2
            p[:, j] = p0 + dens1*grav*ycenter + dens2*grav*(myg.y[j] - ycenter)

    # seed a different sinusoidal vertical-velocity perturbation on each
    # third of the domain, damped by a Gaussian envelope at the interface
    left = myg.x2d < (myg.xmax - myg.xmin)/3.0
    right = myg.x2d >= (myg.xmax - myg.xmin)/3.0

    ymom[left] = amp*np.sin(4.0*np.pi*freq_left*myg.x2d[left] /
                            (myg.xmax-myg.xmin))*np.exp(-(myg.y2d[left]-ycenter)**2/sigma**2)
    ymom[right] = amp*np.sin(4.0*np.pi*freq_right*myg.x2d[right] /
                             (myg.xmax-myg.xmin))*np.exp(-(myg.y2d[right]-ycenter)**2/sigma**2)
    ymom *= dens

    # total energy from the hydrostatic pressure plus kinetic contribution
    ener[:, :] = p[:, :]/(gamma - 1.0) + \
        0.5*(xmom[:, :]**2 + ymom[:, :]**2)/dens[:, :]
def finalize():
    """End-of-run hook; the rt2 problem has nothing to report."""
    return None
| {
"repo_name": "zingale/pyro2",
"path": "compressible/problems/rt2.py",
"copies": "2",
"size": "2462",
"license": "bsd-3-clause",
"hash": 9092478982521482000,
"line_mean": 26.6629213483,
"line_max": 95,
"alpha_frac": 0.5658001625,
"autogenerated": false,
"ratio": 2.7756482525366404,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9340278003051622,
"avg_score": 0.0002340823970037453,
"num_lines": 89
} |
# Author: Peter Hinch
# Copyright Peter Hinch 2017 Released under the MIT license
# Run this to characterise a remote.
# import as_drivers.nec_ir.art
from sys import platform
import uasyncio as asyncio
# Identify the board family so the correct Pin class can be imported.
ESP32 = platform == 'esp32' or platform == 'esp32_LoBo'
if platform == 'pyboard':
    from pyb import Pin
elif platform == 'esp8266' or ESP32:
    from machine import Pin, freq
else:
    # NOTE(review): execution continues here, so Pin stays undefined and
    # test() will later raise NameError on an unsupported board.
    print('Unsupported platform', platform)
from .aremote import *
# Map the NEC decoder's negative error codes to human-readable messages.
errors = {BADSTART : 'Invalid start pulse', BADBLOCK : 'Error: bad block',
          BADREP : 'Error: repeat', OVERRUN : 'Error: overrun',
          BADDATA : 'Error: invalid data', BADADDR : 'Error: invalid address'}
def cb(data, addr):
    """Decoder callback: print each decoded NEC event."""
    if data == REPEAT:
        print('Repeat')
        return
    if data < 0:
        # Negative data values are decoder error codes.
        print('{} Address: {}'.format(errors[data], hex(addr)))
        return
    print(hex(data), hex(addr))
def test():
    """Run the NEC IR receiver forever, printing decoded codes via cb()."""
    print('Test for IR receiver. Assumes NEC protocol.')
    print('ctrl-c to stop.')
    # Pick the IR input pin for the detected board.
    if platform == 'pyboard':
        p = Pin('X3', Pin.IN)
    elif platform == 'esp8266':
        # The ESP8266 needs the higher clock to keep up with the IR edges.
        freq(160000000)
        p = Pin(13, Pin.IN)
    elif ESP32:
        p = Pin(23, Pin.IN)
    ir = NEC_IR(p, cb, True)  # Assume r/c uses extended addressing
    loop = asyncio.get_event_loop()
    try:
        loop.run_forever()
    except KeyboardInterrupt:
        print('Interrupted')
    finally:
        asyncio.new_event_loop()  # Still need ctrl-d because of interrupt vector

test()
| {
"repo_name": "peterhinch/micropython-async",
"path": "v3/as_drivers/nec_ir/art.py",
"copies": "1",
"size": "1557",
"license": "mit",
"hash": -2782633328743102000,
"line_mean": 27.8333333333,
"line_max": 81,
"alpha_frac": 0.6377649326,
"autogenerated": false,
"ratio": 3.414473684210526,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4552238616810526,
"avg_score": null,
"num_lines": null
} |
# Author: Peter Hinch
# Copyright Peter Hinch 2017 Released under the MIT license
# Run this to characterise a remote.
from sys import platform
import uasyncio as asyncio
# Identify the board family so the correct Pin class can be imported.
ESP32 = platform == 'esp32' or platform == 'esp32_LoBo'
if platform == 'pyboard':
    from pyb import Pin
elif platform == 'esp8266' or ESP32:
    from machine import Pin, freq
else:
    # NOTE(review): execution continues here, so Pin stays undefined and
    # test() will later raise NameError on an unsupported board.
    print('Unsupported platform', platform)
from aremote import *
# Map the NEC decoder's negative error codes to human-readable messages.
errors = {BADSTART : 'Invalid start pulse', BADBLOCK : 'Error: bad block',
          BADREP : 'Error: repeat', OVERRUN : 'Error: overrun',
          BADDATA : 'Error: invalid data', BADADDR : 'Error: invalid address'}
def cb(data, addr):
    """Print one decoded NEC event (code, repeat marker, or error)."""
    if data == REPEAT:
        print('Repeat')
    elif data < 0:
        # Negative values signal a decode failure.
        print('{} Address: {}'.format(errors[data], hex(addr)))
    else:
        print(hex(data), hex(addr))
def test():
    """Start the NEC receiver and run the uasyncio loop forever."""
    print('Test for IR receiver. Assumes NEC protocol.')
    # Pick the IR input pin for the detected board.
    if platform == 'pyboard':
        p = Pin('X3', Pin.IN)
    elif platform == 'esp8266':
        # Raise the ESP8266 clock so edge timing keeps up.
        freq(160000000)
        p = Pin(13, Pin.IN)
    elif ESP32:
        p = Pin(23, Pin.IN)
    ir = NEC_IR(p, cb, True)  # Assume r/c uses extended addressing
    loop = asyncio.get_event_loop()
    loop.run_forever()

test()
| {
"repo_name": "peterhinch/micropython-async",
"path": "v2/nec_ir/art.py",
"copies": "1",
"size": "1329",
"license": "mit",
"hash": 8119847627591523000,
"line_mean": 27.2765957447,
"line_max": 78,
"alpha_frac": 0.6380737397,
"autogenerated": false,
"ratio": 3.347607052896725,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4485680792596725,
"avg_score": null,
"num_lines": null
} |
"""art URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import patterns, include, url
from django.contrib import admin
from general import views
from django.conf import settings
# Project-level URL routing: admin, landing page, the general app, and
# direct media serving in development.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^$', views.index, name='index'),
    url(r'^general/', include('general.urls', namespace="general")),
    # NOTE(review): passing a view as a dotted string was deprecated in
    # Django 1.8 and removed in 1.10 -- confirm the project's Django
    # version before upgrading.
    url(r'^media/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT}),
]
| {
"repo_name": "memnonila/art",
"path": "art/urls.py",
"copies": "1",
"size": "1052",
"license": "mit",
"hash": 3722788621007718000,
"line_mean": 39.4615384615,
"line_max": 102,
"alpha_frac": 0.6920152091,
"autogenerated": false,
"ratio": 3.542087542087542,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9732235686213682,
"avg_score": 0.00037341299477221804,
"num_lines": 26
} |
"""ArtWorkManager URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
# Project-level URL routing, plus media file serving in development.
urlpatterns = [
    url(r'^admin/', include(admin.site.urls)),
    # NOTE(review): combining the fully-anchored pattern r'^$' with
    # include() means only an empty remainder can ever match inside
    # home.urls -- presumably r'^' was intended; confirm.
    url(r'^$', include('home.urls', namespace='home')),
    url(r'^login/', include('login.urls', namespace='login')),
    url(r'^manager/', include('manager.urls', namespace='manager')),
    url(r'^developer/', include('developer.urls', namespace='developer')),
    url(r'^art/', include('art.urls', namespace='art')),
    url(r'^plan/', include('plan.urls', namespace='plan')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| {
"repo_name": "biansetuliao/ArtWorkManager",
"path": "ArtWorkManager/urls.py",
"copies": "1",
"size": "1284",
"license": "mit",
"hash": -1294615595672244200,
"line_mean": 43.275862069,
"line_max": 77,
"alpha_frac": 0.6908099688,
"autogenerated": false,
"ratio": 3.5766016713091924,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9767411640109193,
"avg_score": 0,
"num_lines": 29
} |
"""A rudimentary port of the intro video used for the intro animation on
pymunk.org. The code is tested on both Windows and Android.
Note that it doesn't display Kivy best practices, the intro_video
code was just converted to Kivy in the most basic way to show that its possible,
its not supposed to show the best way to structure a Kivy application using
Pymunk.
"""
__version__ = "0.1.3"

# python main.py -m screen:iphone4,portrait

import random

# Fixed seed so the scripted intro animation plays identically every run.
random.seed(5)
import cffi
import kivy
from kivy.app import App
from kivy.clock import Clock
from kivy.core.image import Image as CoreImage
from kivy.core.window import Window
from kivy.graphics import Color, Ellipse, Line, Quad, Rectangle, Triangle
from kivy.uix.anchorlayout import AnchorLayout
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.image import Image
from kivy.uix.label import Label
from kivy.uix.relativelayout import RelativeLayout
from kivy.uix.scatter import Scatter
from kivy.uix.scatterlayout import ScatterLayout
from kivy.uix.widget import Widget
import pymunk
import pymunk.autogeometry
from pymunk.vec2d import Vec2d
class PymunkDemo(RelativeLayout):
def small_ball(self, space):
for x in range(10):
mass = 3
radius = 8
moment = pymunk.moment_for_circle(mass, 0, radius)
b = pymunk.Body(mass, moment)
c = pymunk.Circle(b, radius)
c.friction = 1
x = random.randint(100, 350)
y = random.randint(300, 340)
b.position = x, y
space.add(b, c)
with self.canvas:
Color(0.2, 0.6, 0.86)
c.ky = self.ellipse_from_circle(c)
def big_ball(self, space):
mass = 1000
radius = 50
moment = pymunk.moment_for_circle(mass, 0, radius)
b = pymunk.Body(mass, moment)
c = pymunk.Circle(b, radius)
c.friction = 1
c.color = 255, 0, 0
b.position = 800, 200
b.apply_impulse_at_local_point((-10000, 0), (0, 1000))
space.add(b, c)
with self.canvas:
Color(1, 0, 0)
c.ky = self.ellipse_from_circle(c)
def boxfloor(self, space):
mass = 10
vs = [(-50, 30), (60, 22), (-50, 22)]
moment = pymunk.moment_for_poly(mass, vs)
b = pymunk.Body(mass, moment)
s = pymunk.Poly(b, vs)
s.friction = 1
s.color = 0, 0, 0
b.position = 600, 250
space.add(b, s)
with self.canvas:
Color(0.2, 0.2, 0.2)
s.ky = Triangle(points=self.points_from_poly(s))
def box(self, space):
mass = 10
moment = pymunk.moment_for_box(mass, (40, 20))
b = pymunk.Body(mass, moment)
s = pymunk.Poly.create_box(b, (40, 20))
s.friction = 1
b.position = 600, self.box_y
self.box_y += 30
space.add(b, s)
with self.canvas:
Color(0.2, 0.2, 0.2)
s.ky = Quad(points=self.points_from_poly(s))
def car(self, space):
pos = Vec2d(100, 100)
wheel_color = 0.2, 0.86, 0.47
shovel_color = 0.86, 0.47, 0.2
mass = 100
radius = 25
moment = pymunk.moment_for_circle(mass, 20, radius)
wheel1_b = pymunk.Body(mass, moment)
wheel1_s = pymunk.Circle(wheel1_b, radius)
wheel1_s.friction = 1.5
wheel1_s.color = wheel_color
space.add(wheel1_b, wheel1_s)
mass = 100
radius = 25
moment = pymunk.moment_for_circle(mass, 20, radius)
wheel2_b = pymunk.Body(mass, moment)
wheel2_s = pymunk.Circle(wheel2_b, radius)
wheel2_s.friction = 1.5
wheel2_s.color = wheel_color
space.add(wheel2_b, wheel2_s)
mass = 100
size = (50, 30)
moment = pymunk.moment_for_box(mass, size)
chassi_b = pymunk.Body(mass, moment)
chassi_s = pymunk.Poly.create_box(chassi_b, size)
space.add(chassi_b, chassi_s)
vs = [(0, 0), (0, -45), (25, -45)]
shovel_s = pymunk.Poly(chassi_b, vs, transform=pymunk.Transform(tx=85))
shovel_s.friction = 0.5
shovel_s.color = shovel_color
space.add(shovel_s)
wheel1_b.position = pos - (55, 0)
wheel2_b.position = pos + (55, 0)
chassi_b.position = pos + (0, 25)
space.add(
pymunk.PinJoint(wheel1_b, chassi_b, (0, 0), (-25, -15)),
pymunk.PinJoint(wheel1_b, chassi_b, (0, 0), (-25, 15)),
pymunk.PinJoint(wheel2_b, chassi_b, (0, 0), (25, -15)),
pymunk.PinJoint(wheel2_b, chassi_b, (0, 0), (25, 15)),
)
speed = -4
space.add(
pymunk.SimpleMotor(wheel1_b, chassi_b, speed),
pymunk.SimpleMotor(wheel2_b, chassi_b, speed),
)
with self.canvas:
Color(*wheel_color)
wheel1_s.ky = self.ellipse_from_circle(wheel1_s)
Color(*wheel_color)
wheel2_s.ky = self.ellipse_from_circle(wheel2_s)
Color(*shovel_color)
chassi_s.ky = Quad(points=self.points_from_poly(chassi_s))
shovel_s.ky = Triangle(points=self.points_from_poly(shovel_s))
def cannon(self, space):
mass = 100
radius = 15
moment = pymunk.moment_for_circle(mass, 0, radius)
b = pymunk.Body(mass, moment)
s = pymunk.Circle(b, radius)
s.color = 0.86, 0.2, 0.6
b.position = 700, 400
space.add(b, s)
impulse = Vec2d(-200000, -75000)
b.apply_impulse_at_local_point((impulse))
with self.canvas:
Color(*s.color)
s.ky = self.ellipse_from_circle(s)
def create_logo_lines(self, logo_img):
logo_bb = pymunk.BB(0, 0, logo_img.width, logo_img.height)
def sample_func(point):
try:
color = logo_img.read_pixel(point[0], point[1])
return color[3] * 255
except Exception:
return 0
line_set = pymunk.autogeometry.march_soft(
logo_bb, logo_img.width, logo_img.height, 99, sample_func
)
r = 10
lines = []
for line in line_set:
line = pymunk.autogeometry.simplify_curves(line, 0.7)
max_x = 0
min_x = 1000
max_y = 0
min_y = 1000
for l in line:
max_x = max(max_x, l.x)
min_x = min(min_x, l.x)
max_y = max(max_y, l.y)
min_y = min(min_y, l.y)
w, h = max_x - min_x, max_y - min_y
# we skip the line which has less than 35 height, since its the "hole" in
# the p in pymunk, and we dont need it.
if h < 35:
continue
center = Vec2d(min_x + w / 2.0, min_y + h / 2.0)
t = pymunk.Transform(a=1.0, d=1.0, tx=-center.x, ty=-center.y)
r += 30
if r > 255:
r = 0
line = [Vec2d(l.x, 300 - l.y) for l in line]
lines.append(line)
return lines
def create_logo(self, lines, space):
for line in lines:
for i in range(len(line) - 1):
shape = pymunk.Segment(space.static_body, line[i], line[i + 1], 1)
shape.friction = 0.5
space.add(shape)
def init(self):
self.step = 1 / 60.0
ci = CoreImage("pymunk_logo.png", keep_data=True)
self.logo_lines = self.create_logo_lines(ci)
self.logo_img = ci
self.touches = {}
self.start()
def start(self):
self.space = space = pymunk.Space()
space.gravity = 0, -900
space.sleep_time_threshold = 0.3
space.steps = 0
self.create_logo(self.logo_lines, space)
with self.canvas:
Rectangle(
texture=self.logo_img.texture,
pos=(0, 300 - self.logo_img.height),
size=self.logo_img.size,
)
floor = pymunk.Segment(space.static_body, (-100, 0), (900, 62), 5)
floor.friction = 1.0
space.add(floor)
with self.canvas:
Color(0.2, 0.2, 0.2)
floor.ky = Line(points=[-100, 0, 900, 62], width=5)
# we use our own event scheduling to make sure a event happens exactly
# after X amount of simulation steps
self.events = []
self.events.append((10, self.big_ball))
for x in range(8):
self.events.append((1 + 10 * x, self.small_ball))
self.events.append((200, self.big_ball))
self.events.append((350, self.boxfloor))
self.box_y = 150
for x in range(8):
self.events.append((400 + x * 10, self.box))
self.events.append((650, self.car))
self.events.append((850, self.cannon))
self.events.append((1200, self.reset))
self.update_event = Clock.schedule_interval(self.update, 1.0 / 20.0)
def reset(self, *args):
self.clear_widgets()
self.update_event.cancel()
self.canvas.clear()
self.start()
def update(self, dt):
stepdelay = 25
for x in range(6):
self.space.step(1.0 / 60.0 / 2)
self.space.step(1.0 / 60.0 / 2)
self.space.steps += 1
if (
len(self.events) > 0
and self.space.steps - stepdelay > self.events[0][0]
):
_, f = self.events.pop(0)
f(self.space)
for shape in self.space.shapes:
if hasattr(shape, "ky") and not shape.body.is_sleeping:
if isinstance(shape, pymunk.Circle):
body = shape.body
shape.ky[0].pos = body.position - (shape.radius, shape.radius)
circle_edge = body.position + Vec2d(shape.radius, 0).rotated(
body.angle
)
shape.ky[1].points = [
body.position.x,
body.position.y,
circle_edge.x,
circle_edge.y,
]
if isinstance(shape, pymunk.Segment):
body = shape.body
p1 = body.position + shape.a.cpvrotate(body.rotation_vector)
p2 = body.position + shape.b.cpvrotate(body.rotation_vector)
shape.ky.points = p1.x, p1.y, p2.x, p2.y
if isinstance(shape, pymunk.Poly):
shape.ky.points = self.points_from_poly(shape)
    def on_touch_up(self, touch):
        """Finish a drag: remove the aim line and launch a ball.

        A dynamic circle body is created at the drag start point and given
        an impulse proportional to the drag vector (start -> release point).
        """
        if touch.grab_current is self:
            touch.ungrab(self)
            p = self.to_local(*touch.pos)
            d = self.touches[touch.uid]
            d["line"].points = [d["start"][0], d["start"][1], p[0], p[1]]
            self.canvas.remove(d["line"])

            # Create the projectile body + collision shape.
            mass = 50
            radius = 15
            moment = pymunk.moment_for_circle(mass, 0, radius)
            b = pymunk.Body(mass, moment)
            s = pymunk.Circle(b, radius)
            s.color = 0.86, 0.2, 0.6
            b.position = d["start"]
            self.space.add(b, s)
            # Impulse scales with how far the user dragged.
            impulse = 200 * (Vec2d(*p) - d["start"])
            b.apply_impulse_at_local_point(impulse)
            with self.canvas:
                Color(*s.color)
                s.ky = self.ellipse_from_circle(s)
def on_touch_move(self, touch):
if touch.grab_current is self:
p = self.to_local(*touch.pos)
d = self.touches[touch.uid]
d["line"].points = [d["start"][0], d["start"][1], p[0], p[1]]
def on_touch_down(self, touch):
touch.grab(self)
p = self.to_local(*touch.pos)
self.touches[touch.uid] = {"start": p}
with self.canvas:
Color(1, 0, 0, 0.5)
line = Line(points=[p[0], p[1], p[0], p[1]], width=15)
self.touches[touch.uid]["line"] = line
return True
    def ellipse_from_circle(self, shape):
        """Create canvas instructions mirroring a pymunk circle shape.

        Returns an (Ellipse, Line) pair: the filled disc plus a radius line
        that makes the body's rotation visible. Must be called inside a
        ``with self.canvas`` block (the Color/Line calls draw into it).
        """
        pos = shape.body.position - (shape.radius, shape.radius)
        e = Ellipse(pos=pos, size=[shape.radius * 2, shape.radius * 2])
        circle_edge = shape.body.position + Vec2d(shape.radius, 0).rotated(
            shape.body.angle
        )
        Color(0.17, 0.24, 0.31)  # dark color for the radius marker line
        l = Line(
            points=[
                shape.body.position.x,
                shape.body.position.y,
                circle_edge.x,
                circle_edge.y,
            ]
        )
        return e, l
def points_from_poly(self, shape):
body = shape.body
ps = [p.rotated(body.angle) + body.position for p in shape.get_vertices()]
vs = []
for p in ps:
vs += [p.x, p.y]
return vs
class MyApp(App):
    """Kivy application hosting the pymunk demo widget."""

    def build(self):
        """Build the widget tree: a PymunkDemo inside a FloatLayout."""
        Window.clearcolor = (1, 1, 1, 1)
        Window.set_title("Pymunk demo")
        demo = PymunkDemo()
        demo.size_hint = 1, 1
        demo.init()
        # The demo draws in a coordinate system offset 300 px upwards.
        demo.pos = 0, 300
        l = FloatLayout()
        l.add_widget(demo)
        return l
# Launch the demo as a standalone Kivy application.
if __name__ == "__main__":
    MyApp().run()
| {
"repo_name": "viblo/pymunk",
"path": "examples/kivy_pymunk_demo/main.py",
"copies": "1",
"size": "13128",
"license": "mit",
"hash": 3532567208560812500,
"line_mean": 31.5756823821,
"line_max": 85,
"alpha_frac": 0.5246039001,
"autogenerated": false,
"ratio": 3.301810865191147,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4326414765291147,
"avg_score": null,
"num_lines": null
} |
"""A RuleSet is a set of math rules organized by category.
The actual rules are in the rulesets submodule.
Usage:
- Call begin_rule_set.
- Add topics with add_topic
- Set a default topic order by using set_topic_order.
- Add rules by importing modules which make use of the rule decorator.
- call end_rule_set.
there is an alternative and less declarative interface; see the RuleSet class.
Rules themselves should do one of two things:
- Fail. A failed rule does nothing except return FAILED_RULE (note the caps) from this module.
- Succeed. A successful rule should return the result of an invocation of rule_return_value.
"""
from __future__ import unicode_literals
from . import data
# The sentinel returned by a failed rule.
# Compared with ``is`` (identity), so any unique object works.
FAILED_RULE = object()
class NoSuchTopicError(Exception):
    """Thrown when we try to add a rule for an unregistered topic."""
    def __init__(self, topic):
        # Fix: pass the current class to super(), not the base class.
        # super(Exception, self) starts the MRO lookup *after* Exception,
        # which only worked by accident via BaseException.__init__.
        super(NoSuchTopicError, self).__init__("No Such Topic: {}".format(topic))
class NoSuchTagError(Exception):
    """Thrown when a rule is registered for an unknown MathML tag."""
    def __init__(self, tag):
        # Fix: super() should receive the current class, not the base class.
        super(NoSuchTagError, self).__init__("No such tag: {}".format(tag))
class NoSuchRuleSetError(Exception):
    """Thrown when an unknown rule set name is requested."""
    def __init__(self, rule_set):
        # Fix: super() should receive the current class, not the base class.
        super(NoSuchRuleSetError, self).__init__(
            "No such rule set {}".format(rule_set))
class Rule(object):
    """A single rule: maps locales to handler functions."""

    def __init__(self):
        # locale -> callable taking a node and returning a rule result.
        self.rule_dict = dict()

    def add(self, locale, func):
        """Register *func* as the handler for *locale*."""
        self.rule_dict[locale] = func

    def execute(self, node, locale):
        """Run the handler for *locale* on *node*.

        Falls back to the 'default' handler; returns FAILED_RULE when
        neither is registered.
        """
        if locale in self.rule_dict:
            handler = self.rule_dict[locale]
        elif 'default' in self.rule_dict:
            handler = self.rule_dict['default']
        else:
            return FAILED_RULE
        return handler(node)
class RuleSet(object):
    """A named collection of rules, organized by topic.

    Rules are stored as ``rules[topic][tag] -> Rule`` and consulted in the
    order given by ``topic_order``.
    """
    def __init__(self, name):
        self.name = name
        self.topics = []
        self.topic_order = [] #which order to look at topics for rules.
        self.rules = dict()

    def get_topics(self):
        """Return the list of registered topics.

        Bug fix: this method was missing its ``self`` parameter, so any
        instance call raised TypeError (and its body referenced an
        undefined ``self``).
        """
        return self.topics

    def add_topic(self, topic):
        """Register *topic* and create an empty rule table for it."""
        self.topics.append(topic)
        self.rules[topic] = dict()

    def set_rule(self, topic, for_tag, func, locale = 'default'):
        """set a rule which is active when a topic is active, for the MathML tag for_tag, and which is executed by calling func.
        Func receives one argument: a node object."""
        if for_tag not in data.all_tags:
            raise NoSuchTagError(for_tag)
        if topic not in self.topics:
            raise NoSuchTopicError(topic)
        if for_tag not in self.rules[topic]:
            self.rules[topic][for_tag] = Rule()
        self.rules[topic][for_tag].add(locale, func)

    def get_topic_order(self):
        """Return the topic lookup order."""
        return self.topic_order

    def set_topic_order(self, new_order):
        """Set the topic lookup order; every entry must be a known topic."""
        for i in new_order:
            if i not in self.topics:
                raise NoSuchTopicError(i)
        self.topic_order = new_order
# Module state for the decorator-based API.
current_rule_set = None  # RuleSet currently being built, or None.
rule_sets = dict()  # name -> finished RuleSet.
# NOTE: a single shared exception *instance*; it is raised (not
# re-instantiated) at every use site below.
no_current_rule_set = Exception("Error: attempt to add rules with decorator-based API before beginning rule set")
def begin_rule_set(name):
    """Start building a new RuleSet called *name* (decorator-based API)."""
    global current_rule_set
    current_rule_set = RuleSet(name)
def end_rule_set():
    """Finish the rule set under construction and register it by name."""
    global rule_sets, current_rule_set
    if current_rule_set is None:
        raise no_current_rule_set
    finished = current_rule_set
    current_rule_set = None
    rule_sets[finished.name] = finished
def add_topic(topic):
    """Add *topic* to the rule set currently under construction."""
    global current_rule_set
    if current_rule_set is None:
        raise no_current_rule_set
    current_rule_set.add_topic(topic)
def add_topics(*args):
    """Add every topic in *args* to the rule set under construction."""
    for topic in args:
        add_topic(topic)
def set_topic_order(order):
    """Set the topic lookup order on the rule set under construction."""
    global current_rule_set
    if current_rule_set is None:
        raise no_current_rule_set
    current_rule_set.set_topic_order(order)
def set_rule(topic, tag, func, locale = 'default'):
    """Register *func* on the rule set under construction.

    See RuleSet.set_rule for the meaning of the arguments.
    """
    global current_rule_set
    if current_rule_set is None:
        raise no_current_rule_set
    current_rule_set.set_rule(topic, tag, func, locale)
def rule(topic, tag, locale = 'default'):
    """Decorator form of set_rule: register the decorated function as a rule."""
    def decorator(func):
        set_rule(topic, tag, func, locale)
        return func
    return decorator
# Helper for building the dict a successful rule must return.
def rule_return_value(node, template_string, template_string_low_verbocity = None, zoom_targets = None):
    """Build the result dict for a successful rule.

    Falls back to *template_string* for the low-verbosity variant and to
    the node's own zoom targets when none are given.
    """
    low = (template_string if template_string_low_verbocity is None
           else template_string_low_verbocity)
    targets = node.get_zoom_targets() if zoom_targets is None else zoom_targets
    return {
        'template_string' : template_string,
        'template_string_low_verbocity': low,
        'zoom_targets' : targets,
    }
def _apply_node(node, rule_set, locale):
    """Run the first matching rule for *node* and store the result on it.

    Topics are tried in the rule set's configured order; if no rule
    succeeds, a generic error template is used.
    """
    fallback = rule_return_value(node, template_string = "Error: could not translate node")
    chosen = fallback
    for topic in rule_set.get_topic_order():
        table = rule_set.rules[topic]
        if node.tag not in table:
            continue
        outcome = table[node.tag].execute(node, locale)
        if outcome is not FAILED_RULE:
            chosen = outcome
            break
    # Copy the chosen rule's result onto the node.
    node.zoom_targets = chosen['zoom_targets']
    node.template_string = chosen['template_string']
    node.template_string_low_verbocity = chosen['template_string_low_verbocity']
def apply_rule_set(tree, rule_set_name, locale):
    """Apply the named rule set to every node of *tree*, then build strings."""
    if rule_set_name not in rule_sets:
        raise NoSuchRuleSetError(rule_set_name)
    selected = rule_sets[rule_set_name]
    nodes = list(tree.iterate())
    for node in nodes:
        _apply_node(node, selected, locale)
    # The iterator is breadth-first, so reversed order visits deepest first.
    for node in reversed(nodes):
        node.compute_strings()
| {
"repo_name": "3mousetech/mathml_accessibility",
"path": "mathml_accessibility/rule_set.py",
"copies": "1",
"size": "5221",
"license": "mpl-2.0",
"hash": 7967967802982255000,
"line_mean": 29.7117647059,
"line_max": 122,
"alpha_frac": 0.7186362766,
"autogenerated": false,
"ratio": 3.0766057748968767,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9154124513366916,
"avg_score": 0.028223507625992118,
"num_lines": 170
} |
# A run is a sequence of adjacent repeated values. Write a program that prints the
# runs by including them in parentheses, like this:
# 1 2 (5 5) 3 1 2 4 3 (2 2 2 2) 3 6 (5 5) 6 3 1
#
# Use the following pseudocode:
# Set a boolean variable inRun to false.
# For each valid index i in the list
# If inRun
# If values[i] is different from the preceding value
# Print ).
# inRun = false.
# If not inRun
# If values[i] is the same as the following value
# Print (.
# inRun = true.
# Print values[i].
# If inRun, print ).
# FUNCTIONS
def run(list):
    """Print *list*, wrapping each run of adjacent equal values in parentheses.

    Implements the pseudocode from the header, producing output such as
    ``1 2 ( 5 5 ) 3``. The parameter keeps its original name ``list``
    (which shadows the builtin) to preserve the call interface.

    Bug fixes vs. the original:
    - iterate over the whole list (the original used ``range(len(list) - 1)``
      and silently dropped the last element);
    - print values space-separated as the specification shows, instead of
      with trailing commas.
    """
    inRun = False
    for i in range(len(list)):
        # Close a run once the value changes from the preceding one.
        if inRun and list[i] != list[i - 1]:
            print(")", end = " ")
            inRun = False
        # Open a run when the next value repeats this one (guard the
        # look-ahead so the final element cannot index out of range).
        if not inRun and i + 1 < len(list) and list[i] == list[i + 1]:
            print("(", end = " ")
            inRun = True
        print(list[i], end = " ")
    if inRun:
        print(")", end = "")
# main
def main():
    """Demonstrate run() on an example list containing several runs."""
    exampleList = [ 1, 2, 5, 5, 3, 1, 2, 4, 3, 2, 2, 2, 2, 3, 6, 5, 5, 6, 3, 1 ]
    print("List, before", exampleList)
    print("List, after")
    # Bug fix: run() prints its output and returns None, so the original
    # ``print(run(exampleList))`` appended a stray "None" line. Call it
    # directly and finish with a newline instead.
    run(exampleList)
    print()

# PROGRAM RUN
main()
"repo_name": "futurepr0n/Books-solutions",
"path": "Python-For-Everyone-Horstmann/Chapter6-Lists/P6.14.py",
"copies": "1",
"size": "1313",
"license": "mit",
"hash": 3385202291786896000,
"line_mean": 25.8163265306,
"line_max": 82,
"alpha_frac": 0.4851485149,
"autogenerated": false,
"ratio": 3.4192708333333335,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4404419348233333,
"avg_score": null,
"num_lines": null
} |
"""A run loop for agent/environment interaction."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
def run_loop(agents, env, max_frames=0):
  """Run *agents* against *env* until max_frames (0 = unlimited) or Ctrl-C.

  Each agent is set up with the environment's specs, then stepped in
  lockstep with its timestep on every frame. On exit (return, interrupt
  or error) the achieved frames-per-second rate is printed.
  """
  frame_count = 0
  started = time.time()
  act_spec = env.action_spec()
  obs_spec = env.observation_spec()
  for agent in agents:
    agent.setup(obs_spec, act_spec)
  try:
    while True:  # one episode per iteration
      timesteps = env.reset()
      for agent in agents:
        agent.reset()
      while True:  # one frame per iteration
        frame_count += 1
        actions = [agent.step(timestep, frame_count)
                   for agent, timestep in zip(agents, timesteps)]
        if max_frames and frame_count >= max_frames:
          return
        if timesteps[0].last():
          break
        timesteps = env.step(actions)
  except KeyboardInterrupt:
    pass
  finally:
    elapsed = time.time() - started
    print("Took %.3f seconds for %s steps: %.3f fps" % (
        elapsed, frame_count, frame_count / elapsed))
| {
"repo_name": "sino30535/pysc2_simple64_agent",
"path": "run_loop_m.py",
"copies": "1",
"size": "1108",
"license": "apache-2.0",
"hash": 3715218716718686700,
"line_mean": 26.7,
"line_max": 65,
"alpha_frac": 0.6326714801,
"autogenerated": false,
"ratio": 3.820689655172414,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9862819869171053,
"avg_score": 0.018108253220271996,
"num_lines": 40
} |
"""Arun Microelectronics Ltd. gauge drivers
Driver for Arun Microelectronics Ltd. (AML) gauges according to the
manual of an NGC2D instrument, the driver should also support PGC1
instruments.
"""
import unittest
import pyhard2.driver as drv
Cmd, Access = drv.Command, drv.Access
def _parse_stat_byte(stat):
"""Parse status byte."""
mode = 'local' if stat & 0b10000 == 0 else 'remote'
ig = 1 if stat & 0b1000000 == 0 else 2
connected = stat & 0b10000000 == 0
return (mode, ig, connected)
def _parse_err_byte(err):
"""Parse error byte."""
error = err & 0b1 == 0
temperature_error = err & 0b10 == 0
temperature_warning = err & 0b1000 == 0
return (error, temperature_error, temperature_warning)
def _parser(type_):
    """Wrap message parsers.

    Parameters:
        type_ (str): {"measure", "unit", "type", "status", "error"}

    Returns a function that parses a status message and extracts the
    field selected by *type_*.
    """
    def parser(status):
        """Parse one status message and return the requested field."""
        gauge_types = {"I": "ion gauge",
                       "P": "Pirani",
                       "M": "capacitance manometer"}
        units = {"T": "Torr",
                 "P": "Pascal",
                 "M": "mBar"}
        # NOTE(review): the byte offsets below overlap (index 5 is read
        # both as the error byte and as the first pressure digit); kept
        # as-is -- verify against the NGC2D manual.
        stat_byte, err_byte = status[4:6]
        fields = {
            "measure": float(status[5:12]),
            "unit": units.get(status[13], "error"),
            "type": gauge_types.get(status[1], "error"),
            "status": _parse_stat_byte(ord(stat_byte)),
            "error": _parse_err_byte(ord(err_byte)),
        }
        return fields[type_]
    return parser
class Protocol(drv.CommunicationProtocol):
    """Communication protocol.

    Communication is read only:

    .. uml::

        group Query
        User -> Instrument: "*{command}{node}"
        note right: {node} is not used on NGC2D instruments
        User <-- Instrument: 17-bytes response
        end
    """
    def __init__(self, socket):
        super(Protocol, self).__init__(socket)
        # Serial-line parameters for the instrument.
        self._socket.baudrate = 9600
        self._socket.timeout = 0.1
        self._socket.newline = "\r\n"
        # Node address; required for compatibility with older hardware.
        self._node = 0

    def read(self, context):
        """Send the query for *context* and return the raw response line."""
        query = "*{reader}{node}\r\n".format(reader=context.reader,
                                            node=self._node)
        self._socket.write(query)
        return self._socket.readline()
class Ngc2d(drv.Subsystem):
    """Driver for NGC2D ion gauges.

    Exposes several read-only commands that all query the same "S" status
    message and extract a single field from it via :func:`_parser`.

    .. graphviz:: gv/Ngc2d.txt
    """
    def __init__(self, socket):
        super(Ngc2d, self).__init__()
        self.setProtocol(Protocol(socket))
        # Commands
        self.poll = Cmd("P", Access.WO)  # write-only poll command
        # control
        # release
        self.reset_error = Cmd("E", Access.WO)  # write-only error reset
        # Each of the following reads the "S" status message and extracts
        # one field from the response.
        self.measure = Cmd("S", rfunc=_parser("measure"))
        self.unit = Cmd("S", rfunc=_parser("unit"))
        self.IG_type = Cmd("S", rfunc=_parser("type"))
        self.error = Cmd("S", rfunc=_parser("error"))
        self.status = Cmd("S", rfunc=_parser("status"))
        # emission
        # gauge off
        # override
        # inhibit
class TestAml(unittest.TestCase):
    """Unit tests for the Ngc2d driver, using a canned serial response."""

    def setUp(self):
        socket = drv.TesterSocket()
        # Return a pressure of 1.3e-7 mbar.
        socket.msg = {"*S0\r\n": "GI1\x65\x001.3E-07,M0\r\n"}
        self.i = Ngc2d(socket)

    def test_measure(self):
        # The pressure field is parsed out of the canned status message.
        self.assertEqual(self.i.measure.read(), 1.3e-7)

    def test_unit(self):
        # Unit character "M" maps to "mBar".
        self.assertEqual(self.i.unit.read(), "mBar")
if __name__ == "__main__":
    # Run the unit tests with debug logging enabled.
    import logging
    logging.basicConfig()
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)
    unittest.main()
| {
"repo_name": "Synss/pyhard2",
"path": "pyhard2/driver/aml.py",
"copies": "1",
"size": "3605",
"license": "mit",
"hash": -59354821380886040,
"line_mean": 26.9457364341,
"line_max": 72,
"alpha_frac": 0.5600554785,
"autogenerated": false,
"ratio": 3.534313725490196,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45943692039901957,
"avg_score": null,
"num_lines": null
} |
"""A runner outputing logits or normalized distriution from a decoder."""
from typing import Dict, List, Any
# pylint: disable=unused-import
from typing import Optional
# pylint: enable=unused-import
from typeguard import check_argument_types
import numpy as np
import tensorflow as tf
from neuralmonkey.runners.base_runner import (BaseRunner, Executable,
FeedDict, ExecutionResult,
NextExecute)
from neuralmonkey.model.model_part import ModelPart
from neuralmonkey.vocabulary import Vocabulary
class LogitsExecutable(Executable):
    """Executable fetching decoder logits and formatting them as strings."""

    def __init__(self,
                 all_coders: List[ModelPart],
                 fetches: FeedDict,
                 vocabulary: Vocabulary,
                 normalize: bool = True,
                 pick_index: int = None) -> None:
        self.all_coders = all_coders
        self._fetches = fetches
        self._vocabulary = vocabulary
        self._normalize = normalize
        self._pick_index = pick_index

        self.decoded_sentences = []  # type: List[List[str]]
        self.result = None  # type: Optional[ExecutionResult]

    def next_to_execute(self) -> NextExecute:
        """Get the feedables and tensors to run."""
        return self.all_coders, self._fetches, {}

    def collect_results(self, results: List[Dict]) -> None:
        """Convert fetched logits into tab-separated string outputs.

        Raises:
            ValueError: If more than one execution result is provided.
        """
        if len(results) != 1:
            raise ValueError('LogitsRunner needs exactly 1 execution result, '
                             'got {}'.format(len(results)))
        train_loss = results[0]["train_loss"]
        runtime_loss = results[0]["runtime_loss"]

        # logits_list in shape (time, batch, vocab)
        logits_list = results[0]["logits"]

        # outputs are lists of strings (batch, time)
        outputs = [[] for _ in logits_list[0]]  # type: List[List[str]]
        for time_step in logits_list:
            for logits, output_list in zip(time_step, outputs):
                if self._normalize:
                    # Softmax over the vocabulary axis.
                    logits = np.exp(logits) / np.sum(np.exp(logits), axis=0)
                # Bug fix: compare against None so that pick_index == 0 (a
                # valid vocabulary index) selects the single value instead of
                # falling through to the full-distribution branch.
                if self._pick_index is not None:
                    instance_logits = str(logits[self._pick_index])
                else:
                    instance_logits = ",".join(str(l) for l in logits)
                output_list.append(instance_logits)

        str_outputs = [["\t".join(l)] for l in outputs]

        self.result = ExecutionResult(
            outputs=str_outputs,
            losses=[train_loss, runtime_loss],
            scalar_summaries=None,
            histogram_summaries=None,
            image_summaries=None)
# pylint: disable=too-few-public-methods
class LogitsRunner(BaseRunner):
    """A runner which takes the output from decoder.decoded_logits.

    The logits / normalized probabilities are output as tab-separated
    string values. If the decoder produces a list of logits (as the
    recurrent decoder does), the tab-separated arrays are separated with
    commas. Alternatively, we may be interested in a single distribution
    dimension.
    """
    def __init__(self,
                 output_series: str,
                 decoder: Any,
                 normalize: bool = True,
                 pick_index: int = None,
                 pick_value: str = None) -> None:
        """Initialize the logits runner.

        Args:
            output_series: Name of the series produced by the runner.
            decoder: A decoder having logits.
            normalize: Flag whether the logits should be normalized with
                softmax.
            pick_index: If not None, it specifies the index of the logit or the
                probability that should be on output.
            pick_value: If not None, it specifies a value from the decoder's
                vocabulary whose logit or probability should be on output.

        Raises:
            ValueError: If both pick_index and pick_value are given, or if
                pick_value is not in the decoder's vocabulary.
        """
        super(LogitsRunner, self).__init__(output_series, decoder)
        check_argument_types()

        if pick_index is not None and pick_value is not None:
            raise ValueError("Either a pick index or a vocabulary value can "
                             "be specified, not both at the same time.")

        self._normalize = normalize
        if pick_value is None:
            self._pick_index = pick_index
        else:
            # Translate the vocabulary word into its index.
            if pick_value not in decoder.vocabulary:
                raise ValueError(
                    "Value '{}' is not in vocabulary of decoder '{}'".format(
                        pick_value, decoder.name))
            self._pick_index = decoder.vocabulary.word_to_index[pick_value]

    def get_executable(self,
                       compute_losses: bool = False,
                       summaries: bool = True) -> LogitsExecutable:
        """Build an executable fetching the logits and (optional) losses."""
        if compute_losses:
            fetches = {"train_loss": self._decoder.train_loss,
                       "runtime_loss": self._decoder.runtime_loss}
        else:
            # Placeholders so the result structure stays uniform.
            fetches = {"train_loss": tf.zeros([]),
                       "runtime_loss": tf.zeros([])}
        fetches["logits"] = self._decoder.decoded_logits

        return LogitsExecutable(self.all_coders, fetches,
                                self._decoder.vocabulary,
                                self._normalize,
                                self._pick_index)

    @property
    def loss_names(self) -> List[str]:
        return ["train_loss", "runtime_loss"]
| {
"repo_name": "bastings/neuralmonkey",
"path": "neuralmonkey/runners/logits_runner.py",
"copies": "1",
"size": "5409",
"license": "bsd-3-clause",
"hash": 5557953466234584000,
"line_mean": 37.3617021277,
"line_max": 79,
"alpha_frac": 0.570160843,
"autogenerated": false,
"ratio": 4.580016934801016,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00022653652410919417,
"num_lines": 141
} |
"""A runner outputing logits or normalized distriution from a decoder."""
from typing import Dict, List, Optional
from typeguard import check_argument_types
import numpy as np
import tensorflow as tf
from neuralmonkey.decoders.classifier import Classifier
from neuralmonkey.decorators import tensor
from neuralmonkey.runners.base_runner import BaseRunner
# pylint: disable=too-few-public-methods
class LogitsRunner(BaseRunner[Classifier]):
    """A runner which takes the output from decoder.decoded_logits.

    The logits / normalized probabilities are output as tab-separated
    string values. If the decoder produces a list of logits (as the
    recurrent decoder does), the tab-separated arrays are separated with
    commas. Alternatively, we may be interested in a single distribution
    dimension.
    """

    class Executable(BaseRunner.Executable["LogitsRunner"]):
        """Executable collecting and formatting the fetched logits."""

        def collect_results(self, results: List[Dict]) -> None:
            """Convert fetched logits into tab-separated string outputs.

            Raises:
                ValueError: If more than one execution result is given.
            """
            if len(results) != 1:
                raise ValueError("LogitsRunner needs exactly 1 execution "
                                 "result, got {}".format(len(results)))
            train_loss = results[0]["train_loss"]
            runtime_loss = results[0]["runtime_loss"]

            # logits_list in shape (time, batch, vocab)
            logits_list = results[0]["logits"]

            # outputs are lists of strings (batch, time)
            outputs = [[] for _ in logits_list[0]]  # type: List[List[str]]
            for time_step in logits_list:
                for logits, output_list in zip(time_step, outputs):
                    if self.executor.normalize:
                        # Softmax over the vocabulary axis.
                        logits = np.exp(logits) / np.sum(np.exp(logits),
                                                         axis=0)
                    # Bug fix: compare against None so that pick_index == 0
                    # (a valid vocabulary index) selects the single value
                    # instead of falling through to the full distribution.
                    if self.executor.pick_index is not None:
                        instance_logits = str(
                            logits[self.executor.pick_index])
                    else:
                        instance_logits = ",".join(str(l) for l in logits)
                    output_list.append(instance_logits)

            str_outputs = [["\t".join(l)] for l in outputs]
            self.set_runner_result(outputs=str_outputs,
                                   losses=[train_loss, runtime_loss])

    def __init__(self,
                 output_series: str,
                 decoder: Classifier,
                 normalize: bool = True,
                 pick_index: int = None,
                 pick_value: str = None) -> None:
        """Initialize the logits runner.

        Args:
            output_series: Name of the series produced by the runner.
            decoder: A decoder having logits.
            normalize: Flag whether the logits should be normalized with
                softmax.
            pick_index: If not None, it specifies the index of the logit or the
                probability that should be on output.
            pick_value: If not None, it specifies a value from the decoder's
                vocabulary whose logit or probability should be on output.

        Raises:
            ValueError: If both pick_index and pick_value are given, or if
                pick_value is not in the decoder's vocabulary.
        """
        check_argument_types()
        super().__init__(output_series, decoder)

        if pick_index is not None and pick_value is not None:
            raise ValueError("Either a pick index or a vocabulary value can "
                             "be specified, not both at the same time.")

        self.pick_index = None  # type: Optional[int]
        self.normalize = normalize
        if pick_value is not None:
            # Translate the vocabulary word into its index.
            if pick_value in self.decoder.vocabulary:
                self.pick_index = self.decoder.vocabulary.index_to_word.index(
                    pick_value)
            else:
                raise ValueError(
                    "Value '{}' is not in vocabulary of decoder '{}'".format(
                        pick_value, decoder.name))
        else:
            self.pick_index = pick_index

    @tensor
    def fetches(self) -> Dict[str, tf.Tensor]:
        """Tensors to fetch: the decoded logits and both losses."""
        return {"logits": self.decoder.decoded_logits,
                "train_loss": self.decoder.train_loss,
                "runtime_loss": self.decoder.runtime_loss}

    @property
    def loss_names(self) -> List[str]:
        return ["train_loss", "runtime_loss"]
| {
"repo_name": "ufal/neuralmonkey",
"path": "neuralmonkey/runners/logits_runner.py",
"copies": "1",
"size": "4188",
"license": "bsd-3-clause",
"hash": -2792838528841022500,
"line_mean": 38.8857142857,
"line_max": 79,
"alpha_frac": 0.5768863419,
"autogenerated": false,
"ratio": 4.61742006615215,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0002857142857142857,
"num_lines": 105
} |
"""A runner outputing logits or normalized distriution from a decoder."""
from typing import Dict, List, Set, Optional
from typeguard import check_argument_types
import numpy as np
import tensorflow as tf
from neuralmonkey.decoders.classifier import Classifier
from neuralmonkey.runners.base_runner import (
BaseRunner, Executable, FeedDict, ExecutionResult, NextExecute)
from neuralmonkey.model.model_part import ModelPart
from neuralmonkey.vocabulary import Vocabulary
class LogitsExecutable(Executable):
    """Executable fetching decoder logits and formatting them as strings."""

    def __init__(self,
                 all_coders: Set[ModelPart],
                 fetches: FeedDict,
                 vocabulary: Vocabulary,
                 normalize: bool,
                 pick_index: Optional[int]) -> None:
        self._all_coders = all_coders
        self._fetches = fetches
        self._vocabulary = vocabulary
        self._normalize = normalize
        self._pick_index = pick_index

        self.result = None  # type: Optional[ExecutionResult]

    def next_to_execute(self) -> NextExecute:
        """Get the feedables and tensors to run."""
        return self._all_coders, self._fetches, []

    def collect_results(self, results: List[Dict]) -> None:
        """Convert fetched logits into tab-separated string outputs.

        Raises:
            ValueError: If more than one execution result is provided.
        """
        if len(results) != 1:
            raise ValueError("LogitsRunner needs exactly 1 execution result, "
                             "got {}".format(len(results)))
        train_loss = results[0]["train_loss"]
        runtime_loss = results[0]["runtime_loss"]

        # logits_list in shape (time, batch, vocab)
        logits_list = results[0]["logits"]

        # outputs are lists of strings (batch, time)
        outputs = [[] for _ in logits_list[0]]  # type: List[List[str]]
        for time_step in logits_list:
            for logits, output_list in zip(time_step, outputs):
                if self._normalize:
                    # Softmax over the vocabulary axis.
                    logits = np.exp(logits) / np.sum(np.exp(logits), axis=0)
                # Bug fix: compare against None so that pick_index == 0 (a
                # valid vocabulary index) selects the single value instead of
                # falling through to the full-distribution branch.
                if self._pick_index is not None:
                    instance_logits = str(logits[self._pick_index])
                else:
                    instance_logits = ",".join(str(l) for l in logits)
                output_list.append(instance_logits)

        str_outputs = [["\t".join(l)] for l in outputs]

        self.result = ExecutionResult(
            outputs=str_outputs,
            losses=[train_loss, runtime_loss],
            scalar_summaries=None,
            histogram_summaries=None,
            image_summaries=None)
# pylint: disable=too-few-public-methods
class LogitsRunner(BaseRunner[Classifier]):
    """A runner which takes the output from decoder.decoded_logits.

    The logits / normalized probabilities are output as tab-separated
    string values. If the decoder produces a list of logits (as the
    recurrent decoder does), the tab-separated arrays are separated with
    commas. Alternatively, we may be interested in a single distribution
    dimension.
    """
    def __init__(self,
                 output_series: str,
                 decoder: Classifier,
                 normalize: bool = True,
                 pick_index: int = None,
                 pick_value: str = None) -> None:
        """Initialize the logits runner.

        Args:
            output_series: Name of the series produced by the runner.
            decoder: A decoder having logits.
            normalize: Flag whether the logits should be normalized with
                softmax.
            pick_index: If not None, it specifies the index of the logit or the
                probability that should be on output.
            pick_value: If not None, it specifies a value from the decoder's
                vocabulary whose logit or probability should be on output.

        Raises:
            ValueError: If both pick_index and pick_value are given, or if
                pick_value is not in the decoder's vocabulary.
        """
        check_argument_types()
        BaseRunner[Classifier].__init__(self, output_series, decoder)

        if pick_index is not None and pick_value is not None:
            raise ValueError("Either a pick index or a vocabulary value can "
                             "be specified, not both at the same time.")

        self._pick_index = None  # type: Optional[int]
        self._normalize = normalize
        if pick_value is not None:
            # Translate the vocabulary word into its index.
            if pick_value in self._decoder.vocabulary:
                vocab_map = self._decoder.vocabulary.word_to_index
                self._pick_index = vocab_map[pick_value]
            else:
                raise ValueError(
                    "Value '{}' is not in vocabulary of decoder '{}'".format(
                        pick_value, decoder.name))
        else:
            self._pick_index = pick_index

    # pylint: disable=unused-argument
    def get_executable(self,
                       compute_losses: bool,
                       summaries: bool,
                       num_sessions: int) -> LogitsExecutable:
        """Build an executable fetching the logits and (optional) losses."""
        fetches = {"logits": self._decoder.decoded_logits,
                   "train_loss": tf.zeros([]),
                   "runtime_loss": tf.zeros([])}
        if compute_losses:
            fetches["train_loss"] = self._decoder.train_loss
            fetches["runtime_loss"] = self._decoder.runtime_loss

        return LogitsExecutable(
            self.all_coders, fetches, self._decoder.vocabulary,
            self._normalize, self._pick_index)
    # pylint: enable=unused-argument

    @property
    def loss_names(self) -> List[str]:
        return ["train_loss", "runtime_loss"]
| {
"repo_name": "juliakreutzer/bandit-neuralmonkey",
"path": "neuralmonkey/runners/logits_runner.py",
"copies": "1",
"size": "5368",
"license": "bsd-3-clause",
"hash": 6776460654924637000,
"line_mean": 37.0709219858,
"line_max": 79,
"alpha_frac": 0.5890461997,
"autogenerated": false,
"ratio": 4.480801335559265,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5569847535259265,
"avg_score": null,
"num_lines": null
} |
"""A runner that prints out the input representation from an encoder."""
# pylint: disable=unused-import
from typing import Dict, List, Optional
# pylint: enable=unused-import
import tensorflow as tf
from neuralmonkey.model.model_part import ModelPart
from neuralmonkey.runners.base_runner import (BaseRunner, Executable,
ExecutionResult, NextExecute)
class RepresentationExecutable(Executable):
def __init__(self, prev_coders: List[ModelPart],
encoded: tf.Tensor,
used_session: int) -> None:
self._prev_coders = prev_coders
self._encoded = encoded
self._used_session = used_session
self.result = None # type: Optional[ExecutionResult]
def next_to_execute(self) -> NextExecute:
return self._prev_coders, {"encoded": self._encoded}, {}
def collect_results(self, results: List[Dict]) -> None:
if self._used_session > len(results):
raise ValueError(("Session id {} is higher than number of used "
"TensorFlow session ({}).").format(
self._used_session, len(results)))
vectors = results[self._used_session]['encoded']
self.result = ExecutionResult(
outputs=vectors.tolist(),
losses=[],
scalar_summaries=None,
histogram_summaries=None,
image_summaries=None)
class RepresentationRunner(BaseRunner):
"""Runner printing out representation from a encoder.
Using this runner is the way how to get input / other data representation
out from Neural Monkey.
"""
def __init__(self,
output_series: str,
encoder: ModelPart,
used_session: int = 0) -> None:
"""Initialize the representation runner.
Args:
output_series: Name of the output seriesi with vectors.
encoder: Used encoder.
used_session: Id of the TensorFlow session used in case of model
ensembles.
"""
super(RepresentationRunner, self).__init__(output_series, encoder)
self._used_session = used_session
self._encoded = encoder.encoded # type: ignore
def get_executable(self, compute_losses=False,
summaries=True) -> RepresentationExecutable:
return RepresentationExecutable(self.all_coders,
self._encoded,
self._used_session)
@property
def loss_names(self) -> List[str]:
return []
| {
"repo_name": "bastings/neuralmonkey",
"path": "neuralmonkey/runners/representation_runner.py",
"copies": "1",
"size": "2640",
"license": "bsd-3-clause",
"hash": 3695164450978232000,
"line_mean": 33.7368421053,
"line_max": 77,
"alpha_frac": 0.5875,
"autogenerated": false,
"ratio": 4.731182795698925,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5818682795698925,
"avg_score": null,
"num_lines": null
} |
"""arXiv docstring to be inserted here."""
from feedparser import parse
from arxiv2bib import Cli
from engine import Engine
from result import Result
class ArxivResult(Result):
"""arXiv Result Parser"""
def get_title(self):
"""Return the publication title"""
try:
return self.parsed_obj['title'].encode('utf-8')
except:
return '(no title)'.encode('utf-8')
def get_authors(self):
"""Return a list of strings with the authors' names."""
authors = []
if 'authors' not in self.parsed_obj:
return ''
for i, author in enumerate(self.parsed_obj['authors']):
authors.append(author['name'].encode('utf-8'))
return authors
def get_year(self):
"""Return the publication year of the citation. """
if 'published_parsed' in self.parsed_obj:
return str(self.parsed_obj['published_parsed'].tm_year)
return ''
def get_id(self):
"""Extract and return the arXiv identifier."""
if 'id' in self.parsed_obj:
return self.parsed_obj['id'].split('/')[-1]
return ''
class ArxivEngine(Engine):
"""arXiv Engine"""
query_url = "http://export.arxiv.org/api/query?search_query=all:{}"
def fetch_results(self, contents):
"""
See http://arxiv.org/help/api/index for more on the structure of the
arXiv atomxml response.
"""
citations = []
contents_dict = parse(contents)
try:
for item in contents_dict['entries']:
citations.append(ArxivResult(item))
except KeyError:
raise ValueError("AtomXML response was not correctly formatted.")
return citations
def get_citation(self, identifier):
"""
Return the bib entry using nathan grigg's arxiv2bib
(http://nathangrigg.github.io/arxiv2bib/)
"""
cli = Cli([identifier])
cli.run()
return "\n".join(cli.output)
| {
"repo_name": "dimalik/pyopl",
"path": "pyopl/engines/arxiv.py",
"copies": "1",
"size": "2021",
"license": "bsd-3-clause",
"hash": -2739419853610217500,
"line_mean": 29.1641791045,
"line_max": 77,
"alpha_frac": 0.5853537853,
"autogenerated": false,
"ratio": 4.042,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.003917910447761195,
"num_lines": 67
} |
# A safe tracking script: the servos are created separately, their limits
# are programmed, and they are then "bound" to the tracking service.
tracker = Runtime.create("tracker","Tracking")
# Create servos BEFORE starting the tracking service so we can set names
# and limits before tracking begins.
rotation = Runtime.create("rotation","Servo")
neck = Runtime.create("neck","Servo")
arduino = Runtime.create("arduino","Arduino")
arduino.connect("COM3", 57600, 8, 1, 0)
eye = Runtime.create("eye","OpenCV")
eye.setCameraIndex(1)
# Attach the servos to the Arduino on pins 13 (pan) and 12 (tilt).
# (An earlier comment claimed pins 3 and 9 -- the code uses 13 and 12.)
arduino.attach(rotation.getName() , 13)
arduino.attach(neck.getName(), 12)
# Set safety limits -- the servos will not go beyond these positions.
rotation.setMinMax(50,170)
neck.setMinMax(50,170)
# Bind the pre-configured servos to the tracking service. If not
# specified, the tracking service would create servos named x and y.
tracker.attach(arduino)
tracker.attachServos(rotation, neck)
tracker.attach(eye)
tracker.setRestPosition(90, 90)
# setXMinMax & setYMinMax (min, max) set the range of values sent to the
# servos -- typically unnecessary because the tracking service pulls the
# min/max positions from the servos it attaches to.
tracker.setXMinMax(10, 170)
tracker.setYMinMax(10, 170)
# setServoPins (x, y) sets the pins of the pan and tilt servos respectively.
tracker.setServoPins(13,12)
# tracker.setCameraIndex(1) #change cameras if necessary
tracker.startService()
tracker.trackLKPoint()
#tracker.learnBackground()
| {
"repo_name": "sstocker46/pyrobotlab",
"path": "home/Alessandruino/Tracking.safe.modified.worky.py",
"copies": "5",
"size": "1615",
"license": "apache-2.0",
"hash": -1108815517122421500,
"line_mean": 30.0576923077,
"line_max": 72,
"alpha_frac": 0.7578947368,
"autogenerated": false,
"ratio": 3.2758620689655173,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.11264324124663272,
"num_lines": 52
} |
"""A sample app that operates on GCS files with blobstore API."""
import cloudstorage
from google.appengine.api import app_identity
from google.appengine.ext import blobstore
from google.appengine.ext.webapp import blobstore_handlers
import webapp2
# This handler creates a file in Cloud Storage using the cloudstorage
# client library and then reads the data back using the Blobstore API.
class CreateAndReadFileHandler(webapp2.RequestHandler):

    def get(self):
        """Create a GCS object, read it back via Blobstore, echo it, delete it."""
        # Get the default Cloud Storage Bucket name and create a file name for
        # the object in Cloud Storage.
        bucket = app_identity.get_default_gcs_bucket_name()
        # Cloud Storage file names are in the format /bucket/object.
        filename = '/{}/blobstore_demo'.format(bucket)
        # Create a file in Google Cloud Storage and write something to it.
        with cloudstorage.open(filename, 'w') as filehandle:
            filehandle.write('abcde\n')
        # In order to read the contents of the file using the Blobstore API,
        # you must create a blob_key from the Cloud Storage file name.
        # Blobstore expects the filename to be in the format of:
        # /gs/bucket/object
        blobstore_filename = '/gs{}'.format(filename)
        blob_key = blobstore.create_gs_key(blobstore_filename)
        # Read the file's contents using the Blobstore API.
        # The last two parameters specify the start and end index of bytes we
        # want to read.
        data = blobstore.fetch_data(blob_key, 0, 6)
        # Write the contents to the response.
        self.response.headers['Content-Type'] = 'text/plain'
        self.response.write(data)
        # Delete the file from Google Cloud Storage using the blob_key.
        blobstore.delete(blob_key)
# This handler creates a file in Cloud Storage using the cloudstorage
# client library and then serves the file back using the Blobstore API.
class CreateAndServeFileHandler(blobstore_handlers.BlobstoreDownloadHandler):

    def get(self):
        """Create a GCS object and stream it to the client via send_blob."""
        # Get the default Cloud Storage Bucket name and create a file name for
        # the object in Cloud Storage.
        bucket = app_identity.get_default_gcs_bucket_name()
        # Cloud Storage file names are in the format /bucket/object.
        filename = '/{}/blobstore_serving_demo'.format(bucket)
        # Create a file in Google Cloud Storage and write something to it.
        with cloudstorage.open(filename, 'w') as filehandle:
            filehandle.write('abcde\n')
        # In order to read the contents of the file using the Blobstore API,
        # you must create a blob_key from the Cloud Storage file name.
        # Blobstore expects the filename to be in the format of:
        # /gs/bucket/object
        blobstore_filename = '/gs{}'.format(filename)
        blob_key = blobstore.create_gs_key(blobstore_filename)
        # BlobstoreDownloadHandler serves the file from Google Cloud Storage to
        # your computer using blob_key.
        self.send_blob(blob_key)
# WSGI application: '/' and '/blobstore/read' exercise the read-back handler,
# while '/blobstore/serve' streams the object to the client.
app = webapp2.WSGIApplication([
    ('/', CreateAndReadFileHandler),
    ('/blobstore/read', CreateAndReadFileHandler),
    ('/blobstore/serve', CreateAndServeFileHandler)], debug=True)
| {
"repo_name": "sharbison3/python-docs-samples",
"path": "appengine/standard/blobstore/gcs/main.py",
"copies": "8",
"size": "3203",
"license": "apache-2.0",
"hash": -6226446186858241000,
"line_mean": 41.1447368421,
"line_max": 79,
"alpha_frac": 0.6912269747,
"autogenerated": false,
"ratio": 4.2367724867724865,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 76
} |
"""A sample app that operates on GCS files with blobstore API's BlobReader."""
import cloudstorage
from google.appengine.api import app_identity
from google.appengine.ext import blobstore
import webapp2
class BlobreaderHandler(webapp2.RequestHandler):
    """Demonstrates the three BlobReader constructor forms and basic reads."""

    def get(self):
        """Create a GCS object, read it through BlobReader, then delete it."""
        # Get the default Cloud Storage Bucket name and create a file name for
        # the object in Cloud Storage.
        bucket = app_identity.get_default_gcs_bucket_name()
        # Cloud Storage file names are in the format /bucket/object.
        filename = '/{}/blobreader_demo'.format(bucket)
        # Create a file in Google Cloud Storage and write something to it.
        with cloudstorage.open(filename, 'w') as filehandle:
            filehandle.write('abcde\n')
        # In order to read the contents of the file using the Blobstore API,
        # you must create a blob_key from the Cloud Storage file name.
        # Blobstore expects the filename to be in the format of:
        # /gs/bucket/object
        blobstore_filename = '/gs{}'.format(filename)
        blob_key = blobstore.create_gs_key(blobstore_filename)
        # [START blob_reader]
        # Instantiate a BlobReader for a given Blobstore blob_key.
        blob_reader = blobstore.BlobReader(blob_key)
        # Instantiate a BlobReader for a given Blobstore blob_key, setting the
        # buffer size to 1 MB.  (The earlier instances are deliberately
        # overwritten — this is sample code showing each constructor form.)
        blob_reader = blobstore.BlobReader(blob_key, buffer_size=1048576)
        # Instantiate a BlobReader for a given Blobstore blob_key, setting the
        # initial read position.
        blob_reader = blobstore.BlobReader(blob_key, position=0)
        # Read the entire value into memory. This may take a while depending
        # on the size of the value and the size of the read buffer, and is not
        # recommended for large values.
        blob_reader_data = blob_reader.read()
        # Write the contents to the response.
        self.response.headers['Content-Type'] = 'text/plain'
        self.response.write(blob_reader_data)
        # Set the read position back to 0, then read and write 3 bytes.
        blob_reader.seek(0)
        blob_reader_data = blob_reader.read(3)
        self.response.write(blob_reader_data)
        self.response.write('\n')
        # Set the read position back to 0, then read and write one line (up to
        # and including a '\n' character) at a time.
        blob_reader.seek(0)
        for line in blob_reader:
            self.response.write(line)
        # [END blob_reader]
        # Delete the file from Google Cloud Storage using the blob_key.
        blobstore.delete(blob_key)
# WSGI application: both routes exercise the same BlobReader demo handler.
app = webapp2.WSGIApplication([
    ('/', BlobreaderHandler),
    ('/blobreader', BlobreaderHandler)], debug=True)
| {
"repo_name": "canglade/NLP",
"path": "appengine/standard/blobstore/blobreader/main.py",
"copies": "8",
"size": "2738",
"license": "apache-2.0",
"hash": 2486095787091167000,
"line_mean": 38.6811594203,
"line_max": 78,
"alpha_frac": 0.661431702,
"autogenerated": false,
"ratio": 4.074404761904762,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8735836463904763,
"avg_score": null,
"num_lines": null
} |
# A sample Clever Instant Login implementation.
# Uses the Bottle framework and raw HTTP requests to demonstrate the OAuth2 flow.
import base64
import json
import os
import requests
import urllib
from bottle import app, redirect, request, route, run, template
from beaker.middleware import SessionMiddleware
# Obtain your Client ID and secret from your Clever developer dashboard at https://account.clever.com/partner/applications
CLIENT_ID = os.environ['CLIENT_ID']
CLIENT_SECRET = os.environ['CLIENT_SECRET']
# Serve on $PORT when provided (e.g. by a PaaS), otherwise default to 2587.
if 'PORT' in os.environ:
    PORT = os.environ['PORT']
else:
    PORT = 2587
# Clever redirect URIs must be preregistered on your developer dashboard.
# If using the default PORT set above, make sure to register "http://localhost:2587/oauth"
REDIRECT_URI = 'http://localhost:{port}/oauth'.format(port=PORT)
CLEVER_OAUTH_URL = 'https://clever.com/oauth'
CLEVER_API_BASE = 'https://api.clever.com'
# Use the bottle session middleware to store an object to represent a "logged in" state.
# Sessions live in memory and expire after 300 seconds.
session_opts = {
    'session.type': 'memory',
    'session.cookie_expires': 300,
    'session.auto': True
}
myapp = SessionMiddleware(app(), session_opts)
# Our home page route will create a Clever Instant Login button.
@route('/')
def index():
    """Render the 'Sign in with Clever' button linking to Clever's OAuth page."""
    # Query string for Clever's authorize endpoint; the redirect comes back
    # to our /oauth route below.
    encoded_string = urllib.urlencode({
        'response_type': 'code',
        'redirect_uri': REDIRECT_URI,
        'client_id': CLIENT_ID,
        'scope': 'read:user_id read:sis'
    })
    return template("<h1>Login!<br/><br/> \
<a href='https://clever.com/oauth/authorize?" + encoded_string +
"'><img src='http://assets.clever.com/sign-in-with-clever/sign-in-with-clever-small.png'/></a></h1>"
    )
# Our OAuth 2.0 redirect URI location corresponds to what we've set above as our REDIRECT_URI
# When this route is executed, we will retrieve the "code" parameter and exchange it for a Clever access token.
# After receiving the access token, we use it with api.clever.com/me to determine its owner,
# save our session state, and redirect our user to our application.
@route('/oauth')
def oauth():
    """OAuth2 callback: exchange code for a token, identify the user, log in."""
    code = request.query.code
    payload = {
        'code': code,
        'grant_type': 'authorization_code',
        'redirect_uri': REDIRECT_URI
    }
    # HTTP Basic auth with "client_id:client_secret", base64-encoded.
    headers = {
        'Authorization': 'Basic {base64string}'.format(base64string=
            base64.b64encode(CLIENT_ID + ':' + CLIENT_SECRET)),
        'Content-Type': 'application/json',
    }
    # Get Token
    try:
        response = requests.post(CLEVER_OAUTH_URL + '/tokens', data=json.dumps(payload), headers=headers).json()
    except requests.exceptions.RequestException as e:
        # NOTE(review): returning the exception renders it to the browser —
        # acceptable for a sample, not for production.
        return e
    token = response['access_token']
    bearer_headers = {
        'Authorization': 'Bearer {token}'.format(token=token)
    }
    # Validate the returned token
    try:
        result = requests.get(CLEVER_OAUTH_URL + '/tokeninfo', headers=bearer_headers).json()
        if result['client_id'] != CLIENT_ID:
            return "Returned client_id does not match app client_id. Token is invalid."
    except requests.exceptions.RequestException as e:
        return e
    # Determine who the token is for
    try:
        result = requests.get(CLEVER_API_BASE + '/me', headers=bearer_headers).json()
    except requests.exceptions.RequestException as e:
        return e
    data = result['data']
    # Only handle student logins for our app (other types include teachers and districts)
    if data['type'] != 'student':
        return template("You must be a student to log in to this app but you are a {{type}}.", type=data['type'])
    else:
        if 'name' in data: #SIS scope
            nameObject = data['name']
        else:
            #For student scopes, we'll have to take an extra step to get name data.
            studentId = data['id']
            student = requests.get(CLEVER_API_BASE + '/v1.1/students/{studentId}'.format(studentId=studentId),
                headers=bearer_headers).json()
            nameObject = student['data']['name']
        # Persist the user's name in the beaker session as the "logged in" state.
        session = request.environ.get('beaker.session')
        session['nameObject'] = nameObject
        redirect('/app')
# Our application logic lives here and is reserved only for users we've authenticated and identified.
# NOTE(review): this view function is named `app`, which shadows the `app`
# imported from bottle. It happens to work because bottle's `app()` is only
# called earlier (when building `myapp`), but renaming the view would be safer.
@route('/app')
def app():
    """Greet the logged-in student, or prompt for login if no session exists."""
    session = request.environ.get('beaker.session')
    if 'nameObject' in session:
        nameObject = session['nameObject']
        return template("You are now logged in as {{name}}", name=nameObject['first'] + ' ' + nameObject['last'])
    else:
        return "You must be logged in to see this page! Click <a href='/'>here</a> to log in."

if __name__ == '__main__':
    # Serve the session-wrapped WSGI app, not the bare bottle app.
    run(app=myapp, host='localhost', port=PORT)
| {
"repo_name": "kurtmansperger/Clever-Instant-Login-Python",
"path": "server.py",
"copies": "1",
"size": "4771",
"license": "mit",
"hash": 3626269856689339000,
"line_mean": 34.6044776119,
"line_max": 122,
"alpha_frac": 0.6491301614,
"autogenerated": false,
"ratio": 3.792527821939587,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4941657983339587,
"avg_score": null,
"num_lines": null
} |
"""A sample client for the OpenBCI UDP server."""
from __future__ import print_function
import argparse
try:
import cPickle as pickle
except ImportError:
import _pickle as pickle
import json
import sys
sys.path.append('..') # help python find cyton.py relative to scripts folder
import socket
# Command-line options: wire format (--json vs pickled objects) and the
# UDP endpoint to bind.
parser = argparse.ArgumentParser(
    description='Run a UDP client listening for streaming OpenBCI data.')
parser.add_argument(
    '--json',
    action='store_true',
    help='Handle JSON data rather than pickled Python objects.')
parser.add_argument(
    '--host',
    help='The host to listen on.',
    default='127.0.0.1')
parser.add_argument(
    '--port',
    help='The port to listen on.',
    default='8888')
class UDPClient(object):
    """Receives streaming OpenBCI samples over a bound UDP socket."""

    def __init__(self, ip, port, json):
        self.ip = ip
        self.port = port
        # When true, datagrams carry JSON; otherwise pickled sample objects.
        self.json = json
        # Internet (AF_INET) datagram (UDP) socket, bound immediately.
        self.client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.client.bind((ip, port))

    def start_listening(self, callback=None):
        """Receive datagrams forever, printing each decoded sample."""
        while True:
            data, addr = self.client.recvfrom(1024)
            print("data")
            if not self.json:
                # Note that sample is an OpenBCISample object.
                sample = pickle.loads(data)
                print(sample.id)
                print(sample.channel_data)
            else:
                # In JSON mode we only receive channel data.
                sample = json.loads(data)
                print(data)
# Parse CLI options, bind the socket, and block listening forever.
args = parser.parse_args()
client = UDPClient(args.host, int(args.port), args.json)
client.start_listening()
| {
"repo_name": "OpenBCI/OpenBCI_Python",
"path": "scripts/udp_client.py",
"copies": "1",
"size": "1622",
"license": "mit",
"hash": 4980071161417001000,
"line_mean": 26.0333333333,
"line_max": 77,
"alpha_frac": 0.6066584464,
"autogenerated": false,
"ratio": 3.965770171149144,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5072428617549144,
"avg_score": null,
"num_lines": null
} |
"""A sample client for the OpenBCI UDP server."""
import argparse
import cPickle as pickle
import json
import sys;
sys.path.append(
'..') # help python find open_bci_v3.py relative to scripts folder
import open_bci_v3 as open_bci
import socket
# Command-line options: wire format (--json vs pickled objects) and the
# UDP endpoint to bind.
parser = argparse.ArgumentParser(
    description='Run a UDP client listening for streaming OpenBCI data.')
parser.add_argument(
    '--json',
    action='store_true',
    help='Handle JSON data rather than pickled Python objects.')
parser.add_argument(
    '--host',
    help='The host to listen on.',
    default='127.0.0.1')
parser.add_argument(
    '--port',
    help='The port to listen on.',
    default='8888')
class UDPClient(object):
    """Receives streaming OpenBCI samples over a bound UDP socket.

    Python 2 code (print statements); `json` here is a boolean flag that
    shadows the module name only as an attribute, not in method scope.
    """

    def __init__(self, ip, port, json):
        self.ip = ip
        self.port = port
        # When true, datagrams carry JSON; otherwise pickled sample objects.
        self.json = json
        self.client = socket.socket(
            socket.AF_INET, # Internet
            socket.SOCK_DGRAM)
        self.client.bind((ip, port))

    def start_listening(self, callback=None):
        """Receive datagrams forever, printing each decoded sample."""
        while True:
            data, addr = self.client.recvfrom(1024)
            print("data")
            if self.json:
                sample = json.loads(data)
                # In JSON mode we only recieve channel data.
                print data
            else:
                sample = pickle.loads(data)
                # Note that sample is an OpenBCISample object.
                print sample.id
                print sample.channel_data
# Parse CLI options, bind the socket, and block listening forever.
args = parser.parse_args()
client = UDPClient(args.host, int(args.port), args.json)
client.start_listening()
| {
"repo_name": "neurotechuoft/Wall-EEG",
"path": "Code/OpenBCIPy/src/scripts/udp_client.py",
"copies": "1",
"size": "1564",
"license": "mit",
"hash": 8856808865530988000,
"line_mean": 26.9285714286,
"line_max": 73,
"alpha_frac": 0.6029411765,
"autogenerated": false,
"ratio": 3.890547263681592,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.997679022863891,
"avg_score": 0.0033396423085363175,
"num_lines": 56
} |
# A sample context menu handler.
# Adds a 'Hello from Python' menu entry to .py files. When clicked, a
# simple message box is displayed.
#
# To demostrate:
# * Execute this script to register the context menu.
# * Open Windows Explorer, and browse to a directory with a .py file.
# * Right-Click on a .py file - locate and click on 'Hello from Python' on
# the context menu.
import pythoncom
from win32com.shell import shell, shellcon
import win32gui
import win32con
class ShellExtension:
    """COM server implementing an Explorer context-menu shell extension.

    Registered against .py files; adds 'Hello from Python' entries and shows
    a message box when one is invoked.  Implements IShellExtInit (to receive
    the selection) and IContextMenu (to populate and handle the menu).
    """
    _reg_progid_ = "Python.ShellExtension.ContextMenu"
    _reg_desc_ = "Python Sample Shell Extension (context menu)"
    _reg_clsid_ = "{CED0336C-C9EE-4a7f-8D7F-C660393C381F}"
    _com_interfaces_ = [shell.IID_IShellExtInit, shell.IID_IContextMenu]
    _public_methods_ = shellcon.IContextMenu_Methods + shellcon.IShellExtInit_Methods

    def Initialize(self, folder, dataobj, hkey):
        """IShellExtInit: remember the data object describing the selection."""
        print("Init", folder, dataobj, hkey)
        self.dataobj = dataobj

    def QueryContextMenu(self, hMenu, indexMenu, idCmdFirst, idCmdLast, uFlags):
        """IContextMenu: insert our items; returns the count of items added."""
        print("QCM", hMenu, indexMenu, idCmdFirst, idCmdLast, uFlags)
        # Query the items clicked on via the CF_HDROP clipboard format.
        format_etc = win32con.CF_HDROP, None, 1, -1, pythoncom.TYMED_HGLOBAL
        sm = self.dataobj.GetData(format_etc)
        num_files = shell.DragQueryFile(sm.data_handle, -1)
        if num_files > 1:
            msg = "&Hello from Python (with %d files selected)" % num_files
        else:
            fname = shell.DragQueryFile(sm.data_handle, 0)
            msg = "&Hello from Python (with '%s' selected)" % fname
        idCmd = idCmdFirst
        items = ['First Python content menu item']
        if (uFlags & 0x000F) == shellcon.CMF_NORMAL:  # Check == here, since CMF_NORMAL=0
            print("CMF_NORMAL...")
            items.append(msg)
        elif uFlags & shellcon.CMF_VERBSONLY:
            print("CMF_VERBSONLY...")
            items.append(msg + " - shortcut")
        elif uFlags & shellcon.CMF_EXPLORE:
            print("CMF_EXPLORE...")
            items.append(msg + " - normal file, right-click in Explorer")
        elif uFlags & shellcon.CMF_DEFAULTONLY:
            # BUGFIX: the original referenced a bare CMF_DEFAULTONLY, which is
            # not defined in this module and raised NameError whenever none of
            # the earlier flag tests matched.
            print("CMF_DEFAULTONLY...\r\n")
        else:
            print("** unknown flags", uFlags)
        # Bracket our entries between separators.
        win32gui.InsertMenu(hMenu, indexMenu,
                            win32con.MF_SEPARATOR | win32con.MF_BYPOSITION,
                            0, None)
        indexMenu += 1
        for item in items:
            win32gui.InsertMenu(hMenu, indexMenu,
                                win32con.MF_STRING | win32con.MF_BYPOSITION,
                                idCmd, item)
            indexMenu += 1
            idCmd += 1
        win32gui.InsertMenu(hMenu, indexMenu,
                            win32con.MF_SEPARATOR | win32con.MF_BYPOSITION,
                            0, None)
        indexMenu += 1
        return idCmd - idCmdFirst  # Must return number of menu items we added.

    def InvokeCommand(self, ci):
        """IContextMenu: show a message box when any of our items is clicked."""
        mask, hwnd, verb, params, dir, nShow, hotkey, hicon = ci
        win32gui.MessageBox(hwnd, "Hello", "Wow", win32con.MB_OK)

    def GetCommandString(self, cmd, typ):
        # If GetCommandString returns the same string for all items then
        # the shell seems to ignore all but one. This is even true in
        # Win7 etc where there is no status bar (and hence this string seems
        # ignored)
        return "Hello from Python (cmd=%d)!!" % (cmd,)
def DllRegisterServer():
    """Register this CLSID as a context-menu handler for Python.File."""
    import winreg
    # CreateKey creates all intermediate keys, so the full path can be
    # opened in a single call.
    handler_key = winreg.CreateKey(
        winreg.HKEY_CLASSES_ROOT,
        "Python.File\\shellex\\ContextMenuHandlers\\PythonSample")
    winreg.SetValueEx(handler_key, None, 0, winreg.REG_SZ,
                      ShellExtension._reg_clsid_)
    print(ShellExtension._reg_desc_, "registration complete.")
def DllUnregisterServer():
    """Remove the context-menu registration; a missing key is not an error."""
    import winreg
    import errno
    try:
        winreg.DeleteKey(
            winreg.HKEY_CLASSES_ROOT,
            "Python.File\\shellex\\ContextMenuHandlers\\PythonSample")
    except WindowsError as details:
        # Already unregistered — only re-raise unexpected errors.
        if details.errno != errno.ENOENT:
            raise
    print(ShellExtension._reg_desc_, "unregistration complete.")
if __name__=='__main__':
    # Running the script (un)registers the COM server; pywin32 handles the
    # --register/--unregister command-line switches.
    from win32com.server import register
    register.UseCommandLine(ShellExtension,
                            finalize_register = DllRegisterServer,
                            finalize_unregister = DllUnregisterServer)
| {
"repo_name": "ArcherSys/ArcherSys",
"path": "Lib/site-packages/win32comext/shell/demos/servers/context_menu.py",
"copies": "10",
"size": "4442",
"license": "mit",
"hash": 9210508973588832000,
"line_mean": 41.3047619048,
"line_max": 90,
"alpha_frac": 0.6193156236,
"autogenerated": false,
"ratio": 3.7140468227424748,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9333362446342475,
"avg_score": null,
"num_lines": null
} |
# A sample context menu handler.
# Adds a menu item with sub menu to all files and folders, different options inside specified folder.
# When clicked a list of selected items is displayed.
#
# To demostrate:
# * Execute this script to register the context menu. `python context_menu.py --register`
# * Restart explorer.exe- in the task manager end process on explorer.exe. Then file > new task, then type explorer.exe
# * Open Windows Explorer, and browse to a file/directory.
# * Right-Click file/folder - locate and click on an option under 'Menu options'.
import os
import pythoncom
from win32com.shell import shell, shellcon
import win32gui
import win32con
import win32api
class ShellExtension:
    """COM server adding a 'Menu options' submenu (with icons) to Explorer.

    Python 2 code.  Registered for all files and folders; items differ when
    the selection lies inside self.folder.  NOTE(review): the source this was
    recovered from had its indentation stripped; the nesting below was
    reconstructed from the menu-position arguments and should be confirmed.
    """
    _reg_progid_ = "Python.ShellExtension.ContextMenu"
    _reg_desc_ = "Python Sample Shell Extension (context menu)"
    _reg_clsid_ = "{CED0336C-C9EE-4a7f-8D7F-C660393C381F}"
    _com_interfaces_ = [shell.IID_IShellExtInit, shell.IID_IContextMenu]
    _public_methods_ = shellcon.IContextMenu_Methods + shellcon.IShellExtInit_Methods

    def Initialize(self, folder, dataobj, hkey):
        # IShellExtInit: stash the selection and pre-load the submenu icon.
        print "Init", folder, dataobj, hkey
        win32gui.InitCommonControls()
        self.brand = "Menu options"
        # Items 1-5 only appear for selections under this folder.
        self.folder = "C:\\Users\\Paul\\"
        self.dataobj = dataobj
        self.hicon = self.prep_menu_icon(r"C:\path\to\icon.ico")

    def QueryContextMenu(self, hMenu, indexMenu, idCmdFirst, idCmdLast, uFlags):
        # IContextMenu: build the popup submenu; returns items added.
        print "QCM", hMenu, indexMenu, idCmdFirst, idCmdLast, uFlags
        # Query the items clicked on
        files = self.getFilesSelected()
        fname = files[0]
        idCmd = idCmdFirst
        isdir = os.path.isdir(fname)
        # True only when every selected path is under self.folder.
        in_folder = all([f_path.startswith(self.folder) for f_path in files])
        win32gui.InsertMenu(hMenu, indexMenu,
                            win32con.MF_SEPARATOR|win32con.MF_BYPOSITION,
                            0, None)
        indexMenu += 1
        # Branded popup submenu with an icon.
        menu = win32gui.CreatePopupMenu()
        win32gui.InsertMenu(hMenu, indexMenu, win32con.MF_STRING|win32con.MF_BYPOSITION|win32con.MF_POPUP, menu, self.brand)
        win32gui.SetMenuItemBitmaps(hMenu, menu, 0, self.hicon, self.hicon)
        # idCmd+=1
        indexMenu += 1
        if in_folder:
            if len(files) == 1:
                if isdir:
                    # Single directory selected.
                    win32gui.InsertMenu(menu, 0, win32con.MF_STRING, idCmd, "Item 1"); idCmd += 1
                else:
                    # Single file selected.
                    win32gui.InsertMenu(menu, 0, win32con.MF_STRING, idCmd, "Item 2")
                    win32gui.SetMenuItemBitmaps(menu, idCmd, 0, self.hicon, self.hicon)
                    idCmd += 1
            else:
                # Multiple items selected.
                win32gui.InsertMenu(menu, 0, win32con.MF_STRING, idCmd, "Item 3")
                win32gui.SetMenuItemBitmaps(menu, idCmd, 0, self.hicon, self.hicon)
                idCmd += 1
            if idCmd > idCmdFirst:
                # Something was added above — append the common tail items.
                win32gui.InsertMenu(menu, 1, win32con.MF_SEPARATOR, 0, None)
                win32gui.InsertMenu(menu, 2, win32con.MF_STRING, idCmd, "Item 4")
                win32gui.SetMenuItemBitmaps(menu, idCmd, 0, self.hicon, self.hicon)
                idCmd += 1
                win32gui.InsertMenu(menu, 3, win32con.MF_STRING, idCmd, "Item 5")
                win32gui.SetMenuItemBitmaps(menu, idCmd, 0, self.hicon, self.hicon)
                idCmd += 1
                win32gui.InsertMenu(menu, 4, win32con.MF_SEPARATOR, 0, None)
                win32gui.InsertMenu(menu, 5, win32con.MF_STRING|win32con.MF_DISABLED, idCmd, "Item 6")
                win32gui.SetMenuItemBitmaps(menu, idCmd, 0, self.hicon, self.hicon)
                idCmd += 1
        win32gui.InsertMenu(hMenu, indexMenu,
                            win32con.MF_SEPARATOR|win32con.MF_BYPOSITION,
                            0, None)
        indexMenu += 1
        return idCmd-idCmdFirst # Must return number of menu items we added.

    def getFilesSelected(self):
        # Decode the CF_HDROP selection into a list of absolute paths.
        format_etc = win32con.CF_HDROP, None, 1, -1, pythoncom.TYMED_HGLOBAL
        sm = self.dataobj.GetData(format_etc)
        num_files = shell.DragQueryFile(sm.data_handle, -1)
        files = []
        for i in xrange(num_files):
            fpath = shell.DragQueryFile(sm.data_handle, i)
            files.append(fpath)
        return files

    def prep_menu_icon(self, icon): #Couldn't get this to work with pngs, only ico
        # Render the .ico into a small bitmap suitable for SetMenuItemBitmaps.
        # First load the icon.
        ico_x = win32api.GetSystemMetrics(win32con.SM_CXSMICON)
        ico_y = win32api.GetSystemMetrics(win32con.SM_CYSMICON)
        hicon = win32gui.LoadImage(0, icon, win32con.IMAGE_ICON, ico_x, ico_y, win32con.LR_LOADFROMFILE)
        hdcBitmap = win32gui.CreateCompatibleDC(0)
        hdcScreen = win32gui.GetDC(0)
        hbm = win32gui.CreateCompatibleBitmap(hdcScreen, ico_x, ico_y)
        hbmOld = win32gui.SelectObject(hdcBitmap, hbm)
        # Fill the background.
        brush = win32gui.GetSysColorBrush(win32con.COLOR_MENU)
        win32gui.FillRect(hdcBitmap, (0, 0, 16, 16), brush)
        # unclear if brush needs to be feed. Best clue I can find is:
        # "GetSysColorBrush returns a cached brush instead of allocating a new
        # one." - implies no DeleteObject
        # draw the icon
        win32gui.DrawIconEx(hdcBitmap, 0, 0, hicon, ico_x, ico_y, 0, 0, win32con.DI_NORMAL)
        win32gui.SelectObject(hdcBitmap, hbmOld)
        win32gui.DeleteDC(hdcBitmap)
        return hbm

    def InvokeCommand(self, ci):
        # Demo action: show the selected paths in a message box.
        mask, hwnd, verb, params, dir, nShow, hotkey, hicon = ci
        win32gui.MessageBox(hwnd, str(self.getFilesSelected()), "Wow", win32con.MB_OK)

    def GetCommandString(self, cmd, typ):
        # If GetCommandString returns the same string for all items then
        # the shell seems to ignore all but one. This is even true in
        # Win7 etc where there is no status bar (and hence this string seems
        # ignored)
        return "Hello from Python (cmd=%d)!!" % (cmd,)
def DllRegisterServer():
    # Register the handler for both folders ("Folder\shellex") and all
    # files ("*\shellex").  Python 2 (_winreg, print statement).
    import _winreg
    folder_key = _winreg.CreateKey(_winreg.HKEY_CLASSES_ROOT,
                                   "Folder\\shellex")
    folder_subkey = _winreg.CreateKey(folder_key, "ContextMenuHandlers")
    folder_subkey2 = _winreg.CreateKey(folder_subkey, "PythonSample")
    _winreg.SetValueEx(folder_subkey2, None, 0, _winreg.REG_SZ,
                       ShellExtension._reg_clsid_)
    file_key = _winreg.CreateKey(_winreg.HKEY_CLASSES_ROOT,
                                 "*\\shellex")
    file_subkey = _winreg.CreateKey(file_key, "ContextMenuHandlers")
    file_subkey2 = _winreg.CreateKey(file_subkey, "PythonSample")
    _winreg.SetValueEx(file_subkey2, None, 0, _winreg.REG_SZ,
                       ShellExtension._reg_clsid_)
    print ShellExtension._reg_desc_, "registration complete."
def DllUnregisterServer():
    # Remove both registrations; a missing key (ENOENT) is not an error.
    # NOTE(review): if the Folder key delete raises, the "*" key is never
    # removed — acceptable for a sample.
    import _winreg
    try:
        folder_key = _winreg.DeleteKey(_winreg.HKEY_CLASSES_ROOT,
                                       "Folder\\shellex\\ContextMenuHandlers\\PythonSample")
        file_key = _winreg.DeleteKey(_winreg.HKEY_CLASSES_ROOT,
                                     "*\\shellex\\ContextMenuHandlers\\PythonSample")
    except WindowsError, details:
        import errno
        if details.errno != errno.ENOENT:
            raise
    print ShellExtension._reg_desc_, "unregistration complete."
if __name__=='__main__':
    # Running the script (un)registers the COM server; pywin32 handles the
    # --register/--unregister command-line switches.
    from win32com.server import register
    register.UseCommandLine(ShellExtension,
                            finalize_register = DllRegisterServer,
                            finalize_unregister = DllUnregisterServer)
"repo_name": "geekpradd/Sorty",
"path": "context.py",
"copies": "1",
"size": "7153",
"license": "mit",
"hash": 8839989323853974000,
"line_mean": 40.3526011561,
"line_max": 120,
"alpha_frac": 0.660562002,
"autogenerated": false,
"ratio": 3.3440860215053765,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45046480235053765,
"avg_score": null,
"num_lines": null
} |
# A sample context menu handler.
# Adds a 'Hello from Python' menu entry to .py files. When clicked, a
# simple message box is displayed.
#
# To demostrate:
# * Execute this script to register the context menu.
# * Open Windows Explorer, and browse to a directory with a .py file.
# * Right-Click on a .py file - locate and click on 'Hello from Python' on
# the context menu.
import pythoncom
from win32com.shell import shell, shellcon
import win32gui
import win32con
class ShellExtension:
_reg_progid_ = "Python.ShellExtension.ContextMenu"
_reg_desc_ = "Python Sample Shell Extension (context menu)"
_reg_clsid_ = "{CED0336C-C9EE-4a7f-8D7F-C660393C381F}"
_com_interfaces_ = [shell.IID_IShellExtInit, shell.IID_IContextMenu]
_public_methods_ = shellcon.IContextMenu_Methods + shellcon.IShellExtInit_Methods
def Initialize(self, folder, dataobj, hkey):
print "Init", folder, dataobj, hkey
self.dataobj = dataobj
def QueryContextMenu(self, hMenu, indexMenu, idCmdFirst, idCmdLast, uFlags):
print "QCM", hMenu, indexMenu, idCmdFirst, idCmdLast, uFlags
# Query the items clicked on
format_etc = win32con.CF_HDROP, None, 1, -1, pythoncom.TYMED_HGLOBAL
sm = self.dataobj.GetData(format_etc)
num_files = shell.DragQueryFile(sm.data_handle, -1)
if num_files>1:
msg = "&Hello from Python (with %d files selected)" % num_files
else:
fname = shell.DragQueryFile(sm.data_handle, 0)
msg = "&Hello from Python (with '%s' selected)" % fname
idCmd = idCmdFirst
items = ['First Python content menu item']
if (uFlags & 0x000F) == shellcon.CMF_NORMAL: # Check == here, since CMF_NORMAL=0
print "CMF_NORMAL..."
items.append(msg)
elif uFlags & shellcon.CMF_VERBSONLY:
print "CMF_VERBSONLY..."
items.append(msg + " - shortcut")
elif uFlags & shellcon.CMF_EXPLORE:
print "CMF_EXPLORE..."
items.append(msg + " - normal file, right-click in Explorer")
elif uFlags & CMF_DEFAULTONLY:
print "CMF_DEFAULTONLY...\r\n"
else:
print "** unknown flags", uFlags
win32gui.InsertMenu(hMenu, indexMenu,
win32con.MF_SEPARATOR|win32con.MF_BYPOSITION,
0, None)
indexMenu += 1
for item in items:
win32gui.InsertMenu(hMenu, indexMenu,
win32con.MF_STRING|win32con.MF_BYPOSITION,
idCmd, item)
indexMenu += 1
idCmd += 1
win32gui.InsertMenu(hMenu, indexMenu,
win32con.MF_SEPARATOR|win32con.MF_BYPOSITION,
0, None)
indexMenu += 1
return idCmd-idCmdFirst # Must return number of menu items we added.
def InvokeCommand(self, ci):
mask, hwnd, verb, params, dir, nShow, hotkey, hicon = ci
win32gui.MessageBox(hwnd, "Hello", "Wow", win32con.MB_OK)
def GetCommandString(self, cmd, typ):
# If GetCommandString returns the same string for all items then
# the shell seems to ignore all but one. This is even true in
# Win7 etc where there is no status bar (and hence this string seems
# ignored)
return "Hello from Python (cmd=%d)!!" % (cmd,)
def DllRegisterServer():
    # Register this CLSID as a context-menu handler for Python.File.
    # Python 2 (_winreg, print statement).
    import _winreg
    key = _winreg.CreateKey(_winreg.HKEY_CLASSES_ROOT,
                            "Python.File\\shellex")
    subkey = _winreg.CreateKey(key, "ContextMenuHandlers")
    subkey2 = _winreg.CreateKey(subkey, "PythonSample")
    _winreg.SetValueEx(subkey2, None, 0, _winreg.REG_SZ, ShellExtension._reg_clsid_)
    print ShellExtension._reg_desc_, "registration complete."
def DllUnregisterServer():
    # Remove the context-menu registration; a missing key (ENOENT) is fine.
    import _winreg
    try:
        key = _winreg.DeleteKey(_winreg.HKEY_CLASSES_ROOT,
                                "Python.File\\shellex\\ContextMenuHandlers\\PythonSample")
    except WindowsError, details:
        import errno
        if details.errno != errno.ENOENT:
            raise
    print ShellExtension._reg_desc_, "unregistration complete."
if __name__=='__main__':
    # Running the script (un)registers the COM server; pywin32 handles the
    # --register/--unregister command-line switches.
    from win32com.server import register
    register.UseCommandLine(ShellExtension,
                            finalize_register = DllRegisterServer,
                            finalize_unregister = DllUnregisterServer)
| {
"repo_name": "zhanqxun/cv_fish",
"path": "win32comext/shell/demos/servers/context_menu.py",
"copies": "4",
"size": "4546",
"license": "apache-2.0",
"hash": -5307188589560985000,
"line_mean": 41.2952380952,
"line_max": 90,
"alpha_frac": 0.6047074351,
"autogenerated": false,
"ratio": 3.7508250825082508,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004324741054093283,
"num_lines": 105
} |
"""A sample custom HTTP server."""
import functools
import html
import traceback
import collect
import server
# Module-level setup: logger name, the shared page template, and the app
# instance (routes are registered below via decorators).
server.Logger.name = __file__
# %s is filled with each handler's body by insert_body / handle_http.
HTML_TMPL = '''\
<html>
<head>
<link rel="stylesheet" type="text/css" href="/myStyle.css"/>
</head>
<body id="consolas">
%s</body>
</html>
'''
LINK_HOME = '<a href="/">Home</a>'
app = server.app.App('0.0.0.0', 8080)
# Static file routes come from app.json.
app.resolver.update_from_files_json('app.json')
@app.register('/myStyle.css')
def my_style():
    """Serve the stylesheet, forcing the text/css content type."""
    status_code, headers, content = app._return_file(
        collect.Path('myStyle.css')
    )
    headers['Content-Type'] = 'text/css'
    return status_code, headers, content
def insert_body(func):
    """Decorator wrapping a handler's text in the shared HTML template.

    Handlers may return either plain text or a (status, headers, text)
    triple; only the text part is templated.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        response = func(*args, **kwargs)
        if not isinstance(response, tuple):
            return HTML_TMPL % response
        status_code, headers, text = response
        return status_code, headers, HTML_TMPL % text
    return wrapper
@app.register('/')
@insert_body
def index():
    """Home page: demo image plus a URL-entry form that POSTs back to '/'."""
    return '''\
<a href="/img.png"><img src="/img.png" width="250"/></a>
<form action="/" method="post">
<input id="consolas" type="text" name="url"><br/>
<input id="consolas" type="submit" value="Submit">
</form>
'''
@insert_body
def dir_landing_page(url_path, folder_path, recursive):
    """Render a directory listing page linking parent, self, and children.

    When ``recursive`` is false, only regular files are linked.
    ``folder_path`` is a project ``collect.Path``; presumably iterating it
    yields its entries — TODO confirm against collect.Path.
    """
    def contents():
        yield folder_path.parent
        yield folder_path
        yield from folder_path
    parts = []
    for file in contents():
        rel_path = file.relpath(folder_path)
        new_url = url_path / rel_path
        if recursive or file.is_file():
            parts.append(f'''
<a href="{new_url}">{rel_path}</a>''')
    inner = '<br/>'.join(parts)
    return f'''\
<h1>{LINK_HOME}{url_path}</h1>
<p>{inner}
</p>
'''
# Pre-register a landing page for every directory the resolver knows about;
# functools.partial binds the loop variables at registration time, so the
# late-binding closure pitfall does not apply to the registered handlers.
for url_path, fs_path in app.resolver.dirs.items():
    recursive = app.resolver.recursive(url_path)
    def contents():
        if recursive:
            yield from fs_path.tree
        else:
            yield fs_path
    for file in contents():
        if not file.is_dir():
            continue
        rel_file = file.relpath(fs_path)
        new_url = url_path / rel_file
        app.register(new_url)(
            functools.partial(dir_landing_page, new_url, file, recursive)
        )
@app.register('/', 'post')
def index_post():
    """Handle the home-page form POST: 303-redirect to the submitted URL.

    Reads the ``url`` field from the POSTed form body and answers with a
    See-Other redirect so the browser issues a fresh GET for it.
    """
    # Renamed from ``input`` to avoid shadowing the builtin.
    url_text = server.app.ActiveRequest.body['url']
    new_url = collect.Path(url_text)
    return 303, {'Location': str(new_url)}, ''
@app.register('/page')
def page():
    """Temporary-redirect (307) the legacy /page URL to /new."""
    return 307, {'Location': '/new'}, ''
@app.register('/new')
@insert_body
def new():
    """Render the target page of the /page redirect."""
    return f'''\
<p>
This is the new page. You may have been redirected.<br/>
{LINK_HOME}
</p>
'''
@app.register('/req', 'GET', 'POST')
def req_():
    """Echo the raw request back to the client as plain text (debug aid)."""
    return (
        200, {'Content-Type': 'text/plain'},
        server.app.ActiveRequest.raw_request)
@app.register_exception(server.http.HTTPException)
def handle_http(error):
    """Render an HTTPException as a styled error page.

    The exception message is HTML-escaped before interpolation so error
    text cannot inject markup into the page.
    """
    body = f'''\
<h1>{error.status_code} {error.reason}</h1>
<pre id="consolas">{html.escape(str(error.message))}</pre>
{LINK_HOME}
'''
    return error.status_code, HTML_TMPL % body
@app.register_exception(Exception)
def handle_exc(error):
    """Convert any unhandled exception into a 500 HTTPException page.

    NOTE(review): uses ``traceback.format_exc()`` rather than *error*, so
    the right traceback is rendered only when this runs inside the
    ``except`` block that caught it -- confirm the framework guarantees
    that.
    """
    new_error = server.http.HTTPException(traceback.format_exc(), 500)
    return handle_http(new_error)
print('ready')
if __name__ == '__main__':
app.run()
| {
"repo_name": "cheeseywhiz/cheeseywhiz",
"path": "socket/app.py",
"copies": "1",
"size": "3395",
"license": "mit",
"hash": -5685612508731616000,
"line_mean": 20.9032258065,
"line_max": 73,
"alpha_frac": 0.5902798233,
"autogenerated": false,
"ratio": 3.334970530451866,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9425250353751866,
"avg_score": 0,
"num_lines": 155
} |
## A sample file for one hot encoding code ##
## Use with your different classifiers ##
import numpy as np
import pandas as pd
import time
from sklearn.preprocessing import OneHotEncoder
# Non feature
NON_FEATURE=['activity_id','people_id','date','people_date']
# Categorical data that is only label encoded
CATEGORICAL_DATA = ['people_char_1', 'people_char_2','people_group_1',
'people_char_3', 'people_char_4', 'people_char_5',
'people_char_6', 'people_char_7', 'people_char_8',
'people_char_9', 'activity_category',
'char_1', 'char_2', 'char_3', 'char_4', 'char_5', 'char_6',
'char_7', 'char_8', 'char_9', 'char_10']
# Already in a one-hot encoded form
CATEGORICAL_BINARY = ['people_char_10', 'people_char_11', 'people_char_12',
'people_char_13', 'people_char_14', 'people_char_15',
'people_char_16', 'people_char_17', 'people_char_18',
'people_char_19', 'people_char_20', 'people_char_21',
'people_char_22', 'people_char_23', 'people_char_24',
'people_char_25', 'people_char_26', 'people_char_27',
'people_char_28', 'people_char_29', 'people_char_30',
'people_char_31', 'people_char_32', 'people_char_33',
'people_char_34', 'people_char_35', 'people_char_36',
'people_char_37' ]
# Continuous categories
CONT = ['people_days', 'days',
'people_month', 'month',
'people_quarter', 'quarter',
'people_week', 'week',
'people_dayOfMonth', 'dayOfMonth',
'people_year', 'year',
'people_char_38']
# Path to people.csv from ReadHat Kaggle data set with reduced dimensions
FEATURE_FILE ='../Data/act_train_features_reduced.csv'
# Path to act_train.csv from RedHat Kaggle data set with reduced dimensions
OUTPUT ='../Data/act_train_output.csv'
def category_to_one_hot(dataset, non_feature, continuous_feature):
    """One-hot encode the categorical columns of *dataset*.

    Drops the columns listed in *non_feature*, treats every remaining
    column not listed in *continuous_feature* as categorical, and returns
    scikit-learn's sparse one-hot encoded matrix.  (``pd.get_dummies``
    does the same and provides nice headers, but it kills memory.)

    Args:
        dataset: source DataFrame.
        non_feature: column names to exclude entirely.
        continuous_feature: column names to pass through un-encoded.

    Returns:
        scipy sparse matrix with the categorical columns one-hot encoded.
    """
    ds = dataset.drop(non_feature, axis=1)
    # OneHotEncoder wants positional indices, not names, so collect the
    # index of every categorical column (enumerate replaces the manual
    # counter of the original).
    boolean_column = [idx for idx, column in enumerate(ds.columns)
                      if column not in continuous_feature]
    print("Done filtering columns...")
    grd_enc = OneHotEncoder(categorical_features=boolean_column)
    return grd_enc.fit_transform(ds)
# Read the data set. Note this dataset does not contain the 'outcome' columns
train_data_df = pd.read_csv(FEATURE_FILE,parse_dates=["date"])
# Sort by activity_id so feature rows line up with the output rows below.
train_data_df.sort_values(by=['activity_id'],ascending=True, inplace=True)
# Read the train data output
train_output = pd.read_csv(OUTPUT)
train_output.sort_values(by='activity_id',ascending=True, inplace=True)
### NOTE IT IS MORE MEMORY EFFICIENT IF YOU SPLIT THE DATA INTO TRAIN AND TEST SETS FIRST
### AND THEN DO THE ONE HOT ENCODING OTHERWISE PROGRAM WILL CRASH FOR SURE!
# Function to one hot encode all values; timed to gauge encoding cost.
start = time.time()
## SAMPLE: without dropping char_10
train_arr = category_to_one_hot(train_data_df, NON_FEATURE, CONT)
## SAMPLE: try to run with char_10 first, if it does crash, you add it in NON_FEATURE and then run this code. Okay?
end = time.time()
print(end-start) | {
"repo_name": "BhavyaLight/kaggle-predicting-Red-Hat-Business-Value",
"path": "Initial_Classification_Models/one_hot_encoding_sample.py",
"copies": "1",
"size": "3544",
"license": "mit",
"hash": 532197132085245760,
"line_mean": 42.2317073171,
"line_max": 115,
"alpha_frac": 0.638261851,
"autogenerated": false,
"ratio": 3.4109720885466794,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4549233939546679,
"avg_score": null,
"num_lines": null
} |
# a sample graph
graph = {'A': ['B', 'C','E'],
'B': ['A','C', 'D'],
'C': ['D'],
'D': ['C'],
'E': ['F','D'],
'F': ['C']}
graph2 = {'A': ['B', 'C'],
'B': [ 'D'],
'C': ['E'],
'D': ['F'],
'E': [],
'F': []}
import collections
def checkcycle(graph, start='A'):
    """Return True if no directed cycle is reachable from *start*.

    The original BFS version reported a cycle whenever any node was
    reachable along two different paths (e.g. the diamond A->B->D,
    A->C->D), a false positive.  This version performs an iterative DFS
    with the classic white/grey/black colouring: a cycle exists iff DFS
    meets an edge back into a node that is still on the current path
    (grey).

    Args:
        graph: adjacency dict mapping node -> list of successor nodes.
        start: node to search from (default 'A', matching the previous
            hard-coded behaviour).

    Returns:
        bool: True when the subgraph reachable from *start* is acyclic,
        False when a cycle is found.
    """
    WHITE, GREY, BLACK = 0, 1, 2
    colour = {node: WHITE for node in graph}
    # Each stack entry pairs a node with a *live* iterator over its
    # successors, so resuming a parent continues where it left off.
    stack = [(start, iter(graph[start]))]
    colour[start] = GREY
    while stack:
        node, successors = stack[-1]
        descended = False
        for nxt in successors:
            if colour[nxt] == GREY:
                return False  # back edge onto the current path -> cycle
            if colour[nxt] == WHITE:
                colour[nxt] = GREY
                stack.append((nxt, iter(graph[nxt])))
                descended = True
                break
        if not descended:
            # All successors explored: retire the node.
            colour[node] = BLACK
            stack.pop()
    return True
print checkcycle(graph)
print checkcycle(graph2)
graph_tasks = { "wash the dishes" : ["have lunch"],
"cook food" : ["have lunch"],
"have lunch" : [],
"wash laundry" : ["dry laundry"],
"dry laundry" : ["fold laundry"],
"fold laundry" : [] }
def kahn_topsort(graph):
    """Topologically sort *graph* using Kahn's algorithm.

    Args:
        graph: adjacency dict mapping node -> list of successor nodes.

    Returns:
        list: all nodes in a topological order, or the empty list when the
        graph contains a cycle (no complete ordering exists).
    """
    # Count incoming edges for every node.
    indegree = dict.fromkeys(graph, 0)
    for successors in graph.values():
        for node in successors:
            indegree[node] += 1
    # Seed the work queue with every source (indegree zero) node.
    ready = collections.deque(n for n in indegree if indegree[n] == 0)
    order = []
    while ready:
        node = ready.popleft()
        order.append(node)
        # Removing `node` may free up successors whose last incoming edge
        # this was.
        for successor in graph[node]:
            indegree[successor] -= 1
            if indegree[successor] == 0:
                ready.append(successor)
    # A complete ordering exists only for acyclic graphs.
    return order if len(order) == len(graph) else []
print kahn_topsort(graph_tasks)
| {
"repo_name": "quake0day/oj",
"path": "checkloop.py",
"copies": "1",
"size": "1566",
"license": "mit",
"hash": 7402504270148030000,
"line_mean": 23.46875,
"line_max": 51,
"alpha_frac": 0.4118773946,
"autogenerated": false,
"ratio": 3.3896103896103895,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43014877842103894,
"avg_score": null,
"num_lines": null
} |
# a sample graph
graph = {'A': ['B', 'C','E'],
'B': ['A','C', 'D'],
'C': ['D'],
'D': ['C'],
'E': ['F','D'],
'F': ['C']}
class MyQUEUE: # just an implementation of a queue
    """A minimal FIFO queue.

    API-compatible with the original (enqueue / dequeue / IsEmpty), with
    two fixes: the bare ``except: pass`` that silently swallowed every
    error is replaced by an explicit empty check, and the O(n)
    list-slice dequeue is replaced by ``list.pop(0)``.
    NOTE: collections.deque would be the idiomatic container, but
    ``holder`` stays a plain list in case callers poke at it directly.
    """

    def __init__(self):
        # Front of the queue is holder[0].
        self.holder = []

    def enqueue(self, val):
        """Append *val* at the back of the queue."""
        self.holder.append(val)

    def dequeue(self):
        """Remove and return the front element, or None when empty."""
        if not self.holder:
            return None
        return self.holder.pop(0)

    def IsEmpty(self):
        """Return True when the queue holds no elements."""
        return len(self.holder) == 0
path_queue = MyQUEUE() # now we make a queue
arr = {'A': 0, 'B': 0, 'C': 0, 'D': 0, 'E': 0, 'F': 0}
def BFS(graph,start,end,q):
    """Enumerate every simple path from *start* to *end*, breadth first.

    Each discovered valid path is printed, and the global hit-counter
    dict ``arr`` is incremented for every node on that path.
    (Python 2 file: ``print`` statements.)
    """
    temp_path = [start]
    q.enqueue(temp_path)
    while q.IsEmpty() == False:
        tmp_path = q.dequeue()
        last_node = tmp_path[len(tmp_path)-1]
        if last_node == end:
            print "VALID_PATH : ", tmp_path
            # Count how often each node participates in a valid path.
            for i in tmp_path:
                arr[i] = arr[i] + 1
        # Extend the current path by every neighbour not already on it
        # (keeps paths simple, so the search terminates).
        for link_node in graph[last_node]:
            if link_node not in tmp_path:
                new_path = []
                new_path = tmp_path + [link_node]
                q.enqueue(new_path)
BFS(graph,"A","D",path_queue)
print(arr) | {
"repo_name": "martindavid/code-sandbox",
"path": "algorithm/COMP90038/comp90038-assignment1/Test.py",
"copies": "1",
"size": "1450",
"license": "mit",
"hash": -5098249885432630000,
"line_mean": 22.4032258065,
"line_max": 54,
"alpha_frac": 0.4351724138,
"autogenerated": false,
"ratio": 3.4360189573459716,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43711913711459716,
"avg_score": null,
"num_lines": null
} |
### a sample graph
##graph = {'A': ['B', 'C','E'],
## 'B': ['A','C', 'D'],
## 'C': ['D'],
## 'D': ['C'],
## 'E': ['D'],
## 'F': []}
fp = open('taxem.gdf')
text=fp.read()
pl=text.split('\n')
#print(p1)
flag=0
V=[]
graph={}
flag=0
for each_line in pl:
l=each_line.split(',')
if len(l)==2:
if flag!=0:
V.append(l[0])
graph[l[0]]=[]
flag=1
flag=0
for each_line in pl:
l=each_line.split(',')
if len(l)==5:
if flag!=0:
#print(l[0],l[1],l[3])
graph[l[0]].append(l[1])
flag=1
else:
continue
class MyQUEUE: # just an implementation of a queue
    """Minimal FIFO queue used by the BFS below.

    NOTE(review): ``dequeue`` re-slices the backing list (O(n) per call)
    and relies on a bare ``except: pass`` to handle emptiness;
    collections.deque with an explicit empty check would be preferable.
    """
    def __init__(self):
        self.holder = []

    def enqueue(self,val):
        # Add to the back of the queue.
        self.holder.append(val)

    def dequeue(self):
        # Remove and return the front element; returns None when empty
        # (the IndexError from holder[0] is swallowed below).
        val = None
        try:
            val = self.holder[0]
            if len(self.holder) == 1:
                self.holder = []
            else:
                self.holder = self.holder[1:]
        except:
            pass
        return val

    def IsEmpty(self):
        # True when nothing is queued.
        result = False
        if len(self.holder) == 0:
            result = True
        return result
path_queue = MyQUEUE() # now we make a queue
def BFS(graph,start,end,q):
    """Breadth-first enumerate simple paths from *start* to *end*.

    Prints every path examined, prints each valid path found, and
    returns the edge count of the shortest valid path -- or the string
    'inf' when *end* is unreachable.  (Python 2 file: ``print``
    statements.)
    """
    temp_path = [start]
    path_len='inf'
    q.enqueue(temp_path)
    while q.IsEmpty() == False:
        tmp_path = q.dequeue()
        last_node = tmp_path[len(tmp_path)-1]
        print tmp_path
        if last_node == end:
            print "VALID_PATH : ",tmp_path
            # Track the minimum number of edges over all valid paths.
            if path_len=='inf':
                path_len=len(tmp_path)-1
            else:
                if path_len>len(tmp_path)-1:
                    path_len=len(tmp_path)-1
        else:
            print "0"
        # Extend by neighbours not already on the path (simple paths only).
        for link_node in graph[last_node]:
            if link_node not in tmp_path:
                #new_path = []
                new_path = tmp_path + [link_node]
                q.enqueue(new_path)
    print path_len
    return path_len
fp = open('db/introhier.gdf')
text=fp.read()
pl=text.split('\n')
A = [['inf' for x in range(len(V))] for x in range(len(V))]
flag=0
for each_line in pl:
l=each_line.split(',')
if len(l)==5:
if flag!=0:
#print(l[0],l[1],l[3])
A[int(l[0][1:])][int(l[1][1:])]=float(l[3])
flag=1
else:
continue
for x in range(len(V)):
for y in range(len(V)):
if A[x][y]!='inf':
path_len=BFS(graph,"v"+str(x),"v"+str(y),path_queue)
if A[x][y] != 'inf' and path_len!='inf':
A[x][y]=(A[x][y]*path_len)/6
fil_out=open("enhanced_semantic_net.gdf","w")
for each_line in pl:
l=each_line.split(',')
if len(l)==2:
fil_out.write(each_line+'\n')
fil_out.write("edgedef>node1,node2,directed,weight,labelvisible\n")
for i in range(len(V)):
for j in range(len(V)):
if A[i][j]!='inf':
str1="v"+str(i)+","+"v"+str(j)+","+"false"+","+str(A[i][j])+","+"true\n"
fil_out.write(str1)
fil_out.close()
| {
"repo_name": "amudalab/concept-graphs",
"path": "document retrieval/edge_enhancement.py",
"copies": "1",
"size": "3212",
"license": "mit",
"hash": -2325528783674935000,
"line_mean": 23.696,
"line_max": 84,
"alpha_frac": 0.4436488169,
"autogenerated": false,
"ratio": 3.0707456978967493,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40143945147967497,
"avg_score": null,
"num_lines": null
} |
# A sample icon handler. Sets the icon for Python files to a random
# ICO file. ICO files are found in the Python directory - generally there will
# be 3 icons found.
#
# To demostrate:
# * Execute this script to register the context menu.
# * Open Windows Explorer, and browse to a directory with a .py file.
# * Note the pretty, random selection of icons!
import sys, os
import pythoncom
from win32com.shell import shell, shellcon
import win32gui
import win32con
import winerror
# Use glob to locate ico files, and random.choice to pick one.
import glob, random
ico_files = glob.glob(os.path.join(sys.prefix, "*.ico"))
if not ico_files:
ico_files = glob.glob(os.path.join(sys.prefix, "PC", "*.ico"))
if not ico_files:
print("WARNING: Can't find any icon files")
# Our shell extension.
IExtractIcon_Methods = "Extract GetIconLocation".split()
IPersistFile_Methods = "IsDirty Load Save SaveCompleted GetCurFile".split()
class ShellExtension:
    """COM icon handler that serves a random .ico for Python files."""
    _reg_progid_ = "Python.ShellExtension.IconHandler"
    _reg_desc_ = "Python Sample Shell Extension (icon handler)"
    _reg_clsid_ = "{a97e32d7-3b78-448c-b341-418120ea9227}"
    _com_interfaces_ = [shell.IID_IExtractIcon, pythoncom.IID_IPersistFile]
    _public_methods_ = IExtractIcon_Methods + IPersistFile_Methods

    def Load(self, filename, mode):
        # IPersistFile::Load - remember which file the shell asks about.
        self.filename = filename
        self.mode = mode

    def GetIconLocation(self, flags):
        # note - returning a single int will set the HRESULT (eg, S_FALSE,
        # E_PENDING - see MS docs for details.
        # Pick any of the previously-located .ico files at random.
        return random.choice(ico_files), 0, 0

    def Extract(self, fname, index, size):
        # S_FALSE tells the shell to extract the icon itself from the
        # location returned by GetIconLocation.
        return winerror.S_FALSE
def DllRegisterServer():
    """Register the icon handler under Python.File's shellex registry key."""
    import winreg
    key = winreg.CreateKey(winreg.HKEY_CLASSES_ROOT,
                           "Python.File\\shellex")
    subkey = winreg.CreateKey(key, "IconHandler")
    winreg.SetValueEx(subkey, None, 0, winreg.REG_SZ, ShellExtension._reg_clsid_)
    print(ShellExtension._reg_desc_, "registration complete.")
def DllUnregisterServer():
    """Remove the icon handler registry key; a missing key is not an error."""
    import winreg
    try:
        key = winreg.DeleteKey(winreg.HKEY_CLASSES_ROOT,
                               "Python.File\\shellex\\IconHandler")
    except WindowsError as details:
        import errno
        # Already unregistered - ignore "key not found" only.
        if details.errno != errno.ENOENT:
            raise
    print(ShellExtension._reg_desc_, "unregistration complete.")
if __name__=='__main__':
from win32com.server import register
register.UseCommandLine(ShellExtension,
finalize_register = DllRegisterServer,
finalize_unregister = DllUnregisterServer)
| {
"repo_name": "SublimeText/Pywin32",
"path": "lib/x32/win32comext/shell/demos/servers/icon_handler.py",
"copies": "10",
"size": "2600",
"license": "bsd-3-clause",
"hash": 8324274229896413000,
"line_mean": 36.1428571429,
"line_max": 81,
"alpha_frac": 0.6823076923,
"autogenerated": false,
"ratio": 3.641456582633053,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.012359830409552205,
"num_lines": 70
} |
# A sample implementation of IEmptyVolumeCache - see
# http://msdn2.microsoft.com/en-us/library/aa969271.aspx for an overview.
#
# * Execute this script to register the handler
# * Start the "disk cleanup" tool - look for "pywin32 compiled files"
import sys, os, stat, time
import pythoncom
from win32com.shell import shell, shellcon
from win32com.server.exception import COMException
import win32gui
import win32con
import winerror
# Our shell extension.
IEmptyVolumeCache_Methods = "Initialize GetSpaceUsed Purge ShowProperties Deactivate".split()
IEmptyVolumeCache2_Methods = "InitializeEx".split()
ico = os.path.join(sys.prefix, "py.ico")
if not os.path.isfile(ico):
ico = os.path.join(sys.prefix, "PC", "py.ico")
if not os.path.isfile(ico):
ico = None
print("Can't find python.ico - no icon will be installed")
class EmptyVolumeCache:
    """Disk-cleanup (IEmptyVolumeCache/2) handler for pywin32 .pyc/.pyo files.

    The Windows disk cleanup tool drives this object: InitializeEx
    describes the handler, GetSpaceUsed reports how many bytes the
    compiled files occupy, and Purge deletes them.

    Fix vs. the original: ``os.path.walk`` was removed in Python 3
    (this file is otherwise already Python 3 code), so directory walks
    now go through the small ``_WalkTree`` adapter over ``os.walk``; the
    2to3 artifact names (``xxx_todo_changeme``) are also cleaned up.
    """
    _reg_progid_ = "Python.ShellExtension.EmptyVolumeCache"
    _reg_desc_ = "Python Sample Shell Extension (disk cleanup)"
    _reg_clsid_ = "{EADD0777-2968-4c72-A999-2BF5F756259C}"
    _reg_icon_ = ico
    _com_interfaces_ = [shell.IID_IEmptyVolumeCache, shell.IID_IEmptyVolumeCache2]
    _public_methods_ = IEmptyVolumeCache_Methods + IEmptyVolumeCache2_Methods

    def Initialize(self, hkey, volume, flags):
        # Only win98 calls the old interface; we require InitializeEx.
        print("Unless we are on 98, Initialize call is unexpected!")
        raise COMException(hresult=winerror.E_NOTIMPL)

    def InitializeEx(self, hkey, volume, key_name, flags):
        """Describe the handler to the cleanup manager.

        Returns:
            tuple: (display_name, description, button_name, flags).
        """
        print("InitializeEx called with", hkey, volume, key_name, flags)
        self.volume = volume
        if flags & shellcon.EVCF_SETTINGSMODE:
            print("We are being run on a schedule")
            # Per MSDN: "because there is no opportunity for user
            # feedback, only those files that are extremely safe to clean
            # up should be touched. You should ignore the initialization
            # method's pcwszVolume parameter and clean unneeded files
            # regardless of what drive they are on."
            self.volume = None  # flag as 'any disk will do'
        elif flags & shellcon.EVCF_OUTOFDISKSPACE:
            # Per MSDN: "the handler should be aggressive about deleting
            # files, even if it results in a performance loss. However,
            # the handler obviously should not delete files that would
            # cause an application to fail or the user to lose data."
            print("We are being run as we are out of disk-space")
        else:
            # This case is not documented - we are guessing :)
            print("We are being run because the user asked")
        # For the sake of the demo, only show the handler when there are
        # > 0 bytes available.  GetSpaceUsed checks the volume, so it
        # returns 0 when we are on a different disk.
        flags = shellcon.EVCF_DONTSHOWIFZERO | shellcon.EVCF_ENABLEBYDEFAULT
        return ("pywin32 compiled files",
                "Removes all .pyc and .pyo files in the pywin32 directories",
                "click me!",
                flags
                )

    def _GetDirectories(self):
        # The pywin32 package directories to scan; empty when we were
        # restricted to a volume that does not hold them.
        root_dir = os.path.abspath(os.path.dirname(os.path.dirname(win32gui.__file__)))
        if self.volume is not None and \
           not root_dir.lower().startswith(self.volume.lower()):
            return []
        return [os.path.join(root_dir, p)
                for p in ('win32', 'win32com', 'win32comext', 'isapi')]

    def _WalkTree(self, top, arg):
        # os.path.walk was removed in Python 3: drive the old-style
        # per-directory callback from os.walk instead.
        for dirpath, dirnames, filenames in os.walk(top):
            self._WalkCallback(arg, dirpath, filenames)

    def _WalkCallback(self, arg, directory, files):
        # Per-directory visitor.  arg is (progress_callback, total_list);
        # total_list is None in purge mode (delete the files), otherwise a
        # single-element list accumulating the byte count.
        callback, total_list = arg
        for file in files:
            fqn = os.path.join(directory, file).lower()
            if file.endswith(".pyc") or file.endswith(".pyo"):
                if total_list is None:
                    print("Deleting file", fqn)
                    # Should do callback.PurgeProcess - left as an exercise :)
                    os.remove(fqn)
                else:
                    total_list[0] += os.stat(fqn)[stat.ST_SIZE]
                    # Report scan progress back to the cleanup manager.
                    if callback:
                        callback.ScanProgress(total_list[0], 0, "Looking at " + fqn)

    def GetSpaceUsed(self, callback):
        """Return the number of bytes our .pyc/.pyo files occupy."""
        total = [0]  # single-element list so _WalkCallback can accumulate
        try:
            for d in self._GetDirectories():
                self._WalkTree(d, (callback, total))
                print("After looking in", d, "we have", total[0], "bytes")
        except pythoncom.error as com_err:
            # Raised by the callback when the user selects 'cancel'.
            hr = com_err.args[0]
            if hr != winerror.E_ABORT:
                raise  # that's the documented error code!
            print("User cancelled the operation")
        return total[0]

    def Purge(self, amt_to_free, callback):
        """Delete the files; *amt_to_free* is what GetSpaceUsed reported."""
        print("Purging", amt_to_free, "bytes...")
        # We ignore amt_to_free - it is generally what we returned for
        # GetSpaceUsed.
        try:
            for d in self._GetDirectories():
                self._WalkTree(d, (callback, None))
        except pythoncom.error as com_err:
            # Raised by the callback when the user selects 'cancel'.
            hr = com_err.args[0]
            if hr != winerror.E_ABORT:
                raise  # that's the documented error code!
            print("User cancelled the operation")

    def ShowProperties(self, hwnd):
        # No settings UI is offered.
        raise COMException(hresult=winerror.E_NOTIMPL)

    def Deactivate(self):
        print("Deactivate called")
        return 0
def DllRegisterServer():
    """Register the cleanup handler under Explorer's VolumeCaches key."""
    # Also need to register specially in:
    #  HKEY_LOCAL_MACHINE\Software\Microsoft\Windows\CurrentVersion\Explorer\VolumeCaches
    # See link at top of file.
    import winreg
    kn = r"Software\Microsoft\Windows\CurrentVersion\Explorer\VolumeCaches\%s" \
         % (EmptyVolumeCache._reg_desc_,)
    key = winreg.CreateKey(winreg.HKEY_LOCAL_MACHINE, kn)
    winreg.SetValueEx(key, None, 0, winreg.REG_SZ, EmptyVolumeCache._reg_clsid_)
def DllUnregisterServer():
    """Delete the VolumeCaches registration; tolerate it being absent."""
    import winreg
    kn = r"Software\Microsoft\Windows\CurrentVersion\Explorer\VolumeCaches\%s" \
         % (EmptyVolumeCache._reg_desc_,)
    try:
        key = winreg.DeleteKey(winreg.HKEY_LOCAL_MACHINE, kn)
    except WindowsError as details:
        import errno
        # Key already gone - only re-raise unexpected errors.
        if details.errno != errno.ENOENT:
            raise
    print(EmptyVolumeCache._reg_desc_, "unregistration complete.")
if __name__=='__main__':
from win32com.server import register
register.UseCommandLine(EmptyVolumeCache,
finalize_register = DllRegisterServer,
finalize_unregister = DllUnregisterServer)
| {
"repo_name": "int19h/PTVS",
"path": "Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/win32comext/shell/demos/servers/empty_volume_cache.py",
"copies": "10",
"size": "7884",
"license": "apache-2.0",
"hash": -1372622822476554500,
"line_mean": 44.8372093023,
"line_max": 93,
"alpha_frac": 0.6139015728,
"autogenerated": false,
"ratio": 4.038934426229508,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9652835999029508,
"avg_score": null,
"num_lines": null
} |
"""A sample involving creation of persistent tables
"""
import os
from blaze import Table, fromiter, mean, std, params, open
from random import random
def build_table(table_name, rows):
    """build the table to use in our example.
    if already built just open it

    Creates a persistent blaze Table of (int64, float64) records at
    *table_name* and fills it with *rows* entries of (index, random()).
    (Python 2 file: uses ``xrange``.)
    """
    if not os.path.exists(table_name):
        ds = 'x, {i: int64; f: float64}'
        # clevel=5: compression level; storage= makes the table persistent.
        p = params(clevel=5, storage=table_name)
        t = Table([], dshape=ds, params=p)
        for i in xrange(rows):
            t.append((i, random()))
        t.commit()
    else:
        # Already on disk - reopen it (blaze's open, which shadows the
        # builtin via the module-level import).
        t = open(table_name)
    return t
def build_array(array_name, rows):
    """Build (or reopen) a persistent 1-D float array of *rows* elements.

    Values are 0.0, 0.1, 0.2, ... generated lazily via ``fromiter``.
    (Python 2 file: uses ``xrange``.)
    """
    if not os.path.exists(array_name):
        ds = 'x, float'
        p = params(clevel=5, storage=array_name)
        t = fromiter((0.1*i for i in xrange(rows)),
                     dshape=ds, params=p)
        t.commit()
    else:
        # Already on disk - reopen it (blaze's open, not the builtin).
        t = open(array_name)
    return t
def test_simple():
    """Build (or reopen) the sample table and print it.

    (Python 2 file: ``print`` statement.)
    """
    table_name = './sample_tables/test_table'
    # array_name = './sample_tables/test_array'
    t = build_table(table_name, 100000)
    # a = build_array(array_name, 100000)
    print t
    # print a.datashape
if __name__ == '__main__':
test_simple()
## Local Variables:
## mode: python
## coding: utf-8
## python-indent: 4
## tab-width: 4
## fill-column: 66
## End:
| {
"repo_name": "seibert/blaze-core",
"path": "samples/persist_example.py",
"copies": "1",
"size": "1296",
"license": "bsd-2-clause",
"hash": -4092616498115994600,
"line_mean": 20.9661016949,
"line_max": 58,
"alpha_frac": 0.5825617284,
"autogenerated": false,
"ratio": 3.2238805970149254,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43064423254149253,
"avg_score": null,
"num_lines": null
} |
"""A sample job that prints string."""
import sys
import os.path
os.environ['NDSCHEDULER_SETTINGS_MODULE'] = 'settings_sched'
addpath = os.path.abspath("./ndscheduler")
if addpath not in sys.path:
sys.path.append(os.path.abspath("./ndscheduler"))
from ndscheduler.corescheduler import job
import WebMirror.TimedTriggers.RollingRewalkTriggers
import WebMirror.TimedTriggers.UrlTriggers
import WebMirror.TimedTriggers.QueueTriggers
import WebMirror.TimedTriggers.LocalFetchTriggers
import WebMirror.OfflineFilters.NewNetlocTracker as nnt
import WebMirror.util.StatusUpdater.Updater
import WebMirror.management.FeedDbManage
import Misc.HistoryAggregator.Consolidate
import common.management.WebMirrorManage
import common.management.RawMirrorManage
import Misc.NuForwarder.NuHeader
# import RawArchiver.TimedTriggers.RawRollingRewalkTrigger
import RawArchiver.TimedTriggers.RawUrlStartTrigger
class PythonJob():
    """Base class for scheduled jobs that instantiate and run an invokable.

    Subclasses set ``invokable`` to a class exposing a ``go()`` method;
    ``run`` instantiates it and calls ``go``.
    """

    # Sentinel: must be overridden by subclasses.  The original used the
    # *string* "None", which is truthy, so the guard assert in run() could
    # never fire; a real None makes misuse fail loudly.  (Both render as
    # "None" via %s, so meta_info output is unchanged.)
    invokable = None

    @classmethod
    def meta_info(cls):
        """Describe this job class for the ndscheduler UI."""
        return {
            'job_class_string': '%s.%s' % (cls.__module__, cls.__name__),
            'notes': 'Execute the scheduled job for %s' % cls.invokable,
            'arguments': [],
            'example_arguments': 'None',
        }

    def run(self):
        """Instantiate ``invokable`` and execute its ``go()`` method."""
        assert self.invokable is not None, "subclass must set `invokable`"
        instance = self.invokable()
        instance.go()
        print("Job %s has finished executing %s" % (self.__class__, self.invokable))
        return
class PriorityDropper():
    """Runner wrapper around WebMirrorManage.exposed_drop_priorities()."""
    def go(self):
        common.management.WebMirrorManage.exposed_drop_priorities()


class RawPriorityDropper():
    """Runner wrapper around RawMirrorManage.exposed_drop_priorities()."""
    def go(self):
        common.management.RawMirrorManage.exposed_drop_priorities()


class RssHistoryPurgerRunner():
    """Runner wrapper around WebMirrorManage.exposed_clear_rss_history()."""
    def go(self):
        common.management.WebMirrorManage.exposed_clear_rss_history()
class NewUrlExtractorRunner():
    """Collect new-URL candidates from several sources and enqueue them.

    Gathers URL mappings from the NU header, WLN releases, high-priority
    items and zero-distance items, merges them (union of URL sets per
    key), pushes the merged mapping into the table, then filters out
    already-held URLs.
    """

    def go(self):
        nu_urls = nnt.get_nu_head_urls()
        wln_urls = nnt.get_wln_release_urls()
        high_pri_urls = nnt.get_high_priority_urls()
        zero_dist_urls = nnt.get_distance_of_zero_urls()
        print("NU Header urls: %s, wln URLs: %s, %s high priority items, %s with a distance of zero." % (
            len(nu_urls), len(wln_urls), len(high_pri_urls), len(zero_dist_urls)))
        # Fold every auxiliary mapping into the NU-header mapping, taking
        # the union of the URL sets for each key.
        merged = nu_urls
        for extra in (wln_urls, high_pri_urls, zero_dist_urls):
            for key, value in extra.items():
                merged.setdefault(key, set())
                merged[key].update(value)
        print("Total items: %s" % (len(merged), ))
        nnt.push_urls_into_table(merged)
        nnt.filter_get_have_urls()
class NewUrlTitleLoader():
    """Runner that backfills titles for 'new' URL rows missing one."""

    def go(self):
        # The original also fetched nnt.get_nu_head_urls() into an unused
        # local; that dead (and potentially slow) call is removed.
        nnt.update_missing_new_with_title()
# --- Concrete scheduler job bindings -------------------------------------
# Each subclass only points PythonJob at the class whose go()/run cycle it
# should execute; all behaviour lives in PythonJob.run().

class RssTriggerJob(PythonJob, job.JobBase):
    invokable = WebMirror.TimedTriggers.UrlTriggers.RssTriggerBase


class RollingRewalkTriggersBaseJob(PythonJob, job.JobBase):
    invokable = WebMirror.TimedTriggers.RollingRewalkTriggers.RollingRewalkTriggersBase


class HourlyPageTriggerJob(PythonJob, job.JobBase):
    invokable = WebMirror.TimedTriggers.UrlTriggers.HourlyPageTrigger


class EverySixHoursPageTriggerJob(PythonJob, job.JobBase):
    invokable = WebMirror.TimedTriggers.UrlTriggers.EverySixHoursPageTrigger


class EveryOtherDayPageTriggerJob(PythonJob, job.JobBase):
    invokable = WebMirror.TimedTriggers.UrlTriggers.EveryOtherDayPageTrigger


class NuQueueTriggerJob(PythonJob, job.JobBase):
    invokable = WebMirror.TimedTriggers.QueueTriggers.NuQueueTrigger


class HourlyLocalFetchTriggerJob(PythonJob, job.JobBase):
    invokable = WebMirror.TimedTriggers.LocalFetchTriggers.HourlyLocalFetchTrigger


class DbFlattenerJob(PythonJob, job.JobBase):
    invokable = Misc.HistoryAggregator.Consolidate.DbFlattener


class RssFunctionSaverJob(PythonJob, job.JobBase):
    invokable = WebMirror.management.FeedDbManage.RssFunctionSaver


class TransactionTruncatorJob(PythonJob, job.JobBase):
    invokable = Misc.HistoryAggregator.Consolidate.TransactionTruncator


# class RollingRawRewalkTriggerJob(PythonJob, job.JobBase):
#     invokable = RawArchiver.TimedTriggers.RawRollingRewalkTrigger.RollingRawRewalkTrigger

class RollingRawUrlTriggerJob(PythonJob, job.JobBase):
    invokable = RawArchiver.TimedTriggers.RawUrlStartTrigger.RollingRawUrlStartTrigger


class NuHeaderJob(PythonJob, job.JobBase):
    invokable = Misc.NuForwarder.NuHeader.NuHeader


class NuUpdateSenderJob(PythonJob, job.JobBase):
    invokable = Misc.NuForwarder.NuHeader.NuUpdateSender


class WebMirrorPriorityDropper(PythonJob, job.JobBase):
    invokable = PriorityDropper


class RawMirrorPriorityDropper(PythonJob, job.JobBase):
    invokable = RawPriorityDropper


class RssHistoryPurger(PythonJob, job.JobBase):
    invokable = RssHistoryPurgerRunner


class NewUrlExtractor(PythonJob, job.JobBase):
    invokable = NewUrlExtractorRunner


# NOTE(review): this rebinds the module name `NewUrlTitleLoader` (defined
# above as a plain runner class).  The class-body reference on the next
# line is evaluated *before* the rebinding, so `invokable` correctly
# points at the runner - but the name collision is fragile; consider
# renaming one of the two classes.
class NewUrlTitleLoader(PythonJob, job.JobBase):
    invokable = NewUrlTitleLoader
| {
"repo_name": "fake-name/ReadableWebProxy",
"path": "scheduled_jobs/python_job.py",
"copies": "1",
"size": "4739",
"license": "bsd-3-clause",
"hash": 3848232585909021700,
"line_mean": 31.0202702703,
"line_max": 161,
"alpha_frac": 0.7938383625,
"autogenerated": false,
"ratio": 3.047588424437299,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9203026964816179,
"avg_score": 0.02767996442422404,
"num_lines": 148
} |
# A sample of using Vista's IExplorerBrowser interfaces...
# Currently doesn't quite work:
# * CPU sits at 100% while running.
import pythoncom
from win32com.shell import shell, shellcon
import win32gui, win32con, win32api
from win32com.server.util import wrap, unwrap
# event handler for the browser.
IExplorerBrowserEvents_Methods = """OnNavigationComplete OnNavigationFailed
OnNavigationPending OnViewCreated""".split()
class EventHandler:
    """IExplorerBrowserEvents sink: logs each navigation/view event.

    (Python 2 file: ``print`` statements.)
    """
    _com_interfaces_ = [shell.IID_IExplorerBrowserEvents]
    _public_methods_ = IExplorerBrowserEvents_Methods

    def OnNavigationComplete(self, pidl):
        print "OnNavComplete", pidl

    def OnNavigationFailed(self, pidl):
        print "OnNavigationFailed", pidl

    def OnNavigationPending(self, pidl):
        print "OnNavigationPending", pidl

    def OnViewCreated(self, view):
        print "OnViewCreated", view
        # And if our demo view has been registered, it may well
        # be that view!
        try:
            # unwrap succeeds only for Python-implemented COM objects.
            pyview = unwrap(view)
            print "and look - its a Python implemented view!", pyview
        except ValueError:
            pass
class MainWindow:
    """Top-level window hosting an IExplorerBrowser control.

    (Python 2 file: ``print`` statements.)
    """
    def __init__(self):
        message_map = {
            win32con.WM_DESTROY: self.OnDestroy,
            win32con.WM_COMMAND: self.OnCommand,
            win32con.WM_SIZE: self.OnSize,
        }
        # Register the Window class.
        wc = win32gui.WNDCLASS()
        hinst = wc.hInstance = win32api.GetModuleHandle(None)
        wc.lpszClassName = "test_explorer_browser"
        wc.lpfnWndProc = message_map # could also specify a wndproc.
        classAtom = win32gui.RegisterClass(wc)
        # Create the Window.
        style = win32con.WS_OVERLAPPEDWINDOW | win32con.WS_VISIBLE
        self.hwnd = win32gui.CreateWindow( classAtom, "Python IExplorerBrowser demo", style, \
            0, 0, win32con.CW_USEDEFAULT, win32con.CW_USEDEFAULT, \
            0, 0, hinst, None)
        eb = pythoncom.CoCreateInstance(shellcon.CLSID_ExplorerBrowser, None, pythoncom.CLSCTX_ALL, shell.IID_IExplorerBrowser)
        # as per MSDN docs, hook up events early
        self.event_cookie = eb.Advise(wrap(EventHandler()))
        eb.SetOptions(shellcon.EBO_SHOWFRAMES)
        rect = win32gui.GetClientRect(self.hwnd)
        # Set the flags such that the folders autoarrange and non web view is presented
        flags = (shellcon.FVM_LIST, shellcon.FWF_AUTOARRANGE | shellcon.FWF_NOWEBVIEW)
        # NOTE(review): `flags` above is computed but never passed;
        # Initialize receives (0, FVM_DETAILS) instead -- confirm intent.
        eb.Initialize(self.hwnd, rect, (0, shellcon.FVM_DETAILS))
        # And start browsing at the root of the namespace.
        eb.BrowseToIDList([], shellcon.SBSP_ABSOLUTE)
        #eb.FillFromObject(None, shellcon.EBF_NODROPTARGET);
        #eb.SetEmptyText("No known folders yet...");
        self.eb = eb

    def OnCommand(self, hwnd, msg, wparam, lparam):
        # No menu/accelerator handling needed for the demo.
        pass

    def OnDestroy(self, hwnd, msg, wparam, lparam):
        # Tear down the browser before quitting the message loop.
        print "tearing down ExplorerBrowser..."
        self.eb.Unadvise(self.event_cookie)
        self.eb.Destroy()
        self.eb = None
        print "shutting down app..."
        win32gui.PostQuitMessage(0)

    def OnSize(self, hwnd, msg, wparam, lparam):
        # Keep the browser control sized to the window's client area.
        x = win32api.LOWORD(lparam)
        y = win32api.HIWORD(lparam)
        self.eb.SetRect(None, (0, 0, x, y))
def main():
w=MainWindow()
win32gui.PumpMessages()
if __name__=='__main__':
main()
| {
"repo_name": "espadrine/opera",
"path": "chromium/src/third_party/python_26/Lib/site-packages/win32comext/shell/demos/explorer_browser.py",
"copies": "17",
"size": "3409",
"license": "bsd-3-clause",
"hash": 3744942420593854000,
"line_mean": 36.8777777778,
"line_max": 127,
"alpha_frac": 0.6456438838,
"autogenerated": false,
"ratio": 3.5,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
# A sample of using Vista's IExplorerBrowser interfaces...
# Currently doesn't quite work:
# * CPU sits at 100% while running.
import sys
import pythoncom
from win32com.shell import shell, shellcon
import win32gui, win32con, win32api
from win32com.server.util import wrap, unwrap
# event handler for the browser.
IExplorerBrowserEvents_Methods = """OnNavigationComplete OnNavigationFailed
OnNavigationPending OnViewCreated""".split()
class EventHandler:
    """IExplorerBrowserEvents sink: logs each navigation/view event."""
    _com_interfaces_ = [shell.IID_IExplorerBrowserEvents]
    _public_methods_ = IExplorerBrowserEvents_Methods

    def OnNavigationComplete(self, pidl):
        print("OnNavComplete", pidl)

    def OnNavigationFailed(self, pidl):
        print("OnNavigationFailed", pidl)

    def OnNavigationPending(self, pidl):
        print("OnNavigationPending", pidl)

    def OnViewCreated(self, view):
        print("OnViewCreated", view)
        # And if our demo view has been registered, it may well
        # be that view!
        try:
            # unwrap succeeds only for Python-implemented COM objects.
            pyview = unwrap(view)
            print("and look - its a Python implemented view!", pyview)
        except ValueError:
            pass
class MainWindow:
    """Top-level win32 window hosting an embedded IExplorerBrowser control."""
    def __init__(self):
        """Register the window class, create the window, then create,
        initialize and navigate the ExplorerBrowser COM object."""
        message_map = {
                win32con.WM_DESTROY: self.OnDestroy,
                win32con.WM_COMMAND: self.OnCommand,
                win32con.WM_SIZE: self.OnSize,
        }
        # Register the Window class.
        wc = win32gui.WNDCLASS()
        hinst = wc.hInstance = win32api.GetModuleHandle(None)
        wc.lpszClassName = "test_explorer_browser"
        wc.lpfnWndProc = message_map # could also specify a wndproc.
        classAtom = win32gui.RegisterClass(wc)
        # Create the Window.
        style = win32con.WS_OVERLAPPEDWINDOW | win32con.WS_VISIBLE
        self.hwnd = win32gui.CreateWindow( classAtom, "Python IExplorerBrowser demo", style, \
                        0, 0, win32con.CW_USEDEFAULT, win32con.CW_USEDEFAULT, \
                        0, 0, hinst, None)
        eb = pythoncom.CoCreateInstance(shellcon.CLSID_ExplorerBrowser, None, pythoncom.CLSCTX_ALL, shell.IID_IExplorerBrowser)
        # as per MSDN docs, hook up events early - before Initialize.
        self.event_cookie = eb.Advise(wrap(EventHandler()))
        eb.SetOptions(shellcon.EBO_SHOWFRAMES)
        rect = win32gui.GetClientRect(self.hwnd)
        # Set the flags such that the folders autoarrange and non web view is presented
        # NOTE(review): 'flags' is built but never used - Initialize below
        # passes (0, FVM_DETAILS) instead; presumably 'flags' was the
        # intended folder-settings argument - confirm before changing.
        flags = (shellcon.FVM_LIST, shellcon.FWF_AUTOARRANGE | shellcon.FWF_NOWEBVIEW)
        eb.Initialize(self.hwnd, rect, (0, shellcon.FVM_DETAILS))
        if len(sys.argv)==2:
            # If an arg was specified, ask the desktop parse it.
            # You can pass anything explorer accepts as its '/e' argument -
            # eg, "::{guid}\::{guid}" etc.
            # "::{20D04FE0-3AEA-1069-A2D8-08002B30309D}" is "My Computer"
            pidl = shell.SHGetDesktopFolder().ParseDisplayName(0, None, sys.argv[1])[1]
        else:
            # And start browsing at the root of the namespace.
            pidl = []
        eb.BrowseToIDList(pidl, shellcon.SBSP_ABSOLUTE)
        # and for some reason the "Folder" view in the navigator pane doesn't
        # magically synchronize itself - so let's do that ourself.
        # Get the tree control.
        sp = eb.QueryInterface(pythoncom.IID_IServiceProvider)
        try:
            tree = sp.QueryService(shell.IID_INameSpaceTreeControl,
                                   shell.IID_INameSpaceTreeControl)
        except pythoncom.com_error as exc:
            # this should really only fail if no "nav" frame exists...
            print("Strange - failed to get the tree control even though " \
                  "we asked for a EBO_SHOWFRAMES")
            print(exc)
        else:
            # get the IShellItem for the selection.
            si = shell.SHCreateItemFromIDList(pidl, shell.IID_IShellItem)
            # set it to selected.
            tree.SetItemState(si, shellcon.NSTCIS_SELECTED, shellcon.NSTCIS_SELECTED)
        #eb.FillFromObject(None, shellcon.EBF_NODROPTARGET);
        #eb.SetEmptyText("No known folders yet...");
        # Keep the browser alive; OnDestroy releases it.
        self.eb = eb
    def OnCommand(self, hwnd, msg, wparam, lparam):
        # No menus/accelerators in this demo - ignore WM_COMMAND.
        pass
    def OnDestroy(self, hwnd, msg, wparam, lparam):
        """WM_DESTROY: tear down the browser COM object, then end the app."""
        print("tearing down ExplorerBrowser...")
        self.eb.Unadvise(self.event_cookie)
        self.eb.Destroy()
        self.eb = None
        print("shutting down app...")
        win32gui.PostQuitMessage(0)
    def OnSize(self, hwnd, msg, wparam, lparam):
        """WM_SIZE: resize the embedded browser to fill the client area."""
        x = win32api.LOWORD(lparam)
        y = win32api.HIWORD(lparam)
        self.eb.SetRect(None, (0, 0, x, y))
def main():
    # Keep the window object referenced for the duration of the message
    # loop; PumpMessages returns when OnDestroy posts WM_QUIT.
    w=MainWindow()
    win32gui.PumpMessages()
if __name__=='__main__':
    main()
| {
"repo_name": "int19h/PTVS",
"path": "Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/win32comext/shell/demos/explorer_browser.py",
"copies": "10",
"size": "4758",
"license": "apache-2.0",
"hash": -393237709399099460,
"line_mean": 39.6666666667,
"line_max": 127,
"alpha_frac": 0.6277847835,
"autogenerated": false,
"ratio": 3.5560538116591927,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.005529710527940913,
"num_lines": 117
} |
# A sample of using Vista's IExplorerBrowser interfaces...
# Currently doesn't quite work:
# * CPU sits at 100% while running.
import sys
import pythoncom
from win32com.shell import shell, shellcon
import win32gui, win32con, win32api
from win32com.server.util import wrap, unwrap
# event handler for the browser.
IExplorerBrowserEvents_Methods = """OnNavigationComplete OnNavigationFailed
OnNavigationPending OnViewCreated""".split()
class EventHandler:
    """COM event sink receiving IExplorerBrowserEvents notifications.

    (Python 2 source - print statements are intentional.)
    """
    _com_interfaces_ = [shell.IID_IExplorerBrowserEvents]
    _public_methods_ = IExplorerBrowserEvents_Methods
    def OnNavigationComplete(self, pidl):
        print "OnNavComplete", pidl
    def OnNavigationFailed(self, pidl):
        print "OnNavigationFailed", pidl
    def OnNavigationPending(self, pidl):
        print "OnNavigationPending", pidl
    def OnViewCreated(self, view):
        print "OnViewCreated", view
        # And if our demo view has been registered, it may well
        # be that view!  unwrap() raises ValueError for plain COM objects.
        try:
            pyview = unwrap(view)
            print "and look - its a Python implemented view!", pyview
        except ValueError:
            pass
class MainWindow:
    """Top-level win32 window hosting an embedded IExplorerBrowser control.

    (Python 2 source - print statements are intentional.)
    """
    def __init__(self):
        """Register the window class, create the window, then create,
        initialize and navigate the ExplorerBrowser COM object."""
        message_map = {
                win32con.WM_DESTROY: self.OnDestroy,
                win32con.WM_COMMAND: self.OnCommand,
                win32con.WM_SIZE: self.OnSize,
        }
        # Register the Window class.
        wc = win32gui.WNDCLASS()
        hinst = wc.hInstance = win32api.GetModuleHandle(None)
        wc.lpszClassName = "test_explorer_browser"
        wc.lpfnWndProc = message_map # could also specify a wndproc.
        classAtom = win32gui.RegisterClass(wc)
        # Create the Window.
        style = win32con.WS_OVERLAPPEDWINDOW | win32con.WS_VISIBLE
        self.hwnd = win32gui.CreateWindow( classAtom, "Python IExplorerBrowser demo", style, \
                        0, 0, win32con.CW_USEDEFAULT, win32con.CW_USEDEFAULT, \
                        0, 0, hinst, None)
        eb = pythoncom.CoCreateInstance(shellcon.CLSID_ExplorerBrowser, None, pythoncom.CLSCTX_ALL, shell.IID_IExplorerBrowser)
        # as per MSDN docs, hook up events early - before Initialize.
        self.event_cookie = eb.Advise(wrap(EventHandler()))
        eb.SetOptions(shellcon.EBO_SHOWFRAMES)
        rect = win32gui.GetClientRect(self.hwnd)
        # Set the flags such that the folders autoarrange and non web view is presented
        # NOTE(review): 'flags' is built but never used - Initialize below
        # passes (0, FVM_DETAILS) instead; presumably 'flags' was the
        # intended folder-settings argument - confirm before changing.
        flags = (shellcon.FVM_LIST, shellcon.FWF_AUTOARRANGE | shellcon.FWF_NOWEBVIEW)
        eb.Initialize(self.hwnd, rect, (0, shellcon.FVM_DETAILS))
        if len(sys.argv)==2:
            # If an arg was specified, ask the desktop parse it.
            # You can pass anything explorer accepts as its '/e' argument -
            # eg, "::{guid}\::{guid}" etc.
            # "::{20D04FE0-3AEA-1069-A2D8-08002B30309D}" is "My Computer"
            pidl = shell.SHGetDesktopFolder().ParseDisplayName(0, None, sys.argv[1])[1]
        else:
            # And start browsing at the root of the namespace.
            pidl = []
        eb.BrowseToIDList(pidl, shellcon.SBSP_ABSOLUTE)
        # and for some reason the "Folder" view in the navigator pane doesn't
        # magically synchronize itself - so let's do that ourself.
        # Get the tree control.
        sp = eb.QueryInterface(pythoncom.IID_IServiceProvider)
        try:
            tree = sp.QueryService(shell.IID_INameSpaceTreeControl,
                                   shell.IID_INameSpaceTreeControl)
        except pythoncom.com_error, exc:
            # this should really only fail if no "nav" frame exists...
            print "Strange - failed to get the tree control even though " \
                  "we asked for a EBO_SHOWFRAMES"
            print exc
        else:
            # get the IShellItem for the selection.
            si = shell.SHCreateItemFromIDList(pidl, shell.IID_IShellItem)
            # set it to selected.
            tree.SetItemState(si, shellcon.NSTCIS_SELECTED, shellcon.NSTCIS_SELECTED)
        #eb.FillFromObject(None, shellcon.EBF_NODROPTARGET);
        #eb.SetEmptyText("No known folders yet...");
        # Keep the browser alive; OnDestroy releases it.
        self.eb = eb
    def OnCommand(self, hwnd, msg, wparam, lparam):
        # No menus/accelerators in this demo - ignore WM_COMMAND.
        pass
    def OnDestroy(self, hwnd, msg, wparam, lparam):
        """WM_DESTROY: tear down the browser COM object, then end the app."""
        print "tearing down ExplorerBrowser..."
        self.eb.Unadvise(self.event_cookie)
        self.eb.Destroy()
        self.eb = None
        print "shutting down app..."
        win32gui.PostQuitMessage(0)
    def OnSize(self, hwnd, msg, wparam, lparam):
        """WM_SIZE: resize the embedded browser to fill the client area."""
        x = win32api.LOWORD(lparam)
        y = win32api.HIWORD(lparam)
        self.eb.SetRect(None, (0, 0, x, y))
def main():
    # Keep the window object referenced for the duration of the message
    # loop; PumpMessages returns when OnDestroy posts WM_QUIT.
    w=MainWindow()
    win32gui.PumpMessages()
if __name__=='__main__':
    main()
| {
"repo_name": "ntuecon/server",
"path": "pyenv/Lib/site-packages/win32comext/shell/demos/explorer_browser.py",
"copies": "4",
"size": "4864",
"license": "bsd-3-clause",
"hash": -7940918099554217000,
"line_mean": 39.5726495726,
"line_max": 127,
"alpha_frac": 0.6136924342,
"autogenerated": false,
"ratio": 3.627143922445936,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.005130845435981482,
"num_lines": 117
} |
# A sample originally provided by Richard Bell, and modified by Mark Hammond.
# This sample demonstrates how to use COM events in a free-threaded world.
# In this world, there is no need to marshall calls across threads, so
# no message loops are needed at all. This means regular cross-thread
# sychronization can be used. In this sample we just wait on win32 event
# objects.
# See also ieEventsApartmentThreaded.py for how to do this in an
# aparment-threaded world, where thread-marshalling complicates things.
# NOTE: This example uses Internet Explorer, but it should not be considerd
# a "best-practices" for writing against IE events, but for working with
# events in general. For example:
# * The first OnDocumentComplete event is not a reliable indicator that the
# URL has completed loading
# * As we are demonstrating the most efficient way of handling events, when
# running this sample you will see an IE Windows briefly appear, but
# vanish without ever being repainted.
import sys
sys.coinit_flags=0 # specify free threading
import os
import win32api
import win32event
import win32com.client
import pythoncom
import time
# The print statements indicate that COM has actually started another thread
# and will deliver the events to that thread (ie, the events do not actually
# fire on our main thread.
class ExplorerEvents:
    """Event sink for InternetExplorer.Application (free-threaded demo).

    Events arrive on a COM-created thread; each handler signals a single
    shared win32 event that the main thread waits on.
    """
    def __init__(self):
        # We reuse this event for all events (auto-reset, initially unset).
        self.event = win32event.CreateEvent(None, 0, 0, None)
    def OnDocumentComplete(self,
                           pDisp=pythoncom.Empty,
                           URL=pythoncom.Empty):
        #
        # Caution: Since the main thread and events thread(s) are different
        # it may be necessary to serialize access to shared data.  Because
        # this is a simple test case, that is not required here.  Your
        # situation may be different.  Caveat programmer.
        #
        thread = win32api.GetCurrentThreadId()
        print("OnDocumentComplete event processed on thread %d"%thread)
        # Set the event our main thread is waiting on.
        win32event.SetEvent(self.event)
    def OnQuit(self):
        """Fired when IE quits; wakes the waiting main thread."""
        thread = win32api.GetCurrentThreadId()
        print("OnQuit event processed on thread %d"%thread)
        win32event.SetEvent(self.event)
def TestExplorerEvents():
    """Drive IE through a navigate/quit cycle, waiting on COM events.

    Free-threaded, so no message pump is needed - plain win32 event
    waits suffice.
    """
    iexplore = win32com.client.DispatchWithEvents(
        "InternetExplorer.Application", ExplorerEvents)
    thread = win32api.GetCurrentThreadId()
    print('TestExplorerEvents created IE object on thread %d'%thread)
    iexplore.Visible = 1
    try:
        iexplore.Navigate(win32api.GetFullPathName('..\\readme.htm'))
    except pythoncom.com_error as details:
        print("Warning - could not open the test HTML file", details)
    # In this free-threaded example, we can simply wait until an event has
    # been set - we will give it 2 seconds before giving up.
    # (iexplore.event is the win32 event created in ExplorerEvents.__init__;
    # DispatchWithEvents exposes handler attributes on the dispatch object.)
    rc = win32event.WaitForSingleObject(iexplore.event, 2000)
    if rc != win32event.WAIT_OBJECT_0:
        print("Document load event FAILED to fire!!!")
    iexplore.Quit()
    # Now we can do the same thing to wait for exit!
    # Although Quit generates events, in this free-threaded world we
    # do *not* need to run any message pumps.
    rc = win32event.WaitForSingleObject(iexplore.event, 2000)
    if rc != win32event.WAIT_OBJECT_0:
        print("OnQuit event FAILED to fire!!!")
    iexplore = None
    print("Finished the IE event sample!")
if __name__=='__main__':
    TestExplorerEvents()
| {
"repo_name": "SublimeText/Pywin32",
"path": "lib/x64/win32com/demos/eventsFreeThreaded.py",
"copies": "10",
"size": "3527",
"license": "bsd-3-clause",
"hash": 6423254590164878000,
"line_mean": 39.0795454545,
"line_max": 77,
"alpha_frac": 0.7039977318,
"autogenerated": false,
"ratio": 3.9363839285714284,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.008342276327717968,
"num_lines": 88
} |
# A sample originally provided by Richard Bell, and modified by Mark Hammond.
# This sample demonstrates how to use COM events in an aparment-threaded
# world. In this world, COM itself ensures that all calls to and events
# from an object happen on the same thread that created the object, even
# if they originated from different threads. For this cross-thread
# marshalling to work, this main thread *must* run a "message-loop" (ie,
# a loop fetching and dispatching Windows messages). Without such message
# processing, dead-locks can occur.
# See also eventsFreeThreaded.py for how to do this in a free-threaded
# world where these marshalling considerations do not exist.
# NOTE: This example uses Internet Explorer, but it should not be considerd
# a "best-practices" for writing against IE events, but for working with
# events in general. For example:
# * The first OnDocumentComplete event is not a reliable indicator that the
# URL has completed loading
# * As we are demonstrating the most efficient way of handling events, when
# running this sample you will see an IE Windows briefly appear, but
# vanish without ever being repainted.
import sys
import os
import win32com.client
import win32api
import win32event
# sys.coinit_flags not set, so pythoncom initializes apartment-threaded.
import pythoncom
import time
class ExplorerEvents:
    """Event sink for InternetExplorer.Application (apartment-threaded demo).

    COM marshals events back to the creating thread; each handler signals
    a single shared win32 event the main loop polls while pumping messages.
    """
    def __init__(self):
        # One auto-reset event reused for all notifications.
        self.event = win32event.CreateEvent(None, 0, 0, None)
    def OnDocumentComplete(self,
                           pDisp=pythoncom.Empty,
                           URL=pythoncom.Empty):
        thread = win32api.GetCurrentThreadId()
        print("OnDocumentComplete event processed on thread %d"%thread)
        # Set the event our main thread is waiting on.
        win32event.SetEvent(self.event)
    def OnQuit(self):
        """Fired when IE quits; wakes the waiting main thread."""
        thread = win32api.GetCurrentThreadId()
        print("OnQuit event processed on thread %d"%thread)
        win32event.SetEvent(self.event)
def WaitWhileProcessingMessages(event, timeout = 2):
    """Pump Windows messages until *event* is signalled or *timeout* expires.

    A message loop is required here because this sample is apartment
    threaded: COM delivers the cross-thread events via posted messages.

    Args:
        event: a win32 event handle to wait on.
        timeout: maximum seconds to wait (default 2).
    Returns:
        True if the event was signalled, False if the timeout expired.
    """
    # Bug fix: time.clock() was deprecated in Python 3.3 and removed in
    # 3.8; time.perf_counter() is the documented replacement for
    # measuring elapsed intervals.
    start = time.perf_counter()
    while True:
        # Wake 4 times a second - we can't just specify the
        # full timeout here, as then it would reset for every
        # message we process.
        rc = win32event.MsgWaitForMultipleObjects( (event,), 0,
                                                   250,
                                                   win32event.QS_ALLEVENTS)
        if rc == win32event.WAIT_OBJECT_0:
            # event signalled - stop now!
            return True
        if (time.perf_counter() - start) > timeout:
            # Timeout expired.
            return False
        # must be a message - dispatch it and keep waiting.
        pythoncom.PumpWaitingMessages()
def TestExplorerEvents():
    """Drive IE through a navigate/quit cycle, pumping messages while
    waiting for each COM event (apartment-threaded requirement)."""
    iexplore = win32com.client.DispatchWithEvents(
        "InternetExplorer.Application", ExplorerEvents)
    thread = win32api.GetCurrentThreadId()
    print('TestExplorerEvents created IE object on thread %d'%thread)
    iexplore.Visible = 1
    try:
        iexplore.Navigate(win32api.GetFullPathName('..\\readme.htm'))
    except pythoncom.com_error as details:
        print("Warning - could not open the test HTML file", details)
    # Wait for the event to be signalled while pumping messages.
    # (iexplore.event is the win32 event created in ExplorerEvents.__init__.)
    if not WaitWhileProcessingMessages(iexplore.event):
        print("Document load event FAILED to fire!!!")
    iexplore.Quit()
    #
    # Give IE a chance to shutdown, else it can get upset on fast machines.
    # Note, Quit generates events.  Although this test does NOT catch them
    # it is NECESSARY to pump messages here instead of a sleep so that the Quit
    # happens properly!
    if not WaitWhileProcessingMessages(iexplore.event):
        print("OnQuit event FAILED to fire!!!")
    iexplore = None
if __name__=='__main__':
    TestExplorerEvents()
| {
"repo_name": "sserrot/champion_relationships",
"path": "venv/Lib/site-packages/win32com/demos/eventsApartmentThreaded.py",
"copies": "10",
"size": "3752",
"license": "mit",
"hash": -326962401108284600,
"line_mean": 38.914893617,
"line_max": 79,
"alpha_frac": 0.6876332623,
"autogenerated": false,
"ratio": 4.030075187969925,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004417058283668872,
"num_lines": 94
} |
# A sample originally provided by Richard Bell, and modified by Mark Hammond.
# This sample demonstrates how to use COM events in a free-threaded world.
# In this world, there is no need to marshall calls across threads, so
# no message loops are needed at all. This means regular cross-thread
# sychronization can be used. In this sample we just wait on win32 event
# objects.
# See also ieEventsApartmentThreaded.py for how to do this in an
# aparment-threaded world, where thread-marshalling complicates things.
# NOTE: This example uses Internet Explorer, but it should not be considerd
# a "best-practices" for writing against IE events, but for working with
# events in general. For example:
# * The first OnDocumentComplete event is not a reliable indicator that the
# URL has completed loading
# * As we are demonstrating the most efficient way of handling events, when
# running this sample you will see an IE Windows briefly appear, but
# vanish without ever being repainted.
import sys
sys.coinit_flags=0 # specify free threading
import os
import win32api
import win32event
import win32com.client
import pythoncom
import time
# The print statements indicate that COM has actually started another thread
# and will deliver the events to that thread (ie, the events do not actually
# fire on our main thread.
class ExplorerEvents:
    """Event sink for InternetExplorer.Application (free-threaded demo).

    (Python 2 source - print statements are intentional.)  Events arrive
    on a COM-created thread; each handler signals a single shared win32
    event that the main thread waits on.
    """
    def __init__(self):
        # We reuse this event for all events (auto-reset, initially unset).
        self.event = win32event.CreateEvent(None, 0, 0, None)
    def OnDocumentComplete(self,
                           pDisp=pythoncom.Empty,
                           URL=pythoncom.Empty):
        #
        # Caution: Since the main thread and events thread(s) are different
        # it may be necessary to serialize access to shared data.  Because
        # this is a simple test case, that is not required here.  Your
        # situation may be different.  Caveat programmer.
        #
        thread = win32api.GetCurrentThreadId()
        print "OnDocumentComplete event processed on thread %d"%thread
        # Set the event our main thread is waiting on.
        win32event.SetEvent(self.event)
    def OnQuit(self):
        """Fired when IE quits; wakes the waiting main thread."""
        thread = win32api.GetCurrentThreadId()
        print "OnQuit event processed on thread %d"%thread
        win32event.SetEvent(self.event)
def TestExplorerEvents():
    """Drive IE through a navigate/quit cycle, waiting on COM events.

    Free-threaded, so no message pump is needed - plain win32 event
    waits suffice.  (Python 2 source.)
    """
    iexplore = win32com.client.DispatchWithEvents(
        "InternetExplorer.Application", ExplorerEvents)
    thread = win32api.GetCurrentThreadId()
    print 'TestExplorerEvents created IE object on thread %d'%thread
    iexplore.Visible = 1
    try:
        iexplore.Navigate(win32api.GetFullPathName('..\\readme.htm'))
    except pythoncom.com_error, details:
        print "Warning - could not open the test HTML file", details
    # In this free-threaded example, we can simply wait until an event has
    # been set - we will give it 2 seconds before giving up.
    rc = win32event.WaitForSingleObject(iexplore.event, 2000)
    if rc != win32event.WAIT_OBJECT_0:
        print "Document load event FAILED to fire!!!"
    iexplore.Quit()
    # Now we can do the same thing to wait for exit!
    # Although Quit generates events, in this free-threaded world we
    # do *not* need to run any message pumps.
    rc = win32event.WaitForSingleObject(iexplore.event, 2000)
    if rc != win32event.WAIT_OBJECT_0:
        print "OnQuit event FAILED to fire!!!"
    iexplore = None
    print "Finished the IE event sample!"
if __name__=='__main__':
    TestExplorerEvents()
| {
"repo_name": "ntuecon/server",
"path": "pyenv/Lib/site-packages/win32com/demos/eventsFreeThreaded.py",
"copies": "4",
"size": "3606",
"license": "bsd-3-clause",
"hash": -4151818755322583000,
"line_mean": 38.9772727273,
"line_max": 77,
"alpha_frac": 0.6880199667,
"autogenerated": false,
"ratio": 3.9452954048140043,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00791912875343679,
"num_lines": 88
} |
# A sample originally provided by Richard Bell, and modified by Mark Hammond.
# This sample demonstrates how to use COM events in an aparment-threaded
# world. In this world, COM itself ensures that all calls to and events
# from an object happen on the same thread that created the object, even
# if they originated from different threads. For this cross-thread
# marshalling to work, this main thread *must* run a "message-loop" (ie,
# a loop fetching and dispatching Windows messages). Without such message
# processing, dead-locks can occur.
# See also eventsFreeThreaded.py for how to do this in a free-threaded
# world where these marshalling considerations do not exist.
# NOTE: This example uses Internet Explorer, but it should not be considerd
# a "best-practices" for writing against IE events, but for working with
# events in general. For example:
# * The first OnDocumentComplete event is not a reliable indicator that the
# URL has completed loading
# * As we are demonstrating the most efficient way of handling events, when
# running this sample you will see an IE Windows briefly appear, but
# vanish without ever being repainted.
import sys
import os
import win32com.client
import win32api
import win32event
# sys.coinit_flags not set, so pythoncom initializes apartment-threaded.
import pythoncom
import time
class ExplorerEvents:
    """Event sink for InternetExplorer.Application (apartment-threaded demo).

    (Python 2 source.)  COM marshals events back to the creating thread;
    each handler signals a shared win32 event the main loop polls while
    pumping messages.
    """
    def __init__(self):
        # One auto-reset event reused for all notifications.
        self.event = win32event.CreateEvent(None, 0, 0, None)
    def OnDocumentComplete(self,
                           pDisp=pythoncom.Empty,
                           URL=pythoncom.Empty):
        thread = win32api.GetCurrentThreadId()
        print "OnDocumentComplete event processed on thread %d"%thread
        # Set the event our main thread is waiting on.
        win32event.SetEvent(self.event)
    def OnQuit(self):
        """Fired when IE quits; wakes the waiting main thread."""
        thread = win32api.GetCurrentThreadId()
        print "OnQuit event processed on thread %d"%thread
        win32event.SetEvent(self.event)
def WaitWhileProcessingMessages(event, timeout = 2):
    """Pump Windows messages until *event* is signalled or *timeout* expires.

    Returns True if signalled, False on timeout.  (Python 2 source -
    time.clock() is valid here; it was removed in Python 3.8.)
    """
    start = time.clock()
    while True:
        # Wake 4 times a second - we can't just specify the
        # full timeout here, as then it would reset for every
        # message we process.
        rc = win32event.MsgWaitForMultipleObjects( (event,), 0,
                                                   250,
                                                   win32event.QS_ALLEVENTS)
        if rc == win32event.WAIT_OBJECT_0:
            # event signalled - stop now!
            return True
        if (time.clock() - start) > timeout:
            # Timeout expired.
            return False
        # must be a message - dispatch it and keep waiting.
        pythoncom.PumpWaitingMessages()
def TestExplorerEvents():
    """Drive IE through a navigate/quit cycle, pumping messages while
    waiting for each COM event (apartment-threaded requirement).
    (Python 2 source.)"""
    iexplore = win32com.client.DispatchWithEvents(
        "InternetExplorer.Application", ExplorerEvents)
    thread = win32api.GetCurrentThreadId()
    print 'TestExplorerEvents created IE object on thread %d'%thread
    iexplore.Visible = 1
    try:
        iexplore.Navigate(win32api.GetFullPathName('..\\readme.htm'))
    except pythoncom.com_error, details:
        print "Warning - could not open the test HTML file", details
    # Wait for the event to be signalled while pumping messages.
    if not WaitWhileProcessingMessages(iexplore.event):
        print "Document load event FAILED to fire!!!"
    iexplore.Quit()
    #
    # Give IE a chance to shutdown, else it can get upset on fast machines.
    # Note, Quit generates events.  Although this test does NOT catch them
    # it is NECESSARY to pump messages here instead of a sleep so that the Quit
    # happens properly!
    if not WaitWhileProcessingMessages(iexplore.event):
        print "OnQuit event FAILED to fire!!!"
    iexplore = None
if __name__=='__main__':
    TestExplorerEvents()
| {
"repo_name": "ntuecon/server",
"path": "pyenv/Lib/site-packages/win32com/demos/eventsApartmentThreaded.py",
"copies": "4",
"size": "3838",
"license": "bsd-3-clause",
"hash": -2874611046484132400,
"line_mean": 38.829787234,
"line_max": 79,
"alpha_frac": 0.6717040125,
"autogenerated": false,
"ratio": 4.044257112750263,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004291123830931732,
"num_lines": 94
} |
"""A sample script module for ScriptWdg.ScriptModuleWdg.
Counts up to a user-specified number with a user-specified
delay between each count.
The widgets that can be adjusted to control script behavior
while the script runs (a very good idea when offering widgets).
History:
2004-06-30 Rowen
"""
import RO.Wdg
def init(sr):
    """Run once when the script runner window is created.

    Builds the two control widgets (iteration count and per-step delay)
    and stashes them on sr.globals so run() can read them live.
    """
    gr = RO.Wdg.Gridder(sr.master)
    # Number of counts the script will perform.
    niterWdg = RO.Wdg.IntEntry(
        sr.master,
        minValue = 0,
        maxValue = 99,
        defValue = 10,
        helpText = "number of iterations",
    )
    gr.gridWdg("# Iter", niterWdg)
    # Pause between counts, in seconds.
    delayWdg = RO.Wdg.FloatEntry(
        sr.master,
        minValue = 0,
        maxValue = 99,
        defValue = 0.5,
        defFormat = "%.1f",
        helpText = "delay between each iteration",
    )
    gr.gridWdg("Delay", delayWdg, "sec")
    # Shared with run() via the script runner's globals namespace.
    sr.globals.niterWdg = niterWdg
    sr.globals.delayWdg = delayWdg
def run(sr):
    """The main script, run when the Start button is pushed.

    Both widgets are re-read on every pass so the user can adjust the
    iteration count and delay while the script is running.  Note that
    this makes it easy to accidentally set the delay to 0 (finishing
    the script instantly) while adjusting it - a trap best avoided in
    real scripts.
    """
    count = 0
    # Re-query the iteration widget each pass: the target may change live.
    while count < sr.globals.niterWdg.getNum():
        count += 1
        sr.showMsg(str(count))
        # Delay widget is in seconds; waitMS wants milliseconds.
        yield sr.waitMS(sr.globals.delayWdg.getNum() * 1000)
| {
"repo_name": "r-owen/stui",
"path": "TUI/Base/Wdg/TestScriptWdg.py",
"copies": "1",
"size": "1502",
"license": "bsd-3-clause",
"hash": -3928714615267388400,
"line_mean": 26.8148148148,
"line_max": 63,
"alpha_frac": 0.6358189081,
"autogenerated": false,
"ratio": 3.6723716381418092,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4808190546241809,
"avg_score": null,
"num_lines": null
} |
""" A sample script to try out coarse-graining in dimension 16
down to dimension 4. Feel free to run, modify, and take
snippets of it to use for other things!
Author: Olivia Di Matteo, 2017
"""
from pynitefields import *
from balthasar import *
import numpy as np
from math import sqrt
# Generate the finite fields F16 and F4
f16 = GaloisField(2, 4, [1, 1, 0, 0, 1])
f16.to_sdb([3, 7, 12, 13])
f4 = GaloisField(2, 2, [1, 1, 1])
f4.to_sdb([1, 2])
# Generate the MUBs
m = MUBs(f16)
# Generate the Wigner function
wf = WignerFunction(m)
# Create a state as a numpy vector
# In this case we choose two Bell states tensored together
# Make sure it's normalized!
s = np.zeros((1, 16))
s[0][0] = 1.0 /(2)
s[0][3] = 1.0/(2)
s[0][12] = 1.0/(2)
s[0][-1] = 1.0 / (2)
# Plot the Wigner function in a new window
wf.plot(s)
# Or plot it in a file
# wf.plot(s, 'wf_doublebell.png')
# Create a coarse-grained Wigner function using the 'general' method
cwf = CoarseWignerFunction(wf, f4)
# Print out the matrix verion and plot it
print(cwf.compute_wf(s))
cwf.plot(s)
# Print out the surviving displacement operators for incomplete tomo
from pprint import pprint
pprint(cwf.coarse_D)
# Coarse grain again by cosetting w.r.t. the subfield copy of F4 in F16
cwf_sub = CoarseWignerFunction(wf, f4, mode = 'subfield')
cwf_sub.plot(s)
pprint(cwf_sub.coarse_D)
| {
"repo_name": "glassnotes/Balthasar",
"path": "scripts/dim16.py",
"copies": "1",
"size": "1368",
"license": "bsd-3-clause",
"hash": -2578045299374809600,
"line_mean": 23.8727272727,
"line_max": 72,
"alpha_frac": 0.6915204678,
"autogenerated": false,
"ratio": 2.5958254269449714,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8759921908114887,
"avg_score": 0.005484797326016902,
"num_lines": 55
} |
""" A sample script to try out coarse-graining in dimension 256
down to dimension 16. Feel free to run, modify, and take
snippets of it to use for other things!
As dimension 256 is rather large, we show here an example
where we do not compute the full MUB matrix table or
values of any Wigner functions, but rather just coarse-grain
to obtain a subset of the displacement operators.
The obtain of this scripts is a series of lists indexed by
a number - these numbers are the slopes of the rays in which
these surviving MUBs should be put in the coarse-grained phase-space.
Note that there are horizontal/vertical cases, as well as
precisely 15 intermediate cases (those corresponding to the
elements of the subfield F16 in F256). See our recent
paper on the topic for more details.
Author: Olivia Di Matteo, 2017
"""
from pynitefields import *
from balthasar import *
import numpy as np
from math import sqrt
# Generate the finite fields F16 and F4
f256 = GaloisField(2, 8, [1, 0, 1, 1, 1, 0, 0, 0, 1])
f256.to_sdb([5, 18, 30, 44, 106, 135, 147, 249])
f16 = GaloisField(2, 4, [1, 1, 0, 0, 1])
f16.to_sdb([3, 7, 12, 13])
# Generate the MUBs
# Will print out a warning because there are no matrices.
m = MUBs(f256, matrix = False)
# Generate the Wigner function
wf = WignerFunction(m)
# Create a coarse-grained Wigner function using the 'general' method
cwf = CoarseWignerFunction(wf, f16)
# Print out the surviving displacement operators for incomplete tomo
from pprint import pprint
pprint(cwf.coarse_D)
# Coarse grain again by cosetting w.r.t. the subfield copy of F16 in F256
cwf_sub = CoarseWignerFunction(wf, f16, mode = 'subfield')
pprint(cwf_sub.coarse_D)
| {
"repo_name": "glassnotes/Balthasar",
"path": "scripts/dim256.py",
"copies": "1",
"size": "1735",
"license": "bsd-3-clause",
"hash": 3244013891544819000,
"line_mean": 34.4081632653,
"line_max": 73,
"alpha_frac": 0.725648415,
"autogenerated": false,
"ratio": 3.2674199623352167,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9468342589125087,
"avg_score": 0.004945157642025708,
"num_lines": 49
} |
""" A sample script to try out coarse-graining in dimension 9
down to dimension 3. Feel free to run, modify, and take
snippets of it to use for other things!
Qudit coarse-graining currently works with a few hiccups
in terms of the plotting. I'll work on fixing these
eventually, but the mechanics of coarse-graining itself work!
Author: Olivia Di Matteo, 2017
"""
from pynitefields import *
from balthasar import *
import numpy as np
from math import sqrt
# Generate the finite fields F9 and F3
f9 = GaloisField(3, 2, [2, 1, 1])
# The self-dual basis in dimension 9 is *almost* self-dual
f9.to_sdb([2, 4])
f3 = GaloisField(3)
# Generate the MUBs
m = MUBs(f9)
# Generate the Wigner function
wf = WignerFunction(m)
# Create a state as a numpy vector
# Make sure it's normalized!
s = np.zeros((1, 9))
s[0][0] = 1.0 / sqrt(3)
s[0][4] = 1.0 / sqrt(3)
# Plot the Wigner function in a new window
wf.plot(s)
# Create a coarse-grained Wigner function using the 'general' method
cwf = CoarseWignerFunction(wf, f3)
# Print out the matrix verion and plot it
print(cwf.compute_wf(s))
cwf.plot(s)
# Print out the surviving displacement operators for incomplete tomo
from pprint import pprint
pprint(cwf.coarse_D)
# Coarse grain again by cosetting w.r.t. the subfield copy of F3 in F9
cwf_sub = CoarseWignerFunction(wf, f3, mode = 'subfield')
cwf_sub.plot(s)
pprint(cwf_sub.coarse_D)
| {
"repo_name": "glassnotes/Balthasar",
"path": "scripts/dim9.py",
"copies": "1",
"size": "1414",
"license": "bsd-3-clause",
"hash": 8175881246309898000,
"line_mean": 25.1851851852,
"line_max": 71,
"alpha_frac": 0.7171145686,
"autogenerated": false,
"ratio": 2.8336673346693386,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4050781903269339,
"avg_score": null,
"num_lines": null
} |
# A sample shell column provider
# Mainly ported from MSDN article:
# Using Shell Column Handlers for Detailed File Information,
# Raymond Chen, Microsoft Corporation, February 2000
#
# To demostrate:
# * Execute this script to register the namespace.
# * Open Windows Explorer
# * Right-click an explorer column header - select "More"
# * Locate column 'pyc size' or 'pyo size', and add it to the view.
# This handler is providing that column data.
import sys, os, stat
import pythoncom
from win32com.shell import shell, shellcon
import commctrl
import winerror
from win32com.server.util import wrap
from pywintypes import IID
IPersist_Methods = ["GetClassID"]
IColumnProvider_Methods = IPersist_Methods + \
["Initialize", "GetColumnInfo", "GetItemData"]
class ColumnProvider:
    """Shell column handler adding 'pyc size'/'pyo size' Explorer columns.

    For each .py/.pyw file Explorer asks about, reports the on-disk size
    of the sibling compiled .pyc (pid 0) or .pyo (pid 1) file, or None
    when no such file exists.
    """
    _reg_progid_ = "Python.ShellExtension.ColumnProvider"
    _reg_desc_ = "Python Sample Shell Extension (Column Provider)"
    _reg_clsid_ = IID("{0F14101A-E05E-4070-BD54-83DFA58C3D68}")
    _com_interfaces_ = [pythoncom.IID_IPersist,
                        shell.IID_IColumnProvider,
                        ]
    _public_methods_ = IColumnProvider_Methods
    # IPersist
    def GetClassID(self):
        """Return the CLSID identifying this COM object."""
        return self._reg_clsid_
    # IColumnProvider
    def Initialize(self, colInit):
        """Called by the shell with (flags, reserved, folder name)."""
        flags, reserved, name = colInit
        print("ColumnProvider initializing for file", name)
    def GetColumnInfo(self, index):
        """Describe column *index*; return None once all columns are listed."""
        # We support exactly 2 columns - 'pyc size' and 'pyo size'
        if index in [0,1]:
            # As per the MSDN sample, use our CLSID as the fmtid
            if index==0:
                ext = ".pyc"
            else:
                ext = ".pyo"
            title = ext + " size"
            description = "Size of compiled %s file" % ext
            col_id = (self._reg_clsid_, # fmtid
                      index) # pid
            col_info = (
                col_id, # scid
                pythoncom.VT_I4, # vt
                commctrl.LVCFMT_RIGHT, # fmt
                20, #cChars
                shellcon.SHCOLSTATE_TYPE_INT | \
                shellcon.SHCOLSTATE_SECONDARYUI, # csFlags
                title,
                description)
            return col_info
        return None # Indicate no more columns.
    def GetItemData(self, colid, colData):
        """Return the value for one (column, file) pair, or None."""
        fmt_id, pid = colid
        # Bug fix: the original line 'fmt_id==self._reg_clsid_' computed a
        # comparison and discarded the result.  Actually reject requests
        # for columns this provider does not own.
        if fmt_id != self._reg_clsid_:
            return None
        flags, attr, reserved, ext, name = colData
        if ext.lower() not in [".py", ".pyw"]:
            return None
        if pid==0:
            ext = ".pyc"
        else:
            ext = ".pyo"
        # Size of the compiled sibling, e.g. foo.py -> foo.pyc
        check_file = os.path.splitext(name)[0] + ext
        try:
            st = os.stat(check_file)
            return st[stat.ST_SIZE]
        except OSError:
            # No compiled file - column stays blank.
            return None
def DllRegisterServer():
    """Register this extension under the shell's ColumnHandlers key."""
    import winreg
    # Column providers live under Folder\ShellEx\ColumnHandlers\<CLSID>.
    subkey = "Folder\\ShellEx\\ColumnHandlers\\" + str(ColumnProvider._reg_clsid_)
    key = winreg.CreateKey(winreg.HKEY_CLASSES_ROOT, subkey)
    winreg.SetValueEx(key, None, 0, winreg.REG_SZ, ColumnProvider._reg_desc_)
    print(ColumnProvider._reg_desc_, "registration complete.")
def DllUnregisterServer():
    """Remove the ColumnHandlers registration; tolerate it being absent."""
    import winreg
    import errno
    subkey = "Folder\\ShellEx\\ColumnHandlers\\" + str(ColumnProvider._reg_clsid_)
    try:
        winreg.DeleteKey(winreg.HKEY_CLASSES_ROOT, subkey)
    except WindowsError as details:
        # Never registered / already removed - only re-raise real errors.
        if details.errno != errno.ENOENT:
            raise
    print(ColumnProvider._reg_desc_, "unregistration complete.")
# When run as a script, register the COM server (pass --unregister to remove).
if __name__=='__main__':
    from win32com.server import register
    register.UseCommandLine(ColumnProvider,
                finalize_register = DllRegisterServer,
                finalize_unregister = DllUnregisterServer)
| {
"repo_name": "SublimeText/Pywin32",
"path": "lib/x64/win32comext/shell/demos/servers/column_provider.py",
"copies": "10",
"size": "3883",
"license": "bsd-3-clause",
"hash": 2189172933603060500,
"line_mean": 35.980952381,
"line_max": 77,
"alpha_frac": 0.5804790111,
"autogenerated": false,
"ratio": 3.9907502569373072,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.009396063752299117,
"num_lines": 105
} |
# A sample shell copy hook.
# To demonstrate:
# * Execute this script to register the context menu.
# * Open Windows Explorer
# * Attempt to move or copy a directory.
# * Note our hook's dialog is displayed.
import sys, os
import pythoncom
from win32com.shell import shell, shellcon
import win32gui
import win32con
import winerror
# Our shell extension.
class ShellExtension:
    """Shell copy hook: asks the user to approve each copy/move operation."""
    _reg_progid_ = "Python.ShellExtension.CopyHook"
    _reg_desc_ = "Python Sample Shell Extension (copy hook)"
    _reg_clsid_ = "{1845b6ba-2bbd-4197-b930-46d8651497c1}"
    _com_interfaces_ = [shell.IID_ICopyHook]
    _public_methods_ = ["CopyCallBack"]

    def CopyCallBack(self, hwnd, func, flags,
                     srcName, srcAttr, destName, destAttr):
        # The shell interprets our return value:
        #   IDYES    - allow the operation
        #   IDNO     - skip this folder but continue any approved batch ops
        #   IDCANCEL - abort this and all pending operations
        print("CopyCallBack", hwnd, func, flags, srcName, srcAttr, destName, destAttr)
        answer = win32gui.MessageBox(hwnd, "Allow operation?", "CopyHook",
                                     win32con.MB_YESNO)
        return answer
def DllRegisterServer():
    """Register the copy hook for directories and for all file classes."""
    import winreg
    # The hook must appear under CopyHookHandlers for both the
    # 'directory' class and the wildcard ('*') file class.
    for root in ("directory", "*"):
        key = winreg.CreateKey(winreg.HKEY_CLASSES_ROOT,
                               root + "\\shellex\\CopyHookHandlers\\" +
                               ShellExtension._reg_desc_)
        winreg.SetValueEx(key, None, 0, winreg.REG_SZ, ShellExtension._reg_clsid_)
    print(ShellExtension._reg_desc_, "registration complete.")
def DllUnregisterServer():
    """Remove both CopyHookHandlers registrations; missing keys are fine."""
    import winreg
    import errno
    for root in ("directory", "*"):
        try:
            winreg.DeleteKey(winreg.HKEY_CLASSES_ROOT,
                             root + "\\shellex\\CopyHookHandlers\\" +
                             ShellExtension._reg_desc_)
        except WindowsError as details:
            # Key was never created / already deleted.
            if details.errno != errno.ENOENT:
                raise
    print(ShellExtension._reg_desc_, "unregistration complete.")
# When run as a script, register the COM server (pass --unregister to remove).
if __name__=='__main__':
    from win32com.server import register
    register.UseCommandLine(ShellExtension,
                finalize_register = DllRegisterServer,
                finalize_unregister = DllUnregisterServer)
#!/usr/bin/env python
| {
"repo_name": "zooba/PTVS",
"path": "Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/win32comext/shell/demos/servers/copy_hook.py",
"copies": "10",
"size": "2881",
"license": "apache-2.0",
"hash": 4430032873111504400,
"line_mean": 39.5774647887,
"line_max": 157,
"alpha_frac": 0.6282540784,
"autogenerated": false,
"ratio": 4.139367816091954,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9767621894491955,
"avg_score": null,
"num_lines": null
} |
# A sample shell namespace view
# To demonstrate:
# * Execute this script to register the namespace.
# * Open Windows Explorer, and locate the new "Python Path Shell Browser"
# folder off "My Computer"
# * Browse this tree - .py files are shown expandable, with classes and
# methods selectable. Selecting a Python file, or a class/method, will
# display the file using Scintilla.
# Known problems:
# * Classes and methods don't have icons - this is a demo, so we keep it small
# See icon_handler.py for examples of how to work with icons.
#
#
# Notes on PIDLs
# PIDLS are complicated, but fairly well documented in MSDN. If you need to
# do much with these shell extensions, you must understand their concept.
# Here is a short-course, as it applies to this sample:
# A PIDL identifies an item, much in the same way that a filename does
# (however, the shell is not limited to displaying "files").
# An "ItemID" is a single string, each being an item in the hierarchy.
# A "PIDL" is a list of these strings.
# All shell etc functions work with PIDLs, so even in the case where
# an ItemID is conceptually used, a 1-item list is always used.
# Conceptually, think of:
# pidl = pathname.split("\\") # pidl is a list of "parent" items.
# # each item is a string 'item id', but these are ever used directly
# As there is no concept of passing a single item, to open a file using only
# a relative filename, conceptually you would say:
# open_file([filename]) # Pass a single-itemed relative "PIDL"
# and continuing the analogy, a "listdir" type function would return a list
# of single-itemed lists - each list containing the relative PIDL of the child.
#
# Each PIDL entry is a binary string, and may contain any character. For
# PIDLs not created by you, they can not be interpreted - they are just
# blobs. PIDLs created by you (ie, children of your IShellFolder) can
# store and interpret the string however makes most sense for your application.
# (but within PIDL rules - they must be persistable, etc)
# There is no reason that pickled strings, for example, couldn't be used
# as an EntryID.
# This application takes a simple approach - each PIDL is a string of form
# "directory\0directory_name", "file\0file_name" or
# "object\0file_name\0class_name[.method_name"
# The first string in each example is literal (ie, the word 'directory',
# 'file' or 'object', and every other string is variable. We use '\0' as
# a field sep just 'cos we can (and 'cos it can't possibly conflict with the
# string content)
import sys, os
import _thread
import pyclbr
import pythoncom
import win32gui, win32gui_struct, win32api, win32con, winerror
import commctrl
from win32com.shell import shell, shellcon
from win32com.server.util import wrap, NewEnum
from win32com.server.exception import COMException
from win32com.util import IIDToInterfaceName
from pywin.scintilla import scintillacon
# Set this to 1 to cause debug version to be registered and used. A debug
# version will spew output to win32traceutil.
debug=0
if debug:
    import win32traceutil
# markh is toying with an implementation that allows auto reload of a module
# if this attribute exists.
# NOTE(review): presumably consumed by win32com's server machinery - confirm.
com_auto_reload = True
# Helper function to get a system IShellFolder interface, and the PIDL within
# that folder for an existing file/directory.
def GetFolderAndPIDLForPath(filename):
    """Return (IShellFolder, relative-pidl) for an existing file/directory.

    The returned pidl is a single-item list, so it remains a valid PIDL
    for further shell calls (see the PIDL notes at the top of this file).
    """
    desktop = shell.SHGetDesktopFolder()
    cchEaten, pidl, attr = desktop.ParseDisplayName(
        0, None, os.path.abspath(filename))
    # Bind one level at a time until only the leaf item id remains.
    folder = desktop
    while len(pidl) > 1:
        child = pidl.pop(0)
        folder = folder.BindToObject([child], None, shell.IID_IShellFolder)
    return folder, pidl
# A cache of pyclbr module objects, so we only parse a given filename once.
clbr_modules = {}  # path -> dict as returned from pyclbr.readmodule_ex
def get_clbr_for_file(path):
    """Parse *path* with pyclbr, caching so each file is only read once."""
    if path not in clbr_modules:
        dirname, filename = os.path.split(path)
        modname = os.path.splitext(filename)[0]
        clbr_modules[path] = pyclbr.readmodule_ex(modname, [dirname])
    return clbr_modules[path]
# Our COM interfaces.
# Base class for a shell folder.
# All child classes use a simple PIDL of the form:
# "object_type\0object_name[\0extra ...]"
class ShellFolderBase:
    """Common IShellFolder plumbing shared by every folder in this namespace."""
    _com_interfaces_ = [shell.IID_IBrowserFrameOptions,
                        pythoncom.IID_IPersist,
                        shell.IID_IPersistFolder,
                        shell.IID_IShellFolder,
                        ]
    _public_methods_ = (shellcon.IBrowserFrame_Methods +
                        shellcon.IPersistFolder_Methods +
                        shellcon.IShellFolder_Methods)

    def GetFrameOptions(self, mask):
        return 0

    def ParseDisplayName(self, hwnd, reserved, displayName, attr):
        # Not implemented - just trace the call.
        # (should return cchEaten, pidl, attr)
        print("ParseDisplayName", displayName)

    def BindToStorage(self, pidl, bc, iid):
        print("BTS", iid, IIDToInterfaceName(iid))

    def BindToObject(self, pidl, bc, iid):
        # We may be handed a chain of relative PIDLs here, but each of our
        # item ids carries the fully-qualified name anyway, so only the
        # last one matters.
        typ, extra = pidl[-1].split('\0', 1)
        dispatch = {"directory": ShellFolderDirectory,
                    "file": ShellFolderFile,
                    "object": ShellFolderObject}
        try:
            klass = dispatch[typ]
        except KeyError:
            raise RuntimeError("What is " + repr(typ))
        return wrap(klass(extra), iid, useDispatcher=(debug > 0))
# A ShellFolder for an object with CHILDREN on the file system
# Note that this means our "File" folder is *not* a 'FileSystem' folder,
# as it's children (functions and classes) are not on the file system.
#
class ShellFolderFileSystem(ShellFolderBase):
    """Base for folders whose children live on the file system.

    Most operations are delegated to the shell's own IShellFolder for the
    underlying path.
    """
    def _GetFolderAndPIDLForPIDL(self, my_idl):
        # Our item ids are "type\0fully-qualified-name"; use the name to
        # get the shell's folder/pidl for the real file-system item.
        typ, name = my_idl[0].split('\0')
        return GetFolderAndPIDLForPath(name)
    # Interface methods
    def CompareIDs(self, param, id1, id2):
        # Simple ordinal comparison of the raw item-id strings.
        if id1 < id2:
            return -1
        if id1 == id2:
            return 0
        return 1
    def GetUIObjectOf(self, hwndOwner, pidls, iid, inout):
        # delegate to the shell.
        assert len(pidls)==1, "oops - arent expecting more than one!"
        pidl = pidls[0]
        folder, child_pidl = self._GetFolderAndPIDLForPIDL(pidl)
        try:
            inout, ret = folder.GetUIObjectOf(hwndOwner, [child_pidl], iid,
                                              inout, iid)
        except pythoncom.com_error as xxx_todo_changeme:
            # Re-raise as a server-side COM exception with the same hresult.
            (hr, desc, exc, arg) = xxx_todo_changeme.args
            raise COMException(hresult=hr)
        return inout, ret
        # return object of IID
    def GetDisplayNameOf(self, pidl, flags):
        # delegate to the shell.
        folder, child_pidl = self._GetFolderAndPIDLForPIDL(pidl)
        ret = folder.GetDisplayNameOf(child_pidl, flags)
        return ret
    def GetAttributesOf(self, pidls, attrFlags):
        # AND the real file-system attributes of each item with our extras.
        ret_flags = -1
        for pidl in pidls:
            pidl = pidl[0] # ??
            typ, name = pidl.split('\0')
            flags = shellcon.SHGFI_ATTRIBUTES
            rc, info = shell.SHGetFileInfo(name, 0, flags)
            hIcon, iIcon, dwAttr, name, typeName = info
            # All our items, even files, have sub-items
            extras = shellcon.SFGAO_HASSUBFOLDER | \
                     shellcon.SFGAO_FOLDER | \
                     shellcon.SFGAO_FILESYSANCESTOR | \
                     shellcon.SFGAO_BROWSABLE
            ret_flags &= (dwAttr | extras)
        return ret_flags
class ShellFolderDirectory(ShellFolderFileSystem):
    """A folder whose children are subdirectories and Python source files."""

    def __init__(self, path):
        self.path = os.path.abspath(path)

    def CreateViewObject(self, hwnd, iid):
        # The shell's own view is fine for a plain directory - delegate.
        folder, child_pidl = GetFolderAndPIDLForPath(self.path)
        return folder.CreateViewObject(hwnd, iid)

    def EnumObjects(self, hwndOwner, flags):
        # Children: every subdirectory, plus .py/.pyw files.
        pidls = []
        for entry in os.listdir(self.path):
            fqn = os.path.join(self.path, entry)
            if os.path.isdir(fqn):
                pidls.append(["directory\0" + fqn])
            elif os.path.splitext(entry)[1] in (".py", ".pyw"):
                pidls.append(["file\0" + fqn])
        return NewEnum(pidls, iid=shell.IID_IEnumIDList,
                       useDispatcher=(debug > 0))

    def GetDisplayNameOf(self, pidl, flags):
        full_fname = pidl[-1].split('\0')[-1]
        return os.path.split(full_fname)[-1]

    def GetAttributesOf(self, pidls, attrFlags):
        return (shellcon.SFGAO_HASSUBFOLDER | shellcon.SFGAO_FOLDER |
                shellcon.SFGAO_FILESYSANCESTOR | shellcon.SFGAO_BROWSABLE)
# As per comments above, even though this manages a file, it is *not* a
# ShellFolderFileSystem, as the children are not on the file system.
class ShellFolderFile(ShellFolderBase):
    """Folder for a .py file; children are its top-level classes/functions.

    Not a ShellFolderFileSystem: the children are not on the file system.
    """
    def __init__(self, path):
        self.path = os.path.abspath(path)
    def EnumObjects(self, hwndOwner, flags):
        # One child pidl per top-level object found by pyclbr.
        objects = get_clbr_for_file(self.path)
        pidls = []
        for name, ob in objects.items():
            pidls.append( ["object\0" + self.path + "\0" + name] )
        return NewEnum(pidls, iid=shell.IID_IEnumIDList,
                       useDispatcher=(debug>0))
    def GetAttributesOf(self, pidls, attrFlags):
        # AND together the attributes of each requested child.
        ret_flags = -1
        for pidl in pidls:
            assert len(pidl)==1, "Expecting relative pidls"
            pidl = pidl[0]
            typ, filename, obname = pidl.split('\0')
            obs = get_clbr_for_file(filename)
            ob = obs[obname]
            flags = shellcon.SFGAO_BROWSABLE | shellcon.SFGAO_FOLDER | \
                    shellcon.SFGAO_FILESYSANCESTOR
            # Only objects with a 'methods' dict (classes) can be expanded.
            if hasattr(ob, "methods"):
                flags |= shellcon.SFGAO_HASSUBFOLDER
            ret_flags &= flags
        return ret_flags
    def GetDisplayNameOf(self, pidl, flags):
        assert len(pidl)==1, "Expecting relative PIDL"
        typ, fname, obname = pidl[0].split('\0')
        fqname = os.path.splitext(fname)[0] + "." + obname
        if flags & shellcon.SHGDN_INFOLDER:
            ret = obname
        else: # SHGDN_NORMAL is the default
            ret = fqname
        # No need to look at the SHGDN_FOR* modifiers.
        return ret
    def CreateViewObject(self, hwnd, iid):
        # View the source with Scintilla rather than delegating to the shell.
        return wrap(ScintillaShellView(hwnd, self.path), iid, useDispatcher=debug>0)
# A ShellFolder for our Python objects
class ShellFolderObject(ShellFolderBase):
    """Folder for a class (or class.method) inside a Python file.

    The item details are "path\\0class_name[.method_name]".
    """

    def __init__(self, details):
        self.path, details = details.split('\0')
        if details.find(".") > 0:
            self.class_name, self.method_name = details.split(".")
        else:
            self.class_name = details
            self.method_name = None

    def CreateViewObject(self, hwnd, iid):
        # Open the source in Scintilla at the class or method line.
        # (renamed the original local 'object', which shadowed the builtin)
        mod_objects = get_clbr_for_file(self.path)
        class_entry = mod_objects[self.class_name]
        if self.method_name is None:
            lineno = class_entry.lineno
        else:
            lineno = class_entry.methods[self.method_name]
        return wrap(ScintillaShellView(hwnd, self.path, lineno),
                    iid, useDispatcher=debug>0)

    def EnumObjects(self, hwndOwner, flags):
        assert self.method_name is None, "Should not be enuming methods!"
        mod_objects = get_clbr_for_file(self.path)
        class_entry = mod_objects[self.class_name]
        pidls = [["object\0" + self.path + "\0" +
                  self.class_name + "." + func_name]
                 for func_name in class_entry.methods]
        return NewEnum(pidls, iid=shell.IID_IEnumIDList,
                       useDispatcher=(debug > 0))

    def GetDisplayNameOf(self, pidl, flags):
        assert len(pidl) == 1, "Expecting relative PIDL"
        typ, fname, obname = pidl[0].split('\0')
        class_name, method_name = obname.split(".")
        if flags & shellcon.SHGDN_INFOLDER:
            return method_name
        # SHGDN_NORMAL (the default): fully-qualified name.
        # No need to look at the SHGDN_FOR* modifiers.
        return os.path.splitext(fname)[0] + "." + obname

    def GetAttributesOf(self, pidls, attrFlags):
        ret_flags = -1
        for pidl in pidls:
            assert len(pidl) == 1, "Expecting relative pidls"
            flags = (shellcon.SFGAO_BROWSABLE | shellcon.SFGAO_FOLDER |
                     shellcon.SFGAO_FILESYSANCESTOR)
            ret_flags &= flags
        return ret_flags
# The "Root" folder of our namespace. As all children are directories,
# it is derived from ShellFolderFileSystem
# This is the only COM object actually registered and externally created.
class ShellFolderRoot(ShellFolderFileSystem):
    """Root of the namespace: one child folder per directory on sys.path.

    This is the only COM object actually registered and externally created.
    """
    _reg_progid_ = "Python.ShellExtension.Folder"
    _reg_desc_ = "Python Path Shell Browser"
    _reg_clsid_ = "{f6287035-3074-4cb5-a8a6-d3c80e206944}"

    def GetClassID(self):
        return self._reg_clsid_

    def Initialize(self, pidl):
        # The shell hands us our own absolute PIDL; all child items are
        # defined by us (see the PIDL notes at the top of the file).
        self.pidl = pidl

    def CreateViewObject(self, hwnd, iid):
        return wrap(FileSystemView(self, hwnd), iid, useDispatcher=debug > 0)

    def EnumObjects(self, hwndOwner, flags):
        items = [["directory\0" + p] for p in sys.path if os.path.isdir(p)]
        return NewEnum(items, iid=shell.IID_IEnumIDList,
                       useDispatcher=(debug > 0))

    def GetDisplayNameOf(self, pidl, flags):
        # sys.path entries have no parent folder, so show the full path.
        return pidl[-1].split('\0')[-1]
# Simple shell view implementations
# Uses a builtin listview control to display simple lists of directories
# or filenames.
class FileSystemView:
    """IShellView for our folders: a plain ListView of the child PIDLs.

    Creates an intermediate parent window (to catch WM_COMMAND/WM_NOTIFY)
    hosting a SysListView32 child listing each item and whether its path
    exists.
    """
    _public_methods_ = shellcon.IShellView_Methods
    _com_interfaces_ = [pythoncom.IID_IOleWindow,
                        shell.IID_IShellView,
                        ]
    def __init__(self, folder, hwnd):
        self.hwnd_parent = hwnd # provided by explorer.
        self.hwnd = None # intermediate window for catching command notifications.
        self.hwnd_child = None # our ListView
        self.activate_state = None
        self.hmenu = None
        self.browser = None
        self.folder = folder
        self.children = None
    # IOleWindow
    def GetWindow(self):
        return self.hwnd
    def ContextSensitiveHelp(self, enter_mode):
        raise COMException(hresult=winerror.E_NOTIMPL)
    # IShellView
    def CreateViewWindow(self, prev, settings, browser, rect):
        print("FileSystemView.CreateViewWindow", prev, settings, browser, rect)
        self.cur_foldersettings = settings
        self.browser = browser
        self._CreateMainWindow(prev, settings, browser, rect)
        self._CreateChildWindow(prev)
        # This isn't part of the sample, but the most convenient place to
        # test/demonstrate how you can get an IShellBrowser from a HWND
        # (but ONLY when you are in the same process as the IShellBrowser!)
        # Obviously it is not necessary here - we already have the browser!
        browser_ad = win32gui.SendMessage(self.hwnd_parent, win32con.WM_USER+7, 0, 0)
        browser_ob = pythoncom.ObjectFromAddress(browser_ad, shell.IID_IShellBrowser)
        assert browser==browser_ob
        # and make a call on the object to prove it doesn't die :)
        assert browser.QueryActiveShellView()==browser_ob.QueryActiveShellView()
    def _CreateMainWindow(self, prev, settings, browser, rect):
        # Creates a parent window that hosts the view window. This window
        # gets the control notifications etc sent from the child.
        style = win32con.WS_CHILD | win32con.WS_VISIBLE #
        wclass_name = "ShellViewDemo_DefView"
        # Register the Window class.
        wc = win32gui.WNDCLASS()
        wc.hInstance = win32gui.dllhandle
        wc.lpszClassName = wclass_name
        wc.style = win32con.CS_VREDRAW | win32con.CS_HREDRAW
        try:
            win32gui.RegisterClass(wc)
        except win32gui.error as details:
            # Should only happen when this module is reloaded
            # NOTE(review): 'details[0]' is a py2-style exception index; on
            # py3 pywintypes.error is not subscriptable - this likely needs
            # details.winerror (or details.args[0]). TODO confirm.
            if details[0] != winerror.ERROR_CLASS_ALREADY_EXISTS:
                raise
        message_map = {
            win32con.WM_DESTROY: self.OnDestroy,
            win32con.WM_COMMAND: self.OnCommand,
            win32con.WM_NOTIFY: self.OnNotify,
            win32con.WM_CONTEXTMENU: self.OnContextMenu,
            win32con.WM_SIZE: self.OnSize,
        }
        self.hwnd = win32gui.CreateWindow( wclass_name, "", style, \
                rect[0], rect[1], rect[2]-rect[0], rect[3]-rect[1],
                self.hwnd_parent, 0, win32gui.dllhandle, None)
        win32gui.SetWindowLong(self.hwnd, win32con.GWL_WNDPROC, message_map)
        print("View 's hwnd is", self.hwnd)
        return self.hwnd
    def _CreateChildWindow(self, prev):
        # Creates the list view window.
        assert self.hwnd_child is None, "already have a window"
        assert self.cur_foldersettings is not None, "no settings"
        style = win32con.WS_CHILD | win32con.WS_VISIBLE | win32con.WS_BORDER | \
                commctrl.LVS_SHAREIMAGELISTS | commctrl.LVS_EDITLABELS
        # Map the shell's folder view mode onto a ListView style.
        view_mode, view_flags = self.cur_foldersettings
        if view_mode==shellcon.FVM_ICON:
            style |= commctrl.LVS_ICON | commctrl.LVS_AUTOARRANGE
        elif view_mode==shellcon.FVM_SMALLICON:
            style |= commctrl.LVS_SMALLICON | commctrl.LVS_AUTOARRANGE
        elif view_mode==shellcon.FVM_LIST:
            style |= commctrl.LVS_LIST | commctrl.LVS_AUTOARRANGE
        elif view_mode==shellcon.FVM_DETAILS:
            style |= commctrl.LVS_REPORT | commctrl.LVS_AUTOARRANGE
        else:
            # XP 'thumbnails' etc
            view_mode = shellcon.FVM_DETAILS
            # Default to 'report'
            style |= commctrl.LVS_REPORT | commctrl.LVS_AUTOARRANGE
        for f_flag, l_flag in [
            (shellcon.FWF_SINGLESEL, commctrl.LVS_SINGLESEL),
            (shellcon.FWF_ALIGNLEFT, commctrl.LVS_ALIGNLEFT),
            (shellcon.FWF_SHOWSELALWAYS, commctrl.LVS_SHOWSELALWAYS),
            ]:
            if view_flags & f_flag:
                style |= l_flag
        self.hwnd_child = win32gui.CreateWindowEx(
                              win32con.WS_EX_CLIENTEDGE,
                              "SysListView32", None, style,
                              0, 0, 0, 0,
                              self.hwnd, 1000, 0, None)
        cr = win32gui.GetClientRect(self.hwnd)
        win32gui.MoveWindow(self.hwnd_child,
                            0, 0, cr[2]-cr[0], cr[3]-cr[1],
                            True)
        # Setup the columns for the view.
        lvc, extras = win32gui_struct.PackLVCOLUMN(fmt=commctrl.LVCFMT_LEFT,
                                                   subItem=1,
                                                   text='Name',
                                                   cx=300)
        win32gui.SendMessage(self.hwnd_child, commctrl.LVM_INSERTCOLUMN,
                             0, lvc)
        lvc, extras = win32gui_struct.PackLVCOLUMN(fmt=commctrl.LVCFMT_RIGHT,
                                                   subItem=1,
                                                   text='Exists',
                                                   cx=50)
        win32gui.SendMessage(self.hwnd_child, commctrl.LVM_INSERTCOLUMN,
                             1, lvc)
        # and fill it with the content
        self.Refresh()
    def GetCurrentInfo(self):
        return self.cur_foldersettings
    def UIActivate(self, activate_state):
        print("OnActivate")
    def _OnActivate(self, activate_state):
        # Merge our menus into explorer's when (de)activated.
        if self.activate_state == activate_state:
            return
        self._OnDeactivate() # restore menu's first, if necessary.
        if activate_state != shellcon.SVUIA_DEACTIVATE:
            assert self.hmenu is None, "Should have destroyed it!"
            self.hmenu = win32gui.CreateMenu()
            widths = 0,0,0,0,0,0
            # Ask explorer to add its standard items.
            self.browser.InsertMenusSB(self.hmenu, widths)
            # Merge with these standard items
            self._MergeMenus(activate_state)
            self.browser.SetMenuSB(self.hmenu, 0, self.hwnd);
        self.activate_state = activate_state
    def _OnDeactivate(self):
        # Give explorer its menus back and destroy ours.
        if self.browser is not None and self.hmenu is not None:
            self.browser.SetMenuSB(0, 0, 0)
            self.browser.RemoveMenusSB(self.hmenu)
            win32gui.DestroyMenu(self.hmenu)
            self.hmenu = None
            self.hsubmenus = None
        self.activate_state = shellcon.SVUIA_DEACTIVATE
    def _MergeMenus(self, activate_state):
        # Merge the operations we support into the top-level menus.
        # NOTE: This function it *not* called each time the selection changes.
        # SVUIA_ACTIVATE_FOCUS really means "have a selection?"
        have_sel = activate_state == shellcon.SVUIA_ACTIVATE_FOCUS
        # only do "file" menu here, and only 1 item on it!
        mid = shellcon.FCIDM_MENU_FILE
        # Get the hmenu for the menu
        buf, extras = win32gui_struct.EmptyMENUITEMINFO(win32con.MIIM_SUBMENU)
        win32gui.GetMenuItemInfo(self.hmenu,
                                 mid,
                                 False,
                                 buf)
        data = win32gui_struct.UnpackMENUITEMINFO(buf)
        submenu = data[3]
        print("Do someting with the file menu!")
    def Refresh(self):
        # Re-enumerate the folder's children and repopulate the ListView.
        stateMask = commctrl.LVIS_SELECTED | commctrl.LVIS_DROPHILITED
        state = 0
        self.children = []
        # Enumerate and store the child PIDLs
        for cid in self.folder.EnumObjects(self.hwnd, 0):
            self.children.append(cid)
        for row_index, data in enumerate(self.children):
            assert len(data)==1, "expecting just a child PIDL"
            typ, path = data[0].split('\0')
            desc = os.path.exists(path) and "Yes" or "No"
            prop_vals = (path, desc)
            # first col
            data, extras = win32gui_struct.PackLVITEM(
                                        item=row_index,
                                        subItem=0,
                                        text=prop_vals[0],
                                        state=state,
                                        stateMask=stateMask)
            win32gui.SendMessage(self.hwnd_child,
                                 commctrl.LVM_INSERTITEM,
                                 row_index, data)
            # rest of the cols.
            col_index = 1
            for prop_val in prop_vals[1:]:
                data, extras = win32gui_struct.PackLVITEM(
                                            item=row_index,
                                            subItem=col_index,
                                            text=prop_val)
                win32gui.SendMessage(self.hwnd_child,
                                     commctrl.LVM_SETITEM,
                                     0, data)
                col_index += 1
    def SelectItem(self, pidl, flag):
        # For the sake of brevity, we don't implement this yet.
        # You would need to locate the index of the item in the shell-view
        # with that PIDL, then ask the list-view to select it.
        print("Please implement SelectItem for PIDL", pidl)
    def GetItemObject(self, item_num, iid):
        raise COMException(hresult=winerror.E_NOTIMPL)
    def TranslateAccelerator(self, msg):
        return winerror.S_FALSE
    def DestroyViewWindow(self):
        win32gui.DestroyWindow(self.hwnd)
        self.hwnd = None
        print("Destroyed view window")
    # Message handlers.
    def OnDestroy(self, hwnd, msg, wparam, lparam):
        print("OnDestory")
    def OnCommand(self, hwnd, msg, wparam, lparam):
        print("OnCommand")
    def OnNotify(self, hwnd, msg, wparam, lparam):
        # Forward focus changes to explorer; handle double-clicks.
        hwndFrom, idFrom, code = win32gui_struct.UnpackWMNOTIFY(lparam)
        #print "OnNotify code=0x%x (0x%x, 0x%x)" % (code, wparam, lparam)
        if code == commctrl.NM_SETFOCUS:
            # Control got focus - Explorer may not know - tell it
            if self.browser is not None:
                self.browser.OnViewWindowActive(None)
            # And do our menu thang
            self._OnActivate(shellcon.SVUIA_ACTIVATE_FOCUS)
        elif code == commctrl.NM_KILLFOCUS:
            self._OnDeactivate()
        elif code == commctrl.NM_DBLCLK:
            # This DblClick implementation leaves a little to be desired :)
            # It demonstrates some useful concepts, such as asking the
            # folder for its context-menu and invoking a command from it.
            # However, as our folder delegates IContextMenu to the shell
            # itself, the end result is that the folder is opened in
            # its "normal" place in Windows explorer rather than inside
            # our shell-extension.
            # Determine the selected items.
            sel = []
            n = -1
            while 1:
                n = win32gui.SendMessage(self.hwnd_child,
                                         commctrl.LVM_GETNEXTITEM,
                                         n,
                                         commctrl.LVNI_SELECTED)
                if n==-1:
                    break
                sel.append(self.children[n][-1:])
            print("Selection is", sel)
            hmenu = win32gui.CreateMenu()
            try:
                # Get the IContextMenu for the items.
                inout, cm = self.folder.GetUIObjectOf(self.hwnd_parent, sel,
                                                      shell.IID_IContextMenu, 0)
                # As per 'Q179911', we need to determine if the default operation
                # should be 'open' or 'explore'
                flags = shellcon.CMF_DEFAULTONLY
                try:
                    self.browser.GetControlWindow(shellcon.FCW_TREE)
                    flags |= shellcon.CMF_EXPLORE
                except pythoncom.com_error:
                    pass
                # *sob* - delegating to the shell does work - but lands us
                # in the original location. Q179911 also shows that
                # ShellExecuteEx should work - but I can't make it work as
                # described (XP: function call succeeds, but another thread
                # shows a dialog with text of E_INVALID_PARAM, and new
                # Explorer window opens with desktop view. Vista: function
                # call succeeds, but no window created at all.
                # On Vista, I'd love to get an IExplorerBrowser interface
                # from the shell, but a QI fails, and although the
                # IShellBrowser does appear to support IServiceProvider, I
                # still can't get it
                if 0:
                    id_cmd_first = 1 # TrackPopupMenu makes it hard to use 0
                    cm.QueryContextMenu(hmenu, 0, id_cmd_first, -1, flags)
                    # Find the default item in the returned menu.
                    cmd = win32gui.GetMenuDefaultItem(hmenu, False, 0)
                    if cmd == -1:
                        print("Oops: _doDefaultActionFor found no default menu")
                    else:
                        ci = 0, self.hwnd_parent, cmd-id_cmd_first, None, None, 0, 0, 0
                        cm.InvokeCommand(ci)
                else:
                    rv = shell.ShellExecuteEx(
                                hwnd = self.hwnd_parent,
                                nShow=win32con.SW_NORMAL,
                                lpClass="folder",
                                lpVerb="explore",
                                lpIDList=sel[0])
                    print("ShellExecuteEx returned", rv)
            finally:
                win32gui.DestroyMenu(hmenu)
    def OnContextMenu(self, hwnd, msg, wparam, lparam):
        # Show the shell's context menu for the current selection.
        # Get the selected items.
        pidls = []
        n = -1
        while 1:
            n = win32gui.SendMessage(self.hwnd_child,
                                     commctrl.LVM_GETNEXTITEM,
                                     n,
                                     commctrl.LVNI_SELECTED)
            if n==-1:
                break
            pidls.append(self.children[n][-1:])
        spt = win32api.GetCursorPos()
        if not pidls:
            print("Ignoring background click")
            return
        # Get the IContextMenu for the items.
        inout, cm = self.folder.GetUIObjectOf(self.hwnd_parent, pidls, shell.IID_IContextMenu, 0)
        hmenu = win32gui.CreatePopupMenu()
        sel = None
        # As per 'Q179911', we need to determine if the default operation
        # should be 'open' or 'explore'
        try:
            flags = 0
            try:
                self.browser.GetControlWindow(shellcon.FCW_TREE)
                flags |= shellcon.CMF_EXPLORE
            except pythoncom.com_error:
                pass
            id_cmd_first = 1 # TrackPopupMenu makes it hard to use 0
            cm.QueryContextMenu(hmenu, 0, id_cmd_first, -1, flags)
            tpm_flags = win32con.TPM_LEFTALIGN | win32con.TPM_RETURNCMD | \
                        win32con.TPM_RIGHTBUTTON
            sel = win32gui.TrackPopupMenu(hmenu,
                                          tpm_flags,
                                          spt[0], spt[1],
                                          0, self.hwnd, None)
            print("TrackPopupMenu returned", sel)
        finally:
            win32gui.DestroyMenu(hmenu)
        if sel:
            ci = 0, self.hwnd_parent, sel-id_cmd_first, None, None, 0, 0, 0
            cm.InvokeCommand(ci)
    def OnSize(self, hwnd, msg, wparam, lparam):
        #print "OnSize", self.hwnd_child, win32api.LOWORD(lparam), win32api.HIWORD(lparam)
        # Keep the ListView sized to our client area.
        if self.hwnd_child is not None:
            x = win32api.LOWORD(lparam)
            y = win32api.HIWORD(lparam)
            win32gui.MoveWindow(self.hwnd_child, 0, 0, x, y, False)
# This uses scintilla to display a filename, and optionally jump to a line
# number.
class ScintillaShellView:
_public_methods_ = shellcon.IShellView_Methods
_com_interfaces_ = [pythoncom.IID_IOleWindow,
shell.IID_IShellView,
]
def __init__(self, hwnd, filename, lineno = None):
self.filename = filename
self.lineno = lineno
self.hwnd_parent = hwnd
self.hwnd = None
def _SendSci(self, msg, wparam=0, lparam=0):
return win32gui.SendMessage(self.hwnd, msg, wparam, lparam)
# IShellView
def CreateViewWindow(self, prev, settings, browser, rect):
print("ScintillaShellView.CreateViewWindow", prev, settings, browser, rect)
# Make sure scintilla.dll is loaded. If not, find it on sys.path
# (which it generally is for Pythonwin)
try:
win32api.GetModuleHandle("Scintilla.dll")
except win32api.error:
for p in sys.path:
fname = os.path.join(p, "Scintilla.dll")
if not os.path.isfile(fname):
fname = os.path.join(p, "Build", "Scintilla.dll")
if os.path.isfile(fname):
win32api.LoadLibrary(fname)
break
else:
raise RuntimeError("Can't find scintilla!")
style = win32con.WS_CHILD | win32con.WS_VSCROLL | \
win32con.WS_HSCROLL | win32con.WS_CLIPCHILDREN | \
win32con.WS_VISIBLE
self.hwnd = win32gui.CreateWindow("Scintilla", "Scintilla", style,
rect[0], rect[1], rect[2]-rect[0], rect[3]-rect[1],
self.hwnd_parent, 1000, 0, None)
message_map = {
win32con.WM_SIZE: self.OnSize,
}
# win32gui.SetWindowLong(self.hwnd, win32con.GWL_WNDPROC, message_map)
file_data = file(self.filename, "U").read()
self._SetupLexer()
self._SendSci(scintillacon.SCI_ADDTEXT, len(file_data), file_data)
if self.lineno != None:
self._SendSci(scintillacon.SCI_GOTOLINE, self.lineno)
print("Scintilla's hwnd is", self.hwnd)
def _SetupLexer(self):
h = self.hwnd
styles = [
((0, 0, 200, 0, 0x808080), None, scintillacon.SCE_P_DEFAULT ),
((0, 2, 200, 0, 0x008000), None, scintillacon.SCE_P_COMMENTLINE ),
((0, 2, 200, 0, 0x808080), None, scintillacon.SCE_P_COMMENTBLOCK ),
((0, 0, 200, 0, 0x808000), None, scintillacon.SCE_P_NUMBER ),
((0, 0, 200, 0, 0x008080), None, scintillacon.SCE_P_STRING ),
((0, 0, 200, 0, 0x008080), None, scintillacon.SCE_P_CHARACTER ),
((0, 0, 200, 0, 0x008080), None, scintillacon.SCE_P_TRIPLE ),
((0, 0, 200, 0, 0x008080), None, scintillacon.SCE_P_TRIPLEDOUBLE),
((0, 0, 200, 0, 0x000000), 0x008080, scintillacon.SCE_P_STRINGEOL),
((0, 1, 200, 0, 0x800000), None, scintillacon.SCE_P_WORD),
((0, 1, 200, 0, 0xFF0000), None, scintillacon.SCE_P_CLASSNAME ),
((0, 1, 200, 0, 0x808000), None, scintillacon.SCE_P_DEFNAME),
((0, 0, 200, 0, 0x000000), None, scintillacon.SCE_P_OPERATOR),
((0, 0, 200, 0, 0x000000), None, scintillacon.SCE_P_IDENTIFIER ),
]
self._SendSci(scintillacon.SCI_SETLEXER, scintillacon.SCLEX_PYTHON, 0)
self._SendSci(scintillacon.SCI_SETSTYLEBITS, 5)
baseFormat = (-402653169, 0, 200, 0, 0, 0, 49, 'Courier New')
for f, bg, stylenum in styles:
self._SendSci(scintillacon.SCI_STYLESETFORE, stylenum, f[4])
self._SendSci(scintillacon.SCI_STYLESETFONT, stylenum, baseFormat[7])
if f[1] & 1: self._SendSci(scintillacon.SCI_STYLESETBOLD, stylenum, 1)
else: self._SendSci(scintillacon.SCI_STYLESETBOLD, stylenum, 0)
if f[1] & 2: self._SendSci(scintillacon.SCI_STYLESETITALIC, stylenum, 1)
else: self._SendSci(scintillacon.SCI_STYLESETITALIC, stylenum, 0)
self._SendSci(scintillacon.SCI_STYLESETSIZE, stylenum, int(baseFormat[2]/20))
if bg is not None:
self._SendSci(scintillacon.SCI_STYLESETBACK, stylenum, bg)
self._SendSci(scintillacon.SCI_STYLESETEOLFILLED, stylenum, 1) # Only needed for unclosed strings.
# IOleWindow
    def GetWindow(self):
        """IOleWindow: return the HWND of the Scintilla view window."""
        return self.hwnd
    def UIActivate(self, activate_state):
        # Activation changes are just traced; this view does no menu merging.
        print("OnActivate")
    def DestroyViewWindow(self):
        """Destroy the Scintilla window and drop our reference to it."""
        win32gui.DestroyWindow(self.hwnd)
        self.hwnd = None
        print("Destroyed scintilla window")
    def TranslateAccelerator(self, msg):
        # We handle no accelerators - S_FALSE tells the shell to process msg.
        return winerror.S_FALSE
    def OnSize(self, hwnd, msg, wparam, lparam):
        """Resize the Scintilla window to fill the new client area."""
        # lparam packs the new client size: LOWORD=width, HIWORD=height.
        x = win32api.LOWORD(lparam)
        y = win32api.HIWORD(lparam)
        win32gui.MoveWindow(self.hwnd, 0, 0, x, y, False)
def DllRegisterServer():
    """Register the namespace extension with Explorer.

    Adds our CLSID under the desktop 'Namespace' key and writes the
    packed SFGAO 'Attributes' value under the CLSID's ShellFolder key.
    """
    import winreg
    key = winreg.CreateKey(winreg.HKEY_LOCAL_MACHINE,
                            "SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\" \
                            "Explorer\\Desktop\\Namespace\\" + \
                            ShellFolderRoot._reg_clsid_)
    winreg.SetValueEx(key, None, 0, winreg.REG_SZ, ShellFolderRoot._reg_desc_)
    # And special shell keys under our CLSID
    key = winreg.CreateKey(winreg.HKEY_CLASSES_ROOT,
                        "CLSID\\" + ShellFolderRoot._reg_clsid_ + "\\ShellFolder")
    # 'Attributes' is an int stored as a binary! use struct
    attr = shellcon.SFGAO_FOLDER | shellcon.SFGAO_HASSUBFOLDER | \
           shellcon.SFGAO_BROWSABLE
    import struct
    s = struct.pack("i", attr)
    winreg.SetValueEx(key, "Attributes", 0, winreg.REG_BINARY, s)
    print(ShellFolderRoot._reg_desc_, "registration complete.")
def DllUnregisterServer():
    """Remove the namespace registration; a missing key is not an error."""
    import winreg
    try:
        key = winreg.DeleteKey(winreg.HKEY_LOCAL_MACHINE,
                                "SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\" \
                                "Explorer\\Desktop\\Namespace\\" + \
                                ShellFolderRoot._reg_clsid_)
    except WindowsError as details:
        import errno
        # ENOENT simply means we were never registered.
        if details.errno != errno.ENOENT:
            raise
    print(ShellFolderRoot._reg_desc_, "unregistration complete.")
# Script entry - the win32com helper handles --register/--unregister etc,
# calling our Dll(Un)RegisterServer hooks for the extra namespace keys.
if __name__=='__main__':
    from win32com.server import register
    register.UseCommandLine(ShellFolderRoot,
                   debug = debug,
                   finalize_register = DllRegisterServer,
                   finalize_unregister = DllUnregisterServer)
| {
"repo_name": "huguesv/PTVS",
"path": "Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/win32comext/shell/demos/servers/shell_view.py",
"copies": "10",
"size": "37808",
"license": "apache-2.0",
"hash": 2170263165842384100,
"line_mean": 43.3235638921,
"line_max": 120,
"alpha_frac": 0.5844794752,
"autogenerated": false,
"ratio": 3.757503478433711,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.005864892585316687,
"num_lines": 853
} |
# A sample shell namespace view
# To demonstrate:
# * Execute this script to register the namespace.
# * Open Windows Explorer, and locate the new "Python Path Shell Browser"
# folder off "My Computer"
# * Browse this tree - .py files are shown expandable, with classes and
# methods selectable. Selecting a Python file, or a class/method, will
# display the file using Scintilla.
# Known problems:
# * Classes and methods don't have icons - this is a demo, so we keep it small
# See icon_handler.py for examples of how to work with icons.
#
#
# Notes on PIDLs
# PIDLS are complicated, but fairly well documented in MSDN. If you need to
# do much with these shell extensions, you must understand their concept.
# Here is a short-course, as it applies to this sample:
# A PIDL identifies an item, much in the same way that a filename does
# (however, the shell is not limited to displaying "files").
# An "ItemID" is a single string, each being an item in the hierarchy.
# A "PIDL" is a list of these strings.
# All shell etc functions work with PIDLs, so even in the case where
# an ItemID is conceptually used, a 1-item list is always used.
# Conceptually, think of:
# pidl = pathname.split("\\") # pidl is a list of "parent" items.
# # each item is a string 'item id', but these are never used directly
# As there is no concept of passing a single item, to open a file using only
# a relative filename, conceptually you would say:
# open_file([filename]) # Pass a single-itemed relative "PIDL"
# and continuing the analogy, a "listdir" type function would return a list
# of single-itemed lists - each list containing the relative PIDL of the child.
#
# Each PIDL entry is a binary string, and may contain any character. For
# PIDLs not created by you, they can not be interpreted - they are just
# blobs. PIDLs created by you (ie, children of your IShellFolder) can
# store and interpret the string however makes most sense for your application.
# (but within PIDL rules - they must be persistable, etc)
# There is no reason that pickled strings, for example, couldn't be used
# as an EntryID.
# This application takes a simple approach - each PIDL is a string of form
# "directory\0directory_name", "file\0file_name" or
# "object\0file_name\0class_name[.method_name"
# The first string in each example is literal (ie, the word 'directory',
# 'file' or 'object', and every other string is variable. We use '\0' as
# a field sep just 'cos we can (and 'cos it can't possibly conflict with the
# string content)
import sys, os
import thread
import pyclbr
import pythoncom
import win32gui, win32gui_struct, win32api, win32con, winerror
import commctrl
from win32com.shell import shell, shellcon
from win32com.server.util import wrap, NewEnum
from win32com.server.exception import COMException
from win32com.util import IIDToInterfaceName
from pywin.scintilla import scintillacon
# Set this to 1 to cause debug version to be registered and used. A debug
# version will spew output to win32traceutil.
debug=0
if debug:
import win32traceutil
# markh is toying with an implementation that allows auto reload of a module
# if this attribute exists.
com_auto_reload = True
# Helper function to get a system IShellFolder interface, and the PIDL within
# that folder for an existing file/directory.
def GetFolderAndPIDLForPath(filename):
    """Return (IShellFolder, relative-pidl) for an existing file/directory.

    The returned pidl is a single-item list - ie, still a valid PIDL,
    relative to the returned folder.
    """
    desktop = shell.SHGetDesktopFolder()
    cchEaten, pidl, attr = desktop.ParseDisplayName(
        0, None, os.path.abspath(filename))
    # Bind down the ID list one child at a time until only the leaf remains.
    folder = desktop
    while len(pidl) > 1:
        child = pidl.pop(0)
        folder = folder.BindToObject([child], None, shell.IID_IShellFolder)
    return folder, pidl
# Cache of pyclbr parse results, keyed by file path, so that any given
# file is only parsed once.
clbr_modules = {}

def get_clbr_for_file(path):
    """Return the pyclbr description dict for the module file at 'path'."""
    if path not in clbr_modules:
        dirname, fname = os.path.split(path)
        modname = os.path.splitext(fname)[0]
        clbr_modules[path] = pyclbr.readmodule_ex(modname, [dirname])
    return clbr_modules[path]
# Our COM interfaces.
# Base class for a shell folder.
# All child classes use a simple PIDL of the form:
# "object_type\0object_name[\0extra ...]"
class ShellFolderBase:
    """Base for all IShellFolder implementations in this demo.

    Declares the COM interfaces/methods, and implements BindToObject by
    mapping our PIDL format ('type' NUL 'extra') onto the concrete
    folder classes defined below.
    """
    _com_interfaces_ = [shell.IID_IBrowserFrameOptions,
                        pythoncom.IID_IPersist,
                        shell.IID_IPersistFolder,
                        shell.IID_IShellFolder,
                        ]
    _public_methods_ = shellcon.IBrowserFrame_Methods + \
                       shellcon.IPersistFolder_Methods + \
                       shellcon.IShellFolder_Methods
    def GetFrameOptions(self, mask):
        #print "GetFrameOptions", self, mask
        return 0
    def ParseDisplayName(self, hwnd, reserved, displayName, attr):
        # Not implemented - just traced.
        print "ParseDisplayName", displayName
        # return cchEaten, pidl, attr
    def BindToStorage(self, pidl, bc, iid):
        # Not implemented - just traced.
        print "BTS", iid, IIDToInterfaceName(iid)
    def BindToObject(self, pidl, bc, iid):
        # We may be passed a set of relative PIDLs here - ie
        # [pidl_of_dir, pidl_of_child_dir, pidl_of_file, pidl_of_function]
        # But each of our PIDLs keeps the fully qualified name anyway - so
        # just jump directly to the last.
        final_pidl = pidl[-1]
        typ, extra = final_pidl.split('\0', 1)
        if typ == "directory":
            klass = ShellFolderDirectory
        elif typ == "file":
            klass = ShellFolderFile
        elif typ == "object":
            klass = ShellFolderObject
        else:
            raise RuntimeError("What is " + repr(typ))
        ret = wrap(klass(extra), iid, useDispatcher = (debug>0))
        return ret
# A ShellFolder for an object with CHILDREN on the file system
# Note that this means our "File" folder is *not* a 'FileSystem' folder,
# as it's children (functions and classes) are not on the file system.
#
class ShellFolderFileSystem(ShellFolderBase):
    """A ShellFolder whose children exist on the file system.

    Most operations delegate to the real shell folder for the path named
    in the PIDL's second field.
    """
    def _GetFolderAndPIDLForPIDL(self, my_idl):
        # Our PIDLs are 'type\0full_path' - resolve the path via the shell.
        typ, name = my_idl[0].split('\0')
        return GetFolderAndPIDLForPath(name)
    # Interface methods
    def CompareIDs(self, param, id1, id2):
        # Simple ordinal comparison of the raw item-id strings.
        if id1 < id2:
            return -1
        if id1 == id2:
            return 0
        return 1
    def GetUIObjectOf(self, hwndOwner, pidls, iid, inout):
        # delegate to the shell.
        assert len(pidls)==1, "oops - arent expecting more than one!"
        pidl = pidls[0]
        folder, child_pidl = self._GetFolderAndPIDLForPIDL(pidl)
        try:
            inout, ret = folder.GetUIObjectOf(hwndOwner, [child_pidl], iid,
                                              inout, iid)
        except pythoncom.com_error, (hr, desc, exc, arg):
            raise COMException(hresult=hr)
        return inout, ret
        # return object of IID
    def GetDisplayNameOf(self, pidl, flags):
        # delegate to the shell.
        folder, child_pidl = self._GetFolderAndPIDLForPIDL(pidl)
        ret = folder.GetDisplayNameOf(child_pidl, flags)
        return ret
    def GetAttributesOf(self, pidls, attrFlags):
        # AND together the shell's attributes for each item, forcing on
        # the folder-ish bits - all our items have sub-items.
        ret_flags = -1
        for pidl in pidls:
            pidl = pidl[0] # ??
            typ, name = pidl.split('\0')
            flags = shellcon.SHGFI_ATTRIBUTES
            rc, info = shell.SHGetFileInfo(name, 0, flags)
            hIcon, iIcon, dwAttr, name, typeName = info
            # All our items, even files, have sub-items
            extras = shellcon.SFGAO_HASSUBFOLDER | \
                     shellcon.SFGAO_FOLDER | \
                     shellcon.SFGAO_FILESYSANCESTOR | \
                     shellcon.SFGAO_BROWSABLE
            ret_flags &= (dwAttr | extras)
        return ret_flags
class ShellFolderDirectory(ShellFolderFileSystem):
    """Folder for a file-system directory.

    Children are sub-directories and Python (.py/.pyw) source files;
    everything else is filtered out of the enumeration.
    """
    def __init__(self, path):
        self.path = os.path.abspath(path)
    def CreateViewObject(self, hwnd, iid):
        # delegate to the shell.
        folder, child_pidl = GetFolderAndPIDLForPath(self.path)
        return folder.CreateViewObject(hwnd, iid)
    def EnumObjects(self, hwndOwner, flags):
        # Build relative PIDLs of the form 'directory\0path' / 'file\0path'.
        pidls = []
        for fname in os.listdir(self.path):
            fqn = os.path.join(self.path, fname)
            if os.path.isdir(fqn):
                type_name = "directory"
                type_class = ShellFolderDirectory
            else:
                base, ext = os.path.splitext(fname)
                if ext in [".py", ".pyw"]:
                    type_class = ShellFolderFile
                    type_name = "file"
                else:
                    type_class = None
            if type_class is not None:
                pidls.append( [type_name + "\0" + fqn] )
        return NewEnum(pidls, iid=shell.IID_IEnumIDList,
                       useDispatcher=(debug>0))
    def GetDisplayNameOf(self, pidl, flags):
        # Display just the base name of the path stored in the PIDL.
        final_pidl=pidl[-1]
        full_fname=final_pidl.split('\0')[-1]
        return os.path.split(full_fname)[-1]
    def GetAttributesOf(self, pidls, attrFlags):
        return shellcon.SFGAO_HASSUBFOLDER|shellcon.SFGAO_FOLDER|shellcon.SFGAO_FILESYSANCESTOR|shellcon.SFGAO_BROWSABLE
# As per comments above, even though this manages a file, it is *not* a
# ShellFolderFileSystem, as the children are not on the file system.
class ShellFolderFile(ShellFolderBase):
    """Folder for a Python source file.

    Children are the classes/functions pyclbr finds in the file - they
    are not on the file system, hence not a ShellFolderFileSystem.
    """
    def __init__(self, path):
        self.path = os.path.abspath(path)
    def EnumObjects(self, hwndOwner, flags):
        # One child PIDL 'object\0path\0name' per top-level pyclbr object.
        objects = get_clbr_for_file(self.path)
        pidls = []
        for name, ob in objects.iteritems():
            pidls.append( ["object\0" + self.path + "\0" + name] )
        return NewEnum(pidls, iid=shell.IID_IEnumIDList,
                       useDispatcher=(debug>0))
    def GetAttributesOf(self, pidls, attrFlags):
        # Only objects with methods (ie, classes) get SFGAO_HASSUBFOLDER.
        ret_flags = -1
        for pidl in pidls:
            assert len(pidl)==1, "Expecting relative pidls"
            pidl = pidl[0]
            typ, filename, obname = pidl.split('\0')
            obs = get_clbr_for_file(filename)
            ob = obs[obname]
            flags = shellcon.SFGAO_BROWSABLE | shellcon.SFGAO_FOLDER | \
                    shellcon.SFGAO_FILESYSANCESTOR
            if hasattr(ob, "methods"):
                flags |= shellcon.SFGAO_HASSUBFOLDER
            ret_flags &= flags
        return ret_flags
    def GetDisplayNameOf(self, pidl, flags):
        assert len(pidl)==1, "Expecting relative PIDL"
        typ, fname, obname = pidl[0].split('\0')
        fqname = os.path.splitext(fname)[0] + "." + obname
        if flags & shellcon.SHGDN_INFOLDER:
            ret = obname
        else: # SHGDN_NORMAL is the default
            ret = fqname
        # No need to look at the SHGDN_FOR* modifiers.
        return ret
    def CreateViewObject(self, hwnd, iid):
        # View the file's source in a Scintilla control.
        return wrap(ScintillaShellView(hwnd, self.path), iid, useDispatcher=debug>0)
# A ShellFolder for our Python objects
class ShellFolderObject(ShellFolderBase):
    """Folder for a Python object (class, or class.method) inside a file.

    'details' is 'path\0class_name[.method_name]' - the portion of our
    PIDL after the leading 'object' tag.
    """
    def __init__(self, details):
        self.path, details = details.split('\0')
        if details.find(".")>0:
            self.class_name, self.method_name = details.split(".")
        else:
            self.class_name = details
            self.method_name = None
    def CreateViewObject(self, hwnd, iid):
        # Show the source in Scintilla, jumping to the object's line.
        mod_objects = get_clbr_for_file(self.path)
        object = mod_objects[self.class_name]
        if self.method_name is None:
            lineno = object.lineno
        else:
            lineno = object.methods[self.method_name]
        return wrap(ScintillaShellView(hwnd, self.path, lineno),
                    iid, useDispatcher=debug>0)
    def EnumObjects(self, hwndOwner, flags):
        # A class enumerates its methods; methods have no children.
        assert self.method_name is None, "Should not be enuming methods!"
        mod_objects = get_clbr_for_file(self.path)
        my_objects = mod_objects[self.class_name]
        pidls = []
        for func_name, lineno in my_objects.methods.iteritems():
            pidl = ["object\0" + self.path + "\0" +
                    self.class_name + "." + func_name]
            pidls.append(pidl)
        return NewEnum(pidls, iid=shell.IID_IEnumIDList,
                       useDispatcher=(debug>0))
    def GetDisplayNameOf(self, pidl, flags):
        assert len(pidl)==1, "Expecting relative PIDL"
        typ, fname, obname = pidl[0].split('\0')
        class_name, method_name = obname.split(".")
        fqname = os.path.splitext(fname)[0] + "." + obname
        if flags & shellcon.SHGDN_INFOLDER:
            ret = method_name
        else: # SHGDN_NORMAL is the default
            ret = fqname
        # No need to look at the SHGDN_FOR* modifiers.
        return ret
    def GetAttributesOf(self, pidls, attrFlags):
        ret_flags = -1
        for pidl in pidls:
            assert len(pidl)==1, "Expecting relative pidls"
            flags = shellcon.SFGAO_BROWSABLE | shellcon.SFGAO_FOLDER | \
                    shellcon.SFGAO_FILESYSANCESTOR
            ret_flags &= flags
        return ret_flags
# The "Root" folder of our namespace. As all children are directories,
# it is derived from ShellFolderFileSystem
# This is the only COM object actually registered and externally created.
class ShellFolderRoot(ShellFolderFileSystem):
    """The root of our namespace - enumerates the directories on sys.path.

    This is the only COM object actually registered and externally created.
    """
    _reg_progid_ = "Python.ShellExtension.Folder"
    _reg_desc_ = "Python Path Shell Browser"
    _reg_clsid_ = "{f6287035-3074-4cb5-a8a6-d3c80e206944}"
    def GetClassID(self):
        return self._reg_clsid_
    def Initialize(self, pidl):
        # This is the PIDL of us, as created by the shell. This is our
        # top-level ID. All other items under us have PIDLs defined
        # by us - see the notes at the top of the file.
        #print "Initialize called with pidl", repr(pidl)
        self.pidl = pidl
    def CreateViewObject(self, hwnd, iid):
        return wrap(FileSystemView(self, hwnd), iid, useDispatcher=debug>0)
    def EnumObjects(self, hwndOwner, flags):
        # One 'directory' child per existing directory on sys.path.
        items = [ ["directory\0" + p] for p in sys.path if os.path.isdir(p)]
        return NewEnum(items, iid=shell.IID_IEnumIDList,
                       useDispatcher=(debug>0))
    def GetDisplayNameOf(self, pidl, flags):
        ## return full path for sys.path dirs, since they don't appear under a parent folder
        final_pidl=pidl[-1]
        display_name=final_pidl.split('\0')[-1]
        return display_name
# Simple shell view implementations
# Uses a builtin listview control to display simple lists of directories
# or filenames.
class FileSystemView:
    """IShellView for the root folder.

    Hosts a plain SysListView32 inside an intermediate window that
    catches the command/notify messages, and merges menus with the
    browser as activation state changes.
    """
    _public_methods_ = shellcon.IShellView_Methods
    _com_interfaces_ = [pythoncom.IID_IOleWindow,
                        shell.IID_IShellView,
                        ]
    def __init__(self, folder, hwnd):
        self.hwnd_parent = hwnd # provided by explorer.
        self.hwnd = None # intermediate window for catching command notifications.
        self.hwnd_child = None # our ListView
        self.activate_state = None
        self.hmenu = None
        self.browser = None
        self.folder = folder
        self.children = None
    # IOleWindow
    def GetWindow(self):
        return self.hwnd
    def ContextSensitiveHelp(self, enter_mode):
        raise COMException(hresult=winerror.E_NOTIMPL)
    # IShellView
    def CreateViewWindow(self, prev, settings, browser, rect):
        print "FileSystemView.CreateViewWindow", prev, settings, browser, rect
        self.cur_foldersettings = settings
        self.browser = browser
        self._CreateMainWindow(prev, settings, browser, rect)
        self._CreateChildWindow(prev)
        # This isn't part of the sample, but the most convenient place to
        # test/demonstrate how you can get an IShellBrowser from a HWND
        # (but ONLY when you are in the same process as the IShellBrowser!)
        # Obviously it is not necessary here - we already have the browser!
        browser_ad = win32gui.SendMessage(self.hwnd_parent, win32con.WM_USER+7, 0, 0)
        browser_ob = pythoncom.ObjectFromAddress(browser_ad, shell.IID_IShellBrowser)
        assert browser==browser_ob
        # and make a call on the object to prove it doesn't die :)
        assert browser.QueryActiveShellView()==browser_ob.QueryActiveShellView()
    def _CreateMainWindow(self, prev, settings, browser, rect):
        # Creates a parent window that hosts the view window. This window
        # gets the control notifications etc sent from the child.
        style = win32con.WS_CHILD | win32con.WS_VISIBLE #
        wclass_name = "ShellViewDemo_DefView"
        # Register the Window class.
        wc = win32gui.WNDCLASS()
        wc.hInstance = win32gui.dllhandle
        wc.lpszClassName = wclass_name
        wc.style = win32con.CS_VREDRAW | win32con.CS_HREDRAW
        try:
            win32gui.RegisterClass(wc)
        except win32gui.error, details:
            # Should only happen when this module is reloaded
            if details[0] != winerror.ERROR_CLASS_ALREADY_EXISTS:
                raise
        message_map = {
            win32con.WM_DESTROY: self.OnDestroy,
            win32con.WM_COMMAND: self.OnCommand,
            win32con.WM_NOTIFY: self.OnNotify,
            win32con.WM_CONTEXTMENU: self.OnContextMenu,
            win32con.WM_SIZE: self.OnSize,
        }
        self.hwnd = win32gui.CreateWindow( wclass_name, "", style, \
                        rect[0], rect[1], rect[2]-rect[0], rect[3]-rect[1],
                        self.hwnd_parent, 0, win32gui.dllhandle, None)
        win32gui.SetWindowLong(self.hwnd, win32con.GWL_WNDPROC, message_map)
        print "View 's hwnd is", self.hwnd
        return self.hwnd
    def _CreateChildWindow(self, prev):
        # Creates the list view window.
        assert self.hwnd_child is None, "already have a window"
        assert self.cur_foldersettings is not None, "no settings"
        style = win32con.WS_CHILD | win32con.WS_VISIBLE | win32con.WS_BORDER | \
                commctrl.LVS_SHAREIMAGELISTS | commctrl.LVS_EDITLABELS
        # Translate the folder view-mode/flags into list-view styles.
        view_mode, view_flags = self.cur_foldersettings
        if view_mode==shellcon.FVM_ICON:
            style |= commctrl.LVS_ICON | commctrl.LVS_AUTOARRANGE
        elif view_mode==shellcon.FVM_SMALLICON:
            style |= commctrl.LVS_SMALLICON | commctrl.LVS_AUTOARRANGE
        elif view_mode==shellcon.FVM_LIST:
            style |= commctrl.LVS_LIST | commctrl.LVS_AUTOARRANGE
        elif view_mode==shellcon.FVM_DETAILS:
            style |= commctrl.LVS_REPORT | commctrl.LVS_AUTOARRANGE
        else:
            # XP 'thumbnails' etc
            view_mode = shellcon.FVM_DETAILS
            # Default to 'report'
            style |= commctrl.LVS_REPORT | commctrl.LVS_AUTOARRANGE
        for f_flag, l_flag in [
            (shellcon.FWF_SINGLESEL, commctrl.LVS_SINGLESEL),
            (shellcon.FWF_ALIGNLEFT, commctrl.LVS_ALIGNLEFT),
            (shellcon.FWF_SHOWSELALWAYS, commctrl.LVS_SHOWSELALWAYS),
            ]:
            if view_flags & f_flag:
                style |= l_flag
        self.hwnd_child = win32gui.CreateWindowEx(
                              win32con.WS_EX_CLIENTEDGE,
                              "SysListView32", None, style,
                              0, 0, 0, 0,
                              self.hwnd, 1000, 0, None)
        cr = win32gui.GetClientRect(self.hwnd)
        win32gui.MoveWindow(self.hwnd_child,
                            0, 0, cr[2]-cr[0], cr[3]-cr[1],
                            True)
        # Setup the columns for the view.
        lvc, extras = win32gui_struct.PackLVCOLUMN(fmt=commctrl.LVCFMT_LEFT,
                                                   subItem=1,
                                                   text='Name',
                                                   cx=300)
        win32gui.SendMessage(self.hwnd_child, commctrl.LVM_INSERTCOLUMN,
                             0, lvc)
        lvc, extras = win32gui_struct.PackLVCOLUMN(fmt=commctrl.LVCFMT_RIGHT,
                                                   subItem=1,
                                                   text='Exists',
                                                   cx=50)
        win32gui.SendMessage(self.hwnd_child, commctrl.LVM_INSERTCOLUMN,
                             1, lvc)
        # and fill it with the content
        self.Refresh()
    def GetCurrentInfo(self):
        return self.cur_foldersettings
    def UIActivate(self, activate_state):
        print "OnActivate"
    def _OnActivate(self, activate_state):
        # Merge/unmerge menus with the browser as our activation changes.
        if self.activate_state == activate_state:
            return
        self._OnDeactivate() # restore menu's first, if necessary.
        if activate_state != shellcon.SVUIA_DEACTIVATE:
            assert self.hmenu is None, "Should have destroyed it!"
            self.hmenu = win32gui.CreateMenu()
            widths = 0,0,0,0,0,0
            # Ask explorer to add its standard items.
            self.browser.InsertMenusSB(self.hmenu, widths)
            # Merge with these standard items
            self._MergeMenus(activate_state)
            self.browser.SetMenuSB(self.hmenu, 0, self.hwnd);
        self.activate_state = activate_state
    def _OnDeactivate(self):
        if self.browser is not None and self.hmenu is not None:
            self.browser.SetMenuSB(0, 0, 0)
            self.browser.RemoveMenusSB(self.hmenu)
            win32gui.DestroyMenu(self.hmenu)
            self.hmenu = None
            self.hsubmenus = None
        self.activate_state = shellcon.SVUIA_DEACTIVATE
    def _MergeMenus(self, activate_state):
        # Merge the operations we support into the top-level menus.
        # NOTE: This function is *not* called each time the selection changes.
        # SVUIA_ACTIVATE_FOCUS really means "have a selection?"
        have_sel = activate_state == shellcon.SVUIA_ACTIVATE_FOCUS
        # only do "file" menu here, and only 1 item on it!
        mid = shellcon.FCIDM_MENU_FILE
        # Get the hmenu for the menu
        buf, extras = win32gui_struct.EmptyMENUITEMINFO(win32con.MIIM_SUBMENU)
        win32gui.GetMenuItemInfo(self.hmenu,
                                 mid,
                                 False,
                                 buf)
        data = win32gui_struct.UnpackMENUITEMINFO(buf)
        submenu = data[3]
        print "Do someting with the file menu!"
    def Refresh(self):
        # Re-enumerate the folder's children and repopulate the list-view,
        # one row per child PIDL with a 'Name' and 'Exists' column.
        stateMask = commctrl.LVIS_SELECTED | commctrl.LVIS_DROPHILITED
        state = 0
        self.children = []
        # Enumerate and store the child PIDLs
        for cid in self.folder.EnumObjects(self.hwnd, 0):
            self.children.append(cid)
        for row_index, data in enumerate(self.children):
            assert len(data)==1, "expecting just a child PIDL"
            typ, path = data[0].split('\0')
            desc = os.path.exists(path) and "Yes" or "No"
            prop_vals = (path, desc)
            # first col
            data, extras = win32gui_struct.PackLVITEM(
                                        item=row_index,
                                        subItem=0,
                                        text=prop_vals[0],
                                        state=state,
                                        stateMask=stateMask)
            win32gui.SendMessage(self.hwnd_child,
                                 commctrl.LVM_INSERTITEM,
                                 row_index, data)
            # rest of the cols.
            col_index = 1
            for prop_val in prop_vals[1:]:
                data, extras = win32gui_struct.PackLVITEM(
                                            item=row_index,
                                            subItem=col_index,
                                            text=prop_val)
                win32gui.SendMessage(self.hwnd_child,
                                     commctrl.LVM_SETITEM,
                                     0, data)
                col_index += 1
    def SelectItem(self, pidl, flag):
        # For the sake of brevity, we don't implement this yet.
        # You would need to locate the index of the item in the shell-view
        # with that PIDL, then ask the list-view to select it.
        print "Please implement SelectItem for PIDL", pidl
    def GetItemObject(self, item_num, iid):
        raise COMException(hresult=winerror.E_NOTIMPL)
    def TranslateAccelerator(self, msg):
        return winerror.S_FALSE
    def DestroyViewWindow(self):
        win32gui.DestroyWindow(self.hwnd)
        self.hwnd = None
        print "Destroyed view window"
    # Message handlers.
    def OnDestroy(self, hwnd, msg, wparam, lparam):
        print "OnDestory"
    def OnCommand(self, hwnd, msg, wparam, lparam):
        print "OnCommand"
    def OnNotify(self, hwnd, msg, wparam, lparam):
        hwndFrom, idFrom, code = win32gui_struct.UnpackWMNOTIFY(lparam)
        #print "OnNotify code=0x%x (0x%x, 0x%x)" % (code, wparam, lparam)
        if code == commctrl.NM_SETFOCUS:
            # Control got focus - Explorer may not know - tell it
            if self.browser is not None:
                self.browser.OnViewWindowActive(None)
            # And do our menu thang
            self._OnActivate(shellcon.SVUIA_ACTIVATE_FOCUS)
        elif code == commctrl.NM_KILLFOCUS:
            self._OnDeactivate()
        elif code == commctrl.NM_DBLCLK:
            # This DblClick implementation leaves a little to be desired :)
            # It demonstrates some useful concepts, such as asking the
            # folder for its context-menu and invoking a command from it.
            # However, as our folder delegates IContextMenu to the shell
            # itself, the end result is that the folder is opened in
            # its "normal" place in Windows explorer rather than inside
            # our shell-extension.
            # Determine the selected items.
            sel = []
            n = -1
            while 1:
                n = win32gui.SendMessage(self.hwnd_child,
                                         commctrl.LVM_GETNEXTITEM,
                                         n,
                                         commctrl.LVNI_SELECTED)
                if n==-1:
                    break
                sel.append(self.children[n][-1:])
            print "Selection is", sel
            hmenu = win32gui.CreateMenu()
            try:
                # Get the IContextMenu for the items.
                inout, cm = self.folder.GetUIObjectOf(self.hwnd_parent, sel,
                                                      shell.IID_IContextMenu, 0)
                # As per 'Q179911', we need to determine if the default operation
                # should be 'open' or 'explore'
                flags = shellcon.CMF_DEFAULTONLY
                try:
                    self.browser.GetControlWindow(shellcon.FCW_TREE)
                    flags |= shellcon.CMF_EXPLORE
                except pythoncom.com_error:
                    pass
                # *sob* - delegating to the shell does work - but lands us
                # in the original location. Q179911 also shows that
                # ShellExecuteEx should work - but I can't make it work as
                # described (XP: function call succeeds, but another thread
                # shows a dialog with text of E_INVALID_PARAM, and new
                # Explorer window opens with desktop view. Vista: function
                # call succeeds, but no window created at all.
                # On Vista, I'd love to get an IExplorerBrowser interface
                # from the shell, but a QI fails, and although the
                # IShellBrowser does appear to support IServiceProvider, I
                # still can't get it
                if 0:
                    id_cmd_first = 1 # TrackPopupMenu makes it hard to use 0
                    cm.QueryContextMenu(hmenu, 0, id_cmd_first, -1, flags)
                    # Find the default item in the returned menu.
                    cmd = win32gui.GetMenuDefaultItem(hmenu, False, 0)
                    if cmd == -1:
                        print "Oops: _doDefaultActionFor found no default menu"
                    else:
                        ci = 0, self.hwnd_parent, cmd-id_cmd_first, None, None, 0, 0, 0
                        cm.InvokeCommand(ci)
                else:
                    rv = shell.ShellExecuteEx(
                                hwnd = self.hwnd_parent,
                                nShow=win32con.SW_NORMAL,
                                lpClass="folder",
                                lpVerb="explore",
                                lpIDList=sel[0])
                    print "ShellExecuteEx returned", rv
            finally:
                win32gui.DestroyMenu(hmenu)
    def OnContextMenu(self, hwnd, msg, wparam, lparam):
        # Get the selected items.
        pidls = []
        n = -1
        while 1:
            n = win32gui.SendMessage(self.hwnd_child,
                                     commctrl.LVM_GETNEXTITEM,
                                     n,
                                     commctrl.LVNI_SELECTED)
            if n==-1:
                break
            pidls.append(self.children[n][-1:])
        spt = win32api.GetCursorPos()
        if not pidls:
            print "Ignoring background click"
            return
        # Get the IContextMenu for the items.
        inout, cm = self.folder.GetUIObjectOf(self.hwnd_parent, pidls, shell.IID_IContextMenu, 0)
        hmenu = win32gui.CreatePopupMenu()
        sel = None
        # As per 'Q179911', we need to determine if the default operation
        # should be 'open' or 'explore'
        try:
            flags = 0
            try:
                self.browser.GetControlWindow(shellcon.FCW_TREE)
                flags |= shellcon.CMF_EXPLORE
            except pythoncom.com_error:
                pass
            id_cmd_first = 1 # TrackPopupMenu makes it hard to use 0
            cm.QueryContextMenu(hmenu, 0, id_cmd_first, -1, flags)
            tpm_flags = win32con.TPM_LEFTALIGN | win32con.TPM_RETURNCMD | \
                        win32con.TPM_RIGHTBUTTON
            sel = win32gui.TrackPopupMenu(hmenu,
                                          tpm_flags,
                                          spt[0], spt[1],
                                          0, self.hwnd, None)
            print "TrackPopupMenu returned", sel
        finally:
            win32gui.DestroyMenu(hmenu)
        if sel:
            ci = 0, self.hwnd_parent, sel-id_cmd_first, None, None, 0, 0, 0
            cm.InvokeCommand(ci)
    def OnSize(self, hwnd, msg, wparam, lparam):
        #print "OnSize", self.hwnd_child, win32api.LOWORD(lparam), win32api.HIWORD(lparam)
        if self.hwnd_child is not None:
            x = win32api.LOWORD(lparam)
            y = win32api.HIWORD(lparam)
            win32gui.MoveWindow(self.hwnd_child, 0, 0, x, y, False)
# This uses scintilla to display a filename, and optionally jump to a line
# number.
class ScintillaShellView:
    """IShellView that displays a Python file in a Scintilla control,
    optionally jumping to a given line number."""
    _public_methods_ = shellcon.IShellView_Methods
    _com_interfaces_ = [pythoncom.IID_IOleWindow,
                        shell.IID_IShellView,
                        ]
    def __init__(self, hwnd, filename, lineno = None):
        self.filename = filename
        self.lineno = lineno
        self.hwnd_parent = hwnd
        self.hwnd = None
    def _SendSci(self, msg, wparam=0, lparam=0):
        # Helper - send a message to the Scintilla control.
        return win32gui.SendMessage(self.hwnd, msg, wparam, lparam)
    # IShellView
    def CreateViewWindow(self, prev, settings, browser, rect):
        print "ScintillaShellView.CreateViewWindow", prev, settings, browser, rect
        # Make sure scintilla.dll is loaded. If not, find it on sys.path
        # (which it generally is for Pythonwin)
        try:
            win32api.GetModuleHandle("Scintilla.dll")
        except win32api.error:
            for p in sys.path:
                fname = os.path.join(p, "Scintilla.dll")
                if not os.path.isfile(fname):
                    fname = os.path.join(p, "Build", "Scintilla.dll")
                if os.path.isfile(fname):
                    win32api.LoadLibrary(fname)
                    break
            else:
                raise RuntimeError("Can't find scintilla!")
        style = win32con.WS_CHILD | win32con.WS_VSCROLL | \
                win32con.WS_HSCROLL | win32con.WS_CLIPCHILDREN | \
                win32con.WS_VISIBLE
        self.hwnd = win32gui.CreateWindow("Scintilla", "Scintilla", style,
                              rect[0], rect[1], rect[2]-rect[0], rect[3]-rect[1],
                              self.hwnd_parent, 1000, 0, None)
        message_map = {
            win32con.WM_SIZE: self.OnSize,
        }
#        win32gui.SetWindowLong(self.hwnd, win32con.GWL_WNDPROC, message_map)
        file_data = file(self.filename, "U").read()
        self._SetupLexer()
        self._SendSci(scintillacon.SCI_ADDTEXT, len(file_data), file_data)
        if self.lineno != None:
            self._SendSci(scintillacon.SCI_GOTOLINE, self.lineno)
        print "Scintilla's hwnd is", self.hwnd
    def _SetupLexer(self):
        """Configure the control for Python syntax coloring."""
        h = self.hwnd
        # (format-tuple, background-or-None, style id); format[1] holds
        # bold(1)/italic(2) flags, format[4] the RGB color.
        styles = [
            ((0, 0, 200, 0, 0x808080), None, scintillacon.SCE_P_DEFAULT ),
            ((0, 2, 200, 0, 0x008000), None, scintillacon.SCE_P_COMMENTLINE ),
            ((0, 2, 200, 0, 0x808080), None, scintillacon.SCE_P_COMMENTBLOCK ),
            ((0, 0, 200, 0, 0x808000), None, scintillacon.SCE_P_NUMBER ),
            ((0, 0, 200, 0, 0x008080), None, scintillacon.SCE_P_STRING ),
            ((0, 0, 200, 0, 0x008080), None, scintillacon.SCE_P_CHARACTER ),
            ((0, 0, 200, 0, 0x008080), None, scintillacon.SCE_P_TRIPLE ),
            ((0, 0, 200, 0, 0x008080), None, scintillacon.SCE_P_TRIPLEDOUBLE),
            ((0, 0, 200, 0, 0x000000), 0x008080, scintillacon.SCE_P_STRINGEOL),
            ((0, 1, 200, 0, 0x800000), None, scintillacon.SCE_P_WORD),
            ((0, 1, 200, 0, 0xFF0000), None, scintillacon.SCE_P_CLASSNAME ),
            ((0, 1, 200, 0, 0x808000), None, scintillacon.SCE_P_DEFNAME),
            ((0, 0, 200, 0, 0x000000), None, scintillacon.SCE_P_OPERATOR),
            ((0, 0, 200, 0, 0x000000), None, scintillacon.SCE_P_IDENTIFIER ),
        ]
        self._SendSci(scintillacon.SCI_SETLEXER, scintillacon.SCLEX_PYTHON, 0)
        self._SendSci(scintillacon.SCI_SETSTYLEBITS, 5)
        baseFormat = (-402653169, 0, 200, 0, 0, 0, 49, 'Courier New')
        for f, bg, stylenum in styles:
            self._SendSci(scintillacon.SCI_STYLESETFORE, stylenum, f[4])
            self._SendSci(scintillacon.SCI_STYLESETFONT, stylenum, baseFormat[7])
            if f[1] & 1: self._SendSci(scintillacon.SCI_STYLESETBOLD, stylenum, 1)
            else: self._SendSci(scintillacon.SCI_STYLESETBOLD, stylenum, 0)
            if f[1] & 2: self._SendSci(scintillacon.SCI_STYLESETITALIC, stylenum, 1)
            else: self._SendSci(scintillacon.SCI_STYLESETITALIC, stylenum, 0)
            self._SendSci(scintillacon.SCI_STYLESETSIZE, stylenum, int(baseFormat[2]/20))
            if bg is not None:
                self._SendSci(scintillacon.SCI_STYLESETBACK, stylenum, bg)
            self._SendSci(scintillacon.SCI_STYLESETEOLFILLED, stylenum, 1) # Only needed for unclosed strings.
    # IOleWindow
    def GetWindow(self):
        return self.hwnd
    def UIActivate(self, activate_state):
        print "OnActivate"
    def DestroyViewWindow(self):
        win32gui.DestroyWindow(self.hwnd)
        self.hwnd = None
        print "Destroyed scintilla window"
    def TranslateAccelerator(self, msg):
        return winerror.S_FALSE
    def OnSize(self, hwnd, msg, wparam, lparam):
        # lparam packs the new client size: LOWORD=width, HIWORD=height.
        x = win32api.LOWORD(lparam)
        y = win32api.HIWORD(lparam)
        win32gui.MoveWindow(self.hwnd, 0, 0, x, y, False)
def DllRegisterServer():
    """Register the namespace extension with Explorer (Python 2 variant).

    Adds our CLSID under the desktop 'Namespace' key and writes the
    packed SFGAO 'Attributes' value under the CLSID's ShellFolder key.
    """
    import _winreg
    key = _winreg.CreateKey(_winreg.HKEY_LOCAL_MACHINE,
                            "SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\" \
                            "Explorer\\Desktop\\Namespace\\" + \
                            ShellFolderRoot._reg_clsid_)
    _winreg.SetValueEx(key, None, 0, _winreg.REG_SZ, ShellFolderRoot._reg_desc_)
    # And special shell keys under our CLSID
    key = _winreg.CreateKey(_winreg.HKEY_CLASSES_ROOT,
                        "CLSID\\" + ShellFolderRoot._reg_clsid_ + "\\ShellFolder")
    # 'Attributes' is an int stored as a binary! use struct
    attr = shellcon.SFGAO_FOLDER | shellcon.SFGAO_HASSUBFOLDER | \
           shellcon.SFGAO_BROWSABLE
    import struct
    s = struct.pack("i", attr)
    _winreg.SetValueEx(key, "Attributes", 0, _winreg.REG_BINARY, s)
    print ShellFolderRoot._reg_desc_, "registration complete."
def DllUnregisterServer():
    """Remove the namespace registration; a missing key is not an error."""
    import _winreg
    try:
        key = _winreg.DeleteKey(_winreg.HKEY_LOCAL_MACHINE,
                                "SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\" \
                                "Explorer\\Desktop\\Namespace\\" + \
                                ShellFolderRoot._reg_clsid_)
    except WindowsError, details:
        import errno
        # ENOENT simply means we were never registered.
        if details.errno != errno.ENOENT:
            raise
    print ShellFolderRoot._reg_desc_, "unregistration complete."
# Script entry - the win32com helper handles --register/--unregister etc,
# calling our Dll(Un)RegisterServer hooks for the extra namespace keys.
if __name__=='__main__':
    from win32com.server import register
    register.UseCommandLine(ShellFolderRoot,
                   debug = debug,
                   finalize_register = DllRegisterServer,
                   finalize_unregister = DllUnregisterServer)
| {
"repo_name": "ntuecon/server",
"path": "pyenv/Lib/site-packages/win32comext/shell/demos/servers/shell_view.py",
"copies": "4",
"size": "38597",
"license": "bsd-3-clause",
"hash": 492079119250081300,
"line_mean": 43.3016431925,
"line_max": 120,
"alpha_frac": 0.5717024639,
"autogenerated": false,
"ratio": 3.792571484720448,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6364273948620448,
"avg_score": null,
"num_lines": null
} |
"""A sample socket server and client using SSPI authentication and encryption.
You must run with either 'client' or 'server' as arguments. A server must be
running before a client can connect.
To use with Kerberos you should include in the client options
--target-spn=username, where 'username' is the user under which the server is
being run.
Running either the client or server as a different user can be informative.
A command-line such as the following may be useful:
`runas /user:{user} {fqp}\python.exe {fqp}\socket_server.py --wait client|server`
{fqp} should specify the relevant fully-qualified path names.
To use 'runas' with Kerberos, the client program will need to
specify --target-spn with the username under which the *server* is running.
See the SSPI documentation for more details.
"""
import sys
import struct
import socketserver
import win32api
import http.client
import traceback
import win32security
import sspi, sspicon
import optparse # sorry, this demo needs 2.3+
options = None # set to optparse object.
def GetUserName():
    """Return a printable name for the current user.

    Falls back to the repr() of the SAM-compatible fully-qualified account
    name when the plain lookup is denied.
    """
    try:
        name = win32api.GetUserName()
    except win32api.error as details:
        # 'Access denied' happens here for accounts without local-logon
        # rights.  The fully-qualified name still works, and repr() avoids
        # any codec trouble when the value is later printed.
        name = repr(win32api.GetUserNameEx(win32api.NameSamCompatible))
    return name
# Send a simple "message" over a socket - send the number of bytes first,
# then the string. Ditto for receive.
def _send_msg(s, m):
s.send(struct.pack("i", len(m)))
s.send(m)
def _get_msg(s):
size_data = s.recv(struct.calcsize("i"))
if not size_data:
return None
cb = struct.unpack("i", size_data)[0]
return s.recv(cb)
class SSPISocketServer(socketserver.TCPServer):
    """TCP server that authenticates every connection via SSPI.

    The SSPI handshake runs in verify_request(); once authorized, the
    conversation in process_request() is held while impersonating the
    authenticated client.
    """

    def __init__(self, *args, **kw):
        socketserver.TCPServer.__init__(self, *args, **kw)
        # A single ServerAuth is reused (reset) for each connection.
        self.sa = sspi.ServerAuth(options.package)

    def verify_request(self, sock, ca):
        # Do the sspi auth dance
        self.sa.reset()
        while 1:
            data = _get_msg(sock)
            if data is None:
                # Client hung up mid-handshake.
                return False
            try:
                err, sec_buffer = self.sa.authorize(data)
            except sspi.error as details:
                print("FAILED to authorize client:", details)
                return False
            if err==0:
                # Handshake complete - accept the connection.
                break
            _send_msg(sock, sec_buffer[0].Buffer)
        return True

    def process_request(self, request, client_address):
        # An example using the connection once it is established.
        print("The server is running as user", GetUserName())
        # Run the rest of the conversation under the client's identity.
        self.sa.ctxt.ImpersonateSecurityContext()
        try:
            print("Having conversation with client as user", GetUserName())
            while 1:
                # we need to grab 2 bits of data - the encrypted data, and the
                # 'key'
                data = _get_msg(request)
                key = _get_msg(request)
                if data is None or key is None:
                    break
                data = self.sa.decrypt(data, key)
                print("Client sent:", repr(data))
        finally:
            # Always drop impersonation, even if the conversation failed.
            self.sa.ctxt.RevertSecurityContext()
            self.close_request(request)
            print("The server is back to user", GetUserName())
def serve():
    """Create the SSPI test server on localhost and serve until killed."""
    server = SSPISocketServer(("localhost", options.port), None)
    print("Running test server...")
    server.serve_forever()
def sspi_client():
    """Connect to the local test server, authenticate, send encrypted words."""
    conn = http.client.HTTPConnection("localhost", options.port)
    conn.connect()
    # Perform the SSPI handshake until the provider reports success (err==0).
    ca = sspi.ClientAuth(options.package, targetspn=options.target_spn)
    data = None
    while 1:
        err, out_buf = ca.authorize(data)
        _send_msg(conn.sock, out_buf[0].Buffer)
        if err == 0:
            break
        data = _get_msg(conn.sock)
    print("Auth dance complete - sending a few encryted messages")
    # Assume out data is sensitive - encrypt the message.
    for word in "Hello from the client".split():
        blob, key = ca.encrypt(word)
        _send_msg(conn.sock, blob)
        _send_msg(conn.sock, key)
    conn.sock.close()
    print("Client completed.")
if __name__=='__main__':
    parser = optparse.OptionParser("%prog [options] client|server",
                                   description=__doc__)
    parser.add_option("", "--package", action="store", default="NTLM",
                      help="The SSPI package to use (eg, Kerberos) - default is NTLM")
    parser.add_option("", "--target-spn", action="store",
                      help="""The target security provider name to use. The
                      string contents are security-package specific.  For
                      example, 'Kerberos' or 'Negotiate' require the server
                      principal name (SPN) (ie, the username) of the remote
                      process.  For NTLM this must be blank.""")
    parser.add_option("", "--port", action="store", default="8181",
                      help="The port number to use (default=8181)")
    parser.add_option("", "--wait", action="store_true",
                      help="""Cause the program to wait for input just before
                             terminating. Useful when using via runas to see
                             any error messages before termination.
                             """)
    options, args = parser.parse_args()
    # --port arrives as a string; validate it before any socket work.
    try:
        options.port = int(options.port)
    except (ValueError, TypeError):
        parser.error("--port must be an integer")

    try:
        try:
            if not args:
                args = ['']
            if args[0]=="client":
                sspi_client()
            elif args[0]=="server":
                serve()
            else:
                parser.error("You must supply 'client' or 'server' - " \
                             "use --help for details")
        except KeyboardInterrupt:
            pass
        except SystemExit:
            pass
        except:
            # Demo program: show any failure rather than dying silently.
            traceback.print_exc()
    finally:
        # Optionally pause so 'runas' console windows stay readable.
        if options.wait:
            input("Press enter to continue")
| {
"repo_name": "huguesv/PTVS",
"path": "Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/win32/Demos/security/sspi/socket_server.py",
"copies": "6",
"size": "6331",
"license": "apache-2.0",
"hash": -2615661431223653400,
"line_mean": 34.5674157303,
"line_max": 86,
"alpha_frac": 0.5899541937,
"autogenerated": false,
"ratio": 4.140614780902551,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.006851751279296602,
"num_lines": 178
} |
"""A sandbox layer that ensures unsafe operations cannot be performed.
Useful when the template itself comes from an untrusted source.
"""
import operator
import types
from _string import formatter_field_name_split # type: ignore
from collections import abc
from collections import deque
from string import Formatter
from typing import FrozenSet
from typing import Set
from markupsafe import EscapeFormatter
from markupsafe import Markup
from .environment import Environment
from .exceptions import SecurityError
#: maximum number of items a range may produce
MAX_RANGE = 100000
#: Unsafe function attributes.
UNSAFE_FUNCTION_ATTRIBUTES: Set = set()
#: Unsafe method attributes. Function attributes are unsafe for methods too.
UNSAFE_METHOD_ATTRIBUTES: Set = set()
#: unsafe generator attributes.
UNSAFE_GENERATOR_ATTRIBUTES = {"gi_frame", "gi_code"}
#: unsafe attributes on coroutines
UNSAFE_COROUTINE_ATTRIBUTES = {"cr_frame", "cr_code"}
#: unsafe attributes on async generators
UNSAFE_ASYNC_GENERATOR_ATTRIBUTES = {"ag_code", "ag_frame"}
_mutable_spec = (
(
abc.MutableSet,
frozenset(
[
"add",
"clear",
"difference_update",
"discard",
"pop",
"remove",
"symmetric_difference_update",
"update",
]
),
),
(
abc.MutableMapping,
frozenset(["clear", "pop", "popitem", "setdefault", "update"]),
),
(
abc.MutableSequence,
frozenset(["append", "reverse", "insert", "sort", "extend", "remove"]),
),
(
deque,
frozenset(
[
"append",
"appendleft",
"clear",
"extend",
"extendleft",
"pop",
"popleft",
"remove",
"rotate",
]
),
),
)
def inspect_format_method(callable):
    """Return the bound string when *callable* is ``str.format`` or
    ``str.format_map``; otherwise return None.
    """
    if isinstance(callable, (types.MethodType, types.BuiltinMethodType)):
        if callable.__name__ in ("format", "format_map"):
            bound_to = callable.__self__
            if isinstance(bound_to, str):
                return bound_to
    return None
def safe_range(*args):
    """Build a range, refusing any that would yield more than MAX_RANGE
    items.  Raises OverflowError for oversized ranges.
    """
    result = range(*args)
    # len() on a range object is O(1), so this check is cheap.
    if len(result) > MAX_RANGE:
        raise OverflowError(
            "Range too big. The sandbox blocks ranges larger than"
            f" MAX_RANGE ({MAX_RANGE})."
        )
    return result
def unsafe(f):
    """Decorator marking *f* as not callable from sandboxed templates.

    .. code-block: python

        @unsafe
        def delete(self):
            pass
    """
    setattr(f, "unsafe_callable", True)
    return f
def is_internal_attribute(obj, attr):
    """Test if the attribute given is an internal python attribute.  For
    example this function returns `True` for the `func_code` attribute of
    python objects.  This is useful if the environment method
    :meth:`~SandboxedEnvironment.is_safe_attribute` is overridden.

    >>> from jinja2.sandbox import is_internal_attribute
    >>> is_internal_attribute(str, "mro")
    True
    >>> is_internal_attribute(str, "upper")
    False
    """
    # NOTE: the elif chain means only the *first* matching category of
    # `obj` is consulted - keep the ordering intact.
    if isinstance(obj, types.FunctionType):
        if attr in UNSAFE_FUNCTION_ATTRIBUTES:
            return True
    elif isinstance(obj, types.MethodType):
        # Methods expose their underlying function's attributes too.
        if attr in UNSAFE_FUNCTION_ATTRIBUTES or attr in UNSAFE_METHOD_ATTRIBUTES:
            return True
    elif isinstance(obj, type):
        # type.mro() would expose the full class hierarchy.
        if attr == "mro":
            return True
    elif isinstance(obj, (types.CodeType, types.TracebackType, types.FrameType)):
        # Interpreter internals are entirely off limits.
        return True
    elif isinstance(obj, types.GeneratorType):
        if attr in UNSAFE_GENERATOR_ATTRIBUTES:
            return True
    elif hasattr(types, "CoroutineType") and isinstance(obj, types.CoroutineType):
        if attr in UNSAFE_COROUTINE_ATTRIBUTES:
            return True
    elif hasattr(types, "AsyncGeneratorType") and isinstance(
        obj, types.AsyncGeneratorType
    ):
        if attr in UNSAFE_ASYNC_GENERATOR_ATTRIBUTES:
            return True
    # Dunder attributes are considered internal on every object.
    return attr.startswith("__")
def modifies_known_mutable(obj, attr):
    """Return True when calling ``obj.attr`` would mutate one of the known
    builtin mutable containers (list, dict, set, deque or their ABCs).

    Unsupported objects simply yield False.
    """
    # Only the first matching type spec decides, mirroring an if/elif chain.
    blocked = next(
        (names for typespec, names in _mutable_spec if isinstance(obj, typespec)),
        None,
    )
    return blocked is not None and attr in blocked
class SandboxedEnvironment(Environment):
    """The sandboxed environment.  It works like the regular environment but
    tells the compiler to generate sandboxed code.  Additionally subclasses of
    this environment may override the methods that tell the runtime what
    attributes or functions are safe to access.

    If the template tries to access insecure code a :exc:`SecurityError` is
    raised.  However also other exceptions may occur during the rendering so
    the caller has to ensure that all exceptions are caught.
    """

    sandboxed = True

    #: default callback table for the binary operators.  A copy of this is
    #: available on each instance of a sandboxed environment as
    #: :attr:`binop_table`
    default_binop_table = {
        "+": operator.add,
        "-": operator.sub,
        "*": operator.mul,
        "/": operator.truediv,
        "//": operator.floordiv,
        "**": operator.pow,
        "%": operator.mod,
    }

    #: default callback table for the unary operators.  A copy of this is
    #: available on each instance of a sandboxed environment as
    #: :attr:`unop_table`
    default_unop_table = {"+": operator.pos, "-": operator.neg}

    #: a set of binary operators that should be intercepted.  Each operator
    #: that is added to this set (empty by default) is delegated to the
    #: :meth:`call_binop` method that will perform the operator.  The default
    #: operator callback is specified by :attr:`binop_table`.
    #:
    #: The following binary operators are interceptable:
    #: ``//``, ``%``, ``+``, ``*``, ``-``, ``/``, and ``**``
    #:
    #: The default operation form the operator table corresponds to the
    #: builtin function.  Intercepted calls are always slower than the native
    #: operator call, so make sure only to intercept the ones you are
    #: interested in.
    #:
    #: .. versionadded:: 2.6
    intercepted_binops: FrozenSet = frozenset()

    #: a set of unary operators that should be intercepted.  Each operator
    #: that is added to this set (empty by default) is delegated to the
    #: :meth:`call_unop` method that will perform the operator.  The default
    #: operator callback is specified by :attr:`unop_table`.
    #:
    #: The following unary operators are interceptable: ``+``, ``-``
    #:
    #: The default operation form the operator table corresponds to the
    #: builtin function.  Intercepted calls are always slower than the native
    #: operator call, so make sure only to intercept the ones you are
    #: interested in.
    #:
    #: .. versionadded:: 2.6
    intercepted_unops: FrozenSet = frozenset()

    def intercept_unop(self, operator):
        """Called during template compilation with the name of a unary
        operator to check if it should be intercepted at runtime.  If this
        method returns `True`, :meth:`call_unop` is executed for this unary
        operator.  The default implementation of :meth:`call_unop` will use
        the :attr:`unop_table` dictionary to perform the operator with the
        same logic as the builtin one.

        The following unary operators are interceptable: ``+`` and ``-``

        Intercepted calls are always slower than the native operator call,
        so make sure only to intercept the ones you are interested in.

        .. versionadded:: 2.6
        """
        return False

    def __init__(self, *args, **kwargs):
        Environment.__init__(self, *args, **kwargs)
        self.globals["range"] = safe_range
        # Per-instance copies so subclasses/users can tweak them safely.
        self.binop_table = self.default_binop_table.copy()
        self.unop_table = self.default_unop_table.copy()

    def is_safe_attribute(self, obj, attr, value):
        """The sandboxed environment will call this method to check if the
        attribute of an object is safe to access.  Per default all attributes
        starting with an underscore are considered private as well as the
        special attributes of internal python objects as returned by the
        :func:`is_internal_attribute` function.
        """
        return not (attr.startswith("_") or is_internal_attribute(obj, attr))

    def is_safe_callable(self, obj):
        """Check if an object is safely callable.  Per default a function is
        considered safe unless the `unsafe_callable` attribute exists and is
        True.  Override this method to alter the behavior, but this won't
        affect the `unsafe` decorator from this module.
        """
        return not (
            getattr(obj, "unsafe_callable", False) or getattr(obj, "alters_data", False)
        )

    def call_binop(self, context, operator, left, right):
        """For intercepted binary operator calls (:meth:`intercepted_binops`)
        this function is executed instead of the builtin operator.  This can
        be used to fine tune the behavior of certain operators.

        .. versionadded:: 2.6
        """
        return self.binop_table[operator](left, right)

    def call_unop(self, context, operator, arg):
        """For intercepted unary operator calls (:meth:`intercepted_unops`)
        this function is executed instead of the builtin operator.  This can
        be used to fine tune the behavior of certain operators.

        .. versionadded:: 2.6
        """
        return self.unop_table[operator](arg)

    def getitem(self, obj, argument):
        """Subscribe an object from sandboxed code."""
        try:
            return obj[argument]
        except (TypeError, LookupError):
            # Fall back to attribute access for string subscripts, still
            # subject to the safety check.
            if isinstance(argument, str):
                try:
                    attr = str(argument)
                except Exception:
                    pass
                else:
                    try:
                        value = getattr(obj, attr)
                    except AttributeError:
                        pass
                    else:
                        if self.is_safe_attribute(obj, argument, value):
                            return value
                        return self.unsafe_undefined(obj, argument)
        return self.undefined(obj=obj, name=argument)

    def getattr(self, obj, attribute):
        """Subscribe an object from sandboxed code and prefer the
        attribute.  The attribute passed *must* be a bytestring.
        """
        try:
            value = getattr(obj, attribute)
        except AttributeError:
            # Fall back to item lookup when the attribute does not exist.
            try:
                return obj[attribute]
            except (TypeError, LookupError):
                pass
        else:
            if self.is_safe_attribute(obj, attribute, value):
                return value
            return self.unsafe_undefined(obj, attribute)
        return self.undefined(obj=obj, name=attribute)

    def unsafe_undefined(self, obj, attribute):
        """Return an undefined object for unsafe attributes."""
        return self.undefined(
            f"access to attribute {attribute!r} of"
            f" {obj.__class__.__name__!r} object is unsafe.",
            name=attribute,
            obj=obj,
            exc=SecurityError,
        )

    def format_string(self, s, args, kwargs, format_func=None):
        """If a format call is detected, then this is routed through this
        method so that our safety sandbox can be used for it.
        """
        if isinstance(s, Markup):
            formatter = SandboxedEscapeFormatter(self, s.escape)
        else:
            formatter = SandboxedFormatter(self)

        if format_func is not None and format_func.__name__ == "format_map":
            if len(args) != 1 or kwargs:
                raise TypeError(
                    "format_map() takes exactly one argument"
                    f" {len(args) + (kwargs is not None)} given"
                )

            kwargs = args[0]
            # BUGFIX: was ``args = None``.  string.Formatter.vformat()
            # iterates/indexes *args*, so passing None raised TypeError as
            # soon as a format_map string was rendered.  An empty tuple is
            # the correct "no positional arguments" value.
            args = ()

        rv = formatter.vformat(s, args, kwargs)
        return type(s)(rv)

    def call(__self, __context, __obj, *args, **kwargs):  # noqa: B902
        """Call an object from sandboxed code."""
        fmt = inspect_format_method(__obj)
        if fmt is not None:
            return __self.format_string(fmt, args, kwargs, __obj)

        # the double prefixes are to avoid double keyword argument
        # errors when proxying the call.
        if not __self.is_safe_callable(__obj):
            raise SecurityError(f"{__obj!r} is not safely callable")
        return __context.call(__obj, *args, **kwargs)
class ImmutableSandboxedEnvironment(SandboxedEnvironment):
    """A :class:`SandboxedEnvironment` that additionally rejects attribute
    access that would mutate the builtin containers `list`, `set` and
    `dict`, as detected by :func:`modifies_known_mutable`.
    """

    def is_safe_attribute(self, obj, attr, value):
        if modifies_known_mutable(obj, attr):
            return False
        return SandboxedEnvironment.is_safe_attribute(self, obj, attr, value)
class SandboxedFormatterMixin:
    """Mixin routing string.Formatter field lookups through the sandbox."""

    def __init__(self, env):
        # Environment whose getattr/getitem enforce the sandbox rules.
        self._env = env

    def get_field(self, field_name, args, kwargs):
        first, rest = formatter_field_name_split(field_name)
        obj = self.get_value(first, args, kwargs)
        for is_attr, i in rest:
            # Attribute vs. item access, both mediated by the environment
            # so unsafe lookups become undefined/SecurityError.
            if is_attr:
                obj = self._env.getattr(obj, i)
            else:
                obj = self._env.getitem(obj, i)
        return obj, first
class SandboxedFormatter(SandboxedFormatterMixin, Formatter):
    """string.Formatter variant with sandboxed field resolution."""

    def __init__(self, env):
        SandboxedFormatterMixin.__init__(self, env)
        Formatter.__init__(self)
class SandboxedEscapeFormatter(SandboxedFormatterMixin, EscapeFormatter):
    """EscapeFormatter (markup-escaping) with sandboxed field resolution."""

    def __init__(self, env, escape):
        SandboxedFormatterMixin.__init__(self, env)
        EscapeFormatter.__init__(self, escape)
| {
"repo_name": "mitsuhiko/jinja2",
"path": "src/jinja2/sandbox.py",
"copies": "3",
"size": "14590",
"license": "bsd-3-clause",
"hash": 8760527259261118000,
"line_mean": 33.6555819477,
"line_max": 88,
"alpha_frac": 0.6120630569,
"autogenerated": false,
"ratio": 4.414523449319213,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6526586506219213,
"avg_score": null,
"num_lines": null
} |
"""A sandbox layer that ensures unsafe operations cannot be performed.
Useful when the template itself comes from an untrusted source.
"""
import operator
import types
import typing as t
from _string import formatter_field_name_split # type: ignore
from collections import abc
from collections import deque
from string import Formatter
from markupsafe import EscapeFormatter
from markupsafe import Markup
from .environment import Environment
from .exceptions import SecurityError
from .runtime import Context
from .runtime import Undefined
F = t.TypeVar("F", bound=t.Callable[..., t.Any])
#: maximum number of items a range may produce
MAX_RANGE = 100000
#: Unsafe function attributes.
UNSAFE_FUNCTION_ATTRIBUTES: t.Set[str] = set()
#: Unsafe method attributes. Function attributes are unsafe for methods too.
UNSAFE_METHOD_ATTRIBUTES: t.Set[str] = set()
#: unsafe generator attributes.
UNSAFE_GENERATOR_ATTRIBUTES = {"gi_frame", "gi_code"}
#: unsafe attributes on coroutines
UNSAFE_COROUTINE_ATTRIBUTES = {"cr_frame", "cr_code"}
#: unsafe attributes on async generators
UNSAFE_ASYNC_GENERATOR_ATTRIBUTES = {"ag_code", "ag_frame"}
_mutable_spec: t.Tuple[t.Tuple[t.Type, t.FrozenSet[str]], ...] = (
(
abc.MutableSet,
frozenset(
[
"add",
"clear",
"difference_update",
"discard",
"pop",
"remove",
"symmetric_difference_update",
"update",
]
),
),
(
abc.MutableMapping,
frozenset(["clear", "pop", "popitem", "setdefault", "update"]),
),
(
abc.MutableSequence,
frozenset(["append", "reverse", "insert", "sort", "extend", "remove"]),
),
(
deque,
frozenset(
[
"append",
"appendleft",
"clear",
"extend",
"extendleft",
"pop",
"popleft",
"remove",
"rotate",
]
),
),
)
def inspect_format_method(callable: t.Callable) -> t.Optional[str]:
    """Return the bound string when *callable* is ``str.format`` or
    ``str.format_map``; otherwise return ``None``.
    """
    if not isinstance(
        callable, (types.MethodType, types.BuiltinMethodType)
    ) or callable.__name__ not in ("format", "format_map"):
        return None

    obj = callable.__self__

    if isinstance(obj, str):
        return obj

    return None
def safe_range(*args: int) -> range:
    """A range that can't generate ranges with a length of more than
    MAX_RANGE items.
    """
    rng = range(*args)

    # len() on a range object is O(1), so this check is cheap even for
    # enormous ranges.
    if len(rng) > MAX_RANGE:
        raise OverflowError(
            "Range too big. The sandbox blocks ranges larger than"
            f" MAX_RANGE ({MAX_RANGE})."
        )

    return rng
def unsafe(f: F) -> F:
    """Decorator marking *f* as not callable from sandboxed templates.

    .. code-block: python

        @unsafe
        def delete(self):
            pass
    """
    # setattr avoids the attribute-assignment type error on Callable.
    setattr(f, "unsafe_callable", True)
    return f
def is_internal_attribute(obj: t.Any, attr: str) -> bool:
    """Test if the attribute given is an internal python attribute.  For
    example this function returns `True` for the `func_code` attribute of
    python objects.  This is useful if the environment method
    :meth:`~SandboxedEnvironment.is_safe_attribute` is overridden.

    >>> from jinja2.sandbox import is_internal_attribute
    >>> is_internal_attribute(str, "mro")
    True
    >>> is_internal_attribute(str, "upper")
    False
    """
    # NOTE: the elif chain means only the *first* matching category of
    # `obj` is consulted - keep the ordering intact.
    if isinstance(obj, types.FunctionType):
        if attr in UNSAFE_FUNCTION_ATTRIBUTES:
            return True
    elif isinstance(obj, types.MethodType):
        # Methods expose their underlying function's attributes too.
        if attr in UNSAFE_FUNCTION_ATTRIBUTES or attr in UNSAFE_METHOD_ATTRIBUTES:
            return True
    elif isinstance(obj, type):
        # type.mro() would expose the full class hierarchy.
        if attr == "mro":
            return True
    elif isinstance(obj, (types.CodeType, types.TracebackType, types.FrameType)):
        # Interpreter internals are entirely off limits.
        return True
    elif isinstance(obj, types.GeneratorType):
        if attr in UNSAFE_GENERATOR_ATTRIBUTES:
            return True
    elif hasattr(types, "CoroutineType") and isinstance(obj, types.CoroutineType):
        if attr in UNSAFE_COROUTINE_ATTRIBUTES:
            return True
    elif hasattr(types, "AsyncGeneratorType") and isinstance(
        obj, types.AsyncGeneratorType
    ):
        if attr in UNSAFE_ASYNC_GENERATOR_ATTRIBUTES:
            return True
    # Dunder attributes are considered internal on every object.
    return attr.startswith("__")
def modifies_known_mutable(obj: t.Any, attr: str) -> bool:
    """Return True when calling ``obj.attr`` would mutate one of the known
    builtin mutable containers (list, dict, set, deque or their ABCs).

    Unsupported objects simply yield False.
    """
    # Only the first matching type spec decides, mirroring an if/elif chain.
    blocked = next(
        (names for typespec, names in _mutable_spec if isinstance(obj, typespec)),
        None,
    )
    return blocked is not None and attr in blocked
class SandboxedEnvironment(Environment):
    """The sandboxed environment.  It works like the regular environment but
    tells the compiler to generate sandboxed code.  Additionally subclasses of
    this environment may override the methods that tell the runtime what
    attributes or functions are safe to access.

    If the template tries to access insecure code a :exc:`SecurityError` is
    raised.  However also other exceptions may occur during the rendering so
    the caller has to ensure that all exceptions are caught.
    """

    sandboxed = True

    #: default callback table for the binary operators.  A copy of this is
    #: available on each instance of a sandboxed environment as
    #: :attr:`binop_table`
    default_binop_table: t.Dict[str, t.Callable[[t.Any, t.Any], t.Any]] = {
        "+": operator.add,
        "-": operator.sub,
        "*": operator.mul,
        "/": operator.truediv,
        "//": operator.floordiv,
        "**": operator.pow,
        "%": operator.mod,
    }

    #: default callback table for the unary operators.  A copy of this is
    #: available on each instance of a sandboxed environment as
    #: :attr:`unop_table`
    default_unop_table: t.Dict[str, t.Callable[[t.Any], t.Any]] = {
        "+": operator.pos,
        "-": operator.neg,
    }

    #: a set of binary operators that should be intercepted.  Each operator
    #: that is added to this set (empty by default) is delegated to the
    #: :meth:`call_binop` method that will perform the operator.  The default
    #: operator callback is specified by :attr:`binop_table`.
    #:
    #: The following binary operators are interceptable:
    #: ``//``, ``%``, ``+``, ``*``, ``-``, ``/``, and ``**``
    #:
    #: The default operation form the operator table corresponds to the
    #: builtin function.  Intercepted calls are always slower than the native
    #: operator call, so make sure only to intercept the ones you are
    #: interested in.
    #:
    #: .. versionadded:: 2.6
    intercepted_binops: t.FrozenSet[str] = frozenset()

    #: a set of unary operators that should be intercepted.  Each operator
    #: that is added to this set (empty by default) is delegated to the
    #: :meth:`call_unop` method that will perform the operator.  The default
    #: operator callback is specified by :attr:`unop_table`.
    #:
    #: The following unary operators are interceptable: ``+``, ``-``
    #:
    #: The default operation form the operator table corresponds to the
    #: builtin function.  Intercepted calls are always slower than the native
    #: operator call, so make sure only to intercept the ones you are
    #: interested in.
    #:
    #: .. versionadded:: 2.6
    intercepted_unops: t.FrozenSet[str] = frozenset()

    def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
        super().__init__(*args, **kwargs)
        self.globals["range"] = safe_range
        # Per-instance copies so subclasses/users can tweak them safely.
        self.binop_table = self.default_binop_table.copy()
        self.unop_table = self.default_unop_table.copy()

    def is_safe_attribute(self, obj: t.Any, attr: str, value: t.Any) -> bool:
        """The sandboxed environment will call this method to check if the
        attribute of an object is safe to access.  Per default all attributes
        starting with an underscore are considered private as well as the
        special attributes of internal python objects as returned by the
        :func:`is_internal_attribute` function.
        """
        return not (attr.startswith("_") or is_internal_attribute(obj, attr))

    def is_safe_callable(self, obj: t.Any) -> bool:
        """Check if an object is safely callable.  By default callables
        are considered safe unless decorated with :func:`unsafe`.

        This also recognizes the Django convention of setting
        ``func.alters_data = True``.
        """
        return not (
            getattr(obj, "unsafe_callable", False) or getattr(obj, "alters_data", False)
        )

    def call_binop(
        self, context: Context, operator: str, left: t.Any, right: t.Any
    ) -> t.Any:
        """For intercepted binary operator calls (:meth:`intercepted_binops`)
        this function is executed instead of the builtin operator.  This can
        be used to fine tune the behavior of certain operators.

        .. versionadded:: 2.6
        """
        return self.binop_table[operator](left, right)

    def call_unop(self, context: Context, operator: str, arg: t.Any) -> t.Any:
        """For intercepted unary operator calls (:meth:`intercepted_unops`)
        this function is executed instead of the builtin operator.  This can
        be used to fine tune the behavior of certain operators.

        .. versionadded:: 2.6
        """
        return self.unop_table[operator](arg)

    def getitem(
        self, obj: t.Any, argument: t.Union[str, t.Any]
    ) -> t.Union[t.Any, Undefined]:
        """Subscribe an object from sandboxed code."""
        try:
            return obj[argument]
        except (TypeError, LookupError):
            # Fall back to attribute access for string subscripts, still
            # subject to the safety check.
            if isinstance(argument, str):
                try:
                    attr = str(argument)
                except Exception:
                    pass
                else:
                    try:
                        value = getattr(obj, attr)
                    except AttributeError:
                        pass
                    else:
                        if self.is_safe_attribute(obj, argument, value):
                            return value
                        return self.unsafe_undefined(obj, argument)
        return self.undefined(obj=obj, name=argument)

    def getattr(self, obj: t.Any, attribute: str) -> t.Union[t.Any, Undefined]:
        """Subscribe an object from sandboxed code and prefer the
        attribute.  The attribute passed *must* be a bytestring.
        """
        try:
            value = getattr(obj, attribute)
        except AttributeError:
            # Fall back to item lookup when the attribute does not exist.
            try:
                return obj[attribute]
            except (TypeError, LookupError):
                pass
        else:
            if self.is_safe_attribute(obj, attribute, value):
                return value
            return self.unsafe_undefined(obj, attribute)
        return self.undefined(obj=obj, name=attribute)

    def unsafe_undefined(self, obj: t.Any, attribute: str) -> Undefined:
        """Return an undefined object for unsafe attributes."""
        return self.undefined(
            f"access to attribute {attribute!r} of"
            f" {type(obj).__name__!r} object is unsafe.",
            name=attribute,
            obj=obj,
            exc=SecurityError,
        )

    def format_string(
        self,
        s: str,
        args: t.Tuple[t.Any, ...],
        kwargs: t.Dict[str, t.Any],
        format_func: t.Optional[t.Callable] = None,
    ) -> str:
        """If a format call is detected, then this is routed through this
        method so that our safety sandbox can be used for it.
        """
        formatter: SandboxedFormatter
        if isinstance(s, Markup):
            formatter = SandboxedEscapeFormatter(self, escape=s.escape)
        else:
            formatter = SandboxedFormatter(self)

        if format_func is not None and format_func.__name__ == "format_map":
            if len(args) != 1 or kwargs:
                raise TypeError(
                    "format_map() takes exactly one argument"
                    f" {len(args) + (kwargs is not None)} given"
                )

            # format_map's single positional argument becomes the mapping.
            kwargs = args[0]
            args = ()

        rv = formatter.vformat(s, args, kwargs)
        return type(s)(rv)

    def call(
        __self,  # noqa: B902
        __context: Context,
        __obj: t.Any,
        *args: t.Any,
        **kwargs: t.Any,
    ) -> t.Any:
        """Call an object from sandboxed code."""
        fmt = inspect_format_method(__obj)
        if fmt is not None:
            return __self.format_string(fmt, args, kwargs, __obj)

        # the double prefixes are to avoid double keyword argument
        # errors when proxying the call.
        if not __self.is_safe_callable(__obj):
            raise SecurityError(f"{__obj!r} is not safely callable")
        return __context.call(__obj, *args, **kwargs)
class ImmutableSandboxedEnvironment(SandboxedEnvironment):
    """Works exactly like the regular `SandboxedEnvironment` but does not
    permit modifications on the builtin mutable objects `list`, `set`, and
    `dict` by using the :func:`modifies_known_mutable` function.
    """

    def is_safe_attribute(self, obj: t.Any, attr: str, value: t.Any) -> bool:
        # Base sandbox rules first, then the extra mutation check.
        if not super().is_safe_attribute(obj, attr, value):
            return False

        return not modifies_known_mutable(obj, attr)
class SandboxedFormatter(Formatter):
    """string.Formatter variant routing field lookups through the sandbox."""

    def __init__(self, env: Environment, **kwargs: t.Any) -> None:
        # Environment whose getattr/getitem enforce the sandbox rules.
        self._env = env
        super().__init__(**kwargs)  # type: ignore

    def get_field(
        self, field_name: str, args: t.Sequence[t.Any], kwargs: t.Mapping[str, t.Any]
    ) -> t.Tuple[t.Any, str]:
        first, rest = formatter_field_name_split(field_name)
        obj = self.get_value(first, args, kwargs)
        for is_attr, i in rest:
            # Attribute vs. item access, both mediated by the environment
            # so unsafe lookups become undefined/SecurityError.
            if is_attr:
                obj = self._env.getattr(obj, i)
            else:
                obj = self._env.getitem(obj, i)
        return obj, first
class SandboxedEscapeFormatter(SandboxedFormatter, EscapeFormatter):
    # EscapeFormatter contributes markup escaping; SandboxedFormatter
    # contributes the sandboxed get_field.  Cooperative super().__init__
    # keyword forwarding makes an explicit constructor unnecessary.
    pass
| {
"repo_name": "pallets/jinja",
"path": "src/jinja2/sandbox.py",
"copies": "1",
"size": "14600",
"license": "bsd-3-clause",
"hash": 8450062638735576000,
"line_mean": 33.1121495327,
"line_max": 88,
"alpha_frac": 0.5996575342,
"autogenerated": false,
"ratio": 4.199022145527754,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5298679679727754,
"avg_score": null,
"num_lines": null
} |
# A sandpile is a square matrix of natural numbers between 0 and 3,
# representing how many grains of sand there is on each square. To add two
# sandpiles, just start by adding the two matrices element by element. Except
# the matrix you generate might not be a sandpile, if one of its element is
# higher than 3 you must transform this matrix into a sandpile, and this is how
# it is done :
# - If a square has 4 grains of sand or more, it "loses" four and distributes
# it to its four neighbors (if the square touches an edge, the grain of sand
# is lost)
# - Keep doing that to all the squares with 4 grains or more until all the
# squares have 3 grains or less
# Example :
# 000 000 000 010
# 020 + 020 = 040 -> 101
# 000 000 000 010
# Read the grid size, then both sandpiles, indexing each cell by the complex
# number row + col*1j so the four neighbours of z are simply z + 1j**k.
N = range(int(input()))
A = {i + j*1j:int(z) for i in N for j, z in enumerate(input())}
B = {i + j*1j:int(z) for i in N for j, z in enumerate(input())}
# Element-wise sum; cells may temporarily hold values greater than 3.
cpile = {z: A[z] + B[z] for z in set(A)}
# Topple until stable: every overloaded cell gives up four grains, one to
# each neighbour.  Grains falling off the edge are lost (KeyError branch).
while any(i > 3 for i in cpile.values()):
    for z, val in cpile.items():
        if val > 3:
            cpile[z] -= 4
            for k in range(4):
                try:
                    cpile[z + 1j**k] += 1
                except KeyError:
                    pass
# Print the stabilised pile row by row.
for i in N:
    line = ''.join(str(cpile[i + j*1j]) for j in N)
print(line) | {
"repo_name": "Pouf/CodingCompetition",
"path": "CG/community_sandpile-addition.py",
"copies": "1",
"size": "1347",
"license": "mit",
"hash": 3485347989170768000,
"line_mean": 34.4594594595,
"line_max": 79,
"alpha_frac": 0.5909428359,
"autogenerated": false,
"ratio": 3.068337129840547,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9099312905773487,
"avg_score": 0.011993411993411992,
"num_lines": 37
} |
"""As an optional feature, we can try to keep track of the assets used. This
makes it easier to run a manual rebuild.
In the future, it can also help deciding when an asset needs an automatic
update. For instance:
- We might auto update when the hash changes, wheras the old hash to
compare against will be stored by the tracker.
- We might use the tracked assets to compare timestamps if that turns
out to be faster than a stat() call.
- Right now, if asset properties like the applied filters or the list
of source files change, without causing the source timestamp to
change, the update will not be automatically picked up. As those
information could be tracked and then be used to detect changes.
"""
from conf import settings
def get_tracker(name=None):
    """Resolve an asset-tracking backend by name.

    Returns the tracker callable(output, sources) registered for ``name``
    (falling back to the ``TRACK_ASSETS`` setting when ``name`` is falsy),
    or ``None`` when tracking is disabled.  Raises ``ValueError`` for an
    unknown name.

    See the ``TRACK_ASSETS`` setting for more information.
    """
    if not name:
        name = settings.TRACK_ASSETS
    registry = {
        None: None,
        False: None,
        "model": track_via_model,
        "cache": track_via_cache,
    }
    if name not in registry:
        raise ValueError('Tracking option "%s" is not valid.' % name)
    return registry[name]
def track_via_model(sourcefiles, outputfile, filter_name):
    """Track ``outputfile`` (built from ``sourcefiles`` via ``filter_name``)
    through a database model.

    Not implemented yet.  Always raises ``NotImplementedError``.
    """
    # TODO: implement along these lines once the Asset model exists
    # (previously kept as an unreachable string literal after the raise):
    #   touched_time = current_ts()
    #   asset, created = Asset.objects.get_or_create(outputfile,
    #       [sourcefiles, filter_name, touched_time])
    #   if not created:
    #       asset.sourcefiles = sourcefiles
    #       asset.filter_name = filter_name
    #       asset.touched_mtime = touched_time
    #       asset.save()
    raise NotImplementedError()
def track_via_cache(sourcefiles, outputfile, filter_name):
    """Track assets through the cache backend; not implemented yet."""
    raise NotImplementedError()
| {
"repo_name": "maxcutler/Courant-News",
"path": "courant/core/assets/tracker.py",
"copies": "1",
"size": "1912",
"license": "bsd-3-clause",
"hash": -3546514615250589000,
"line_mean": 34.0754716981,
"line_max": 76,
"alpha_frac": 0.6480125523,
"autogenerated": false,
"ratio": 4.3752860411899315,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 53
} |
"""Asap module Collector.
Defines a filter collecting all information about parallel atoms on the master.
"""
import asap3
import numpy as np
class Collector:
    """Atoms-like filter collecting information on the master node.

    Wraps a parallel Atoms object: the get_* query methods return data for
    *all* atoms on the master node, and empty arrays on every other node
    (see collect()).
    """
    def __init__(self, atoms, master=None):
        self.atoms = atoms
        self.comm = atoms.get_comm()
        if master is None:
            # By default the processor with rank 0 acts as the master.
            master = (self.comm.rank == 0)
        self.master = master
        self.constraints = self.atoms.constraints

    def __len__(self):
        # The master reports the global atom count; all other nodes report
        # zero, so only the master appears to hold any atoms.
        n = self.atoms.get_number_of_atoms()
        if self.master:
            return n
        else:
            return 0

    def get_number_of_atoms(self):
        return self.atoms.get_number_of_atoms()

    def get_positions(self):
        return self.collect(self.atoms.get_positions)

    def get_forces(self):
        return self.collect(self.atoms.get_forces)

    def get_momenta(self):
        return self.collect(self.atoms.get_momenta)

    def get_atomic_numbers(self):
        return self.collect(self.atoms.get_atomic_numbers)

    def get_tags(self):
        return self.collect(self.atoms.get_tags)

    def get_potential_energy(self):
        return self.atoms.get_potential_energy()

    def get_cell(self):
        return self.atoms.get_cell()

    def get_calculator(self):
        return self.atoms.get_calculator()

    def get_stress(self):
        return self.atoms.get_stress()

    def get_pbc(self):
        return self.atoms.get_pbc()

    def get_info(self):
        return self.atoms.info

    def get_charges(self):
        # Charges are not supported by this filter.
        raise NotImplementedError

    def get_array(self, label):
        # Bind atoms and label as default arguments so collect() can invoke
        # the getter without arguments.
        return self.collect(lambda a=self.atoms, l=label: a.get_array(l))

    def has(self, name):
        """Check for existence of array.

        name must be one of: 'tags', 'momenta', 'masses', 'magmoms',
        'charges'.
        """
        if name in ['positions', 'tags', 'momenta', 'numbers']:
            return self.atoms.has(name)
        else:
            return False

    def collect(self, method):
        "Collect data from all cpus onto the master."
        ids = self.atoms.get_ids()
        data = method()
        n = self.atoms.get_number_of_atoms()
        if self.master:
            # Allocate room for the global data, then fill it in: first
            # from the master's own (ids, data), then from each other cpu.
            shape = (n,) + data.shape[1:]
            result = np.zeros(shape, data.dtype)
            for cpu in range(self.comm.size):
                if cpu != 0:
                    # Receive from cpu: first the count, then the global
                    # atom IDs, then the data rows themselves.
                    nrecv = np.zeros(1, int)
                    self.comm.receive(nrecv, cpu)
                    nrecv = nrecv[0]
                    ids = np.zeros(nrecv, int)
                    data = np.zeros((nrecv,) + result.shape[1:], result.dtype)
                    self.comm.receive(ids, cpu)
                    self.comm.receive(data, cpu)
                # Scatter the rows into their global positions by atom ID.
                result[ids] = data
            return result
        else:
            # Non-master nodes send (count, ids, data) to the master and
            # return an empty array of the correct trailing shape and dtype.
            assert(len(data) == len(ids))
            nsend = np.array([len(ids)])
            self.comm.send(nsend, 0)
            self.comm.send(ids, 0)
            self.comm.send(data, 0)
            return np.zeros((0,)+data.shape[1:], dtype=data.dtype)

    # Read-only property setters: assigning to pbc/numbers/info must fail.
    def _cant_set_pbc(self, pbc):
        "Fake set_pbc method."
        raise NotImplementedError("Cannot change PBC of a Collector instance.")
    pbc = property(get_pbc, _cant_set_pbc, "The boundary conditions attribute")

    def _cant_set_numbers(self, z):
        "Fake set_atomic_numbers method."
        raise NotImplementedError(
            "Cannot change atomic numbers of a Collector instance.")
    numbers = property(get_atomic_numbers, _cant_set_numbers,
                       "Atomic numbers as a property")

    def _cant_set_info(self, info):
        "Cannot set info attribute of Collector instance"
        raise NotImplementedError("Cannot set info attribute of Collector instance")
    info = property(get_info, _cant_set_info, "The info dictionary")
| {
"repo_name": "auag92/n2dm",
"path": "Asap-3.8.4/Python/asap3/Internal/Collector.py",
"copies": "1",
"size": "3941",
"license": "mit",
"hash": 3258346386822080000,
"line_mean": 30.0393700787,
"line_max": 84,
"alpha_frac": 0.5724435422,
"autogenerated": false,
"ratio": 3.8713163064833007,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9847679808342706,
"avg_score": 0.019216008068118913,
"num_lines": 127
} |
"""Asap module ParallelListOfAtoms.
Defines the parallel list of atoms object (`ParallelAtoms`), and a factory
method for creating them (`MakeParallelAtoms`).
Importing this module also installs a Python exit function causing
MPI_Abort to be called if an uncaught exception occurs.
"""
__docformat__ = "restructuredtext en"
import ase
import ase.units
from asap3 import _asap
import asap3.mpi
import numpy as np
import cPickle, cStringIO
import sys, time
import ase.parallel
class ParallelAtoms(ase.Atoms):
    """Atoms class for parallel Asap simulations.

    It is recommended to create ParallelAtoms objects using
    `MakeParallelAtoms`.
    """
    # Marker attribute allowing other code to detect parallel Atoms objects.
    parallel = 1

    def __init__(self, nCells, comm, atoms, cell=None, pbc=None,
                 distribute=True):
        """Create a ParallelAtoms object.

        WARNING: ParallelAtoms object should normally not be created
        explicitly.  Use MakeParallelAtoms instead.
        """
        # nCells: the processor layout, three integers (one per axis).
        self.nCells = np.array(nCells, int)
        self.comm = comm
        if self.nCells.shape != (3,):
            raise ValueError, "ParallelAtoms: nCells must be 3 integers."
        # Extract all data from the atoms object. Not nice :-)
        self.arrays = {}
        for name in atoms.arrays.keys():
            self.arrays[name] = atoms.arrays[name].copy()
        assert self.arrays["positions"].dtype == np.dtype(float)
        assert self.arrays["positions"].shape == (len(atoms), 3)
        assert self.arrays["numbers"].shape == (len(atoms),)
        if cell is not None:
            self.cell = cell
        else:
            self.cell = atoms.get_cell()
        if pbc is not None:
            self.pbc = pbc
        else:
            self.pbc = atoms.get_pbc()
        self.adsorbate_info = {}
        self.info = {}
        # Storage for ghost-atom data; starts empty and is presumably
        # maintained by the C++ layer during distribution.
        self.ghosts = {}
        self.ghosts["positions"] = np.zeros((0,3), float)
        self.ghosts["numbers"] = np.zeros(0, self.arrays["numbers"].dtype)
        # Now make the IDs
        mynatoms = np.array([len(self)])
        natoms_all = np.zeros(self.comm.size, int)
        self.comm.all_gather(mynatoms, natoms_all)
        if not self.arrays.has_key("ID"):
            # Assign globally unique consecutive IDs: each processor numbers
            # its atoms starting after those of all lower-ranked processors.
            firstID = sum(natoms_all[:self.comm.rank])
            self.arrays["ID"] = np.arange(firstID, firstID+len(atoms))
        self.total_number_of_atoms = sum(natoms_all)
        # Atoms should have no constraints
        self.set_constraint(None)
        if distribute:
            self.distribute()

    def distribute(self):
        # Delegate the actual atom distribution to the C++ extension.
        _asap.DistributeAtoms(self)

    def get_number_of_atoms(self):
        # Global atom count: sum of the local counts over all processors.
        n = len(self)
        return self.comm.sum(n)

    def get_list_of_elements(self):
        """Get a list of elements.

        The list is cached to prevent unnecessary communication.
        """
        try:
            return self.listofelements
        except AttributeError:
            # Build a presence table for atomic numbers, combine it across
            # all processors, then cache the resulting element list.
            z = self.get_atomic_numbers()
            present = np.zeros(100, int)
            zmax = z.max()
            zmin = z.min()
            present[zmin] = present[zmax] = 1
            for i in range(zmin+1, zmax):
                if np.equal(z, i).any():
                    present[i] = 1
            self.comm.sum(present)
            self.listofelements = []
            for i, p in enumerate(present):
                if p:
                    self.listofelements.append(i)
            return self.listofelements

    def set_atomic_numbers(self, numbers):
        """Set the atomic numbers."""
        try:
            # Discard the cached list of elements
            del self.listofelements
        except AttributeError:
            pass
        ase.Atoms.set_atomic_numbers(self, numbers)

    def get_ids(self):
        """Get the atom IDs in a parallel simulation."""
        return self.arrays["ID"].copy()

    def is_master(self):
        """Return 1 on the master node, 0 on all other nodes."""
        return (self.comm.rank == 0)

    def get_comm(self):
        return self.comm

    def wrap_calculator(self, calc):
        "Make an ASAP calculator compatible with parallel simulations."
        try:
            parallelOK = calc.supports_parallel()
        except AttributeError:
            parallelOK = False
        if not parallelOK:
            raise ValueError, "The calculator does not support parallel ASAP calculations."
        return _asap.ParallelPotential(calc)

    def set_calculator(self, calc, wrap=True):
        """Sets the calculator in a way compatible with parallel simulations.

        calc:
            The Calculator to be used. Normally only Asap calculators will work.

        wrap (optional, default=True):
            Indicates if a calculator should be wrapped in a ParallelCalculator
            object.  Wrapping is the default, and should almost always be used,
            the only exception being if the Calculator is implemented as a
            Python object wrapping an Asap calculator, in which case the Asap
            calculator should first be wrapped in a ParallelCalculator object
            (use atoms.wrap_calculator) and this one should then be used by
            the Python calculator.  The Python calculator is then attached
            without being wrapped again.
        """
        if wrap:
            parcalc = self.wrap_calculator(calc)
        else:
            parcalc = calc
        ase.Atoms.set_calculator(self, parcalc)

    def get_kinetic_energy(self):
        # Global kinetic energy: sum the local contributions over all nodes.
        local_ekin = ase.Atoms.get_kinetic_energy(self)
        return self.comm.sum(local_ekin)

    def get_temperature(self):
        """Get the temperature in Kelvin."""
        ekin = self.get_kinetic_energy() / self.get_number_of_atoms()
        # Equipartition: each atom carries 3/2 kB*T of kinetic energy.
        return ekin / (1.5 * ase.units.kB)

    def get_ghost_positions(self):
        return self.ghosts['positions'].copy()

    def get_ghost_atomic_numbers(self):
        return self.ghosts['numbers'].copy()
def MakeParallelAtoms(atoms, nCells, cell=None, pbc=None,
distribute=True):
"""Build parallel simulation from serial lists of atoms.
Call simultaneously on all processors. Each processor having
atoms should pass a list of atoms as the first argument, or None
if this processor does not contribute with any atoms. If the
cell and/or pbc arguments are given, they must be given on
all processors, and be identical. If it is not given, a supercell
is attempted to be extracted from the atoms on the processor with
lowest rank.
This is the preferred method for creating parallel simulations.
"""
import cPickle, cStringIO
mpi = asap3.mpi
#comm = mpi.world.duplicate()
comm = mpi.world
# Sanity check: is the node layout reasonable
nNodes = nCells[0] * nCells[1] * nCells[2]
if nNodes != comm.size:
raise RuntimeError("Wrong number of CPUs: %d != %d*%d*%d" %
(comm.size, nCells[0], nCells[1], nCells[2]))
t1 = np.zeros((3,))
t2 = np.zeros((3,))
comm.min(t1)
comm.max(t2)
if (t1[0] != t2[0] or t1[1] != t2[1] or t1[2] != t2[2]):
raise RuntimeError, "CPU layout inconsistent."
# If pbc and/or cell are given, they may be shorthands in need of
# expansion.
if pbc:
try:
plen = len(pbc)
except TypeError:
# It is a scalar, interpret as a boolean.
if pbc:
pbc = (1,1,1)
else:
pbc = (0,0,0)
else:
if plen != 3:
raise ValueError, "pbc must be a scalar or a 3-sequence."
if cell:
cell = array(cell) # Make sure it is a numeric array.
if cell.shape == (3,):
cell = array([[cell[0], 0, 0],
[0, cell[1], 0],
[0, 0, cell[2]]])
elif cell.shape != (3,3):
raise ValueError, "Unit cell must be a 3x3 matrix or a 3-vector."
# Find the lowest CPU with atoms, and let that one distribute
# which data it has. All other CPUs check for consistency.
if atoms is None:
hasdata = None
mynum = comm.size
else:
hasdata = {}
for name in atoms.arrays.keys():
datatype = np.sctype2char(atoms.arrays[name])
shape = atoms.arrays[name].shape[1:]
hasdata[name] = (datatype, shape)
mynum = comm.rank
if pbc is None:
pbc = atoms.get_pbc()
if cell is None:
cell = atoms.get_cell()
root = comm.min(mynum) # The first CPU with atoms
# Now send hasdata, cell and pbc to all other CPUs
package = cPickle.dumps((hasdata, cell, pbc), 2)
package = comm.broadcast_string(package, root)
rootdata, rootcell, rootpbc = cPickle.loads(package)
if rootdata is None or len(rootdata) == 0:
raise ValueError, "No data from 'root' atoms. Empty atoms?!?"
# Check for consistent cell and pbc arguments
if cell is not None:
if rootcell is None:
raise TypeError, "Cell given on another processor than the atoms."
if (cell.ravel() - rootcell.ravel()).max() > 1e-12:
raise ValueError, "Inconsistent cell specification."
else:
cell = rootcell # May still be None
if pbc is not None:
if rootpbc is None:
raise TypeError, "PBC given on another processor than the atoms."
if (pbc != rootpbc).any():
raise ValueError, "Inconsistent pbc specification."
else:
pbc = rootpbc
# Check for consistent atoms data
if hasdata is not None:
if hasdata != rootdata:
raise ValueError, "Atoms do not contain the sama data on different processors."
if "positions" not in rootdata:
raise ValueError, "Atoms do not have positions!"
# Create empty atoms
if atoms is None:
atoms = ase.Atoms(cell=cell, pbc=pbc)
for name in rootdata.keys():
if atoms.arrays.has_key(name):
assert np.sctype2char(atoms.arrays[name]) == rootdata[name][0]
assert len(atoms.arrays[name]) == 0
else:
shape = (0,) + rootdata[name][1]
atoms.arrays[name] = np.zeros(shape, rootdata[name][0])
return ParallelAtoms(nCells, comm, atoms, cell=cell, pbc=pbc,
distribute=distribute)
# A cleanup function should call MPI_Abort if python crashes to
# terminate the processes on the other nodes.  ASE provides this hook;
# the commented-out code below is an older local implementation.
ase.parallel.register_parallel_cleanup_function()
# _oldexitfunc = getattr(sys, "exitfunc", None)
# def _asap_cleanup(lastexit = _oldexitfunc, sys=sys, time=time,
# comm = asap3.mpi.world):
# error = getattr(sys, "last_type", None)
# if error:
# sys.stdout.flush()
# sys.stderr.write("ASAP CLEANUP (node " + str(comm.rank) +
# "): " + str(error) +
# " occurred. Calling MPI_Abort!\n")
# sys.stderr.flush()
# # Give other nodes a moment to crash by themselves (perhaps
# # producing helpful error messages).
# time.sleep(3)
# comm.abort(42)
# if lastexit:
# lastexit()
# sys.exitfunc = _asap_cleanup
# END OF PARALLEL STUFF
| {
"repo_name": "auag92/n2dm",
"path": "Asap-3.8.4/Python/asap3/Internal/ParallelListOfAtoms.py",
"copies": "1",
"size": "11264",
"license": "mit",
"hash": 6657647467417787000,
"line_mean": 34.7587301587,
"line_max": 92,
"alpha_frac": 0.5886008523,
"autogenerated": false,
"ratio": 3.874785001719986,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.99208408754203,
"avg_score": 0.008508995719937094,
"num_lines": 315
} |
"""Asap utility functions
This module defines the following functions:
PrintVersion
"""
__docformat__ = "restructuredtext en"
from asap3.Internal.Builtins import _asap, get_version, get_short_version
from asap3 import __file__ as _asapfile
import ase
import sys
import os
def print_version(level = 0):
    """Print the version number of the loaded version of Asap.

    If the optional argument is 1, also prints the pathnames of the
    most important files.
    """
    try:
        compiledfile = _asap.__file__
    except AttributeError:
        # The C++ extension has no __file__: it was built into the binary.
        compiledfile = "<built-in>"
    print get_version()
    if level >= 1:
        print " Python module:", _asapfile
        print " C++ module: ", compiledfile
        print " ase module: ", ase.__file__
def DebugOutput(filename, stdout=1, nomaster=False, sync=True):
    """Debugging output on each node goes to a different file.

    Redirect stderr to a different file on each node.  The filename should
    contain %d, which is replaced by the node number.  The file is opened
    with minimal buffering, and stderr (standard error) is redirected to
    it (also for C/C++ extensions).  If the optional argument stdout is
    true (the default), Python's sys.stdout is also redirected to the
    same file.  Standard output for C/C++ extensions is never touched.

    This is mainly useful for parallel simulations.
    """
    if stdout:
        sys.stdout = sys.stderr
    try:
        import asap3.mpi
        node = asap3.mpi.world.rank
    except (AttributeError, ImportError):
        # Not running under MPI: behave as node 0.
        node = 0
    if nomaster and node == 0:
        # Leave the master node's output untouched.
        return
    flag = os.O_WRONLY|os.O_CREAT|os.O_TRUNC
    if sync:
        # O_SYNC: write through immediately, so output survives a crash.
        flag = flag|os.O_SYNC
    newerror = os.open((filename % (node,)), flag, 0660)
    # Replace the stderr file descriptor itself, so output written by
    # C/C++ extensions is redirected as well.
    os.dup2(newerror, sys.stderr.fileno())
    # This Python file must NOT go away. Attach it to the sys module.
    sys._AsapStandardError = newerror
def print_memory(txt, a=None):
    """Print this process's memory usage (from /proc/self/status) on stderr.

    txt is a label included in the output; if a is given, also report the
    memory usage of that object via memory_usage().  Only works on systems
    with a Linux-style /proc filesystem.
    """
    import asap3.mpi
    procfile = open("/proc/self/status")
    # -1 marks fields that were not found in /proc/self/status.
    vmsize = vmpeak = vmdata = vmrss = -1
    for line in procfile:
        words = line.split()
        if words[0] == "VmSize:":
            vmsize = int(words[1])
        elif words[0] == "VmPeak:":
            vmpeak = int(words[1])
        elif words[0] == "VmData:":
            vmdata = int(words[1])
        elif words[0] == "VmRSS:":
            vmrss = int(words[1])
    # Values are in kB; adding 512 rounds to the nearest MB.
    print >>sys.stderr, "Memory [proc %d '%s']: %d MB total (%d MB peak, %d MB data, %d MB rss)" % (
        asap3.mpi.world.rank, txt, (vmsize+512) / 1024,
        (vmpeak+512) / 1024, (vmdata+512) / 1024, (vmrss+512)/1024)
    procfile.close()
    if a is not None:
        memory_usage(a)
def memory_usage(obj, total=True):
    """Print the memory usage of some kinds of objects.

    Supported objects are: atoms, EMT calculators and neighbor lists.
    Returns the memory used, in MB.
    """
    mem = 0
    if hasattr(obj, "arrays"):
        # Looks like an Atoms object; include its calculator, if any.
        mem += _memory_usage_atoms(obj)
        try:
            calc = obj.get_calculator()
        except AttributeError:
            calc = None
        if calc is not None:
            # total=False: the grand total is printed once, below.
            mem += memory_usage(calc, total=False)
    elif hasattr(obj, "print_memory"):
        # Calculators and neighbor lists report their own usage.
        mem += obj.print_memory()
    else:
        print "*MEM* Memory usage of this object is not supported:", obj
        return 0
    if total:
        print "*MEM* Total %d MB." % (mem,)
    return mem
def _memory_usage_atoms(atoms):
    """Sum the sizes of the per-atom arrays (plus ghost arrays, if any),
    print a one-line summary, and return the total in MB."""
    arr = atoms.arrays
    mem = 0            # bytes used by ordinary per-atom arrays
    nat = len(atoms)
    nvar = 0           # number of per-atom arrays
    megabyte = 1024*1024
    for k in arr.keys():
        mem += arr[k].size * arr[k].itemsize
        nvar += 1
    gmem = 0           # bytes used by ghost arrays
    gvar = 0           # number of ghost arrays
    if hasattr(atoms, "ghosts"):
        arr = atoms.ghosts
        for k in arr.keys():
            gmem += arr[k].size * arr[k].itemsize
            gvar += 1
    # Convert to MB, rounding to nearest (Python 2 integer division).
    mem = (mem + gmem + megabyte/2) / megabyte
    gmem = (gmem + megabyte/2) / megabyte
    print "*MEM* Atoms %d MB. [ %d atoms, %d arrays, %d gh_arr of %d MB ]" % (
        mem, nat, nvar, gvar, gmem)
    return mem
| {
"repo_name": "auag92/n2dm",
"path": "Asap-3.8.4/Python/asap3/Internal/UtilityFunctions.py",
"copies": "1",
"size": "3989",
"license": "mit",
"hash": -36163814213309500,
"line_mean": 30.912,
"line_max": 100,
"alpha_frac": 0.5968914515,
"autogenerated": false,
"ratio": 3.486888111888112,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45837795633881123,
"avg_score": null,
"num_lines": null
} |
# A small demonstration of pure functional programming: arithmetic built
# from nothing but single-argument functions (Church numerals).
#
# A number n is encoded as  lambda f: lambda x: f^n(x) : given a function
# f and a value x, the numeral applies f to x exactly n times.


def zero(f):
    # Applies f zero times: return x untouched.
    return lambda x: x


def one(f):
    # Applies f exactly once.
    return lambda x: f(x)


def unchurch(n):
    """Decode a Church numeral into an ordinary integer.

    We pick f = "add one" and x = 0, so n applications of f produce n.
    """
    return n(lambda m: m + 1)(0)


# Sanity-check the two base numerals.
assert unchurch(zero) == 0
assert unchurch(one) == 1


def succ(n):
    """Successor: wrap one extra application of f around n's result."""
    return lambda f: lambda x: f(n(f)(x))


def equals(x, y):
    """Assert that two encoded numbers decode to the same integer."""
    assert unchurch(x) == unchurch(y)


equals(succ(zero), one)

# A few named numerals, each the successor of the previous one.
two = succ(one)
three = succ(two)
four = succ(three)
five = succ(four)
six = succ(five)


def plus(m):
    """Addition: m + n is m applications of f on top of n applications."""
    return lambda n: lambda f: lambda x: m(f)(n(f)(x))


equals(plus(two)(three), five)


def mult(m):
    """Multiplication: m * n applies the "n times f" block m times."""
    return lambda n: lambda f: lambda x: m(n(f))(x)


equals(mult(two)(three), six)


def exp(m):
    """Exponentiation: m ** n multiplies by m, n times, starting at one."""
    return lambda n: n(mult(m))(one)


equals(exp(two)(two), four)
| {
"repo_name": "joshbohde/functional_python",
"path": "church.py",
"copies": "1",
"size": "1749",
"license": "bsd-3-clause",
"hash": -2412475290551750000,
"line_mean": 29.1551724138,
"line_max": 71,
"alpha_frac": 0.6935391652,
"autogenerated": false,
"ratio": 3.0155172413793103,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.420905640657931,
"avg_score": null,
"num_lines": null
} |
# As a test suite for the os module, this is woefully inadequate, but this
# does add tests for a few functions which have been determined to be more
# portable than they had been thought to be.
import asynchat
import asyncore
import codecs
import contextlib
import decimal
import errno
import fractions
import itertools
import locale
import mmap
import os
import pickle
import platform
import re
import shutil
import signal
import socket
import stat
import subprocess
import sys
import sysconfig
import time
import unittest
import uuid
import warnings
from test import support
try:
import threading
except ImportError:
threading = None
try:
import resource
except ImportError:
resource = None
try:
import fcntl
except ImportError:
fcntl = None
from test.script_helper import assert_python_ok
# Detect whether we're on a Linux system that uses the (now outdated
# and unmaintained) linuxthreads threading library. There's an issue
# when combining linuxthreads with a failed execv call: see
# http://bugs.python.org/issue4970.
if hasattr(sys, 'thread_info') and sys.thread_info.version:
USING_LINUXTHREADS = sys.thread_info.version.startswith("linuxthreads")
else:
USING_LINUXTHREADS = False
# Issue #14110: Some tests fail on FreeBSD if the user is in the wheel group.
HAVE_WHEEL_GROUP = sys.platform.startswith('freebsd') and os.getgid() == 0
# Tests creating TESTFN
class FileTests(unittest.TestCase):
    # Exercises basic os-level file operations (open/access, closerange,
    # rename, read/write, fdopen, replace, keyword arguments) using the
    # shared TESTFN scratch file.
    def setUp(self):
        # Remove any stale scratch file left by a previous test.
        if os.path.lexists(support.TESTFN):
            os.unlink(support.TESTFN)
    tearDown = setUp  # cleanup is identical: remove the scratch file

    def test_access(self):
        f = os.open(support.TESTFN, os.O_CREAT|os.O_RDWR)
        os.close(f)
        self.assertTrue(os.access(support.TESTFN, os.W_OK))

    def test_closerange(self):
        first = os.open(support.TESTFN, os.O_CREAT|os.O_RDWR)
        # We must allocate two consecutive file descriptors, otherwise
        # it will mess up other file descriptors (perhaps even the three
        # standard ones).
        second = os.dup(first)
        try:
            retries = 0
            while second != first + 1:
                os.close(first)
                retries += 1
                if retries > 10:
                    # XXX test skipped
                    self.skipTest("couldn't allocate two consecutive fds")
                first, second = second, os.dup(second)
        finally:
            os.close(second)
        # close a fd that is open, and one that isn't
        os.closerange(first, first + 2)
        self.assertRaises(OSError, os.write, first, b"a")

    @support.cpython_only
    def test_rename(self):
        path = support.TESTFN
        old = sys.getrefcount(path)
        self.assertRaises(TypeError, os.rename, path, 0)
        new = sys.getrefcount(path)
        # A failed rename must not leak a reference to the path object.
        self.assertEqual(old, new)

    def test_read(self):
        with open(support.TESTFN, "w+b") as fobj:
            fobj.write(b"spam")
            fobj.flush()
            fd = fobj.fileno()
            os.lseek(fd, 0, 0)
            s = os.read(fd, 4)
            self.assertEqual(type(s), bytes)
            self.assertEqual(s, b"spam")

    def test_write(self):
        # os.write() accepts bytes- and buffer-like objects but not strings
        fd = os.open(support.TESTFN, os.O_CREAT | os.O_WRONLY)
        self.assertRaises(TypeError, os.write, fd, "beans")
        os.write(fd, b"bacon\n")
        os.write(fd, bytearray(b"eggs\n"))
        os.write(fd, memoryview(b"spam\n"))
        os.close(fd)
        with open(support.TESTFN, "rb") as fobj:
            self.assertEqual(fobj.read().splitlines(),
                             [b"bacon", b"eggs", b"spam"])

    def write_windows_console(self, *args):
        # Helper (not itself a test): run *args* in a fresh console.
        retcode = subprocess.call(args,
            # use a new console to not flood the test output
            creationflags=subprocess.CREATE_NEW_CONSOLE,
            # use a shell to hide the console window (SW_HIDE)
            shell=True)
        self.assertEqual(retcode, 0)

    @unittest.skipUnless(sys.platform == 'win32',
                         'test specific to the Windows console')
    def test_write_windows_console(self):
        # Issue #11395: the Windows console returns an error (12: not enough
        # space error) on writing into stdout if stdout mode is binary and the
        # length is greater than 66,000 bytes (or less, depending on heap
        # usage).
        code = "print('x' * 100000)"
        self.write_windows_console(sys.executable, "-c", code)
        self.write_windows_console(sys.executable, "-u", "-c", code)

    def fdopen_helper(self, *args):
        # Helper: fdopen TESTFN with the given mode arguments, then close.
        fd = os.open(support.TESTFN, os.O_RDONLY)
        f = os.fdopen(fd, *args)
        f.close()

    def test_fdopen(self):
        fd = os.open(support.TESTFN, os.O_CREAT|os.O_RDWR)
        os.close(fd)
        self.fdopen_helper()
        self.fdopen_helper('r')
        self.fdopen_helper('r', 100)

    def test_replace(self):
        TESTFN2 = support.TESTFN + ".2"
        with open(support.TESTFN, 'w') as f:
            f.write("1")
        with open(TESTFN2, 'w') as f:
            f.write("2")
        self.addCleanup(os.unlink, TESTFN2)
        os.replace(support.TESTFN, TESTFN2)
        # The source must be gone and the destination silently overwritten.
        self.assertRaises(FileNotFoundError, os.stat, support.TESTFN)
        with open(TESTFN2, 'r') as f:
            self.assertEqual(f.read(), "1")

    def test_open_keywords(self):
        # All os.open() parameters may be passed as keywords.
        f = os.open(path=__file__, flags=os.O_RDONLY, mode=0o777,
                    dir_fd=None)
        os.close(f)

    def test_symlink_keywords(self):
        symlink = support.get_attribute(os, "symlink")
        try:
            symlink(src='target', dst=support.TESTFN,
                    target_is_directory=False, dir_fd=None)
        except (NotImplementedError, OSError):
            pass # No OS support or unprivileged user
# Test attributes on return values from os.*stat* family.
class StatAttributeTests(unittest.TestCase):
    # Checks the attribute/tuple duality of os.stat() and os.statvfs()
    # results, their read-only nature, and their pickling round-trips.
    def setUp(self):
        os.mkdir(support.TESTFN)
        self.fname = os.path.join(support.TESTFN, "f1")
        f = open(self.fname, 'wb')
        f.write(b"ABC")
        f.close()

    def tearDown(self):
        os.unlink(self.fname)
        os.rmdir(support.TESTFN)

    @unittest.skipUnless(hasattr(os, 'stat'), 'test needs os.stat()')
    def check_stat_attributes(self, fname):
        # Shared body for the str- and bytes-path stat tests below.
        result = os.stat(fname)

        # Make sure direct access works
        self.assertEqual(result[stat.ST_SIZE], 3)
        self.assertEqual(result.st_size, 3)

        # Make sure all the attributes are there
        members = dir(result)
        for name in dir(stat):
            if name[:3] == 'ST_':
                attr = name.lower()
                if name.endswith("TIME"):
                    # Tuple access truncates the time fields to ints.
                    def trunc(x): return int(x)
                else:
                    def trunc(x): return x
                self.assertEqual(trunc(getattr(result, attr)),
                                 result[getattr(stat, name)])
                self.assertIn(attr, members)

        # Make sure that the st_?time and st_?time_ns fields roughly agree
        # (they should always agree up to around tens-of-microseconds)
        for name in 'st_atime st_mtime st_ctime'.split():
            floaty = int(getattr(result, name) * 100000)
            nanosecondy = getattr(result, name + "_ns") // 10000
            self.assertAlmostEqual(floaty, nanosecondy, delta=2)

        # Indexing past the end must raise IndexError.
        try:
            result[200]
            self.fail("No exception raised")
        except IndexError:
            pass

        # Make sure that assignment fails
        try:
            result.st_mode = 1
            self.fail("No exception raised")
        except AttributeError:
            pass

        try:
            result.st_rdev = 1
            self.fail("No exception raised")
        except (AttributeError, TypeError):
            pass

        try:
            result.parrot = 1
            self.fail("No exception raised")
        except AttributeError:
            pass

        # Use the stat_result constructor with a too-short tuple.
        try:
            result2 = os.stat_result((10,))
            self.fail("No exception raised")
        except TypeError:
            pass

        # Use the constructor with a too-long tuple.
        try:
            result2 = os.stat_result((0,1,2,3,4,5,6,7,8,9,10,11,12,13,14))
        except TypeError:
            pass

    def test_stat_attributes(self):
        self.check_stat_attributes(self.fname)

    def test_stat_attributes_bytes(self):
        # Same checks, but with a bytes path (deprecated on some platforms).
        try:
            fname = self.fname.encode(sys.getfilesystemencoding())
        except UnicodeEncodeError:
            self.skipTest("cannot encode %a for the filesystem" % self.fname)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", DeprecationWarning)
            self.check_stat_attributes(fname)

    def test_stat_result_pickle(self):
        result = os.stat(self.fname)
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            p = pickle.dumps(result, proto)
            self.assertIn(b'stat_result', p)
            if proto < 4:
                self.assertIn(b'cos\nstat_result\n', p)
            unpickled = pickle.loads(p)
            self.assertEqual(result, unpickled)

    @unittest.skipUnless(hasattr(os, 'statvfs'), 'test needs os.statvfs()')
    def test_statvfs_attributes(self):
        try:
            result = os.statvfs(self.fname)
        except OSError as e:
            # On AtheOS, glibc always returns ENOSYS
            if e.errno == errno.ENOSYS:
                self.skipTest('os.statvfs() failed with ENOSYS')

        # Make sure direct access works
        self.assertEqual(result.f_bfree, result[3])

        # Make sure all the attributes are there.
        members = ('bsize', 'frsize', 'blocks', 'bfree', 'bavail', 'files',
                    'ffree', 'favail', 'flag', 'namemax')
        for value, member in enumerate(members):
            self.assertEqual(getattr(result, 'f_' + member), result[value])

        # Make sure that assignment really fails
        try:
            result.f_bfree = 1
            self.fail("No exception raised")
        except AttributeError:
            pass

        try:
            result.parrot = 1
            self.fail("No exception raised")
        except AttributeError:
            pass

        # Use the constructor with a too-short tuple.
        try:
            result2 = os.statvfs_result((10,))
            self.fail("No exception raised")
        except TypeError:
            pass

        # Use the constructor with a too-long tuple.
        try:
            result2 = os.statvfs_result((0,1,2,3,4,5,6,7,8,9,10,11,12,13,14))
        except TypeError:
            pass

    @unittest.skipUnless(hasattr(os, 'statvfs'),
                         "need os.statvfs()")
    def test_statvfs_result_pickle(self):
        try:
            result = os.statvfs(self.fname)
        except OSError as e:
            # On AtheOS, glibc always returns ENOSYS
            if e.errno == errno.ENOSYS:
                self.skipTest('os.statvfs() failed with ENOSYS')

        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            p = pickle.dumps(result, proto)
            self.assertIn(b'statvfs_result', p)
            if proto < 4:
                self.assertIn(b'cos\nstatvfs_result\n', p)
            unpickled = pickle.loads(p)
            self.assertEqual(result, unpickled)

    @unittest.skipUnless(sys.platform == "win32", "Win32 specific tests")
    def test_1686475(self):
        # Verify that an open file can be stat'ed
        try:
            os.stat(r"c:\pagefile.sys")
        except FileNotFoundError:
            self.skipTest(r'c:\pagefile.sys does not exist')
        except OSError as e:
            self.fail("Could not stat pagefile.sys")

    @unittest.skipUnless(sys.platform == "win32", "Win32 specific tests")
    @unittest.skipUnless(hasattr(os, "pipe"), "requires os.pipe()")
    def test_15261(self):
        # Verify that stat'ing a closed fd does not cause crash
        r, w = os.pipe()
        try:
            os.stat(r)          # should not raise error
        finally:
            os.close(r)
            os.close(w)
        with self.assertRaises(OSError) as ctx:
            os.stat(r)
        self.assertEqual(ctx.exception.errno, errno.EBADF)
class UtimeTests(unittest.TestCase):
def setUp(self):
self.dirname = support.TESTFN
self.fname = os.path.join(self.dirname, "f1")
self.addCleanup(support.rmtree, self.dirname)
os.mkdir(self.dirname)
with open(self.fname, 'wb') as fp:
fp.write(b"ABC")
def restore_float_times(state):
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
os.stat_float_times(state)
# ensure that st_atime and st_mtime are float
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
old_float_times = os.stat_float_times(-1)
self.addCleanup(restore_float_times, old_float_times)
os.stat_float_times(True)
def support_subsecond(self, filename):
# Heuristic to check if the filesystem supports timestamp with
# subsecond resolution: check if float and int timestamps are different
st = os.stat(filename)
return ((st.st_atime != st[7])
or (st.st_mtime != st[8])
or (st.st_ctime != st[9]))
def _test_utime(self, set_time, filename=None):
if not filename:
filename = self.fname
support_subsecond = self.support_subsecond(filename)
if support_subsecond:
# Timestamp with a resolution of 1 microsecond (10^-6).
#
# The resolution of the C internal function used by os.utime()
# depends on the platform: 1 sec, 1 us, 1 ns. Writing a portable
# test with a resolution of 1 ns requires more work:
# see the issue #15745.
atime_ns = 1002003000 # 1.002003 seconds
mtime_ns = 4005006000 # 4.005006 seconds
else:
# use a resolution of 1 second
atime_ns = 5 * 10**9
mtime_ns = 8 * 10**9
set_time(filename, (atime_ns, mtime_ns))
st = os.stat(filename)
if support_subsecond:
self.assertAlmostEqual(st.st_atime, atime_ns * 1e-9, delta=1e-6)
self.assertAlmostEqual(st.st_mtime, mtime_ns * 1e-9, delta=1e-6)
else:
self.assertEqual(st.st_atime, atime_ns * 1e-9)
self.assertEqual(st.st_mtime, mtime_ns * 1e-9)
self.assertEqual(st.st_atime_ns, atime_ns)
self.assertEqual(st.st_mtime_ns, mtime_ns)
def test_utime(self):
def set_time(filename, ns):
# test the ns keyword parameter
os.utime(filename, ns=ns)
self._test_utime(set_time)
@staticmethod
def ns_to_sec(ns):
# Convert a number of nanosecond (int) to a number of seconds (float).
# Round towards infinity by adding 0.5 nanosecond to avoid rounding
# issue, os.utime() rounds towards minus infinity.
return (ns * 1e-9) + 0.5e-9
def test_utime_by_indexed(self):
    """os.utime() with times passed positionally as float seconds."""
    def apply_times(path, ns_pair):
        # Convert (atime_ns, mtime_ns) to float seconds and pass them as
        # the second positional argument; depending on the platform this
        # exercises utimensat(timespec), utimes(timeval), utime(utimbuf)
        # or utime(time_t).
        seconds = tuple(self.ns_to_sec(value) for value in ns_pair)
        os.utime(path, seconds)
    self._test_utime(apply_times)
def test_utime_by_times(self):
    """os.utime() with the times keyword argument (float seconds)."""
    def apply_times(path, ns_pair):
        atime_s = self.ns_to_sec(ns_pair[0])
        mtime_s = self.ns_to_sec(ns_pair[1])
        # exercise the times keyword parameter
        os.utime(path, times=(atime_s, mtime_s))
    self._test_utime(apply_times)
@unittest.skipUnless(os.utime in os.supports_follow_symlinks,
                     "follow_symlinks support for utime required "
                     "for this test.")
def test_utime_nofollow_symlinks(self):
    """os.utime() with follow_symlinks=False."""
    def apply_ns(path, ns_pair):
        # follow_symlinks=False exercises utimensat(timespec)
        # or lutimes(timeval)
        os.utime(path, ns=ns_pair, follow_symlinks=False)
    self._test_utime(apply_ns)
@unittest.skipUnless(os.utime in os.supports_fd,
                     "fd support for utime required for this test.")
def test_utime_fd(self):
    """os.utime() applied to an open file descriptor."""
    def apply_ns(path, ns_pair):
        # A file descriptor exercises futimens(timespec)
        # or futimes(timeval).
        with open(path, 'wb') as fp:
            os.utime(fp.fileno(), ns=ns_pair)
    self._test_utime(apply_ns)
@unittest.skipUnless(os.utime in os.supports_dir_fd,
                     "dir_fd support for utime required for this test.")
def test_utime_dir_fd(self):
    """os.utime() with the dir_fd keyword argument."""
    def set_time(filename, ns):
        # Open the containing directory and pass its fd via dir_fd.
        dirname, name = os.path.split(filename)
        dirfd = os.open(dirname, os.O_RDONLY)
        try:
            # pass dir_fd to test utimensat(timespec) or futimesat(timeval)
            os.utime(name, dir_fd=dirfd, ns=ns)
        finally:
            os.close(dirfd)
    self._test_utime(set_time)
def test_utime_directory(self):
    """os.utime() applied to a directory instead of a regular file."""
    def apply_ns(path, ns_pair):
        os.utime(path, ns=ns_pair)
    self._test_utime(apply_ns, filename=self.dirname)
def _test_utime_current(self, set_time):
    """Call set_time(self.fname) and check st_mtime tracks the clock.

    Tolerance is 1 second on filesystems without sub-second timestamps,
    20 ms otherwise (covering the coarse resolution of time.time() on
    some platforms).
    """
    # Get the system clock
    current = time.time()
    # Call os.utime() to set the timestamp to the current system clock
    set_time(self.fname)
    if not self.support_subsecond(self.fname):
        delta = 1.0
    else:
        # On Windows, the usual resolution of time.time() is 15.6 ms
        delta = 0.020
    st = os.stat(self.fname)
    msg = ("st_time=%r, current=%r, dt=%r"
           % (st.st_mtime, current, st.st_mtime - current))
    self.assertAlmostEqual(st.st_mtime, current,
                           delta=delta, msg=msg)
def test_utime_current(self):
    """os.utime() with no times argument sets the current time."""
    # Set to the current time in the new way (no second argument).
    self._test_utime_current(lambda path: os.utime(self.fname))
def test_utime_current_old(self):
    """os.utime(path, None) — the old explicit spelling — sets now."""
    self._test_utime_current(lambda path: os.utime(self.fname, None))
def get_file_system(self, path):
    """Return the filesystem name for *path* on Windows (e.g. "NTFS").

    Falls through to an implicit None on other platforms or when the
    volume query fails.
    """
    if sys.platform == 'win32':
        root = os.path.splitdrive(os.path.abspath(path))[0] + '\\'
        import ctypes
        kernel32 = ctypes.windll.kernel32
        buf = ctypes.create_unicode_buffer("", 100)
        # GetVolumeInformationW fills *buf* with the filesystem name;
        # a zero return value means the call failed.
        ok = kernel32.GetVolumeInformationW(root, None, 0,
                                            None, None, None,
                                            buf, len(buf))
        if ok:
            return buf.value
    # return None if the filesystem is unknown
def test_large_time(self):
    """Timestamps past the year-2038 limit of 32-bit time_t."""
    # Many filesystems are limited to the year 2038. At least, the test
    # pass with NTFS filesystem.
    if self.get_file_system(self.dirname) != "NTFS":
        self.skipTest("requires NTFS")
    timestamp = 5000000000  # some day in 2128
    os.utime(self.fname, (timestamp, timestamp))
    self.assertEqual(os.stat(self.fname).st_mtime, timestamp)
def test_utime_invalid_arguments(self):
    """Passing both seconds and nanoseconds must raise ValueError."""
    # seconds and nanoseconds parameters are mutually exclusive
    self.assertRaises(ValueError, os.utime, self.fname, (5, 5), ns=(5, 5))
from test import mapping_tests
class EnvironTests(mapping_tests.BasicTestMappingProtocol):
    """check that os.environ object conform to mapping protocol"""
    # The generic mapping tests normally build fresh mappings from
    # type2test; os.environ is a singleton, so _empty_mapping() below is
    # overridden instead.
    type2test = None
    def setUp(self):
        # Snapshot the real environment (and its bytes view when
        # available) so tearDown can restore it exactly.
        self.__save = dict(os.environ)
        if os.supports_bytes_environ:
            self.__saveb = dict(os.environb)
        for key, value in self._reference().items():
            os.environ[key] = value
    def tearDown(self):
        os.environ.clear()
        os.environ.update(self.__save)
        if os.supports_bytes_environ:
            os.environb.clear()
            os.environb.update(self.__saveb)
    def _reference(self):
        # Known key/value pairs installed by setUp.
        return {"KEY1":"VALUE1", "KEY2":"VALUE2", "KEY3":"VALUE3"}
    def _empty_mapping(self):
        os.environ.clear()
        return os.environ
    # Bug 1110478
    @unittest.skipUnless(os.path.exists('/bin/sh'), 'requires /bin/sh')
    def test_update2(self):
        # A value set through os.environ must be visible to a child shell.
        os.environ.clear()
        os.environ.update(HELLO="World")
        with os.popen("/bin/sh -c 'echo $HELLO'") as popen:
            value = popen.read().strip()
            self.assertEqual(value, "World")
    @unittest.skipUnless(os.path.exists('/bin/sh'), 'requires /bin/sh')
    def test_os_popen_iter(self):
        # Iterating an os.popen() pipe yields one line at a time.
        with os.popen(
            "/bin/sh -c 'echo \"line1\nline2\nline3\"'") as popen:
            it = iter(popen)
            self.assertEqual(next(it), "line1\n")
            self.assertEqual(next(it), "line2\n")
            self.assertEqual(next(it), "line3\n")
            self.assertRaises(StopIteration, next, it)
    # Verify environ keys and values from the OS are of the
    # correct str type.
    def test_keyvalue_types(self):
        for key, val in os.environ.items():
            self.assertEqual(type(key), str)
            self.assertEqual(type(val), str)
    def test_items(self):
        for key, value in self._reference().items():
            self.assertEqual(os.environ.get(key), value)
    # Issue 7310
    def test___repr__(self):
        """Check that the repr() of os.environ looks like environ({...})."""
        env = os.environ
        self.assertEqual(repr(env), 'environ({{{}}})'.format(', '.join(
            '{!r}: {!r}'.format(key, value)
            for key, value in env.items())))
    def test_get_exec_path(self):
        defpath_list = os.defpath.split(os.pathsep)
        test_path = ['/monty', '/python', '', '/flying/circus']
        test_env = {'PATH': os.pathsep.join(test_path)}
        # Temporarily replace os.environ with a plain dict so that
        # get_exec_path()'s default environment lookup is fully controlled.
        saved_environ = os.environ
        try:
            os.environ = dict(test_env)
            # Test that defaulting to os.environ works.
            self.assertSequenceEqual(test_path, os.get_exec_path())
            self.assertSequenceEqual(test_path, os.get_exec_path(env=None))
        finally:
            os.environ = saved_environ
        # No PATH environment variable
        self.assertSequenceEqual(defpath_list, os.get_exec_path({}))
        # Empty PATH environment variable
        self.assertSequenceEqual(('',), os.get_exec_path({'PATH':''}))
        # Supplied PATH environment variable
        self.assertSequenceEqual(test_path, os.get_exec_path(test_env))
        if os.supports_bytes_environ:
            # env cannot contain 'PATH' and b'PATH' keys
            try:
                # ignore BytesWarning warning
                with warnings.catch_warnings(record=True):
                    mixed_env = {'PATH': '1', b'PATH': b'2'}
            except BytesWarning:
                # mixed_env cannot be created with python -bb
                pass
            else:
                self.assertRaises(ValueError, os.get_exec_path, mixed_env)
            # bytes key and/or value
            self.assertSequenceEqual(os.get_exec_path({b'PATH': b'abc'}),
                                     ['abc'])
            self.assertSequenceEqual(os.get_exec_path({b'PATH': 'abc'}),
                                     ['abc'])
            self.assertSequenceEqual(os.get_exec_path({'PATH': b'abc'}),
                                     ['abc'])
    @unittest.skipUnless(os.supports_bytes_environ,
                         "os.environb required for this test.")
    def test_environb(self):
        # os.environ -> os.environb
        value = 'euro\u20ac'
        try:
            value_bytes = value.encode(sys.getfilesystemencoding(),
                                       'surrogateescape')
        except UnicodeEncodeError:
            msg = "U+20AC character is not encodable to %s" % (
                sys.getfilesystemencoding(),)
            self.skipTest(msg)
        os.environ['unicode'] = value
        self.assertEqual(os.environ['unicode'], value)
        self.assertEqual(os.environb[b'unicode'], value_bytes)
        # os.environb -> os.environ
        value = b'\xff'
        os.environb[b'bytes'] = value
        self.assertEqual(os.environb[b'bytes'], value)
        value_str = value.decode(sys.getfilesystemencoding(), 'surrogateescape')
        self.assertEqual(os.environ['bytes'], value_str)
    # On FreeBSD < 7 and OS X < 10.6, unsetenv() doesn't return a value (issue
    # #13415).
    @support.requires_freebsd_version(7)
    @support.requires_mac_ver(10, 6)
    def test_unset_error(self):
        if sys.platform == "win32":
            # an environment variable is limited to 32,767 characters
            key = 'x' * 50000
            self.assertRaises(ValueError, os.environ.__delitem__, key)
        else:
            # "=" is not allowed in a variable name
            key = 'key='
            self.assertRaises(OSError, os.environ.__delitem__, key)
    def test_key_type(self):
        # Reading or deleting a missing key raises KeyError carrying the
        # original key object, with the internal cause suppressed.
        missing = 'missingkey'
        self.assertNotIn(missing, os.environ)
        with self.assertRaises(KeyError) as cm:
            os.environ[missing]
        self.assertIs(cm.exception.args[0], missing)
        self.assertTrue(cm.exception.__suppress_context__)
        with self.assertRaises(KeyError) as cm:
            del os.environ[missing]
        self.assertIs(cm.exception.args[0], missing)
        self.assertTrue(cm.exception.__suppress_context__)
class WalkTests(unittest.TestCase):
    """Tests for os.walk()."""
    # Wrapper to hide minor differences between os.walk and os.fwalk
    # to tests both functions with the same code base
    def walk(self, directory, topdown=True, follow_symlinks=False):
        walk_it = os.walk(directory,
                          topdown=topdown,
                          followlinks=follow_symlinks)
        for root, dirs, files in walk_it:
            yield (root, dirs, files)
    def setUp(self):
        join = os.path.join
        # Build:
        #     TESTFN/
        #       TEST1/            a file kid and two directory kids
        #         tmp1
        #         SUB1/           a file kid and a directory kid
        #           tmp2
        #           SUB11/        no kids
        #         SUB2/           a file kid and a dirsymlink kid
        #           tmp3
        #           link/         a symlink to TESTFN.2
        #           broken_link
        #       TEST2/
        #         tmp4            a lone file
        self.walk_path = join(support.TESTFN, "TEST1")
        self.sub1_path = join(self.walk_path, "SUB1")
        self.sub11_path = join(self.sub1_path, "SUB11")
        sub2_path = join(self.walk_path, "SUB2")
        tmp1_path = join(self.walk_path, "tmp1")
        tmp2_path = join(self.sub1_path, "tmp2")
        tmp3_path = join(sub2_path, "tmp3")
        self.link_path = join(sub2_path, "link")
        t2_path = join(support.TESTFN, "TEST2")
        tmp4_path = join(support.TESTFN, "TEST2", "tmp4")
        broken_link_path = join(sub2_path, "broken_link")
        # Create stuff.
        os.makedirs(self.sub11_path)
        os.makedirs(sub2_path)
        os.makedirs(t2_path)
        for path in tmp1_path, tmp2_path, tmp3_path, tmp4_path:
            f = open(path, "w")
            f.write("I'm " + path + " and proud of it. Blame test_os.\n")
            f.close()
        if support.can_symlink():
            os.symlink(os.path.abspath(t2_path), self.link_path)
            os.symlink('broken', broken_link_path, True)
            self.sub2_tree = (sub2_path, ["link"], ["broken_link", "tmp3"])
        else:
            self.sub2_tree = (sub2_path, [], ["tmp3"])
    def test_walk_topdown(self):
        # Walk top-down.
        # Go through the self.walk wrapper (not os.walk directly) for
        # consistency with the other tests in this class, so that
        # FwalkTests, which overrides the wrapper, re-runs this test
        # against os.fwalk() as well.
        all = list(self.walk(self.walk_path))
        self.assertEqual(len(all), 4)
        # We can't know which order SUB1 and SUB2 will appear in.
        # Not flipped:  TESTFN, SUB1, SUB11, SUB2
        #     flipped:  TESTFN, SUB2, SUB1, SUB11
        flipped = all[0][1][0] != "SUB1"
        all[0][1].sort()
        all[3 - 2 * flipped][-1].sort()
        self.assertEqual(all[0], (self.walk_path, ["SUB1", "SUB2"], ["tmp1"]))
        self.assertEqual(all[1 + flipped], (self.sub1_path, ["SUB11"], ["tmp2"]))
        self.assertEqual(all[2 + flipped], (self.sub11_path, [], []))
        self.assertEqual(all[3 - 2 * flipped], self.sub2_tree)
    def test_walk_prune(self):
        # Prune the search.
        all = []
        for root, dirs, files in self.walk(self.walk_path):
            all.append((root, dirs, files))
            # Don't descend into SUB1.
            if 'SUB1' in dirs:
                # Note that this also mutates the dirs we appended to all!
                dirs.remove('SUB1')
        self.assertEqual(len(all), 2)
        self.assertEqual(all[0],
                         (self.walk_path, ["SUB2"], ["tmp1"]))
        all[1][-1].sort()
        self.assertEqual(all[1], self.sub2_tree)
    def test_walk_bottom_up(self):
        # Walk bottom-up.
        all = list(self.walk(self.walk_path, topdown=False))
        self.assertEqual(len(all), 4)
        # We can't know which order SUB1 and SUB2 will appear in.
        # Not flipped:  SUB11, SUB1, SUB2, TESTFN
        #     flipped:  SUB2, SUB11, SUB1, TESTFN
        flipped = all[3][1][0] != "SUB1"
        all[3][1].sort()
        all[2 - 2 * flipped][-1].sort()
        self.assertEqual(all[3],
                         (self.walk_path, ["SUB1", "SUB2"], ["tmp1"]))
        self.assertEqual(all[flipped],
                         (self.sub11_path, [], []))
        self.assertEqual(all[flipped + 1],
                         (self.sub1_path, ["SUB11"], ["tmp2"]))
        self.assertEqual(all[2 - 2 * flipped],
                         self.sub2_tree)
    def test_walk_symlink(self):
        if not support.can_symlink():
            self.skipTest("need symlink support")
        # Walk, following symlinks.
        walk_it = self.walk(self.walk_path, follow_symlinks=True)
        for root, dirs, files in walk_it:
            if root == self.link_path:
                self.assertEqual(dirs, [])
                self.assertEqual(files, ["tmp4"])
                break
        else:
            self.fail("Didn't follow symlink with followlinks=True")
    def tearDown(self):
        # Tear everything down.  This is a decent use for bottom-up on
        # Windows, which doesn't have a recursive delete command.  The
        # (not so) subtlety is that rmdir will fail unless the dir's
        # kids are removed first, so bottom up is essential.
        for root, dirs, files in os.walk(support.TESTFN, topdown=False):
            for name in files:
                os.remove(os.path.join(root, name))
            for name in dirs:
                dirname = os.path.join(root, name)
                if not os.path.islink(dirname):
                    os.rmdir(dirname)
                else:
                    os.remove(dirname)
        os.rmdir(support.TESTFN)
@unittest.skipUnless(hasattr(os, 'fwalk'), "Test needs os.fwalk()")
class FwalkTests(WalkTests):
    """Tests for os.fwalk()."""
    def walk(self, directory, topdown=True, follow_symlinks=False):
        # Adapt os.fwalk()'s 4-tuples to the 3-tuple interface the
        # inherited WalkTests methods expect.
        walk_it = os.fwalk(directory,
                           topdown=topdown,
                           follow_symlinks=follow_symlinks)
        for root, dirs, files, root_fd in walk_it:
            yield (root, dirs, files)
    def _compare_to_walk(self, walk_kwargs, fwalk_kwargs):
        """
        compare with walk() results.
        """
        walk_kwargs = walk_kwargs.copy()
        fwalk_kwargs = fwalk_kwargs.copy()
        for topdown, follow_symlinks in itertools.product((True, False), repeat=2):
            walk_kwargs.update(topdown=topdown, followlinks=follow_symlinks)
            fwalk_kwargs.update(topdown=topdown, follow_symlinks=follow_symlinks)
            expected = {}
            for root, dirs, files in os.walk(**walk_kwargs):
                expected[root] = (set(dirs), set(files))
            for root, dirs, files, rootfd in os.fwalk(**fwalk_kwargs):
                self.assertIn(root, expected)
                self.assertEqual(expected[root], (set(dirs), set(files)))
    def test_compare_to_walk(self):
        kwargs = {'top': support.TESTFN}
        self._compare_to_walk(kwargs, kwargs)
    def test_dir_fd(self):
        # Open the directory fd *before* entering the try block: if
        # os.open() failed inside the try, the finally clause would raise
        # NameError on the unbound 'fd' and mask the real error.
        fd = os.open(".", os.O_RDONLY)
        try:
            walk_kwargs = {'top': support.TESTFN}
            fwalk_kwargs = walk_kwargs.copy()
            fwalk_kwargs['dir_fd'] = fd
            self._compare_to_walk(walk_kwargs, fwalk_kwargs)
        finally:
            os.close(fd)
    def test_yields_correct_dir_fd(self):
        # check returned file descriptors
        for topdown, follow_symlinks in itertools.product((True, False), repeat=2):
            args = support.TESTFN, topdown, None
            for root, dirs, files, rootfd in os.fwalk(*args, follow_symlinks=follow_symlinks):
                # check that the FD is valid
                os.fstat(rootfd)
                # redundant check
                os.stat(rootfd)
                # check that listdir() returns consistent information
                self.assertEqual(set(os.listdir(rootfd)), set(dirs) | set(files))
    def test_fd_leak(self):
        # Since we're opening a lot of FDs, we must be careful to avoid leaks:
        # we both check that calling fwalk() a large number of times doesn't
        # yield EMFILE, and that the minimum allocated FD hasn't changed.
        minfd = os.dup(1)
        os.close(minfd)
        for i in range(256):
            for x in os.fwalk(support.TESTFN):
                pass
        newfd = os.dup(1)
        self.addCleanup(os.close, newfd)
        self.assertEqual(newfd, minfd)
    def tearDown(self):
        # cleanup
        for root, dirs, files, rootfd in os.fwalk(support.TESTFN, topdown=False):
            for name in files:
                os.unlink(name, dir_fd=rootfd)
            for name in dirs:
                st = os.stat(name, dir_fd=rootfd, follow_symlinks=False)
                if stat.S_ISDIR(st.st_mode):
                    os.rmdir(name, dir_fd=rootfd)
                else:
                    os.unlink(name, dir_fd=rootfd)
        os.rmdir(support.TESTFN)
class MakedirTests(unittest.TestCase):
    """Tests for os.makedirs()."""
    def setUp(self):
        os.mkdir(support.TESTFN)
    def test_makedir(self):
        base = support.TESTFN
        # Nested creation, then extending an already-existing tree.
        path = os.path.join(base, 'dir1', 'dir2', 'dir3')
        os.makedirs(path)             # Should work
        path = os.path.join(base, 'dir1', 'dir2', 'dir3', 'dir4')
        os.makedirs(path)
        # Try paths with a '.' in them
        self.assertRaises(OSError, os.makedirs, os.curdir)
        path = os.path.join(base, 'dir1', 'dir2', 'dir3', 'dir4', 'dir5', os.curdir)
        os.makedirs(path)
        path = os.path.join(base, 'dir1', os.curdir, 'dir2', 'dir3', 'dir4',
                            'dir5', 'dir6')
        os.makedirs(path)
    def test_exist_ok_existing_directory(self):
        path = os.path.join(support.TESTFN, 'dir1')
        mode = 0o777
        old_mask = os.umask(0o022)
        os.makedirs(path, mode)
        self.assertRaises(OSError, os.makedirs, path, mode)
        self.assertRaises(OSError, os.makedirs, path, mode, exist_ok=False)
        # With exist_ok=True an existing directory never raises, whatever
        # mode is requested.
        os.makedirs(path, 0o776, exist_ok=True)
        os.makedirs(path, mode=mode, exist_ok=True)
        os.umask(old_mask)
        # Issue #25583: A drive root could raise PermissionError on Windows
        os.makedirs(os.path.abspath('/'), exist_ok=True)
    @unittest.skipUnless(hasattr(os, 'chown'), 'test needs os.chown')
    def test_chown_uid_gid_arguments_must_be_index(self):
        # NOTE(review): this chown test looks out of place in MakedirTests —
        # it only relies on support.TESTFN existing (created in setUp).
        # Also: the local name 'stat' shadows the stat module inside this
        # method (harmless here, since the module is not used below).
        stat = os.stat(support.TESTFN)
        uid = stat.st_uid
        gid = stat.st_gid
        # Non-index uid/gid values must be rejected with TypeError;
        # the current ids and -1 (meaning "unchanged") must be accepted.
        for value in (-1.0, -1j, decimal.Decimal(-1), fractions.Fraction(-2, 2)):
            self.assertRaises(TypeError, os.chown, support.TESTFN, value, gid)
            self.assertRaises(TypeError, os.chown, support.TESTFN, uid, value)
        self.assertIsNone(os.chown(support.TESTFN, uid, gid))
        self.assertIsNone(os.chown(support.TESTFN, -1, -1))
    def test_exist_ok_s_isgid_directory(self):
        path = os.path.join(support.TESTFN, 'dir1')
        S_ISGID = stat.S_ISGID
        mode = 0o777
        old_mask = os.umask(0o022)
        try:
            existing_testfn_mode = stat.S_IMODE(
                    os.lstat(support.TESTFN).st_mode)
            try:
                os.chmod(support.TESTFN, existing_testfn_mode | S_ISGID)
            except PermissionError:
                raise unittest.SkipTest('Cannot set S_ISGID for dir.')
            if (os.lstat(support.TESTFN).st_mode & S_ISGID != S_ISGID):
                raise unittest.SkipTest('No support for S_ISGID dir mode.')
            # The os should apply S_ISGID from the parent dir for us, but
            # this test need not depend on that behavior. Be explicit.
            os.makedirs(path, mode | S_ISGID)
            # http://bugs.python.org/issue14992
            # Should not fail when the bit is already set.
            os.makedirs(path, mode, exist_ok=True)
            # remove the bit.
            os.chmod(path, stat.S_IMODE(os.lstat(path).st_mode) & ~S_ISGID)
            # May work even when the bit is not already set when demanded.
            os.makedirs(path, mode | S_ISGID, exist_ok=True)
        finally:
            os.umask(old_mask)
    def test_exist_ok_existing_regular_file(self):
        base = support.TESTFN
        path = os.path.join(support.TESTFN, 'dir1')
        f = open(path, 'w')
        f.write('abc')
        f.close()
        # A regular file in the way always raises, even with exist_ok=True.
        self.assertRaises(OSError, os.makedirs, path)
        self.assertRaises(OSError, os.makedirs, path, exist_ok=False)
        self.assertRaises(OSError, os.makedirs, path, exist_ok=True)
        os.remove(path)
    def tearDown(self):
        path = os.path.join(support.TESTFN, 'dir1', 'dir2', 'dir3',
                            'dir4', 'dir5', 'dir6')
        # If the tests failed, the bottom-most directory ('../dir6')
        # may not have been created, so we look for the outermost directory
        # that exists.
        while not os.path.exists(path) and path != support.TESTFN:
            path = os.path.dirname(path)
        os.removedirs(path)
class RemoveDirsTests(unittest.TestCase):
    """Tests for os.removedirs(), which prunes empty parent directories."""
    def setUp(self):
        os.makedirs(support.TESTFN)
    def tearDown(self):
        support.rmtree(support.TESTFN)
    def test_remove_all(self):
        """Every directory on the chain is empty: the whole chain goes."""
        parent = os.path.join(support.TESTFN, 'dira')
        os.mkdir(parent)
        child = os.path.join(parent, 'dirb')
        os.mkdir(child)
        os.removedirs(child)
        for gone in (child, parent, support.TESTFN):
            self.assertFalse(os.path.exists(gone))
    def test_remove_partial(self):
        """Pruning stops at the first non-empty ancestor."""
        parent = os.path.join(support.TESTFN, 'dira')
        os.mkdir(parent)
        child = os.path.join(parent, 'dirb')
        os.mkdir(child)
        # A file in the parent keeps it (and everything above) alive.
        with open(os.path.join(parent, 'file.txt'), 'w') as f:
            f.write('text')
        os.removedirs(child)
        self.assertFalse(os.path.exists(child))
        self.assertTrue(os.path.exists(parent))
        self.assertTrue(os.path.exists(support.TESTFN))
    def test_remove_nothing(self):
        """A non-empty leaf raises OSError and nothing is removed."""
        parent = os.path.join(support.TESTFN, 'dira')
        os.mkdir(parent)
        child = os.path.join(parent, 'dirb')
        os.mkdir(child)
        with open(os.path.join(child, 'file.txt'), 'w') as f:
            f.write('text')
        with self.assertRaises(OSError):
            os.removedirs(child)
        self.assertTrue(os.path.exists(child))
        self.assertTrue(os.path.exists(parent))
        self.assertTrue(os.path.exists(support.TESTFN))
class DevNullTests(unittest.TestCase):
    """Sanity checks for os.devnull."""
    def test_devnull(self):
        """Writes to os.devnull succeed and reads return nothing."""
        # The redundant explicit f.close() inside the with block was
        # removed: the context manager already closes the file on exit.
        with open(os.devnull, 'wb') as f:
            f.write(b'hello')
        with open(os.devnull, 'rb') as f:
            self.assertEqual(f.read(), b'')
class URandomTests(unittest.TestCase):
def test_urandom_length(self):
self.assertEqual(len(os.urandom(0)), 0)
self.assertEqual(len(os.urandom(1)), 1)
self.assertEqual(len(os.urandom(10)), 10)
self.assertEqual(len(os.urandom(100)), 100)
self.assertEqual(len(os.urandom(1000)), 1000)
def test_urandom_value(self):
data1 = os.urandom(16)
data2 = os.urandom(16)
self.assertNotEqual(data1, data2)
def get_urandom_subprocess(self, count):
code = '\n'.join((
'import os, sys',
'data = os.urandom(%s)' % count,
'sys.stdout.buffer.write(data)',
'sys.stdout.buffer.flush()'))
out = assert_python_ok('-c', code)
stdout = out[1]
self.assertEqual(len(stdout), 16)
return stdout
def test_urandom_subprocess(self):
data1 = self.get_urandom_subprocess(16)
data2 = self.get_urandom_subprocess(16)
self.assertNotEqual(data1, data2)
# True when this build of Python uses getentropy() for os.urandom(),
# in which case no /dev/urandom file descriptor is involved.
HAVE_GETENTROPY = (sysconfig.get_config_var('HAVE_GETENTROPY') == 1)
@unittest.skipIf(HAVE_GETENTROPY,
                 "getentropy() does not use a file descriptor")
class URandomFDTests(unittest.TestCase):
    """Tests for os.urandom()'s handling of its /dev/urandom descriptor."""
    @unittest.skipUnless(resource, "test requires the resource module")
    def test_urandom_failure(self):
        # Check urandom() failing when it is not able to open /dev/random.
        # We spawn a new process to make the test more robust (if getrlimit()
        # failed to restore the file descriptor limit after this, the whole
        # test suite would crash; this actually happened on the OS X Tiger
        # buildbot).
        code = """if 1:
            import errno
            import os
            import resource
            soft_limit, hard_limit = resource.getrlimit(resource.RLIMIT_NOFILE)
            resource.setrlimit(resource.RLIMIT_NOFILE, (1, hard_limit))
            try:
                os.urandom(16)
            except OSError as e:
                assert e.errno == errno.EMFILE, e.errno
            else:
                raise AssertionError("OSError not raised")
            """
        assert_python_ok('-c', code)
    def test_urandom_fd_closed(self):
        # Issue #21207: urandom() should reopen its fd to /dev/urandom if
        # closed.
        code = """if 1:
            import os
            import sys
            os.urandom(4)
            os.closerange(3, 256)
            sys.stdout.buffer.write(os.urandom(4))
            """
        rc, out, err = assert_python_ok('-Sc', code)
    def test_urandom_fd_reopened(self):
        # Issue #21207: urandom() should detect its fd to /dev/urandom
        # changed to something else, and reopen it.
        with open(support.TESTFN, 'wb') as f:
            f.write(b"x" * 256)
        self.addCleanup(os.unlink, support.TESTFN)
        code = """if 1:
            import os
            import sys
            os.urandom(4)
            for fd in range(3, 256):
                try:
                    os.close(fd)
                except OSError:
                    pass
                else:
                    # Found the urandom fd (XXX hopefully)
                    break
            os.closerange(3, 256)
            with open({TESTFN!r}, 'rb') as f:
                os.dup2(f.fileno(), fd)
                sys.stdout.buffer.write(os.urandom(4))
                sys.stdout.buffer.write(os.urandom(4))
            """.format(TESTFN=support.TESTFN)
        rc, out, err = assert_python_ok('-Sc', code)
        self.assertEqual(len(out), 8)
        self.assertNotEqual(out[0:4], out[4:8])
        rc, out2, err2 = assert_python_ok('-Sc', code)
        self.assertEqual(len(out2), 8)
        self.assertNotEqual(out2, out)
@contextlib.contextmanager
def _execvpe_mockup(defpath=None):
    """
    Stubs out execv and execve functions when used as context manager.
    Records exec calls. The mock execv and execve functions always raise an
    exception as they would normally never return.
    """
    # A list of tuples containing (function name, first arg, args)
    # of calls to execv or execve that have been made.
    calls = []
    def mock_execv(name, *args):
        calls.append(('execv', name, args))
        raise RuntimeError("execv called")
    def mock_execve(name, *args):
        calls.append(('execve', name, args))
        raise OSError(errno.ENOTDIR, "execve called")
    try:
        # Save the real functions (and defpath) before patching so the
        # finally block can always restore them.
        orig_execv = os.execv
        orig_execve = os.execve
        orig_defpath = os.defpath
        os.execv = mock_execv
        os.execve = mock_execve
        # Optionally override os.defpath so callers control the search
        # path seen by os._execvpe().
        if defpath is not None:
            os.defpath = defpath
        yield calls
    finally:
        os.execv = orig_execv
        os.execve = orig_execve
        os.defpath = orig_defpath
class ExecTests(unittest.TestCase):
    """Tests for os.execvpe() and the private os._execvpe() helper."""
    @unittest.skipIf(USING_LINUXTHREADS,
                     "avoid triggering a linuxthreads bug: see issue #4970")
    def test_execvpe_with_bad_program(self):
        # A program that cannot be found anywhere raises OSError.
        self.assertRaises(OSError, os.execvpe, 'no such app-',
                          ['no such app-'], None)
    def test_execvpe_with_bad_arglist(self):
        # An empty argv list is rejected with ValueError.
        self.assertRaises(ValueError, os.execvpe, 'notepad', [], None)
    @unittest.skipUnless(hasattr(os, '_execvpe'),
                         "No internal os._execvpe function to test.")
    def _test_internal_execvpe(self, test_type):
        # Drive os._execvpe() with str or bytes paths (per *test_type*)
        # and inspect the execv/execve calls recorded by _execvpe_mockup().
        program_path = os.sep + 'absolutepath'
        if test_type is bytes:
            program = b'executable'
            fullpath = os.path.join(os.fsencode(program_path), program)
            native_fullpath = fullpath
            arguments = [b'progname', 'arg1', 'arg2']
        else:
            program = 'executable'
            arguments = ['progname', 'arg1', 'arg2']
            fullpath = os.path.join(program_path, program)
            if os.name != "nt":
                native_fullpath = os.fsencode(fullpath)
            else:
                native_fullpath = fullpath
        env = {'spam': 'beans'}
        # test os._execvpe() with an absolute path
        with _execvpe_mockup() as calls:
            self.assertRaises(RuntimeError,
                os._execvpe, fullpath, arguments)
            self.assertEqual(len(calls), 1)
            self.assertEqual(calls[0], ('execv', fullpath, (arguments,)))
        # test os._execvpe() with a relative path:
        # os.get_exec_path() returns defpath
        with _execvpe_mockup(defpath=program_path) as calls:
            self.assertRaises(OSError,
                os._execvpe, program, arguments, env=env)
            self.assertEqual(len(calls), 1)
            self.assertSequenceEqual(calls[0],
                ('execve', native_fullpath, (arguments, env)))
        # test os._execvpe() with a relative path:
        # os.get_exec_path() reads the 'PATH' variable
        with _execvpe_mockup() as calls:
            env_path = env.copy()
            if test_type is bytes:
                env_path[b'PATH'] = program_path
            else:
                env_path['PATH'] = program_path
            self.assertRaises(OSError,
                os._execvpe, program, arguments, env=env_path)
            self.assertEqual(len(calls), 1)
            self.assertSequenceEqual(calls[0],
                ('execve', native_fullpath, (arguments, env_path)))
    def test_internal_execvpe_str(self):
        self._test_internal_execvpe(str)
        if os.name != "nt":
            self._test_internal_execvpe(bytes)
@unittest.skipUnless(sys.platform == "win32", "Win32 specific tests")
class Win32ErrorTests(unittest.TestCase):
    """Check that os functions raise OSError for a missing path (win32).

    support.TESTFN is not created beforehand, so each call must fail.
    """
    def test_rename(self):
        self.assertRaises(OSError, os.rename, support.TESTFN, support.TESTFN+".bak")
    def test_remove(self):
        self.assertRaises(OSError, os.remove, support.TESTFN)
    def test_chdir(self):
        self.assertRaises(OSError, os.chdir, support.TESTFN)
    def test_mkdir(self):
        # mkdir over an existing regular file must fail; the file is
        # cleaned up even if the assertion fails.
        f = open(support.TESTFN, "w")
        try:
            self.assertRaises(OSError, os.mkdir, support.TESTFN)
        finally:
            f.close()
            os.unlink(support.TESTFN)
    def test_utime(self):
        self.assertRaises(OSError, os.utime, support.TESTFN, None)
    def test_chmod(self):
        self.assertRaises(OSError, os.chmod, support.TESTFN, 0)
class TestInvalidFD(unittest.TestCase):
    """Check that fd-based os functions reject an invalid file descriptor."""
    # Functions taking only an fd; a test_<name> method is generated for
    # each one below.
    singles = ["fchdir", "dup", "fdopen", "fdatasync", "fstat",
               "fstatvfs", "fsync", "tcgetpgrp", "ttyname"]
    #singles.append("close")
    #We omit close because it doesn't raise an exception on some platforms
    def get_single(f):
        # Factory: build a test method calling check() on os.<f>,
        # passing silently when the platform lacks the function.
        def helper(self):
            if hasattr(os, f):
                self.check(getattr(os, f))
        return helper
    for f in singles:
        locals()["test_"+f] = get_single(f)
    def check(self, f, *args):
        # f(bad_fd, *args) must raise OSError with errno EBADF.
        try:
            f(support.make_bad_fd(), *args)
        except OSError as e:
            self.assertEqual(e.errno, errno.EBADF)
        else:
            self.fail("%r didn't raise an OSError with a bad file descriptor"
                      % f)
    @unittest.skipUnless(hasattr(os, 'isatty'), 'test needs os.isatty()')
    def test_isatty(self):
        # isatty() does not raise: it returns False for a bad fd.
        self.assertEqual(os.isatty(support.make_bad_fd()), False)
    @unittest.skipUnless(hasattr(os, 'closerange'), 'test needs os.closerange()')
    def test_closerange(self):
        fd = support.make_bad_fd()
        # Make sure none of the descriptors we are about to close are
        # currently valid (issue 6542).
        for i in range(10):
            try: os.fstat(fd+i)
            except OSError:
                pass
            else:
                break
        if i < 2:
            raise unittest.SkipTest(
                "Unable to acquire a range of invalid file descriptors")
        # closerange() ignores errors on invalid fds and returns None.
        self.assertEqual(os.closerange(fd, fd + i-1), None)
    @unittest.skipUnless(hasattr(os, 'dup2'), 'test needs os.dup2()')
    def test_dup2(self):
        self.check(os.dup2, 20)
    @unittest.skipUnless(hasattr(os, 'fchmod'), 'test needs os.fchmod()')
    def test_fchmod(self):
        self.check(os.fchmod, 0)
    @unittest.skipUnless(hasattr(os, 'fchown'), 'test needs os.fchown()')
    def test_fchown(self):
        self.check(os.fchown, -1, -1)
    @unittest.skipUnless(hasattr(os, 'fpathconf'), 'test needs os.fpathconf()')
    def test_fpathconf(self):
        self.check(os.pathconf, "PC_NAME_MAX")
        self.check(os.fpathconf, "PC_NAME_MAX")
    @unittest.skipUnless(hasattr(os, 'ftruncate'), 'test needs os.ftruncate()')
    def test_ftruncate(self):
        self.check(os.truncate, 0)
        self.check(os.ftruncate, 0)
    @unittest.skipUnless(hasattr(os, 'lseek'), 'test needs os.lseek()')
    def test_lseek(self):
        self.check(os.lseek, 0, 0)
    @unittest.skipUnless(hasattr(os, 'read'), 'test needs os.read()')
    def test_read(self):
        self.check(os.read, 1)
    @unittest.skipUnless(hasattr(os, 'readv'), 'test needs os.readv()')
    def test_readv(self):
        buf = bytearray(10)
        self.check(os.readv, [buf])
    @unittest.skipUnless(hasattr(os, 'tcsetpgrp'), 'test needs os.tcsetpgrp()')
    def test_tcsetpgrpt(self):
        self.check(os.tcsetpgrp, 0)
    @unittest.skipUnless(hasattr(os, 'write'), 'test needs os.write()')
    def test_write(self):
        self.check(os.write, b" ")
    @unittest.skipUnless(hasattr(os, 'writev'), 'test needs os.writev()')
    def test_writev(self):
        self.check(os.writev, [b'abc'])
class LinkTests(unittest.TestCase):
    """Tests for os.link() (hard links)."""
    def setUp(self):
        self.file1 = support.TESTFN
        self.file2 = os.path.join(support.TESTFN + "2")
    def tearDown(self):
        for name in (self.file1, self.file2):
            if os.path.exists(name):
                os.unlink(name)
    def _test_link(self, file1, file2):
        """Hard-link file1 to file2; both names must open the same file."""
        with open(file1, "w") as f1:
            f1.write("test")
        with warnings.catch_warnings():
            # Silence any DeprecationWarning raised while linking.
            warnings.simplefilter("ignore", DeprecationWarning)
            os.link(file1, file2)
        with open(file1, "r") as f1, open(file2, "r") as f2:
            self.assertTrue(os.path.sameopenfile(f1.fileno(), f2.fileno()))
    def test_link(self):
        self._test_link(self.file1, self.file2)
    def test_link_bytes(self):
        encoding = sys.getfilesystemencoding()
        self._test_link(bytes(self.file1, encoding),
                        bytes(self.file2, encoding))
    def test_unicode_name(self):
        try:
            os.fsencode("\xf1")
        except UnicodeError:
            raise unittest.SkipTest("Unable to encode for this platform.")
        self.file1 += "\xf1"
        self.file2 = self.file1 + "2"
        self._test_link(self.file1, self.file2)
@unittest.skipIf(sys.platform == "win32", "Posix specific tests")
class PosixUidGidTests(unittest.TestCase):
    """Tests for the set*uid()/set*gid() family.

    When not running as root, setting id 0 must raise OSError; values of
    1<<32 or more must always raise OverflowError.  The -1 round-trips
    run in a subprocess so the test runner's own ids are never changed.
    """
    @unittest.skipUnless(hasattr(os, 'setuid'), 'test needs os.setuid()')
    def test_setuid(self):
        if os.getuid() != 0:
            self.assertRaises(OSError, os.setuid, 0)
        self.assertRaises(OverflowError, os.setuid, 1<<32)
    @unittest.skipUnless(hasattr(os, 'setgid'), 'test needs os.setgid()')
    def test_setgid(self):
        # HAVE_WHEEL_GROUP: members of the wheel group may be allowed to
        # switch to gid 0, so the OSError check is skipped for them.
        if os.getuid() != 0 and not HAVE_WHEEL_GROUP:
            self.assertRaises(OSError, os.setgid, 0)
        self.assertRaises(OverflowError, os.setgid, 1<<32)
    @unittest.skipUnless(hasattr(os, 'seteuid'), 'test needs os.seteuid()')
    def test_seteuid(self):
        if os.getuid() != 0:
            self.assertRaises(OSError, os.seteuid, 0)
        self.assertRaises(OverflowError, os.seteuid, 1<<32)
    @unittest.skipUnless(hasattr(os, 'setegid'), 'test needs os.setegid()')
    def test_setegid(self):
        if os.getuid() != 0 and not HAVE_WHEEL_GROUP:
            self.assertRaises(OSError, os.setegid, 0)
        self.assertRaises(OverflowError, os.setegid, 1<<32)
    @unittest.skipUnless(hasattr(os, 'setreuid'), 'test needs os.setreuid()')
    def test_setreuid(self):
        if os.getuid() != 0:
            self.assertRaises(OSError, os.setreuid, 0, 0)
        self.assertRaises(OverflowError, os.setreuid, 1<<32, 0)
        self.assertRaises(OverflowError, os.setreuid, 0, 1<<32)
    @unittest.skipUnless(hasattr(os, 'setreuid'), 'test needs os.setreuid()')
    def test_setreuid_neg1(self):
        # Needs to accept -1.  We run this in a subprocess to avoid
        # altering the test runner's process state (issue8045).
        subprocess.check_call([
                sys.executable, '-c',
                'import os,sys;os.setreuid(-1,-1);sys.exit(0)'])
    @unittest.skipUnless(hasattr(os, 'setregid'), 'test needs os.setregid()')
    def test_setregid(self):
        if os.getuid() != 0 and not HAVE_WHEEL_GROUP:
            self.assertRaises(OSError, os.setregid, 0, 0)
        self.assertRaises(OverflowError, os.setregid, 1<<32, 0)
        self.assertRaises(OverflowError, os.setregid, 0, 1<<32)
    @unittest.skipUnless(hasattr(os, 'setregid'), 'test needs os.setregid()')
    def test_setregid_neg1(self):
        # Needs to accept -1.  We run this in a subprocess to avoid
        # altering the test runner's process state (issue8045).
        subprocess.check_call([
                sys.executable, '-c',
                'import os,sys;os.setregid(-1,-1);sys.exit(0)'])
@unittest.skipIf(sys.platform == "win32", "Posix specific tests")
class Pep383Tests(unittest.TestCase):
    """PEP 383: surrogateescape round-tripping of non-ASCII filenames."""
    def setUp(self):
        # Prefer a directory name the filesystem encoding cannot represent,
        # then a non-ASCII one, then the plain ASCII fallback.
        if support.TESTFN_UNENCODABLE:
            self.dir = support.TESTFN_UNENCODABLE
        elif support.TESTFN_NONASCII:
            self.dir = support.TESTFN_NONASCII
        else:
            self.dir = support.TESTFN
        self.bdir = os.fsencode(self.dir)
        bytesfn = []
        def add_filename(fn):
            # Collect only names encodable to bytes on this platform.
            try:
                fn = os.fsencode(fn)
            except UnicodeEncodeError:
                return
            bytesfn.append(fn)
        add_filename(support.TESTFN_UNICODE)
        if support.TESTFN_UNENCODABLE:
            add_filename(support.TESTFN_UNENCODABLE)
        if support.TESTFN_NONASCII:
            add_filename(support.TESTFN_NONASCII)
        if not bytesfn:
            self.skipTest("couldn't create any non-ascii filename")
        # Files are created under bytes names; self.unicodefn holds their
        # os.fsdecode()'d str forms for the tests to compare against.
        self.unicodefn = set()
        os.mkdir(self.dir)
        try:
            for fn in bytesfn:
                support.create_empty_file(os.path.join(self.bdir, fn))
                fn = os.fsdecode(fn)
                if fn in self.unicodefn:
                    raise ValueError("duplicate filename")
                self.unicodefn.add(fn)
        except:
            shutil.rmtree(self.dir)
            raise
    def tearDown(self):
        shutil.rmtree(self.dir)
    def test_listdir(self):
        # os.listdir() must round-trip the created names back to str.
        expected = self.unicodefn
        found = set(os.listdir(self.dir))
        self.assertEqual(found, expected)
        # test listdir without arguments
        current_directory = os.getcwd()
        try:
            os.chdir(os.sep)
            self.assertEqual(set(os.listdir()), set(os.listdir(os.sep)))
        finally:
            os.chdir(current_directory)
    def test_open(self):
        # Files created via bytes names must open via their str names.
        for fn in self.unicodefn:
            f = open(os.path.join(self.dir, fn), 'rb')
            f.close()
    @unittest.skipUnless(hasattr(os, 'statvfs'),
                         "need os.statvfs()")
    def test_statvfs(self):
        # issue #9645
        for fn in self.unicodefn:
            # should not fail with file not found error
            fullname = os.path.join(self.dir, fn)
            os.statvfs(fullname)
    def test_stat(self):
        for fn in self.unicodefn:
            os.stat(os.path.join(self.dir, fn))
@unittest.skipUnless(sys.platform == "win32", "Win32 specific tests")
class Win32KillTests(unittest.TestCase):
    """os.kill() on Windows: a numeric *sig* becomes the child's exit code;
    console control events are delivered to a child process group."""
    def _kill(self, sig):
        # Start sys.executable as a subprocess and communicate from the
        # subprocess to the parent that the interpreter is ready. When it
        # becomes ready, send *sig* via os.kill to the subprocess and check
        # that the return code is equal to *sig*.
        import ctypes
        from ctypes import wintypes
        import msvcrt
        # Since we can't access the contents of the process' stdout until the
        # process has exited, use PeekNamedPipe to see what's inside stdout
        # without waiting. This is done so we can tell that the interpreter
        # is started and running at a point where it could handle a signal.
        PeekNamedPipe = ctypes.windll.kernel32.PeekNamedPipe
        PeekNamedPipe.restype = wintypes.BOOL
        PeekNamedPipe.argtypes = (wintypes.HANDLE, # Pipe handle
                                  ctypes.POINTER(ctypes.c_char), # stdout buf
                                  wintypes.DWORD, # Buffer size
                                  ctypes.POINTER(wintypes.DWORD), # bytes read
                                  ctypes.POINTER(wintypes.DWORD), # bytes avail
                                  ctypes.POINTER(wintypes.DWORD)) # bytes left
        msg = "running"
        proc = subprocess.Popen([sys.executable, "-c",
                                 "import sys;"
                                 "sys.stdout.write('{}');"
                                 "sys.stdout.flush();"
                                 "input()".format(msg)],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                stdin=subprocess.PIPE)
        self.addCleanup(proc.stdout.close)
        self.addCleanup(proc.stderr.close)
        self.addCleanup(proc.stdin.close)
        # Poll (up to 100 * 0.1s) until the child has written *msg*,
        # proving it is far enough along to receive a signal.
        count, max = 0, 100
        while count < max and proc.poll() is None:
            # Create a string buffer to store the result of stdout from the pipe
            buf = ctypes.create_string_buffer(len(msg))
            # Obtain the text currently in proc.stdout
            # Bytes read/avail/left are left as NULL and unused
            rslt = PeekNamedPipe(msvcrt.get_osfhandle(proc.stdout.fileno()),
                                 buf, ctypes.sizeof(buf), None, None, None)
            self.assertNotEqual(rslt, 0, "PeekNamedPipe failed")
            if buf.value:
                self.assertEqual(msg, buf.value.decode())
                break
            time.sleep(0.1)
            count += 1
        else:
            # while/else: loop exhausted without seeing the ready message.
            self.fail("Did not receive communication from the subprocess")
        os.kill(proc.pid, sig)
        self.assertEqual(proc.wait(), sig)
    def test_kill_sigterm(self):
        # SIGTERM doesn't mean anything special, but make sure it works
        self._kill(signal.SIGTERM)
    def test_kill_int(self):
        # os.kill on Windows can take an int which gets set as the exit code
        self._kill(100)
    def _kill_with_event(self, event, name):
        # Send console-control *event* to a child (win_console_handler.py)
        # that installed a handler; the shared 1-byte mmap, keyed by a
        # unique tagname, signals when the child finished initializing.
        tagname = "test_os_%s" % uuid.uuid1()
        m = mmap.mmap(-1, 1, tagname)
        m[0] = 0
        # Run a script which has console control handling enabled.
        proc = subprocess.Popen([sys.executable,
                   os.path.join(os.path.dirname(__file__),
                                "win_console_handler.py"), tagname],
                   creationflags=subprocess.CREATE_NEW_PROCESS_GROUP)
        # Let the interpreter startup before we send signals. See #3137.
        count, max = 0, 100
        while count < max and proc.poll() is None:
            if m[0] == 1:
                break
            time.sleep(0.1)
            count += 1
        else:
            # Forcefully kill the process if we weren't able to signal it.
            os.kill(proc.pid, signal.SIGINT)
            self.fail("Subprocess didn't finish initialization")
        os.kill(proc.pid, event)
        # proc.send_signal(event) could also be done here.
        # Allow time for the signal to be passed and the process to exit.
        time.sleep(0.5)
        if not proc.poll():
            # Forcefully kill the process if we weren't able to signal it.
            os.kill(proc.pid, signal.SIGINT)
            self.fail("subprocess did not stop on {}".format(name))
    @unittest.skip("subprocesses aren't inheriting Ctrl+C property")
    def test_CTRL_C_EVENT(self):
        from ctypes import wintypes
        import ctypes
        # Make a NULL value by creating a pointer with no argument.
        NULL = ctypes.POINTER(ctypes.c_int)()
        SetConsoleCtrlHandler = ctypes.windll.kernel32.SetConsoleCtrlHandler
        SetConsoleCtrlHandler.argtypes = (ctypes.POINTER(ctypes.c_int),
                                          wintypes.BOOL)
        SetConsoleCtrlHandler.restype = wintypes.BOOL
        # Calling this with NULL and FALSE causes the calling process to
        # handle Ctrl+C, rather than ignore it. This property is inherited
        # by subprocesses.
        SetConsoleCtrlHandler(NULL, 0)
        self._kill_with_event(signal.CTRL_C_EVENT, "CTRL_C_EVENT")
    def test_CTRL_BREAK_EVENT(self):
        self._kill_with_event(signal.CTRL_BREAK_EVENT, "CTRL_BREAK_EVENT")
@unittest.skipUnless(sys.platform == "win32", "Win32 specific tests")
class Win32ListdirTests(unittest.TestCase):
    """os.listdir() on Windows: str, bytes, and extended ('\\\\?\\') paths."""
    def setUp(self):
        # Populate TESTFN with SUB0/SUB1 directories and FILE0/FILE1 files.
        self.created_paths = []
        for index in range(2):
            subdir = 'SUB%d' % index
            regular = 'FILE%d' % index
            os.makedirs(os.path.join(support.TESTFN, subdir))
            target = os.path.join(support.TESTFN, regular)
            with open(target, 'w') as stream:
                stream.write("I'm %s and proud of it. Blame test_os.\n" % target)
            self.created_paths += [subdir, regular]
        self.created_paths.sort()
    def tearDown(self):
        shutil.rmtree(support.TESTFN)
    def test_listdir_no_extended_path(self):
        """listdir() accepts plain str and bytes paths."""
        expected_bytes = [os.fsencode(name) for name in self.created_paths]
        self.assertEqual(sorted(os.listdir(support.TESTFN)),
                         self.created_paths)
        self.assertEqual(sorted(os.listdir(os.fsencode(support.TESTFN))),
                         expected_bytes)
    def test_listdir_extended_path(self):
        """listdir() accepts paths prefixed with '\\\\?\\' (str and bytes)."""
        # See: http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx#maxpath
        extended = '\\\\?\\' + os.path.abspath(support.TESTFN)
        self.assertEqual(sorted(os.listdir(extended)),
                         self.created_paths)
        extended_bytes = b'\\\\?\\' + os.fsencode(os.path.abspath(support.TESTFN))
        self.assertEqual(sorted(os.listdir(extended_bytes)),
                         [os.fsencode(name) for name in self.created_paths])
@unittest.skipUnless(sys.platform == "win32", "Win32 specific tests")
@support.skip_unless_symlink
class Win32SymlinkTests(unittest.TestCase):
    """Windows symlink semantics: file vs. directory links, stat/lstat
    behaviour, links to missing targets, relative targets (issue 12084)."""
    # Fixture names: links are created in the CWD, targets are this test
    # file and its directory.
    filelink = 'filelinktest'
    filelink_target = os.path.abspath(__file__)
    dirlink = 'dirlinktest'
    dirlink_target = os.path.dirname(filelink_target)
    missing_link = 'missing link'
    def setUp(self):
        # Sanity-check the fixture: targets exist, link names do not.
        assert os.path.exists(self.dirlink_target)
        assert os.path.exists(self.filelink_target)
        assert not os.path.exists(self.dirlink)
        assert not os.path.exists(self.filelink)
        assert not os.path.exists(self.missing_link)
    def tearDown(self):
        # lexists() for missing_link: the link itself exists even though
        # its target does not.
        if os.path.exists(self.filelink):
            os.remove(self.filelink)
        if os.path.exists(self.dirlink):
            os.rmdir(self.dirlink)
        if os.path.lexists(self.missing_link):
            os.remove(self.missing_link)
    def test_directory_link(self):
        os.symlink(self.dirlink_target, self.dirlink)
        self.assertTrue(os.path.exists(self.dirlink))
        self.assertTrue(os.path.isdir(self.dirlink))
        self.assertTrue(os.path.islink(self.dirlink))
        self.check_stat(self.dirlink, self.dirlink_target)
    def test_file_link(self):
        os.symlink(self.filelink_target, self.filelink)
        self.assertTrue(os.path.exists(self.filelink))
        self.assertTrue(os.path.isfile(self.filelink))
        self.assertTrue(os.path.islink(self.filelink))
        self.check_stat(self.filelink, self.filelink_target)
    def _create_missing_dir_link(self):
        'Create a "directory" link to a non-existent target'
        linkname = self.missing_link
        if os.path.lexists(linkname):
            os.remove(linkname)
        target = r'c:\\target does not exist.29r3c740'
        assert not os.path.exists(target)
        target_is_dir = True
        os.symlink(target, linkname, target_is_dir)
    def test_remove_directory_link_to_missing_target(self):
        self._create_missing_dir_link()
        # For compatibility with Unix, os.remove will check the
        # directory status and call RemoveDirectory if the symlink
        # was created with target_is_dir==True.
        os.remove(self.missing_link)
    @unittest.skip("currently fails; consider for improvement")
    def test_isdir_on_directory_link_to_missing_target(self):
        self._create_missing_dir_link()
        # consider having isdir return true for directory links
        self.assertTrue(os.path.isdir(self.missing_link))
    @unittest.skip("currently fails; consider for improvement")
    def test_rmdir_on_directory_link_to_missing_target(self):
        self._create_missing_dir_link()
        # consider allowing rmdir to remove directory links
        os.rmdir(self.missing_link)
    def check_stat(self, link, target):
        # stat() follows the link (equals target's stat); lstat() does not.
        self.assertEqual(os.stat(link), os.stat(target))
        self.assertNotEqual(os.lstat(link), os.stat(link))
        bytes_link = os.fsencode(link)
        with warnings.catch_warnings():
            # bytes paths emit DeprecationWarning on Windows; silence it
            # so the same checks can run on the bytes form.
            warnings.simplefilter("ignore", DeprecationWarning)
            self.assertEqual(os.stat(bytes_link), os.stat(target))
            self.assertNotEqual(os.lstat(bytes_link), os.stat(bytes_link))
    def test_12084(self):
        # A link with a *relative* target must stat() correctly from the
        # link's own directory, from above it, and from below it.
        level1 = os.path.abspath(support.TESTFN)
        level2 = os.path.join(level1, "level2")
        level3 = os.path.join(level2, "level3")
        try:
            os.mkdir(level1)
            os.mkdir(level2)
            os.mkdir(level3)
            file1 = os.path.abspath(os.path.join(level1, "file1"))
            with open(file1, "w") as f:
                f.write("file1")
            orig_dir = os.getcwd()
            try:
                os.chdir(level2)
                link = os.path.join(level2, "link")
                os.symlink(os.path.relpath(file1), "link")
                self.assertIn("link", os.listdir(os.getcwd()))
                # Check os.stat calls from the same dir as the link
                self.assertEqual(os.stat(file1), os.stat("link"))
                # Check os.stat calls from a dir below the link
                os.chdir(level1)
                self.assertEqual(os.stat(file1),
                                 os.stat(os.path.relpath(link)))
                # Check os.stat calls from a dir above the link
                os.chdir(level3)
                self.assertEqual(os.stat(file1),
                                 os.stat(os.path.relpath(link)))
            finally:
                os.chdir(orig_dir)
        except OSError as err:
            self.fail(err)
        finally:
            os.remove(file1)
            shutil.rmtree(level1)
@support.skip_unless_symlink
class NonLocalSymlinkTests(unittest.TestCase):
    def setUp(self):
        """
        Create this structure:

        base
         \___ some_dir
        """
        os.makedirs('base/some_dir')
    def tearDown(self):
        shutil.rmtree('base')
    def test_directory_link_nonlocal(self):
        """
        A link base/some_link -> some_dir must be detected as a directory.

        The target is resolved relative to the *link*, not the current
        directory ('some_dir' is not a valid CWD-relative path here).
        issue13772 regression test: directory detection used to fail when
        the target was not given relative to the current directory.
        """
        link_path = os.path.join('base', 'some_link')
        os.symlink('some_dir', link_path)
        assert os.path.isdir(link_path)
class FSEncodingTests(unittest.TestCase):
    """Basic os.fsencode() / os.fsdecode() behaviour."""
    def test_nop(self):
        # Already-encoded / already-decoded values pass through untouched.
        self.assertEqual(os.fsencode(b'abc\xff'), b'abc\xff')
        self.assertEqual(os.fsdecode('abc\u0141'), 'abc\u0141')
    def test_identity(self):
        # fsdecode(fsencode(x)) == x for every str the fs encoding accepts.
        for sample in ('unicode\u0141', 'latin\xe9', 'ascii'):
            try:
                encoded = os.fsencode(sample)
            except UnicodeEncodeError:
                continue
            self.assertEqual(os.fsdecode(encoded), sample)
class DeviceEncodingTests(unittest.TestCase):
    """os.device_encoding() sanity checks."""
    def test_bad_fd(self):
        # A file descriptor that doesn't exist yields None, not an error.
        self.assertIsNone(os.device_encoding(123456))
    @unittest.skipUnless(os.isatty(0) and (sys.platform.startswith('win') or
            (hasattr(locale, 'nl_langinfo') and hasattr(locale, 'CODESET'))),
            'test requires a tty and either Windows or nl_langinfo(CODESET)')
    def test_device_encoding(self):
        # stdin is a tty here, so it must report a real, resolvable codec.
        name = os.device_encoding(0)
        self.assertIsNotNone(name)
        self.assertTrue(codecs.lookup(name))
class PidTests(unittest.TestCase):
    @unittest.skipUnless(hasattr(os, 'getppid'), "test needs os.getppid")
    def test_getppid(self):
        """A child's os.getppid() must equal our own os.getpid()."""
        child = subprocess.Popen(
            [sys.executable, '-c', 'import os; print(os.getppid())'],
            stdout=subprocess.PIPE)
        out, _ = child.communicate()
        # We are the parent of our subprocess
        self.assertEqual(int(out), os.getpid())
# The introduction of this TestCase caused at least two different errors on
# *nix buildbots; it is therefore unconditionally skipped for now.
@unittest.skip("Skip due to platform/environment differences on *NIX buildbots")
@unittest.skipUnless(hasattr(os, 'getlogin'), "test needs os.getlogin")
class LoginTests(unittest.TestCase):
    def test_getlogin(self):
        """os.getlogin() must return a non-empty user name."""
        name = os.getlogin()
        self.assertNotEqual(len(name), 0)
@unittest.skipUnless(hasattr(os, 'getpriority') and hasattr(os, 'setpriority'),
                     "needs os.getpriority and os.setpriority")
class ProgramPriorityTests(unittest.TestCase):
    """Tests for os.getpriority() and os.setpriority()."""

    def test_set_get_priority(self):
        # Bump our own niceness by one, read it back, then restore it.
        base = os.getpriority(os.PRIO_PROCESS, os.getpid())
        os.setpriority(os.PRIO_PROCESS, os.getpid(), base + 1)
        try:
            new_prio = os.getpriority(os.PRIO_PROCESS, os.getpid())
            # At the niceness ceiling the increment may be clamped, so the
            # result cannot be asserted reliably -- skip instead.
            if base >= 19 and new_prio <= 19:
                raise unittest.SkipTest(
      "unable to reliably test setpriority at current nice level of %s" % base)
            else:
                self.assertEqual(new_prio, base + 1)
        finally:
            try:
                os.setpriority(os.PRIO_PROCESS, os.getpid(), base)
            except OSError as err:
                # Lowering niceness back may be refused without privilege;
                # anything other than EACCES is a real failure.
                if err.errno != errno.EACCES:
                    raise
if threading is not None:
    class SendfileTestServer(asyncore.dispatcher, threading.Thread):
        """Threaded one-shot TCP server for TestSendfile: accepts a single
        connection, greets it with "220 ready", and records every byte it
        receives so the test can compare against what sendfile() sent."""

        class Handler(asynchat.async_chat):
            # Per-connection handler; accumulates received chunks.

            def __init__(self, conn):
                asynchat.async_chat.__init__(self, conn)
                self.in_buffer = []
                self.closed = False
                # Greeting used by the client as a readiness handshake.
                self.push(b"220 ready\r\n")

            def handle_read(self):
                data = self.recv(4096)
                self.in_buffer.append(data)

            def get_data(self):
                # Everything received so far, as one bytes object.
                return b''.join(self.in_buffer)

            def handle_close(self):
                self.close()
                self.closed = True

            def handle_error(self):
                raise

        def __init__(self, address):
            threading.Thread.__init__(self)
            asyncore.dispatcher.__init__(self)
            self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
            self.bind(address)
            self.listen(5)
            self.host, self.port = self.socket.getsockname()[:2]
            self.handler_instance = None
            self._active = False
            self._active_lock = threading.Lock()

        # --- public API

        @property
        def running(self):
            return self._active

        def start(self):
            # Block until the asyncore loop thread has actually started.
            assert not self.running
            self.__flag = threading.Event()
            threading.Thread.start(self)
            self.__flag.wait()

        def stop(self):
            assert self.running
            self._active = False
            self.join()

        def wait(self):
            # wait for handler connection to be closed, then stop the server
            while not getattr(self.handler_instance, "closed", False):
                time.sleep(0.001)
            self.stop()

        # --- internals

        def run(self):
            self._active = True
            self.__flag.set()
            while self._active and asyncore.socket_map:
                self._active_lock.acquire()
                asyncore.loop(timeout=0.001, count=1)
                self._active_lock.release()
            asyncore.close_all()

        def handle_accept(self):
            conn, addr = self.accept()
            self.handler_instance = self.Handler(conn)

        def handle_connect(self):
            self.close()
        handle_read = handle_connect

        def writable(self):
            return 0

        def handle_error(self):
            raise
@unittest.skipUnless(threading is not None, "test needs threading module")
@unittest.skipUnless(hasattr(os, 'sendfile'), "test needs os.sendfile()")
class TestSendfile(unittest.TestCase):
    """Exercise os.sendfile() against a local SendfileTestServer."""

    DATA = b"12345abcde" * 16 * 1024 # 160 KB
    # headers/trailers are a BSD extension; on these platforms sendfile()
    # accepts only the 4-argument form.
    SUPPORT_HEADERS_TRAILERS = not sys.platform.startswith("linux") and \
                               not sys.platform.startswith("solaris") and \
                               not sys.platform.startswith("sunos")
    requires_headers_trailers = unittest.skipUnless(SUPPORT_HEADERS_TRAILERS,
                                'requires headers and trailers support')

    @classmethod
    def setUpClass(cls):
        with open(support.TESTFN, "wb") as f:
            f.write(cls.DATA)

    @classmethod
    def tearDownClass(cls):
        support.unlink(support.TESTFN)

    def setUp(self):
        self.server = SendfileTestServer((support.HOST, 0))
        self.server.start()
        self.client = socket.socket()
        self.client.connect((self.server.host, self.server.port))
        self.client.settimeout(1)
        # synchronize by waiting for "220 ready" response
        self.client.recv(1024)
        self.sockno = self.client.fileno()
        self.file = open(support.TESTFN, 'rb')
        self.fileno = self.file.fileno()

    def tearDown(self):
        self.file.close()
        self.client.close()
        if self.server.running:
            self.server.stop()

    def sendfile_wrapper(self, sock, file, offset, nbytes, headers=[], trailers=[]):
        """A higher level wrapper representing how an application is
        supposed to use sendfile(): retry on EAGAIN/EBUSY, propagate
        everything else.
        """
        while 1:
            try:
                if self.SUPPORT_HEADERS_TRAILERS:
                    return os.sendfile(sock, file, offset, nbytes, headers,
                                       trailers)
                else:
                    return os.sendfile(sock, file, offset, nbytes)
            except OSError as err:
                if err.errno == errno.ECONNRESET:
                    # disconnected
                    raise
                elif err.errno in (errno.EAGAIN, errno.EBUSY):
                    # we have to retry send data
                    continue
                else:
                    raise

    def test_send_whole_file(self):
        # normal send
        total_sent = 0
        offset = 0
        nbytes = 4096
        while total_sent < len(self.DATA):
            sent = self.sendfile_wrapper(self.sockno, self.fileno, offset, nbytes)
            if sent == 0:
                break
            offset += sent
            total_sent += sent
            self.assertTrue(sent <= nbytes)
            self.assertEqual(offset, total_sent)
        self.assertEqual(total_sent, len(self.DATA))
        self.client.shutdown(socket.SHUT_RDWR)
        self.client.close()
        self.server.wait()
        data = self.server.handler_instance.get_data()
        self.assertEqual(len(data), len(self.DATA))
        self.assertEqual(data, self.DATA)

    def test_send_at_certain_offset(self):
        # start sending a file at a certain offset
        total_sent = 0
        offset = len(self.DATA) // 2
        must_send = len(self.DATA) - offset
        nbytes = 4096
        while total_sent < must_send:
            sent = self.sendfile_wrapper(self.sockno, self.fileno, offset, nbytes)
            if sent == 0:
                break
            offset += sent
            total_sent += sent
            self.assertTrue(sent <= nbytes)
        self.client.shutdown(socket.SHUT_RDWR)
        self.client.close()
        self.server.wait()
        data = self.server.handler_instance.get_data()
        expected = self.DATA[len(self.DATA) // 2:]
        self.assertEqual(total_sent, len(expected))
        self.assertEqual(len(data), len(expected))
        self.assertEqual(data, expected)

    def test_offset_overflow(self):
        # specify an offset > file size: nothing should be sent.
        offset = len(self.DATA) + 4096
        try:
            sent = os.sendfile(self.sockno, self.fileno, offset, 4096)
        except OSError as e:
            # Solaris can raise EINVAL if offset >= file length, ignore.
            if e.errno != errno.EINVAL:
                raise
        else:
            self.assertEqual(sent, 0)
        self.client.shutdown(socket.SHUT_RDWR)
        self.client.close()
        self.server.wait()
        data = self.server.handler_instance.get_data()
        self.assertEqual(data, b'')

    def test_invalid_offset(self):
        with self.assertRaises(OSError) as cm:
            os.sendfile(self.sockno, self.fileno, -1, 4096)
        self.assertEqual(cm.exception.errno, errno.EINVAL)

    def test_keywords(self):
        # Keyword arguments should be supported ('in' is a keyword, so it
        # has to be passed via **kwargs).
        os.sendfile(out=self.sockno, offset=0, count=4096,
            **{'in': self.fileno})
        if self.SUPPORT_HEADERS_TRAILERS:
            os.sendfile(self.sockno, self.fileno, offset=0, count=4096,
                headers=(), trailers=(), flags=0)

    # --- headers / trailers tests

    @requires_headers_trailers
    def test_headers(self):
        # headers are sent before the file data.
        total_sent = 0
        sent = os.sendfile(self.sockno, self.fileno, 0, 4096,
                           headers=[b"x" * 512])
        total_sent += sent
        offset = 4096
        nbytes = 4096
        while 1:
            sent = self.sendfile_wrapper(self.sockno, self.fileno,
                                         offset, nbytes)
            if sent == 0:
                break
            total_sent += sent
            offset += sent
        expected_data = b"x" * 512 + self.DATA
        self.assertEqual(total_sent, len(expected_data))
        self.client.close()
        self.server.wait()
        data = self.server.handler_instance.get_data()
        self.assertEqual(hash(data), hash(expected_data))

    @requires_headers_trailers
    def test_trailers(self):
        # trailers are sent after the file data.
        TESTFN2 = support.TESTFN + "2"
        file_data = b"abcdef"
        with open(TESTFN2, 'wb') as f:
            f.write(file_data)
        with open(TESTFN2, 'rb')as f:
            self.addCleanup(os.remove, TESTFN2)
            os.sendfile(self.sockno, f.fileno(), 0, len(file_data),
                        trailers=[b"1234"])
            self.client.close()
            self.server.wait()
            data = self.server.handler_instance.get_data()
            self.assertEqual(data, b"abcdef1234")

    @requires_headers_trailers
    @unittest.skipUnless(hasattr(os, 'SF_NODISKIO'),
                         'test needs os.SF_NODISKIO')
    def test_flags(self):
        # SF_NODISKIO may legitimately fail with EBUSY/EAGAIN; anything
        # else is a real error.
        try:
            os.sendfile(self.sockno, self.fileno, 0, 4096,
                        flags=os.SF_NODISKIO)
        except OSError as err:
            if err.errno not in (errno.EBUSY, errno.EAGAIN):
                raise
def supports_extended_attributes():
    """Return True if this platform's extended-attribute support is usable.

    Usable means: os.setxattr exists, a "user." attribute can actually be
    set on a regular file (some filesystems refuse), and the kernel is not
    older than 2.6.39 (earlier kernels don't respect setxattr flags).
    """
    if not hasattr(os, "setxattr"):
        return False
    try:
        with open(support.TESTFN, "wb") as fp:
            try:
                os.setxattr(fp.fileno(), b"user.test", b"")
            except OSError:
                # Filesystem (e.g. tmpfs without xattr, NFS) refused.
                return False
    finally:
        support.unlink(support.TESTFN)
    # Kernels < 2.6.39 don't respect setxattr flags.
    kernel_version = platform.release()
    # Raw string and escaped dots: the original pattern "2.6.(\d{1,2})"
    # used '.' as a wildcard and a non-raw '\d' (invalid escape sequence).
    m = re.match(r"2\.6\.(\d{1,2})", kernel_version)
    return m is None or int(m.group(1)) >= 39
@unittest.skipUnless(supports_extended_attributes(),
                     "no non-broken extended attribute support")
class ExtendedAttributeTests(unittest.TestCase):
    """get/set/list/removexattr, exercised with str names, bytes names,
    follow_symlinks=False, and raw file descriptors."""

    def tearDown(self):
        support.unlink(support.TESTFN)

    def _check_xattrs_str(self, s, getxattr, setxattr, removexattr, listxattr, **kwargs):
        # *s* converts attribute names (identity for str, or str->bytes);
        # the four callables are the xattr API variant under test, and
        # **kwargs carries e.g. follow_symlinks=False through to them.
        fn = support.TESTFN
        open(fn, "wb").close()
        # Reading a never-set attribute must fail with ENODATA.
        with self.assertRaises(OSError) as cm:
            getxattr(fn, s("user.test"), **kwargs)
        self.assertEqual(cm.exception.errno, errno.ENODATA)
        init_xattr = listxattr(fn)
        self.assertIsInstance(init_xattr, list)
        setxattr(fn, s("user.test"), b"", **kwargs)
        xattr = set(init_xattr)
        xattr.add("user.test")
        self.assertEqual(set(listxattr(fn)), xattr)
        self.assertEqual(getxattr(fn, b"user.test", **kwargs), b"")
        setxattr(fn, s("user.test"), b"hello", os.XATTR_REPLACE, **kwargs)
        self.assertEqual(getxattr(fn, b"user.test", **kwargs), b"hello")
        # XATTR_CREATE must refuse to overwrite an existing attribute...
        with self.assertRaises(OSError) as cm:
            setxattr(fn, s("user.test"), b"bye", os.XATTR_CREATE, **kwargs)
        self.assertEqual(cm.exception.errno, errno.EEXIST)
        # ...and XATTR_REPLACE must refuse to create a new one.
        with self.assertRaises(OSError) as cm:
            setxattr(fn, s("user.test2"), b"bye", os.XATTR_REPLACE, **kwargs)
        self.assertEqual(cm.exception.errno, errno.ENODATA)
        setxattr(fn, s("user.test2"), b"foo", os.XATTR_CREATE, **kwargs)
        xattr.add("user.test2")
        self.assertEqual(set(listxattr(fn)), xattr)
        removexattr(fn, s("user.test"), **kwargs)
        with self.assertRaises(OSError) as cm:
            getxattr(fn, s("user.test"), **kwargs)
        self.assertEqual(cm.exception.errno, errno.ENODATA)
        xattr.remove("user.test")
        self.assertEqual(set(listxattr(fn)), xattr)
        self.assertEqual(getxattr(fn, s("user.test2"), **kwargs), b"foo")
        # Larger value round-trip.
        setxattr(fn, s("user.test"), b"a"*1024, **kwargs)
        self.assertEqual(getxattr(fn, s("user.test"), **kwargs), b"a"*1024)
        removexattr(fn, s("user.test"), **kwargs)
        # Many attributes at once.
        many = sorted("user.test{}".format(i) for i in range(100))
        for thing in many:
            setxattr(fn, thing, b"x", **kwargs)
        self.assertEqual(set(listxattr(fn)), set(init_xattr) | set(many))

    def _check_xattrs(self, *args, **kwargs):
        # Run the scenario twice: attribute names as str, then as bytes.
        def make_bytes(s):
            return bytes(s, "ascii")
        self._check_xattrs_str(str, *args, **kwargs)
        support.unlink(support.TESTFN)
        self._check_xattrs_str(make_bytes, *args, **kwargs)

    def test_simple(self):
        self._check_xattrs(os.getxattr, os.setxattr, os.removexattr,
                           os.listxattr)

    def test_lpath(self):
        self._check_xattrs(os.getxattr, os.setxattr, os.removexattr,
                           os.listxattr, follow_symlinks=False)

    def test_fds(self):
        # Same scenario, but going through file descriptors.
        def getxattr(path, *args):
            with open(path, "rb") as fp:
                return os.getxattr(fp.fileno(), *args)
        def setxattr(path, *args):
            with open(path, "wb") as fp:
                os.setxattr(fp.fileno(), *args)
        def removexattr(path, *args):
            with open(path, "wb") as fp:
                os.removexattr(fp.fileno(), *args)
        def listxattr(path, *args):
            with open(path, "rb") as fp:
                return os.listxattr(fp.fileno(), *args)
        self._check_xattrs(getxattr, setxattr, removexattr, listxattr)
@unittest.skipUnless(sys.platform == "win32", "Win32 specific tests")
class Win32DeprecatedBytesAPI(unittest.TestCase):
    """On Windows, passing bytes paths to these os/nt functions must emit
    DeprecationWarning."""
    def test_deprecated(self):
        import nt
        filename = os.fsencode(support.TESTFN)
        with warnings.catch_warnings():
            # Promote the warning to an error so assertRaises can catch it.
            warnings.simplefilter("error", DeprecationWarning)
            for func, *args in (
                (nt._getfullpathname, filename),
                (nt._isdir, filename),
                (os.access, filename, os.R_OK),
                (os.chdir, filename),
                (os.chmod, filename, 0o777),
                (os.getcwdb,),
                (os.link, filename, filename),
                (os.listdir, filename),
                (os.lstat, filename),
                (os.mkdir, filename),
                (os.open, filename, os.O_RDONLY),
                (os.rename, filename, filename),
                (os.rmdir, filename),
                (os.startfile, filename),
                (os.stat, filename),
                (os.unlink, filename),
                (os.utime, filename),
            ):
                self.assertRaises(DeprecationWarning, func, *args)
    @support.skip_unless_symlink
    def test_symlink(self):
        # os.symlink with bytes arguments must warn as well.
        filename = os.fsencode(support.TESTFN)
        with warnings.catch_warnings():
            warnings.simplefilter("error", DeprecationWarning)
            self.assertRaises(DeprecationWarning,
                              os.symlink, filename, filename)
@unittest.skipUnless(hasattr(os, 'get_terminal_size'), "requires os.get_terminal_size")
class TermsizeTests(unittest.TestCase):
    def test_does_not_crash(self):
        """Check if get_terminal_size() returns a meaningful value.

        There's no easy portable way to actually check the size of the
        terminal, so let's check if it returns something sensible instead.
        """
        try:
            size = os.get_terminal_size()
        except OSError as e:
            if sys.platform == "win32" or e.errno in (errno.EINVAL, errno.ENOTTY):
                # Under win32 a generic OSError can be thrown if the
                # handle cannot be retrieved
                self.skipTest("failed to query terminal size")
            # skipTest() raises, so this only runs for unexpected errnos.
            raise
        self.assertGreaterEqual(size.columns, 0)
        self.assertGreaterEqual(size.lines, 0)
    def test_stty_match(self):
        """Check if stty returns the same results

        stty actually tests stdin, so get_terminal_size is invoked on
        stdin explicitly. If stty succeeded, then get_terminal_size()
        should work too.
        """
        try:
            size = subprocess.check_output(['stty', 'size']).decode().split()
        except (FileNotFoundError, subprocess.CalledProcessError):
            self.skipTest("stty invocation failed")
        # stty prints "rows cols"; get_terminal_size is (columns, lines).
        expected = (int(size[1]), int(size[0])) # reversed order
        try:
            actual = os.get_terminal_size(sys.__stdin__.fileno())
        except OSError as e:
            if sys.platform == "win32" or e.errno in (errno.EINVAL, errno.ENOTTY):
                # Under win32 a generic OSError can be thrown if the
                # handle cannot be retrieved
                self.skipTest("failed to query terminal size")
            raise
        self.assertEqual(expected, actual)
class OSErrorTests(unittest.TestCase):
    """Every path-taking os function must attach the offending path to
    OSError.filename, for str, str-subclass, bytes and memoryview inputs."""
    def setUp(self):
        class Str(str):
            # str subclass: path-like but not exactly str.
            pass

        self.bytes_filenames = []
        self.unicode_filenames = []
        # Prefer the "hard" test names when the platform provides them.
        if support.TESTFN_UNENCODABLE is not None:
            decoded = support.TESTFN_UNENCODABLE
        else:
            decoded = support.TESTFN
        self.unicode_filenames.append(decoded)
        self.unicode_filenames.append(Str(decoded))
        if support.TESTFN_UNDECODABLE is not None:
            encoded = support.TESTFN_UNDECODABLE
        else:
            encoded = os.fsencode(support.TESTFN)
        self.bytes_filenames.append(encoded)
        self.bytes_filenames.append(memoryview(encoded))
        self.filenames = self.bytes_filenames + self.unicode_filenames

    def test_oserror_filename(self):
        # (filenames-to-try, function, *extra_args) triples; every call is
        # expected to fail because the file does not exist.
        funcs = [
            (self.filenames, os.chdir,),
            (self.filenames, os.chmod, 0o777),
            (self.filenames, os.lstat,),
            (self.filenames, os.open, os.O_RDONLY),
            (self.filenames, os.rmdir,),
            (self.filenames, os.stat,),
            (self.filenames, os.unlink,),
        ]
        if sys.platform == "win32":
            funcs.extend((
                (self.bytes_filenames, os.rename, b"dst"),
                (self.bytes_filenames, os.replace, b"dst"),
                (self.unicode_filenames, os.rename, "dst"),
                (self.unicode_filenames, os.replace, "dst"),
                # Issue #16414: Don't test undecodable names with listdir()
                # because of a Windows bug.
                #
                # With the ANSI code page 932, os.listdir(b'\xe7') return an
                # empty list (instead of failing), whereas os.listdir(b'\xff')
                # raises a FileNotFoundError. It looks like a Windows bug:
                # b'\xe7' directory does not exist, FindFirstFileA(b'\xe7')
                # fails with ERROR_FILE_NOT_FOUND (2), instead of
                # ERROR_PATH_NOT_FOUND (3).
                (self.unicode_filenames, os.listdir,),
            ))
        else:
            funcs.extend((
                (self.filenames, os.listdir,),
                (self.filenames, os.rename, "dst"),
                (self.filenames, os.replace, "dst"),
            ))
        # Optional APIs: only test what this platform's os module exposes.
        if hasattr(os, "chown"):
            funcs.append((self.filenames, os.chown, 0, 0))
        if hasattr(os, "lchown"):
            funcs.append((self.filenames, os.lchown, 0, 0))
        if hasattr(os, "truncate"):
            funcs.append((self.filenames, os.truncate, 0))
        if hasattr(os, "chflags"):
            funcs.append((self.filenames, os.chflags, 0))
        if hasattr(os, "lchflags"):
            funcs.append((self.filenames, os.lchflags, 0))
        if hasattr(os, "chroot"):
            funcs.append((self.filenames, os.chroot,))
        if hasattr(os, "link"):
            if sys.platform == "win32":
                funcs.append((self.bytes_filenames, os.link, b"dst"))
                funcs.append((self.unicode_filenames, os.link, "dst"))
            else:
                funcs.append((self.filenames, os.link, "dst"))
        if hasattr(os, "listxattr"):
            funcs.extend((
                (self.filenames, os.listxattr,),
                (self.filenames, os.getxattr, "user.test"),
                (self.filenames, os.setxattr, "user.test", b'user'),
                (self.filenames, os.removexattr, "user.test"),
            ))
        if hasattr(os, "lchmod"):
            funcs.append((self.filenames, os.lchmod, 0o777))
        if hasattr(os, "readlink"):
            if sys.platform == "win32":
                funcs.append((self.unicode_filenames, os.readlink,))
            else:
                funcs.append((self.filenames, os.readlink,))
        for filenames, func, *func_args in funcs:
            for name in filenames:
                try:
                    func(name, *func_args)
                except OSError as err:
                    # The exact object passed in must be on the exception.
                    self.assertIs(err.filename, name)
                else:
                    self.fail("No exception thrown by {}".format(func))
class CPUCountTests(unittest.TestCase):
    def test_cpu_count(self):
        """os.cpu_count() is either None (undeterminable) or a positive int."""
        count = os.cpu_count()
        if count is None:
            self.skipTest("Could not determine the number of CPUs")
        self.assertIsInstance(count, int)
        self.assertGreater(count, 0)
class FDInheritanceTests(unittest.TestCase):
    """New file descriptors are non-inheritable by default (PEP 446);
    dup2() is the documented exception, and the inheritable flag must
    stay consistent with fcntl's FD_CLOEXEC."""
    def test_get_set_inheritable(self):
        fd = os.open(__file__, os.O_RDONLY)
        self.addCleanup(os.close, fd)
        self.assertEqual(os.get_inheritable(fd), False)
        os.set_inheritable(fd, True)
        self.assertEqual(os.get_inheritable(fd), True)
    @unittest.skipIf(fcntl is None, "need fcntl")
    def test_get_inheritable_cloexec(self):
        # get_inheritable() must reflect FD_CLOEXEC changed behind os's
        # back via fcntl.
        fd = os.open(__file__, os.O_RDONLY)
        self.addCleanup(os.close, fd)
        self.assertEqual(os.get_inheritable(fd), False)
        # clear FD_CLOEXEC flag
        flags = fcntl.fcntl(fd, fcntl.F_GETFD)
        flags &= ~fcntl.FD_CLOEXEC
        fcntl.fcntl(fd, fcntl.F_SETFD, flags)
        self.assertEqual(os.get_inheritable(fd), True)
    @unittest.skipIf(fcntl is None, "need fcntl")
    def test_set_inheritable_cloexec(self):
        # set_inheritable(True) must clear FD_CLOEXEC.
        fd = os.open(__file__, os.O_RDONLY)
        self.addCleanup(os.close, fd)
        self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
                         fcntl.FD_CLOEXEC)
        os.set_inheritable(fd, True)
        self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
                         0)
    def test_open(self):
        fd = os.open(__file__, os.O_RDONLY)
        self.addCleanup(os.close, fd)
        self.assertEqual(os.get_inheritable(fd), False)
    @unittest.skipUnless(hasattr(os, 'pipe'), "need os.pipe()")
    def test_pipe(self):
        rfd, wfd = os.pipe()
        self.addCleanup(os.close, rfd)
        self.addCleanup(os.close, wfd)
        self.assertEqual(os.get_inheritable(rfd), False)
        self.assertEqual(os.get_inheritable(wfd), False)
    def test_dup(self):
        fd1 = os.open(__file__, os.O_RDONLY)
        self.addCleanup(os.close, fd1)
        fd2 = os.dup(fd1)
        self.addCleanup(os.close, fd2)
        self.assertEqual(os.get_inheritable(fd2), False)
    @unittest.skipUnless(hasattr(os, 'dup2'), "need os.dup2()")
    def test_dup2(self):
        fd = os.open(__file__, os.O_RDONLY)
        self.addCleanup(os.close, fd)
        # inheritable by default
        fd2 = os.open(__file__, os.O_RDONLY)
        try:
            os.dup2(fd, fd2)
            self.assertEqual(os.get_inheritable(fd2), True)
        finally:
            os.close(fd2)
        # force non-inheritable
        fd3 = os.open(__file__, os.O_RDONLY)
        try:
            os.dup2(fd, fd3, inheritable=False)
            self.assertEqual(os.get_inheritable(fd3), False)
        finally:
            os.close(fd3)
    @unittest.skipUnless(hasattr(os, 'openpty'), "need os.openpty()")
    def test_openpty(self):
        master_fd, slave_fd = os.openpty()
        self.addCleanup(os.close, master_fd)
        self.addCleanup(os.close, slave_fd)
        self.assertEqual(os.get_inheritable(master_fd), False)
        self.assertEqual(os.get_inheritable(slave_fd), False)
@support.reap_threads
def test_main():
    """Run every TestCase in this module under regrtest's runner."""
    all_cases = (
        FileTests,
        StatAttributeTests,
        UtimeTests,
        EnvironTests,
        WalkTests,
        FwalkTests,
        MakedirTests,
        DevNullTests,
        URandomTests,
        URandomFDTests,
        ExecTests,
        Win32ErrorTests,
        TestInvalidFD,
        PosixUidGidTests,
        Pep383Tests,
        Win32KillTests,
        Win32ListdirTests,
        Win32SymlinkTests,
        NonLocalSymlinkTests,
        FSEncodingTests,
        DeviceEncodingTests,
        PidTests,
        LoginTests,
        LinkTests,
        TestSendfile,
        ProgramPriorityTests,
        ExtendedAttributeTests,
        Win32DeprecatedBytesAPI,
        TermsizeTests,
        OSErrorTests,
        RemoveDirsTests,
        CPUCountTests,
        FDInheritanceTests,
    )
    support.run_unittest(*all_cases)
if __name__ == "__main__":
    # Support running this test module directly: python test_os.py
    test_main()
| {
"repo_name": "moto-timo/ironpython3",
"path": "Src/StdLib/Lib/test/test_os.py",
"copies": "3",
"size": "98383",
"license": "apache-2.0",
"hash": 5681176939369943000,
"line_mean": 36.3370018975,
"line_max": 101,
"alpha_frac": 0.5746724536,
"autogenerated": false,
"ratio": 3.827238776939236,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5901911230539236,
"avg_score": null,
"num_lines": null
} |
# Sort-direction tokens consumed by Column.render_sort().
ASC = 'asc'
DESC = 'desc'
class ColumnURL(object):
    """
    Represents the url a column's data should point to.
    :params
        url - The reversible namespace / name combination for the
              relevant view
        args - A LIST of arguments to pass (even if there's just one),
               all of which correspond to model fields
        attrs - A DICT of arguments to set as the anchor tag attributes, NOT
                including the href (of course)
    Ex:
        class MyColumnUrl(ColumnURL):
            url = 'namespace:name'
            args = ['field1', 'field2']
            attr = {'class': 'btn btn-primary'}
    """
    # Reversible "namespace:name" string for URL resolution.
    url = None
    # NOTE(review): class-level mutable default shared by all subclasses that
    # don't override it — subclasses should *replace*, never mutate, this list.
    args = []
    # Optional dict of extra anchor-tag attributes (e.g. CSS classes).
    attrs = None
class Column(object):
    """Generic table column that resolves its value off a Python object.

    Built to work with model instances, but any object with the right
    attributes will do.

    :params
        field - attribute name the column reads from the row object
        header - text for the <th></th> cell
        accessor - overrides how the value is fetched: either a callable
                   taking the row object, or a dot / double-underscore
                   chain such as 'created.date' or 'participant__full_name'
        annotation - callable applied to the queryset before values are
                     read (e.g. a lambda adding an annotation)
        default - rendered when the resolved value is NULL (default '---')
        css_class - CSS class applied to each TD in the table
        url_class - ColumnURL subclass used to link the cell
        key - set by the owning table, not the user: the attribute name the
              column was declared under; used to derive field/header when
              those aren't given explicitly
        editable - whether the column renders a field from a supplied formset
        sortable - whether the column may be sorted; the sort field lookup
                   chain is sort_field -> accessor -> field
        sort_field - explicit ORM field name to sort on, for columns whose
                     accessor is only a rendering helper
    """

    def __init__(self, field=None, header=None, accessor=None,
                 annotation=None, default=None, css_class=None,
                 url_class=None, editable=False, sortable=False,
                 sort_field=None):
        self.field = field
        self.header = header
        self.accessor = accessor
        self.annotation = annotation
        if default is None:
            self.default = '---'
        else:
            self.default = default
        self.css_class = css_class
        self.url_class = url_class
        # Filled in later by the owning table.
        self.key = None
        self.editable = editable
        self.sortable = sortable
        self.sort_field = sort_field

    def is_linked(self):
        """True when a ColumnURL class is attached to this column."""
        return self.url_class is not None

    def get_url(self, request=None):
        """Instantiate and return this column's ColumnURL."""
        return self.url_class()

    def csv_value(self, object):
        """Value written to CSV exports; always the base rendering."""
        return Column.value(self, object)

    def value(self, object):
        """Resolve this column's display value from the row object."""
        if self.accessor is None and '__' not in self.field:
            # Plain attribute access straight off the object.
            resolved = getattr(object, self.field)
        elif hasattr(self.accessor, '__call__'):
            # The accessor may be any callable taking the row object.
            resolved = self.accessor(object)
        else:
            # Dot / double-underscore notation: walk the attribute chain,
            # calling any intermediate callables along the way.
            path = (self.accessor or self.field).replace('__', '.')
            resolved = object
            for part in path.split('.'):
                if resolved is None:
                    return self.default
                attr = getattr(resolved, part)
                if callable(attr):
                    resolved = attr()
                else:
                    resolved = attr
        # Falsy values (None, '', 0, ...) fall back to the default.
        return resolved or self.default

    def get_sort_field(self):
        """ORM field used for ordering: sort_field, else accessor, else field."""
        return self.sort_field or self.accessor or self.field

    def render_sort(self, direction):
        """Return the ORM ordering expression for *direction* (ASC or DESC)."""
        if direction == ASC:
            return self.render_sort_asc()
        return self.render_sort_desc()

    def render_sort_asc(self):
        return self.get_sort_field()

    def render_sort_desc(self):
        return '-%s' % self.render_sort_asc()
class DictColumn(Column):
    """Column variant that reads its values out of plain dictionaries.

    Meant for tables built on REST payloads, used together with the
    MockQuerySet helper found in utils.py.
    """

    def value(self, d):
        """Resolve this column's display value from the row dict *d*."""
        if self.accessor is None and '__' not in self.field:
            # Straight key lookup on the dict.
            resolved = d.get(self.field, None)
        elif hasattr(self.accessor, '__call__'):
            # Callable accessor receives the whole row dict.
            resolved = self.accessor(d)
        else:
            # Dot / double-underscore chain of nested dict keys, calling
            # any intermediate callables along the way.
            path = (self.accessor or self.field).replace('__', '.')
            resolved = d
            for part in path.split('.'):
                if resolved is None:
                    return self.default
                entry = resolved.get(part, None)
                if callable(entry):
                    resolved = entry()
                else:
                    resolved = entry
        # Falsy values fall back to the default.
        return resolved or self.default
class FieldColumn(Column):
    """Removed API, kept only to raise a loud migration error on use."""

    def __init__(self, *args, **kwargs):
        message = ("FieldColumn is no longer used."
                   " Use Column(editable=True)")
        raise Exception(message)
| {
"repo_name": "SheepDogInc/sheepdog_tables",
"path": "sheepdog_tables/column.py",
"copies": "1",
"size": "5675",
"license": "bsd-3-clause",
"hash": -7645091137704419000,
"line_mean": 33.6036585366,
"line_max": 79,
"alpha_frac": 0.5874889868,
"autogenerated": false,
"ratio": 4.486166007905139,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5573654994705138,
"avg_score": null,
"num_lines": null
} |
"""A scatter plot example using Highcharts"""
from itertools import chain
import numpy as np
from Orange.data import Table
from Orange.widgets import gui, settings, widget, highcharts
class Scatterplot(highcharts.Highchart):
    """
    A Highchart subclass preconfigured with sane scatter-plot defaults:

    * scroll-wheel zooming is enabled,
    * rectangle (plus individual point) selection is enabled,
    * the chart type is fixed to 'scatter' (could also be 'bubble' or as
      appropriate; see the Highcharts JS docs),
    * the selection callback is wired in. The callback is passed a list
      (array) of indices of selected points for each data series the
      chart knows about.
    """
    def __init__(self, selection_callback, **kwargs):
        # Defaults are passed explicitly; any extra keyword arguments go
        # straight through to the Highchart base class.
        super().__init__(chart_type='scatter',
                         enable_zoom=True,
                         enable_select='xy+',
                         selection_callback=selection_callback,
                         **kwargs)
class OWScatterPlot(widget.OWWidget):
    """Example scatter plot visualization using Highcharts"""
    name = 'Simple Scatter Plot'
    description = 'An example scatter plot visualization using Highcharts.'
    icon = "icons/ScatterPlot.svg"
    # One input signal (a data table) and one output (the selected subset).
    inputs = [("Data", Table, "set_data")]
    outputs = [("Selected Data", Table)]
    # Persisted axis-attribute names (stored between sessions).
    attr_x = settings.Setting('')
    attr_y = settings.Setting('')
    graph_name = 'scatter'
    def __init__(self):
        """Build the controls, the chart widget, and an empty chart."""
        super().__init__()
        self.data = None
        self.indices = None
        self.n_selected = 0
        self.series_rows = []
        # Create the UI controls for selecting axes attributes
        box = gui.vBox(self.controlArea, 'Axes')
        self.cbx = gui.comboBox(box, self, 'attr_x',
                                label='X:',
                                orientation='horizontal',
                                callback=self.replot,
                                sendSelectedValue=True)
        self.cby = gui.comboBox(box, self, 'attr_y',
                                label='Y:',
                                orientation='horizontal',
                                callback=self.replot,
                                sendSelectedValue=True)
        gui.label(self.controlArea, self, '%(n_selected)d points are selected',
                  box='Info')
        gui.rubber(self.controlArea)
        # Create an instance of Scatter plot. Initial Highcharts configuration
        # can be passed as '_'-delimited keyword arguments. See Highcharts
        # class docstrings and Highcharts API documentation for more info and
        # usage examples.
        self.scatter = Scatterplot(selection_callback=self.on_selection,
                                   xAxis_gridLineWidth=0,
                                   yAxis_gridLineWidth=0,
                                   title_text='Scatterplot example',
                                   tooltip_shared=False,
                                   # In development, we can enable debug mode
                                   # and get right-click-inspect and related
                                   # console utils available:
                                   debug=True)
        # Just render an empty chart so it shows a nice 'No data to display'
        # warning
        self.scatter.chart()
        self.mainArea.layout().addWidget(self.scatter)
    def set_data(self, data):
        """Input-signal handler: store *data*, refresh combos and the plot."""
        self.data = data
        # When the widget receives new data, we need to:
        # ... reset the combo boxes ...
        def init_combos():
            # Repopulate both axis combos with the data's primitive variables.
            self.cbx.clear()
            self.cby.clear()
            for var in data.domain if data is not None else []:
                if var.is_primitive():
                    self.cbx.addItem(gui.attributeIconDict[var], var.name)
                    self.cby.addItem(gui.attributeIconDict[var], var.name)
        init_combos()
        # If the data is actually None, we should just
        # ... reset the scatter plot, selected indices ...
        if data is None:
            self.scatter.clear()
            self.indices = None
            self.commit()
            return
        # ... else, select the first two attributes and replot the scatter.
        if len(data.domain) >= 2:
            self.attr_x = self.cbx.itemText(0)
            self.attr_y = self.cbx.itemText(1)
            self.replot()
    def replot(self):
        """Rebuild the Highcharts options from current data/axes and redraw."""
        # Brace yourself ...
        if self.data is None or not self.attr_x or not self.attr_y:
            # Sanity checks failed; nothing to do
            return
        data = self.data
        attr_x, attr_y = data.domain[self.attr_x], data.domain[self.attr_y]
        # Highcharts widget accepts an options dict. This dict is converted
        # to options Object Highcharts JS uses in its examples. All keys are
        # **exactly the same** as for Highcharts JS.
        options = dict(series=[])
        # For our scatter plot, we need data in a standard numpy 2D array,
        # with x and y values in the two columns ...
        cols = []
        for attr in (attr_x, attr_y):
            subset = data[:, attr]
            cols.append(subset.Y if subset.Y.size else subset.X)
        # ... that's our X here
        X = np.column_stack(cols)
        # Highcharts point selection returns indexes of selected points per
        # each input series. Thus we should maintain a "map" of such indices
        # into the original data table.
        self.series_rows = []
        # If data has a discrete class, we want to color nodes by it, and we
        # do so by constructing a separate instance series for each class
        # value. This is one way of doing it. If you know of a better one,
        # you must be so lucky and I envy you!!
        if data.domain.has_discrete_class:
            y = data[:, data.domain.class_var].Y.ravel()
            for yval, yname in enumerate(data.domain.class_var.values):
                rows = (y == yval).nonzero()[0]
                self.series_rows.append(rows)
                options['series'].append(dict(data=X[rows], name=yname))
        # If data doesn't have a discrete class, just use the whole data as
        # a single series (colored with default color — no gradient fill in
        # this example).
        else:
            self.series_rows.append(np.arange(len(X)))
            options['series'].append(dict(data=X, showInLegend=False))
        # Besides the options dict, Highcharts can also be passed keyword
        # parameters, where each parameter is split on underscores in
        # simulated object hierarchy. This works:
        kwargs = dict(
            xAxis_title_text=attr_x.name,
            yAxis_title_text=attr_y.name,
            tooltip_headerFormat=(
                '<span style="color:{point.color}">\u25CF</span> '
                '{series.name} <br/>'),
            tooltip_pointFormat=(
                '<b>{attr_x.name}:</b> {{point.x}}<br/>'
                '<b>{attr_y.name}:</b> {{point.y}}<br/>').format_map(locals()))
        # If any of selected attributes is discrete, we correctly scatter it
        # as a categorical
        if attr_x.is_discrete:
            kwargs['xAxis_categories'] = attr_x.values
        if attr_y.is_discrete:
            kwargs['yAxis_categories'] = attr_y.values
        # That's it, we can scatter our scatter by calling its chart method
        # with the parameters we'd constructed
        self.scatter.chart(options, **kwargs)
    def on_selection(self, indices):
        """Chart selection callback; maps per-series indices to data rows."""
        # When points on the scatter plot are selected, this method is called.
        # Variable indices contains indices of selected points **per each
        # input series** (series in the options object above).
        # Luckily, we kept original data indices that form each of the
        # series ...
        self.indices = list(chain.from_iterable(
            self.series_rows[i][selected]
            for i, selected in enumerate(indices)
        ))
        # Let's give the user some feedback
        self.n_selected = len(self.indices)
        # And that's it, we can commit the output!
        self.commit()
    def commit(self):
        """Send the currently selected rows (or None) on the output channel."""
        self.send('Selected Data',
                  self.data[self.indices] if self.indices else None)
    def send_report(self):
        """Add the data summary and the chart SVG to the widget report."""
        self.report_data('Data', self.data)
        self.report_raw('Scatter plot', self.scatter.svg())
def main():
    """Stand-alone demo: show the widget loaded with the iris dataset."""
    from PyQt4.QtGui import QApplication
    application = QApplication([])
    scatter_widget = OWScatterPlot()
    scatter_widget.set_data(Table("iris"))
    scatter_widget.show()
    application.exec_()
if __name__ == "__main__":
    # Run the demo when the module is executed as a script.
    main()
| {
"repo_name": "qPCR4vir/orange3",
"path": "doc/development/source/code/owScatterplot.py",
"copies": "5",
"size": "8606",
"license": "bsd-2-clause",
"hash": -8616552769513734000,
"line_mean": 37.7567567568,
"line_max": 79,
"alpha_frac": 0.5678754068,
"autogenerated": false,
"ratio": 4.336693548387097,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7404568955187097,
"avg_score": null,
"num_lines": null
} |
"""A scenario featuring parametrized tests.
"""
import pytest
def test_multiple(ctestdir):
    """Parametrized tests with multi-parent dependencies.

    test_a fails only for (1, 1) (named "a4"); every test_b instance that
    depends on "a4" is skipped, and the test_c instances are skipped
    transitively: 7 passed, 5 skipped, 1 failed.
    """
    ctestdir.makepyfile("""
        import pytest
        _md = pytest.mark.dependency

        @pytest.mark.parametrize("x,y", [
            pytest.param(0, 0, marks=_md(name="a1")),
            pytest.param(0, 1, marks=_md(name="a2")),
            pytest.param(1, 0, marks=_md(name="a3")),
            pytest.param(1, 1, marks=_md(name="a4"))
        ])
        def test_a(x,y):
            assert x==0 or y==0

        @pytest.mark.parametrize("u,v", [
            pytest.param(1, 2, marks=_md(name="b1", depends=["a1", "a2"])),
            pytest.param(1, 3, marks=_md(name="b2", depends=["a1", "a3"])),
            pytest.param(1, 4, marks=_md(name="b3", depends=["a1", "a4"])),
            pytest.param(2, 3, marks=_md(name="b4", depends=["a2", "a3"])),
            pytest.param(2, 4, marks=_md(name="b5", depends=["a2", "a4"])),
            pytest.param(3, 4, marks=_md(name="b6", depends=["a3", "a4"]))
        ])
        def test_b(u,v):
            pass

        @pytest.mark.parametrize("w", [
            pytest.param(1, marks=_md(name="c1", depends=["b1", "b3", "b5"])),
            pytest.param(2, marks=_md(name="c2", depends=["b1", "b3", "b6"])),
            pytest.param(3, marks=_md(name="c3", depends=["b1", "b2", "b4"]))
        ])
        def test_c(w):
            pass
    """)
    result = ctestdir.runpytest("--verbose")
    result.assert_outcomes(passed=7, skipped=5, failed=1)
    # Verify the exact per-instance outcomes, not just the totals.
    result.stdout.re_match_lines(r"""
        .*::test_a\[0-0\] PASSED
        .*::test_a\[0-1\] PASSED
        .*::test_a\[1-0\] PASSED
        .*::test_a\[1-1\] FAILED
        .*::test_b\[1-2\] PASSED
        .*::test_b\[1-3\] PASSED
        .*::test_b\[1-4\] SKIPPED(?:\s+\(.*\))?
        .*::test_b\[2-3\] PASSED
        .*::test_b\[2-4\] SKIPPED(?:\s+\(.*\))?
        .*::test_b\[3-4\] SKIPPED(?:\s+\(.*\))?
        .*::test_c\[1\] SKIPPED(?:\s+\(.*\))?
        .*::test_c\[2\] SKIPPED(?:\s+\(.*\))?
        .*::test_c\[3\] PASSED
    """)
| {
"repo_name": "RKrahl/pytest-dependency",
"path": "tests/test_03_param.py",
"copies": "1",
"size": "2044",
"license": "apache-2.0",
"hash": 8803762835841109000,
"line_mean": 34.8596491228,
"line_max": 78,
"alpha_frac": 0.4657534247,
"autogenerated": false,
"ratio": 2.9367816091954024,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.39025350338954023,
"avg_score": null,
"num_lines": null
} |
"""Ascending recurrence algorithms for spherical Bessel functions.
The recurrence relation used is http://dlmf.nist.gov/10.51.E1 .
"""
import numpy as np
from scipy.misc import factorial, factorial2
def recurrence_pattern(n, z, f0, f1):
    """Ascending recurrence for jn, yn, h1n, h2n.

    Applies s_{k+1} = (2k + 1)/z * s_k - s_{k-1} (DLMF 10.51.1), seeded
    with the order-0 and order-1 function values.

    Args:
        n: Non-negative integer order of the desired function.
        z: Argument of the function (float or complex scalar, nonzero).
        f0: Value of the order-0 function at z.
        f1: Value of the order-1 function at z.

    Returns:
        The order-n function value, or infinity as soon as the recurrence
        overflows.
    """
    if n == 0:
        return f0
    if n == 1:
        return f1
    s0, s1 = f0, f1
    # Fix: the original used the Python-2-only ``xrange``, which is a
    # NameError under Python 3; ``range`` iterates identically.
    for idx in range(n - 1):
        sn = (2*idx + 3)/z*s1 - s0
        # Bail out early once the value has overflowed to infinity.
        if np.isinf(sn):
            return sn
        s0, s1 = s1, sn
    return sn
def modified_recurrence_pattern(n, z, f0, f1):
    """Ascending recurrence for i1n, i2n, (-1)^n * kn.

    Applies s_{k+1} = s_{k-1} - (2k + 1)/z * s_k (the modified-function
    analogue of DLMF 10.51.1), seeded with the order-0 and order-1 values.

    Args:
        n: Non-negative integer order of the desired function.
        z: Argument of the function (float or complex scalar, nonzero).
        f0: Value of the order-0 function at z.
        f1: Value of the order-1 function at z.

    Returns:
        The order-n function value, or infinity as soon as the recurrence
        overflows.
    """
    if n == 0:
        return f0
    if n == 1:
        return f1
    s0, s1 = f0, f1
    # Fix: the original used the Python-2-only ``xrange``, which is a
    # NameError under Python 3; ``range`` iterates identically.
    for idx in range(n - 1):
        sn = -(2*idx + 3)/z*s1 + s0
        # Bail out early once the value has overflowed to infinity.
        if np.isinf(sn):
            return sn
        s0, s1 = s1, sn
    return sn
def v_recurrence_pattern(n, z, f0, f1):
    """Vectorised ascending recurrence for jn, yn, h1n, h2n.

    Unlike recurrence_pattern, *n* and *z* may be broadcastable numpy
    arrays; the result has the broadcast shape of ``n + z``.

    Args:
        n: Array of non-negative integer orders.
        z: Array of arguments, broadcastable against n.
        f0: Order-0 function value(s) at z.
        f1: Order-1 function value(s) at z.

    Returns:
        Array holding the order n[i] function value at each position.
    """
    # NOTE(review): the original author reported that this routine
    # "seems correct but produces seg faults" — verify before relying on it.
    out = np.empty((n + z).shape)
    s0 = np.ones(shape=out.shape)*f0
    s1 = np.ones(shape=out.shape)*f1
    out[n == 0] = s0[n == 0]
    out[n == 1] = s1[n == 1]
    # Fix: the original used the Python-2-only ``xrange``, which is a
    # NameError under Python 3; ``range`` iterates identically.
    for idx in range(int(np.max(n)) - 1):
        sn = (2*idx + 3)/z*s1 - s0
        # Scatter the just-computed order (idx + 2) into the output.
        # Would an "if idx + 2 in n" speed this up?
        out[n == idx + 2] = sn[n == idx + 2]
        s0 = s1
        s1 = sn
    return out
@np.vectorize
def sph_jn(n, z):
    """Spherical Bessel function of the first kind, j_n(z)."""
    j0 = np.sin(z)/z
    j1 = np.sin(z)/z**2 - np.cos(z)/z
    return recurrence_pattern(n, z, j0, j1)
@np.vectorize
def sph_yn(n, z):
    """Spherical Bessel function of the second kind, y_n(z)."""
    y0 = -np.cos(z)/z
    y1 = -np.cos(z)/z**2 - np.sin(z)/z
    return recurrence_pattern(n, z, y0, y1)
@np.vectorize
def sph_i1n(n, z):
    """Modified spherical Bessel function of the first kind, i^(1)_n(z)."""
    i0 = np.sinh(z)/z
    i1 = -np.sinh(z)/z**2 + np.cosh(z)/z
    return modified_recurrence_pattern(n, z, i0, i1)
@np.vectorize
def sph_i2n(n, z):
    """Modified spherical Bessel function of the first kind, i^(2)_n(z)."""
    i0 = np.cosh(z)/z
    i1 = -np.cosh(z)/z**2 + np.sinh(z)/z
    return modified_recurrence_pattern(n, z, i0, i1)
@np.vectorize
def sph_kn(n, z):
    """Modified spherical Bessel function of the second kind, k_n(z)."""
    k0 = np.pi/2*np.exp(-z)/z
    k1 = np.pi/2*np.exp(-z)*(1/z + 1/z**2)
    # The modified recurrence yields (-1)^n * k_n, hence the sign factor.
    return (-1)**n * modified_recurrence_pattern(n, z, k0, k1)
@np.vectorize
def sph_h1n(n, z):
    """Spherical Hankel function of the first kind, h^(1)_n(z)."""
    h0 = -1j*np.exp(1j*z)/z
    h1 = -(1j/z + 1)*np.exp(1j*z)/z
    return recurrence_pattern(n, z, h0, h1)
@np.vectorize
def sph_h2n(n, z):
    """Spherical Hankel function of the second kind, h^(2)_n(z)."""
    h0 = 1j*np.exp(-1j*z)/z
    h1 = (1j/z - 1)*np.exp(-1j*z)/z
    return recurrence_pattern(n, z, h0, h1)
| {
"repo_name": "tpudlik/sbf",
"path": "algos/a_recur.py",
"copies": "1",
"size": "2634",
"license": "mit",
"hash": 2211320959727777000,
"line_mean": 26.1546391753,
"line_max": 71,
"alpha_frac": 0.4677296887,
"autogenerated": false,
"ratio": 2.7961783439490446,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3763908032649044,
"avg_score": null,
"num_lines": null
} |
"""A `SceneEditor` for the `SceneModel`.
"""
# Authors: Prabhu Ramachandran <prabhu [at] aero.iitb.ac.in>
# Robert Kern <robert.kern [at] gmail.com>
#
# Copyright (c) 2007, Enthought, Inc.
# License: BSD Style.
import os
from pyface.qt import QtGui
# Enthought library imports.
from traits.api import Any, Bool, Callable
from traitsui.qt4.editor import Editor
from traitsui.basic_editor_factory import BasicEditorFactory
from decorated_scene import DecoratedScene
#####################################################################
# `_SceneEditor` class
#####################################################################
class _SceneEditor(Editor):
    """ An editor for SceneModels.

    Bridges a SceneModel (the `value` trait) and a concrete TVTK scene
    widget, keeping actors, widgets and display traits synchronized in
    both directions via Traits notifications.
    """
    # The editor is scrollable, so override the default.
    scrollable = Bool(True)
    # Internal GUI traits.
    _scene = Any()
    #### Public 'Editor' interface #############################################
    def init(self, parent):
        """ Finishes initializing the editor by creating the underlying toolkit
            widget.
        """
        # NOTE(review): 'factory' is assigned but never used in this method.
        factory = self.factory
        self.control = QtGui.QWidget()
        lay = QtGui.QVBoxLayout(self.control)
        lay.setContentsMargins(0, 0, 0, 0)
        # A SceneModel may be displayed by at most one editor at a time.
        assert self.value.scene_editor is None, \
            "The SceneModel may only have one active editor!"
        self._create_scene()
        self.value.activated = True
    def update_editor(self):
        """ Updates the editor when the object trait changes external to the
            editor.
        """
        # Everything should really be handled elsewhere in trait notifications.
        # Just pass here.
        pass
    def dispose(self):
        """ Disposes of the contents of an editor.
        """
        # Remove notifications.
        self.value.closing = True
        self.value.scene_editor = None
        self._setup_scene_notifications(remove=True)
        # Remove the current scene.
        if self._scene is not None:
            self._scene.close()
            self._scene = None
        # This will destroy self.control and all of its children, including the
        # scene's control.
        super(_SceneEditor, self).dispose()
    #### Private '_SceneEditor' interface ##################################
    def _create_scene(self):
        """ Create the TVTK scene widget.
        """
        factory = self.factory
        self._scene = factory.scene_class(self.control)
        scene = self._scene
        self.value.scene_editor = scene
        # Disable rendering on the scene until we're finished.
        scene.disable_render = True
        # Add all of the actors in the current actor map.
        for obj, actors in self.value.actor_map.items():
            self._add_actors_widgets(actors)
        # Add all of the actors in the current actor map.
        # NOTE(review): the comment above is a copy-paste; this call adds the
        # actors from the actor *list*, not the actor map.
        self._add_actors_widgets(self.value.actor_list)
        # Set up Traits notifications.
        self._setup_scene_notifications()
        # Re-enable rendering.
        scene.disable_render = False
        self.control.layout().addWidget(scene.control)
        # Force a render.
        scene.render()
    def _setup_scene_notifications(self, remove=False):
        """ Set up or remove all of the Trait notifications that control the
            scene widget.

        With remove=False the model's current trait values are first
        pushed onto the scene and mutual synchronization is installed;
        with remove=True the same wiring is torn down.
        """
        traits_to_sync = ['foreground', 'anti_aliasing_frames',
                          'stereo', 'background', 'off_screen_rendering',
                          'polygon_smoothing', 'jpeg_progressive',
                          'point_smoothing', 'busy', 'disable_render',
                          'magnification', 'jpeg_quality',
                          'parallel_projection', 'line_smoothing']
        model = self.value
        scene = self._scene
        if not remove:
            # Seed the scene with the model's current values before the
            # mutual sync below takes over.
            scene.set(**model.get(traits_to_sync))
        for trait in traits_to_sync:
            scene.sync_trait(trait, model, mutual=True, remove=remove)
        model.on_trait_change(
            scene.render,
            name='do_render',
            remove=remove,
        )
        model.on_trait_change(
            self._actors_changed,
            name='actor_map_items',
            remove=remove,
        )
        model.on_trait_change(
            self._actor_map_changed,
            name='actor_map',
            remove=remove,
        )
        model.on_trait_change(
            self._actor_list_items_changed,
            name='actor_list_items',
            remove=remove,
        )
        model.on_trait_change(
            self._actor_list_changed,
            name='actor_list',
            remove=remove,
        )
    def _actors_changed(self, event):
        """ Handle the event of the actors in the actor map changing.
        """
        scene = self._scene
        # Temporarily turn off rendering. We (re)store the old value of
        # disable_render because it may already be True.
        old_disable_render = scene.disable_render
        scene.disable_render = True
        try:
            for obj, actors in event.removed.items():
                self._remove_actors_widgets(actors)
            for obj, actors in event.added.items():
                self._add_actors_widgets(actors)
            for obj, actors in event.changed.items():
                # The actors in the event are the old ones. Grab the new ones
                # from the actor map itself.
                self._remove_actors_widgets(actors)
                self._add_actors_widgets(self.value.actor_map[obj])
        finally:
            scene.disable_render = old_disable_render
            scene.render()
    def _actor_map_changed(self, object, name, old, new):
        """ Handle the case when the entire actor map is set to something else.
        """
        scene = self._scene
        # Temporarily turn off rendering. We (re)store the old value of
        # disable_render because it may already be True.
        old_disable_render = scene.disable_render
        scene.disable_render = True
        try:
            for obj, actors in old.items():
                self._remove_actors_widgets(actors)
            for obj, actors in new.items():
                self._add_actors_widgets(actors)
        finally:
            scene.disable_render = old_disable_render
            scene.render()
    def _actor_list_items_changed(self, event):
        # Delegate incremental list edits to the whole-list handler.
        self._actor_list_changed(self.value, 'actor_list', event.removed,
                                 event.added)
    def _actor_list_changed(self, object, name, old, new):
        """ Handle the event of the actors in the actor map changing.
        """
        scene = self._scene
        # Temporarily turn off rendering. We (re)store the old value of
        # disable_render because it may already be True.
        old_disable_render = scene.disable_render
        scene.disable_render = True
        try:
            self._remove_actors_widgets(old)
            self._add_actors_widgets(new)
        finally:
            scene.disable_render = old_disable_render
            scene.render()
    def _separate_actors_widgets(self, actors_widgets):
        """Given a sequence (or single) of actors or widgets, this returns a
        list of just the actors and another of just the widgets.
        """
        if not hasattr(actors_widgets, '__getitem__'):
            actors_widgets = [actors_widgets]
        actors = []
        widgets = []
        for actor in actors_widgets:
            # VTK 3D widgets and interactor observers need add_widgets();
            # everything else is treated as a plain actor.
            if actor.is_a('vtk3DWidget') or actor.is_a('vtkInteractorObserver'):
                widgets.append(actor)
            else:
                actors.append(actor)
        return actors, widgets
    def _add_actors_widgets(self, actors_widgets):
        """Add actors and widgets to scene."""
        scene = self._scene
        actors, widgets = self._separate_actors_widgets(actors_widgets)
        scene.add_actors(actors)
        # Each widget is enabled per the model's enabled_info map
        # (defaulting to enabled).
        enabled_info = self.value.enabled_info
        for widget in widgets:
            scene.add_widgets(widget, enabled_info.get(widget, True))
    def _remove_actors_widgets(self, actors_widgets):
        """Remove actors and widgets from scene."""
        scene = self._scene
        actors, widgets = self._separate_actors_widgets(actors_widgets)
        scene.remove_actors(actors)
        scene.remove_widgets(widgets)
#####################################################################
# `SceneEditor` class
#####################################################################
class SceneEditor(BasicEditorFactory):
    """ A TraitsUI editor factory for SceneModel instances.

    Use ``SceneEditor(scene_class=...)`` in a View to choose the concrete
    scene widget; the default is DecoratedScene.
    """
    # The class of the editor object to be constructed.
    klass = _SceneEditor
    # The class or factory function for creating the actual scene object.
    scene_class = Callable(DecoratedScene)
#### EOF #######################################################################
| {
"repo_name": "liulion/mayavi",
"path": "tvtk/pyface/ui/qt4/scene_editor.py",
"copies": "2",
"size": "8877",
"license": "bsd-3-clause",
"hash": -2363983492995635000,
"line_mean": 33.1423076923,
"line_max": 80,
"alpha_frac": 0.5615635913,
"autogenerated": false,
"ratio": 4.390207715133531,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0008350747481182263,
"num_lines": 260
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.