import datetime
from django.utils.translation import ugettext as _
from django.db.models import Sum, Count
from django import forms
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.contrib import messages
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required, user_passes_test
from django.contrib.auth import authenticate, login, logout
from django.conf import settings
from django.template import RequestContext
from django.views.decorators.csrf import csrf_exempt
from .models import Userdata, Spiel, Tipp, Runde
from misc.tools import rtr
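# Background colors (hex) used when rendering a Tipp, keyed by its scoring outcome.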
color_dict = { Tipp.NULL: 'EEEEEE', Tipp.TENDENZ: 'CCFFCC', Tipp.TORDIFFERENZ: '99FF99', Tipp.ERGEBNIS: '55FF55' }
@login_required # FIXME msg
def liste(request):
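    # "Now" shifted by one hour (presumably a timezone or grace-period offset);
    # games after this cutoff are still open for tipps, earlier ones are history.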
t = datetime.datetime.now() + datetime.timedelta(hours=1)
spiele = list(Spiel.objects.filter(runde__freigabe__gt=0).filter(datum__gte=t).order_by('datum', 'mannschaft1'))
spiele_alt = list(Spiel.objects.filter(runde__freigabe__gt=0).filter(datum__lt=t).order_by('-datum', 'mannschaft1'))
if request.POST:
for s in spiele:
tipps = Tipp.objects.filter(user=request.user.id).filter(spiel=s.id)
if tipps:
s.tt1, s.tt2 = tipps[0].tore1, tipps[0].tore2
s.tc = '99FF99'
else:
s.tc = 'EEEEEE'
if not s.tippbar(): continue
try:
t1, t2 = request.POST['s'+str(s.id)+'t1'], request.POST['s'+str(s.id)+'t2']
assert t1 != '' or t2 != ''
            except (KeyError, AssertionError):
continue
try:
t1 = int(t1)
assert t1 >= 0
t2 = int(t2)
assert t2 >= 0
            except (ValueError, AssertionError):
s.tc = 'FF9999'
continue
s.tt1, s.tt2 = t1, t2
s.tc = '99FF99'
if tipps:
if tipps[0].tore1 != t1 or tipps[0].tore2 != t2:
tipp = tipps[0]
tipp.tore1, tipp.tore2 = t1, t2
tipp.save()
else:
tipp = Tipp(spiel=s, user=request.user, tore1=t1, tore2=t2, punkte=0)
tipp.save()
else:
for s in spiele:
s.tc = 'EEEEEE'
tipps = Tipp.objects.filter(user=request.user.id).filter(spiel=s.id)
if tipps:
s.tt1 = tipps[0].tore1
s.tt2 = tipps[0].tore2
s.tc = '99FF99'
for s in spiele_alt:
s.tc = 'CCCCCC'
tipps = Tipp.objects.filter(user=request.user.id).filter(spiel=s.id)
if tipps:
s.tt1 = tipps[0].tore1
s.tt2 = tipps[0].tore2
s.tc = color_dict[tipps[0].punkte]
return rtr(request, 'liste', spiele=spiele, spiele_alt=spiele_alt, current_time=str(datetime.datetime.now()))
@login_required
def andere(request, sortby=None):
return stats(request, sortby, 10)
@login_required
def anderek(request, sortby=None):
return stats(request, sortby)
@login_required
def stats(request, sortby=None, limit=None):
ud = Userdata.objects.get(user=request.user)
t = datetime.datetime.now() + datetime.timedelta(hours=1)
sp = Spiel.objects.filter(runde__freigabe__exact=2).filter(datum__lt=t).order_by('-datum')
if limit is None:
spiele = sp
else:
spiele = sp[:limit]
slist = []
for s in spiele:
if s.tippbar(): continue
slist.append(s)
if sortby == 'byteam':
ulist = list(Userdata.objects.filter(team=ud.team).order_by('platz'))
ulist.extend(list(Userdata.objects.exclude(team=ud.team).order_by('team', 'platz')))
else:
        if ud.friends.count() > 1:  # a user always counts as their own friend
ulist = list(ud.friends.all().order_by('platz'))
ulist.extend(list(Userdata.objects.exclude(id__in=ud.friends.all()).order_by('platz')))
else:
ulist = list(Userdata.objects.all().order_by('platz'))
team_offset = 0
team = ''
for ctr, u in enumerate(ulist):
if sortby == 'byteam':
if u.team != team:
                # a new team block starts at this position in ulist
team = u.team
team_offset = ctr
u.team_pl = ctr + 1 - team_offset
u.li = []
for s in slist:
tipps = Tipp.objects.filter(user=u.user, spiel=s.id)
if tipps:
t = tipps[0]
u.li.append((str(t.tore1)+':'+str(t.tore2), color_dict[t.punkte]))
else:
u.li.append(('-', 'CCCCCC'))
return rtr(request, 'andere', slist=slist, ud=ud, users=ulist, limit=limit,
zeige_plaetze=(ud.platz != 0 and sortby!='byteam'))
@login_required
@csrf_exempt
def toggle_friend(request, id):
ud = Userdata.objects.get(user=request.user)
fr = Userdata.objects.get(id=id)
if ud != fr:
if fr in ud.friends.all():
ud.friends.remove(fr)
else:
ud.friends.add(fr)
    return HttpResponse("{}", content_type="application/json")
@login_required
def stat_teams(request):
teams = []
for team, teamname in settings.TEAM_CHOICES:
num_players = Userdata.objects.filter(team=team).count()
if num_players:
sum_pts = Userdata.objects.filter(team=team).aggregate(Sum('punkte'))['punkte__sum']
            avg = sum_pts * 1.0 / num_players
            teams.append((avg, teamname, num_players, sum_pts, '%.2f' % avg))
    teams.sort(reverse=True)
return rtr(request, 'stat_teams', teams=teams)
|
{
"content_hash": "d922da97401565d5394ab44646aa4c0e",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 120,
"avg_line_length": 37.1764705882353,
"alnum_prop": 0.5777074542897328,
"repo_name": "ugoertz/tippspiel",
"id": "ef688f4fdcfe57a1a9985a9b65f29d95d35615f7",
"size": "5688",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tipps/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "14437"
},
{
"name": "Python",
"bytes": "40934"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'TaskStatus'
db.create_table(u'profiles_taskstatus', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('status', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('task', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('t_id', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('error', self.gf('django.db.models.fields.BooleanField')(default=False)),
('traceback', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('last_updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, null=True, blank=True)),
))
db.send_create_signal(u'profiles', ['TaskStatus'])
def backwards(self, orm):
# Deleting model 'TaskStatus'
db.delete_table(u'profiles_taskstatus')
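    # Frozen ORM model definitions captured by South at migration time.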
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 9, 10, 14, 10, 39, 53128)'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 9, 10, 14, 10, 39, 52726)'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'maps.shapefile': {
'Meta': {'object_name': 'ShapeFile'},
'color': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'geo_key_column': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'geo_meta_key_column': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'geom_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label_column': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'shape_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'zoom_threshold': ('django.db.models.fields.IntegerField', [], {'default': '5'})
},
u'profiles.datadomain': {
'Meta': {'ordering': "['weight']", 'object_name': 'DataDomain'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicators': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.Indicator']", 'through': u"orm['profiles.IndicatorDomain']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
'subdomain_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subdomains': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.DataDomain']", 'symmetrical': 'False', 'blank': 'True'}),
'weight': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
u'profiles.datapoint': {
'Meta': {'unique_together': "(('indicator', 'record', 'time'),)", 'object_name': 'DataPoint'},
'change_from_time': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'datapoint_as_change_from'", 'null': 'True', 'to': u"orm['profiles.Time']"}),
'change_to_time': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'datapoint_as_change_to'", 'null': 'True', 'to': u"orm['profiles.Time']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'record': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoRecord']"}),
'time': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Time']", 'null': 'True'})
},
u'profiles.datasource': {
'Meta': {'object_name': 'DataSource'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'implementation': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
},
u'profiles.denominator': {
'Meta': {'object_name': 'Denominator'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'multiplier': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'sort': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
u'profiles.denominatorpart': {
'Meta': {'object_name': 'DenominatorPart'},
'data': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'data_source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataSource']"}),
'denominator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Denominator']"}),
'formula': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'part': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.IndicatorPart']"})
},
u'profiles.geolevel': {
'Meta': {'object_name': 'GeoLevel'},
'data_sources': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.DataSource']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoLevel']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200', 'db_index': 'True'})
},
u'profiles.georecord': {
'Meta': {'unique_together': "(('slug', 'level'), ('level', 'geo_id', 'custom_name', 'owner'))", 'object_name': 'GeoRecord'},
'components': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'components_rel_+'", 'blank': 'True', 'to': u"orm['profiles.GeoRecord']"}),
'custom_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'geo_id': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'geo_searchable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoLevel']"}),
'mappings': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'mappings_rel_+'", 'blank': 'True', 'to': u"orm['profiles.GeoRecord']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoRecord']", 'null': 'True', 'blank': 'True'}),
'shapefile': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maps.ShapeFile']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '100', 'blank': 'True'})
},
u'profiles.indicator': {
'Meta': {'object_name': 'Indicator'},
'data_domains': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.DataDomain']", 'through': u"orm['profiles.IndicatorDomain']", 'symmetrical': 'False'}),
'data_type': ('django.db.models.fields.CharField', [], {'default': "'COUNT'", 'max_length': '30'}),
'display_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'display_distribution': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'display_percent': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_generated_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'levels': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.GeoLevel']", 'symmetrical': 'False'}),
'limitations': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'long_definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'purpose': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'routine_use': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'short_definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
'source': ('django.db.models.fields.CharField', [], {'default': "'U.S. Census Bureau'", 'max_length': '300', 'blank': 'True'}),
'universe': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'})
},
u'profiles.indicatordomain': {
'Meta': {'object_name': 'IndicatorDomain'},
'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'domain': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataDomain']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"})
},
u'profiles.indicatorpart': {
'Meta': {'object_name': 'IndicatorPart'},
'data': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'data_source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataSource']"}),
'formula': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'time': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Time']"})
},
u'profiles.precalculatedvalue': {
'Meta': {'object_name': 'PrecalculatedValue'},
'data_source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataSource']"}),
'geo_record': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoRecord']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'table': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'value': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
u'profiles.taskstatus': {
'Meta': {'object_name': 'TaskStatus'},
'error': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
't_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'task': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'traceback': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
u'profiles.time': {
'Meta': {'object_name': 'Time'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'sort': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '1'})
},
u'profiles.value': {
'Meta': {'object_name': 'Value'},
'datapoint': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataPoint']"}),
'denominator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Denominator']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moe': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'number': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'})
}
}
complete_apps = ['profiles']
|
{
"content_hash": "4f914e37cc98b299e885a3bfccc3c017",
"timestamp": "",
"source": "github",
"line_count": 222,
"max_line_length": 194,
"avg_line_length": 81.97297297297297,
"alnum_prop": 0.5553907022749752,
"repo_name": "ProvidencePlan/Profiles",
"id": "dc085382b5408d370e09666bbd90cf733fc16585",
"size": "18216",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "communityprofiles/profiles/oldmigrations/0050_auto__add_taskstatus.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "132319"
},
{
"name": "HTML",
"bytes": "146060"
},
{
"name": "JavaScript",
"bytes": "188204"
},
{
"name": "Python",
"bytes": "2668150"
},
{
"name": "Ruby",
"bytes": "4727"
},
{
"name": "Shell",
"bytes": "745"
}
],
"symlink_target": ""
}
|
import deeplearn
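# 'deeplearn' is the C extension module built by this repository (libdeep-python).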
# Rather than showing numbers, show the species names
species = ["Iris-setosa", "Iris-versicolor", "Iris-virginica"]
# Load the neural network
deeplearn.load("result.nn")
# These dimensions are similar to those which exist in the
# data set, but are adjusted slightly so that the network
# has never seen these exact values before
print "Expected: " + species[0]
deeplearn.test([5.44, 3.436, 1.667, 0.214])
print "Returned: " + species[deeplearn.getClass()]
print "\nExpected: " + species[1]
deeplearn.test([6.14, 2.75, 4.04, 1.32])
print "Returned: " + species[deeplearn.getClass()]
print "\nExpected: " + species[2]
deeplearn.test([6.71, 3.14, 5.92, 2.29])
print "Returned: " + species[deeplearn.getClass()]
|
{
"content_hash": "ed618e52a657dc543bea512c98df2adb",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 62,
"avg_line_length": 29.36,
"alnum_prop": 0.7125340599455041,
"repo_name": "xypan1232/libdeep-python",
"id": "4094e48ce92591d43a21745a8d94cde5a0b1e579",
"size": "753",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/iris/test.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "25165"
},
{
"name": "Makefile",
"bytes": "378"
},
{
"name": "Python",
"bytes": "1801"
}
],
"symlink_target": ""
}
|
"""
Installation script:
To release a new version to PyPi:
- Ensure the version is correctly set in accounting.__init__.py
- Run:
`python setup.py sdist`
`twine upload dist/*`
"""
from setuptools import setup, find_packages
import os
import sys
from accounting import get_version
PROJECT_DIR = os.path.dirname(__file__)
setup(name='django-accounting',
version=get_version().replace(' ', '-'),
url='https://github.com/dulacp/django-accounting',
author="Pierre Dulac",
author_email="dulacpi@gmail.com",
description="Accounting made accessible for small businesses and "
"sole proprietorships through a simple Django project",
long_description=open(os.path.join(PROJECT_DIR, 'README.rst')).read(),
keywords="Accounting, Django, Money, Cashflow",
license='MIT',
platforms=['linux'],
packages=find_packages(exclude=["tests*"]),
include_package_data=True,
install_requires=[
'django>=1.8.0,<1.9',
# Used to render the forms
'django-bootstrap3==4.11.0',
# Used to improve the forms
'django_select2==5.8.10',
# Used for date/time form fields
'django-datetime-widget>=0.9,<1.0',
# Define beautiful tags
'django-classy-tags==0.5.1',
# Internationalization
'Babel>=1.0,<1.4',
# Date utilities
'python-dateutil>=2.2,<2.3',
],
# See http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: Unix',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Software Development :: Libraries :: Application Frameworks']
)
|
{
"content_hash": "d99463f01f8646f336949036db0ea794",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 81,
"avg_line_length": 35.131147540983605,
"alnum_prop": 0.592627158189454,
"repo_name": "dulaccc/django-accounting",
"id": "a0f9302a5d5fea02d70229ab6e6cb70f39978aa1",
"size": "2165",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2604"
},
{
"name": "HTML",
"bytes": "61744"
},
{
"name": "JavaScript",
"bytes": "11766"
},
{
"name": "Makefile",
"bytes": "982"
},
{
"name": "Python",
"bytes": "181537"
},
{
"name": "Shell",
"bytes": "913"
}
],
"symlink_target": ""
}
|
from __future__ import annotations
import abc
from collections.abc import Callable, Sequence
from logging import getLogger
from typing import Optional
import numpy as np
from numpy.typing import ArrayLike
from pandas import DataFrame, Series
from scipy.interpolate import interp1d
from scipy.special import wofz
import riip.dataframe
from .formulas import formulas_cython_dict, formulas_numpy_dict
logger = getLogger(__package__)
def _ensure_positive_imag(x: ArrayLike) -> np.ndarray:
"""If the imaginary part of x is negative, change it to zero."""
_x = np.asarray(x, dtype=np.complex128)
return _x.real + 1j * _x.imag * (_x.imag > 0)
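# Behavior sketch: a negative imaginary part is clamped to zero, e.g.
# _ensure_positive_imag(1 - 2j) -> (1+0j), while (1 + 2j) passes through unchanged.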
class AbstractMaterial(metaclass=abc.ABCMeta):
"""Abstract base class for materials"""
@abc.abstractmethod
def __init__(self, *args) -> None:
self.label = ""
@abc.abstractmethod
def n(self, wls: ArrayLike) -> np.ndarray:
"""Retrun refractive index at wavelength wls [μm]"""
return np.asarray(wls, dtype=np.float64)
@abc.abstractmethod
def k(self, wls: ArrayLike) -> np.ndarray:
"""Return extinction coefficient at wavelength wls [μm]"""
return np.asarray(wls, dtype=np.float64)
@abc.abstractmethod
def eps(self, wls: ArrayLike) -> np.ndarray:
"""Return permittivity at wavelength wls [μm]"""
return np.asarray(wls, dtype=np.float64)
def bound_check(self, wl: ArrayLike, nk: str) -> None:
pass
def plot(
self,
wls: Sequence | np.ndarray,
comp: str = "n",
fmt1: Optional[str] = "-",
fmt2: Optional[str] = "--",
**kwargs,
) -> None:
"""Plot refractive index, extinction coefficient or permittivity.
Args:
wls (Sequence | np.ndarray): Wavelength coordinates to be plotted [μm].
comp (str): 'n', 'k' or 'eps'
fmt1 (Optional[str]): Plot format for n and Re(eps).
fmt2 (Optional[str]): Plot format for k and Im(eps).
"""
import matplotlib.pyplot as plt
kwargs.setdefault("lw", 4)
kwargs.setdefault("ms", 8)
if comp == "n":
ns = self.n(wls)
plt.plot(wls, ns, fmt1, label=self.label, **kwargs)
plt.ylabel(r"$n$")
elif comp == "k":
ks = self.k(wls)
plt.plot(wls, ks, fmt2, label=self.label, **kwargs)
plt.ylabel(r"$k$")
elif comp == "eps":
eps = self.eps(wls)
(line,) = plt.plot(wls, eps.real, fmt1, label=self.label, **kwargs)
color = line.get_color()
plt.plot(wls, eps.imag, fmt2, color=color, **kwargs)
plt.ylabel(r"$\varepsilon$")
plt.xlabel(r"$\lambda$ $[\mathrm{\mu m}]$")
plt.legend()
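# Plot usage sketch (illustrative; ConstMaterial is defined further below):
#     ConstMaterial({"RI": 1.5}).plot(np.linspace(0.5, 1.5, 101), comp="n")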
class RiiMaterial(AbstractMaterial):
"""This class provide dispersion formula defined in refractiveindex.info database.
Attributes:
catalog: Catalog of data.
raw_data: The experimental data set.
"""
def __init__(
self, id: int, catalog: DataFrame, raw_data: DataFrame, bound_check: bool = True
) -> None:
"""Initialize RiiMaterial
Args:
id (int): ID number
catalog (DataFrame): catalog of Rii_Pandas DataFrame.
raw_data (DataFrame): raw_data of Rii_Pandas DataFrame.
bound_check (bool): True if bound check should be done. Defaults to True.
"""
self.catalog: Series = catalog.loc[id]
# raw_data becomes a Series if it has only 1 row.
self.raw_data: Series | DataFrame = raw_data.loc[id]
self.f = int(self.catalog["formula"])
if self.f > 0:
self.cs = self.raw_data["c"].to_numpy()[:24]
self.formula = lambda x: formulas_numpy_dict[self.f](x, self.cs)
self.label = f"{self.catalog['book']} {self.catalog['page']}"
self.bound_check_flag = bound_check
self.__n = self._func_n()
self.__k = self._func_k()
def bound_check(self, wl: ArrayLike, nk: str) -> None:
"""Raise ValueError if wl is out of bounds"""
_x = np.atleast_1d(wl)
if not self.bound_check_flag:
return
if nk == "n":
wl_min = self.catalog["wl_n_min"]
wl_max = self.catalog["wl_n_max"]
elif nk == "k":
wl_min = self.catalog["wl_k_min"]
wl_max = self.catalog["wl_k_max"]
elif nk == "nk":
wl_min = self.catalog["wl_min"]
wl_max = self.catalog["wl_max"]
else:
raise ValueError("nk must be 'n', 'k', or 'nk'.")
x_min = min(_x)
x_max = max(_x)
if x_min < wl_min or x_max > wl_max:
raise ValueError(
f"Wavelength [{x_min} {x_max}] is out of bounds [{wl_min} {wl_max}][um]"
)
def _func_n(self) -> Callable:
tabulated = self.catalog["tabulated"]
if self.f > 0:
if self.f <= 20:
return self.formula
else:
return lambda x: np.sqrt(_ensure_positive_imag(self.formula(x))).real
elif "n" in tabulated:
num_n = self.catalog["num_n"]
if num_n == 1:
return lambda x: self.raw_data["n"] * np.ones_like(x)
elif num_n < 4:
val = np.mean(self.raw_data["n"])
return lambda x: val * np.ones_like(x)
else:
wls = self.raw_data["wl_n"].to_numpy()[:num_n]
ns = self.raw_data["n"].to_numpy()[:num_n]
if wls[0] < wls[-1]:
fill_value = (ns[0], ns[-1])
else:
fill_value = (ns[-1], ns[0])
return interp1d(
wls,
ns,
kind="cubic",
bounds_error=False,
fill_value=fill_value,
assume_sorted=True,
)
else:
logger.warning("Refractive index is missing and set to zero.")
return lambda x: np.zeros_like(x)
def _func_k(self) -> Callable:
tabulated = self.catalog["tabulated"]
if "k" in tabulated:
num_k = self.catalog["num_k"]
if num_k == 1:
return lambda x: self.raw_data["k"] * np.ones_like(x)
elif num_k < 4:
val = np.mean(self.raw_data["k"])
return lambda x: val * np.ones_like(x)
else:
wls = self.raw_data["wl_k"].to_numpy()[:num_k]
ks = self.raw_data["k"].to_numpy()[:num_k]
if wls[0] < wls[-1]:
fill_value = (ks[0], ks[-1])
else:
fill_value = (ks[-1], ks[0])
return interp1d(
wls,
ks,
kind="cubic",
bounds_error=False,
fill_value=fill_value,
assume_sorted=True,
)
else:
formula = int(self.catalog["formula"])
if formula > 20:
return lambda x: np.sqrt(_ensure_positive_imag(self.formula(x))).imag
else:
logger.warning("Extinction coefficient is missing and set to zero.")
return lambda x: np.zeros_like(x)
def n(self, wl: ArrayLike) -> np.ndarray:
"""Return refractive index at given wavelength.
Args:
wl (ArrayLike): Wavelength [μm].
"""
_wl = np.asarray(wl)
self.bound_check(_wl, "n")
return self.__n(_wl)
def k(self, wl: ArrayLike) -> np.ndarray:
"""Return extinction coefficient at given wavelength.
Args:
wl (ArrayLike): Wavelength [μm].
"""
_wl = np.asarray(wl)
self.bound_check(_wl, "k")
return self.__k(_wl)
def eps(self, wl: ArrayLike) -> np.ndarray:
"""Return complex dielectric constant at given wavelength.
Args:
            wl (ArrayLike): Wavelength [μm].
"""
_wl = np.asarray(wl)
if self.f > 20:
self.bound_check(_wl, "nk")
return self.formula(_wl)
n: np.ndarray = self.n(_wl)
k: np.ndarray = self.k(_wl)
eps = n ** 2 - k ** 2 + 2j * n * k
return eps
class ConstMaterial(AbstractMaterial):
"""A class defines a material with constant permittivity
Attributes:
ce (float | complex): The value of constant permittivity
label (str): A label used in plot
"""
def __init__(self, params: dict) -> None:
"""Initialize Material
Args:
            params (dict): parameter dict containing the following keys and values
'RI' (float | complex): Constant refractive index.
'e' (float | complex): Constant permittivity.
"""
if "RI" in params:
RI: float | complex = params["RI"]
self.ce: float | complex = RI ** 2
self.cn = RI.real
self.ck = RI.imag
self.label = f"RI: {RI}"
if "e" in params:
e = params["e"]
if e != RI ** 2:
raise ValueError("e must be RI ** 2.")
elif "e" in params:
e = params["e"]
self.label = f"eps: {e}"
self.ce = e
ri = np.sqrt(_ensure_positive_imag(e))
self.cn = ri.real
self.ck = ri.imag
else:
raise ValueError("'RI' or 'e' must be specified")
def n(self, wl: ArrayLike) -> np.ndarray:
"""Return refractive index at given wavelength.
Args:
wl (ArrayLike): Wavelength [μm].
"""
return self.cn * np.ones_like(wl)
def k(self, wl: ArrayLike) -> np.ndarray:
"""Return extinction coefficient at given wavelength.
Args:
wl (ArrayLike): Wavelength [μm].
"""
return self.ck * np.ones_like(wl)
def eps(self, wl: ArrayLike) -> np.ndarray:
"""Return complex dielectric constant at given wavelength.
Args:
            wl (ArrayLike): Wavelength [μm].
"""
return self.ce * np.ones_like(wl)
class PEC(ConstMaterial):
"""Perfect Electric Conductor class as a material that has negative large pemittivity
Attributes:
ce (-1e8+0j: complex): The value of constant permittivity
label ('PEC': str): A label used in plot
"""
def __init__(
self,
) -> None:
"""Initialize Material
Args:
rid (RiiDataFrame): Rii_Pandas DataFrame.
"""
self.label = "PEC"
self.ce = -1.0e8
self.cn = 0.0
self.ck = 1.0e4
class Material(AbstractMaterial):
"""A Class that constructs RiiMaterial or ConstMaterial instance depending on the given parameters.
Implement __call__ method that calculate the permittivity at a single value of angular frequency.
Introduce 'im_factor' that is a magnification factor multiplied to the imaginary part of permittivity.
Args:
AbstractMaterial ([type]): [description]
"""
def __init__(
self, params: dict, rid: Optional[riip.dataframe.RiiDataFrame] = None
) -> None:
"""Initialize Material
Args:
            params (dict): parameter dict containing the following keys and values
                'PEC' (bool): Whether the target material is a PEC. Defaults to False.
'id' (int): ID number.
'book' (str): book value in catalog of RiiDataFrame.
'page' (str): page value in catalog of RiiDataFrame.
'RI' (complex): Constant refractive index.
'e' (complex): Constant permittivity.
'bound_check' (bool): True if bound check should be done. Defaults to True.
'im_factor' (float): A magnification factor multiplied to the imaginary part of permittivity. Defaults to 1.0.
rid (RiiDataFrame): Rii_Pandas DataFrame. Defaults to None.
"""
self.params = params
if params.get("PEC", False):
self.material: PEC | ConstMaterial | RiiMaterial = PEC()
self.__ce0: Optional[float | complex] = self.material.ce
self.f = 0
elif "RI" in params or "e" in params:
self.material = ConstMaterial(params)
self.__ce0 = self.material.ce
self.f = 0
elif "id" not in params and ("book" not in params or "page" not in params):
raise ValueError(
"'PEC', 'RI', 'e', 'id', or 'book'-'page' pair must be specified"
)
else:
if rid is None:
rid = riip.dataframe.RiiDataFrame()
if "book" in params and "page" in params:
idx = rid.book_page_to_id(params)
if "id" in "params":
idx != params["id"]
raise ValueError(
"There is an inconsistency between 'id' and 'book'-'page' pair"
)
else:
params["id"] = idx
self.material = RiiMaterial(
params["id"], rid.catalog, rid.raw_data, params.get("bound_check", True)
)
self.catalog = self.material.catalog
self.wl_max = self.catalog["wl_max"]
self.wl_min = self.catalog["wl_min"]
self.raw_data = self.material.raw_data
self.bound_check_flag = self.material.bound_check_flag
self.__ce0 = None
self.f = self.material.f
if self.f != 0:
self.cs = self.material.cs
self.__w: Optional[float | complex] = None
self.__ce: Optional[float | complex] = self.__ce0
self.im_factor = params.get("im_factor", 1.0)
def eps(self, wl: ArrayLike) -> np.ndarray:
"""Return complex dielectric constant at given wavelength.
Args:
            wl (ArrayLike): Wavelength [μm].
"""
e = self.material.eps(wl)
if self.__im_factor != 1.0:
imag = e.imag * self.__im_factor
e = e.real + 1j * imag
return e
def n(self, wl: ArrayLike) -> np.ndarray:
"""Return refractive index at given wavelength.
Args:
wl (ArrayLike): Wavelength [μm].
"""
return np.sqrt(_ensure_positive_imag(self.eps(wl))).real
def k(self, wl: ArrayLike) -> np.ndarray:
"""Return extinction coefficient at given wavelength.
Args:
wl (ArrayLike): Wavelength [μm].
"""
return np.sqrt(_ensure_positive_imag(self.eps(wl))).imag
@property
def im_factor(self) -> float:
return self.__im_factor
@im_factor.setter
def im_factor(self, factor: float) -> None:
self.__w = None
self.__im_factor = factor
self.__ce = self.__ce0
if factor != 1.0:
self.label = self.material.label + f" im_factor: {factor}"
if self.__ce0 is not None and self.__ce0.imag != 0.0:
imag = self.__ce0.imag * factor
self.__ce = self.__ce0.real + 1j * imag
else:
self.label = self.material.label
def __call__(self, w: float | complex) -> float | complex:
"""Return relative permittivity at given angular frequency.
Args:
w (float | complex): A float indicating the angular frequency (vacuum wavenumber ω/c [rad/μm]).
Returns:
float | complex: Relative permittivity at w
"""
if self.__ce is not None:
return self.__ce
if self.__w is None or w != self.__w:
wl = 2 * np.pi / w.real
if self.bound_check_flag and (wl < self.wl_min or wl > self.wl_max):
raise ValueError(
f"Wavelength {wl} is out of bounds [{self.wl_min} {self.wl_max}][um]"
)
if self.f > 0:
if self.f > 20:
self.__e = formulas_cython_dict[self.f](w, self.cs)
else:
_n = formulas_cython_dict[self.f](w.real, self.cs)
_k = self.material.k(2 * np.pi / w.real).item()
self.__e = _n ** 2 - _k ** 2 + 2j * _n * _k
if self.__im_factor != 1.0:
imag = self.__e.imag * self.__im_factor
self.__e = self.__e.real + 1j * imag
else:
self.__e = self.eps(wl).item()
if self.__e.imag == 0.0:
self.__e = self.__e.real
self.__w = w
return self.__e
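# Usage sketch (illustrative; the 'book'/'page' values are hypothetical and must
# exist in the locally installed refractiveindex.info catalog):
#     gold = Material({"book": "Au", "page": "Johnson"})
#     eps_at_633nm = gold(2 * np.pi / 0.6328)  # permittivity at wl = 632.8 nm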
|
{
"content_hash": "b0c141d6212d398e7418ceeb968c6f5f",
"timestamp": "",
"source": "github",
"line_count": 473,
"max_line_length": 126,
"avg_line_length": 35.579281183932345,
"alnum_prop": 0.5207677223839801,
"repo_name": "mnishida/RII_Pandas",
"id": "77fa30876bcea76343e1b3f11f91c9d68da9e346",
"size": "16844",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/riip/material.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Cython",
"bytes": "5777"
},
{
"name": "Makefile",
"bytes": "726"
},
{
"name": "Python",
"bytes": "63540"
}
],
"symlink_target": ""
}
|
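# Build digit -> Portuguese numeral and numeral -> digit lookup tables.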
lista = enumerate('zero um dois três quatro cinco seis sete oito nove'.split())
numero_string = dict(lista)
string_numero = {valor: chave for chave, valor in numero_string.items()}
print(numero_string)
print(string_numero)
def para_numeral(n):
    numeros = []
    for digito in str(n):
        numeros.append(numero_string[int(digito)])
    return ", ".join(numeros)
assert "um" == para_numeral(1)
assert "um, dois" == para_numeral(12)
assert "um, um" == para_numeral(11)
def para_inteiro(string_n):
    string = ""
    lista = string_n.split(", ")
    for digito in lista:
        string += str(string_numero[digito])
    return int(string)
assert 1 == para_inteiro('um')
assert 12 == para_inteiro('um, dois')
|
{
"content_hash": "2585a563825ceb9e6b134be0c5cbd9c8",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 79,
"avg_line_length": 25.310344827586206,
"alnum_prop": 0.6430517711171662,
"repo_name": "igorlimasan/poo-python",
"id": "f763efa97d0a86654d7de1b48c84b73efd878388",
"size": "735",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Tarefa03/Tarefa03.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2200"
}
],
"symlink_target": ""
}
|
from lxml import etree
from webob import exc
from nova.api.openstack.compute.contrib import hypervisors
from nova import context
from nova import db
from nova import exception
from nova import test
from nova.tests.api.openstack import fakes
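# Canned compute-node and instance fixtures shared by the tests below.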
TEST_HYPERS = [
dict(id=1,
service_id=1,
service=dict(id=1,
host="compute1",
binary="nova-compute",
topic="compute_topic",
report_count=5,
disabled=False,
availability_zone="nova"),
vcpus=4,
memory_mb=10 * 1024,
local_gb=250,
vcpus_used=2,
memory_mb_used=5 * 1024,
local_gb_used=125,
hypervisor_type="xen",
hypervisor_version=3,
hypervisor_hostname="hyper1",
free_ram_mb=5 * 1024,
free_disk_gb=125,
current_workload=2,
running_vms=2,
cpu_info='cpu_info',
disk_available_least=100),
dict(id=2,
service_id=2,
service=dict(id=2,
host="compute2",
binary="nova-compute",
topic="compute_topic",
report_count=5,
disabled=False,
availability_zone="nova"),
vcpus=4,
memory_mb=10 * 1024,
local_gb=250,
vcpus_used=2,
memory_mb_used=5 * 1024,
local_gb_used=125,
hypervisor_type="xen",
hypervisor_version=3,
hypervisor_hostname="hyper2",
free_ram_mb=5 * 1024,
free_disk_gb=125,
current_workload=2,
running_vms=2,
cpu_info='cpu_info',
disk_available_least=100)]
TEST_SERVERS = [dict(name="inst1", uuid="uuid1", host="compute1"),
dict(name="inst2", uuid="uuid2", host="compute2"),
dict(name="inst3", uuid="uuid3", host="compute1"),
dict(name="inst4", uuid="uuid4", host="compute2")]
def fake_compute_node_get_all(context):
return TEST_HYPERS
def fake_compute_node_search_by_hypervisor(context, hypervisor_re):
return TEST_HYPERS
def fake_compute_node_get(context, compute_id):
for hyper in TEST_HYPERS:
if hyper['id'] == compute_id:
return hyper
    raise exception.ComputeHostNotFound(host=compute_id)
def fake_compute_node_statistics(context):
result = dict(
count=0,
vcpus=0,
memory_mb=0,
local_gb=0,
vcpus_used=0,
memory_mb_used=0,
local_gb_used=0,
free_ram_mb=0,
free_disk_gb=0,
current_workload=0,
running_vms=0,
disk_available_least=0,
)
for hyper in TEST_HYPERS:
for key in result:
if key == 'count':
result[key] += 1
else:
result[key] += hyper[key]
return result
def fake_instance_get_all_by_host(context, host):
results = []
for inst in TEST_SERVERS:
if inst['host'] == host:
results.append(inst)
return results
class HypervisorsTest(test.TestCase):
def setUp(self):
super(HypervisorsTest, self).setUp()
self.context = context.get_admin_context()
self.controller = hypervisors.HypervisorsController()
self.stubs.Set(db, 'compute_node_get_all', fake_compute_node_get_all)
self.stubs.Set(db, 'compute_node_search_by_hypervisor',
fake_compute_node_search_by_hypervisor)
self.stubs.Set(db, 'compute_node_get',
fake_compute_node_get)
self.stubs.Set(db, 'compute_node_statistics',
fake_compute_node_statistics)
self.stubs.Set(db, 'instance_get_all_by_host',
fake_instance_get_all_by_host)
def test_view_hypervisor_nodetail_noservers(self):
result = self.controller._view_hypervisor(TEST_HYPERS[0], False)
self.assertEqual(result, dict(id=1, hypervisor_hostname="hyper1"))
def test_view_hypervisor_detail_noservers(self):
result = self.controller._view_hypervisor(TEST_HYPERS[0], True)
self.assertEqual(result, dict(
id=1,
hypervisor_hostname="hyper1",
vcpus=4,
memory_mb=10 * 1024,
local_gb=250,
vcpus_used=2,
memory_mb_used=5 * 1024,
local_gb_used=125,
hypervisor_type="xen",
hypervisor_version=3,
free_ram_mb=5 * 1024,
free_disk_gb=125,
current_workload=2,
running_vms=2,
cpu_info='cpu_info',
disk_available_least=100,
service=dict(id=1, host='compute1')))
def test_view_hypervisor_servers(self):
result = self.controller._view_hypervisor(TEST_HYPERS[0], False,
TEST_SERVERS)
self.assertEqual(result, dict(
id=1,
hypervisor_hostname="hyper1",
servers=[
dict(name="inst1", uuid="uuid1"),
dict(name="inst2", uuid="uuid2"),
dict(name="inst3", uuid="uuid3"),
dict(name="inst4", uuid="uuid4")]))
def test_index(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-hypervisors')
result = self.controller.index(req)
self.assertEqual(result, dict(hypervisors=[
dict(id=1, hypervisor_hostname="hyper1"),
dict(id=2, hypervisor_hostname="hyper2")]))
def test_detail(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-hypervisors/detail')
result = self.controller.detail(req)
self.assertEqual(result, dict(hypervisors=[
dict(id=1,
service=dict(id=1, host="compute1"),
vcpus=4,
memory_mb=10 * 1024,
local_gb=250,
vcpus_used=2,
memory_mb_used=5 * 1024,
local_gb_used=125,
hypervisor_type="xen",
hypervisor_version=3,
hypervisor_hostname="hyper1",
free_ram_mb=5 * 1024,
free_disk_gb=125,
current_workload=2,
running_vms=2,
cpu_info='cpu_info',
disk_available_least=100),
dict(id=2,
service=dict(id=2, host="compute2"),
vcpus=4,
memory_mb=10 * 1024,
local_gb=250,
vcpus_used=2,
memory_mb_used=5 * 1024,
local_gb_used=125,
hypervisor_type="xen",
hypervisor_version=3,
hypervisor_hostname="hyper2",
free_ram_mb=5 * 1024,
free_disk_gb=125,
current_workload=2,
running_vms=2,
cpu_info='cpu_info',
disk_available_least=100)]))
def test_show_noid(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-hypervisors/3')
self.assertRaises(exc.HTTPNotFound, self.controller.show, req, '3')
def test_show_withid(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-hypervisors/1')
result = self.controller.show(req, '1')
self.assertEqual(result, dict(hypervisor=dict(
id=1,
service=dict(id=1, host="compute1"),
vcpus=4,
memory_mb=10 * 1024,
local_gb=250,
vcpus_used=2,
memory_mb_used=5 * 1024,
local_gb_used=125,
hypervisor_type="xen",
hypervisor_version=3,
hypervisor_hostname="hyper1",
free_ram_mb=5 * 1024,
free_disk_gb=125,
current_workload=2,
running_vms=2,
cpu_info='cpu_info',
disk_available_least=100)))
def test_uptime_noid(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-hypervisors/3')
self.assertRaises(exc.HTTPNotFound, self.controller.show, req, '3')
def test_uptime_notimplemented(self):
def fake_get_host_uptime(context, hyp):
raise exc.HTTPNotImplemented()
self.stubs.Set(self.controller.api, 'get_host_uptime',
fake_get_host_uptime)
req = fakes.HTTPRequest.blank('/v2/fake/os-hypervisors/1')
self.assertRaises(exc.HTTPNotImplemented,
self.controller.uptime, req, '1')
def test_uptime_implemented(self):
def fake_get_host_uptime(context, hyp):
return "fake uptime"
self.stubs.Set(self.controller.api, 'get_host_uptime',
fake_get_host_uptime)
req = fakes.HTTPRequest.blank('/v2/fake/os-hypervisors/1')
result = self.controller.uptime(req, '1')
self.assertEqual(result, dict(hypervisor=dict(
id=1,
hypervisor_hostname="hyper1",
uptime="fake uptime")))
def test_search(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-hypervisors/hyper/search')
result = self.controller.search(req, 'hyper')
self.assertEqual(result, dict(hypervisors=[
dict(id=1, hypervisor_hostname="hyper1"),
dict(id=2, hypervisor_hostname="hyper2")]))
def test_servers(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-hypervisors/hyper/servers')
result = self.controller.servers(req, 'hyper')
self.assertEqual(result, dict(hypervisors=[
dict(id=1,
hypervisor_hostname="hyper1",
servers=[
dict(name="inst1", uuid="uuid1"),
dict(name="inst3", uuid="uuid3")]),
dict(id=2,
hypervisor_hostname="hyper2",
servers=[
dict(name="inst2", uuid="uuid2"),
dict(name="inst4", uuid="uuid4")])]))
def test_statistics(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-hypervisors/statistics')
result = self.controller.statistics(req)
self.assertEqual(result, dict(hypervisor_statistics=dict(
count=2,
vcpus=8,
memory_mb=20 * 1024,
local_gb=500,
vcpus_used=4,
memory_mb_used=10 * 1024,
local_gb_used=250,
free_ram_mb=10 * 1024,
free_disk_gb=250,
current_workload=4,
running_vms=4,
disk_available_least=200)))
class HypervisorsSerializersTest(test.TestCase):
def compare_to_exemplar(self, exemplar, hyper):
# Check attributes
for key, value in exemplar.items():
if key in ('service', 'servers'):
# These turn into child elements and get tested
# separately below...
continue
self.assertEqual(str(value), hyper.get(key))
# Check child elements
required_children = set([child for child in ('service', 'servers')
if child in exemplar])
for child in hyper:
self.assertTrue(child.tag in required_children)
required_children.remove(child.tag)
# Check the node...
if child.tag == 'service':
for key, value in exemplar['service'].items():
self.assertEqual(str(value), child.get(key))
elif child.tag == 'servers':
for idx, grandchild in enumerate(child):
self.assertEqual('server', grandchild.tag)
for key, value in exemplar['servers'][idx].items():
self.assertEqual(str(value), grandchild.get(key))
# Are they all accounted for?
self.assertEqual(len(required_children), 0)
def test_index_serializer(self):
serializer = hypervisors.HypervisorIndexTemplate()
exemplar = dict(hypervisors=[
dict(hypervisor_hostname="hyper1",
id=1),
dict(hypervisor_hostname="hyper2",
id=2)])
text = serializer.serialize(exemplar)
tree = etree.fromstring(text)
self.assertEqual('hypervisors', tree.tag)
self.assertEqual(len(exemplar['hypervisors']), len(tree))
for idx, hyper in enumerate(tree):
self.assertEqual('hypervisor', hyper.tag)
self.compare_to_exemplar(exemplar['hypervisors'][idx], hyper)
def test_detail_serializer(self):
serializer = hypervisors.HypervisorDetailTemplate()
exemplar = dict(hypervisors=[
dict(hypervisor_hostname="hyper1",
id=1,
vcpus=4,
memory_mb=10 * 1024,
local_gb=500,
vcpus_used=2,
memory_mb_used=5 * 1024,
local_gb_used=250,
hypervisor_type='xen',
hypervisor_version=3,
free_ram_mb=5 * 1024,
free_disk_gb=250,
current_workload=2,
running_vms=2,
cpu_info="json data",
disk_available_least=100,
service=dict(id=1, host="compute1")),
dict(hypervisor_hostname="hyper2",
id=2,
vcpus=4,
memory_mb=10 * 1024,
local_gb=500,
vcpus_used=2,
memory_mb_used=5 * 1024,
local_gb_used=250,
hypervisor_type='xen',
hypervisor_version=3,
free_ram_mb=5 * 1024,
free_disk_gb=250,
current_workload=2,
running_vms=2,
cpu_info="json data",
disk_available_least=100,
service=dict(id=2, host="compute2"))])
text = serializer.serialize(exemplar)
tree = etree.fromstring(text)
self.assertEqual('hypervisors', tree.tag)
self.assertEqual(len(exemplar['hypervisors']), len(tree))
for idx, hyper in enumerate(tree):
self.assertEqual('hypervisor', hyper.tag)
self.compare_to_exemplar(exemplar['hypervisors'][idx], hyper)
def test_show_serializer(self):
serializer = hypervisors.HypervisorTemplate()
exemplar = dict(hypervisor=dict(
hypervisor_hostname="hyper1",
id=1,
vcpus=4,
memory_mb=10 * 1024,
local_gb=500,
vcpus_used=2,
memory_mb_used=5 * 1024,
local_gb_used=250,
hypervisor_type='xen',
hypervisor_version=3,
free_ram_mb=5 * 1024,
free_disk_gb=250,
current_workload=2,
running_vms=2,
cpu_info="json data",
disk_available_least=100,
service=dict(id=1, host="compute1")))
text = serializer.serialize(exemplar)
tree = etree.fromstring(text)
self.assertEqual('hypervisor', tree.tag)
self.compare_to_exemplar(exemplar['hypervisor'], tree)
def test_uptime_serializer(self):
serializer = hypervisors.HypervisorUptimeTemplate()
exemplar = dict(hypervisor=dict(
hypervisor_hostname="hyper1",
id=1,
uptime='fake uptime'))
text = serializer.serialize(exemplar)
tree = etree.fromstring(text)
self.assertEqual('hypervisor', tree.tag)
self.compare_to_exemplar(exemplar['hypervisor'], tree)
def test_servers_serializer(self):
serializer = hypervisors.HypervisorServersTemplate()
exemplar = dict(hypervisors=[
dict(hypervisor_hostname="hyper1",
id=1,
servers=[
dict(name="inst1",
uuid="uuid1"),
dict(name="inst2",
uuid="uuid2")]),
dict(hypervisor_hostname="hyper2",
id=2,
servers=[
dict(name="inst3",
uuid="uuid3"),
dict(name="inst4",
uuid="uuid4")])])
text = serializer.serialize(exemplar)
tree = etree.fromstring(text)
self.assertEqual('hypervisors', tree.tag)
self.assertEqual(len(exemplar['hypervisors']), len(tree))
for idx, hyper in enumerate(tree):
self.assertEqual('hypervisor', hyper.tag)
self.compare_to_exemplar(exemplar['hypervisors'][idx], hyper)
def test_statistics_serializer(self):
serializer = hypervisors.HypervisorStatisticsTemplate()
exemplar = dict(hypervisor_statistics=dict(
count=2,
vcpus=8,
memory_mb=20 * 1024,
local_gb=500,
vcpus_used=4,
memory_mb_used=10 * 1024,
local_gb_used=250,
free_ram_mb=10 * 1024,
free_disk_gb=250,
current_workload=4,
running_vms=4,
disk_available_least=200))
text = serializer.serialize(exemplar)
tree = etree.fromstring(text)
self.assertEqual('hypervisor_statistics', tree.tag)
self.compare_to_exemplar(exemplar['hypervisor_statistics'], tree)
|
{
"content_hash": "89e966e4ea726c26a70cad2273dc656c",
"timestamp": "",
"source": "github",
"line_count": 496,
"max_line_length": 78,
"avg_line_length": 37.23991935483871,
"alnum_prop": 0.5002977640625846,
"repo_name": "tylertian/Openstack",
"id": "740477ca3b3aa3e13bc6fde5729ad8bbc919c457",
"size": "19105",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "openstack F/nova/nova/tests/api/openstack/compute/contrib/test_hypervisors.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "239919"
},
{
"name": "JavaScript",
"bytes": "156942"
},
{
"name": "Python",
"bytes": "16949418"
},
{
"name": "Shell",
"bytes": "96743"
}
],
"symlink_target": ""
}
|
"""Views for the node settings page."""
# -*- coding: utf-8 -*-
from dateutil.parser import parse as dateparse
import httplib as http
import logging
from flask import request, make_response
from framework.exceptions import HTTPError
from addons.base import generic_views
from addons.github.api import GitHubClient
from addons.github.apps import github_hgrid_data
from addons.github.exceptions import GitHubError
from addons.github.serializer import GitHubSerializer
from addons.github.utils import verify_hook_signature, MESSAGES
from website.models import NodeLog
from website.project.decorators import (
must_have_addon, must_be_addon_authorizer,
must_have_permission, must_not_be_registration,
must_be_contributor_or_public, must_be_valid_project,
)
logger = logging.getLogger(__name__)
logging.getLogger('github3').setLevel(logging.WARNING)
logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(logging.WARNING)
SHORT_NAME = 'github'
FULL_NAME = 'GitHub'
############
# Generics #
############
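# Standard add-on views built from the shared generic_views factories.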
github_account_list = generic_views.account_list(
SHORT_NAME,
GitHubSerializer
)
github_import_auth = generic_views.import_auth(
SHORT_NAME,
GitHubSerializer
)
def _get_folders(node_addon, folder_id):
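    # The GitHub add-on has no folder picker; this stub appears to exist only
    # to satisfy the generic_views.folder_list interface.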
pass
github_folder_list = generic_views.folder_list(
SHORT_NAME,
FULL_NAME,
_get_folders
)
github_get_config = generic_views.get_config(
SHORT_NAME,
GitHubSerializer
)
github_deauthorize_node = generic_views.deauthorize_node(
SHORT_NAME
)
#################
# Special Cased #
#################
@must_not_be_registration
@must_have_addon(SHORT_NAME, 'user')
@must_have_addon(SHORT_NAME, 'node')
@must_be_addon_authorizer(SHORT_NAME)
@must_have_permission('write')
def github_set_config(auth, **kwargs):
node_settings = kwargs.get('node_addon', None)
node = kwargs.get('node', None)
user_settings = kwargs.get('user_addon', None)
try:
if not node:
node = node_settings.owner
if not user_settings:
user_settings = node_settings.user_settings
except AttributeError:
raise HTTPError(http.BAD_REQUEST)
# Parse request
github_user_name = request.json.get('github_user', '')
github_repo_name = request.json.get('github_repo', '')
if not github_user_name or not github_repo_name:
raise HTTPError(http.BAD_REQUEST)
# Verify that repo exists and that user can access
connection = GitHubClient(external_account=node_settings.external_account)
repo = connection.repo(github_user_name, github_repo_name)
if repo is None:
if user_settings:
message = (
'Cannot access repo. Either the repo does not exist '
'or your account does not have permission to view it.'
)
else:
message = (
'Cannot access repo.'
)
return {'message': message}, http.BAD_REQUEST
changed = (
github_user_name != node_settings.user or
github_repo_name != node_settings.repo
)
# Update hooks
if changed:
# Delete existing hook, if any
node_settings.delete_hook()
# Update node settings
node_settings.user = github_user_name
node_settings.repo = github_repo_name
# Log repo select
node.add_log(
action='github_repo_linked',
params={
'project': node.parent_id,
'node': node._id,
'github': {
'user': github_user_name,
'repo': github_repo_name,
}
},
auth=auth,
)
# Add new hook
if node_settings.user and node_settings.repo:
node_settings.add_hook(save=False)
node_settings.save()
return {}
@must_be_contributor_or_public
@must_have_addon('github', 'node')
def github_download_starball(node_addon, **kwargs):
archive = kwargs.get('archive', 'tar')
ref = request.args.get('sha', 'master')
connection = GitHubClient(external_account=node_addon.external_account)
headers, data = connection.starball(
node_addon.user, node_addon.repo, archive, ref
)
resp = make_response(data)
for key, value in headers.iteritems():
resp.headers[key] = value
return resp
#########
# HGrid #
#########
@must_be_contributor_or_public
@must_have_addon('github', 'node')
def github_root_folder(*args, **kwargs):
"""View function returning the root container for a GitHub repo. In
contrast to other add-ons, this is exposed via the API for GitHub to
accommodate switching between branches and commits.
"""
node_settings = kwargs['node_addon']
auth = kwargs['auth']
data = request.args.to_dict()
return github_hgrid_data(node_settings, auth=auth, **data)
#########
# Repos #
#########
@must_have_addon(SHORT_NAME, 'user')
@must_have_addon(SHORT_NAME, 'node')
@must_be_addon_authorizer(SHORT_NAME)
@must_have_permission('write')
def github_create_repo(**kwargs):
repo_name = request.json.get('name')
if not repo_name:
raise HTTPError(http.BAD_REQUEST)
node_settings = kwargs['node_addon']
connection = GitHubClient(external_account=node_settings.external_account)
try:
repo = connection.create_repo(repo_name, auto_init=True)
except GitHubError:
# TODO: Check status code
raise HTTPError(http.BAD_REQUEST)
return {
'user': repo.owner.login,
'repo': repo.name,
}
#########
# Hooks #
#########
# TODO: Refactor using NodeLogger
def add_hook_log(node, github, action, path, date, committer, include_urls=False,
sha=None, save=False):
"""Add log event for commit from webhook payload.
:param node: Node to add logs to
:param github: GitHub node settings record
:param path: Path to file
:param date: Date of commit
:param committer: Committer name
:param include_urls: Include URLs in `params`
:param sha: SHA of updated file
:param save: Save changes
"""
github_data = {
'user': github.user,
'repo': github.repo,
}
urls = {}
if include_urls:
# TODO: Move to helper function
url = node.web_url_for('addon_view_or_download_file', path=path, provider=SHORT_NAME)
urls = {
'view': '{0}?ref={1}'.format(url, sha),
'download': '{0}?action=download&ref={1}'.format(url, sha)
}
node.add_log(
action=action,
params={
'project': node.parent_id,
'node': node._id,
'path': path,
'github': github_data,
'urls': urls,
},
auth=None,
foreign_user=committer,
log_date=date,
save=save,
)
@must_be_valid_project
@must_not_be_registration
@must_have_addon('github', 'node')
def github_hook_callback(node_addon, **kwargs):
"""Add logs for commits from outside OSF.
"""
if request.json is None:
return {}
# Fail if hook signature is invalid
verify_hook_signature(
node_addon,
request.data,
request.headers,
)
node = kwargs['node'] or kwargs['project']
payload = request.json
for commit in payload.get('commits', []):
# TODO: Look up OSF user by commit
# Skip if pushed by OSF
if commit['message'] and commit['message'] in MESSAGES.values():
continue
_id = commit['id']
date = dateparse(commit['timestamp'])
committer = commit['committer']['name']
# Add logs
for path in commit.get('added', []):
add_hook_log(
node, node_addon, 'github_' + NodeLog.FILE_ADDED,
path, date, committer, include_urls=True, sha=_id,
)
for path in commit.get('modified', []):
add_hook_log(
node, node_addon, 'github_' + NodeLog.FILE_UPDATED,
path, date, committer, include_urls=True, sha=_id,
)
for path in commit.get('removed', []):
add_hook_log(
node, node_addon, 'github_' + NodeLog.FILE_REMOVED,
path, date, committer,
)
node.save()
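# A minimal sketch of the webhook payload fields consumed above (values
# hypothetical); GitHub's push payload carries many more keys, but only
# these are read by github_hook_callback.
_EXAMPLE_HOOK_PAYLOAD = {
    'commits': [{
        'id': 'abc123',
        'message': 'Update README',
        'timestamp': '2015-01-01T12:00:00Z',
        'committer': {'name': 'Jane Doe'},
        'added': ['README.md'],
        'modified': [],
        'removed': [],
    }],
}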
|
{
"content_hash": "1c2d2932ec72fc2c2c6a034692db97ac",
"timestamp": "",
"source": "github",
"line_count": 310,
"max_line_length": 93,
"avg_line_length": 26.835483870967742,
"alnum_prop": 0.6089674239692271,
"repo_name": "hmoco/osf.io",
"id": "0a3b7da6fd11aac3d37f30da5a4202bd28428322",
"size": "8319",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "addons/github/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "175175"
},
{
"name": "HTML",
"bytes": "193496"
},
{
"name": "JavaScript",
"bytes": "1690469"
},
{
"name": "Mako",
"bytes": "672179"
},
{
"name": "Perl",
"bytes": "13885"
},
{
"name": "Python",
"bytes": "7856328"
}
],
"symlink_target": ""
}
|
from mudwyrm_users.admin.achaea import ScriptState
from mudwyrm_users.admin.achaea.action import Action, Outcome, EventOutcome
from mudwyrm_users.admin.achaea.trigger import Trigger, Alias, OnEvent
from mudwyrm_users.admin.achaea.scripts import achaea
from mudwyrm_users.admin.achaea.scripts import char
from mudwyrm_users.admin.achaea.scripts.actions import all_actions as actions
p = None
s = ScriptState()
def init(processor):
assert processor is not None
global p
p = processor
s.armor_removed = False
################################################################################
@Trigger(r'^You remove a canvas backpack\.$')
def backpack_removed(match):
p.send("wear %s" % char.config['pack'])
#@Trigger(r'^You get \d+ gold sovereigns from a canvas backpack\.$')
#def got_money_from_pack(match):
# p.send("put money in %s" % char.config['pack'])
@Trigger(r'^You begin to wield some gold sovereigns in your (?:right|left) hand\.$')
def money_wielded(match):
p.send("unwield money")
p.send("put money in %s" % char.config['pack'])
@OnEvent('PromptLine', when=lambda: s.armor_removed)
def rewear_armor():
if 'armor' in char.config:
if actions.wear.possible(char.config['armor']):
p.act(actions.wear, char.config['armor'])
s.armor_removed = False
@Trigger(r'^You put .+ into a luxurious black top hat\.$')
def item_put_in_luxurious_black_top_hat(match):
p.send("get money from hat")
p.send("get money from hat")
p.send("get money from hat")
################################################################################
@Trigger(r'^You cease wielding a cavalry shield in your (left|right) hand\.$')
def shield_unwielded(match):
p.act(actions.wield, "shield", match.group(1))
@Trigger(r'^You remove a pair of plain black leather shoes\.$')
def shoes_removed(match):
p.act(actions.wear, "shoes75779")
@Trigger(r"^You remove a man's white silk shirt\.$")
def shirt_removed(match):
p.act(actions.wear, "shirt240754")
@Trigger(r'^You remove a studded black trench coat\.$')
def coat_removed(match):
p.act(actions.wear, "coat366210")
@Trigger(r'^You remove a cowled black wolfskin cloak\.$')
def cloak_removed(match):
p.act(actions.wear, "cloak64612")
@Trigger(r'^You remove a brooch of Thoth\.$')
def brooch_removed(match):
p.act(actions.wear, "brooch368122")
@Trigger(r'^You remove an embossed leather belt of the elements\.$')
def belt_removed(match):
p.act(actions.wear, "belt219230")
@Trigger(r'^You remove a suit of scale mail\.$')
def scalemail_removed(match):
s.armor_removed = True
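# A minimal sketch of the char.config entries this script relies on
# (item IDs hypothetical):
#
# char.config = {
#     'pack': 'backpack123456',   # container re-worn in backpack_removed
#     'armor': 'scalemail78910',  # re-worn by rewear_armor on the next prompt
# }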
|
{
"content_hash": "64f9cf207b37526c9bfc7150dc544d65",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 84,
"avg_line_length": 32.67901234567901,
"alnum_prop": 0.6501700037778617,
"repo_name": "sh-ft/mudwyrm_users",
"id": "540a4e94ecd5a13b55e9342e7657229b1c7cd51e",
"size": "2647",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mudwyrm_users/admin/achaea/scripts/antitheft.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "23536"
},
{
"name": "Python",
"bytes": "273045"
},
{
"name": "Ruby",
"bytes": "339"
}
],
"symlink_target": ""
}
|
import imghdr
import json
import os.path
import string
from copy import copy
from datetime import datetime
from django.conf import settings
from django.core.urlresolvers import reverse
from django.db import models
from django.utils.safestring import mark_safe
import bleach
import commonware.log
import jinja2
from tower import ugettext as _
import amo
import amo.models
from access.models import Group
from addons.models import Addon
from mkt.webapps.models import Webapp
from reviews.models import Review
from tags.models import Tag
from users.helpers import user_link
from users.models import UserProfile
from versions.models import Version
log = commonware.log.getLogger('devhub')
class AppLog(amo.models.ModelBase):
"""
This table is for indexing the activity log by app.
"""
addon = models.ForeignKey(Webapp, db_constraint=False)
activity_log = models.ForeignKey('ActivityLog')
class Meta:
db_table = 'log_activity_app'
ordering = ('-created',)
class CommentLog(amo.models.ModelBase):
"""
This table is for indexing the activity log by comment.
"""
activity_log = models.ForeignKey('ActivityLog')
comments = models.CharField(max_length=255)
class Meta:
db_table = 'log_activity_comment'
ordering = ('-created',)
class VersionLog(amo.models.ModelBase):
"""
This table is for indexing the activity log by version.
"""
activity_log = models.ForeignKey('ActivityLog')
version = models.ForeignKey(Version)
class Meta:
db_table = 'log_activity_version'
ordering = ('-created',)
class UserLog(amo.models.ModelBase):
"""
This table is for indexing the activity log by user.
Note: This includes activity performed unto the user.
"""
activity_log = models.ForeignKey('ActivityLog')
user = models.ForeignKey(UserProfile)
class Meta:
db_table = 'log_activity_user'
ordering = ('-created',)
class GroupLog(amo.models.ModelBase):
"""
This table is for indexing the activity log by access group.
"""
activity_log = models.ForeignKey('ActivityLog')
group = models.ForeignKey(Group)
class Meta:
db_table = 'log_activity_group'
ordering = ('-created',)
class ActivityLogManager(amo.models.ManagerBase):
def for_apps(self, apps):
if isinstance(apps, Webapp):
apps = (apps,)
vals = (AppLog.objects.filter(addon__in=apps)
.values_list('activity_log', flat=True))
if vals:
return self.filter(pk__in=list(vals))
else:
return self.none()
def for_version(self, version):
vals = (VersionLog.objects.filter(version=version)
.values_list('activity_log', flat=True))
return self.filter(pk__in=list(vals))
def for_group(self, group):
return self.filter(grouplog__group=group)
def for_user(self, user):
vals = (UserLog.objects.filter(user=user)
.values_list('activity_log', flat=True))
return self.filter(pk__in=list(vals))
def for_developer(self):
return self.exclude(action__in=amo.LOG_ADMINS + amo.LOG_HIDE_DEVELOPER)
def admin_events(self):
return self.filter(action__in=amo.LOG_ADMINS)
def editor_events(self):
return self.filter(action__in=amo.LOG_EDITORS)
def review_queue(self, webapp=False):
qs = self._by_type(webapp)
return (qs.filter(action__in=amo.LOG_REVIEW_QUEUE)
.exclude(user__id=settings.TASK_USER_ID))
    def total_reviews(self, webapp=False):
        """Return the top users, and their # of reviews."""
        qs = self._by_type(webapp)
return (qs.values('user', 'user__display_name', 'user__username')
.filter(action__in=amo.LOG_REVIEW_QUEUE)
.exclude(user__id=settings.TASK_USER_ID)
.annotate(approval_count=models.Count('id'))
.order_by('-approval_count'))
def monthly_reviews(self, webapp=False):
"""Return the top users for the month, and their # of reviews."""
qs = self._by_type(webapp)
now = datetime.now()
created_date = datetime(now.year, now.month, 1)
return (qs.values('user', 'user__display_name', 'user__username')
.filter(created__gte=created_date,
action__in=amo.LOG_REVIEW_QUEUE)
.exclude(user__id=settings.TASK_USER_ID)
.annotate(approval_count=models.Count('id'))
.order_by('-approval_count'))
def user_position(self, values_qs, user):
try:
return next(i for (i, d) in enumerate(list(values_qs))
if d.get('user') == user.id) + 1
except StopIteration:
return None
def total_reviews_user_position(self, user, webapp=False):
return self.user_position(self.total_reviews(webapp), user)
def monthly_reviews_user_position(self, user, webapp=False):
return self.user_position(self.monthly_reviews(webapp), user)
def _by_type(self, webapp=False):
qs = super(ActivityLogManager, self).get_query_set()
return qs.extra(
tables=['log_activity_app'],
where=['log_activity_app.activity_log_id=log_activity.id'])
class SafeFormatter(string.Formatter):
"""A replacement for str.format that escapes interpolated values."""
def get_field(self, *args, **kw):
# obj is the value getting interpolated into the string.
obj, used_key = super(SafeFormatter, self).get_field(*args, **kw)
return jinja2.escape(obj), used_key
class ActivityLog(amo.models.ModelBase):
TYPES = sorted([(value.id, key) for key, value in amo.LOG.items()])
user = models.ForeignKey('users.UserProfile', null=True)
action = models.SmallIntegerField(choices=TYPES, db_index=True)
_arguments = models.TextField(blank=True, db_column='arguments')
_details = models.TextField(blank=True, db_column='details')
objects = ActivityLogManager()
formatter = SafeFormatter()
class Meta:
db_table = 'log_activity'
ordering = ('-created',)
def f(self, *args, **kw):
"""Calls SafeFormatter.format and returns a Markup string."""
# SafeFormatter escapes everything so this is safe.
return jinja2.Markup(self.formatter.format(*args, **kw))
@property
def arguments(self):
try:
# d is a structure:
# ``d = [{'addons.addon':12}, {'addons.addon':1}, ... ]``
d = json.loads(self._arguments)
        except ValueError:
log.debug('unserializing data from addon_log failed: %s' % self.id)
return None
objs = []
for item in d:
# item has only one element.
model_name, pk = item.items()[0]
if model_name in ('str', 'int', 'null'):
objs.append(pk)
else:
(app_label, model_name) = model_name.split('.')
model = models.loading.get_model(app_label, model_name)
# Cope with soft deleted models.
if hasattr(model, 'with_deleted'):
objs.extend(model.with_deleted.filter(pk=pk))
else:
objs.extend(model.objects.filter(pk=pk))
return objs
@arguments.setter
    def arguments(self, args=None):
"""
Takes an object or a tuple of objects and serializes them and stores it
in the db as a json string.
"""
if args is None:
args = []
if not isinstance(args, (list, tuple)):
args = (args,)
serialize_me = []
for arg in args:
if isinstance(arg, basestring):
serialize_me.append({'str': arg})
elif isinstance(arg, (int, long)):
serialize_me.append({'int': arg})
elif isinstance(arg, tuple):
# Instead of passing an addon instance you can pass a tuple:
# (Addon, 3) for Addon with pk=3
serialize_me.append(dict(((unicode(arg[0]._meta), arg[1]),)))
else:
serialize_me.append(dict(((unicode(arg._meta), arg.pk),)))
self._arguments = json.dumps(serialize_me)
@property
def details(self):
if self._details:
return json.loads(self._details)
@details.setter
def details(self, data):
self._details = json.dumps(data)
@property
def log(self):
return amo.LOG_BY_ID[self.action]
def to_string(self, type_=None):
log_type = amo.LOG_BY_ID[self.action]
if type_ and hasattr(log_type, '%s_format' % type_):
format = getattr(log_type, '%s_format' % type_)
else:
format = log_type.format
# We need to copy arguments so we can remove elements from it
# while we loop over self.arguments.
arguments = copy(self.arguments)
addon = None
review = None
version = None
collection = None
tag = None
group = None
for arg in self.arguments:
if isinstance(arg, Addon) and not addon:
addon = self.f(u'<a href="{0}">{1}</a>',
arg.get_url_path(), arg.name)
arguments.remove(arg)
if isinstance(arg, Review) and not review:
review = self.f(u'<a href="{0}">{1}</a>',
arg.get_url_path(), _('Review'))
arguments.remove(arg)
if isinstance(arg, Version) and not version:
text = _('Version {0}')
version = self.f(text, arg.version)
arguments.remove(arg)
if isinstance(arg, Tag) and not tag:
if arg.can_reverse():
tag = self.f(u'<a href="{0}">{1}</a>',
arg.get_url_path(), arg.tag_text)
else:
tag = self.f('{0}', arg.tag_text)
if isinstance(arg, Group) and not group:
group = arg.name
arguments.remove(arg)
user = user_link(self.user)
try:
kw = dict(addon=addon, review=review, version=version,
collection=collection, tag=tag, user=user, group=group)
return self.f(format, *arguments, **kw)
except (AttributeError, KeyError, IndexError):
log.warning('%d contains garbage data' % (self.id or 0))
return 'Something magical happened.'
def __unicode__(self):
return self.to_string()
def __html__(self):
return self
# TODO: remove once we migrate to CommAttachment (ngoke).
class ActivityLogAttachment(amo.models.ModelBase):
"""
Model for an attachment to an ActivityLog instance. Used by the Marketplace
reviewer tools, where reviewers can attach files to comments made during
the review process.
"""
activity_log = models.ForeignKey('ActivityLog')
filepath = models.CharField(max_length=255)
description = models.CharField(max_length=255, blank=True)
mimetype = models.CharField(max_length=255, blank=True)
class Meta:
db_table = 'log_activity_attachment'
ordering = ('id',)
def get_absolute_url(self):
return reverse('reviewers.apps.review.attachment', args=[self.pk])
def filename(self):
"""
Returns the attachment's file name.
"""
return os.path.basename(self.filepath)
def full_path(self):
"""
Returns the full filesystem path of the attachment.
"""
return os.path.join(settings.REVIEWER_ATTACHMENTS_PATH, self.filepath)
def display_name(self):
"""
Returns a string describing the attachment suitable for front-end
display.
"""
display = self.description if self.description else self.filename()
return mark_safe(bleach.clean(display))
def is_image(self):
"""
Returns a boolean indicating whether the attached file is an image of a
format recognizable by the stdlib imghdr module.
"""
return imghdr.what(self.full_path()) is not None
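# A minimal sketch of the wire format produced by the ``arguments`` setter
# above (pk values hypothetical): strings, ints and model references are each
# wrapped in a single-key dict before being JSON-encoded.
_EXAMPLE_SERIALIZED_ARGUMENTS = (
    '[{"str": "hello"}, {"int": 5}, {"addons.addon": 3}]'
)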
|
{
"content_hash": "15aaae981e28de36803489825c084d84",
"timestamp": "",
"source": "github",
"line_count": 373,
"max_line_length": 79,
"avg_line_length": 33.0911528150134,
"alnum_prop": 0.5928056388236247,
"repo_name": "jinankjain/zamboni",
"id": "e366c234fd1b9f8c9c364484511f8f5232e758c5",
"size": "12343",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/devhub/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import datetime
import qsstats
def _is_empty(series):
for s in series:
for v in s['values']:
if v['y'] > 0:
return False
return True
class SimpleGraph(object):
datetime_xaxis = False
def __init__(self, *args):
self.series = [{
'area': False,
'label': '',
'values': [
{
'x': unit.label,
'y': unit.qs.count()
} for unit in args
]
}]
self.empty = _is_empty(self.series)
class AbstractTimeGraph(object):
class SCOPE:
year = 'years'
month = 'months'
week = 'weeks'
hour = 'hours'
minute = 'minutes'
def __init__(self, interval, *units, **kwargs):
accumulative = kwargs.pop('accumulative', False)
self.series = []
for unit in units:
start = unit.start_date
end = unit.end_date
qss = qsstats.QuerySetStats(unit.qs, unit.date_field)
time_series = qss.time_series(start, end, interval=interval)
if accumulative:
time_series_acc = []
for item in time_series:
try:
time_series_acc.append((item[0], item[1] + time_series_acc[-1][1]))
except IndexError:
# exception when time series is empty
assert len(time_series_acc) == 0
time_series_acc.append(item)
self.series.append({
'area': False,
'label': '%s (acc.)' % unit.label,
'values': self.get_series_values(time_series_acc)
})
self.series.append({
'area': True,
'label': unit.label,
'values': self.get_series_values(time_series)
})
self.empty = _is_empty(self.series)
def get_series_values(self, time_series):
raise NotImplementedError
class AbsoluteTimeGraph(AbstractTimeGraph):
datetime_xaxis = True
def get_series_values(self, time_series):
return map(lambda x: {'x': x[0], 'y': x[1]}, time_series)
class RelativeTimeGraph(AbstractTimeGraph):
datetime_xaxis = False
def get_series_values(self, time_series):
return map(lambda x, i: {'x': i, 'y': x[1]}, time_series, xrange(0, len(time_series)))
class AgeTimeGraph(AbstractTimeGraph):
datetime_xaxis = False
def __init__(self, *args, **kwargs):
self.today = kwargs.pop('today', None)
if self.today is None:
self.today = datetime.datetime.now()
super(AgeTimeGraph, self).__init__(*args, **kwargs)
def get_series_values(self, time_series):
time_series = list(reversed(time_series)) # We need to print values in the same order in which they are shown in the graph
return map(lambda x, i: {'x': self.today.year - x[0].year, 'y': x[1]}, time_series, xrange(0, len(time_series)))
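# A minimal usage sketch (model and field names hypothetical); each ``unit``
# only needs label/qs/start_date/end_date/date_field attributes:
#
# from collections import namedtuple
# Unit = namedtuple('Unit', 'label qs start_date end_date date_field')
# unit = Unit('Signups', User.objects.all(),
#             datetime.datetime(2014, 1, 1), datetime.datetime(2014, 12, 31),
#             'date_joined')
# graph = AbsoluteTimeGraph(AbstractTimeGraph.SCOPE.month, unit)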
|
{
"content_hash": "ba81976de31a2370d0951cb39fefe03a",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 131,
"avg_line_length": 28.192660550458715,
"alnum_prop": 0.5206638464041653,
"repo_name": "animekita/selvbetjening",
"id": "ab56ac13f79a9e0be5a6751e1ae6322dbc002fc4",
"size": "3073",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "selvbetjening/sadmin2/graphs/timelines.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "17368"
},
{
"name": "HTML",
"bytes": "107945"
},
{
"name": "JavaScript",
"bytes": "4914"
},
{
"name": "Python",
"bytes": "881254"
},
{
"name": "Shell",
"bytes": "312"
}
],
"symlink_target": ""
}
|
import build
import config
import train
|
{
"content_hash": "81223ed3bfe25eda2f5bf3a2442189c2",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 13,
"avg_line_length": 13.333333333333334,
"alnum_prop": 0.85,
"repo_name": "2PacIsAlive/DeepOncology",
"id": "0627dd1db0c177d1c6aa50a2bef645aa21d275ca",
"size": "40",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "deep_networks/network/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "15124"
},
{
"name": "Python",
"bytes": "18987"
}
],
"symlink_target": ""
}
|
import pexpect
import sys
import os
#
# Creates Variables to be referenced by code
#
switch_un = "pytest"
switch_pw = "pytest"
enable_pw = "password"
vlan = "300"
vlan_name = "TESTVLAN"
switch_port = "GigabitEthernet0/10"
#
#
# Create a logfile
stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)
# Create Import for Text file that contains Switch IP's
# Read the list to variable switches
# Create a for loop in which switch is the variable derived
# from a line of switches
with open('device_file1.txt') as fname:
switches = fname.read().splitlines()
for switch in switches:
#
# This section shows the setup of the SSH session and
# authentication through config T
#
print switch
child = pexpect.spawn('ssh %s@%s' % (switch_un, switch))
# Drop logfile to Standard Out.
child.logfile = stdout
child.expect('Password:')
child.sendline(switch_pw)
child.expect('>')
child.sendline('terminal length 0')
child.expect('\>')
child.sendline('enable')
child.expect('Password:')
child.sendline(enable_pw)
child.expect('#')
child.sendline('conf t')
#
# This section starts the configuration SNMP
child.expect('\(config\)#')
child.sendline('snmp-server community pycom ro')
child.expect('\(config\)#')
child.sendline('exit')
#
# This section starts the interface configuration (currently a placeholder;
# the vlan, vlan_name and switch_port variables above would be used here)
#
# Shows that the next prompt we expect from the SSH session (child) is (#)
child.expect('#')
# This section starts the termination of the session
#
child.sendline('end')
child.expect('#')
child.sendline('wr mem')
child.expect('#')
child.sendline('quit')
child.close()
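# A sketch of the expected device_file1.txt contents -- one switch IP or
# hostname per line (addresses hypothetical):
#
#   192.0.2.10
#   192.0.2.11
#   core-sw-01.example.com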
|
{
"content_hash": "f2035fb67481817c396a0449c642d808",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 70,
"avg_line_length": 25.737704918032787,
"alnum_prop": 0.7089171974522293,
"repo_name": "joshobrien77/staticnat",
"id": "c8d89a492d4c8aa21303ae96853c7479d05a3ec7",
"size": "1610",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wdye/wdye4/wdye4.1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12721"
}
],
"symlink_target": ""
}
|
from datetime import datetime, timedelta, time
from decimal import Decimal
from uuid import uuid1, uuid4, UUID
import six
import unittest
from cassandra.cqlengine import columns
from cassandra.cqlengine.management import sync_table
from cassandra.cqlengine.management import drop_table
from cassandra.cqlengine.models import Model
from cassandra.util import Date, Time
from tests.integration import PROTOCOL_VERSION
from tests.integration.cqlengine.base import BaseCassEngTestCase
class BaseColumnIOTest(BaseCassEngTestCase):
"""
    Tests that values come out of Cassandra in the format we expect.

    To test a column type, subclass this test and define the column, plus the
    primary key and data values you want to test.
"""
# The generated test model is assigned here
_generated_model = None
# the column we want to test
column = None
# the values we want to test against, you can
# use a single value, or multiple comma separated values
pkey_val = None
data_val = None
@classmethod
def setUpClass(cls):
super(BaseColumnIOTest, cls).setUpClass()
# if the test column hasn't been defined, bail out
if not cls.column:
return
# create a table with the given column
class IOTestModel(Model):
table_name = cls.column.db_type + "_io_test_model_{}".format(uuid4().hex[:8])
pkey = cls.column(primary_key=True)
data = cls.column()
cls._generated_model = IOTestModel
sync_table(cls._generated_model)
# tupleify the tested values
if not isinstance(cls.pkey_val, tuple):
cls.pkey_val = cls.pkey_val,
if not isinstance(cls.data_val, tuple):
cls.data_val = cls.data_val,
@classmethod
def tearDownClass(cls):
super(BaseColumnIOTest, cls).tearDownClass()
if not cls.column:
return
drop_table(cls._generated_model)
def comparator_converter(self, val):
""" If you want to convert the original value used to compare the model vales """
return val
def test_column_io(self):
""" Tests the given models class creates and retrieves values as expected """
if not self.column:
return
for pkey, data in zip(self.pkey_val, self.data_val):
# create
m1 = self._generated_model.create(pkey=pkey, data=data)
# get
m2 = self._generated_model.get(pkey=pkey)
assert m1.pkey == m2.pkey == self.comparator_converter(pkey), self.column
assert m1.data == m2.data == self.comparator_converter(data), self.column
# delete
self._generated_model.filter(pkey=pkey).delete()
class TestBlobIO(BaseColumnIOTest):
column = columns.Blob
pkey_val = six.b('blake'), uuid4().bytes
data_val = six.b('eggleston'), uuid4().bytes
class TestBlobIO2(BaseColumnIOTest):
column = columns.Blob
pkey_val = bytearray(six.b('blake')), uuid4().bytes
data_val = bytearray(six.b('eggleston')), uuid4().bytes
class TestTextIO(BaseColumnIOTest):
column = columns.Text
pkey_val = 'bacon'
data_val = 'monkey'
class TestNonBinaryTextIO(BaseColumnIOTest):
column = columns.Text
pkey_val = 'bacon'
data_val = '0xmonkey'
class TestInteger(BaseColumnIOTest):
column = columns.Integer
pkey_val = 5
data_val = 6
class TestBigInt(BaseColumnIOTest):
column = columns.BigInt
pkey_val = 6
data_val = pow(2, 63) - 1
class TestDateTime(BaseColumnIOTest):
column = columns.DateTime
now = datetime(*datetime.now().timetuple()[:6])
pkey_val = now
data_val = now + timedelta(days=1)
class TestDate(BaseColumnIOTest):
@classmethod
def setUpClass(cls):
if PROTOCOL_VERSION < 4:
raise unittest.SkipTest("Protocol v4 datatypes require native protocol 4+, currently using: {0}".format(PROTOCOL_VERSION))
super(TestDate, cls).setUpClass()
column = columns.Date
now = Date(datetime.now().date())
pkey_val = now
data_val = Date(now.days_from_epoch + 1)
class TestUUID(BaseColumnIOTest):
column = columns.UUID
pkey_val = str(uuid4()), uuid4()
data_val = str(uuid4()), uuid4()
def comparator_converter(self, val):
return val if isinstance(val, UUID) else UUID(val)
class TestTimeUUID(BaseColumnIOTest):
column = columns.TimeUUID
pkey_val = str(uuid1()), uuid1()
data_val = str(uuid1()), uuid1()
def comparator_converter(self, val):
return val if isinstance(val, UUID) else UUID(val)
# until Floats are implicitly single:
class FloatSingle(columns.Float):
def __init__(self, **kwargs):
super(FloatSingle, self).__init__(double_precision=False, **kwargs)
class TestFloatIO(BaseColumnIOTest):
column = FloatSingle
pkey_val = 4.75
data_val = -1.5
class TestDoubleIO(BaseColumnIOTest):
column = columns.Double
pkey_val = 3.14
data_val = -1982.11
class TestDecimalIO(BaseColumnIOTest):
column = columns.Decimal
pkey_val = Decimal('1.35'), 5, '2.4'
data_val = Decimal('0.005'), 3.5, '8'
def comparator_converter(self, val):
return Decimal(val)
class TestTime(BaseColumnIOTest):
@classmethod
def setUpClass(cls):
if PROTOCOL_VERSION < 4:
raise unittest.SkipTest("Protocol v4 datatypes require native protocol 4+, currently using: {0}".format(PROTOCOL_VERSION))
super(TestTime, cls).setUpClass()
column = columns.Time
pkey_val = Time(time(2, 12, 7, 48))
data_val = Time(time(16, 47, 25, 7))
class TestSmallInt(BaseColumnIOTest):
@classmethod
def setUpClass(cls):
if PROTOCOL_VERSION < 4:
raise unittest.SkipTest("Protocol v4 datatypes require native protocol 4+, currently using: {0}".format(PROTOCOL_VERSION))
super(TestSmallInt, cls).setUpClass()
column = columns.SmallInt
pkey_val = 16768
data_val = 32523
class TestTinyInt(BaseColumnIOTest):
@classmethod
def setUpClass(cls):
if PROTOCOL_VERSION < 4:
raise unittest.SkipTest("Protocol v4 datatypes require native protocol 4+, currently using: {0}".format(PROTOCOL_VERSION))
super(TestTinyInt, cls).setUpClass()
column = columns.TinyInt
pkey_val = 1
data_val = 123
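# A minimal sketch of extending coverage to another column type by following
# the same pattern (values hypothetical; columns.VarInt ships with cqlengine):
class TestVarInt(BaseColumnIOTest):
    column = columns.VarInt
    pkey_val = 5
    data_val = pow(2, 70)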
|
{
"content_hash": "14199719bfcaa5b6777b9f577a7715ab",
"timestamp": "",
"source": "github",
"line_count": 251,
"max_line_length": 134,
"avg_line_length": 25.48605577689243,
"alnum_prop": 0.6560887916210724,
"repo_name": "kracekumar/python-driver",
"id": "243f7096ad43aa8381a9579323523312663a0964",
"size": "6972",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/integration/cqlengine/columns/test_value_io.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "24198"
},
{
"name": "Python",
"bytes": "1548808"
}
],
"symlink_target": ""
}
|
"""ACL Groups admin handler."""
import httplib
import re
from simian.mac import admin
from simian.mac import models
from simian.mac.common import auth
from simian.mac.common import util
# TODO(user): consolidate email regex duplicated in settings.
MAIL_REGEX = r'\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,4}\b'
class ACLGroups(admin.AdminHandler):
def get(self, group=None):
"""GET handler."""
if not self.IsAdminUser():
self.error(httplib.FORBIDDEN)
return
if group:
group_members = self.GetMembers(group)
d = {'report_type': 'acl_groups', 'list': [(gm,) for gm in group_members],
'columns': 1, 'regex': [r'/%s/' % MAIL_REGEX],
'title': 'ACL Group: %s' % group, 'back': '/admin/acl_groups',
'infopanel': 'Full email address required (e.g. user@example.com)'}
self.Render('list_edit.html', d)
else:
group_data = []
for name, title in auth.ACL_GROUPS.items():
members = self.GetMembers(name)
group_data.append({'name': name, 'title': title, 'members': members})
d = {'report_type': 'acl_groups', 'groups': group_data}
self.Render('acl_groups.html', d)
@admin.AdminHandler.XsrfProtected('acl_groups')
def post(self, group=None):
"""POST handler."""
if not self.IsAdminUser():
self.error(httplib.FORBIDDEN)
return
if group:
values = self.request.get_all('item_0', None)
members = []
is_email = re.compile(MAIL_REGEX)
for member in values:
if is_email.match(member):
members.append(member)
else:
self.error(httplib.BAD_REQUEST)
self.response.out.write('malformed email')
return
models.KeyValueCache.MemcacheWrappedSet(group, 'text_value',
util.Serialize(members))
self.redirect('/admin/acl_groups?msg=Group%20saved')
def GetMembers(self, group_name):
"""Get a list of members belonging to the group."""
members = models.KeyValueCache.MemcacheWrappedGet(group_name, 'text_value')
member_list = []
if members:
try:
member_list = util.Deserialize(members)
except util.DeserializeError:
pass
return member_list
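# A sketch of the cached record GetMembers deserializes, assuming
# util.Serialize is JSON-backed (group name and addresses hypothetical):
#
#   key='admins', value_name='text_value'
#   text_value='["alice@example.com", "bob@example.com"]'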
|
{
"content_hash": "2c5834d0db933466e13a911dc9cc31d7",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 80,
"avg_line_length": 33.62686567164179,
"alnum_prop": 0.6147359076786507,
"repo_name": "sillywilly42/simian",
"id": "12b6aa2d63c5dbe2bdeeba0a69f89a5a61352e4f",
"size": "2875",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/simian/mac/admin/acl_groups.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "38117"
},
{
"name": "HTML",
"bytes": "96126"
},
{
"name": "JavaScript",
"bytes": "34481"
},
{
"name": "Makefile",
"bytes": "7246"
},
{
"name": "Python",
"bytes": "1402979"
},
{
"name": "Shell",
"bytes": "20790"
}
],
"symlink_target": ""
}
|
class SbtBuildFileEditor:
def __init__(self, outputHandle):
self.outputHandle = outputHandle
commentInfo = "\n\n// ----- This part is generated by Scala Project Generator Facade plugin -----"
self.__writeToOutputHandle(commentInfo)
def simpleTransformation(self, kv):
key, value = kv
t = '\n\n' + key + ' := ' + value
self.__writeToOutputHandle(t)
def simpleTransformationBatch(self, kvList):
for t in kvList:
self.simpleTransformation(t)
def transformUsingOtherKey(self, kv):
key, otherKey = kv
t = '\n\n' + key + ' <<= ' + otherKey
self.__writeToOutputHandle(t)
def transformUsingOtherKeyBatch(self, kvList):
for t in kvList:
self.transformUsingOtherKey(t)
def __writeToOutputHandle(self, data):
self.outputHandle.write(data)
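# A minimal usage sketch (file name and settings hypothetical):
#
# with open('build.sbt', 'a') as handle:
#     editor = SbtBuildFileEditor(handle)
#     editor.simpleTransformationBatch([
#         ('scalaVersion', '"2.10.4"'),
#         ('organization', '"com.example"'),
#     ])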
|
{
"content_hash": "74d3e143ea7a4e3d12ce16e62af04848",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 106,
"avg_line_length": 32.51851851851852,
"alnum_prop": 0.6150341685649203,
"repo_name": "lgmerek/ScalaProjectGeneratorFacade",
"id": "b4444fbc90a60c8aa59f899c4059b84a04d5174a",
"size": "880",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sbtBuildFileEditor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23747"
}
],
"symlink_target": ""
}
|
import traceback
import tornado.web
from tornado import gen
from admin.handler.baseHandler import BaseHandler
from common.authLib import auth_permissions
from setting import logger
class AdminRoleHandler(BaseHandler):
@tornado.web.authenticated
@auth_permissions
@gen.coroutine
def get(self, *args, **kwargs):
res_msg = ""
roles = []
num = int(self.get_argument("num", 15))
page = int(self.get_argument("page", 1))
total_count = 0
try:
query = {}
show = {"_id": 0}
cursor = self.db.sys_role.find(query, show).skip((page - 1) * num).limit(num)
while (yield cursor.fetch_next):
user = cursor.next_object()
roles.append(user)
total_count = yield self.db.sys_role.find().count()
except:
logger.error(traceback.format_exc())
self.render("admin/sys_role_list.html", roles=roles, res_msg=res_msg, total_count=total_count, page=page, num=num)
class AdminRoleAddHandler(BaseHandler):
@tornado.web.authenticated
@auth_permissions
@gen.coroutine
def get(self, *args, **kwargs):
res_msg = ""
role = {}
self.render("admin/sys_role_add.html", res_msg=res_msg, form_action="/admin/role/add", role=role)
@auth_permissions
@gen.coroutine
def post(self, *args, **kwargs):
role_id = self.get_argument("role_id", "")
role_name = self.get_argument("role_name", "")
try:
role_dict = {
"role_id": role_id,
"role_name": role_name,
}
query = {"role_id": role_id}
yield self.db.sys_role.update(query, role_dict, upsert=True)
except:
logger.error(traceback.format_exc())
self.redirect("/admin/role")
class AdminRoleUpdateHandler(BaseHandler):
@tornado.web.authenticated
@auth_permissions
@gen.coroutine
def get(self, *args, **kwargs):
res_msg = ""
role = {}
try:
role_id = self.get_argument("role_id", "")
query = {"role_id": role_id}
show = {"_id": 0}
role = yield self.db.sys_role.find_one(query, show)
except:
logger.error(traceback.format_exc())
self.render("admin/sys_role_add.html", role=role, res_msg=res_msg, form_action="/admin/role/update")
@auth_permissions
@gen.coroutine
def post(self, *args, **kwargs):
role_id = self.get_argument("role_id", "")
role_name = self.get_argument("role_name", "")
try:
role_dict = {
"role_id": role_id,
"role_name": role_name,
}
query = {"role_id": role_id}
yield self.db.sys_role.update(query, {"$set": role_dict}, upsert=True)
except:
logger.error(traceback.format_exc())
self.redirect("/admin/user")
class AdminRoleDeleteHandler(BaseHandler):
@tornado.web.authenticated
@auth_permissions
@gen.coroutine
def get(self, *args, **kwargs):
try:
role_id = self.get_argument("role_id", "")
query = {"role_id": role_id}
self.db.sys_role.remove(query)
except:
logger.error(traceback.format_exc())
self.redirect("/admin/role")
|
{
"content_hash": "c99f5c725dddb02b99c555c4f59c4a6f",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 122,
"avg_line_length": 32.34615384615385,
"alnum_prop": 0.562128418549346,
"repo_name": "xin1195/smartSearch",
"id": "4a017c74dee3ca7a2cecac654ba8659402332a71",
"size": "3408",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "admin/handler/roleHandler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "874"
},
{
"name": "CSS",
"bytes": "2591"
},
{
"name": "HTML",
"bytes": "78169"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Python",
"bytes": "57059"
}
],
"symlink_target": ""
}
|
import os
os.chdir('bindings/python')
execfile('setup.py')
|
{
"content_hash": "a96578aefce7be8663eacab0dac27327",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 27,
"avg_line_length": 19.666666666666668,
"alnum_prop": 0.7457627118644068,
"repo_name": "jpetso/libtorrent",
"id": "06f12e01a8b3a7a9836fe9c0f516524b04be96fb",
"size": "82",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "882"
},
{
"name": "C",
"bytes": "396307"
},
{
"name": "C++",
"bytes": "5562878"
},
{
"name": "CMake",
"bytes": "8978"
},
{
"name": "Python",
"bytes": "40131"
},
{
"name": "Shell",
"bytes": "22196"
}
],
"symlink_target": ""
}
|
"""Main entry point for the Oozie to Airflow converter"""
import argparse
import logging
import os
import sys
# pylint: disable=no-name-in-module
from distutils.spawn import find_executable
from subprocess import CalledProcessError, check_call
from o2a.converter.mappers import ACTION_MAP
from o2a.converter.oozie_converter import OozieConverter
from o2a.converter.constants import HDFS_FOLDER
from o2a.converter.renderers import PythonRenderer, DotRenderer
from o2a.transformers.add_node_notificaton_transformer import AddNodeNotificationTransformer
from o2a.transformers.add_workflow_notificaton_transformer import AddWorkflowNotificationTransformer
from o2a.transformers.remove_end_transformer import RemoveEndTransformer
from o2a.transformers.remove_fork_transformer import RemoveForkTransformer
from o2a.transformers.remove_inaccessible_node_transformer import RemoveInaccessibleNodeTransformer
from o2a.transformers.remove_join_transformer import RemoveJoinTransformer
from o2a.transformers.remove_kill_transformer import RemoveKillTransformer
from o2a.transformers.remove_start_transformer import RemoveStartTransformer
from o2a.utils.constants import CONFIG, WORKFLOW_XML
INDENT = 4
PROJECT_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
def get_o2a_validate_workflows_script():
# If the o2a-validate-workflows script is present in the project or on the path
# use it to validate the workflow
validate_workflows_script = os.path.join(PROJECT_PATH, "bin", "o2a-validate-workflows")
if not os.path.isfile(validate_workflows_script):
validate_workflows_script = find_executable("o2a-validate-workflows")
if not os.path.isfile(validate_workflows_script):
logging.info(f"Skipping workflow validation as the {validate_workflows_script} is missing")
return None
logging.info(f"Found o2a-validate-workflows script at {validate_workflows_script}. Validating workflow")
return validate_workflows_script
# pylint: disable=missing-docstring
def main():
args = parse_args(sys.argv[1:])
input_directory_path = args.input_directory_path
output_directory_path = args.output_directory_path
start_days_ago = args.start_days_ago
schedule_interval = args.schedule_interval
dag_name = args.dag_name
if not dag_name:
dag_name = os.path.basename(input_directory_path)
conf_path = os.path.join(input_directory_path, CONFIG)
if not os.path.isfile(conf_path):
logging.warning(
f"""
#################################### WARNING ###########################################
The '{CONFIG}' file was not detected in {input_directory_path}.
It may be necessary to provide input parameters for the workflow.
In case of any conversion errors make sure this configuration file is really not needed.
Otherwise please provide it.
########################################################################################
"""
)
validate_workflows_script = get_o2a_validate_workflows_script()
if validate_workflows_script:
try:
check_call([validate_workflows_script, f"{input_directory_path}/{HDFS_FOLDER}/{WORKFLOW_XML}"])
except CalledProcessError:
            logging.error(
                "Workflow failed schema validation. "
                "Please correct the workflow XML and try again."
            )
            sys.exit(1)
os.makedirs(output_directory_path, exist_ok=True)
if args.dot:
renderer_class = DotRenderer
else:
renderer_class = PythonRenderer
renderer = renderer_class(
output_directory_path=output_directory_path,
schedule_interval=schedule_interval,
start_days_ago=start_days_ago,
)
transformers = [
RemoveInaccessibleNodeTransformer(),
RemoveEndTransformer(),
RemoveKillTransformer(),
RemoveStartTransformer(),
RemoveJoinTransformer(),
RemoveForkTransformer(),
AddWorkflowNotificationTransformer(),
AddNodeNotificationTransformer(),
]
converter = OozieConverter(
dag_name=dag_name,
input_directory_path=input_directory_path,
output_directory_path=output_directory_path,
action_mapper=ACTION_MAP,
renderer=renderer,
transformers=transformers,
user=args.user,
)
converter.recreate_output_directory()
converter.convert()
def parse_args(args):
parser = argparse.ArgumentParser(
description="Convert Apache Oozie workflows to Apache Airflow workflows."
)
parser.add_argument("-i", "--input-directory-path", help="Path to input directory", required=True)
parser.add_argument("-o", "--output-directory-path", help="Desired output directory", required=True)
parser.add_argument("-n", "--dag-name", help="Desired DAG name [defaults to input directory name]")
parser.add_argument(
"-u",
"--user",
help="The user to be used in place of all " "${user.name} [defaults to user who ran the conversion]",
)
parser.add_argument("-s", "--start-days-ago", help="Desired DAG start as number of days ago", default=0)
parser.add_argument(
"-v", "--schedule-interval", help="Desired DAG schedule interval as number of days", default=0
)
parser.add_argument("-d", "--dot", help="Renders workflow files in DOT format", action="store_true")
return parser.parse_args(args)
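# An example invocation via the project's command-line entry point (entry
# point name, paths and DAG name assumed):
#
#   o2a -i examples/demo -o output/demo -n demo_workflow -u testuser
#
# Add -d to render the workflow in DOT format instead of a Python DAG.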
|
{
"content_hash": "f0f026000ada503b05806e4205b7c373",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 109,
"avg_line_length": 40.32592592592592,
"alnum_prop": 0.695077149155033,
"repo_name": "GoogleCloudPlatform/oozie-to-airflow",
"id": "2ccb2beefdd787976ce3a622e811a689734ea86e",
"size": "6038",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "o2a/o2a.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "528273"
},
{
"name": "Shell",
"bytes": "57460"
},
{
"name": "Smarty",
"bytes": "31948"
}
],
"symlink_target": ""
}
|
"""Tests for artifacts."""
import os
import subprocess
import time
# pylint: disable=unused-import,g-bad-import-order
from grr.lib import server_plugins
# Pull in some extra artifacts used for testing.
from grr.lib import artifact_lib_test
# pylint: enable=unused-import,g-bad-import-order
from grr.client import client_utils_linux
from grr.client import client_utils_osx
from grr.client import vfs
from grr.client.client_actions import standard
from grr.lib import action_mocks
from grr.lib import aff4
from grr.lib import artifact
from grr.lib import artifact_lib
from grr.lib import config_lib
from grr.lib import flags
from grr.lib import flow
from grr.lib import parsers
from grr.lib import rdfvalue
from grr.lib import test_lib
from grr.lib import utils
# pylint: mode=test
WMI_SAMPLE = [
rdfvalue.Dict({u"Version": u"65.61.49216", u"InstallDate2": u"",
u"Name": u"Google Chrome", u"Vendor": u"Google, Inc.",
u"Description": u"Google Chrome", u"IdentifyingNumber":
u"{35790B21-ACFE-33F5-B320-9DA320D96682}",
u"InstallDate": u"20130710"}),
rdfvalue.Dict({u"Version": u"7.0.1", u"InstallDate2": u"",
u"Name": u"Parity Agent", u"Vendor": u"Bit9, Inc.",
u"Description": u"Parity Agent", u"IdentifyingNumber":
u"{ADC7EB41-4CC2-4FBA-8FBE-9338A9FB7666}",
u"InstallDate": u"20130710"}),
rdfvalue.Dict({u"Version": u"8.0.61000", u"InstallDate2": u"",
u"Name": u"Microsoft Visual C++ 2005 Redistributable (x64)",
u"Vendor": u"Microsoft Corporation", u"Description":
u"Microsoft Visual C++ 2005 Redistributable (x64)",
u"IdentifyingNumber":
u"{ad8a2fa1-06e7-4b0d-927d-6e54b3d3102}",
u"InstallDate": u"20130710"})]
class TestCmdProcessor(parsers.CommandParser):
output_types = ["SoftwarePackage"]
supported_artifacts = ["TestCmdArtifact"]
def Parse(self, cmd, args, stdout, stderr, return_val, time_taken,
knowledge_base):
_ = cmd, args, stdout, stderr, return_val, time_taken, knowledge_base
installed = rdfvalue.SoftwarePackage.InstallState.INSTALLED
soft = rdfvalue.SoftwarePackage(name="Package1", description="Desc1",
version="1", architecture="amd64",
install_state=installed)
yield soft
soft = rdfvalue.SoftwarePackage(name="Package2", description="Desc2",
version="1", architecture="i386",
install_state=installed)
yield soft
# Also yield something random so we can test return type filtering.
yield rdfvalue.StatEntry()
# Also yield an anomaly to test that.
yield rdfvalue.Anomaly(type="PARSER_ANOMALY",
symptom="could not parse gremlins.")
class MultiProvideParser(parsers.RegistryValueParser):
output_types = ["Dict"]
supported_artifacts = ["DepsProvidesMultiple"]
def Parse(self, stat, knowledge_base):
_ = stat, knowledge_base
test_dict = {"environ_temp": rdfvalue.RDFString("tempvalue"),
"environ_path": rdfvalue.RDFString("pathvalue")}
yield rdfvalue.Dict(test_dict)
class RekallMock(action_mocks.MemoryClientMock):
def __init__(self, client_id, result_filename):
self.result_filename = result_filename
self.client_id = client_id
def RekallAction(self, _):
# Generate this file with:
# rekall -r data -f win7_trial_64bit.raw pslist > rekall_pslist_result.dat
ps_list_file = os.path.join(config_lib.CONFIG["Test.data_dir"],
self.result_filename)
result = rdfvalue.RekallResponse(
json_messages=open(ps_list_file).read(10000000),
plugin="pslist",
client_urn=self.client_id)
return [result, rdfvalue.Iterator(state="FINISHED")]
class ArtifactBaseTest(test_lib.GRRBaseTest):
@classmethod
def LoadTestArtifacts(cls):
test_artifacts_file = os.path.join(
config_lib.CONFIG["Test.data_dir"], "test_artifacts.json")
artifact_lib.LoadArtifactsFromFiles([test_artifacts_file])
class ArtifactTest(ArtifactBaseTest):
"""Helper class for tests using artifacts."""
def setUp(self):
super(ArtifactTest, self).setUp()
    self.client_id = self.SetupClients(1)[0]
class MockClient(action_mocks.MemoryClientMock):
def WmiQuery(self, _):
return WMI_SAMPLE
def SetWindowsClient(self):
fd = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
fd.Set(fd.Schema.SYSTEM("Windows"))
fd.Set(fd.Schema.OS_VERSION("6.2"))
fd.Set(fd.Schema.ARCH("AMD64"))
fd.Flush()
def UpdateCoreKBAttributes(self):
fd = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
kb = fd.Get(fd.Schema.KNOWLEDGE_BASE)
artifact.SetCoreGRRKnowledgeBaseValues(kb, fd)
fd.Set(fd.Schema.KNOWLEDGE_BASE, kb)
fd.Flush()
def SetLinuxClient(self):
fd = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
fd.Set(fd.Schema.SYSTEM("Linux"))
fd.Set(fd.Schema.OS_VERSION("12.04"))
fd.Flush()
def SetDarwinClient(self):
fd = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
fd.Set(fd.Schema.SYSTEM("Darwin"))
fd.Set(fd.Schema.OS_VERSION("10.9"))
fd.Flush()
def MockClientMountPointsWithImage(self, image_path, fs_type="ext2"):
"""Mock the client to run off a test image.
Args:
image_path: The path to the image file.
fs_type: The filesystem in the image.
Returns:
A context manager which ensures that client actions are served off the
test image.
"""
def MockGetMountpoints():
return {"/": (image_path, fs_type)}
return utils.MultiStubber(
(client_utils_linux, "GetMountpoints", MockGetMountpoints),
(client_utils_osx, "GetMountpoints", MockGetMountpoints),
(standard, "HASH_CACHE", utils.FastStore(100)))
def RunCollectorAndGetCollection(self, artifact_list, client_mock=None,
**kw):
"""Helper to handle running the collector flow."""
if client_mock is None:
client_mock = self.MockClient(client_id=self.client_id)
output_name = "/analysis/output/%s" % int(time.time())
for _ in test_lib.TestFlowHelper(
"ArtifactCollectorFlow", client_mock=client_mock, output=output_name,
client_id=self.client_id, artifact_list=artifact_list,
token=self.token, **kw):
pass
output_urn = self.client_id.Add(output_name)
return aff4.FACTORY.Open(output_urn, aff4_type="RDFValueCollection",
token=self.token)
class GRRArtifactTest(ArtifactTest):
def testRDFMaps(self):
"""Validate the RDFMaps."""
for rdf_name, dat in artifact.GRRArtifactMappings.rdf_map.items():
# "info/software", "InstalledSoftwarePackages", "INSTALLED_PACKAGES",
# "Append"
_, aff4_type, aff4_attribute, operator = dat
if operator not in ["Append", "Overwrite"]:
raise artifact_lib.ArtifactDefinitionError(
"Bad RDFMapping, unknown operator %s in %s" %
(operator, rdf_name))
if aff4_type not in aff4.AFF4Object.classes:
raise artifact_lib.ArtifactDefinitionError(
"Bad RDFMapping, invalid AFF4 Object %s in %s" %
(aff4_type, rdf_name))
attr = getattr(aff4.AFF4Object.classes[aff4_type].SchemaCls,
aff4_attribute)()
if not isinstance(attr, rdfvalue.RDFValue):
raise artifact_lib.ArtifactDefinitionError(
"Bad RDFMapping, bad attribute %s for %s" %
(aff4_attribute, rdf_name))
def testUploadArtifactYamlFile(self):
test_artifacts_file = os.path.join(
config_lib.CONFIG["Test.data_dir"], "test_artifacts.json")
filecontent = open(test_artifacts_file).read()
artifact.UploadArtifactYamlFile(filecontent, token=self.token)
def testUploadArtifactYamlFileMissingDoc(self):
content = """name: Nodoc
collectors:
- collector_type: GREP
args:
path_list: [/etc/blah]
content_regex_list: ["stuff"]
supported_os: [Linux]
"""
with self.assertRaises(artifact_lib.ArtifactDefinitionError):
artifact.UploadArtifactYamlFile(content, token=self.token)
def testUploadArtifactYamlFileBadList(self):
content = """name: BadList
doc: here's the doc
collectors:
- collector_type: GREP
args:
path_list: /etc/blah
content_regex_list: ["stuff"]
supported_os: [Linux]
"""
with self.assertRaises(artifact_lib.ArtifactDefinitionError):
artifact.UploadArtifactYamlFile(content, token=self.token)
class ArtifactFlowTest(ArtifactTest):
def setUp(self):
"""Make sure things are initialized."""
super(ArtifactFlowTest, self).setUp()
fd = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
fd.Set(fd.Schema.SYSTEM("Linux"))
kb = fd.Schema.KNOWLEDGE_BASE()
artifact.SetCoreGRRKnowledgeBaseValues(kb, fd)
kb.MergeOrAddUser(rdfvalue.KnowledgeBaseUser(username="gogol"))
kb.MergeOrAddUser(rdfvalue.KnowledgeBaseUser(username="gevulot"))
kb.MergeOrAddUser(rdfvalue.KnowledgeBaseUser(username="exomemory"))
fd.Set(kb)
fd.Flush()
self.LoadTestArtifacts()
def testCmdArtifact(self):
"""Check we can run command based artifacts and get anomalies."""
class Popen(object):
"""A mock object for subprocess.Popen."""
def __init__(self, run, stdout, stderr, stdin):
Popen.running_args = run
Popen.stdout = stdout
Popen.stderr = stderr
Popen.stdin = stdin
Popen.returncode = 0
def communicate(self): # pylint: disable=g-bad-name
return "stdout here", "stderr here"
client_mock = self.MockClient("ExecuteCommand", client_id=self.client_id)
with utils.Stubber(subprocess, "Popen", Popen):
for _ in test_lib.TestFlowHelper(
"ArtifactCollectorFlow", client_mock, client_id=self.client_id,
store_results_in_aff4=True, use_tsk=False,
artifact_list=["TestCmdArtifact"], token=self.token):
pass
urn = self.client_id.Add("info/software")
fd = aff4.FACTORY.Open(urn, token=self.token)
packages = fd.Get(fd.Schema.INSTALLED_PACKAGES)
self.assertEqual(len(packages), 2)
self.assertEqual(packages[0].__class__.__name__, "SoftwarePackage")
with aff4.FACTORY.Open(self.client_id.Add("anomalies"),
token=self.token) as anomaly_coll:
self.assertEqual(len(anomaly_coll), 1)
self.assertTrue("gremlin" in anomaly_coll[0].symptom)
def testWMIQueryArtifact(self):
"""Check we can run WMI based artifacts."""
self.SetWindowsClient()
self.UpdateCoreKBAttributes()
self.RunCollectorAndGetCollection(["WMIInstalledSoftware"],
store_results_in_aff4=True)
urn = self.client_id.Add("info/software")
fd = aff4.FACTORY.Open(urn, token=self.token)
packages = fd.Get(fd.Schema.INSTALLED_PACKAGES)
self.assertEqual(len(packages), 3)
self.assertEqual(packages[0].description, "Google Chrome")
def testRekallPsListArtifact(self):
"""Check we can run Rekall based artifacts."""
self.SetWindowsClient()
self.CreateSignedDriver()
fd = self.RunCollectorAndGetCollection(
["RekallPsList"], RekallMock(
self.client_id, "rekall_pslist_result.dat"))
self.assertEqual(len(fd), 36)
self.assertEqual(fd[0].exe, "System")
self.assertEqual(fd[0].pid, 4)
self.assertIn("DumpIt.exe", [x.exe for x in fd])
def testRekallVadArtifact(self):
"""Check we can run Rekall based artifacts."""
# The client should now be populated with the data we care about.
with aff4.FACTORY.Open(self.client_id, mode="rw", token=self.token) as fd:
fd.Set(fd.Schema.KNOWLEDGE_BASE(
os="Windows",
environ_systemdrive=r"c:"))
self.SetWindowsClient()
self.CreateSignedDriver()
fd = self.RunCollectorAndGetCollection(
["FullVADBinaryList"], RekallMock(
self.client_id, "rekall_vad_result.dat"))
self.assertEqual(len(fd), 1986)
self.assertEqual(fd[0].path, u"c:\\Windows\\System32\\ntdll.dll")
for x in fd:
self.assertEqual(x.pathtype, "OS")
extension = x.path.lower().split(".")[-1]
self.assertIn(extension, ["exe", "dll", "pyd", "drv", "mui", "cpl"])
def testFilesArtifact(self):
"""Check GetFiles artifacts."""
# Update the artifact path to point to the test directory.
art_reg = artifact_lib.ArtifactRegistry.artifacts
orig_path = art_reg["TestFilesArtifact"].collectors[0].args["path_list"]
art_reg["TestFilesArtifact"].collectors[0].args["path_list"] = (
[os.path.join(self.base_path, "auth.log")])
client_mock = action_mocks.ActionMock("TransferBuffer", "StatFile", "Find",
"HashBuffer", "ListDirectory",
"FingerprintFile")
self.RunCollectorAndGetCollection(["TestFilesArtifact"],
client_mock=client_mock)
urn = self.client_id.Add("fs/os/").Add(self.base_path).Add("auth.log")
aff4.FACTORY.Open(urn, aff4_type="VFSBlobImage", token=self.token)
art_reg["TestFilesArtifact"].collectors[0].args["path_list"] = orig_path
def testLinuxPasswdHomedirsArtifact(self):
"""Check LinuxPasswdHomedirs artifacts."""
# Update the artifact path to point to the test directory.
art_reg = artifact_lib.ArtifactRegistry.artifacts
orig_path = art_reg["LinuxPasswdHomedirs"].collectors[0].args["path_list"]
art_reg["LinuxPasswdHomedirs"].collectors[0].args["path_list"] = [
os.path.join(self.base_path, "passwd")]
client_mock = action_mocks.ActionMock("TransferBuffer", "StatFile", "Find",
"HashBuffer", "ListDirectory",
"FingerprintFile", "Grep")
fd = self.RunCollectorAndGetCollection(["LinuxPasswdHomedirs"],
client_mock=client_mock)
self.assertEqual(len(fd), 3)
self.assertItemsEqual([x.username for x in fd], [u"exomemory", u"gevulot",
u"gogol"])
for user in fd:
if user.username == u"exomemory":
self.assertEqual(user.full_name, u"Never Forget (admin)")
self.assertEqual(user.gid, 47)
self.assertEqual(user.homedir, u"/var/lib/exomemory")
self.assertEqual(user.shell, u"/bin/sh")
self.assertEqual(user.uid, 46)
art_reg["LinuxPasswdHomedirs"].collectors[0].args["path_list"] = orig_path
def testArtifactOutput(self):
"""Check we can run command based artifacts."""
self.SetLinuxClient()
# Update the artifact path to point to the test directory.
art_reg = artifact_lib.ArtifactRegistry.artifacts
art_reg["TestFilesArtifact"].collectors[0].args["path_list"] = ([
os.path.join(self.base_path, "auth.log")])
client_mock = action_mocks.ActionMock("TransferBuffer", "StatFile",
"FingerprintFile", "HashBuffer",
"ListDirectory", "Find")
# Will raise if something goes wrong.
self.RunCollectorAndGetCollection(["TestFilesArtifact"],
client_mock=client_mock)
# Will raise if something goes wrong.
self.RunCollectorAndGetCollection(["TestFilesArtifact"],
client_mock=client_mock,
split_output_by_artifact=True)
# Test the on_no_results_error option.
with self.assertRaises(RuntimeError) as context:
self.RunCollectorAndGetCollection(
["NullArtifact"], client_mock=client_mock,
split_output_by_artifact=True, on_no_results_error=True)
if "collector returned 0 responses" not in str(context.exception):
raise RuntimeError("0 responses should have been returned")
class GrrKbTest(ArtifactTest):
def SetupWindowsMocks(self):
test_lib.ClientFixture(self.client_id, token=self.token)
self.SetWindowsClient()
vfs.VFS_HANDLERS[
rdfvalue.PathSpec.PathType.REGISTRY] = test_lib.FakeRegistryVFSHandler
vfs.VFS_HANDLERS[
rdfvalue.PathSpec.PathType.OS] = test_lib.FakeFullVFSHandler
def testKnowledgeBaseRetrievalWindows(self):
"""Check we can retrieve a knowledge base from a client."""
self.SetupWindowsMocks()
client_mock = action_mocks.ActionMock("TransferBuffer", "StatFile", "Find",
"HashBuffer", "ListDirectory",
"FingerprintFile")
for _ in test_lib.TestFlowHelper(
"KnowledgeBaseInitializationFlow", client_mock,
client_id=self.client_id, token=self.token):
pass
# The client should now be populated with the data we care about.
client = aff4.FACTORY.Open(self.client_id, token=self.token)
kb = artifact.GetArtifactKnowledgeBase(client)
self.assertEqual(kb.environ_systemroot, "C:\\Windows")
self.assertEqual(kb.time_zone, "US/Alaska")
self.assertEqual(kb.code_page, "cp_1252")
self.assertEqual(kb.environ_windir, "C:\\Windows")
self.assertEqual(kb.environ_allusersprofile, "C:\\Users\\All Users")
self.assertEqual(kb.environ_allusersappdata, "C:\\ProgramData")
self.assertEqual(kb.environ_temp, "C:\\Windows\\TEMP")
self.assertEqual(kb.environ_systemdrive, "C:")
self.assertItemsEqual([x.username for x in kb.users],
["jim", "kovacs"])
user = kb.GetUser(username="jim")
self.assertEqual(user.username, "jim")
self.assertEqual(user.sid, "S-1-5-21-702227068-2140022151-3110739409-1000")
def testKnowledgeBaseMultiProvides(self):
"""Check we can handle multi-provides."""
self.SetupWindowsMocks()
    # Replace some artifacts with a test one that will run the MultiProvideParser.
self.LoadTestArtifacts()
config_lib.CONFIG.Set("Artifacts.knowledge_base", ["DepsProvidesMultiple"])
client_mock = action_mocks.ActionMock("TransferBuffer", "StatFile", "Find",
"HashBuffer", "ListDirectory",
"FingerprintFile")
for _ in test_lib.TestFlowHelper(
"KnowledgeBaseInitializationFlow", client_mock,
client_id=self.client_id, token=self.token):
pass
# The client should now be populated with the data we care about.
client = aff4.FACTORY.Open(self.client_id, token=self.token)
kb = artifact.GetArtifactKnowledgeBase(client)
self.assertEqual(kb.environ_temp, "tempvalue")
self.assertEqual(kb.environ_path, "pathvalue")
def testKnowledgeBaseRetrievalFailures(self):
"""Test kb retrieval failure modes."""
client = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
self.assertRaises(artifact_lib.KnowledgeBaseUninitializedError,
artifact.GetArtifactKnowledgeBase, client)
kb = rdfvalue.KnowledgeBase()
kb.hostname = "test"
client.Set(client.Schema.KNOWLEDGE_BASE(kb))
client.Flush(sync=True)
self.assertRaises(artifact_lib.KnowledgeBaseAttributesMissingError,
artifact.GetArtifactKnowledgeBase, client)
def testKnowledgeBaseRetrievalDarwin(self):
"""Check we can retrieve a Darwin kb."""
test_lib.ClientFixture(self.client_id, token=self.token)
self.SetDarwinClient()
config_lib.CONFIG.Set("Artifacts.knowledge_base", ["OSXUsers"])
vfs.VFS_HANDLERS[
rdfvalue.PathSpec.PathType.OS] = test_lib.ClientVFSHandlerFixture
client_mock = action_mocks.ActionMock("TransferBuffer", "StatFile", "Find",
"HashBuffer", "ListDirectory",
"FingerprintFile")
for _ in test_lib.TestFlowHelper(
"KnowledgeBaseInitializationFlow", client_mock,
client_id=self.client_id, token=self.token):
pass
client = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
kb = artifact.GetArtifactKnowledgeBase(client)
self.assertEqual(kb.os_major_version, 10)
self.assertEqual(kb.os_minor_version, 9)
# scalzi from /Users dir listing.
    # Bert and Ernie not present (Users fixture overridden by kb).
self.assertItemsEqual([x.username for x in kb.users], ["scalzi"])
user = kb.GetUser(username="scalzi")
self.assertEqual(user.homedir, "/Users/scalzi")
def testKnowledgeBaseRetrievalLinux(self):
"""Check we can retrieve a Linux kb."""
test_lib.ClientFixture(self.client_id, token=self.token)
self.SetLinuxClient()
config_lib.CONFIG.Set("Artifacts.knowledge_base", ["LinuxWtmp",
"NetgroupConfiguration",
"LinuxPasswdHomedirs",
"LinuxRelease"])
config_lib.CONFIG.Set("Artifacts.netgroup_filter_regexes", ["^login$"])
config_lib.CONFIG.Set("Artifacts.netgroup_user_blacklist", ["isaac"])
vfs.VFS_HANDLERS[
rdfvalue.PathSpec.PathType.OS] = test_lib.FakeTestDataVFSHandler
client_mock = action_mocks.ActionMock("TransferBuffer", "StatFile", "Find",
"HashBuffer", "ListDirectory",
"FingerprintFile", "Grep")
for _ in test_lib.TestFlowHelper(
"KnowledgeBaseInitializationFlow", client_mock,
client_id=self.client_id, token=self.token):
pass
client = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
kb = artifact.GetArtifactKnowledgeBase(client)
self.assertEqual(kb.os_major_version, 14)
self.assertEqual(kb.os_minor_version, 4)
# user 1,2,3 from wtmp. yagharek from netgroup.
    # Bert and Ernie not present (Users fixture overridden by kb).
self.assertItemsEqual([x.username for x in kb.users], ["user1", "user2",
"user3", "yagharek"])
user = kb.GetUser(username="user1")
self.assertEqual(user.last_logon.AsSecondsFromEpoch(), 1296552099)
self.assertEqual(user.homedir, "/home/user1")
def testKnowledgeBaseRetrievalLinuxPasswd(self):
"""Check we can retrieve a Linux kb."""
test_lib.ClientFixture(self.client_id, token=self.token)
vfs.VFS_HANDLERS[
rdfvalue.PathSpec.PathType.OS] = test_lib.FakeTestDataVFSHandler
client_mock = action_mocks.ActionMock("TransferBuffer", "StatFile", "Find",
"HashBuffer", "ListDirectory",
"FingerprintFile", "Grep")
self.SetLinuxClient()
config_lib.CONFIG.Set("Artifacts.knowledge_base", ["LinuxWtmp",
"LinuxPasswdHomedirs",
"LinuxRelease"])
config_lib.CONFIG.Set("Artifacts.knowledge_base_additions", [])
config_lib.CONFIG.Set("Artifacts.knowledge_base_skip", [])
for _ in test_lib.TestFlowHelper(
"KnowledgeBaseInitializationFlow", client_mock,
client_id=self.client_id, token=self.token):
pass
client = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
kb = artifact.GetArtifactKnowledgeBase(client)
self.assertEqual(kb.os_major_version, 14)
self.assertEqual(kb.os_minor_version, 4)
# user 1,2,3 from wtmp.
    # Bert and Ernie not present (Users fixture overridden by kb).
self.assertItemsEqual([x.username for x in kb.users], ["user1", "user2",
"user3"])
user = kb.GetUser(username="user1")
self.assertEqual(user.last_logon.AsSecondsFromEpoch(), 1296552099)
self.assertEqual(user.homedir, "/home/user1")
user = kb.GetUser(username="user2")
self.assertEqual(user.last_logon.AsSecondsFromEpoch(), 1296552102)
self.assertEqual(user.homedir, "/home/user2")
self.assertFalse(kb.GetUser(username="buguser3"))
def testKnowledgeBaseRetrievalLinuxNoUsers(self):
"""Cause a users.username dependency failure."""
test_lib.ClientFixture(self.client_id, token=self.token)
self.SetLinuxClient()
config_lib.CONFIG.Set("Artifacts.knowledge_base",
["NetgroupConfiguration",
"NssCacheLinuxPasswdHomedirs",
"LinuxRelease"])
config_lib.CONFIG.Set("Artifacts.netgroup_filter_regexes",
["^doesntexist$"])
vfs.VFS_HANDLERS[
rdfvalue.PathSpec.PathType.OS] = test_lib.FakeTestDataVFSHandler
client_mock = action_mocks.ActionMock("TransferBuffer", "StatFile", "Find",
"HashBuffer", "ListDirectory",
"FingerprintFile")
for _ in test_lib.TestFlowHelper(
"KnowledgeBaseInitializationFlow", client_mock,
require_complete=False,
client_id=self.client_id, token=self.token):
pass
client = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw")
kb = artifact.GetArtifactKnowledgeBase(client)
self.assertEqual(kb.os_major_version, 14)
self.assertEqual(kb.os_minor_version, 4)
self.assertItemsEqual([x.username for x in kb.users], [])
def testKnowledgeBaseNoOS(self):
"""Check unset OS dies."""
vfs.VFS_HANDLERS[
rdfvalue.PathSpec.PathType.OS] = test_lib.ClientVFSHandlerFixture
client_mock = action_mocks.ActionMock("TransferBuffer", "StatFile", "Find",
"HashBuffer", "ListDirectory",
"FingerprintFile")
self.assertRaises(flow.FlowError, list, test_lib.TestFlowHelper(
"KnowledgeBaseInitializationFlow", client_mock,
client_id=self.client_id, token=self.token))
def testGlobRegistry(self):
"""Test that glob works on registry."""
self.SetupWindowsMocks()
client_mock = action_mocks.ActionMock("TransferBuffer", "StatFile", "Find",
"HashBuffer", "ListDirectory")
paths = ["HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT"
"\\CurrentVersion\\ProfileList\\ProfilesDirectory",
"HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT"
"\\CurrentVersion\\ProfileList\\AllUsersProfile"]
for _ in test_lib.TestFlowHelper(
"Glob", client_mock, paths=paths,
pathtype=rdfvalue.PathSpec.PathType.REGISTRY,
client_id=self.client_id, token=self.token):
pass
path = paths[0].replace("\\", "/")
fd = aff4.FACTORY.Open(self.client_id.Add("registry").Add(path),
token=self.token)
self.assertEqual(fd.__class__.__name__, "VFSFile")
self.assertEqual(fd.Get(fd.Schema.STAT).registry_data.GetValue(),
"%SystemDrive%\\Users")
def testGetDependencies(self):
"""Test that dependencies are calculated correctly."""
self.SetupWindowsMocks()
with utils.Stubber(artifact_lib.ArtifactRegistry, "artifacts", {}):
test_artifacts_file = os.path.join(
config_lib.CONFIG["Test.data_dir"], "test_artifacts.json")
artifact_lib.LoadArtifactsFromFiles([test_artifacts_file])
# No dependencies
args = artifact.CollectArtifactDependenciesArgs(
artifact_list=["DepsHomedir2"])
collect_obj = artifact.CollectArtifactDependencies(None, token=self.token)
collect_obj.args = args
collect_obj.knowledge_base = None
collect_obj.state.Register("all_deps", set())
collect_obj.state.Register("awaiting_deps_artifacts", [])
collect_obj.state.Register("knowledge_base",
rdfvalue.KnowledgeBase(os="Windows"))
no_deps = collect_obj.GetFirstFlowsForCollection()
self.assertItemsEqual(no_deps, [])
self.assertItemsEqual(collect_obj.state.all_deps, [])
self.assertItemsEqual(collect_obj.state.awaiting_deps_artifacts, [])
# Dependency tree with a single starting point
args = artifact.CollectArtifactDependenciesArgs(
artifact_list=["DepsHomedir"])
collect_obj.args = args
no_deps = collect_obj.GetFirstFlowsForCollection()
self.assertItemsEqual(no_deps, ["DepsControlSet"])
self.assertItemsEqual(collect_obj.state.all_deps, ["environ_windir",
"users.username",
"current_control_set"])
self.assertItemsEqual(collect_obj.state.awaiting_deps_artifacts,
["DepsWindir", "DepsWindirRegex"])
def testGetKBDependencies(self):
"""Test that KB dependencies are calculated correctly."""
self.SetupWindowsMocks()
with utils.Stubber(artifact_lib.ArtifactRegistry, "artifacts", {}):
test_artifacts_file = os.path.join(
config_lib.CONFIG["Test.data_dir"], "test_artifacts.json")
artifact_lib.LoadArtifactsFromFiles([test_artifacts_file])
config_lib.CONFIG.Set("Artifacts.knowledge_base", ["DepsParent",
"DepsDesktop",
"DepsHomedir",
"DepsWindir",
"DepsWindirRegex",
"DepsControlSet",
"FakeArtifact"])
config_lib.CONFIG.Set("Artifacts.knowledge_base_additions",
["DepsHomedir2"])
config_lib.CONFIG.Set("Artifacts.knowledge_base_skip", ["DepsWindir"])
config_lib.CONFIG.Set("Artifacts.knowledge_base_heavyweight",
["FakeArtifact"])
args = rdfvalue.KnowledgeBaseInitializationArgs(lightweight=True)
kb_init = artifact.KnowledgeBaseInitializationFlow(None, token=self.token)
kb_init.args = args
kb_init.state.Register("all_deps", set())
kb_init.state.Register("awaiting_deps_artifacts", [])
kb_init.state.Register("knowledge_base",
rdfvalue.KnowledgeBase(os="Windows"))
no_deps = kb_init.GetFirstFlowsForCollection()
self.assertItemsEqual(no_deps, ["DepsControlSet", "DepsHomedir2"])
self.assertItemsEqual(kb_init.state.all_deps, ["users.homedir",
"users.desktop",
"users.username",
"environ_windir",
"current_control_set"])
self.assertItemsEqual(kb_init.state.awaiting_deps_artifacts,
["DepsParent", "DepsDesktop", "DepsHomedir",
"DepsWindirRegex"])
def main(argv):
# Run the full test suite
test_lib.GrrTestProgram(argv=argv)
if __name__ == "__main__":
flags.StartMain(main)
|
{
"content_hash": "06bb73fe889105ba72ba1ffeb466a962",
"timestamp": "",
"source": "github",
"line_count": 751,
"max_line_length": 80,
"avg_line_length": 41.82423435419441,
"alnum_prop": 0.6292581980261064,
"repo_name": "ojengwa/grr",
"id": "eb81a052c2acff07a9d486ad7094dd180b3ae3ff",
"size": "31473",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/artifact_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "7781"
},
{
"name": "Assembly",
"bytes": "227"
},
{
"name": "C++",
"bytes": "55149"
},
{
"name": "CSS",
"bytes": "37269"
},
{
"name": "HTML",
"bytes": "30838"
},
{
"name": "JavaScript",
"bytes": "831938"
},
{
"name": "Makefile",
"bytes": "6524"
},
{
"name": "Protocol Buffer",
"bytes": "170942"
},
{
"name": "Python",
"bytes": "4652186"
},
{
"name": "Ruby",
"bytes": "1131"
},
{
"name": "Shell",
"bytes": "42248"
}
],
"symlink_target": ""
}
|
"""GitLab unused jobs collector."""
from typing import cast
from dateutil.parser import parse
from collector_utilities.functions import days_ago
from collector_utilities.type import Job
from .base import GitLabJobsBase
class GitLabUnusedJobs(GitLabJobsBase):
"""Collector class to get unused job counts from GitLab."""
def _count_job(self, job: Job) -> bool:
"""Return whether the job is unused."""
max_days = int(cast(str, self._parameter("inactive_job_days")))
return super()._count_job(job) and days_ago(parse(job["created_at"])) > max_days
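# Illustrative sketch (not part of the collector): the age test above reduces
# to comparing days_ago(parse(created_at)) against the inactive_job_days
# parameter. With a hypothetical payload and inactive_job_days=90:
#
#     job = {"created_at": "2020-01-01T00:00:00Z"}  # hypothetical payload
#     days_ago(parse(job["created_at"])) > 90       # True once the job is old enough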
|
{
"content_hash": "bed5c15c82f19c619833fb54c25ba26f",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 88,
"avg_line_length": 30.68421052631579,
"alnum_prop": 0.7084048027444254,
"repo_name": "ICTU/quality-time",
"id": "f79c0fe0de66cc198a72b91a4c919125c8d5c15a",
"size": "583",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "components/collector/src/source_collectors/gitlab/unused_jobs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "11325"
},
{
"name": "Dockerfile",
"bytes": "7493"
},
{
"name": "Gherkin",
"bytes": "48447"
},
{
"name": "HTML",
"bytes": "1575"
},
{
"name": "JavaScript",
"bytes": "547159"
},
{
"name": "Python",
"bytes": "1386198"
},
{
"name": "Shell",
"bytes": "19321"
}
],
"symlink_target": ""
}
|
import os
basedir = os.path.abspath(os.path.dirname(__file__))
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir,'app.db')
SQLALCHEMY_MIGRATE_REPO = os.path.join(basedir, 'db_repository')
SECRET_KEY = 'you-will-never-guess'
OPENID_PROVIDERS = [
{'name': 'Google', 'url': 'https://accounts.google.com/o/oauth2/token'},
{'name': 'Yahoo', 'url': 'https://me.yahoo.com'},
{'name': 'AOL', 'url': 'http://openid.aol.com/<username>'},
{'name': 'Flickr', 'url': 'http://www.flickr.com/<username>'},
{'name': 'MyOpenID', 'url': 'https://www.myopenid.com'}]
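# Usage sketch (illustrative, not part of the original config): a Flask app
# typically loads this module with app.config.from_object, e.g.
#
#     from flask import Flask
#     app = Flask(__name__)
#     app.config.from_object('config')
#     app.config['SECRET_KEY']  # 'you-will-never-guess'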
|
{
"content_hash": "55519caad4dc21c73d2bb384723a00ac",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 76,
"avg_line_length": 41.642857142857146,
"alnum_prop": 0.6243567753001715,
"repo_name": "ychen820/microblog",
"id": "f2b14efd0fd50225d4baf1d3b152e6765bcbbde8",
"size": "583",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "config.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "414229"
},
{
"name": "CSS",
"bytes": "257787"
},
{
"name": "Emacs Lisp",
"bytes": "4733"
},
{
"name": "Groff",
"bytes": "1236200"
},
{
"name": "HTML",
"bytes": "2617468"
},
{
"name": "JavaScript",
"bytes": "1106437"
},
{
"name": "Makefile",
"bytes": "15714"
},
{
"name": "Objective-C",
"bytes": "26302"
},
{
"name": "PHP",
"bytes": "2511443"
},
{
"name": "Perl",
"bytes": "1109010"
},
{
"name": "Python",
"bytes": "71588489"
},
{
"name": "R",
"bytes": "548"
},
{
"name": "Shell",
"bytes": "49796"
},
{
"name": "TeX",
"bytes": "3149"
},
{
"name": "VimL",
"bytes": "5645"
}
],
"symlink_target": ""
}
|
from django.apps.registry import Apps
from django.db import models
from django.db.migrations.state import ProjectState, ModelState, InvalidBasesError
from django.test import TestCase
from .models import ModelWithCustomBase
class StateTests(TestCase):
"""
Tests state construction, rendering and modification by operations.
"""
def test_create(self):
"""
Tests making a ProjectState from an Apps
"""
new_apps = Apps(["migrations"])
class Author(models.Model):
name = models.CharField(max_length=255)
bio = models.TextField()
age = models.IntegerField(blank=True, null=True)
class Meta:
app_label = "migrations"
apps = new_apps
unique_together = ["name", "bio"]
index_together = ["bio", "age"]
class AuthorProxy(Author):
class Meta:
app_label = "migrations"
apps = new_apps
proxy = True
ordering = ["name"]
class SubAuthor(Author):
width = models.FloatField(null=True)
class Meta:
app_label = "migrations"
apps = new_apps
class Book(models.Model):
title = models.CharField(max_length=1000)
author = models.ForeignKey(Author)
contributors = models.ManyToManyField(Author)
class Meta:
app_label = "migrations"
apps = new_apps
verbose_name = "tome"
db_table = "test_tome"
project_state = ProjectState.from_apps(new_apps)
author_state = project_state.models['migrations', 'author']
author_proxy_state = project_state.models['migrations', 'authorproxy']
sub_author_state = project_state.models['migrations', 'subauthor']
book_state = project_state.models['migrations', 'book']
self.assertEqual(author_state.app_label, "migrations")
self.assertEqual(author_state.name, "Author")
self.assertEqual([x for x, y in author_state.fields], ["id", "name", "bio", "age"])
self.assertEqual(author_state.fields[1][1].max_length, 255)
self.assertEqual(author_state.fields[2][1].null, False)
self.assertEqual(author_state.fields[3][1].null, True)
self.assertEqual(author_state.options, {"unique_together": set([("name", "bio")]), "index_together": set([("bio", "age")])})
self.assertEqual(author_state.bases, (models.Model, ))
self.assertEqual(book_state.app_label, "migrations")
self.assertEqual(book_state.name, "Book")
self.assertEqual([x for x, y in book_state.fields], ["id", "title", "author", "contributors"])
self.assertEqual(book_state.fields[1][1].max_length, 1000)
self.assertEqual(book_state.fields[2][1].null, False)
self.assertEqual(book_state.fields[3][1].__class__.__name__, "ManyToManyField")
self.assertEqual(book_state.options, {"verbose_name": "tome", "db_table": "test_tome"})
self.assertEqual(book_state.bases, (models.Model, ))
self.assertEqual(author_proxy_state.app_label, "migrations")
self.assertEqual(author_proxy_state.name, "AuthorProxy")
self.assertEqual(author_proxy_state.fields, [])
self.assertEqual(author_proxy_state.options, {"proxy": True, "ordering": ["name"]})
self.assertEqual(author_proxy_state.bases, ("migrations.author", ))
self.assertEqual(sub_author_state.app_label, "migrations")
self.assertEqual(sub_author_state.name, "SubAuthor")
self.assertEqual(len(sub_author_state.fields), 2)
self.assertEqual(sub_author_state.bases, ("migrations.author", ))
def test_render(self):
"""
Tests rendering a ProjectState into an Apps.
"""
project_state = ProjectState()
project_state.add_model_state(ModelState(
"migrations",
"Tag",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=100)),
("hidden", models.BooleanField()),
],
{},
None,
))
project_state.add_model_state(ModelState(
"migrations",
"SubTag",
[
('tag_ptr', models.OneToOneField(
auto_created=True,
primary_key=True,
to_field='id',
serialize=False,
to='migrations.Tag',
)),
("awesome", models.BooleanField()),
],
options={},
bases=("migrations.Tag",),
))
new_apps = project_state.render()
self.assertEqual(new_apps.get_model("migrations", "Tag")._meta.get_field_by_name("name")[0].max_length, 100)
self.assertEqual(new_apps.get_model("migrations", "Tag")._meta.get_field_by_name("hidden")[0].null, False)
self.assertEqual(len(new_apps.get_model("migrations", "SubTag")._meta.local_fields), 2)
def test_render_model_inheritance(self):
class Book(models.Model):
title = models.CharField(max_length=1000)
class Meta:
app_label = "migrations"
apps = Apps()
class Novel(Book):
class Meta:
app_label = "migrations"
apps = Apps()
# First, test rendering individually
apps = Apps(["migrations"])
# We shouldn't be able to render yet
ms = ModelState.from_model(Novel)
with self.assertRaises(InvalidBasesError):
ms.render(apps)
# Once the parent model is in the app registry, it should be fine
ModelState.from_model(Book).render(apps)
ModelState.from_model(Novel).render(apps)
def test_render_model_with_multiple_inheritance(self):
class Foo(models.Model):
class Meta:
app_label = "migrations"
apps = Apps()
class Bar(models.Model):
class Meta:
app_label = "migrations"
apps = Apps()
class FooBar(Foo, Bar):
class Meta:
app_label = "migrations"
apps = Apps()
class AbstractSubFooBar(FooBar):
class Meta:
abstract = True
apps = Apps()
class SubFooBar(AbstractSubFooBar):
class Meta:
app_label = "migrations"
apps = Apps()
apps = Apps(["migrations"])
# We shouldn't be able to render yet
ms = ModelState.from_model(FooBar)
with self.assertRaises(InvalidBasesError):
ms.render(apps)
# Once the parent models are in the app registry, it should be fine
ModelState.from_model(Foo).render(apps)
self.assertSequenceEqual(ModelState.from_model(Foo).bases, [models.Model])
ModelState.from_model(Bar).render(apps)
self.assertSequenceEqual(ModelState.from_model(Bar).bases, [models.Model])
ModelState.from_model(FooBar).render(apps)
self.assertSequenceEqual(ModelState.from_model(FooBar).bases, ['migrations.foo', 'migrations.bar'])
ModelState.from_model(SubFooBar).render(apps)
self.assertSequenceEqual(ModelState.from_model(SubFooBar).bases, ['migrations.foobar'])
def test_render_project_dependencies(self):
"""
Tests that the ProjectState render method correctly renders models
to account for inter-model base dependencies.
"""
new_apps = Apps()
class A(models.Model):
class Meta:
app_label = "migrations"
apps = new_apps
class B(A):
class Meta:
app_label = "migrations"
apps = new_apps
class C(B):
class Meta:
app_label = "migrations"
apps = new_apps
class D(A):
class Meta:
app_label = "migrations"
apps = new_apps
class E(B):
class Meta:
app_label = "migrations"
apps = new_apps
proxy = True
class F(D):
class Meta:
app_label = "migrations"
apps = new_apps
proxy = True
# Make a ProjectState and render it
project_state = ProjectState()
project_state.add_model_state(ModelState.from_model(A))
project_state.add_model_state(ModelState.from_model(B))
project_state.add_model_state(ModelState.from_model(C))
project_state.add_model_state(ModelState.from_model(D))
project_state.add_model_state(ModelState.from_model(E))
project_state.add_model_state(ModelState.from_model(F))
final_apps = project_state.render()
self.assertEqual(len(final_apps.get_models()), 6)
# Now make an invalid ProjectState and make sure it fails
project_state = ProjectState()
project_state.add_model_state(ModelState.from_model(A))
project_state.add_model_state(ModelState.from_model(B))
project_state.add_model_state(ModelState.from_model(C))
project_state.add_model_state(ModelState.from_model(F))
with self.assertRaises(InvalidBasesError):
project_state.render()
def test_equality(self):
"""
Tests that == and != are implemented correctly.
"""
# Test two things that should be equal
project_state = ProjectState()
project_state.add_model_state(ModelState(
"migrations",
"Tag",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=100)),
("hidden", models.BooleanField()),
],
{},
None,
))
other_state = project_state.clone()
self.assertEqual(project_state, project_state)
self.assertEqual(project_state, other_state)
self.assertEqual(project_state != project_state, False)
self.assertEqual(project_state != other_state, False)
# Make a very small change (max_len 99) and see if that affects it
project_state = ProjectState()
project_state.add_model_state(ModelState(
"migrations",
"Tag",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=99)),
("hidden", models.BooleanField()),
],
{},
None,
))
self.assertNotEqual(project_state, other_state)
self.assertEqual(project_state == other_state, False)
def test_dangling_references_throw_error(self):
new_apps = Apps()
class Author(models.Model):
name = models.TextField()
class Meta:
app_label = "migrations"
apps = new_apps
class Book(models.Model):
author = models.ForeignKey(Author)
class Meta:
app_label = "migrations"
apps = new_apps
class Magazine(models.Model):
authors = models.ManyToManyField(Author)
class Meta:
app_label = "migrations"
apps = new_apps
# Make a valid ProjectState and render it
project_state = ProjectState()
project_state.add_model_state(ModelState.from_model(Author))
project_state.add_model_state(ModelState.from_model(Book))
project_state.add_model_state(ModelState.from_model(Magazine))
rendered_state = project_state.render()
self.assertEqual(len(rendered_state.get_models()), 3)
# now make an invalid one with a ForeignKey
project_state = ProjectState()
project_state.add_model_state(ModelState.from_model(Book))
with self.assertRaises(ValueError):
rendered_state = project_state.render()
# and another with ManyToManyField
project_state = ProjectState()
project_state.add_model_state(ModelState.from_model(Magazine))
with self.assertRaises(ValueError):
rendered_state = project_state.render()
def test_real_apps(self):
"""
Tests that including real apps can resolve dangling FK errors.
This test relies on the fact that contenttypes is always loaded.
"""
new_apps = Apps()
class TestModel(models.Model):
ct = models.ForeignKey("contenttypes.ContentType")
class Meta:
app_label = "migrations"
apps = new_apps
# If we just stick it into an empty state it should fail
project_state = ProjectState()
project_state.add_model_state(ModelState.from_model(TestModel))
with self.assertRaises(ValueError):
project_state.render()
# If we include the real app it should succeed
project_state = ProjectState(real_apps=["contenttypes"])
project_state.add_model_state(ModelState.from_model(TestModel))
rendered_state = project_state.render()
self.assertEqual(
len([x for x in rendered_state.get_models() if x._meta.app_label == "migrations"]),
1,
)
def test_ignore_order_wrt(self):
"""
Makes sure ProjectState doesn't include OrderWrt fields when
making from existing models.
"""
new_apps = Apps()
class Author(models.Model):
name = models.TextField()
class Meta:
app_label = "migrations"
apps = new_apps
class Book(models.Model):
author = models.ForeignKey(Author)
class Meta:
app_label = "migrations"
apps = new_apps
order_with_respect_to = "author"
# Make a valid ProjectState and render it
project_state = ProjectState()
project_state.add_model_state(ModelState.from_model(Author))
project_state.add_model_state(ModelState.from_model(Book))
self.assertEqual(
[name for name, field in project_state.models["migrations", "book"].fields],
["id", "author"],
)
class ModelStateTests(TestCase):
def test_custom_model_base(self):
state = ModelState.from_model(ModelWithCustomBase)
self.assertEqual(state.bases, (models.Model,))
def test_bound_field_sanity_check(self):
field = models.CharField(max_length=1)
field.model = models.Model
with self.assertRaisesMessage(ValueError,
'ModelState.fields cannot be bound to a model - "field" is.'):
ModelState('app', 'Model', [('field', field)])
def test_fields_immutability(self):
"""
Tests that rendering a model state doesn't alter its internal fields.
"""
apps = Apps()
field = models.CharField(max_length=1)
state = ModelState('app', 'Model', [('name', field)])
Model = state.render(apps)
self.assertNotEqual(Model._meta.get_field('name'), field)
def test_repr(self):
field = models.CharField(max_length=1)
state = ModelState('app', 'Model', [('name', field)], bases=['app.A', 'app.B', 'app.C'])
self.assertEqual(repr(state), "<ModelState: 'app.Model'>")
project_state = ProjectState()
project_state.add_model_state(state)
with self.assertRaisesMessage(InvalidBasesError, "Cannot resolve bases for [<ModelState: 'app.Model'>]"):
project_state.render()
|
{
"content_hash": "793ed65c88acbaeb50d59ba6316fbbdf",
"timestamp": "",
"source": "github",
"line_count": 432,
"max_line_length": 132,
"avg_line_length": 36.61805555555556,
"alnum_prop": 0.575384031860421,
"repo_name": "dhoffman34/django",
"id": "ebeaffb7873827e0cd0162e689854ff46d9268dd",
"size": "15819",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "tests/migrations/test_state.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
"""DBCore is an abstract database package that forms the basis for beets'
Library.
"""
from __future__ import absolute_import
from .db import Model, Database
from .query import Query, FieldQuery, MatchQuery, AndQuery, OrQuery
from .types import Type
from .queryparse import query_from_strings
from .queryparse import sort_from_strings
from .queryparse import parse_sorted_query
from .query import InvalidQueryError
# flake8: noqa
|
{
"content_hash": "ddf1790ff5384d2045ce8c306313c91e",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 73,
"avg_line_length": 30.857142857142858,
"alnum_prop": 0.7939814814814815,
"repo_name": "kareemallen/beets",
"id": "059f1fc4f1bad3134303d3aa5c1600331fff96c5",
"size": "1103",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "beets/dbcore/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2951"
},
{
"name": "HTML",
"bytes": "3307"
},
{
"name": "JavaScript",
"bytes": "85950"
},
{
"name": "Python",
"bytes": "1642185"
},
{
"name": "Shell",
"bytes": "7413"
}
],
"symlink_target": ""
}
|
import os
import sys
import stat
import operator
import struct
import shutil
#from modulegraph.util import *
from macholib import mach_o
MAGIC = [
struct.pack('!L', getattr(mach_o, 'MH_' + _))
for _ in ['MAGIC', 'CIGAM', 'MAGIC_64', 'CIGAM_64']
]
FAT_MAGIC_BYTES = struct.pack('!L', mach_o.FAT_MAGIC)
MAGIC_LEN = 4
STRIPCMD = ['/usr/bin/strip', '-x', '-S', '-']
try:
unicode
except NameError:
unicode = str
def fsencoding(s, encoding=sys.getfilesystemencoding()):
"""
Ensure the given argument is in filesystem encoding (not unicode)
"""
if isinstance(s, unicode):
s = s.encode(encoding)
return s
def move(src, dst):
"""
move that ensures filesystem encoding of paths
"""
shutil.move(fsencoding(src), fsencoding(dst))
def copy2(src, dst):
"""
copy2 that ensures filesystem encoding of paths
"""
shutil.copy2(fsencoding(src), fsencoding(dst))
def flipwritable(fn, mode=None):
"""
Flip the writability of a file and return the old mode. Returns None
if the file is already writable.
"""
if os.access(fn, os.W_OK):
return None
old_mode = os.stat(fn).st_mode
os.chmod(fn, stat.S_IWRITE | old_mode)
return old_mode
class fileview(object):
"""
A proxy for file-like objects that exposes a given view of a file
"""
def __init__(self, fileobj, start, size):
self._fileobj = fileobj
self._start = start
self._end = start + size
def __repr__(self):
return '<fileview [%d, %d] %r>' % (
self._start, self._end, self._fileobj)
def tell(self):
return self._fileobj.tell() - self._start
def _checkwindow(self, seekto, op):
if not (self._start <= seekto <= self._end):
raise IOError("%s to offset %d is outside window [%d, %d]" % (
op, seekto, self._start, self._end))
def seek(self, offset, whence=0):
seekto = offset
if whence == 0:
seekto += self._start
elif whence == 1:
seekto += self._fileobj.tell()
elif whence == 2:
seekto += self._end
else:
raise IOError("Invalid whence argument to seek: %r" % (whence,))
self._checkwindow(seekto, 'seek')
self._fileobj.seek(seekto)
def write(self, bytes):
here = self._fileobj.tell()
self._checkwindow(here, 'write')
self._checkwindow(here + len(bytes), 'write')
self._fileobj.write(bytes)
def read(self, size=sys.maxsize):
if size < 0:
raise ValueError("Invalid size %s while reading from %s", size, self._fileobj)
here = self._fileobj.tell()
self._checkwindow(here, 'read')
bytes = min(size, self._end - here)
return self._fileobj.read(bytes)
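# Minimal fileview sketch (illustrative, not part of macholib): expose bytes
# 10..19 of a file as if they were a standalone 10-byte file.
#
#     with open('/bin/ls', 'rb') as f:
#         view = fileview(f, 10, 10)
#         view.seek(0)        # moves to absolute offset 10
#         data = view.read()  # returns at most 10 bytes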
def mergecopy(src, dest):
"""
copy2, but only if the destination isn't up to date
"""
if os.path.exists(dest) and os.stat(dest).st_mtime >= os.stat(src).st_mtime:
return
copy2(src, dest)
def mergetree(src, dst, condition=None, copyfn=mergecopy, srcbase=None):
"""
Recursively merge a directory tree using mergecopy().
"""
src = fsencoding(src)
dst = fsencoding(dst)
if srcbase is None:
srcbase = src
names = map(fsencoding, os.listdir(src))
try:
os.makedirs(dst)
except OSError:
pass
errors = []
for name in names:
srcname = os.path.join(src, name)
dstname = os.path.join(dst, name)
if condition is not None and not condition(srcname):
continue
try:
if os.path.islink(srcname):
# XXX: This is naive at best, should check srcbase(?)
realsrc = os.readlink(srcname)
os.symlink(realsrc, dstname)
elif os.path.isdir(srcname):
mergetree(srcname, dstname,
condition=condition, copyfn=copyfn, srcbase=srcbase)
else:
copyfn(srcname, dstname)
except (IOError, os.error) as why:
errors.append((srcname, dstname, why))
if errors:
raise IOError(errors)
def sdk_normalize(filename):
"""
Normalize a path to strip out the SDK portion, normally so that it
can be decided whether it is in a system path or not.
"""
if filename.startswith('/Developer/SDKs/'):
pathcomp = filename.split('/')
del pathcomp[1:4]
filename = '/'.join(pathcomp)
return filename
NOT_SYSTEM_FILES=[]
def in_system_path(filename):
"""
Return True if the file is in a system path
"""
fn = sdk_normalize(os.path.realpath(filename))
if fn.startswith('/usr/local/'):
return False
elif fn.startswith('/System/') or fn.startswith('/usr/'):
if fn in NOT_SYSTEM_FILES:
return False
return True
else:
return False
def has_filename_filter(module):
"""
Return False if the module does not have a filename attribute
"""
return getattr(module, 'filename', None) is not None
def get_magic():
"""
Get a list of valid Mach-O header signatures, not including the fat header
"""
return MAGIC
def is_platform_file(path):
"""
Return True if the file is Mach-O
"""
if not os.path.exists(path) or os.path.islink(path):
return False
# If the header is fat, we need to read into the first arch
with open(path, 'rb') as fileobj:
bytes = fileobj.read(MAGIC_LEN)
if bytes == FAT_MAGIC_BYTES:
# Read in the fat header
fileobj.seek(0)
header = mach_o.fat_header.from_fileobj(fileobj, _endian_='>')
if header.nfat_arch < 1:
return False
# Read in the first fat arch header
arch = mach_o.fat_arch.from_fileobj(fileobj, _endian_='>')
fileobj.seek(arch.offset)
# Read magic off the first header
bytes = fileobj.read(MAGIC_LEN)
for magic in MAGIC:
if bytes == magic:
return True
return False
def iter_platform_files(dst):
"""
Walk a directory and yield each full path that is a Mach-O file
"""
for root, dirs, files in os.walk(dst):
for fn in files:
fn = os.path.join(root, fn)
if is_platform_file(fn):
yield fn
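# Illustrative usage (hypothetical path): collect every Mach-O file in a tree.
#
#     machos = list(iter_platform_files('/Applications/Safari.app'))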
def strip_files(files, argv_max=(256 * 1024)):
"""
Strip a list of files
"""
tostrip = [(fn, flipwritable(fn)) for fn in files]
while tostrip:
cmd = list(STRIPCMD)
flips = []
pathlen = sum([len(s) + 1 for s in cmd])
while pathlen < argv_max:
if not tostrip:
break
added, flip = tostrip.pop()
pathlen += len(added) + 1
cmd.append(added)
flips.append((added, flip))
else:
cmd.pop()
tostrip.append(flips.pop())
os.spawnv(os.P_WAIT, cmd[0], cmd)
for args in flips:
flipwritable(*args)
|
{
"content_hash": "2be71d854b0909bbea72e28e816dea91",
"timestamp": "",
"source": "github",
"line_count": 245,
"max_line_length": 90,
"avg_line_length": 28.820408163265306,
"alnum_prop": 0.5749893782750318,
"repo_name": "lovexiaov/SandwichApp",
"id": "7f954b6f789d42f33251d91ae56db2d29523b214",
"size": "7061",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "venv/lib/python2.7/site-packages/macholib/util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "74"
},
{
"name": "C",
"bytes": "82490"
},
{
"name": "Objective-C",
"bytes": "50940"
},
{
"name": "Python",
"bytes": "1603870"
},
{
"name": "Shell",
"bytes": "5629"
}
],
"symlink_target": ""
}
|
"""
Provides a ``CustomFormatter`` and ``CustomColoredFormatter`` which are enable
to insert ANSI color codes.
"""
from __future__ import absolute_import
import logging
__all__ = [
'CustomFormatter',
'CustomColoredFormatter',
]
# The background is set with 40 plus the number of the color, and the foreground with 30
RED, YELLOW, BLUE, WHITE = 1, 3, 4, 7
# These are the sequences needed to get colored output
RESET_SEQ = "\033[0m"
COLOR_SEQ = "\033[1;%dm"
BOLD_SEQ = "\033[1m"
FORMAT = "{color}{levelname}$RESET:$BOLD{name}$RESET] {message}"
def insert_seqs(message):
return message.replace("$RESET", RESET_SEQ).replace("$BOLD", BOLD_SEQ)
def remove_seqs(message):
return message.replace("$RESET", "").replace("$BOLD", "")
COLORS = {
'DEBUG' : BLUE,
'INFO' : WHITE,
'WARNING' : YELLOW,
'ERROR' : RED,
'CRITICAL' : RED,
}
class CustomFormatter(logging.Formatter):
def __init__(self, fmt=remove_seqs(FORMAT), datefmt=None):
logging.Formatter.__init__(self, fmt, datefmt)
def format(self, record):
if not hasattr(record, "message"):
record.message = record.getMessage()
record.asctime = self.formatTime(record, self.datefmt)
return self._fmt.format(color="", **record.__dict__)
class CustomColoredFormatter(CustomFormatter):
def __init__(self, fmt=insert_seqs(FORMAT), datefmt=None, use_color=True):
CustomFormatter.__init__(self, fmt, datefmt)
self.use_color = use_color
def format(self, record):
levelname = record.levelname
if self.use_color and levelname in COLORS:
record.color = COLOR_SEQ % (30 + COLORS[levelname])
else:
record.color = ""
if not hasattr(record, "message"):
record.message = record.getMessage()
record.asctime = self.formatTime(record, self.datefmt)
return self._fmt.format(**record.__dict__)
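# Minimal usage sketch (illustrative, not part of the original module): attach
# the colored formatter to a stream handler.
#
#     handler = logging.StreamHandler()
#     handler.setFormatter(CustomColoredFormatter())
#     log = logging.getLogger("demo")
#     log.addHandler(handler)
#     log.warning("colored output")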
|
{
"content_hash": "d18c672ecf3852bba152c651aae3094b",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 88,
"avg_line_length": 30.80952380952381,
"alnum_prop": 0.6347243688820196,
"repo_name": "kreczko/rootpy",
"id": "82f3c750bcb443183f43fd96b01319467453e6fd",
"size": "1941",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "rootpy/logger/formatter.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "109"
},
{
"name": "Makefile",
"bytes": "2778"
},
{
"name": "Python",
"bytes": "861080"
},
{
"name": "Shell",
"bytes": "3089"
}
],
"symlink_target": ""
}
|
"""Utilities for working with ``Future`` objects.
Tornado previously provided its own ``Future`` class, but now uses
`asyncio.Future`. This module contains utility functions for working
with `asyncio.Future` in a way that is backwards-compatible with
Tornado's old ``Future`` implementation.
While this module is an important part of Tornado's internal
implementation, applications rarely need to interact with it
directly.
"""
import asyncio
from concurrent import futures
import functools
import sys
import types
from tornado.log import app_log
import typing
from typing import Any, Callable, Optional, Tuple, Union
_T = typing.TypeVar("_T")
class ReturnValueIgnoredError(Exception):
# No longer used; was previously used by @return_future
pass
Future = asyncio.Future
FUTURES = (futures.Future, Future)
def is_future(x: Any) -> bool:
return isinstance(x, FUTURES)
class DummyExecutor(futures.Executor):
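    # Runs the callable synchronously on the calling thread, so the returned
    # Future is already resolved (or holds the exception) when submit() returns.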
def submit(
self, fn: Callable[..., _T], *args: Any, **kwargs: Any
) -> "futures.Future[_T]":
future = futures.Future() # type: futures.Future[_T]
try:
future_set_result_unless_cancelled(future, fn(*args, **kwargs))
except Exception:
future_set_exc_info(future, sys.exc_info())
return future
def shutdown(self, wait: bool = True) -> None:
pass
dummy_executor = DummyExecutor()
def run_on_executor(*args: Any, **kwargs: Any) -> Callable:
"""Decorator to run a synchronous method asynchronously on an executor.
Returns a future.
The executor to be used is determined by the ``executor``
attributes of ``self``. To use a different attribute name, pass a
keyword argument to the decorator::
@run_on_executor(executor='_thread_pool')
def foo(self):
pass
This decorator should not be confused with the similarly-named
`.IOLoop.run_in_executor`. In general, using ``run_in_executor``
when *calling* a blocking method is recommended instead of using
this decorator when *defining* a method. If compatibility with older
versions of Tornado is required, consider defining an executor
and using ``executor.submit()`` at the call site.
.. versionchanged:: 4.2
Added keyword arguments to use alternative attributes.
.. versionchanged:: 5.0
Always uses the current IOLoop instead of ``self.io_loop``.
.. versionchanged:: 5.1
Returns a `.Future` compatible with ``await`` instead of a
`concurrent.futures.Future`.
.. deprecated:: 5.1
The ``callback`` argument is deprecated and will be removed in
6.0. The decorator itself is discouraged in new code but will
not be removed in 6.0.
.. versionchanged:: 6.0
The ``callback`` argument was removed.
"""
# Fully type-checking decorators is tricky, and this one is
# discouraged anyway so it doesn't have all the generic magic.
def run_on_executor_decorator(fn: Callable) -> Callable[..., Future]:
executor = kwargs.get("executor", "executor")
@functools.wraps(fn)
def wrapper(self: Any, *args: Any, **kwargs: Any) -> Future:
async_future = Future() # type: Future
conc_future = getattr(self, executor).submit(fn, self, *args, **kwargs)
chain_future(conc_future, async_future)
return async_future
return wrapper
if args and kwargs:
raise ValueError("cannot combine positional and keyword args")
if len(args) == 1:
return run_on_executor_decorator(args[0])
elif len(args) != 0:
raise ValueError("expected 1 argument, got %d", len(args))
return run_on_executor_decorator
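# Usage sketch (illustrative, assumes the object exposes an ``executor``
# attribute holding a concurrent.futures executor):
#
#     class Worker(object):
#         def __init__(self):
#             self.executor = futures.ThreadPoolExecutor(max_workers=4)
#
#         @run_on_executor
#         def compute(self, x):
#             return x * x  # runs on the pool; the caller awaits the Future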
_NO_RESULT = object()
def chain_future(a: "Future[_T]", b: "Future[_T]") -> None:
"""Chain two futures together so that when one completes, so does the other.
The result (success or failure) of ``a`` will be copied to ``b``, unless
``b`` has already been completed or cancelled by the time ``a`` finishes.
.. versionchanged:: 5.0
Now accepts both Tornado/asyncio `Future` objects and
`concurrent.futures.Future`.
"""
def copy(future: "Future[_T]") -> None:
assert future is a
if b.done():
return
if hasattr(a, "exc_info") and a.exc_info() is not None: # type: ignore
future_set_exc_info(b, a.exc_info()) # type: ignore
elif a.exception() is not None:
b.set_exception(a.exception())
else:
b.set_result(a.result())
if isinstance(a, Future):
future_add_done_callback(a, copy)
else:
# concurrent.futures.Future
from tornado.ioloop import IOLoop
IOLoop.current().add_future(a, copy)
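# Illustrative: bridge a concurrent.futures result into an asyncio Future
# (assumes an IOLoop is running so the copy callback can be scheduled):
#
#     conc = futures.Future()
#     async_fut = Future()
#     chain_future(conc, async_fut)
#     conc.set_result(42)  # async_fut eventually resolves to 42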
def future_set_result_unless_cancelled(
future: "Union[futures.Future[_T], Future[_T]]", value: _T
) -> None:
"""Set the given ``value`` as the `Future`'s result, if not cancelled.
Avoids ``asyncio.InvalidStateError`` when calling ``set_result()`` on
a cancelled `asyncio.Future`.
.. versionadded:: 5.0
"""
if not future.cancelled():
future.set_result(value)
def future_set_exception_unless_cancelled(
future: "Union[futures.Future[_T], Future[_T]]", exc: BaseException
) -> None:
"""Set the given ``exc`` as the `Future`'s exception.
    If the Future is already cancelled, logs the exception instead. If
this logging is not desired, the caller should explicitly check
the state of the Future and call ``Future.set_exception`` instead of
this wrapper.
Avoids ``asyncio.InvalidStateError`` when calling ``set_exception()`` on
a cancelled `asyncio.Future`.
.. versionadded:: 6.0
"""
if not future.cancelled():
future.set_exception(exc)
else:
app_log.error("Exception after Future was cancelled", exc_info=exc)
def future_set_exc_info(
future: "Union[futures.Future[_T], Future[_T]]",
exc_info: Tuple[
Optional[type], Optional[BaseException], Optional[types.TracebackType]
],
) -> None:
"""Set the given ``exc_info`` as the `Future`'s exception.
Understands both `asyncio.Future` and the extensions in older
versions of Tornado to enable better tracebacks on Python 2.
.. versionadded:: 5.0
.. versionchanged:: 6.0
If the future is already cancelled, this function is a no-op.
(previously ``asyncio.InvalidStateError`` would be raised)
"""
if exc_info[1] is None:
raise Exception("future_set_exc_info called with no exception")
future_set_exception_unless_cancelled(future, exc_info[1])
@typing.overload
def future_add_done_callback(
future: "futures.Future[_T]", callback: Callable[["futures.Future[_T]"], None]
) -> None:
pass
@typing.overload # noqa: F811
def future_add_done_callback(
future: "Future[_T]", callback: Callable[["Future[_T]"], None]
) -> None:
pass
def future_add_done_callback( # noqa: F811
future: "Union[futures.Future[_T], Future[_T]]", callback: Callable[..., None]
) -> None:
"""Arrange to call ``callback`` when ``future`` is complete.
``callback`` is invoked with one argument, the ``future``.
If ``future`` is already done, ``callback`` is invoked immediately.
This may differ from the behavior of ``Future.add_done_callback``,
which makes no such guarantee.
.. versionadded:: 5.0
"""
if future.done():
callback(future)
else:
future.add_done_callback(callback)
|
{
"content_hash": "ce488ed6e3ec80b0398c74922b07bd58",
"timestamp": "",
"source": "github",
"line_count": 249,
"max_line_length": 83,
"avg_line_length": 30.2570281124498,
"alnum_prop": 0.6529068224050969,
"repo_name": "TeamSPoon/logicmoo_workspace",
"id": "7638fcfc988b65b287c016a66995ad6bb2b0f9e2",
"size": "8108",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "packs_web/butterfly/lib/python3.7/site-packages/tornado/concurrent.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "342"
},
{
"name": "C",
"bytes": "1"
},
{
"name": "C++",
"bytes": "1"
},
{
"name": "CSS",
"bytes": "126627"
},
{
"name": "HTML",
"bytes": "839172"
},
{
"name": "Java",
"bytes": "11116"
},
{
"name": "JavaScript",
"bytes": "238700"
},
{
"name": "PHP",
"bytes": "42253"
},
{
"name": "Perl 6",
"bytes": "23"
},
{
"name": "Prolog",
"bytes": "440882"
},
{
"name": "PureBasic",
"bytes": "1334"
},
{
"name": "Rich Text Format",
"bytes": "3436542"
},
{
"name": "Roff",
"bytes": "42"
},
{
"name": "Shell",
"bytes": "61603"
},
{
"name": "TeX",
"bytes": "99504"
}
],
"symlink_target": ""
}
|
import json
from http.client import HTTPSConnection
from paytrail import common
class AuthorizationNotDoneError(BaseException): pass
class ConnectAPI:
APINAME='PaytrailConnectAPI'
def __init__(self, apiKey, secret, apiLocation='account.paytrail.com:443', connectionMethod=HTTPSConnection, location=''):
self.apiLocation = apiLocation
self.apiKey = apiKey
self.connectionMethod = connectionMethod
self.location = location
if type(secret) == str:
self.secret = secret.encode('ascii')
else:
self.secret = secret
def _dial(self, method, location, body):
'''Make the authorization HTTP headers and call the API URL.
Returns the standard HTTPResponse object.'''
headers = common.makeHeaders(self.APINAME, common.makeTimestamp(), self.apiKey, self.secret, method, location, body)
conn = self.connectionMethod(self.apiLocation)
conn.request(method, location, body, headers)
return conn.getresponse()
def _requireLocation(self):
if self.location == "":
raise AuthorizationNotDoneError()
def authorize(self, authKey, locale='fi_FI', location='/connectapi/authorizations',
authType='email+smspin', access=['charge', 'deliveryAddress'], validity='singleCharge'):
'''Make an authorize call. The location attribute has to be populated for the other calls
to work, and this call populates it based on the reply value.'''
body = json.dumps({
'authKey': authKey,
'authType': authType,
'access': access,
'validity': validity,
'locale': locale,
})
response = self._dial('POST', location, body)
self.location = response.getheader('Location')
return {'status': response.status, 'reason': response.reason}
def confirmPin(self, authSecret):
'''Confirms the PIN sent by the previous authorize call.'''
self._requireLocation()
body = json.dumps({'authSecret': authSecret})
location = self.location + '/confirmation'
response = self._dial('POST', location, body)
return {'status': response.status, 'reason': response.reason}
def getAddresses(self):
'''Get the addresses linked to the user.'''
self._requireLocation()
response = self._dial('GET', self.location + '/deliveryAddresses', '')
data = json.loads(response.read().decode('utf8'))
return {'status': response.status, 'reason': response.reason, 'addresses': data['deliveryAddresses']}
def charge(self, orderData):
'''Sends the order and charges the user's linked credit card'''
self._requireLocation()
body = json.dumps({'payment': orderData})
response = self._dial('POST', self.location + '/charges', body)
data = response.read().decode('utf8')
if data:
data = json.loads(data)
return {'status': response.status, 'reason': response.reason, 'payment': data['payment']}
else:
return {'status': response.status, 'reason': response.reason}
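# Illustrative end-to-end flow (hypothetical credentials and order payload;
# authKey is assumed here to be the customer's email for the email+smspin
# auth type):
#
#     api = ConnectAPI('MERCHANT_API_KEY', 'MERCHANT_SECRET')
#     api.authorize('customer@example.com')  # triggers the SMS PIN
#     api.confirmPin('1234')                 # PIN received by the customer
#     addresses = api.getAddresses()
#     result = api.charge({'orderNumber': '1'})  # hypothetical order data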
|
{
"content_hash": "70ecaebf7a08fa42f477997132bcfcf0",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 126,
"avg_line_length": 35.98863636363637,
"alnum_prop": 0.633722766024629,
"repo_name": "paytrail/paytrail",
"id": "69113c01c6803c0c2c71b9b48998f16c6563c09f",
"size": "3167",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/paytrail/connectapi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9909"
}
],
"symlink_target": ""
}
|
import email.parser
import itertools
import random
import time
import unittest
from collections import defaultdict
from contextlib import contextmanager
import json
from hashlib import md5
import mock
from eventlet import Timeout
from six import BytesIO
from six.moves import range
import swift
from swift.common import utils, swob, exceptions
from swift.common.header_key_dict import HeaderKeyDict
from swift.proxy import server as proxy_server
from swift.proxy.controllers import obj
from swift.proxy.controllers.base import get_info as _real_get_info
from swift.common.storage_policy import POLICIES, ECDriverError, StoragePolicy
from test.unit import FakeRing, FakeMemcache, fake_http_connect, \
debug_logger, patch_policies, SlowBody, FakeStatus
from test.unit.proxy.test_server import node_error_count
def unchunk_body(chunked_body):
body = ''
remaining = chunked_body
while remaining:
hex_length, remaining = remaining.split('\r\n', 1)
length = int(hex_length, 16)
body += remaining[:length]
remaining = remaining[length + 2:]
return body
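# Worked example (illustrative): a body of '5\r\nhello\r\n0\r\n\r\n'
# decodes to 'hello'.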
@contextmanager
def set_http_connect(*args, **kwargs):
old_connect = swift.proxy.controllers.base.http_connect
new_connect = fake_http_connect(*args, **kwargs)
try:
swift.proxy.controllers.base.http_connect = new_connect
swift.proxy.controllers.obj.http_connect = new_connect
swift.proxy.controllers.account.http_connect = new_connect
swift.proxy.controllers.container.http_connect = new_connect
yield new_connect
left_over_status = list(new_connect.code_iter)
if left_over_status:
raise AssertionError('left over status %r' % left_over_status)
finally:
swift.proxy.controllers.base.http_connect = old_connect
swift.proxy.controllers.obj.http_connect = old_connect
swift.proxy.controllers.account.http_connect = old_connect
swift.proxy.controllers.container.http_connect = old_connect
class PatchedObjControllerApp(proxy_server.Application):
"""
This patch is just a hook over the proxy server's __call__ to ensure
that calls to get_info will return the stubbed value for
container_info if it's a container info call.
"""
container_info = {}
per_container_info = {}
def __call__(self, *args, **kwargs):
def _fake_get_info(app, env, account, container=None, **kwargs):
if container:
if container in self.per_container_info:
return self.per_container_info[container]
return self.container_info
else:
return _real_get_info(app, env, account, container, **kwargs)
mock_path = 'swift.proxy.controllers.base.get_info'
with mock.patch(mock_path, new=_fake_get_info):
return super(
PatchedObjControllerApp, self).__call__(*args, **kwargs)
class BaseObjectControllerMixin(object):
container_info = {
'write_acl': None,
'read_acl': None,
'storage_policy': None,
'sync_key': None,
'versions': None,
}
# this needs to be set on the test case
controller_cls = None
def setUp(self):
# setup fake rings with handoffs
for policy in POLICIES:
policy.object_ring.max_more_nodes = policy.object_ring.replicas
self.logger = debug_logger('proxy-server')
self.logger.thread_locals = ('txn1', '127.0.0.2')
self.app = PatchedObjControllerApp(
None, FakeMemcache(), account_ring=FakeRing(),
container_ring=FakeRing(), logger=self.logger)
# you can over-ride the container_info just by setting it on the app
self.app.container_info = dict(self.container_info)
# default policy and ring references
self.policy = POLICIES.default
self.obj_ring = self.policy.object_ring
self._ts_iter = (utils.Timestamp(t) for t in
itertools.count(int(time.time())))
def ts(self):
return next(self._ts_iter)
def replicas(self, policy=None):
policy = policy or POLICIES.default
return policy.object_ring.replicas
def quorum(self, policy=None):
policy = policy or POLICIES.default
return policy.quorum
def test_iter_nodes_local_first_noops_when_no_affinity(self):
# this test needs a stable node order - most don't
self.app.sort_nodes = lambda l: l
controller = self.controller_cls(
self.app, 'a', 'c', 'o')
self.app.write_affinity_is_local_fn = None
object_ring = self.app.get_object_ring(None)
all_nodes = object_ring.get_part_nodes(1)
all_nodes.extend(object_ring.get_more_nodes(1))
local_first_nodes = list(controller.iter_nodes_local_first(
object_ring, 1))
self.maxDiff = None
self.assertEqual(all_nodes, local_first_nodes)
def test_iter_nodes_local_first_moves_locals_first(self):
controller = self.controller_cls(
self.app, 'a', 'c', 'o')
self.app.write_affinity_is_local_fn = (
lambda node: node['region'] == 1)
# we'll write to one more than replica count local nodes
self.app.write_affinity_node_count = lambda r: r + 1
object_ring = self.app.get_object_ring(None)
# make our fake ring have plenty of nodes, and not get limited
# artificially by the proxy max request node count
object_ring.max_more_nodes = 100000
# nothing magic about * 2 + 3, just a way to make it bigger
self.app.request_node_count = lambda r: r * 2 + 3
all_nodes = object_ring.get_part_nodes(1)
all_nodes.extend(object_ring.get_more_nodes(1))
# limit to the number we're going to look at in this request
nodes_requested = self.app.request_node_count(object_ring.replicas)
all_nodes = all_nodes[:nodes_requested]
# make sure we have enough local nodes (sanity)
all_local_nodes = [n for n in all_nodes if
self.app.write_affinity_is_local_fn(n)]
self.assertTrue(len(all_local_nodes) >= self.replicas() + 1)
# finally, create the local_first_nodes iter and flatten it out
local_first_nodes = list(controller.iter_nodes_local_first(
object_ring, 1))
# the local nodes move up in the ordering
self.assertEqual([1] * (self.replicas() + 1), [
node['region'] for node in local_first_nodes[
:self.replicas() + 1]])
# we don't skip any nodes
self.assertEqual(len(all_nodes), len(local_first_nodes))
self.assertEqual(sorted(all_nodes), sorted(local_first_nodes))
def test_iter_nodes_local_first_best_effort(self):
controller = self.controller_cls(
self.app, 'a', 'c', 'o')
self.app.write_affinity_is_local_fn = (
lambda node: node['region'] == 1)
object_ring = self.app.get_object_ring(None)
all_nodes = object_ring.get_part_nodes(1)
all_nodes.extend(object_ring.get_more_nodes(1))
local_first_nodes = list(controller.iter_nodes_local_first(
object_ring, 1))
# we won't have quite enough local nodes...
self.assertEqual(len(all_nodes), self.replicas() +
POLICIES.default.object_ring.max_more_nodes)
all_local_nodes = [n for n in all_nodes if
self.app.write_affinity_is_local_fn(n)]
self.assertEqual(len(all_local_nodes), self.replicas())
# but the local nodes we do have are at the front of the local iter
first_n_local_first_nodes = local_first_nodes[:len(all_local_nodes)]
self.assertEqual(sorted(all_local_nodes),
sorted(first_n_local_first_nodes))
# but we *still* don't *skip* any nodes
self.assertEqual(len(all_nodes), len(local_first_nodes))
self.assertEqual(sorted(all_nodes), sorted(local_first_nodes))
def test_connect_put_node_timeout(self):
controller = self.controller_cls(
self.app, 'a', 'c', 'o')
self.app.conn_timeout = 0.05
with set_http_connect(slow_connect=True):
nodes = [dict(ip='', port='', device='')]
res = controller._connect_put_node(nodes, '', '', {}, ('', ''))
self.assertTrue(res is None)
def test_DELETE_simple(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
codes = [204] * self.replicas()
with set_http_connect(*codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 204)
def test_DELETE_missing_one(self):
# Obviously this test doesn't work if we're testing 1 replica.
# In that case, we don't have any failovers to check.
if self.replicas() == 1:
return
req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
codes = [404] + [204] * (self.replicas() - 1)
random.shuffle(codes)
with set_http_connect(*codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 204)
def test_DELETE_not_found(self):
# Obviously this test doesn't work if we're testing 1 replica.
# In that case, we don't have any failovers to check.
if self.replicas() == 1:
return
req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
codes = [404] * (self.replicas() - 1) + [204]
with set_http_connect(*codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 404)
def test_DELETE_mostly_found(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
mostly_204s = [204] * self.quorum()
codes = mostly_204s + [404] * (self.replicas() - len(mostly_204s))
self.assertEqual(len(codes), self.replicas())
with set_http_connect(*codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 204)
def test_DELETE_mostly_not_found(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
mostly_404s = [404] * self.quorum()
codes = mostly_404s + [204] * (self.replicas() - len(mostly_404s))
self.assertEqual(len(codes), self.replicas())
with set_http_connect(*codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 404)
def test_DELETE_half_not_found_statuses(self):
self.obj_ring.set_replicas(4)
req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
with set_http_connect(404, 204, 404, 204):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 204)
def test_DELETE_half_not_found_headers_and_body(self):
# Transformed responses have bogus bodies and headers, so make sure we
# send the client headers and body from a real node's response.
self.obj_ring.set_replicas(4)
status_codes = (404, 404, 204, 204)
bodies = ('not found', 'not found', '', '')
headers = [{}, {}, {'Pick-Me': 'yes'}, {'Pick-Me': 'yes'}]
req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
with set_http_connect(*status_codes, body_iter=bodies,
headers=headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers.get('Pick-Me'), 'yes')
self.assertEqual(resp.body, '')
def test_DELETE_handoff(self):
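        # a 507 from one primary should push the DELETE on to a handoff node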
req = swift.common.swob.Request.blank('/v1/a/c/o', method='DELETE')
codes = [204] * self.replicas()
with set_http_connect(507, *codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 204)
def test_POST_non_int_delete_after(self):
t = str(int(time.time() + 100)) + '.1'
req = swob.Request.blank('/v1/a/c/o', method='POST',
headers={'Content-Type': 'foo/bar',
'X-Delete-After': t})
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 400)
self.assertEqual('Non-integer X-Delete-After', resp.body)
def test_PUT_non_int_delete_after(self):
t = str(int(time.time() + 100)) + '.1'
req = swob.Request.blank('/v1/a/c/o', method='PUT', body='',
headers={'Content-Type': 'foo/bar',
'X-Delete-After': t})
with set_http_connect():
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 400)
self.assertEqual('Non-integer X-Delete-After', resp.body)
def test_POST_negative_delete_after(self):
req = swob.Request.blank('/v1/a/c/o', method='POST',
headers={'Content-Type': 'foo/bar',
'X-Delete-After': '-60'})
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 400)
self.assertEqual('X-Delete-After in past', resp.body)
def test_PUT_negative_delete_after(self):
req = swob.Request.blank('/v1/a/c/o', method='PUT', body='',
headers={'Content-Type': 'foo/bar',
'X-Delete-After': '-60'})
with set_http_connect():
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 400)
self.assertEqual('X-Delete-After in past', resp.body)
def test_POST_delete_at_non_integer(self):
t = str(int(time.time() + 100)) + '.1'
req = swob.Request.blank('/v1/a/c/o', method='POST',
headers={'Content-Type': 'foo/bar',
'X-Delete-At': t})
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 400)
self.assertEqual('Non-integer X-Delete-At', resp.body)
def test_PUT_delete_at_non_integer(self):
t = str(int(time.time() - 100)) + '.1'
req = swob.Request.blank('/v1/a/c/o', method='PUT', body='',
headers={'Content-Type': 'foo/bar',
'X-Delete-At': t})
with set_http_connect():
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 400)
self.assertEqual('Non-integer X-Delete-At', resp.body)
def test_POST_delete_at_in_past(self):
t = str(int(time.time() - 100))
req = swob.Request.blank('/v1/a/c/o', method='POST',
headers={'Content-Type': 'foo/bar',
'X-Delete-At': t})
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 400)
self.assertEqual('X-Delete-At in past', resp.body)
def test_PUT_delete_at_in_past(self):
t = str(int(time.time() - 100))
req = swob.Request.blank('/v1/a/c/o', method='PUT', body='',
headers={'Content-Type': 'foo/bar',
'X-Delete-At': t})
with set_http_connect():
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 400)
self.assertEqual('X-Delete-At in past', resp.body)
def test_HEAD_simple(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='HEAD')
with set_http_connect(200):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertIn('Accept-Ranges', resp.headers)
def test_HEAD_x_newest(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='HEAD',
headers={'X-Newest': 'true'})
with set_http_connect(*([200] * self.replicas())):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
def test_HEAD_x_newest_different_timestamps(self):
req = swob.Request.blank('/v1/a/c/o', method='HEAD',
headers={'X-Newest': 'true'})
ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
timestamps = [next(ts) for i in range(self.replicas())]
newest_timestamp = timestamps[-1]
random.shuffle(timestamps)
backend_response_headers = [{
'X-Backend-Timestamp': t.internal,
'X-Timestamp': t.normal
} for t in timestamps]
with set_http_connect(*([200] * self.replicas()),
headers=backend_response_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['x-timestamp'], newest_timestamp.normal)
def test_HEAD_x_newest_with_two_vector_timestamps(self):
req = swob.Request.blank('/v1/a/c/o', method='HEAD',
headers={'X-Newest': 'true'})
ts = (utils.Timestamp(time.time(), offset=offset)
for offset in itertools.count())
timestamps = [next(ts) for i in range(self.replicas())]
newest_timestamp = timestamps[-1]
random.shuffle(timestamps)
backend_response_headers = [{
'X-Backend-Timestamp': t.internal,
'X-Timestamp': t.normal
} for t in timestamps]
with set_http_connect(*([200] * self.replicas()),
headers=backend_response_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['x-backend-timestamp'],
newest_timestamp.internal)
def test_HEAD_x_newest_with_some_missing(self):
req = swob.Request.blank('/v1/a/c/o', method='HEAD',
headers={'X-Newest': 'true'})
ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
request_count = self.app.request_node_count(self.obj_ring.replicas)
backend_response_headers = [{
'x-timestamp': next(ts).normal,
} for i in range(request_count)]
responses = [404] * (request_count - 1)
responses.append(200)
request_log = []
def capture_requests(ip, port, device, part, method, path,
headers=None, **kwargs):
req = {
'ip': ip,
'port': port,
'device': device,
'part': part,
'method': method,
'path': path,
'headers': headers,
}
request_log.append(req)
with set_http_connect(*responses,
headers=backend_response_headers,
give_connect=capture_requests):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
for req in request_log:
self.assertEqual(req['method'], 'HEAD')
self.assertEqual(req['path'], '/a/c/o')
def test_container_sync_delete(self):
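        # every node reports a newer on-disk timestamp via its 409, so the
        # proxy passes the conflict straight back to the sync client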
ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
test_indexes = [None] + [int(p) for p in POLICIES]
for policy_index in test_indexes:
req = swob.Request.blank(
'/v1/a/c/o', method='DELETE', headers={
'X-Timestamp': next(ts).internal})
codes = [409] * self.obj_ring.replicas
ts_iter = itertools.repeat(next(ts).internal)
with set_http_connect(*codes, timestamps=ts_iter):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 409)
def test_PUT_requires_length(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT')
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 411)
def test_container_update_backend_requests(self):
for policy in POLICIES:
req = swift.common.swob.Request.blank(
'/v1/a/c/o', method='PUT',
headers={'Content-Length': '0',
'X-Backend-Storage-Policy-Index': int(policy)})
controller = self.controller_cls(self.app, 'a', 'c', 'o')
# This is the number of container updates we're doing, simulating
# 1 to 15 container replicas.
for num_containers in range(1, 16):
containers = [{'ip': '1.0.0.%s' % i,
'port': '60%s' % str(i).zfill(2),
'device': 'sdb'} for i in range(num_containers)]
backend_headers = controller._backend_requests(
req, self.replicas(policy), 1, containers)
# how many of the backend headers have a container update
container_updates = len(
[headers for headers in backend_headers
if 'X-Container-Partition' in headers])
if num_containers <= self.quorum(policy):
                    # filling case: pad updates out to quorum + 1
expected = min(self.quorum(policy) + 1,
self.replicas(policy))
else:
# container updates >= object replicas
expected = min(num_containers,
self.replicas(policy))
self.assertEqual(container_updates, expected)
# end of BaseObjectControllerMixin
@patch_policies()
class TestReplicatedObjController(BaseObjectControllerMixin,
unittest.TestCase):
controller_cls = obj.ReplicatedObjectController
def test_PUT_simple(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT')
req.headers['content-length'] = '0'
with set_http_connect(201, 201, 201):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
def test_txn_id_logging_on_PUT(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT')
self.app.logger.txn_id = req.environ['swift.trans_id'] = 'test-txn-id'
req.headers['content-length'] = '0'
# we capture stdout since the debug log formatter prints the formatted
# message to stdout
stdout = BytesIO()
with set_http_connect((100, Timeout()), 503, 503), \
mock.patch('sys.stdout', stdout):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 503)
for line in stdout.getvalue().splitlines():
self.assertIn('test-txn-id', line)
self.assertIn('Trying to get final status of PUT to',
stdout.getvalue())
def test_PUT_empty_bad_etag(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT')
req.headers['Content-Length'] = '0'
req.headers['Etag'] = '"catbus"'
# The 2-tuple here makes getexpect() return 422, not 100. For
# objects that are >0 bytes, you get a 100 Continue and then a 422
# Unprocessable Entity after sending the body. For zero-byte
# objects, though, you get the 422 right away.
codes = [FakeStatus((422, 422))
for _junk in range(self.replicas())]
with set_http_connect(*codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 422)
def test_PUT_if_none_match(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT')
req.headers['if-none-match'] = '*'
req.headers['content-length'] = '0'
with set_http_connect(201, 201, 201):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
def test_PUT_if_none_match_denied(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT')
req.headers['if-none-match'] = '*'
req.headers['content-length'] = '0'
with set_http_connect(201, 412, 201):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 412)
def test_PUT_if_none_match_not_star(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT')
req.headers['if-none-match'] = 'somethingelse'
req.headers['content-length'] = '0'
with set_http_connect():
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 400)
def test_PUT_connect_exceptions(self):
object_ring = self.app.get_object_ring(None)
self.app.sort_nodes = lambda n: n # disable shuffle
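        # helper: reset error limiting, PUT against the given backend
        # statuses, and assert the status returned to the client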
def test_status_map(statuses, expected):
self.app._error_limiting = {}
req = swob.Request.blank('/v1/a/c/o.jpg', method='PUT',
body='test body')
with set_http_connect(*statuses):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, expected)
base_status = [201] * 3
# test happy path
test_status_map(list(base_status), 201)
for i in range(3):
self.assertEqual(node_error_count(
self.app, object_ring.devs[i]), 0)
# single node errors and test isolation
for i in range(3):
status_list = list(base_status)
status_list[i] = 503
test_status_map(status_list, 201)
for j in range(3):
self.assertEqual(node_error_count(
self.app, object_ring.devs[j]), 1 if j == i else 0)
# connect errors
test_status_map((201, Timeout(), 201, 201), 201)
self.assertEqual(node_error_count(
self.app, object_ring.devs[1]), 1)
test_status_map((Exception('kaboom!'), 201, 201, 201), 201)
self.assertEqual(node_error_count(
self.app, object_ring.devs[0]), 1)
# expect errors
test_status_map((201, 201, (503, None), 201), 201)
self.assertEqual(node_error_count(
self.app, object_ring.devs[2]), 1)
test_status_map(((507, None), 201, 201, 201), 201)
self.assertEqual(
node_error_count(self.app, object_ring.devs[0]),
self.app.error_suppression_limit + 1)
# response errors
test_status_map(((100, Timeout()), 201, 201), 201)
self.assertEqual(
node_error_count(self.app, object_ring.devs[0]), 1)
test_status_map((201, 201, (100, Exception())), 201)
self.assertEqual(
node_error_count(self.app, object_ring.devs[2]), 1)
test_status_map((201, (100, 507), 201), 201)
self.assertEqual(
node_error_count(self.app, object_ring.devs[1]),
self.app.error_suppression_limit + 1)
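    # The next four tests pin down how failures while reading the client's
    # upload body map onto client-facing statuses: ChunkReadError -> 499,
    # ChunkReadTimeout -> 408, Timeout -> 499, any other exception -> 500.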
def test_PUT_error_during_transfer_data(self):
class FakeReader(object):
def read(self, size):
raise exceptions.ChunkReadError('exception message')
req = swob.Request.blank('/v1/a/c/o.jpg', method='PUT',
body='test body')
req.environ['wsgi.input'] = FakeReader()
req.headers['content-length'] = '6'
with set_http_connect(201, 201, 201):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 499)
def test_PUT_chunkreadtimeout_during_transfer_data(self):
class FakeReader(object):
def read(self, size):
raise exceptions.ChunkReadTimeout()
req = swob.Request.blank('/v1/a/c/o.jpg', method='PUT',
body='test body')
req.environ['wsgi.input'] = FakeReader()
req.headers['content-length'] = '6'
with set_http_connect(201, 201, 201):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 408)
def test_PUT_timeout_during_transfer_data(self):
class FakeReader(object):
def read(self, size):
raise Timeout()
req = swob.Request.blank('/v1/a/c/o.jpg', method='PUT',
body='test body')
req.environ['wsgi.input'] = FakeReader()
req.headers['content-length'] = '6'
with set_http_connect(201, 201, 201):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 499)
def test_PUT_exception_during_transfer_data(self):
class FakeReader(object):
def read(self, size):
raise Exception('exception message')
req = swob.Request.blank('/v1/a/c/o.jpg', method='PUT',
body='test body')
req.environ['wsgi.input'] = FakeReader()
req.headers['content-length'] = '6'
with set_http_connect(201, 201, 201):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 500)
def test_GET_simple(self):
req = swift.common.swob.Request.blank('/v1/a/c/o')
with set_http_connect(200):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertIn('Accept-Ranges', resp.headers)
def test_GET_transfer_encoding_chunked(self):
req = swift.common.swob.Request.blank('/v1/a/c/o')
with set_http_connect(200, headers={'transfer-encoding': 'chunked'}):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['Transfer-Encoding'], 'chunked')
def test_GET_error(self):
req = swift.common.swob.Request.blank('/v1/a/c/o')
self.app.logger.txn_id = req.environ['swift.trans_id'] = 'my-txn-id'
stdout = BytesIO()
with set_http_connect(503, 200), \
mock.patch('sys.stdout', stdout):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
for line in stdout.getvalue().splitlines():
self.assertIn('my-txn-id', line)
self.assertIn('From Object Server', stdout.getvalue())
def test_GET_handoff(self):
req = swift.common.swob.Request.blank('/v1/a/c/o')
codes = [503] * self.obj_ring.replicas + [200]
with set_http_connect(*codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
def test_GET_not_found(self):
req = swift.common.swob.Request.blank('/v1/a/c/o')
codes = [404] * (self.obj_ring.replicas +
self.obj_ring.max_more_nodes)
with set_http_connect(*codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 404)
def test_POST_as_COPY_simple(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='POST')
get_resp = [200] * self.obj_ring.replicas + \
[404] * self.obj_ring.max_more_nodes
put_resp = [201] * self.obj_ring.replicas
codes = get_resp + put_resp
with set_http_connect(*codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 202)
self.assertEqual(req.environ['QUERY_STRING'], '')
self.assertTrue('swift.post_as_copy' in req.environ)
def test_POST_as_COPY_static_large_object(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='POST')
get_resp = [200] * self.obj_ring.replicas + \
[404] * self.obj_ring.max_more_nodes
put_resp = [201] * self.obj_ring.replicas
codes = get_resp + put_resp
slo_headers = \
[{'X-Static-Large-Object': True}] * self.obj_ring.replicas
get_headers = slo_headers + [{}] * (len(codes) - len(slo_headers))
headers = {'headers': get_headers}
with set_http_connect(*codes, **headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 202)
self.assertEqual(req.environ['QUERY_STRING'], '')
self.assertTrue('swift.post_as_copy' in req.environ)
def test_POST_delete_at(self):
t = str(int(time.time() + 100))
req = swob.Request.blank('/v1/a/c/o', method='POST',
headers={'Content-Type': 'foo/bar',
'X-Delete-At': t})
post_headers = []
def capture_headers(ip, port, device, part, method, path, headers,
**kwargs):
if method == 'POST':
post_headers.append(headers)
x_newest_responses = [200] * self.obj_ring.replicas + \
[404] * self.obj_ring.max_more_nodes
post_resp = [200] * self.obj_ring.replicas
codes = x_newest_responses + post_resp
with set_http_connect(*codes, give_connect=capture_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(req.environ['QUERY_STRING'], '') # sanity
self.assertTrue('swift.post_as_copy' in req.environ)
for given_headers in post_headers:
self.assertEqual(given_headers.get('X-Delete-At'), t)
self.assertTrue('X-Delete-At-Host' in given_headers)
self.assertTrue('X-Delete-At-Device' in given_headers)
self.assertTrue('X-Delete-At-Partition' in given_headers)
self.assertTrue('X-Delete-At-Container' in given_headers)
def test_PUT_delete_at(self):
t = str(int(time.time() + 100))
req = swob.Request.blank('/v1/a/c/o', method='PUT', body='',
headers={'Content-Type': 'foo/bar',
'X-Delete-At': t})
put_headers = []
def capture_headers(ip, port, device, part, method, path, headers,
**kwargs):
if method == 'PUT':
put_headers.append(headers)
codes = [201] * self.obj_ring.replicas
with set_http_connect(*codes, give_connect=capture_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
for given_headers in put_headers:
self.assertEqual(given_headers.get('X-Delete-At'), t)
self.assertTrue('X-Delete-At-Host' in given_headers)
self.assertTrue('X-Delete-At-Device' in given_headers)
self.assertTrue('X-Delete-At-Partition' in given_headers)
self.assertTrue('X-Delete-At-Container' in given_headers)
def test_PUT_converts_delete_after_to_delete_at(self):
req = swob.Request.blank('/v1/a/c/o', method='PUT', body='',
headers={'Content-Type': 'foo/bar',
'X-Delete-After': '60'})
put_headers = []
def capture_headers(ip, port, device, part, method, path, headers,
**kwargs):
if method == 'PUT':
put_headers.append(headers)
codes = [201] * self.obj_ring.replicas
t = time.time()
with set_http_connect(*codes, give_connect=capture_headers):
with mock.patch('time.time', lambda: t):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
expected_delete_at = str(int(t) + 60)
for given_headers in put_headers:
self.assertEqual(given_headers.get('X-Delete-At'),
expected_delete_at)
self.assertTrue('X-Delete-At-Host' in given_headers)
self.assertTrue('X-Delete-At-Device' in given_headers)
self.assertTrue('X-Delete-At-Partition' in given_headers)
self.assertTrue('X-Delete-At-Container' in given_headers)
def test_container_sync_put_x_timestamp_not_found(self):
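        # a container-sync PUT carrying an explicit X-Timestamp is created
        # normally when no backend node has a newer copy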
test_indexes = [None] + [int(p) for p in POLICIES]
for policy_index in test_indexes:
self.app.container_info['storage_policy'] = policy_index
put_timestamp = utils.Timestamp(time.time()).normal
req = swob.Request.blank(
'/v1/a/c/o', method='PUT', headers={
'Content-Length': 0,
'X-Timestamp': put_timestamp})
codes = [201] * self.obj_ring.replicas
with set_http_connect(*codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
def test_container_sync_put_x_timestamp_match(self):
test_indexes = [None] + [int(p) for p in POLICIES]
for policy_index in test_indexes:
self.app.container_info['storage_policy'] = policy_index
put_timestamp = utils.Timestamp(time.time()).normal
req = swob.Request.blank(
'/v1/a/c/o', method='PUT', headers={
'Content-Length': 0,
'X-Timestamp': put_timestamp})
ts_iter = itertools.repeat(put_timestamp)
codes = [409] * self.obj_ring.replicas
with set_http_connect(*codes, timestamps=ts_iter):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 202)
def test_container_sync_put_x_timestamp_older(self):
ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
test_indexes = [None] + [int(p) for p in POLICIES]
for policy_index in test_indexes:
self.app.container_info['storage_policy'] = policy_index
req = swob.Request.blank(
'/v1/a/c/o', method='PUT', headers={
'Content-Length': 0,
'X-Timestamp': next(ts).internal})
ts_iter = itertools.repeat(next(ts).internal)
codes = [409] * self.obj_ring.replicas
with set_http_connect(*codes, timestamps=ts_iter):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 202)
def test_container_sync_put_x_timestamp_newer(self):
ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
test_indexes = [None] + [int(p) for p in POLICIES]
for policy_index in test_indexes:
orig_timestamp = next(ts).internal
req = swob.Request.blank(
'/v1/a/c/o', method='PUT', headers={
'Content-Length': 0,
'X-Timestamp': next(ts).internal})
ts_iter = itertools.repeat(orig_timestamp)
codes = [201] * self.obj_ring.replicas
with set_http_connect(*codes, timestamps=ts_iter):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
def test_put_x_timestamp_conflict(self):
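        # one node reports a newer timestamp via 409; combined with 201s
        # from the rest, the PUT is still treated as accepted (202)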
ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
req = swob.Request.blank(
'/v1/a/c/o', method='PUT', headers={
'Content-Length': 0,
'X-Timestamp': next(ts).internal})
ts_iter = iter([next(ts).internal, None, None])
codes = [409] + [201] * (self.obj_ring.replicas - 1)
with set_http_connect(*codes, timestamps=ts_iter):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 202)
def test_put_x_timestamp_conflict_with_missing_backend_timestamp(self):
ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
req = swob.Request.blank(
'/v1/a/c/o', method='PUT', headers={
'Content-Length': 0,
'X-Timestamp': next(ts).internal})
ts_iter = iter([None, None, None])
codes = [409] * self.obj_ring.replicas
with set_http_connect(*codes, timestamps=ts_iter):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 202)
def test_put_x_timestamp_conflict_with_other_weird_success_response(self):
ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
req = swob.Request.blank(
'/v1/a/c/o', method='PUT', headers={
'Content-Length': 0,
'X-Timestamp': next(ts).internal})
ts_iter = iter([next(ts).internal, None, None])
codes = [409] + [(201, 'notused')] * (self.obj_ring.replicas - 1)
with set_http_connect(*codes, timestamps=ts_iter):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 202)
def test_put_x_timestamp_conflict_with_if_none_match(self):
ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
req = swob.Request.blank(
'/v1/a/c/o', method='PUT', headers={
'Content-Length': 0,
'If-None-Match': '*',
'X-Timestamp': next(ts).internal})
ts_iter = iter([next(ts).internal, None, None])
codes = [409] + [(412, 'notused')] * (self.obj_ring.replicas - 1)
with set_http_connect(*codes, timestamps=ts_iter):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 412)
def test_container_sync_put_x_timestamp_race(self):
ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
test_indexes = [None] + [int(p) for p in POLICIES]
for policy_index in test_indexes:
put_timestamp = next(ts).internal
req = swob.Request.blank(
'/v1/a/c/o', method='PUT', headers={
'Content-Length': 0,
'X-Timestamp': put_timestamp})
            # the object nodes respond 409 because another in-flight request
            # finished and the on-disk timestamp now equals this request's.
put_ts = [put_timestamp] * self.obj_ring.replicas
codes = [409] * self.obj_ring.replicas
ts_iter = iter(put_ts)
with set_http_connect(*codes, timestamps=ts_iter):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 202)
def test_container_sync_put_x_timestamp_unsynced_race(self):
ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
test_indexes = [None] + [int(p) for p in POLICIES]
for policy_index in test_indexes:
put_timestamp = next(ts).internal
req = swob.Request.blank(
'/v1/a/c/o', method='PUT', headers={
'Content-Length': 0,
'X-Timestamp': put_timestamp})
# only one in-flight request finished
put_ts = [None] * (self.obj_ring.replicas - 1)
put_resp = [201] * (self.obj_ring.replicas - 1)
put_ts += [put_timestamp]
put_resp += [409]
ts_iter = iter(put_ts)
codes = put_resp
with set_http_connect(*codes, timestamps=ts_iter):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 202)
def test_COPY_simple(self):
req = swift.common.swob.Request.blank(
'/v1/a/c/o', method='COPY',
headers={'Content-Length': 0,
'Destination': 'c/o-copy'})
head_resp = [200] * self.obj_ring.replicas + \
[404] * self.obj_ring.max_more_nodes
put_resp = [201] * self.obj_ring.replicas
codes = head_resp + put_resp
with set_http_connect(*codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
def test_PUT_log_info(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT')
req.headers['x-copy-from'] = 'some/where'
req.headers['Content-Length'] = 0
# override FakeConn default resp headers to keep log_info clean
resp_headers = {'x-delete-at': None}
head_resp = [200] * self.obj_ring.replicas + \
[404] * self.obj_ring.max_more_nodes
put_resp = [201] * self.obj_ring.replicas
codes = head_resp + put_resp
with set_http_connect(*codes, headers=resp_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
self.assertEqual(
req.environ.get('swift.log_info'), ['x-copy-from:some/where'])
# and then check that we don't do that for originating POSTs
req = swift.common.swob.Request.blank('/v1/a/c/o')
req.method = 'POST'
req.headers['x-copy-from'] = 'else/where'
with set_http_connect(*codes, headers=resp_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 202)
self.assertEqual(req.environ.get('swift.log_info'), None)
@patch_policies(
[StoragePolicy(0, '1-replica', True),
StoragePolicy(1, '5-replica', False),
StoragePolicy(2, '8-replica', False),
StoragePolicy(3, '15-replica', False)],
fake_ring_args=[
{'replicas': 1}, {'replicas': 5}, {'replicas': 8}, {'replicas': 15}])
class TestReplicatedObjControllerVariousReplicas(BaseObjectControllerMixin,
unittest.TestCase):
controller_cls = obj.ReplicatedObjectController
@patch_policies(legacy_only=True)
class TestObjControllerLegacyCache(TestReplicatedObjController):
"""
    This test pretends that memcache returned a stored value resembling
    whatever "old" format might be cached. It catches the KeyErrors you'd
    get if your code expected some new format during a rolling upgrade.
"""
# in this case policy_index is missing
container_info = {
'read_acl': None,
'write_acl': None,
'sync_key': None,
'versions': None,
}
def test_invalid_storage_policy_cache(self):
self.app.container_info['storage_policy'] = 1
for method in ('GET', 'HEAD', 'POST', 'PUT', 'COPY'):
req = swob.Request.blank('/v1/a/c/o', method=method)
with set_http_connect():
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 503)
class StubResponse(object):
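    """
    A minimal stand-in for an httplib response object: just enough of the
    status / getheader(s) / read interface for the proxy code under test.
    """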
def __init__(self, status, body='', headers=None):
self.status = status
self.body = body
self.readable = BytesIO(body)
self.headers = HeaderKeyDict(headers)
fake_reason = ('Fake', 'This response is a lie.')
self.reason = swob.RESPONSE_REASONS.get(status, fake_reason)[0]
def getheader(self, header_name, default=None):
return self.headers.get(header_name, default)
def getheaders(self):
if 'Content-Length' not in self.headers:
self.headers['Content-Length'] = len(self.body)
return self.headers.items()
def read(self, amt=0):
return self.readable.read(amt)
@contextmanager
def capture_http_requests(get_response):
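    """
    Replace bufferedhttp's http_connect_raw with a fake that records every
    connection attempt and answers each request via the supplied
    get_response(req) callable; yields the connection log so callers can
    assert on the requests that were made.
    """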
class FakeConn(object):
def __init__(self, req):
self.req = req
self.resp = None
def getresponse(self):
self.resp = get_response(self.req)
return self.resp
class ConnectionLog(object):
def __init__(self):
self.connections = []
def __len__(self):
return len(self.connections)
def __getitem__(self, i):
return self.connections[i]
def __iter__(self):
return iter(self.connections)
def __call__(self, ip, port, method, path, headers, qs, ssl):
req = {
'ip': ip,
'port': port,
'method': method,
'path': path,
'headers': headers,
'qs': qs,
'ssl': ssl,
}
conn = FakeConn(req)
self.connections.append(conn)
return conn
fake_conn = ConnectionLog()
with mock.patch('swift.common.bufferedhttp.http_connect_raw',
new=fake_conn):
yield fake_conn
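# Illustrative usage sketch for StubResponse + capture_http_requests (not
# executed by the suite; 'req' and 'app' stand in for the request/app
# fixtures used throughout these tests):
#
#   def all_ok(backend_req):
#       return StubResponse(200)
#
#   with capture_http_requests(all_ok) as log:
#       resp = req.get_response(app)
#   assert all(conn.resp.status == 200 for conn in log)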
@patch_policies(with_ec_default=True)
class TestECObjController(BaseObjectControllerMixin, unittest.TestCase):
container_info = {
'read_acl': None,
'write_acl': None,
'sync_key': None,
'versions': None,
'storage_policy': '0',
}
controller_cls = obj.ECObjectController
def test_determine_chunk_destinations(self):
class FakePutter(object):
def __init__(self, index):
self.node_index = index
controller = self.controller_cls(
self.app, 'a', 'c', 'o')
# create a dummy list of putters, check no handoffs
putters = []
for index in range(0, 4):
putters.append(FakePutter(index))
got = controller._determine_chunk_destinations(putters)
expected = {}
for i, p in enumerate(putters):
expected[p] = i
self.assertEqual(got, expected)
# now lets make a handoff at the end
putters[3].node_index = None
got = controller._determine_chunk_destinations(putters)
self.assertEqual(got, expected)
putters[3].node_index = 3
# now lets make a handoff at the start
putters[0].node_index = None
got = controller._determine_chunk_destinations(putters)
self.assertEqual(got, expected)
putters[0].node_index = 0
# now lets make a handoff in the middle
putters[2].node_index = None
got = controller._determine_chunk_destinations(putters)
self.assertEqual(got, expected)
putters[2].node_index = 0
# now lets make all of them handoffs
for index in range(0, 4):
putters[index].node_index = None
got = controller._determine_chunk_destinations(putters)
self.assertEqual(got, expected)
def test_GET_simple(self):
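        # an EC GET is satisfied by responses from just ec_ndata primaries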
req = swift.common.swob.Request.blank('/v1/a/c/o')
get_resp = [200] * self.policy.ec_ndata
with set_http_connect(*get_resp):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertIn('Accept-Ranges', resp.headers)
def test_GET_simple_x_newest(self):
req = swift.common.swob.Request.blank('/v1/a/c/o',
headers={'X-Newest': 'true'})
codes = [200] * self.policy.ec_ndata
with set_http_connect(*codes):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
def test_GET_error(self):
req = swift.common.swob.Request.blank('/v1/a/c/o')
get_resp = [503] + [200] * self.policy.ec_ndata
with set_http_connect(*get_resp):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
def test_GET_with_body(self):
req = swift.common.swob.Request.blank('/v1/a/c/o')
# turn a real body into fragments
segment_size = self.policy.ec_segment_size
real_body = ('asdf' * segment_size)[:-10]
# split it up into chunks
chunks = [real_body[x:x + segment_size]
for x in range(0, len(real_body), segment_size)]
fragment_payloads = []
for chunk in chunks:
fragments = self.policy.pyeclib_driver.encode(chunk)
if not fragments:
break
fragment_payloads.append(fragments)
# sanity
sanity_body = ''
for fragment_payload in fragment_payloads:
sanity_body += self.policy.pyeclib_driver.decode(
fragment_payload)
self.assertEqual(len(real_body), len(sanity_body))
self.assertEqual(real_body, sanity_body)
# list(zip(...)) for py3 compatibility (zip is lazy there)
node_fragments = list(zip(*fragment_payloads))
self.assertEqual(len(node_fragments), self.replicas()) # sanity
headers = {'X-Object-Sysmeta-Ec-Content-Length': str(len(real_body))}
responses = [(200, ''.join(node_fragments[i]), headers)
for i in range(POLICIES.default.ec_ndata)]
status_codes, body_iter, headers = zip(*responses)
with set_http_connect(*status_codes, body_iter=body_iter,
headers=headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(len(real_body), len(resp.body))
self.assertEqual(real_body, resp.body)
def test_PUT_simple(self):
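        # the fake backends advertise metadata-footer and multiphase-commit
        # support in their 100-continue, as EC-capable object servers do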
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body='')
codes = [201] * self.replicas()
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
def test_txn_id_logging_ECPUT(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body='')
self.app.logger.txn_id = req.environ['swift.trans_id'] = 'test-txn-id'
codes = [(100, Timeout(), 503, 503)] * self.replicas()
stdout = BytesIO()
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers), \
mock.patch('sys.stdout', stdout):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 503)
for line in stdout.getvalue().splitlines():
self.assertIn('test-txn-id', line)
self.assertIn('Trying to get ',
stdout.getvalue())
def test_PUT_with_explicit_commit_status(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body='')
codes = [(100, 100, 201)] * self.replicas()
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
def test_PUT_error(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body='')
codes = [503] * self.replicas()
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 503)
def test_PUT_mostly_success(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body='')
codes = [201] * self.quorum()
codes += [503] * (self.replicas() - len(codes))
random.shuffle(codes)
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
def test_PUT_error_commit(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body='')
codes = [(100, 503, Exception('not used'))] * self.replicas()
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 503)
def test_PUT_mostly_success_commit(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body='')
codes = [201] * self.quorum()
codes += [(100, 503, Exception('not used'))] * (
self.replicas() - len(codes))
random.shuffle(codes)
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
def test_PUT_mostly_error_commit(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body='')
codes = [(100, 503, Exception('not used'))] * self.quorum()
codes += [201] * (self.replicas() - len(codes))
random.shuffle(codes)
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 503)
def test_PUT_commit_timeout(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body='')
codes = [201] * (self.replicas() - 1)
codes.append((100, Timeout(), Exception('not used')))
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
def test_PUT_commit_exception(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body='')
codes = [201] * (self.replicas() - 1)
codes.append((100, Exception('kaboom!'), Exception('not used')))
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
def test_PUT_ec_error_during_transfer_data(self):
class FakeReader(object):
def read(self, size):
raise exceptions.ChunkReadError('exception message')
req = swob.Request.blank('/v1/a/c/o.jpg', method='PUT',
body='test body')
req.environ['wsgi.input'] = FakeReader()
req.headers['content-length'] = '6'
codes = [201] * self.replicas()
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 499)
def test_PUT_ec_chunkreadtimeout_during_transfer_data(self):
class FakeReader(object):
def read(self, size):
raise exceptions.ChunkReadTimeout()
req = swob.Request.blank('/v1/a/c/o.jpg', method='PUT',
body='test body')
req.environ['wsgi.input'] = FakeReader()
req.headers['content-length'] = '6'
codes = [201] * self.replicas()
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 408)
def test_PUT_ec_timeout_during_transfer_data(self):
class FakeReader(object):
def read(self, size):
raise exceptions.Timeout()
req = swob.Request.blank('/v1/a/c/o.jpg', method='PUT',
body='test body')
req.environ['wsgi.input'] = FakeReader()
req.headers['content-length'] = '6'
codes = [201] * self.replicas()
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 499)
def test_PUT_ec_exception_during_transfer_data(self):
class FakeReader(object):
def read(self, size):
raise Exception('exception message')
req = swob.Request.blank('/v1/a/c/o.jpg', method='PUT',
body='test body')
req.environ['wsgi.input'] = FakeReader()
req.headers['content-length'] = '6'
codes = [201] * self.replicas()
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 500)
def test_PUT_with_body(self):
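        # drive a full EC PUT and capture each backend connection's MIME
        # body so the frag archives, footers, and commit parts can be
        # verified below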
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT')
segment_size = self.policy.ec_segment_size
test_body = ('asdf' * segment_size)[:-10]
etag = md5(test_body).hexdigest()
size = len(test_body)
req.body = test_body
codes = [201] * self.replicas()
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
put_requests = defaultdict(lambda: {'boundary': None, 'chunks': []})
def capture_body(conn_id, chunk):
put_requests[conn_id]['chunks'].append(chunk)
def capture_headers(ip, port, device, part, method, path, headers,
**kwargs):
conn_id = kwargs['connection_id']
put_requests[conn_id]['boundary'] = headers[
'X-Backend-Obj-Multipart-Mime-Boundary']
put_requests[conn_id]['backend-content-length'] = headers[
'X-Backend-Obj-Content-Length']
with set_http_connect(*codes, expect_headers=expect_headers,
give_send=capture_body,
give_connect=capture_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
frag_archives = []
for connection_id, info in put_requests.items():
body = unchunk_body(''.join(info['chunks']))
self.assertTrue(info['boundary'] is not None,
"didn't get boundary for conn %r" % (
connection_id,))
self.assertTrue(size > int(info['backend-content-length']) > 0,
"invalid backend-content-length for conn %r" % (
connection_id,))
# email.parser.FeedParser doesn't know how to take a multipart
# message and boundary together and parse it; it only knows how
# to take a string, parse the headers, and figure out the
# boundary on its own.
parser = email.parser.FeedParser()
parser.feed(
"Content-Type: multipart/nobodycares; boundary=%s\r\n\r\n" %
info['boundary'])
parser.feed(body)
message = parser.close()
self.assertTrue(message.is_multipart()) # sanity check
mime_parts = message.get_payload()
self.assertEqual(len(mime_parts), 3)
obj_part, footer_part, commit_part = mime_parts
# attach the body to frag_archives list
self.assertEqual(obj_part['X-Document'], 'object body')
frag_archives.append(obj_part.get_payload())
# assert length was correct for this connection
self.assertEqual(int(info['backend-content-length']),
len(frag_archives[-1]))
# assert length was the same for all connections
self.assertEqual(int(info['backend-content-length']),
len(frag_archives[0]))
# validate some footer metadata
self.assertEqual(footer_part['X-Document'], 'object metadata')
footer_metadata = json.loads(footer_part.get_payload())
self.assertTrue(footer_metadata)
expected = {
'X-Object-Sysmeta-EC-Content-Length': str(size),
'X-Backend-Container-Update-Override-Size': str(size),
'X-Object-Sysmeta-EC-Etag': etag,
'X-Backend-Container-Update-Override-Etag': etag,
'X-Object-Sysmeta-EC-Segment-Size': str(segment_size),
}
for header, value in expected.items():
self.assertEqual(footer_metadata[header], value)
# sanity on commit message
self.assertEqual(commit_part['X-Document'], 'put commit')
self.assertEqual(len(frag_archives), self.replicas())
fragment_size = self.policy.fragment_size
node_payloads = []
for fa in frag_archives:
payload = [fa[x:x + fragment_size]
for x in range(0, len(fa), fragment_size)]
node_payloads.append(payload)
fragment_payloads = zip(*node_payloads)
expected_body = ''
for fragment_payload in fragment_payloads:
self.assertEqual(len(fragment_payload), self.replicas())
            # pyeclib's decode expects a list of frags; zip() yields tuples
            fragment_payload = list(fragment_payload)
            expected_body += self.policy.pyeclib_driver.decode(
                fragment_payload)
self.assertEqual(len(test_body), len(expected_body))
self.assertEqual(test_body, expected_body)
def test_PUT_old_obj_server(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body='')
responses = [
# one server will response 100-continue but not include the
# needful expect headers and the connection will be dropped
((100, Exception('not used')), {}),
] + [
            # and plenty of successful responses too
(201, {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes',
}),
] * self.replicas()
random.shuffle(responses)
if responses[-1][0] != 201:
# whoops, stupid random
responses = responses[1:] + [responses[0]]
codes, expect_headers = zip(*responses)
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
def test_COPY_cross_policy_type_from_replicated(self):
self.app.per_container_info = {
'c1': self.app.container_info.copy(),
'c2': self.app.container_info.copy(),
}
# make c2 use replicated storage policy 1
self.app.per_container_info['c2']['storage_policy'] = '1'
# a put request with copy from source c2
req = swift.common.swob.Request.blank('/v1/a/c1/o', method='PUT',
body='', headers={
'X-Copy-From': 'c2/o'})
# c2 get
codes = [200] * self.replicas(POLICIES[1])
codes += [404] * POLICIES[1].object_ring.max_more_nodes
# c1 put
codes += [201] * self.replicas()
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
def test_COPY_cross_policy_type_to_replicated(self):
self.app.per_container_info = {
'c1': self.app.container_info.copy(),
'c2': self.app.container_info.copy(),
}
# make c1 use replicated storage policy 1
self.app.per_container_info['c1']['storage_policy'] = '1'
# a put request with copy from source c2
req = swift.common.swob.Request.blank('/v1/a/c1/o', method='PUT',
body='', headers={
'X-Copy-From': 'c2/o'})
# c2 get
codes = [404, 200] * self.policy.ec_ndata
headers = {
'X-Object-Sysmeta-Ec-Content-Length': 0,
}
# c1 put
codes += [201] * self.replicas(POLICIES[1])
with set_http_connect(*codes, headers=headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
def test_COPY_cross_policy_type_unknown(self):
self.app.per_container_info = {
'c1': self.app.container_info.copy(),
'c2': self.app.container_info.copy(),
}
# make c1 use some made up storage policy index
self.app.per_container_info['c1']['storage_policy'] = '13'
# a COPY request of c2 with destination in c1
req = swift.common.swob.Request.blank('/v1/a/c2/o', method='COPY',
body='', headers={
'Destination': 'c1/o'})
with set_http_connect():
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 503)
def _make_ec_archive_bodies(self, test_body, policy=None):
policy = policy or self.policy
segment_size = policy.ec_segment_size
# split up the body into buffers
chunks = [test_body[x:x + segment_size]
for x in range(0, len(test_body), segment_size)]
# encode the buffers into fragment payloads
fragment_payloads = []
for chunk in chunks:
fragments = self.policy.pyeclib_driver.encode(chunk)
if not fragments:
break
fragment_payloads.append(fragments)
# join up the fragment payloads per node
ec_archive_bodies = [''.join(fragments)
for fragments in zip(*fragment_payloads)]
return ec_archive_bodies
def _make_ec_object_stub(self, test_body=None, policy=None):
policy = policy or self.policy
segment_size = policy.ec_segment_size
test_body = test_body or (
'test' * segment_size)[:-random.randint(0, 1000)]
etag = md5(test_body).hexdigest()
ec_archive_bodies = self._make_ec_archive_bodies(test_body,
policy=policy)
return {
'body': test_body,
'etag': etag,
'frags': ec_archive_bodies,
}
def _fake_ec_node_response(self, node_frags):
"""
Given a list of entries for each node in ring order, where the
entries are a dict (or list of dicts) which describe all of the
fragment(s); create a function suitable for use with
capture_http_requests that will accept a req object and return a
response that will suitably fake the behavior of an object
server who had the given fragments on disk at the time.
"""
node_map = {}
all_nodes = []
def _build_node_map(req):
node_key = lambda n: (n['ip'], n['port'])
part = utils.split_path(req['path'], 5, 5, True)[1]
policy = POLICIES[int(
req['headers']['X-Backend-Storage-Policy-Index'])]
all_nodes.extend(policy.object_ring.get_part_nodes(part))
all_nodes.extend(policy.object_ring.get_more_nodes(part))
for i, node in enumerate(all_nodes):
node_map[node_key(node)] = i
# normalize node_frags to a list of fragments for each node even
# if there's only one fragment in the dataset provided.
for i, frags in enumerate(node_frags):
if isinstance(frags, dict):
node_frags[i] = [frags]
def get_response(req):
if not node_map:
_build_node_map(req)
try:
node_index = node_map[(req['ip'], req['port'])]
except KeyError:
raise Exception("Couldn't find node %s:%s in %r" % (
req['ip'], req['port'], all_nodes))
try:
frags = node_frags[node_index]
            except IndexError:
raise Exception('Found node %r:%r at index %s - '
'but only got %s stub response nodes' % (
req['ip'], req['port'], node_index,
len(node_frags)))
try:
stub = random.choice(frags)
except IndexError:
stub = None
if stub:
body = stub['obj']['frags'][stub['frag']]
headers = {
'X-Object-Sysmeta-Ec-Content-Length': len(
stub['obj']['body']),
'X-Object-Sysmeta-Ec-Etag': stub['obj']['etag'],
'X-Object-Sysmeta-Ec-Frag-Index': stub['frag'],
}
resp = StubResponse(200, body, headers)
else:
resp = StubResponse(404)
return resp
return get_response
def test_GET_with_frags_swapped_around(self):
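        # decoding must work no matter which primary happens to hold which
        # frag index, so swap a parity frag onto a data node below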
segment_size = self.policy.ec_segment_size
test_data = ('test' * segment_size)[:-657]
etag = md5(test_data).hexdigest()
ec_archive_bodies = self._make_ec_archive_bodies(test_data)
_part, primary_nodes = self.obj_ring.get_nodes('a', 'c', 'o')
node_key = lambda n: (n['ip'], n['port'])
response_map = {
node_key(n): StubResponse(200, ec_archive_bodies[i], {
'X-Object-Sysmeta-Ec-Content-Length': len(test_data),
'X-Object-Sysmeta-Ec-Etag': etag,
'X-Object-Sysmeta-Ec-Frag-Index': i,
}) for i, n in enumerate(primary_nodes)
}
# swap a parity response into a data node
data_node = random.choice(primary_nodes[:self.policy.ec_ndata])
parity_node = random.choice(primary_nodes[self.policy.ec_ndata:])
(response_map[node_key(data_node)],
response_map[node_key(parity_node)]) = \
(response_map[node_key(parity_node)],
response_map[node_key(data_node)])
def get_response(req):
req_key = (req['ip'], req['port'])
return response_map.pop(req_key)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(get_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(len(log), self.policy.ec_ndata)
self.assertEqual(len(response_map),
len(primary_nodes) - self.policy.ec_ndata)
def test_GET_with_single_missed_overwrite_does_not_need_handoff(self):
obj1 = self._make_ec_object_stub()
obj2 = self._make_ec_object_stub()
node_frags = [
{'obj': obj2, 'frag': 0},
{'obj': obj2, 'frag': 1},
            {'obj': obj1, 'frag': 2}, # missed overwrite
{'obj': obj2, 'frag': 3},
{'obj': obj2, 'frag': 4},
{'obj': obj2, 'frag': 5},
{'obj': obj2, 'frag': 6},
{'obj': obj2, 'frag': 7},
{'obj': obj2, 'frag': 8},
{'obj': obj2, 'frag': 9},
{'obj': obj2, 'frag': 10}, # parity
{'obj': obj2, 'frag': 11}, # parity
{'obj': obj2, 'frag': 12}, # parity
{'obj': obj2, 'frag': 13}, # parity
# {'obj': obj2, 'frag': 2}, # handoff (not used in this test)
]
fake_response = self._fake_ec_node_response(node_frags)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['etag'], obj2['etag'])
self.assertEqual(md5(resp.body).hexdigest(), obj2['etag'])
collected_responses = defaultdict(set)
for conn in log:
etag = conn.resp.headers['X-Object-Sysmeta-Ec-Etag']
index = conn.resp.headers['X-Object-Sysmeta-Ec-Frag-Index']
collected_responses[etag].add(index)
# because the primary nodes are shuffled, it's possible the proxy
# didn't even notice the missed overwrite frag - but it might have
self.assertLessEqual(len(log), self.policy.ec_ndata + 1)
self.assertLessEqual(len(collected_responses), 2)
# ... regardless we should never need to fetch more than ec_ndata
# frags for any given etag
for etag, frags in collected_responses.items():
self.assertTrue(len(frags) <= self.policy.ec_ndata,
'collected %s frags for etag %s' % (
len(frags), etag))
def test_GET_with_many_missed_overwrite_will_need_handoff(self):
obj1 = self._make_ec_object_stub()
obj2 = self._make_ec_object_stub()
node_frags = [
{'obj': obj2, 'frag': 0},
{'obj': obj2, 'frag': 1},
{'obj': obj1, 'frag': 2}, # missed
{'obj': obj2, 'frag': 3},
{'obj': obj2, 'frag': 4},
{'obj': obj2, 'frag': 5},
{'obj': obj1, 'frag': 6}, # missed
{'obj': obj2, 'frag': 7},
{'obj': obj2, 'frag': 8},
{'obj': obj1, 'frag': 9}, # missed
{'obj': obj1, 'frag': 10}, # missed
{'obj': obj1, 'frag': 11}, # missed
{'obj': obj2, 'frag': 12},
{'obj': obj2, 'frag': 13},
{'obj': obj2, 'frag': 6}, # handoff
]
fake_response = self._fake_ec_node_response(node_frags)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['etag'], obj2['etag'])
self.assertEqual(md5(resp.body).hexdigest(), obj2['etag'])
collected_responses = defaultdict(set)
for conn in log:
etag = conn.resp.headers['X-Object-Sysmeta-Ec-Etag']
index = conn.resp.headers['X-Object-Sysmeta-Ec-Frag-Index']
collected_responses[etag].add(index)
# there's not enough of the obj2 etag on the primaries, we would
# have collected responses for both etags, and would have made
# one more request to the handoff node
self.assertEqual(len(log), self.replicas() + 1)
self.assertEqual(len(collected_responses), 2)
# ... regardless we should never need to fetch more than ec_ndata
# frags for any given etag
for etag, frags in collected_responses.items():
self.assertTrue(len(frags) <= self.policy.ec_ndata,
'collected %s frags for etag %s' % (
len(frags), etag))
def test_GET_with_missing_and_mixed_frags_will_dig_deep_but_succeed(self):
obj1 = self._make_ec_object_stub()
obj2 = self._make_ec_object_stub()
node_frags = [
{'obj': obj1, 'frag': 0},
{'obj': obj2, 'frag': 0},
{},
{'obj': obj1, 'frag': 1},
{'obj': obj2, 'frag': 1},
{},
{'obj': obj1, 'frag': 2},
{'obj': obj2, 'frag': 2},
{},
{'obj': obj1, 'frag': 3},
{'obj': obj2, 'frag': 3},
{},
{'obj': obj1, 'frag': 4},
{'obj': obj2, 'frag': 4},
{},
{'obj': obj1, 'frag': 5},
{'obj': obj2, 'frag': 5},
{},
{'obj': obj1, 'frag': 6},
{'obj': obj2, 'frag': 6},
{},
{'obj': obj1, 'frag': 7},
{'obj': obj2, 'frag': 7},
{},
{'obj': obj1, 'frag': 8},
{'obj': obj2, 'frag': 8},
{},
{'obj': obj2, 'frag': 9},
]
fake_response = self._fake_ec_node_response(node_frags)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.headers['etag'], obj2['etag'])
self.assertEqual(md5(resp.body).hexdigest(), obj2['etag'])
collected_responses = defaultdict(set)
for conn in log:
etag = conn.resp.headers['X-Object-Sysmeta-Ec-Etag']
index = conn.resp.headers['X-Object-Sysmeta-Ec-Frag-Index']
collected_responses[etag].add(index)
# we go exactly as long as we have to, finding two different
# etags and some 404's (i.e. collected_responses[None])
self.assertEqual(len(log), len(node_frags))
self.assertEqual(len(collected_responses), 3)
# ... regardless we should never need to fetch more than ec_ndata
# frags for any given etag
for etag, frags in collected_responses.items():
self.assertTrue(len(frags) <= self.policy.ec_ndata,
'collected %s frags for etag %s' % (
len(frags), etag))
def test_GET_with_missing_and_mixed_frags_will_dig_deep_but_stop(self):
obj1 = self._make_ec_object_stub()
obj2 = self._make_ec_object_stub()
node_frags = [
{'obj': obj1, 'frag': 0},
{'obj': obj2, 'frag': 0},
{},
{'obj': obj1, 'frag': 1},
{'obj': obj2, 'frag': 1},
{},
{'obj': obj1, 'frag': 2},
{'obj': obj2, 'frag': 2},
{},
{'obj': obj1, 'frag': 3},
{'obj': obj2, 'frag': 3},
{},
{'obj': obj1, 'frag': 4},
{'obj': obj2, 'frag': 4},
{},
{'obj': obj1, 'frag': 5},
{'obj': obj2, 'frag': 5},
{},
{'obj': obj1, 'frag': 6},
{'obj': obj2, 'frag': 6},
{},
{'obj': obj1, 'frag': 7},
{'obj': obj2, 'frag': 7},
{},
{'obj': obj1, 'frag': 8},
{'obj': obj2, 'frag': 8},
{},
{},
]
fake_response = self._fake_ec_node_response(node_frags)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(fake_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 404)
collected_responses = defaultdict(set)
for conn in log:
etag = conn.resp.headers['X-Object-Sysmeta-Ec-Etag']
index = conn.resp.headers['X-Object-Sysmeta-Ec-Frag-Index']
collected_responses[etag].add(index)
# default node_iter will exhaust at 2 * replicas
self.assertEqual(len(log), 2 * self.replicas())
self.assertEqual(len(collected_responses), 3)
# ... regardless we should never need to fetch more than ec_ndata
# frags for any given etag
for etag, frags in collected_responses.items():
self.assertTrue(len(frags) <= self.policy.ec_ndata,
'collected %s frags for etag %s' % (
len(frags), etag))
def test_GET_mixed_success_with_range(self):
fragment_size = self.policy.fragment_size
ec_stub = self._make_ec_object_stub()
frag_archives = ec_stub['frags']
frag_archive_size = len(ec_stub['frags'][0])
headers = {
'Content-Type': 'text/plain',
'Content-Length': fragment_size,
'Content-Range': 'bytes 0-%s/%s' % (fragment_size - 1,
frag_archive_size),
'X-Object-Sysmeta-Ec-Content-Length': len(ec_stub['body']),
'X-Object-Sysmeta-Ec-Etag': ec_stub['etag'],
}
responses = [
StubResponse(206, frag_archives[0][:fragment_size], headers),
StubResponse(206, frag_archives[1][:fragment_size], headers),
StubResponse(206, frag_archives[2][:fragment_size], headers),
StubResponse(206, frag_archives[3][:fragment_size], headers),
StubResponse(206, frag_archives[4][:fragment_size], headers),
# data nodes with old frag
StubResponse(416),
StubResponse(416),
StubResponse(206, frag_archives[7][:fragment_size], headers),
StubResponse(206, frag_archives[8][:fragment_size], headers),
StubResponse(206, frag_archives[9][:fragment_size], headers),
# hopefully we ask for two more
StubResponse(206, frag_archives[10][:fragment_size], headers),
StubResponse(206, frag_archives[11][:fragment_size], headers),
]
def get_response(req):
return responses.pop(0) if responses else StubResponse(404)
req = swob.Request.blank('/v1/a/c/o', headers={'Range': 'bytes=0-3'})
with capture_http_requests(get_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 206)
self.assertEqual(resp.body, 'test')
self.assertEqual(len(log), self.policy.ec_ndata + 2)
def test_GET_with_range_unsatisfiable_mixed_success(self):
responses = [
StubResponse(416),
StubResponse(416),
StubResponse(416),
StubResponse(416),
StubResponse(416),
StubResponse(416),
StubResponse(416),
# sneak in bogus extra responses
StubResponse(404),
StubResponse(206),
# and then just "enough" more 416's
StubResponse(416),
StubResponse(416),
StubResponse(416),
]
def get_response(req):
return responses.pop(0) if responses else StubResponse(404)
req = swob.Request.blank('/v1/a/c/o', headers={
'Range': 'bytes=%s-' % 100000000000000})
with capture_http_requests(get_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 416)
# ec_ndata responses that must agree, plus the bogus extras
self.assertEqual(len(log), self.policy.ec_ndata + 2)
def test_GET_mixed_ranged_responses_success(self):
segment_size = self.policy.ec_segment_size
fragment_size = self.policy.fragment_size
new_data = ('test' * segment_size)[:-492]
new_etag = md5(new_data).hexdigest()
new_archives = self._make_ec_archive_bodies(new_data)
old_data = ('junk' * segment_size)[:-492]
old_etag = md5(old_data).hexdigest()
old_archives = self._make_ec_archive_bodies(old_data)
frag_archive_size = len(new_archives[0])
new_headers = {
'Content-Type': 'text/plain',
'Content-Length': fragment_size,
'Content-Range': 'bytes 0-%s/%s' % (fragment_size - 1,
frag_archive_size),
'X-Object-Sysmeta-Ec-Content-Length': len(new_data),
'X-Object-Sysmeta-Ec-Etag': new_etag,
}
old_headers = {
'Content-Type': 'text/plain',
'Content-Length': fragment_size,
'Content-Range': 'bytes 0-%s/%s' % (fragment_size - 1,
frag_archive_size),
'X-Object-Sysmeta-Ec-Content-Length': len(old_data),
'X-Object-Sysmeta-Ec-Etag': old_etag,
}
# 7 primaries with stale frags, 3 handoffs failed to get new frags
responses = [
StubResponse(206, old_archives[0][:fragment_size], old_headers),
StubResponse(206, new_archives[1][:fragment_size], new_headers),
StubResponse(206, old_archives[2][:fragment_size], old_headers),
StubResponse(206, new_archives[3][:fragment_size], new_headers),
StubResponse(206, old_archives[4][:fragment_size], old_headers),
StubResponse(206, new_archives[5][:fragment_size], new_headers),
StubResponse(206, old_archives[6][:fragment_size], old_headers),
StubResponse(206, new_archives[7][:fragment_size], new_headers),
StubResponse(206, old_archives[8][:fragment_size], old_headers),
StubResponse(206, new_archives[9][:fragment_size], new_headers),
StubResponse(206, old_archives[10][:fragment_size], old_headers),
StubResponse(206, new_archives[11][:fragment_size], new_headers),
StubResponse(206, old_archives[12][:fragment_size], old_headers),
StubResponse(206, new_archives[13][:fragment_size], new_headers),
StubResponse(206, new_archives[0][:fragment_size], new_headers),
StubResponse(404),
StubResponse(404),
StubResponse(206, new_archives[6][:fragment_size], new_headers),
StubResponse(404),
StubResponse(206, new_archives[10][:fragment_size], new_headers),
StubResponse(206, new_archives[12][:fragment_size], new_headers),
]
def get_response(req):
return responses.pop(0) if responses else StubResponse(404)
req = swob.Request.blank('/v1/a/c/o')
with capture_http_requests(get_response) as log:
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body, new_data[:segment_size])
self.assertEqual(len(log), self.policy.ec_ndata + 10)
def test_GET_mismatched_fragment_archives(self):
segment_size = self.policy.ec_segment_size
test_data1 = ('test' * segment_size)[:-333]
# N.B. the object data *length* here is different
test_data2 = ('blah1' * segment_size)[:-333]
etag1 = md5(test_data1).hexdigest()
etag2 = md5(test_data2).hexdigest()
ec_archive_bodies1 = self._make_ec_archive_bodies(test_data1)
ec_archive_bodies2 = self._make_ec_archive_bodies(test_data2)
headers1 = {'X-Object-Sysmeta-Ec-Etag': etag1,
'X-Object-Sysmeta-Ec-Content-Length': '333'}
# here we're going to *lie* and say the etag here matches
headers2 = {'X-Object-Sysmeta-Ec-Etag': etag1,
'X-Object-Sysmeta-Ec-Content-Length': '333'}
responses1 = [(200, body, headers1)
for body in ec_archive_bodies1]
responses2 = [(200, body, headers2)
for body in ec_archive_bodies2]
req = swob.Request.blank('/v1/a/c/o')
# sanity check responses1
responses = responses1[:self.policy.ec_ndata]
status_codes, body_iter, headers = zip(*responses)
with set_http_connect(*status_codes, body_iter=body_iter,
headers=headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(md5(resp.body).hexdigest(), etag1)
# sanity check responses2
responses = responses2[:self.policy.ec_ndata]
status_codes, body_iter, headers = zip(*responses)
with set_http_connect(*status_codes, body_iter=body_iter,
headers=headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(md5(resp.body).hexdigest(), etag2)
# now mix the responses a bit
mix_index = random.randint(0, self.policy.ec_ndata - 1)
mixed_responses = responses1[:self.policy.ec_ndata]
mixed_responses[mix_index] = responses2[mix_index]
status_codes, body_iter, headers = zip(*mixed_responses)
with set_http_connect(*status_codes, body_iter=body_iter,
headers=headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
try:
resp.body
except ECDriverError:
resp._app_iter.close()
else:
self.fail('invalid ec fragment response body did not blow up!')
error_lines = self.logger.get_lines_for_level('error')
self.assertEqual(1, len(error_lines))
msg = error_lines[0]
self.assertTrue('Error decoding fragments' in msg)
self.assertTrue('/a/c/o' in msg)
log_msg_args, log_msg_kwargs = self.logger.log_dict['error'][0]
self.assertEqual(log_msg_kwargs['exc_info'][0], ECDriverError)
def test_GET_read_timeout(self):
segment_size = self.policy.ec_segment_size
test_data = ('test' * segment_size)[:-333]
etag = md5(test_data).hexdigest()
ec_archive_bodies = self._make_ec_archive_bodies(test_data)
headers = {'X-Object-Sysmeta-Ec-Etag': etag}
self.app.recoverable_node_timeout = 0.01
responses = [(200, SlowBody(body, 0.1), headers)
for body in ec_archive_bodies]
req = swob.Request.blank('/v1/a/c/o')
status_codes, body_iter, headers = zip(*responses + [
(404, '', {}) for i in range(
self.policy.object_ring.max_more_nodes)])
with set_http_connect(*status_codes, body_iter=body_iter,
headers=headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
# do this inside the fake http context manager, it'll try to
# resume but won't be able to give us all the right bytes
self.assertNotEqual(md5(resp.body).hexdigest(), etag)
error_lines = self.logger.get_lines_for_level('error')
self.assertEqual(self.replicas(), len(error_lines))
nparity = self.policy.ec_nparity
for line in error_lines[:nparity]:
self.assertTrue('retrying' in line)
for line in error_lines[nparity:]:
self.assertTrue('ChunkReadTimeout (0.01s)' in line)
def test_GET_read_timeout_resume(self):
segment_size = self.policy.ec_segment_size
test_data = ('test' * segment_size)[:-333]
etag = md5(test_data).hexdigest()
ec_archive_bodies = self._make_ec_archive_bodies(test_data)
headers = {'X-Object-Sysmeta-Ec-Etag': etag}
self.app.recoverable_node_timeout = 0.05
# first one is slow
responses = [(200, SlowBody(ec_archive_bodies[0], 0.1), headers)]
# ... the rest are fine
responses += [(200, body, headers)
for body in ec_archive_bodies[1:]]
req = swob.Request.blank('/v1/a/c/o')
status_codes, body_iter, headers = zip(
*responses[:self.policy.ec_ndata + 1])
with set_http_connect(*status_codes, body_iter=body_iter,
headers=headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
        self.assertEqual(md5(resp.body).hexdigest(), etag)
error_lines = self.logger.get_lines_for_level('error')
self.assertEqual(1, len(error_lines))
self.assertTrue('retrying' in error_lines[0])
def test_fix_response_HEAD(self):
headers = {'X-Object-Sysmeta-Ec-Content-Length': '10',
'X-Object-Sysmeta-Ec-Etag': 'foo'}
        # successful HEAD
responses = [(200, '', headers)]
status_codes, body_iter, headers = zip(*responses)
req = swift.common.swob.Request.blank('/v1/a/c/o', method='HEAD')
with set_http_connect(*status_codes, body_iter=body_iter,
headers=headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.body, '')
        # 200 OK shows the original object's content length
self.assertEqual(resp.headers['Content-Length'], '10')
self.assertEqual(resp.headers['Etag'], 'foo')
# not found HEAD
responses = [(404, '', {})] * self.replicas() * 2
status_codes, body_iter, headers = zip(*responses)
req = swift.common.swob.Request.blank('/v1/a/c/o', method='HEAD')
with set_http_connect(*status_codes, body_iter=body_iter,
headers=headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 404)
# 404 shows actual response body size (i.e. 0 for HEAD)
self.assertEqual(resp.headers['Content-Length'], '0')
def test_PUT_with_slow_commits(self):
# It's important that this timeout be much less than the delay in
# the slow commit responses so that the slow commits are not waited
# for.
self.app.post_quorum_timeout = 0.01
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body='')
# plenty of slow commits
response_sleep = 5.0
codes = [FakeStatus(201, response_sleep=response_sleep)
for i in range(self.replicas())]
# swap out some with regular fast responses
number_of_fast_responses_needed_to_be_quick_enough = \
self.policy.quorum
fast_indexes = random.sample(
range(self.replicas()),
number_of_fast_responses_needed_to_be_quick_enough)
for i in fast_indexes:
codes[i] = 201
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers):
start = time.time()
resp = req.get_response(self.app)
response_time = time.time() - start
self.assertEqual(resp.status_int, 201)
self.assertTrue(response_time < response_sleep)
def test_PUT_with_just_enough_durable_responses(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body='')
codes = [201] * (self.policy.ec_ndata + 1)
codes += [503] * (self.policy.ec_nparity - 1)
self.assertEqual(len(codes), self.replicas())
random.shuffle(codes)
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
def test_PUT_with_less_durable_responses(self):
req = swift.common.swob.Request.blank('/v1/a/c/o', method='PUT',
body='')
codes = [201] * (self.policy.ec_ndata)
codes += [503] * (self.policy.ec_nparity)
self.assertEqual(len(codes), self.replicas())
random.shuffle(codes)
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*codes, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 503)
def test_COPY_with_ranges(self):
req = swift.common.swob.Request.blank(
'/v1/a/c/o', method='COPY',
headers={'Destination': 'c1/o',
'Range': 'bytes=5-10'})
# turn a real body into fragments
segment_size = self.policy.ec_segment_size
real_body = ('asdf' * segment_size)[:-10]
# split it up into chunks
chunks = [real_body[x:x + segment_size]
for x in range(0, len(real_body), segment_size)]
        # we only need the first chunk to rebuild the 5-10 range
fragments = self.policy.pyeclib_driver.encode(chunks[0])
fragment_payloads = []
fragment_payloads.append(fragments)
node_fragments = zip(*fragment_payloads)
self.assertEqual(len(node_fragments), self.replicas()) # sanity
headers = {'X-Object-Sysmeta-Ec-Content-Length': str(len(real_body))}
responses = [(200, ''.join(node_fragments[i]), headers)
for i in range(POLICIES.default.ec_ndata)]
responses += [(201, '', {})] * self.obj_ring.replicas
status_codes, body_iter, headers = zip(*responses)
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*status_codes, body_iter=body_iter,
headers=headers, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 201)
def test_GET_with_invalid_ranges(self):
# real body size is segment_size - 10 (just 1 segment)
segment_size = self.policy.ec_segment_size
real_body = ('a' * segment_size)[:-10]
        # range is beyond the real body but within the segment size
self._test_invalid_ranges('GET', real_body,
segment_size, '%s-' % (segment_size - 10))
        # range is beyond both the real body and the segment size
self._test_invalid_ranges('GET', real_body,
segment_size, '%s-' % (segment_size + 10))
def test_COPY_with_invalid_ranges(self):
# real body size is segment_size - 10 (just 1 segment)
segment_size = self.policy.ec_segment_size
real_body = ('a' * segment_size)[:-10]
        # range is beyond the real body but within the segment size
self._test_invalid_ranges('COPY', real_body,
segment_size, '%s-' % (segment_size - 10))
        # range is beyond both the real body and the segment size
self._test_invalid_ranges('COPY', real_body,
segment_size, '%s-' % (segment_size + 10))
def _test_invalid_ranges(self, method, real_body, segment_size, req_range):
        # make a request whose range starts beyond the real size
body_etag = md5(real_body).hexdigest()
req = swift.common.swob.Request.blank(
'/v1/a/c/o', method=method,
headers={'Destination': 'c1/o',
'Range': 'bytes=%s' % (req_range)})
fragments = self.policy.pyeclib_driver.encode(real_body)
fragment_payloads = [fragments]
node_fragments = zip(*fragment_payloads)
self.assertEqual(len(node_fragments), self.replicas()) # sanity
headers = {'X-Object-Sysmeta-Ec-Content-Length': str(len(real_body)),
'X-Object-Sysmeta-Ec-Etag': body_etag}
start = int(req_range.split('-')[0])
self.assertTrue(start >= 0) # sanity
title, exp = swob.RESPONSE_REASONS[416]
range_not_satisfiable_body = \
'<html><h1>%s</h1><p>%s</p></html>' % (title, exp)
if start >= segment_size:
responses = [(416, range_not_satisfiable_body, headers)
for i in range(POLICIES.default.ec_ndata)]
else:
responses = [(200, ''.join(node_fragments[i]), headers)
for i in range(POLICIES.default.ec_ndata)]
status_codes, body_iter, headers = zip(*responses)
expect_headers = {
'X-Obj-Metadata-Footer': 'yes',
'X-Obj-Multiphase-Commit': 'yes'
}
with set_http_connect(*status_codes, body_iter=body_iter,
headers=headers, expect_headers=expect_headers):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 416)
self.assertEqual(resp.content_length, len(range_not_satisfiable_body))
self.assertEqual(resp.body, range_not_satisfiable_body)
self.assertEqual(resp.etag, body_etag)
self.assertEqual(resp.headers['Accept-Ranges'], 'bytes')
if __name__ == '__main__':
unittest.main()
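# For orientation, a minimal sketch (an assumption, simplified from swift's
# test helpers rather than the real implementation) of the stub plumbing the
# tests above lean on: capture_http_requests patches the proxy's backend
# connections so each object-server request is answered by a canned
# StubResponse and recorded in a log for later assertions, e.g.:
#
#     class StubResponse(object):
#         def __init__(self, status, body='', headers=None):
#             self.status = status          # e.g. 206 for a ranged frag GET
#             self.body = body              # fragment archive bytes
#             self.headers = headers or {}  # X-Object-Sysmeta-Ec-* metadata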
|
{
"content_hash": "46a974088f7fcdd035b4301d65e017e6",
"timestamp": "",
"source": "github",
"line_count": 2465,
"max_line_length": 79,
"avg_line_length": 42.42068965517242,
"alnum_prop": 0.5583979649411382,
"repo_name": "aerwin3/swift",
"id": "d18ac4299bc6e74de3ec3e7d0bdcf7aee509c749",
"size": "105184",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/unit/proxy/controllers/test_obj.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6931295"
},
{
"name": "Shell",
"bytes": "1497"
}
],
"symlink_target": ""
}
|
"""
Specific exception types
"""
class UnknownChannel(Exception):
pass
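# Usage sketch (an assumption, not part of the original file): plugin code
# would raise this when a message references a channel the bot cannot
# resolve, e.g.:
#
#     channel = channels.get(channel_id)  # hypothetical lookup table
#     if channel is None:
#         raise UnknownChannel(channel_id)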
|
{
"content_hash": "c590d830002083d4e2554e8447958b52",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 32,
"avg_line_length": 11,
"alnum_prop": 0.7012987012987013,
"repo_name": "PythonSanSebastian/python-rtmbot",
"id": "bbccff2953fa4792cea7fd2a90ba3128a519af4f",
"size": "77",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rtmbot/exceptions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1645"
},
{
"name": "Python",
"bytes": "15071"
}
],
"symlink_target": ""
}
|
"""models.py
Udacity conference server-side Python App Engine data & ProtoRPC models
$Id: models.py,v 1.1 2014/05/24 22:01:10 wesc Exp $
created/forked from conferences.py by wesc on 2014 may 24
"""
__author__ = 'wesc+api@google.com (Wesley Chun)'
import httplib
import endpoints
from protorpc import messages
from google.appengine.ext import ndb
class ConflictException(endpoints.ServiceException):
"""ConflictException -- exception mapped to HTTP 409 response"""
http_status = httplib.CONFLICT
class Profile(ndb.Model):
"""Profile -- User profile object"""
displayName = ndb.StringProperty()
mainEmail = ndb.StringProperty()
teeShirtSize = ndb.StringProperty(default='NOT_SPECIFIED')
conferenceKeysToAttend = ndb.StringProperty(repeated=True)
sessionsInWishlist = ndb.StringProperty(repeated=True)
class ProfileMiniForm(messages.Message):
"""ProfileMiniForm -- update Profile form message"""
displayName = messages.StringField(1)
teeShirtSize = messages.EnumField('TeeShirtSize', 2)
class ProfileForm(messages.Message):
"""ProfileForm -- Profile outbound form message"""
displayName = messages.StringField(1)
mainEmail = messages.StringField(2)
teeShirtSize = messages.EnumField('TeeShirtSize', 3)
conferenceKeysToAttend = messages.StringField(4, repeated=True)
sessionsInWishlist = messages.StringField(5, repeated=True)
class StringMessage(messages.Message):
"""StringMessage-- outbound (single) string message"""
data = messages.StringField(1, required=True)
class BooleanMessage(messages.Message):
"""BooleanMessage-- outbound Boolean value message"""
data = messages.BooleanField(1)
class Conference(ndb.Model):
"""Conference -- Conference object"""
name = ndb.StringProperty(required=True)
description = ndb.StringProperty()
organizerUserId = ndb.StringProperty()
topics = ndb.StringProperty(repeated=True)
city = ndb.StringProperty()
startDate = ndb.DateProperty()
# TODO: do we need for indexing like Java?
month = ndb.IntegerProperty()
endDate = ndb.DateProperty()
maxAttendees = ndb.IntegerProperty()
seatsAvailable = ndb.IntegerProperty()
class ConferenceForm(messages.Message):
"""ConferenceForm -- Conference outbound form message"""
name = messages.StringField(1)
description = messages.StringField(2)
organizerUserId = messages.StringField(3)
topics = messages.StringField(4, repeated=True)
city = messages.StringField(5)
startDate = messages.StringField(6) # DateTimeField()
month = messages.IntegerField(7)
maxAttendees = messages.IntegerField(8)
seatsAvailable = messages.IntegerField(9)
endDate = messages.StringField(10) # DateTimeField()
websafeKey = messages.StringField(11)
organizerDisplayName = messages.StringField(12)
class ConferenceForms(messages.Message):
"""ConferenceForms -- multiple Conference outbound form message"""
items = messages.MessageField(ConferenceForm, 1, repeated=True)
class Session(ndb.Model):
"""Session -- Session object"""
name = ndb.StringProperty(required=True)
highlights = ndb.StringProperty()
speaker = ndb.StringProperty()
duration = ndb.IntegerProperty()
organizerUserId = ndb.StringProperty()
typeOfSession = ndb.StringProperty(repeated=True)
date = ndb.DateProperty()
startTime = ndb.TimeProperty()
class SessionForm(messages.Message):
"""SessionForm -- Session outbound form message"""
name = messages.StringField(1)
highlights = messages.StringField(2)
speaker = messages.StringField(3)
duration = messages.IntegerField(4)
typeOfSession = messages.StringField(5, repeated=True)
date = messages.StringField(6) # DateTimeField()
startTime = messages.StringField(7)
organizerUserId = messages.StringField(8)
websafeKey = messages.StringField(9)
class SessionForms(messages.Message):
"""SessionForms -- multiple Sessions outbound form message"""
items = messages.MessageField(SessionForm, 1, repeated=True)
class TeeShirtSize(messages.Enum):
"""TeeShirtSize -- t-shirt size enumeration value"""
NOT_SPECIFIED = 1
XS_M = 2
XS_W = 3
S_M = 4
S_W = 5
M_M = 6
M_W = 7
L_M = 8
L_W = 9
XL_M = 10
XL_W = 11
XXL_M = 12
XXL_W = 13
XXXL_M = 14
XXXL_W = 15
class ConferenceQueryForm(messages.Message):
"""ConferenceQueryForm -- Conference query inbound form message"""
field = messages.StringField(1)
operator = messages.StringField(2)
value = messages.StringField(3)
class ConferenceQueryForms(messages.Message):
"""ConferenceQueryForms -- multiple ConferenceQueryForm
inbound form message"""
filters = messages.MessageField(ConferenceQueryForm, 1, repeated=True)
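# Illustration (a sketch, not part of the original module) of how a client
# would populate the query messages above; the field/operator/value strings
# are made up for the example.
_example_query = ConferenceQueryForms(
    filters=[ConferenceQueryForm(field='CITY', operator='EQ',
                                 value='London')])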
|
{
"content_hash": "22f6f9124735a8aac3647a62beb450ac",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 74,
"avg_line_length": 31.50980392156863,
"alnum_prop": 0.7181082762912259,
"repo_name": "anudhagat/ConferenceCentral",
"id": "1b13ebe8b28855ffcf039d0fe579e5c3b49d7d5a",
"size": "4844",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "23913"
},
{
"name": "JavaScript",
"bytes": "32836"
},
{
"name": "Python",
"bytes": "44463"
}
],
"symlink_target": ""
}
|
"""
Copyright 2015 Paul T. Grogan, Massachusetts Institute of Technology
Copyright 2017 Paul T. Grogan, Stevens Institute of Technology
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
Test cases for L{ofspy.context} package.
"""
import unittest
from ...player import Federation
from ...simulation import Simulator
from ...context import Context
from ...context.location import Surface, Orbit
from ...context.event import Demand, ValueSchedule
"""
Test cases for L{ofspy.context.Context} class.
"""
class ContextTestCase(unittest.TestCase):
def setUp(self):
self.default = Context(seed=0)
        # locs is laid out [SUR1, LEO1, MEO1, GEO1, SUR2, ...], i.e. location
        # k of sector s sits at index 4*s + k; test_propagate below relies on
        # this indexing
        self.locs = []
        for s in range(6):
            self.locs.append(Surface(s, name='SUR{0}'.format(s+1)))
            self.locs.append(Orbit(s, 'LEO', name='LEO{0}'.format(s+1)))
            self.locs.append(Orbit(s, 'MEO', name='MEO{0}'.format(s+1)))
            self.locs.append(Orbit(s, 'GEO', name='GEO{0}'.format(s+1)))
self.evts = []
for d in range(8):
self.evts.append(Demand(None, 'SAR', 1,
ValueSchedule([(1,500),(4,400)], -50),
name='SAR1.{0}'.format(d+1)))
for d in range(12):
self.evts.append(Demand(None, 'SAR', 1,
ValueSchedule([(2,450),(5,350)], -100),
name='SAR2.{0}'.format(d+1)))
for d in range(23):
self.evts.append(Demand(None, 'SAR', 1,
ValueSchedule([(3,400),(6,300)], -150),
name='SAR3.{0}'.format(d+1)))
for d in range(8):
self.evts.append(Demand(None, 'VIS', 1,
ValueSchedule([(1,600),(4,500)], -50),
name='VIS1.{0}'.format(d+1)))
for d in range(17):
self.evts.append(Demand(None, 'VIS', 1,
ValueSchedule([(2,500),(5,400)], -100),
name='VIS2.{0}'.format(d+1)))
for d in range(8):
self.evts.append(Demand(None, 'VIS', 1,
ValueSchedule([(3,450),(6,350)], -150),
name='VIS3.{0}'.format(d+1)))
self.default = Context(locations=self.locs, events=self.evts,
federations=[Federation()], seed=0)
self.sim = Simulator(entities=[self.default],
initTime=0, timeStep=1, maxTime=3)
def tearDown(self):
self.default = None
self.locs = None
self.evts = None
def test_propagate(self):
self.assertEqual(self.default.propagate(self.locs[0], 0), self.locs[0])
self.assertEqual(self.default.propagate(self.locs[0], 1), self.locs[0])
self.assertEqual(self.default.propagate(self.locs[0], 2), self.locs[0])
self.assertEqual(self.default.propagate(self.locs[1], 0), self.locs[1])
self.assertEqual(self.default.propagate(self.locs[1], 1), self.locs[9])
self.assertEqual(self.default.propagate(self.locs[1], 2), self.locs[17])
self.assertEqual(self.default.propagate(self.locs[1], 3), self.locs[1])
self.assertEqual(self.default.propagate(self.locs[1], 4), self.locs[9])
self.assertEqual(self.default.propagate(self.locs[1], -1), self.locs[17])
self.assertEqual(self.default.propagate(self.locs[2], 0), self.locs[2])
self.assertEqual(self.default.propagate(self.locs[2], 1), self.locs[6])
self.assertEqual(self.default.propagate(self.locs[2], 2), self.locs[10])
self.assertEqual(self.default.propagate(self.locs[3], 0), self.locs[3])
self.assertEqual(self.default.propagate(self.locs[3], 1), self.locs[3])
self.assertEqual(self.default.propagate(self.locs[3], 2), self.locs[3])
def test_init(self):
self.assertEqual(self.default.currentEvents, [])
self.assertEqual(self.default.futureEvents, [])
self.assertEqual(self.default.pastEvents, [])
self.default.init(self.sim)
self.assertEqual(self.default.currentEvents, [])
self.assertNotEqual(self.default.futureEvents, [])
self.assertEqual(len(self.default.futureEvents),
len(self.default.events))
self.assertEqual(self.default.pastEvents, [])
def test_tick(self):
self.default.init(self.sim)
self.default.tick(self.sim)
def test_tock(self):
self.default.init(self.sim)
self.default.tick(self.sim)
self.default.tock()
self.assertEqual(len(self.default.currentEvents), 6)
self.assertEqual(len(self.default.futureEvents),
len(self.default.events) - 6)
|
{
"content_hash": "92492e51b8809c855944ba3afc9d871c",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 81,
"avg_line_length": 45.46551724137931,
"alnum_prop": 0.586841107318923,
"repo_name": "ptgrogan/ofspy",
"id": "62600956aa1b3b5b276cb8332c7132136613bebf",
"size": "5274",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ofspy/test/context/test_context.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "359047"
}
],
"symlink_target": ""
}
|
import unittest2
from tbans.consts.fcm.platform_type import PlatformType
class TestPlatformType(unittest2.TestCase):
def test_validate_invalid(self):
with self.assertRaises(ValueError):
PlatformType.validate(3)
def test_validate(self):
PlatformType.validate(PlatformType.ANDROID)
PlatformType.validate(PlatformType.APNS)
PlatformType.validate(PlatformType.WEBPUSH)
def test_collapse_key_key_invalid_platform(self):
with self.assertRaises(ValueError):
PlatformType.collapse_key_key(-1)
def test_collapse_key_key(self):
self.assertEqual(PlatformType.collapse_key_key(PlatformType.ANDROID), 'collapse_key')
self.assertEqual(PlatformType.collapse_key_key(PlatformType.APNS), 'apns-collapse-id')
self.assertEqual(PlatformType.collapse_key_key(PlatformType.WEBPUSH), 'Topic')
def test_priority_key_invalid_platform(self):
with self.assertRaises(ValueError):
PlatformType.priority_key(-1)
def test_priority_key(self):
self.assertEqual(PlatformType.priority_key(PlatformType.ANDROID), 'priority')
self.assertEqual(PlatformType.priority_key(PlatformType.APNS), 'apns-priority')
self.assertEqual(PlatformType.priority_key(PlatformType.WEBPUSH), 'Urgency')
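# For reference, a minimal sketch (an assumption; the real constants live in
# tbans.consts.fcm.platform_type and may use different values) of the API
# shape these tests exercise:
class _PlatformTypeSketch(object):
    ANDROID = 0
    APNS = 1
    WEBPUSH = 2

    @classmethod
    def validate(cls, platform_type):
        # reject anything outside the known platform constants
        if platform_type not in (cls.ANDROID, cls.APNS, cls.WEBPUSH):
            raise ValueError('Unsupported platform_type: %s' % platform_type)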
|
{
"content_hash": "b392487bc38b6595a8293290865b3024",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 94,
"avg_line_length": 39.72727272727273,
"alnum_prop": 0.7231121281464531,
"repo_name": "jaredhasenklein/the-blue-alliance",
"id": "e5b879df2aab8fbf48c5969a3fe7a891aafcbc61",
"size": "1311",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/tbans_tests/consts/fcm/test_platform_type.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "386108"
},
{
"name": "Dockerfile",
"bytes": "1510"
},
{
"name": "HTML",
"bytes": "907117"
},
{
"name": "JavaScript",
"bytes": "502773"
},
{
"name": "PHP",
"bytes": "10727"
},
{
"name": "Python",
"bytes": "2672617"
},
{
"name": "Ruby",
"bytes": "3494"
},
{
"name": "Shell",
"bytes": "13940"
}
],
"symlink_target": ""
}
|
from mpf.core.platform import SwitchConfig
from mpf.core.rgb_color import RGBColor
from mpf.exceptions.config_file_error import ConfigFileError
from mpf.tests.MpfTestCase import MpfTestCase, MagicMock, test_config, expect_startup_error
from mpf.tests.loop import MockSerial
class BaseMockFast(MockSerial):
    def __init__(self):
        super().__init__()
        self.type = None
        self.queue = []                # canned responses waiting to be read()
        self.expected_commands = {}    # serial cmd -> response; anything else
                                       # raises in _handle_msg
        self.ignore_commands = {}      # commands blindly ACKed with "<cmd>P"
def read(self, length):
del length
if not self.queue:
return
msg = (self.queue.pop() + '\r').encode()
return msg
    def read_ready(self):
        return bool(self.queue)
def write_ready(self):
return True
def _parse(self, msg):
return False
def write(self, msg):
"""Write message."""
parts = msg.split(b'\r')
# remove last newline
assert parts.pop() == b''
for part in parts:
self._handle_msg(part)
return len(msg)
    def _handle_msg(self, msg):
        msg_len = len(msg)
        cmd = msg.decode()
        # the trailing \r was already stripped in write(); ignore the run of
        # blanks the hardware sends as init garbage
        if cmd == (' ' * 256 * 4):
            return msg_len
if cmd[:3] == "WD:" and cmd != "WD:1":
self.queue.append("WD:P")
return msg_len
if cmd in self.ignore_commands:
self.queue.append(cmd[:3] + "P")
return msg_len
if self._parse(cmd):
return msg_len
if cmd in self.expected_commands:
if self.expected_commands[cmd]:
self.queue.append(self.expected_commands[cmd])
del self.expected_commands[cmd]
return msg_len
else:
raise Exception("Unexpected command for " + self.type + ": " + str(cmd))
def stop(self):
pass
class MockFastDmd(BaseMockFast):
def __init__(self):
super().__init__()
self.type = "DMD"
def write(self, msg):
"""Write message."""
parts = msg.split(b'\r')
# remove last newline
if parts[len(parts) - 1] == b'':
parts.pop()
for part in parts:
self._handle_msg(part)
return len(msg)
def _handle_msg(self, msg):
msg_len = len(msg)
if msg == (b' ' * 256 * 4):
return msg_len
cmd = msg
if cmd[:3] == "WD:":
self.queue.append("WD:P")
return msg_len
if cmd in self.ignore_commands:
self.queue.append(cmd[:3] + "P")
return msg_len
if cmd in self.expected_commands:
if self.expected_commands[cmd]:
self.queue.append(self.expected_commands[cmd])
del self.expected_commands[cmd]
return msg_len
else:
raise Exception(self.type + ": " + str(cmd))
class MockFastRgb(BaseMockFast):
def __init__(self):
super().__init__()
self.type = "RGB"
self.ignore_commands["L1:23,FF"] = True
self.leds = {}
    def _parse(self, cmd):
        if cmd[:3] == "RS:":
            # payload is comma-separated records: two hex chars of LED index
            # followed by six hex chars of RGB color
            remaining = cmd[3:]
            while True:
                self.leds[remaining[0:2]] = remaining[2:8]
                remaining = remaining[9:]  # skip the record plus its comma
                if not remaining:
                    break
            self.queue.append("RX:P")
            return True
class MockFastNet(BaseMockFast):
def __init__(self):
super().__init__()
self.type = "NET"
self.id = "FP-CPU-2000-1 2.00"
self.sa = "09,050000000000000000"
self.ch = "2000"
self.expected_commands = None
self.attached_boards = {
'NN:00': 'NN:00,FP-I/O-3208-2 ,02.00,08,20,04,06,00,00,00,00', # 3208 board
'NN:01': 'NN:01,FP-I/O-0804-1 ,02.00,04,08,04,06,00,00,00,00', # 0804 board
'NN:02': 'NN:02,FP-I/O-1616-2 ,02.00,10,10,04,06,00,00,00,00', # 1616 board
'NN:03': 'NN:03,FP-I/O-1616-2 ,02.00,10,10,04,06,00,00,00,00', # 1616 board
'NN:04': 'NN:04,,,,,,,,,,', # no board
}
class MockFastSeg(BaseMockFast):
def __init__(self):
super().__init__()
self.type = "SEG"
class TestFastBase(MpfTestCase):
"""Base class for FAST platform tests, using a default V2 network."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.net_cpu = None
self.seg_cpu = None
self.rgb_cpu = None
self.dmd_cpu = None
def get_config_file(self):
return 'config.yaml'
def get_machine_path(self):
return 'tests/machine_files/fast/'
def get_platform(self):
return False
def _mock_loop(self):
if self.net_cpu:
self.clock.mock_serial("com4", self.net_cpu)
if self.seg_cpu:
self.clock.mock_serial("com3", self.seg_cpu)
if self.rgb_cpu:
self.clock.mock_serial("com5", self.rgb_cpu)
if self.dmd_cpu:
self.clock.mock_serial("com6", self.dmd_cpu)
def create_connections(self):
self.net_cpu = MockFastNet()
self.rgb_cpu = MockFastRgb()
self.dmd_cpu = MockFastDmd()
self.seg_cpu = MockFastSeg()
def create_expected_commands(self):
self.net_cpu.expected_commands = {
'BR:': '#!B:02', # there might be some garbage in front of the command
'ID:': f'ID:{self.net_cpu.id}',
f'CH:{self.net_cpu.ch},FF': 'CH:P',
**self.net_cpu.attached_boards,
"SA:": f"SA:{self.net_cpu.sa}",
"SL:01,01,04,04": "SL:P",
"SL:02,01,04,04": "SL:P",
"SL:03,01,04,04": "SL:P",
"SL:0B,01,04,04": "SL:P",
"SL:0C,01,04,04": "SL:P",
"SL:16,01,04,04": "SL:P",
"SL:07,01,1A,05": "SL:P",
"SL:1A,01,04,04": "SL:P",
"SL:39,01,04,04": "SL:P",
"DL:00,00,00,00": "DL:P",
"DL:01,00,00,00": "DL:P",
"DL:04,00,00,00": "DL:P",
"DL:06,00,00,00": "DL:P",
"DL:07,00,00,00": "DL:P",
"DL:11,00,00,00": "DL:P",
"DL:12,00,00,00": "DL:P",
"DL:13,00,00,00": "DL:P",
"DL:16,00,00,00": "DL:P",
"DL:17,00,00,00": "DL:P",
"DL:20,00,00,00": "DL:P",
"DL:21,00,00,00": "DL:P",
"DL:01,C1,00,18,00,FF,FF,00": "DL:P", # configure digital output
"XO:03,7F": "XO:P",
"XO:14,7F": "XO:P"
}
self.dmd_cpu.expected_commands = {
b'ID:': 'ID:DMD FP-CPU-002-1 00.88',
}
self.rgb_cpu.expected_commands = {
'ID:': 'ID:RGB FP-CPU-002-1 00.89',
"RF:0": "RF:P",
"RA:000000": "RA:P",
"RF:00": "RF:P",
}
self.seg_cpu.expected_commands = {
'ID:': 'ID:SEG FP-CPU-002-1 00.10',
}
def tearDown(self):
if self.dmd_cpu:
self.dmd_cpu.expected_commands = {
b'BL:AA55': "!SRE"
}
if self.rgb_cpu:
self.rgb_cpu.expected_commands = {
"BL:AA55": "!SRE"
}
if self.net_cpu:
self.net_cpu.expected_commands = {
"WD:1": "WD:P"
}
super().tearDown()
if not self.startup_error:
self.assertFalse(self.net_cpu and self.net_cpu.expected_commands)
self.assertFalse(self.rgb_cpu and self.rgb_cpu.expected_commands)
self.assertFalse(self.dmd_cpu and self.dmd_cpu.expected_commands)
def setUp(self):
self.expected_duration = 2
self.create_connections()
self.create_expected_commands()
super().setUp()
# If a test is testing a bad config file and causes a startup exception,
# the machine will shut down. Safety check before we add futures to the loop.
if not self.machine.is_shutting_down:
# There are startup calls that keep the serial traffic busy. Many tests define
# self.net_cpu.expected_commands assuming that the serial bus is quiet. Add a
# tiny delay here to let the startup traffic clear out so that tests don't get
# slammed with unexpected network traffic.
# Note that the above scenario only causes tests to fail on Windows machines!
self.advance_time_and_run(0.1)
def test_coils(self):
self._test_pulse()
self._test_long_pulse()
self._test_timed_enable()
self._test_default_timed_enable()
self._test_enable_exception()
self._test_allow_enable()
self._test_pwm_ssm()
self._test_coil_configure()
# test hardware scan
info_str = """NET CPU: NET FP-CPU-2000-1 2.00
RGB CPU: RGB FP-CPU-002-1 00.89
DMD CPU: DMD FP-CPU-002-1 00.88
Segment Controller: SEG FP-CPU-002-1 00.10
Boards:
Board 0 - Model: FP-I/O-3208-2 Firmware: 02.00 Switches: 32 Drivers: 8
Board 1 - Model: FP-I/O-0804-1 Firmware: 02.00 Switches: 8 Drivers: 4
Board 2 - Model: FP-I/O-1616-2 Firmware: 02.00 Switches: 16 Drivers: 16
Board 3 - Model: FP-I/O-1616-2 Firmware: 02.00 Switches: 16 Drivers: 16
"""
self.assertEqual(info_str, self.machine.default_platform.get_info_string())
def _test_coil_configure(self):
self.assertEqual("FAST Board 0", self.machine.coils["c_test"].hw_driver.get_board_name())
self.assertEqual("FAST Board 3", self.machine.coils["c_flipper_hold"].hw_driver.get_board_name())
# last driver on board
self.net_cpu.expected_commands = {
"DL:2B,00,00,00": "DL:P"
}
coil = self.machine.default_platform.configure_driver(self.machine.coils["c_test"].hw_driver.config, '3-15',
{"connection": "network", "recycle_ms": 10})
self.assertEqual('2B', coil.number)
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
# board 0 has 8 drivers. configuring driver 9 should not work
with self.assertRaises(AssertionError):
self.machine.default_platform.configure_driver(self.machine.coils["c_test"].hw_driver.config, '0-8',
{"connection": "network", "recycle_ms": 10})
# only boards 0-3 exist
with self.assertRaises(AssertionError):
self.machine.default_platform.configure_driver(self.machine.coils["c_test"].hw_driver.config, '4-0',
{"connection": "network", "recycle_ms": 10})
        # only 8 + 4 + 16 + 16 = 44 = 0x2C drivers exist
with self.assertRaises(AssertionError):
self.machine.default_platform.configure_driver(self.machine.coils["c_test"].hw_driver.config, '44',
{"connection": "network", "recycle_ms": 10})
def _test_pulse(self):
self.net_cpu.expected_commands = {
"DL:04,89,00,10,17,FF,00,00,00": "DL:P"
}
# pulse coil 4
self.machine.coils["c_test"].pulse()
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
def _test_long_pulse(self):
# enable command
self.net_cpu.expected_commands = {
"DL:12,C1,00,18,00,FF,FF,00": "DL:P"
}
self.machine.coils["c_long_pulse"].pulse()
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
# disable command
self.net_cpu.expected_commands = {
"TL:12,02": "TL:P"
}
self.advance_time_and_run(1)
# pulse_ms is 2000ms, so after 1s, this should not be sent
self.assertTrue(self.net_cpu.expected_commands)
self.advance_time_and_run(1)
# but after 2s, it should be
self.assertFalse(self.net_cpu.expected_commands)
def _test_timed_enable(self):
# enable command
self.net_cpu.expected_commands = {
"DL:16,89,00,10,14,FF,C8,88,00": "DL:P"
}
self.machine.coils["c_timed_enable"].timed_enable()
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
def _test_default_timed_enable(self):
# enable command
self.net_cpu.expected_commands = {
"DL:17,89,00,10,14,FF,C8,88,00": "DL:P"
}
self.machine.coils["c_default_timed_enable"].pulse()
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
def _test_enable_exception(self):
# enable coil which does not have allow_enable
with self.assertRaises(AssertionError):
self.machine.coils["c_test"].enable()
self.advance_time_and_run(.1)
def _test_allow_enable(self):
self.net_cpu.expected_commands = {
"DL:06,C1,00,18,17,FF,FF,00": "DL:P"
}
self.machine.coils["c_test_allow_enable"].enable()
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
def _test_pwm_ssm(self):
self.net_cpu.expected_commands = {
"DL:13,C1,00,18,0A,FF,84224244,00": "DL:P"
}
self.machine.coils["c_hold_ssm"].enable()
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
def test_nano_reboot(self):
# NANO reboots
self.net_cpu.queue.append("!B:00")
self.advance_time_and_run(.1)
# assert that MPF will stop
self.assertTrue(self.machine.stop_future.done())
def test_rules(self):
self._test_enable_exception_hw_rule()
self._test_two_rules_one_switch()
self._test_hw_rule_pulse()
self._test_hw_rule_pulse_pwm32()
self._test_hw_rule_pulse_inverted_switch()
self._test_hw_rule_same_board()
def _test_hw_rule_same_board(self):
self.net_cpu.expected_commands = {
"DL:21,01,07,10,0A,FF,00,00,14": "DL:P"
}
# coil and switch are on different boards but first 8 switches always work
self.machine.autofire_coils["ac_different_boards"].enable()
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
# switch and coil on board 3. should work
self.net_cpu.expected_commands = {
"DL:21,01,39,10,0A,FF,00,00,14": "DL:P",
"SL:39,01,02,02": "SL:P"
}
self.machine.autofire_coils["ac_board_3"].enable()
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
self.net_cpu.expected_commands = {
"DL:21,01,16,10,0A,FF,00,00,14": "DL:P",
}
# coil and switch are on different boards
self.machine.autofire_coils["ac_broken_combination"].enable()
self.advance_time_and_run(.1)
def _test_enable_exception_hw_rule(self):
# enable coil which does not have allow_enable
with self.assertRaises(AssertionError):
self.machine.flippers["f_test_single"].config['main_coil_overwrite']['hold_power'] = 1.0
self.machine.flippers["f_test_single"].enable()
self.machine.flippers["f_test_single"].config['main_coil_overwrite']['hold_power'] = None
def _test_two_rules_one_switch(self):
self.net_cpu.expected_commands = {
"SL:03,01,02,02": "SL:P",
"DL:04,01,03,10,17,FF,00,00,1B": "DL:P",
"DL:06,01,03,10,17,FF,00,00,2E": "DL:P"
}
self.post_event("ac_same_switch")
self.hit_and_release_switch("s_flipper")
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
def _test_hw_rule_pulse(self):
self.net_cpu.expected_commands = {
"DL:07,01,16,10,0A,FF,00,00,14": "DL:P", # hw rule
"SL:16,01,02,02": "SL:P" # debounce quick on switch
}
self.machine.autofire_coils["ac_slingshot_test"].enable()
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
self.net_cpu.expected_commands = {
"DL:07,81": "DL:P"
}
self.machine.autofire_coils["ac_slingshot_test"].disable()
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
def _test_hw_rule_pulse_pwm32(self):
self.net_cpu.expected_commands = {
"DL:11,89,00,10,0A,AAAAAAAA,00,00,00": "DL:P"
}
self.machine.coils["c_pulse_pwm32_mask"].pulse()
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
self.net_cpu.expected_commands = {
"DL:11,C1,00,18,0A,AAAAAAAA,4A4A4A4A,00": "DL:P"
}
self.machine.coils["c_pulse_pwm32_mask"].enable()
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
def _test_hw_rule_pulse_inverted_switch(self):
self.net_cpu.expected_commands = {
"DL:07,11,1A,10,0A,FF,00,00,14": "DL:P",
"SL:1A,01,02,02": "SL:P"
}
self.machine.autofire_coils["ac_inverted_switch"].enable()
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
def test_firmware_update(self):
self.maxDiff = None
commands = []
def _catch_update(cmd):
commands.append(cmd)
return len(cmd)
parse_func = self.net_cpu.write
self.net_cpu.write = _catch_update
output = self.machine.default_platform.update_firmware()
self.advance_time_and_run()
self.net_cpu.write = parse_func
# check if we send the dummy update
self.assertEqual([b'BL:AA55\r>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>'
b'>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>'
b'>>>>>>>>>>>>>>>>>>>>>>>>>\rBL:AA55\r<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'
b'<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'
b'<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\rBL:AA55\r>>>>>>>>>>>>>>>>>>>>>>>>>>>>>'
b'>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>'
b'>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\rDUMMY UPDAT'
b'E\r', b'WD:3e8\r', b'WD:3e8\r'], commands)
expected_output = """NET CPU is version 2.00
Found an update to version 2.04 for the NET CPU. Will flash file firmware/FAST_NET_01_04_00.txt
Update done.
"""
self.assertEqual(expected_output, output)
def test_servo(self):
# go to min position
self.net_cpu.expected_commands = {
"XO:03,00": "XO:P"
}
self.machine.servos["servo1"].go_to_position(0)
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
# go to max position
self.net_cpu.expected_commands = {
"XO:03,FF": "XO:P"
}
self.machine.servos["servo1"].go_to_position(1)
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
def _switch_hit_cb(self, **kwargs):
self.switch_hit = True
def test_switches(self):
self._test_switch_changes()
self._test_switch_changes_nc()
self._test_switch_configure()
def _test_switch_configure(self):
# last switch on first board
self.net_cpu.expected_commands = {
"SL:1F,01,04,04": "SL:P"
}
self.machine.default_platform.configure_switch('0-31', SwitchConfig(name="", debounce='auto', invert=0), {})
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
# next should not work
with self.assertRaises(AssertionError):
self.machine.default_platform.configure_switch('0-32', SwitchConfig(name="", debounce='auto', invert=0), {})
self.net_cpu.expected_commands = {
"SL:47,01,04,04": "SL:P"
}
self.machine.default_platform.configure_switch('3-15', SwitchConfig(name="", debounce='auto', invert=0), {})
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
# invalid board
with self.assertRaises(AssertionError):
self.machine.default_platform.configure_switch('4-0', SwitchConfig(name="", debounce='auto', invert=0), {})
# last switch is 0x47. 0x48 = 72
with self.assertRaises(AssertionError):
self.machine.default_platform.configure_switch('72', SwitchConfig(name="", debounce='auto', invert=0), {})
def _test_switch_changes(self):
self.assertSwitchState("s_flipper", 0)
self.assertSwitchState("s_flipper_eos", 1)
self.switch_hit = False
self.advance_time_and_run(1)
self.assertSwitchState("s_test", 0)
self.assertFalse(self.switch_hit)
self.machine.events.add_handler("s_test_active", self._switch_hit_cb)
self.machine.default_platform.process_received_message("-L:07", "NET")
self.advance_time_and_run(1)
self.assertTrue(self.switch_hit)
self.assertSwitchState("s_test", 1)
self.switch_hit = False
self.advance_time_and_run(1)
self.assertFalse(self.switch_hit)
self.assertSwitchState("s_test", 1)
self.machine.default_platform.process_received_message("/L:07", "NET")
self.advance_time_and_run(1)
self.assertFalse(self.switch_hit)
self.assertSwitchState("s_test", 0)
def _test_switch_changes_nc(self):
self.switch_hit = False
self.advance_time_and_run(1)
self.assertSwitchState("s_test_nc", 1)
self.assertFalse(self.switch_hit)
self.advance_time_and_run(1)
self.assertFalse(self.switch_hit)
self.assertSwitchState("s_test_nc", 1)
self.machine.default_platform.process_received_message("-L:1A", "NET")
self.advance_time_and_run(1)
self.assertFalse(self.switch_hit)
self.assertSwitchState("s_test_nc", 0)
self.machine.events.add_handler("s_test_nc_active", self._switch_hit_cb)
self.machine.default_platform.process_received_message("/L:1A", "NET")
self.advance_time_and_run(1)
self.assertSwitchState("s_test_nc", 1)
self.assertTrue(self.switch_hit)
self.switch_hit = False
def test_flipper_single_coil(self):
# manual flip no hw rule
self.net_cpu.expected_commands = {
"DL:20,89,00,10,0A,FF,00,00,00": "DL:P",
}
self.machine.coils["c_flipper_main"].pulse()
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
# manual enable no hw rule
self.net_cpu.expected_commands = {
"DL:20,C1,00,18,0A,FF,01,00": "DL:P"
}
self.machine.coils["c_flipper_main"].enable()
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
# manual disable no hw rule
self.net_cpu.expected_commands = {
"TL:20,02": "TL:P"
}
self.machine.coils["c_flipper_main"].disable()
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
# flipper rule enable
self.net_cpu.expected_commands = {
"DL:20,01,01,18,0B,FF,01,00,00": "DL:P",
"SL:01,01,02,02": "SL:P"
}
self.machine.flippers["f_test_single"].enable()
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
# manual flip with hw rule in action
self.net_cpu.expected_commands = {
"DL:20,89,00,10,0A,FF,00,00,00": "DL:P", # configure and pulse
"DL:20,01,01,18,0B,FF,01,00,00": "DL:P", # restore rule
}
self.machine.coils["c_flipper_main"].pulse()
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
# manual flip with hw rule in action without reconfigure (same pulse)
self.net_cpu.expected_commands = {
"TL:20,01": "TL:P", # pulse
}
self.machine.coils["c_flipper_main"].pulse(11)
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
# manual enable with hw rule (same pulse)
self.net_cpu.expected_commands = {
"TL:20,03": "TL:P"
}
self.machine.coils["c_flipper_main"].enable(pulse_ms=11)
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
# manual disable with hw rule
self.net_cpu.expected_commands = {
"TL:20,02": "TL:P",
"TL:20,00": "TL:P" # reenable autofire rule
}
self.machine.coils["c_flipper_main"].disable()
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
# manual enable with hw rule (different pulse)
self.net_cpu.expected_commands = {
"DL:20,C1,00,18,0A,FF,01,00": "DL:P", # configure pwm + enable
}
self.machine.coils["c_flipper_main"].enable()
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
# manual disable with hw rule
self.net_cpu.expected_commands = {
"TL:20,02": "TL:P",
"DL:20,01,01,18,0B,FF,01,00,00": "DN_P", # configure rules
"TL:20,00": "TL:P" # reenable autofire rule
}
self.machine.coils["c_flipper_main"].disable()
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
# disable rule
self.net_cpu.expected_commands = {
"DL:20,81": "DL:P"
}
self.machine.flippers["f_test_single"].disable()
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
# manual flip no hw rule
self.net_cpu.expected_commands = {
"DL:20,89,00,10,0A,FF,00,00,00": "DL:P"
}
self.machine.coils["c_flipper_main"].pulse()
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
# manual flip again with cached config
self.net_cpu.expected_commands = {
"TL:20,01": "TL:P",
}
self.machine.coils["c_flipper_main"].pulse()
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
def test_flipper_two_coils(self):
# we pulse the main coil (20)
# hold coil (21) is pulsed + enabled
self.net_cpu.expected_commands = {
"DL:20,01,01,18,0A,FF,00,00,00": "DL:P",
"DL:21,01,01,18,0A,FF,01,00,00": "DL:P",
"SL:01,01,02,02": "SL:P",
}
self.machine.flippers["f_test_hold"].enable()
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
self.net_cpu.expected_commands = {
"DL:20,81": "DL:P",
"DL:21,81": "DL:P"
}
self.machine.flippers["f_test_hold"].disable()
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
def test_dmd_update(self):
# test configure
dmd = self.machine.default_platform.configure_dmd()
# test set frame to buffer
frame = bytearray()
for i in range(4096):
frame.append(64 + i % 192)
frame = bytes(frame)
# test draw
self.dmd_cpu.expected_commands = {
b'BM:' + frame: False
}
dmd.update(frame)
self.advance_time_and_run(.1)
self.assertFalse(self.dmd_cpu.expected_commands)
def test_bootloader_crash(self):
# Test that the machine stops if the RGB processor sends a bootloader msg
self.machine.stop = MagicMock()
self.machine.default_platform.process_received_message("!B:00", "RGB")
self.advance_time_and_run(1)
self.assertTrue(self.machine.stop.called)
def test_bootloader_crash_ignored(self):
# Test that RGB processor bootloader msgs can be ignored
self.machine.default_platform.config['ignore_rgb_crash'] = True
self.mock_event('fast_rgb_rebooted')
self.machine.stop = MagicMock()
self.machine.default_platform.process_received_message("!B:00", "RGB")
self.advance_time_and_run(1)
self.assertFalse(self.machine.stop.called)
self.assertEventCalled('fast_rgb_rebooted')
def test_lights_and_leds(self):
self._test_matrix_light()
self._test_pdb_gi_light()
self._test_pdb_led()
def _test_matrix_light(self):
# test enable of matrix light
self.net_cpu.expected_commands = {
"L1:23,FF": "L1:P",
}
self.machine.lights["test_pdb_light"].on()
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
# test enable of matrix light with brightness
self.net_cpu.expected_commands = {
"L1:23,80": "L1:P",
}
self.machine.lights["test_pdb_light"].on(brightness=128)
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
# test disable of matrix light
self.net_cpu.expected_commands = {
"L1:23,00": "L1:P",
}
self.machine.lights["test_pdb_light"].off()
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
        # test fade of matrix light over 100ms (first update still sends 00)
self.net_cpu.expected_commands = {
"L1:23,00": "L1:P",
}
self.machine.lights["test_pdb_light"].on(brightness=255, fade_ms=100)
self.advance_time_and_run(.02)
self.assertFalse(self.net_cpu.expected_commands)
# step 1
self.net_cpu.expected_commands = {
"L1:23,32": "L1:P",
"L1:23,33": "L1:P",
}
self.advance_time_and_run(.02)
self.assertEqual(1, len(self.net_cpu.expected_commands))
# step 2
self.net_cpu.expected_commands = {
"L1:23,65": "L1:P",
"L1:23,66": "L1:P",
}
self.advance_time_and_run(.02)
self.assertEqual(1, len(self.net_cpu.expected_commands))
# step 3
self.net_cpu.expected_commands = {
"L1:23,98": "L1:P",
"L1:23,99": "L1:P",
}
self.advance_time_and_run(.02)
self.assertEqual(1, len(self.net_cpu.expected_commands))
# step 4
self.net_cpu.expected_commands = {
"L1:23,CB": "L1:P",
"L1:23,CC": "L1:P",
}
self.advance_time_and_run(.02)
self.assertEqual(1, len(self.net_cpu.expected_commands))
# step 5
self.net_cpu.expected_commands = {
"L1:23,FE": "L1:P",
"L1:23,FF": "L1:P",
}
self.advance_time_and_run(.02)
self.assertEqual(1, len(self.net_cpu.expected_commands))
# step 6 if step 5 did not send FF
if "L1:23,FE" not in self.net_cpu.expected_commands:
self.net_cpu.expected_commands = {
"L1:23,FF": "L1:P",
}
self.advance_time_and_run(.02)
self.assertFalse(self.net_cpu.expected_commands)
def _test_pdb_gi_light(self):
# test gi on
device = self.machine.lights["test_gi"]
self.net_cpu.expected_commands = {
"GI:2A,FF": "GI:P",
}
device.on()
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
self.net_cpu.expected_commands = {
"GI:2A,80": "GI:P",
}
device.on(brightness=128)
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
self.net_cpu.expected_commands = {
"GI:2A,F5": "GI:P",
}
device.on(brightness=245)
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
# test gi off
self.net_cpu.expected_commands = {
"GI:2A,00": "GI:P",
}
device.off()
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
self.net_cpu.expected_commands = {
"GI:2A,F5": "GI:P",
}
device.on(brightness=245)
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
self.net_cpu.expected_commands = {
"GI:2A,00": "GI:P",
}
device.on(brightness=0)
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
def _test_pdb_led(self):
self.advance_time_and_run()
device = self.machine.lights["test_led"]
device2 = self.machine.lights["test_led2"]
self.assertEqual("000000", self.rgb_cpu.leds['97'])
self.assertEqual("000000", self.rgb_cpu.leds['98'])
# test led on
device.on()
self.advance_time_and_run(1)
self.assertEqual("ffffff", self.rgb_cpu.leds['97'])
self.assertEqual("000000", self.rgb_cpu.leds['98'])
device2.color("001122")
# test led off
device.off()
self.advance_time_and_run(1)
self.assertEqual("000000", self.rgb_cpu.leds['97'])
self.assertEqual("001122", self.rgb_cpu.leds['98'])
# test led color
device.color(RGBColor((2, 23, 42)))
self.advance_time_and_run(1)
self.assertEqual("02172a", self.rgb_cpu.leds['97'])
# test led off
device.off()
self.advance_time_and_run(1)
self.assertEqual("000000", self.rgb_cpu.leds['97'])
self.advance_time_and_run(.02)
# fade led over 100ms
device.color(RGBColor((100, 100, 100)), fade_ms=100)
self.advance_time_and_run(.03)
self.assertTrue(10 < int(self.rgb_cpu.leds['97'][0:2], 16) < 40)
self.assertTrue(self.rgb_cpu.leds['97'][0:2] == self.rgb_cpu.leds['97'][2:4] == self.rgb_cpu.leds['97'][4:6])
self.advance_time_and_run(.03)
self.assertTrue(40 < int(self.rgb_cpu.leds['97'][0:2], 16) < 60)
self.assertTrue(self.rgb_cpu.leds['97'][0:2] == self.rgb_cpu.leds['97'][2:4] == self.rgb_cpu.leds['97'][4:6])
self.advance_time_and_run(.03)
self.assertTrue(60 < int(self.rgb_cpu.leds['97'][0:2], 16) < 90)
self.assertTrue(self.rgb_cpu.leds['97'][0:2] == self.rgb_cpu.leds['97'][2:4] == self.rgb_cpu.leds['97'][4:6])
self.advance_time_and_run(2)
self.assertEqual("646464", self.rgb_cpu.leds['97'])
@expect_startup_error()
@test_config("error_lights.yaml")
def test_light_errors(self):
self.assertIsInstance(self.startup_error, ConfigFileError)
self.assertEqual(7, self.startup_error.get_error_no())
self.assertEqual("light.test_led", self.startup_error.get_logger_name())
self.assertIsInstance(self.startup_error.__cause__, ConfigFileError)
self.assertEqual(9, self.startup_error.__cause__.get_error_no())
self.assertEqual("FAST", self.startup_error.__cause__.get_logger_name())
self.assertEqual("Light syntax is number-channel (but was \"3\") for light test_led.",
self.startup_error.__cause__._message)
|
{
"content_hash": "4efae5fec932ce833412ff6566d43e4f",
"timestamp": "",
"source": "github",
"line_count": 986,
"max_line_length": 121,
"avg_line_length": 36.504056795131845,
"alnum_prop": 0.5523851860083905,
"repo_name": "missionpinball/mpf",
"id": "e82439aa0bf09f46252f962f5319d7c254b79a44",
"size": "35993",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "mpf/tests/test_Fast.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "640"
},
{
"name": "C++",
"bytes": "4019"
},
{
"name": "Makefile",
"bytes": "382"
},
{
"name": "Python",
"bytes": "4532953"
}
],
"symlink_target": ""
}
|
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model(['Logit'], ['LinearTrend'], ['NoCycle'], ['LSTM'])
|
{
"content_hash": "d016963d2ba75650a440b2fa055619c3",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 76,
"avg_line_length": 37.25,
"alnum_prop": 0.697986577181208,
"repo_name": "antoinecarme/pyaf",
"id": "7115b43867838637f047842e4c5063d314ecca6a",
"size": "149",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/model_control/detailed/transf_Logit/model_control_one_enabled_Logit_LinearTrend_NoCycle_LSTM.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
}
|
import os
from flask import Flask, render_template, send_from_directory
tmpl_dir = os.path.dirname(os.path.abspath(__file__))
app = Flask(__name__, template_folder=tmpl_dir, static_folder=tmpl_dir)
@app.route('/')
def main():
return render_template('index.html')
@app.route('/<path:filename>')
def static_url(filename):
return send_from_directory(app.static_folder, filename)
if __name__ == '__main__':
app.run(host='0.0.0.0', port=80)
|
{
"content_hash": "a87d2a6a5e55a9b4e213dda9a28c0d74",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 71,
"avg_line_length": 23.94736842105263,
"alnum_prop": 0.676923076923077,
"repo_name": "dongweiming/sed_and_awk",
"id": "e135b7cbe9aa3e051f7d211be40e4869a9827650",
"size": "469",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "run.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Awk",
"bytes": "2449"
},
{
"name": "CSS",
"bytes": "50427"
},
{
"name": "GLSL",
"bytes": "1654"
},
{
"name": "HTML",
"bytes": "52459"
},
{
"name": "JavaScript",
"bytes": "17582"
},
{
"name": "Python",
"bytes": "469"
},
{
"name": "sed",
"bytes": "128"
}
],
"symlink_target": ""
}
|
import uuid
from glance.api import policy
from glance.openstack.common import local
class RequestContext(object):
"""
Stores information about the security context under which the user
accesses the system, as well as additional request information.
"""
user_idt_format = '{user} {tenant} {domain} {user_domain} {p_domain}'
def __init__(self, auth_token=None, user=None, tenant=None, roles=None,
is_admin=False, read_only=False, show_deleted=False,
owner_is_tenant=True, service_catalog=None,
policy_enforcer=None, domain=None, user_domain=None,
project_domain=None):
self.auth_token = auth_token
self.user = user
self.tenant = tenant
self.roles = roles or []
self.read_only = read_only
self._show_deleted = show_deleted
self.owner_is_tenant = owner_is_tenant
self.request_id = str(uuid.uuid4())
self.service_catalog = service_catalog
self.policy_enforcer = policy_enforcer or policy.Enforcer()
self.is_admin = is_admin
self.domain = domain
self.user_domain = user_domain
self.project_domain = project_domain
if not self.is_admin:
self.is_admin = \
self.policy_enforcer.check_is_admin(self)
if not hasattr(local.store, 'context'):
self.update_store()
def to_dict(self):
# NOTE(ameade): These keys are named to correspond with the default
# format string for logging the context in openstack common
user_idt = (
self.user_idt_format.format(user=self.user or '-',
tenant=self.tenant or '-',
domain=self.domain or '-',
user_domain=self.user_domain or '-',
p_domain=self.project_domain or '-'))
return {
'request_id': self.request_id,
#NOTE(bcwaldon): openstack-common logging expects 'user'
'user': self.user,
'user_id': self.user,
#NOTE(bcwaldon): openstack-common logging expects 'tenant'
'tenant': self.tenant,
'tenant_id': self.tenant,
'project_id': self.tenant,
'is_admin': self.is_admin,
'read_deleted': self.show_deleted,
'roles': self.roles,
'auth_token': self.auth_token,
'service_catalog': self.service_catalog,
'user_identity': user_idt
}
@classmethod
def from_dict(cls, values):
return cls(**values)
def update_store(self):
local.store.context = self
@property
def owner(self):
"""Return the owner to correlate with an image."""
return self.tenant if self.owner_is_tenant else self.user
@property
def show_deleted(self):
"""Admins can see deleted by default"""
if self._show_deleted or self.is_admin:
return True
return False
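# A minimal usage sketch (not part of the original module). The stub
# enforcer below is hypothetical and stands in for glance's policy engine,
# which normally needs a loaded policy configuration:
if __name__ == '__main__':
    class _StubEnforcer(object):
        def check_is_admin(self, context):
            return False
    ctx = RequestContext(user='alice', tenant='proj-1',
                         policy_enforcer=_StubEnforcer())
    # owner follows the tenant because owner_is_tenant defaults to True
    assert ctx.owner == 'proj-1'
    # non-admins only see deleted records if show_deleted was passed
    assert ctx.show_deleted is False
    print(ctx.to_dict()['user_identity'])  # -> 'alice proj-1 - - -'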
|
{
"content_hash": "1337f4ffc0f2225a4c5f5e3dc4ef68db",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 77,
"avg_line_length": 34.71910112359551,
"alnum_prop": 0.565695792880259,
"repo_name": "redhat-openstack/glance",
"id": "e3905e82570482239f7bd3297c4c9e286cf57275",
"size": "3731",
"binary": false,
"copies": "1",
"ref": "refs/heads/f22-patches",
"path": "glance/context.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PLpgSQL",
"bytes": "12183"
},
{
"name": "Python",
"bytes": "3304893"
},
{
"name": "Shell",
"bytes": "7168"
}
],
"symlink_target": ""
}
|
from astropy.stats.histogram import calculate_bin_edges
__all__ = ["hist"]
def hist(x, bins=10, ax=None, max_bins=1e5, **kwargs):
"""Enhanced histogram function
This is a histogram function that enables the use of more sophisticated
algorithms for determining bins. Aside from the ``bins`` argument allowing
    a string specifying how bins are computed, the parameters are the same
as pylab.hist().
This function was ported from astroML: https://www.astroml.org/
Parameters
----------
x : array-like
array of data to be histogrammed
bins : int, list, or str, optional
If bins is a string, then it must be one of:
- 'blocks' : use bayesian blocks for dynamic bin widths
- 'knuth' : use Knuth's rule to determine bins
- 'scott' : use Scott's rule to determine bins
- 'freedman' : use the Freedman-Diaconis rule to determine bins
ax : `~matplotlib.axes.Axes` instance, optional
Specify the Axes on which to draw the histogram. If not specified,
then the current active axes will be used.
max_bins : int, optional
Maximum number of bins allowed. With more than a few thousand bins
the performance of matplotlib will not be great. If the number of
bins is large *and* the number of input data points is large then
        it will take a very long time to compute the histogram.
**kwargs :
other keyword arguments are described in ``plt.hist()``.
Notes
-----
Return values are the same as for ``plt.hist()``
See Also
--------
astropy.stats.histogram
"""
# Note that we only calculate the bin edges...matplotlib will calculate
# the actual histogram.
range = kwargs.get("range", None)
weights = kwargs.get("weights", None)
bins = calculate_bin_edges(x, bins, range=range, weights=weights)
if len(bins) > max_bins:
raise ValueError(
"Histogram has too many bins: "
"{nbin}. Use max_bins to increase the number "
"of allowed bins or range to restrict "
"the histogram range.".format(nbin=len(bins))
)
if ax is None:
# optional dependency; only import if strictly needed.
import matplotlib.pyplot as plt
ax = plt.gca()
return ax.hist(x, bins, **kwargs)
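if __name__ == "__main__":
    # A minimal usage sketch (not part of the original module); assumes
    # numpy and matplotlib are installed. The "freedman" rule below asks
    # calculate_bin_edges for Freedman-Diaconis bins before plotting.
    import numpy as np
    import matplotlib.pyplot as plt
    rng = np.random.default_rng(0)
    data = rng.normal(size=1000)
    hist(data, bins="freedman", histtype="step")
    plt.show()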
|
{
"content_hash": "92f8974061c298a646499fe5ccc67b44",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 79,
"avg_line_length": 32.32876712328767,
"alnum_prop": 0.6411016949152543,
"repo_name": "pllim/astropy",
"id": "0c1de320cd69739f0c436f623f3bf67f20e67827",
"size": "2425",
"binary": false,
"copies": "3",
"ref": "refs/heads/placeholder",
"path": "astropy/visualization/hist.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "11040101"
},
{
"name": "C++",
"bytes": "47001"
},
{
"name": "Cython",
"bytes": "78776"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Lex",
"bytes": "183333"
},
{
"name": "M4",
"bytes": "18757"
},
{
"name": "Makefile",
"bytes": "52508"
},
{
"name": "Python",
"bytes": "12404182"
},
{
"name": "Shell",
"bytes": "17024"
},
{
"name": "TeX",
"bytes": "853"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import logging
import os
import xml.etree.ElementTree as ET
from flexget import plugin
from flexget.utils.imdb import is_valid_imdb_title_id
from flexget.event import event
log = logging.getLogger('nfo_lookup')
class NfoLookup(object):
"""
Retrieves information from a local '.nfo' info file.
    The metadata that is read will be added as 'nfo_something' fields in the entry. Also, if an 'id' is found in the
    '.nfo' file then the 'imdb_id' field will be set to its value. This means that if the imdb_lookup plugin is used in
    addition to this plugin, it will be able to use the ID from the '.nfo' file to get the correct movie.
The nfo file is used by Kodi.
Example:
nfo_lookup: yes
WARNING: This plugin will read a file with extension '.nfo' and the same name as the entry filename as an XML file
using xml.etree.ElementTree from the standard python library. As such, it is vulnerable to XML vulnerabilities
described in the link below
https://docs.python.org/3/library/xml.html#xml-vulnerabilities
Use this only with nfo files you have created yourself.
"""
schema = {'type': 'boolean'}
nfo_file_extension = '.nfo'
# This priority makes sure this plugin runs before the imdb_lookup plugin, if it is also used. That way setting
# imdb_id here will help imdb_lookup find the correct movie.
@plugin.priority(150)
def on_task_metainfo(self, task, config):
# check if disabled (value set to false)
if not config:
# Config was set to 'no' instead of yes. Don't do anything then.
return
for entry in task.entries:
# If this entry was obtained from the filesystem plugin it should have a filename field. If it does not have
# one then there is nothing we can do in this plugin.
filename = entry.get('filename')
location = entry.get('location')
# If there is no 'filename' field there is also no nfo file
if filename is None or location is None:
log.warning("Entry %s didn't come from the filesystem plugin", entry.get('title'))
continue
else:
# This will be None if there is no nfo file
nfo_filename = self.get_nfo_filename(entry)
if nfo_filename is None:
log.warning("Entry %s has no corresponding %s file", entry.get('title'), self.nfo_file_extension)
continue
            # Populate the fields from the information in the .nfo file. Note that at this point `nfo_filename` has the
# name of an existing '.nfo' file
self.lookup(entry, nfo_filename)
def lookup(self, entry, nfo_filename):
# If there is already data from a previous parse then we don't need to do anything
if entry.get('nfo_id') is not None:
log.warning("Entry %s was already parsed by nfo_lookup and it will be skipped. ", entry.get('title'))
return
        # nfo_filename should not be None at this point
assert nfo_filename is not None
# Get all values we can from the nfo file. If the nfo file can't be parsed then a warning is logged and we
# return without changing the entry
try:
nfo_reader = NfoReader(nfo_filename)
fields = nfo_reader.get_fields_from_nfo_file()
except BadXmlFile:
log.warning("Invalid '.nfo' file for entry %s", entry.get('title'))
return
entry.update(fields)
# If a valid IMDB id was found in the nfo file, set the imdb_id field of the entry. This will help the
# imdb_lookup plugin to get the correct data if it is also used.
if 'nfo_id' in fields:
if is_valid_imdb_title_id(entry.get('nfo_id', '')):
entry.update({'imdb_id': fields['nfo_id']})
else:
log.warning("ID found in nfo file for entry '%s', but it was not a valid IMDB ID", entry.get('title'))
def get_nfo_filename(self, entry):
"""
Get the filename of the nfo file from the 'location' in the entry.
Returns
-------
str
            The file name of the 'nfo' file, or None if there is no 'nfo' file.
"""
location = entry.get('location')
nfo_full_filename = os.path.splitext(location)[0] + self.nfo_file_extension
if os.path.isfile(nfo_full_filename):
return nfo_full_filename
class BadXmlFile(Exception):
"""
Exception that is raised if the nfo file can't be parsed due to some invalid nfo file.
"""
pass
class NfoReader(object):
"""
Class in charge of parsing the '.nfo' file and getting a dictionary of fields.
The '.nfo' file is an XML file. Some fields can only appear once, such as 'title', 'id', 'plot', etc., while other
fields can appear multiple times (with different values), such as 'thumb', 'genre', etc. These fields are listed in
the `_fields` attribute.
"""
def __init__(self, filename):
try:
tree = ET.parse(filename)
root = tree.getroot()
except ET.ParseError:
raise BadXmlFile()
if os.path.exists(filename):
self._nfo_filename = filename
self._root = root
else:
raise BadXmlFile()
        # Each key in the dictionary corresponds to a field that should be read from the nfo file. The values are a tuple
# with a boolean and a callable. The boolean indicates if the field can appear multiple times, while the
# callable is a function to read the field value from the XML element.
#
# In the future we could extend the nfo_lookup plugin to accept 'set' in its configuration to add new entries to
# this dictionary to handle other tags in the nfo file and add the data to the entry.
self._fields = {"title": (False, NfoReader._single_elem_getter_func),
"originaltitle": (False, NfoReader._single_elem_getter_func),
"sorttitle": (False, NfoReader._single_elem_getter_func),
"rating": (False, NfoReader._single_elem_getter_func),
"year": (False, NfoReader._single_elem_getter_func),
"votes": (False, NfoReader._single_elem_getter_func),
"plot": (False, NfoReader._single_elem_getter_func),
"runtime": (False, NfoReader._single_elem_getter_func),
"id": (False, NfoReader._single_elem_getter_func),
"filenameandpath": (False, NfoReader._single_elem_getter_func),
"trailer": (False, NfoReader._single_elem_getter_func),
"thumb": (True, NfoReader._single_elem_getter_func),
"genre": (True, NfoReader._single_elem_getter_func),
"director": (True, NfoReader._single_elem_getter_func),
# Actor field has child elements, such as 'name' and 'role'
"actor": (True, NfoReader._composite_elem_getter_func),
"studio": (True, NfoReader._single_elem_getter_func),
"country": (True, NfoReader._single_elem_getter_func)}
@staticmethod
def _single_elem_getter_func(x):
"""
        Method to get the text value of a simple XML element that does not contain child nodes.
"""
return x.text
@staticmethod
def _composite_elem_getter_func(x):
"""
Method to get XML elements that have children as a dictionary.
"""
return {i.tag: i.text for i in x}
def _extract_single_field(self, name, getter_func):
"""
Use this method to get fields from the root XML tree that only appear once, such as 'title', 'year', etc.
"""
f = self._root.find(name)
if f is not None:
return getter_func(f)
def _extract_multiple_field(self, name, getter_func):
"""
Use this method to get fields from the root XML tree that can appear more than once, such as 'actor', 'genre',
'director', etc. The result will be a list of values.
"""
values = [getter_func(i) for i in self._root.findall(name)]
if len(values) > 0:
return values
def get_fields_from_nfo_file(self):
"""
        Returns a dictionary with all fields read from the '.nfo' file.
The keys are named as 'nfo_something'.
"""
d = {}
if self._root is None:
return d
# TODO: Right now it only works for movies
if self._root.tag != 'movie':
return d
for name, values in self._fields.items():
multiple_bool = values[0]
getter_func = values[1]
nfo_field_name = 'nfo_{0}'.format(name)
if multiple_bool:
v = self._extract_multiple_field(name, getter_func)
else:
v = self._extract_single_field(name, getter_func)
if v is not None:
d[nfo_field_name] = v
return d
@event('plugin.register')
def register_plugin():
plugin.register(NfoLookup, 'nfo_lookup', api_ver=2)
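# A minimal sketch of how NfoReader maps an nfo file to entry fields (not
# part of the original plugin; the sample movie data below is made up):
if __name__ == '__main__':
    import tempfile
    sample = (b"<movie><title>Some Movie</title><id>tt0000001</id>"
              b"<genre>Drama</genre><genre>War</genre></movie>")
    with tempfile.NamedTemporaryFile(suffix='.nfo', delete=False) as fh:
        fh.write(sample)
    fields = NfoReader(fh.name).get_fields_from_nfo_file()
    # single-valued tags become scalars, repeatable tags become lists:
    # {'nfo_title': 'Some Movie', 'nfo_id': 'tt0000001',
    #  'nfo_genre': ['Drama', 'War']}
    print(fields)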
|
{
"content_hash": "4830076e25704c3cda8b48d9e96123b4",
"timestamp": "",
"source": "github",
"line_count": 230,
"max_line_length": 120,
"avg_line_length": 41.178260869565214,
"alnum_prop": 0.5999366487171365,
"repo_name": "LynxyssCZ/Flexget",
"id": "37ddc4ba17951cdc0bcf03b266651ad1913f6eed",
"size": "9471",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "flexget/plugins/metainfo/nfo_lookup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11875"
},
{
"name": "Dockerfile",
"bytes": "1988"
},
{
"name": "HTML",
"bytes": "79800"
},
{
"name": "JavaScript",
"bytes": "263723"
},
{
"name": "Python",
"bytes": "3371493"
},
{
"name": "SRecode Template",
"bytes": "3"
},
{
"name": "Shell",
"bytes": "1576"
}
],
"symlink_target": ""
}
|
import os
from configSiteMover import config_sm
import SiteMover
from xrootdObjectstoreSiteMover import xrootdObjectstoreSiteMover
from S3ObjectstoreSiteMover import S3ObjectstoreSiteMover
class objectstoreSiteMover(SiteMover.SiteMover):
"""
ObjectstoreSiteMover
    It uses the URL to decide which ObjectstoreSiteMover implementation to use.
"""
copyCommand = "objectstore"
checksum_command = "adler32"
def __init__(self, setup_path='', useTimerCommand=True, *args, **kwrds):
self._setup = setup_path
self._useTimerCommand = useTimerCommand
def get_data(self, gpfn, lfn, path, fsize=0, fchecksum=0, guid=0, **pdict):
gpfn = gpfn.replace("s3+rucio", "s3")
if gpfn.startswith("root:"):
sitemover = xrootdObjectstoreSiteMover(self.getSetup())
return sitemover.get_data(gpfn, lfn, path, fsize, fchecksum, guid, **pdict)
if gpfn.startswith("s3:"):
sitemover = S3ObjectstoreSiteMover(self.getSetup(), self._useTimerCommand)
return sitemover.get_data(gpfn, lfn, path, fsize, fchecksum, guid, **pdict)
return -1, "No objectstore sitemover found for this scheme(%s)" % gpfn
def put_data(self, source, destination, fsize=0, fchecksum=0, **pdict):
# Get input parameters from pdict
lfn = pdict.get('lfn', '')
logPath = pdict.get('logPath', '')
if logPath != "":
surl = logPath
else:
surl = os.path.join(destination, lfn)
surl = surl.replace("s3+rucio", "s3")
if surl.startswith("root:"):
sitemover = xrootdObjectstoreSiteMover(self.getSetup())
            return sitemover.put_data(source, destination, fsize, fchecksum, **pdict)
if surl.startswith("s3:"):
sitemover = S3ObjectstoreSiteMover(self.getSetup(), self._useTimerCommand)
            return sitemover.put_data(source, surl, fsize, fchecksum, **pdict)
return -1, "No objectstore sitemover found for this scheme(%s)" % destination, destination, fsize, fchecksum, config_sm.ARCH_DEFAULT
if __name__ == '__main__':
os.environ['PilotHomeDir'] = os.getcwd()
from SiteInformation import SiteInformation
s1 = SiteInformation()
#s1.getObjectstoresField("os_access_key", "eventservice", queuename='BNL_EC2W2_MCORE')
f = objectstoreSiteMover()
gpfn = "nonsens_gpfn"
lfn = "AOD.310713._000004.pool.root.1"
path = os.getcwd()
fsize = "4261010441"
fchecksum = "9145af38"
dsname = "data11_7TeV.00177986.physics_Egamma.merge.AOD.r2276_p516_p523_tid310713_00"
report = {}
#print f.getGlobalFilePaths(dsname)
#print f.findGlobalFilePath(lfn, dsname)
#print f.getLocalROOTSetup()
#path = "root://atlas-objectstore.cern.ch//atlas/eventservice/2181626927" # + your .root filename"
"""
source = "/bin/hostname"
dest = "root://eosatlas.cern.ch//eos/atlas/unpledged/group-wisc/users/wguan/"
lfn = "NTUP_PHOTON.01255150._000001.root.1"
localSize = 17848
localChecksum = "89b93830"
print f.put_data(source, dest, fsize=localSize, fchecksum=localChecksum, prodSourceLabel='ptest', experiment='ATLAS', report =report, lfn=lfn, guid='aa8ee1ae-54a5-468b-a0a0-41cf17477ffc')
gpfn = "root://eosatlas.cern.ch//eos/atlas/unpledged/group-wisc/users/wguan/NTUP_PHOTON.01255150._000001.root.1"
lfn = "NTUP_PHOTON.01255150._000001.root.1"
tmpDir = "/tmp/"
localSize = 17848
localChecksum = "89b93830"
print f.get_data(gpfn, lfn, tmpDir, fsize=localSize, fchecksum=localChecksum, experiment='ATLAS', report =report, guid='aa8ee1ae-54a5-468b-a0a0-41cf17477ffc')
"""
# test S3 object store
source = "/bin/hostname"
#dest = "s3://ceph003.usatlas.bnl.gov:8443//wguan_bucket/dir1/dir2/NTUP_PHOTON.01255150._000001.root.1"
dest = "s3://s3-us-west-2.amazonaws.com:80//s3-atlasdatadisk-west2-racf/dir1/"
lfn = "NTUP_PHOTON.01255150._000001.root.1"
localSize = None
localChecksum = None
    print f.put_data(source, dest, fsize=localSize, fchecksum=localChecksum, prodSourceLabel='ptest', experiment='ATLAS', report=report, lfn=lfn, guid='aa8ee1ae-54a5-468b-a0a0-41cf17477ffc', jobId=2730987843, jobsetID=2728044425, pandaProxySecretKey='')
gpfn = "s3://ceph003.usatlas.bnl.gov:8443//wguan_bucket/dir1/dir2/NTUP_PHOTON.01255150._000001.root.1"
gpfn = "s3://s3-us-west-2.amazonaws.com:80//s3-atlasdatadisk-west2-racf/dir1/NTUP_PHOTON.01255150._000001.root.1"
lfn = "NTUP_PHOTON.01255150._000001.root.1"
tmpDir = "/tmp/"
localSize = None
localChecksum = None
    print f.get_data(gpfn, lfn, tmpDir, fsize=localSize, fchecksum=localChecksum, experiment='ATLAS', report=report, guid='aa8ee1ae-54a5-468b-a0a0-41cf17477ffc', jobId=2730987843, jobsetID=2728044425, pandaProxySecretKey='deb05b9fb5034a45b80c03bd671359c9')
|
{
"content_hash": "c225e39d7e340c73f47bdd75f2dddeae",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 256,
"avg_line_length": 47.627450980392155,
"alnum_prop": 0.6873198847262247,
"repo_name": "PanDAWMS/pilot",
"id": "0aab9dbbc4006ac10614eb6e13f1101929dde5bc",
"size": "5242",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "objectstoreSiteMover.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4990965"
},
{
"name": "Shell",
"bytes": "23530"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class FillValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="fill", parent_name="volume.slices.x", **kwargs):
super(FillValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
max=kwargs.pop("max", 1),
min=kwargs.pop("min", 0),
role=kwargs.pop("role", "style"),
**kwargs
)
|
{
"content_hash": "7d2347a47640a0db4a4de5eda5a492fd",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 84,
"avg_line_length": 36.92857142857143,
"alnum_prop": 0.5764023210831721,
"repo_name": "plotly/python-api",
"id": "3271a78fa214b3660e6c56c9a8067749a59221d6",
"size": "517",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/volume/slices/x/_fill.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
}
|
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
from dropbot import __version__ as pkg_version
readme = open('README.md').read()
requirements = [
'sleekxmpp==1.3.1',
'eveapi==1.2.6',
'redis==2.10.2',
'requests==2.3.0',
'humanize==0.5',
'dnspython==1.11.1',
'networkx==1.9',
]
test_requirements = [
'mock==1.0.1',
]
setup(
name='dropbot',
version=pkg_version,
description='A XMPP bot to provide simple services to NOG8S and Predditors in general',
long_description=readme,
author='Andrew Williams',
author_email='andy@tensixtyone.com',
url='https://github.com/nikdoof/dropbot/',
packages=[
'dropbot',
],
include_package_data=True,
install_requires=requirements,
license="BSD",
zip_safe=False,
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.7',
],
test_suite='tests',
tests_require=test_requirements,
entry_points = {
'console_scripts': ['dropbot=dropbot.cli:main'],
}
)
|
{
"content_hash": "aab387ea5c4d0f2a3b66169a12ae02de",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 91,
"avg_line_length": 24.673076923076923,
"alnum_prop": 0.6118472330475448,
"repo_name": "nikdoof/dropbot",
"id": "001fc39cda2cde5276285311b022f8feef3b0ca3",
"size": "1330",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "68337"
},
{
"name": "Shell",
"bytes": "761"
}
],
"symlink_target": ""
}
|
from flask import request, url_for
from funcy import project, partial
from flask_restful import abort
from redash import models
from redash.handlers.base import (
BaseResource,
get_object_or_404,
paginate,
filter_by_tags,
order_results as _order_results,
)
from redash.permissions import (
can_modify,
require_admin_or_owner,
require_object_modify_permission,
require_permission,
)
from redash.security import csp_allows_embeding
from redash.serializers import (
DashboardSerializer,
public_dashboard,
)
from sqlalchemy.orm.exc import StaleDataError
# Ordering map for relationships
order_map = {
"name": "lowercase_name",
"-name": "-lowercase_name",
"created_at": "created_at",
"-created_at": "-created_at",
}
order_results = partial(
_order_results, default_order="-created_at", allowed_orders=order_map
)
class DashboardListResource(BaseResource):
@require_permission("list_dashboards")
def get(self):
"""
Lists all accessible dashboards.
        :qparam number page_size: Number of dashboards to return per page
        :qparam number page: Page number to retrieve
        :qparam string order: Name of column to order by
        :qparam string q: Full text search term
Responds with an array of :ref:`dashboard <dashboard-response-label>`
objects.
"""
search_term = request.args.get("q")
if search_term:
results = models.Dashboard.search(
self.current_org,
self.current_user.group_ids,
self.current_user.id,
search_term,
)
else:
results = models.Dashboard.all(
self.current_org, self.current_user.group_ids, self.current_user.id
)
results = filter_by_tags(results, models.Dashboard.tags)
# order results according to passed order parameter,
# special-casing search queries where the database
# provides an order by search rank
ordered_results = order_results(results, fallback=not bool(search_term))
page = request.args.get("page", 1, type=int)
page_size = request.args.get("page_size", 25, type=int)
response = paginate(
ordered_results,
page=page,
page_size=page_size,
serializer=DashboardSerializer,
)
if search_term:
self.record_event(
{"action": "search", "object_type": "dashboard", "term": search_term}
)
else:
self.record_event({"action": "list", "object_type": "dashboard"})
return response
@require_permission("create_dashboard")
def post(self):
"""
Creates a new dashboard.
:<json string name: Dashboard name
Responds with a :ref:`dashboard <dashboard-response-label>`.
"""
dashboard_properties = request.get_json(force=True)
dashboard = models.Dashboard(
name=dashboard_properties["name"],
org=self.current_org,
user=self.current_user,
is_draft=True,
layout="[]",
)
models.db.session.add(dashboard)
models.db.session.commit()
return DashboardSerializer(dashboard).serialize()
class MyDashboardsResource(BaseResource):
@require_permission("list_dashboards")
def get(self):
"""
Retrieve a list of dashboards created by the current user.
:qparam number page_size: Number of dashboards to return per page
:qparam number page: Page number to retrieve
        :qparam string order: Name of column to order by
        :qparam string q: Full text search term
Responds with an array of :ref:`dashboard <dashboard-response-label>`
objects.
"""
search_term = request.args.get("q", "")
if search_term:
results = models.Dashboard.search_by_user(search_term, self.current_user)
else:
results = models.Dashboard.by_user(self.current_user)
results = filter_by_tags(results, models.Dashboard.tags)
# order results according to passed order parameter,
# special-casing search queries where the database
# provides an order by search rank
ordered_results = order_results(results, fallback=not bool(search_term))
page = request.args.get("page", 1, type=int)
page_size = request.args.get("page_size", 25, type=int)
return paginate(
ordered_results,
page,
page_size,
DashboardSerializer
)
class DashboardResource(BaseResource):
@require_permission("list_dashboards")
def get(self, dashboard_id=None):
"""
Retrieves a dashboard.
:qparam number id: Id of dashboard to retrieve.
.. _dashboard-response-label:
:>json number id: Dashboard ID
:>json string name:
:>json string slug:
:>json number user_id: ID of the dashboard creator
:>json string created_at: ISO format timestamp for dashboard creation
:>json string updated_at: ISO format timestamp for last dashboard modification
:>json number version: Revision number of dashboard
:>json boolean dashboard_filters_enabled: Whether filters are enabled or not
:>json boolean is_archived: Whether this dashboard has been removed from the index or not
:>json boolean is_draft: Whether this dashboard is a draft or not.
:>json array layout: Array of arrays containing widget IDs, corresponding to the rows and columns the widgets are displayed in
:>json array widgets: Array of arrays containing :ref:`widget <widget-response-label>` data
:>json object options: Dashboard options
.. _widget-response-label:
Widget structure:
:>json number widget.id: Widget ID
:>json number widget.width: Widget size
:>json object widget.options: Widget options
:>json number widget.dashboard_id: ID of dashboard containing this widget
:>json string widget.text: Widget contents, if this is a text-box widget
:>json object widget.visualization: Widget contents, if this is a visualization widget
:>json string widget.created_at: ISO format timestamp for widget creation
:>json string widget.updated_at: ISO format timestamp for last widget modification
"""
if request.args.get("legacy") is not None:
fn = models.Dashboard.get_by_slug_and_org
else:
fn = models.Dashboard.get_by_id_and_org
dashboard = get_object_or_404(fn, dashboard_id, self.current_org)
response = DashboardSerializer(
dashboard, with_widgets=True, user=self.current_user
).serialize()
api_key = models.ApiKey.get_by_object(dashboard)
if api_key:
response["public_url"] = url_for(
"redash.public_dashboard",
token=api_key.api_key,
org_slug=self.current_org.slug,
_external=True,
)
response["api_key"] = api_key.api_key
response["can_edit"] = can_modify(dashboard, self.current_user)
self.record_event(
{"action": "view", "object_id": dashboard.id, "object_type": "dashboard"}
)
return response
@require_permission("edit_dashboard")
def post(self, dashboard_id):
"""
Modifies a dashboard.
:qparam number id: Id of dashboard to retrieve.
Responds with the updated :ref:`dashboard <dashboard-response-label>`.
:status 200: success
:status 409: Version conflict -- dashboard modified since last read
"""
dashboard_properties = request.get_json(force=True)
# TODO: either convert all requests to use slugs or ids
dashboard = models.Dashboard.get_by_id_and_org(dashboard_id, self.current_org)
require_object_modify_permission(dashboard, self.current_user)
updates = project(
dashboard_properties,
(
"name",
"layout",
"version",
"tags",
"is_draft",
"is_archived",
"dashboard_filters_enabled",
"options",
),
)
# SQLAlchemy handles the case where a concurrent transaction beats us
# to the update. But we still have to make sure that we're not starting
# out behind.
if "version" in updates and updates["version"] != dashboard.version:
abort(409)
updates["changed_by"] = self.current_user
self.update_model(dashboard, updates)
models.db.session.add(dashboard)
try:
models.db.session.commit()
except StaleDataError:
abort(409)
result = DashboardSerializer(
dashboard, with_widgets=True, user=self.current_user
).serialize()
self.record_event(
{"action": "edit", "object_id": dashboard.id, "object_type": "dashboard"}
)
return result
@require_permission("edit_dashboard")
def delete(self, dashboard_id):
"""
Archives a dashboard.
:qparam number id: Id of dashboard to retrieve.
Responds with the archived :ref:`dashboard <dashboard-response-label>`.
"""
dashboard = models.Dashboard.get_by_id_and_org(dashboard_id, self.current_org)
dashboard.is_archived = True
dashboard.record_changes(changed_by=self.current_user)
models.db.session.add(dashboard)
d = DashboardSerializer(
dashboard, with_widgets=True, user=self.current_user
).serialize()
models.db.session.commit()
self.record_event(
{"action": "archive", "object_id": dashboard.id, "object_type": "dashboard"}
)
return d
class PublicDashboardResource(BaseResource):
decorators = BaseResource.decorators + [csp_allows_embeding]
def get(self, token):
"""
Retrieve a public dashboard.
:param token: An API key for a public dashboard.
:>json array widgets: An array of arrays of :ref:`public widgets <public-widget-label>`, corresponding to the rows and columns the widgets are displayed in
"""
if self.current_org.get_setting("disable_public_urls"):
abort(400, message="Public URLs are disabled.")
if not isinstance(self.current_user, models.ApiUser):
api_key = get_object_or_404(models.ApiKey.get_by_api_key, token)
dashboard = api_key.object
else:
dashboard = self.current_user.object
return public_dashboard(dashboard)
class DashboardShareResource(BaseResource):
def post(self, dashboard_id):
"""
Allow anonymous access to a dashboard.
:param dashboard_id: The numeric ID of the dashboard to share.
:>json string public_url: The URL for anonymous access to the dashboard.
:>json api_key: The API key to use when accessing it.
"""
dashboard = models.Dashboard.get_by_id_and_org(dashboard_id, self.current_org)
require_admin_or_owner(dashboard.user_id)
api_key = models.ApiKey.create_for_object(dashboard, self.current_user)
models.db.session.flush()
models.db.session.commit()
public_url = url_for(
"redash.public_dashboard",
token=api_key.api_key,
org_slug=self.current_org.slug,
_external=True,
)
self.record_event(
{
"action": "activate_api_key",
"object_id": dashboard.id,
"object_type": "dashboard",
}
)
return {"public_url": public_url, "api_key": api_key.api_key}
def delete(self, dashboard_id):
"""
Disable anonymous access to a dashboard.
:param dashboard_id: The numeric ID of the dashboard to unshare.
"""
dashboard = models.Dashboard.get_by_id_and_org(dashboard_id, self.current_org)
require_admin_or_owner(dashboard.user_id)
api_key = models.ApiKey.get_by_object(dashboard)
if api_key:
api_key.active = False
models.db.session.add(api_key)
models.db.session.commit()
self.record_event(
{
"action": "deactivate_api_key",
"object_id": dashboard.id,
"object_type": "dashboard",
}
)
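# A hypothetical client-side sketch of the share/unshare round trip above
# (not part of this module; the base URL, dashboard id and API key are
# placeholders):
#
#     import requests
#     url = "https://redash.example.com/api/dashboards/42/share"
#     headers = {"Authorization": "Key <user-api-key>"}
#     public = requests.post(url, headers=headers).json()
#     public["public_url"]  # anonymous URL backed by the new api_key
#     requests.delete(url, headers=headers)  # deactivates the api_key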
class DashboardTagsResource(BaseResource):
@require_permission("list_dashboards")
def get(self):
"""
        Lists the tags of all accessible dashboards.
"""
tags = models.Dashboard.all_tags(self.current_org, self.current_user)
return {"tags": [{"name": name, "count": count} for name, count in tags]}
class DashboardFavoriteListResource(BaseResource):
def get(self):
search_term = request.args.get("q")
if search_term:
base_query = models.Dashboard.search(
self.current_org,
self.current_user.group_ids,
self.current_user.id,
search_term,
)
favorites = models.Dashboard.favorites(
self.current_user, base_query=base_query
)
else:
favorites = models.Dashboard.favorites(self.current_user)
favorites = filter_by_tags(favorites, models.Dashboard.tags)
# order results according to passed order parameter,
# special-casing search queries where the database
# provides an order by search rank
favorites = order_results(favorites, fallback=not bool(search_term))
page = request.args.get("page", 1, type=int)
page_size = request.args.get("page_size", 25, type=int)
# TODO: we don't need to check for favorite status here
response = paginate(favorites, page, page_size, DashboardSerializer)
self.record_event(
{
"action": "load_favorites",
"object_type": "dashboard",
"params": {
"q": search_term,
"tags": request.args.getlist("tags"),
"page": page,
},
}
)
return response
|
{
"content_hash": "d79bc7572c95fa48bf227a1a0b6c0412",
"timestamp": "",
"source": "github",
"line_count": 429,
"max_line_length": 163,
"avg_line_length": 33.7995337995338,
"alnum_prop": 0.6021379310344828,
"repo_name": "getredash/redash",
"id": "b86fb5e30d882eadc7745e7018b6884f302f709c",
"size": "14500",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "redash/handlers/dashboards.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "2135"
},
{
"name": "Dockerfile",
"bytes": "3500"
},
{
"name": "HTML",
"bytes": "32865"
},
{
"name": "JavaScript",
"bytes": "990852"
},
{
"name": "Less",
"bytes": "196598"
},
{
"name": "Makefile",
"bytes": "1381"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "1238254"
},
{
"name": "Shell",
"bytes": "4734"
},
{
"name": "TypeScript",
"bytes": "521588"
}
],
"symlink_target": ""
}
|
import json
import unittest
from eve.utils import config
from tests import BaseTest, SimpleDoc, ComplexDoc
class TestHttpPut(BaseTest, unittest.TestCase):
def setUp(self):
response = self.client.post('/simpledoc/',
data='{"a": "jimmy", "b": 23}',
content_type='application/json')
json_data = response.get_json()
self.url = '/simpledoc/%s' % json_data[config.ID_FIELD]
response = self.client.get(self.url).get_json()
self.etag = response[config.ETAG]
self._id = response[config.ID_FIELD]
self.updated = response[config.LAST_UPDATED]
def tearDown(self):
SimpleDoc.objects().delete()
def do_put(self, url=None, data=None, headers=None):
if url is None:
url = self.url
if headers is None:
            headers = [('If-Match', self.etag)]
return self.client.put(url, data=data,
content_type='application/json',
headers=headers)
def test_unknown_id(self):
response = self.do_put('/simpledoc/unknown', data='{"a": "greg"}')
self.assertEqual(response.status_code, 404)
def test_bad_etag(self):
response = self.do_put(data='{"a": "greg"}', headers=(('If-Match', 'blabla'),))
self.assertEqual(response.status_code, 412)
def test_ifmatch_missing(self):
response = self.do_put(data='{"a": "greg"}', headers=())
self.assertEqual(response.status_code, 403)
def test_put_overwrite_all(self):
response = self.do_put(data='{"a": "greg", "b": 300}')
response = self.client.get(self.url).get_json()
self.assertIn('a', response)
self.assertEqual(response['a'], "greg")
self.assertIn('b', response)
self.assertEqual(response['b'], 300)
def test_put_overwrite_subset(self):
self.do_put(data='{"a": "greg"}')
response = self.client.get(self.url).get_json()
self.assertIn('a', response)
self.assertEqual(response['a'], "greg")
self.assertNotIn('b', response)
def test_put_subresource(self):
# create new resource and subresource
s = SimpleDoc(a="Answer to everything", b=42).save()
d = ComplexDoc(l=['a', 'b'], n=999, r=s).save()
response = self.client.get('/simpledoc/%s/complexdoc/%s' % (s.id, d.id))
etag = response.get_json()[config.ETAG]
headers = [('If-Match', etag)]
        # the new document to PUT
put_data = {'l': ['x', 'y', 'z'], 'r': str(s.id)}
put_url = '/simpledoc/%s/complexdoc/%s' % (s.id, d.id)
response = self.client.put(put_url, data=json.dumps(put_data),
content_type='application/json', headers=headers)
self.assertEqual(response.status_code, 200)
resp_json = response.get_json()
self.assertEqual(resp_json[config.STATUS], "OK")
        # check that it was really edited
response = self.client.get('/simpledoc/%s/complexdoc/%s' % (s.id, d.id))
json_data = response.get_json()
self.assertListEqual(json_data['l'], ['x', 'y', 'z'])
self.assertNotIn('n', json_data)
s.delete()
d.delete()
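    # The tests above exercise Eve's optimistic-concurrency contract for
    # PUT: a wrong If-Match etag yields 412 Precondition Failed, a missing
    # If-Match header yields 403, and a matching etag replaces the document
    # wholesale, dropping any fields absent from the request body.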
|
{
"content_hash": "a554b46c0c5fac6bbbc7b2d780fbf546",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 87,
"avg_line_length": 39.36144578313253,
"alnum_prop": 0.5681052953780227,
"repo_name": "MongoEngine/eve-mongoengine",
"id": "f98975855acc34bd1d7874312785d1c30192732a",
"size": "3268",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "tests/test_put.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "95933"
}
],
"symlink_target": ""
}
|
import os
import ctypes
WORKING_DIR = os.environ.get("TRT_WORKING_DIR") or os.path.dirname(os.path.realpath(__file__))
IS_WINDOWS = os.name == "nt"
if IS_WINDOWS:
HARDMAX_PLUGIN_LIBRARY_NAME = "customHardmaxPlugin.dll"
HARDMAX_PLUGIN_LIBRARY = [
os.path.join(WORKING_DIR, "build", "Debug", HARDMAX_PLUGIN_LIBRARY_NAME),
os.path.join(WORKING_DIR, "build", "Release", HARDMAX_PLUGIN_LIBRARY_NAME),
]
else:
HARDMAX_PLUGIN_LIBRARY_NAME = "libcustomHardmaxPlugin.so"
HARDMAX_PLUGIN_LIBRARY = [os.path.join(WORKING_DIR, "build", HARDMAX_PLUGIN_LIBRARY_NAME)]
def load_plugin_lib():
for plugin_lib in HARDMAX_PLUGIN_LIBRARY:
if os.path.isfile(plugin_lib):
try:
# Python specifies that winmode is 0 by default, but some implementations
# incorrectly default to None instead. See:
# https://docs.python.org/3.8/library/ctypes.html
# https://github.com/python/cpython/blob/3.10/Lib/ctypes/__init__.py#L343
ctypes.CDLL(plugin_lib, winmode=0)
except TypeError:
# winmode only introduced in python 3.8
ctypes.CDLL(plugin_lib)
return
raise IOError(
"\n{}\n{}\n{}\n".format(
"Failed to load library ({}).".format(HARDMAX_PLUGIN_LIBRARY_NAME),
"Please build the Hardmax sample plugin.",
"For more information, see the included README.md",
)
)
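# A minimal usage sketch (not part of the original sample): the shared
# library must be loaded before TensorRT builds or deserializes an engine
# that references the custom Hardmax plugin, e.g.
#
#     import tensorrt as trt
#     load_plugin_lib()
#     registry = trt.get_plugin_registry()  # now lists the Hardmax creator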
|
{
"content_hash": "d2519c1a7dbbe891624b3df11892f6b5",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 94,
"avg_line_length": 41.27777777777778,
"alnum_prop": 0.6157469717362046,
"repo_name": "NVIDIA/TensorRT",
"id": "4e2c3946c9781c96425a646c853516b3a4e50b16",
"size": "2175",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/python/onnx_custom_plugin/load_plugin_lib.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "804"
},
{
"name": "C",
"bytes": "26267"
},
{
"name": "C++",
"bytes": "174835683"
},
{
"name": "CMake",
"bytes": "73882"
},
{
"name": "Cuda",
"bytes": "713094"
},
{
"name": "Dockerfile",
"bytes": "21378"
},
{
"name": "HTML",
"bytes": "266"
},
{
"name": "Jupyter Notebook",
"bytes": "2284036"
},
{
"name": "Makefile",
"bytes": "9128"
},
{
"name": "PowerShell",
"bytes": "162"
},
{
"name": "PureBasic",
"bytes": "388"
},
{
"name": "Python",
"bytes": "2541976"
},
{
"name": "Shell",
"bytes": "20007"
}
],
"symlink_target": ""
}
|
"""Helper script for making sure that the configuration of the logger works. Called by test-logger.sh"""
from peyotl import get_logger
_LOG = get_logger()
_LOG.debug("a debug message")
_LOG.info("an info with umlaut ü message")
_LOG.warning("a warning message")
_LOG.error("an error message")
_LOG.critical("a critical message")
try:
raise RuntimeError("A testing runtime error")
except RuntimeError:
_LOG.exception("expected exception")
|
{
"content_hash": "9fdf50730b5b7cfa99b2f22f9ad9ad6a",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 104,
"avg_line_length": 37.166666666666664,
"alnum_prop": 0.742152466367713,
"repo_name": "OpenTreeOfLife/peyotl",
"id": "a5c5ee593088432c365c4a7b4b6f61dce7529d75",
"size": "493",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/logger_test_messages.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1834266"
},
{
"name": "Python",
"bytes": "1010278"
},
{
"name": "Shell",
"bytes": "28989"
},
{
"name": "XSLT",
"bytes": "573"
}
],
"symlink_target": ""
}
|
import itertools
import weakref
class MetaSignals(type):
"""
    Register the list of signals in the class variable ``signals``,
    including signals in superclasses.
"""
def __init__(cls, name, bases, d):
signals = d.get("signals", [])
for superclass in cls.__bases__:
signals.extend(getattr(superclass, 'signals', []))
signals = dict([(x,None) for x in signals]).keys()
d["signals"] = signals
register_signal(cls, signals)
super(MetaSignals, cls).__init__(name, bases, d)
def setdefaultattr(obj, name, value):
# like dict.setdefault() for object attributes
if hasattr(obj, name):
return getattr(obj, name)
setattr(obj, name, value)
return value
class Key(object):
"""
Minimal class, whose only purpose is to produce objects with a
unique hash
"""
__slots__ = []
class Signals(object):
_signal_attr = '_urwid_signals' # attribute to attach to signal senders
def __init__(self):
self._supported = {}
def register(self, sig_cls, signals):
"""
        :param sig_cls: the class of an object that will be sending signals
        :type sig_cls: class
:param signals: a list of signals that may be sent, typically each
signal is represented by a string
:type signals: signal names
This function must be called for a class before connecting any
        signal callbacks or emitting any signals from that class' objects
"""
self._supported[sig_cls] = signals
def connect(self, obj, name, callback, user_arg=None, weak_args=None, user_args=None):
"""
:param obj: the object sending a signal
:type obj: object
:param name: the signal to listen for, typically a string
:type name: signal name
:param callback: the function to call when that signal is sent
:type callback: function
:param user_arg: deprecated additional argument to callback (appended
after the arguments passed when the signal is
emitted). If None no arguments will be added.
Don't use this argument, use user_args instead.
:param weak_args: additional arguments passed to the callback
(before any arguments passed when the signal
is emitted and before any user_args).
These arguments are stored as weak references
(but converted back into their original value
before passing them to callback) to prevent
any objects referenced (indirectly) from
weak_args from being kept alive just because
they are referenced by this signal handler.
Use this argument only as a keyword argument,
since user_arg might be removed in the future.
:type weak_args: iterable
:param user_args: additional arguments to pass to the callback,
(before any arguments passed when the signal
is emitted but after any weak_args).
Use this argument only as a keyword argument,
since user_arg might be removed in the future.
:type user_args: iterable
When a matching signal is sent, callback will be called. The
arguments it receives will be the user_args passed at connect
time (as individual arguments) followed by all the positional
parameters sent with the signal.
As an example of using weak_args, consider the following snippet:
>>> import urwid
>>> debug = urwid.Text('')
>>> def handler(widget, newtext):
... debug.set_text("Edit widget changed to %s" % newtext)
>>> edit = urwid.Edit('')
>>> key = urwid.connect_signal(edit, 'change', handler)
If you now build some interface using "edit" and "debug", the
"debug" widget will show whatever you type in the "edit" widget.
However, if you remove all references to the "debug" widget, it
will still be kept alive by the signal handler. This because the
signal handler is a closure that (implicitly) references the
"edit" widget. If you want to allow the "debug" widget to be
garbage collected, you can create a "fake" or "weak" closure
(it's not really a closure, since it doesn't reference any
outside variables, so it's just a dynamic function):
>>> debug = urwid.Text('')
>>> def handler(weak_debug, widget, newtext):
... weak_debug.set_text("Edit widget changed to %s" % newtext)
>>> edit = urwid.Edit('')
>>> key = urwid.connect_signal(edit, 'change', handler, weak_args=[debug])
        Here the weak_debug parameter in handler is the value passed
in the weak_args list to connect_signal. Note that the
weak_debug value passed is not a weak reference anymore, the
signals code transparently dereferences the weakref parameter
        before passing it to handler.
Returns a key associated by this signal handler, which can be
used to disconnect the signal later on using
urwid.disconnect_signal_by_key. Alternatively, the signal
handler can also be disconnected by calling
urwid.disconnect_signal, which doesn't need this key.
"""
sig_cls = obj.__class__
        if name not in self._supported.get(sig_cls, []):
raise NameError("No such signal %r for object %r" %
(name, obj))
# Just generate an arbitrary (but unique) key
key = Key()
signals = setdefaultattr(obj, self._signal_attr, {})
handlers = signals.setdefault(name, [])
# Remove the signal handler when any of the weakref'd arguments
# are garbage collected. Note that this means that the handlers
# dictionary can be modified _at any time_, so it should never
# be iterated directly (e.g. iterate only over .keys() and
# .items(), never over .iterkeys(), .iteritems() or the object
# itself).
# We let the callback keep a weakref to the object as well, to
# prevent a circular reference between the handler and the
# object (via the weakrefs, which keep strong references to
# their callbacks) from existing.
obj_weak = weakref.ref(obj)
def weakref_callback(weakref):
o = obj_weak()
if o:
try:
del getattr(o, self._signal_attr, {})[name][key]
except KeyError:
pass
user_args = self._prepare_user_args(weak_args, user_args, weakref_callback)
handlers.append((key, callback, user_arg, user_args))
return key
    def _prepare_user_args(self, weak_args, user_args, callback=None):
# Turn weak_args into weakrefs and prepend them to user_args
return [weakref.ref(a, callback) for a in (weak_args or [])] + (user_args or [])
def disconnect(self, obj, name, callback, user_arg=None, weak_args=None, user_args=None):
"""
:param obj: the object to disconnect the signal from
:type obj: object
:param name: the signal to disconnect, typically a string
:type name: signal name
:param callback: the callback function passed to connect_signal
:type callback: function
:param user_arg: the user_arg parameter passed to connect_signal
:param weak_args: the weak_args parameter passed to connect_signal
        :param user_args: the user_args parameter passed to connect_signal
This function will remove a callback from the list connected
to a signal with connect_signal(). The arguments passed should
be exactly the same as those passed to connect_signal().
If the callback is not connected or already disconnected, this
function will simply do nothing.
"""
signals = setdefaultattr(obj, self._signal_attr, {})
if name not in signals:
return
handlers = signals[name]
# Do the same processing as in connect, so we can compare the
# resulting tuple.
user_args = self._prepare_user_args(weak_args, user_args)
# Remove the given handler
for h in handlers:
if h[1:] == (callback, user_arg, user_args):
return self.disconnect_by_key(obj, name, h[0])
def disconnect_by_key(self, obj, name, key):
"""
:param obj: the object to disconnect the signal from
:type obj: object
:param name: the signal to disconnect, typically a string
:type name: signal name
:param key: the key for this signal handler, as returned by
connect_signal().
:type key: Key
This function will remove a callback from the list connected
to a signal with connect_signal(). The key passed should be the
value returned by connect_signal().
If the callback is not connected or already disconnected, this
function will simply do nothing.
"""
signals = setdefaultattr(obj, self._signal_attr, {})
handlers = signals.get(name, [])
handlers[:] = [h for h in handlers if h[0] is not key]
def emit(self, obj, name, *args):
"""
:param obj: the object sending a signal
:type obj: object
:param name: the signal to send, typically a string
:type name: signal name
:param \*args: zero or more positional arguments to pass to the signal
callback functions
This function calls each of the callbacks connected to this signal
with the args arguments as positional parameters.
This function returns True if any of the callbacks returned True.
"""
result = False
signals = getattr(obj, self._signal_attr, {})
handlers = signals.get(name, [])
for key, callback, user_arg, user_args in handlers:
result |= self._call_callback(callback, user_arg, user_args, args)
return result
def _call_callback(self, callback, user_arg, user_args, emit_args):
if user_args:
args_to_pass = []
for arg in user_args:
if isinstance(arg, weakref.ReferenceType):
arg = arg()
if arg is None:
# If the weakref is None, the referenced object
# was cleaned up. We just skip the entire
# callback in this case. The weakref cleanup
# handler will have removed the callback when
# this happens, so no need to actually remove
# the callback here.
return False
args_to_pass.append(arg)
args_to_pass.extend(emit_args)
else:
# Optimization: Don't create a new list when there are
# no user_args
args_to_pass = emit_args
# The deprecated user_arg argument was added to the end
# instead of the beginning.
if user_arg is not None:
args_to_pass = itertools.chain(args_to_pass, (user_arg,))
return bool(callback(*args_to_pass))
_signals = Signals()
emit_signal = _signals.emit
register_signal = _signals.register
connect_signal = _signals.connect
disconnect_signal = _signals.disconnect
disconnect_signal_by_key = _signals.disconnect_by_key
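if __name__ == '__main__':
    # A minimal demo (not part of the original module) of the module-level
    # API: register a signal on a class, connect a handler, emit, then
    # disconnect using the key returned by connect_signal.
    class Sender(object):
        pass
    register_signal(Sender, ['ping'])
    received = []
    def on_ping(value):
        received.append(value)
    sender = Sender()
    key = connect_signal(sender, 'ping', on_ping)
    emit_signal(sender, 'ping', 42)
    disconnect_signal_by_key(sender, 'ping', key)
    emit_signal(sender, 'ping', 43)  # no handler any more
    assert received == [42]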
|
{
"content_hash": "0a39419a687bd072c89d71b2951ba36b",
"timestamp": "",
"source": "github",
"line_count": 280,
"max_line_length": 93,
"avg_line_length": 42.03214285714286,
"alnum_prop": 0.6041294927351517,
"repo_name": "fkolacek/FIT-VUT",
"id": "b716939c9623128e5a17ffca649af42eb4274b72",
"size": "12660",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "bp-revok/python/lib/python2.7/site-packages/urwid-1.3.0-py2.7-linux-x86_64.egg/urwid/signals.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "455326"
},
{
"name": "Awk",
"bytes": "8724"
},
{
"name": "Batchfile",
"bytes": "201"
},
{
"name": "Brainfuck",
"bytes": "83"
},
{
"name": "C",
"bytes": "5006938"
},
{
"name": "C++",
"bytes": "1835332"
},
{
"name": "CSS",
"bytes": "301045"
},
{
"name": "CoffeeScript",
"bytes": "46327"
},
{
"name": "Groff",
"bytes": "46766"
},
{
"name": "HTML",
"bytes": "937735"
},
{
"name": "Java",
"bytes": "552132"
},
{
"name": "JavaScript",
"bytes": "1742225"
},
{
"name": "Lua",
"bytes": "39700"
},
{
"name": "Makefile",
"bytes": "381793"
},
{
"name": "Objective-C",
"bytes": "4618"
},
{
"name": "PHP",
"bytes": "108701"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "Perl",
"bytes": "60353"
},
{
"name": "Python",
"bytes": "22084026"
},
{
"name": "QMake",
"bytes": "2660"
},
{
"name": "R",
"bytes": "1370"
},
{
"name": "Ragel in Ruby Host",
"bytes": "17993"
},
{
"name": "Ruby",
"bytes": "21607145"
},
{
"name": "Shell",
"bytes": "611321"
},
{
"name": "Tcl",
"bytes": "4920"
},
{
"name": "TeX",
"bytes": "561423"
},
{
"name": "VHDL",
"bytes": "49180"
},
{
"name": "Visual Basic",
"bytes": "481"
},
{
"name": "XSLT",
"bytes": "154638"
},
{
"name": "Yacc",
"bytes": "32788"
}
],
"symlink_target": ""
}
|
from collections import defaultdict
__author__ = 'esteele'
# Primary sources have two characters
# Implied or derived sources have three characters
FIND_A_BIBLE_SOURCE_ABBREV = "FB"
WALS_SOURCE_ABBREV = "WA"
JOSHUA_PROJECT_SOURCE_ABBREV = "JP"
AUSTLANG_SOURCE_ABBREV = "AL"
AUSTLANG_ABS_SOURCE_ABBREV = "AB"
SIL_RCEM_SOURCE_ABBREV = "SI"
SIL_RCEM_IMPLIED_SOURCE_ABBREV = "SII"
AUS_CENSUS_2011_ABBREV = "CN"
TINDALE_SOURCE_ABBREV = "TI"
source_abbrev_name_dict = {
SIL_RCEM_SOURCE_ABBREV: "SIL Retired Codes",
SIL_RCEM_IMPLIED_SOURCE_ABBREV: "SIL Retired Codes (implied)",
FIND_A_BIBLE_SOURCE_ABBREV: "Find A Bible",
WALS_SOURCE_ABBREV: "WALS",
JOSHUA_PROJECT_SOURCE_ABBREV: "Joshua Project",
AUSTLANG_SOURCE_ABBREV: "AUSTLANG",
AUSTLANG_ABS_SOURCE_ABBREV: "Australian Bureau of Statistics",
AUS_CENSUS_2011_ABBREV: "Census 2011",
TINDALE_SOURCE_ABBREV: "Tindale's Catalogue of "
"Australian Aboriginal Tribes",
}
TRANSLATION_STATE_WHOLE_BIBLE = 5 # Findabible
TRANSLATION_STATE_NEW_TESTAMENT = 4 # Findabible
TRANSLATION_STATE_PORTIONS = 3 # Joshua Project isn't specific
TRANSLATION_STATE_COMPLETE_BOOK = 2 # Findabible
TRANSLATION_STATE_NO_SCRIPTURE = 1 # Findabible
TRANSLATION_STATE_NO_RECORD = 0
TRANSLATION_STATE_UNKNOWN_YEAR = -1
TRANSLATION_STATE_POSITIVE_YEAR = 2013  # Listed as present but with no year given
TRANSLATION_STATE_STATE_KEY = "TS"
TRANSLATION_STATE_YEAR_KEY = "YR"
translation_abbrev_name_dict = {
TRANSLATION_STATE_COMPLETE_BOOK: "A book of scripture",
TRANSLATION_STATE_PORTIONS: "Portions of scripture",
TRANSLATION_STATE_NEW_TESTAMENT: "New Testament",
TRANSLATION_STATE_WHOLE_BIBLE: "Whole Bible",
TRANSLATION_STATE_NO_SCRIPTURE: "No scripture",
TRANSLATION_STATE_NO_RECORD: "No record of any translation"
}
translation_abbrev_css_class_dict = {
TRANSLATION_STATE_COMPLETE_BOOK: "scripture_book",
TRANSLATION_STATE_PORTIONS: "scripture_portions",
TRANSLATION_STATE_NEW_TESTAMENT: "scripture_nt",
TRANSLATION_STATE_WHOLE_BIBLE: "scripture_wb",
TRANSLATION_STATE_NO_SCRIPTURE: "scripture_none",
TRANSLATION_STATE_NO_RECORD: "scripture_record_absent"
}
ISO_RETIRED_CSS_STATE = "retired_iso"
ISO_ACTIVE_CSS_STATE = "active_iso"
RELTYPE_RETIREMENT_CHANGE = "C" # SIL
RELTYPE_RETIREMENT_DUPLICATE = "D" # SIL
RELTYPE_RETIREMENT_NON_EXISTENT = "N" # SIL
RELTYPE_RETIREMENT_SPLIT_INTO = "S" # SIL
RELTYPE_RETIREMENT_MERGED_INTO = "M" # SIL
RELTYPE_RETIREMENT_SPLIT_FROM = "SF" # SIL (implied)
RELTYPE_RETIREMENT_MERGED_FROM = "MF" # SIL (implied)
DIALECT_NAME = "DN"
relationship_abbrev_name_dict = {
RELTYPE_RETIREMENT_CHANGE: "retired, changed into",
RELTYPE_RETIREMENT_DUPLICATE: "retired, duplicates",
RELTYPE_RETIREMENT_NON_EXISTENT: "retired, does not exist",
RELTYPE_RETIREMENT_SPLIT_INTO: "retired, split into",
RELTYPE_RETIREMENT_MERGED_INTO: "retired, merged into",
RELTYPE_RETIREMENT_SPLIT_FROM: "split from retired ISO",
RELTYPE_RETIREMENT_MERGED_FROM: "merged from retired ISO",
}
ISO_MULTI_MATCH = "m"
ISO_NO_MATCH = "n"
# See note in JPHarvestAdapter:get_L1_speaker_count_for_iso
SPEAKER_COUNT_NONE_EXPECTED = -1
SPEAKER_COUNT_UNKNOWN = -2
SPEAKER_COUNT_AMBIGUOUS = -3 # Census data
# It's helpful to have it > 0. This number should never be shown directly
SPEAKER_COUNT_FEW = 1.5
# Let's say that "few" is less than 10
SPEAKER_COUNT_FEW_THRESHOLD = 10
SPEAKER_COUNT_MANY_THRESHOLD = 100
SPEAKER_COUNT_FEW_CSS_CLASS = "speakers_few"
SPEAKER_COUNT_SOME_CSS_CLASS = "speakers_some"
SPEAKER_COUNT_MANY_CSS_CLASS = "speakers_many"
SPEAKER_COUNT_UNKNOWN_CSS_CLASS = "speakers_unknown"
ENGLISH_COMPETENCY_UNKNOWN_PESSIMISTIC = -1
ENGLISH_COMPETENCY_UNKNOWN_OPTIMISTIC = -1
LATITUDE_UNKNOWN = 999
LONGITUDE_UNKNOWN = 999
def generate_l1_css_dict():
# TODO: Move this into a class so that it's not executed for
# every module import
d = dict.fromkeys(
range(1, SPEAKER_COUNT_FEW_THRESHOLD), SPEAKER_COUNT_FEW_CSS_CLASS)
d.update(dict.fromkeys(
range(SPEAKER_COUNT_FEW_THRESHOLD, SPEAKER_COUNT_MANY_THRESHOLD),
SPEAKER_COUNT_SOME_CSS_CLASS))
d.update({
0: "speakers_none",
SPEAKER_COUNT_NONE_EXPECTED: "speakers_none",
SPEAKER_COUNT_FEW: SPEAKER_COUNT_FEW_CSS_CLASS,
SPEAKER_COUNT_UNKNOWN: SPEAKER_COUNT_UNKNOWN_CSS_CLASS,
})
# Set default value
return defaultdict(lambda: SPEAKER_COUNT_MANY_CSS_CLASS, d)
l1_speaker_css_class_dict = generate_l1_css_dict()
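# Illustrative self-checks (added for clarity; not in the original module).
# Speaker counts bucket into CSS classes, with anything not covered
# explicitly falling through to the "many" default:
assert l1_speaker_css_class_dict[3] == SPEAKER_COUNT_FEW_CSS_CLASS
assert l1_speaker_css_class_dict[50] == SPEAKER_COUNT_SOME_CSS_CLASS
assert (l1_speaker_css_class_dict[SPEAKER_COUNT_MANY_THRESHOLD]
        == SPEAKER_COUNT_MANY_CSS_CLASS)
assert (l1_speaker_css_class_dict[SPEAKER_COUNT_UNKNOWN]
        == SPEAKER_COUNT_UNKNOWN_CSS_CLASS)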
TABLE_SPEAKER_COUNT_COL = "SC"
# We can't associate the following ABS names with ISOs because the
# particular spelling doesn't exist in our names or aliases. I've looked
# at the Austlang records and have worked out the mappings below.
# It's possible that this extra set of mappings exists because austlang
# mappings to ISO codes are incomplete, or because I've incorrectly
# made an association, or that austlang records are more fine grained
# than the records in this data, perhaps because austlang prefers not
# to associate dialects with iso codes.
# We include the full ABS name, even the nfd "Not further defined"
# (grouped bucket - coarser granularity) and the nec
# "Not elsewhere classified" (bucket of last resort).
ABS_ISO_EXTRA_MAPPINGS = {
'Bilinarra': "nbj", # dialect of Ngarinyman (nbj) => 59 speakers
'Eastern Arrernte': "aer",
'Galpu': "dhg", # Dialect of Djangu => 146 speakers
'Garrwa': "wrk", # Matches wrk and gbc but gbc has been retired
'Gun-nartpa': "bvr", # Dialect of Burarra => 89 speakers
'Gundjeihmi': "gup", # Dialect of Gunwinngu => 29 speakers
'Kanai': "unn", # Is Kurnai
# 'Kaurna', # Not in Ethnologue => 58 speakers. WALS code kaq. Adelaide
'Mudburra': "dmw", # Matches dmw and mwd but mwd has been retired
'Murrinh Patha': "mwf", # Is Murrinh-Patha
"Ngan'gikurunggurr": "nam", # Is Nangikurrunggurr
'Nyungar': "nys", # Matches nys and xrg, but xrg has no speakers
'Nhangu, nec': "jay", # Is Yan-nhangu (even though it is a Yolngu
# language)
'Pitjantjatjara': "pjt", # While Yankunytjatjara is related to
# Pitjantjatjara, it has its own Census code so this is just about pjt
# 'Thaynakwith', # Unable to find anything. => 3 speakers
'Wagilak': "rit", # Is Ritarungo => 16 speakers
'Wangkatha': "pti",
'Western Arrarnta': "are",
'Wik Ngathan': "wig", # Is Wig-Ngathana => 4 speakers XXX seems small
'Yumplatok (Torres Strait Creole)': "tcs", # => 5368 speakers
}
|
{
"content_hash": "1c24c00335bb4482e313d166f6b37adc",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 75,
"avg_line_length": 39.75757575757576,
"alnum_prop": 0.7092987804878049,
"repo_name": "edwinsteele/language_explorer",
"id": "b891e032632eda51197e69e1fd2eb45e5013448f",
"size": "6560",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "language_explorer/constants.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "26586"
},
{
"name": "Python",
"bytes": "131661"
},
{
"name": "Shell",
"bytes": "10041"
}
],
"symlink_target": ""
}
|
"""
A module for decoding "Actions" additional PSD data format.
"""
from __future__ import absolute_import, unicode_literals
from psd_tools.utils import read_unicode_string, read_fmt
from psd_tools.constants import OSType, ReferenceOSType, UnitFloatType
from psd_tools.debug import pretty_namedtuple
from psd_tools.utils import trimmed_repr
import warnings
Descriptor = pretty_namedtuple('Descriptor', 'name classID items')
Reference = pretty_namedtuple('Reference', 'items')
Property = pretty_namedtuple('Property', 'name classID keyID')
UnitFloat = pretty_namedtuple('UnitFloat', 'unit value')
Double = pretty_namedtuple('Double', 'value')
Class = pretty_namedtuple('Class', 'name classID')
String = pretty_namedtuple('String', 'value')
EnumReference = pretty_namedtuple('EnumReference', 'name classID typeID enum')
Boolean = pretty_namedtuple('Boolean', 'value')
Offset = pretty_namedtuple('Offset', 'name classID value')
Alias = pretty_namedtuple('Alias', 'value')
List = pretty_namedtuple('List', 'items')
Integer = pretty_namedtuple('Integer', 'value')
Enum = pretty_namedtuple('Enum', 'type value')
Identifier = pretty_namedtuple('Identifier', 'value')
Index = pretty_namedtuple('Index', 'value')
Name = pretty_namedtuple('Name', 'value')
ObjectArray = pretty_namedtuple('ObjectArray', 'classObj items')
ObjectArrayItem = pretty_namedtuple('ObjectArrayItem', 'keyID value')
_RawData = pretty_namedtuple('RawData', 'value')
class RawData(_RawData):
def __repr__(self):
return "RawData(value=%s)" % trimmed_repr(self.value)
def _repr_pretty_(self, p, cycle):
if cycle:
p.text("RawData(...)")
else:
with p.group(1, "RawData(", ")"):
p.breakable()
p.text("value=")
if isinstance(self.value, bytes):
p.text(trimmed_repr(self.value))
else:
p.pretty(self.value)
def get_ostype_decode_func(ostype):
return {
OSType.REFERENCE: decode_ref,
OSType.DESCRIPTOR: decode_descriptor,
OSType.LIST: decode_list,
OSType.DOUBLE: decode_double,
OSType.UNIT_FLOAT: decode_unit_float,
OSType.UNIT_FLOATS: decode_unit_floats,
OSType.STRING: decode_string,
OSType.ENUMERATED: decode_enum,
OSType.INTEGER: decode_integer,
OSType.BOOLEAN: decode_bool,
OSType.GLOBAL_OBJECT: decode_descriptor,
OSType.CLASS1: decode_class,
OSType.CLASS2: decode_class,
OSType.ALIAS: decode_alias,
OSType.RAW_DATA: decode_raw,
OSType.OBJECT_ARRAY: decode_object_array,
}.get(ostype, None)
def get_reference_ostype_decode_func(ostype):
return {
ReferenceOSType.PROPERTY: decode_prop,
ReferenceOSType.CLASS: decode_class,
ReferenceOSType.OFFSET: decode_offset,
ReferenceOSType.IDENTIFIER: decode_identifier,
ReferenceOSType.INDEX: decode_index,
ReferenceOSType.NAME: decode_name,
ReferenceOSType.ENUMERATED_REFERENCE: decode_enum_ref,
}.get(ostype, None)
def decode_descriptor(_, fp):
name = read_unicode_string(fp)[:-1]
classID_length = read_fmt("I", fp)[0]
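    # PSD convention: a stored length of 0 denotes a standard 4-byte key.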
classID = fp.read(classID_length or 4)
items = []
item_count = read_fmt("I", fp)[0]
for n in range(item_count):
item_length = read_fmt("I", fp)[0]
key = fp.read(item_length or 4)
ostype = fp.read(4)
decode_ostype = get_ostype_decode_func(ostype)
if not decode_ostype:
raise UnknownOSType('Unknown descriptor item of type %r' % ostype)
value = decode_ostype(key, fp)
if value is not None:
items.append((key, value))
return Descriptor(name, classID, items)
def decode_ref(key, fp):
item_count = read_fmt("I", fp)[0]
items = []
for _ in range(item_count):
ostype = fp.read(4)
decode_ostype = get_reference_ostype_decode_func(ostype)
        if not decode_ostype:
raise UnknownOSType('Unknown reference item of type %r' % ostype)
value = decode_ostype(key, fp)
if value is not None:
items.append(value)
return Reference(items)
def decode_prop(key, fp):
name = read_unicode_string(fp)[:-1]
classID_length = read_fmt("I", fp)[0]
classID = fp.read(classID_length or 4)
keyID_length = read_fmt("I", fp)[0]
keyID = fp.read(keyID_length or 4)
return Property(name, classID, keyID)
def decode_unit_float(key, fp):
unit_key = fp.read(4)
if not UnitFloatType.is_known(unit_key):
warnings.warn('Unknown UnitFloatType: %r' % unit_key)
value = read_fmt("d", fp)[0]
return UnitFloat(UnitFloatType.name_of(unit_key), value)
def decode_unit_floats(key, fp):
unit_key = fp.read(4)
if not UnitFloatType.is_known(unit_key):
warnings.warn('Unknown UnitFloatType: %r' % unit_key)
floats_count = read_fmt("I", fp)[0]
floats = []
for n in range(floats_count):
value = read_fmt("d", fp)[0]
floats.append(UnitFloat(UnitFloatType.name_of(unit_key), value))
return floats
def decode_double(key, fp):
return Double(read_fmt("d", fp)[0])
def decode_class(key, fp):
name = read_unicode_string(fp)[:-1]
classID_length = read_fmt("I", fp)[0]
classID = fp.read(classID_length or 4)
return Class(name, classID)
def decode_string(key, fp):
value = read_unicode_string(fp)[:-1]
return String(value)
def decode_enum_ref(key, fp):
name = read_unicode_string(fp)[:-1]
classID_length = read_fmt("I", fp)[0]
classID = fp.read(classID_length or 4)
typeID_length = read_fmt("I", fp)[0]
typeID = fp.read(typeID_length or 4)
enum_length = read_fmt("I", fp)[0]
enum = fp.read(enum_length or 4)
return EnumReference(name, classID, typeID, enum)
def decode_offset(key, fp):
name = read_unicode_string(fp)[:-1]
classID_length = read_fmt("I", fp)[0]
classID = fp.read(classID_length or 4)
offset = read_fmt("I", fp)[0]
return Offset(name, classID, offset)
def decode_bool(key, fp):
return Boolean(read_fmt("?", fp)[0])
def decode_alias(key, fp):
length = read_fmt("I", fp)[0]
value = fp.read(length)
return Alias(value)
def decode_list(key, fp):
items_count = read_fmt("I", fp)[0]
items = []
for _ in range(items_count):
ostype = fp.read(4)
decode_ostype = get_ostype_decode_func(ostype)
if not decode_ostype:
raise UnknownOSType('Unknown list item of type %r' % ostype)
value = decode_ostype(key, fp)
if value is not None:
items.append(value)
return List(items)
def decode_integer(key, fp):
return Integer(read_fmt("i", fp)[0])
def decode_enum(key, fp):
type_length = read_fmt("I", fp)[0]
type_ = fp.read(type_length or 4)
value_length = read_fmt("I", fp)[0]
value = fp.read(value_length or 4)
return Enum(type_, value)
def decode_identifier(key, fp):
return Identifier(read_fmt("I", fp)[0])
def decode_index(key, fp):
return Index(read_fmt("I", fp)[0])
def decode_name(key, fp):
value = read_unicode_string(fp)[:-1]
return Name(value)
def decode_raw(key, fp):
# This is the only thing we know about:
# The first unsigned int determines the size of the raw data.
size = read_fmt("I", fp)[0]
data = fp.read(size)
return RawData(data)
def decode_object_array(key, fp):
items_per_object_count = read_fmt("I", fp)[0]
classObj = decode_class(None, fp)
items_count = read_fmt("I", fp)[0]
items = []
for n in range(items_count):
object_array_item = decode_object_array_item(None, fp)
if object_array_item is not None:
items.append(object_array_item)
return ObjectArray(classObj, items)
def decode_object_array_item(key, fp):
keyID_length = read_fmt("I", fp)[0]
keyID = fp.read(keyID_length or 4)
ostype = fp.read(4)
decode_ostype = get_ostype_decode_func(ostype)
if not decode_ostype:
raise UnknownOSType('Unknown list item of type %r' % ostype)
value = decode_ostype(key, fp)
return ObjectArrayItem(keyID, value)
class UnknownOSType(ValueError):
pass
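if __name__ == "__main__":
    # Minimal self-check sketch (an illustration added here, not part of the
    # original module): route a hand-built payload through the dispatch
    # table above. Assumes psd_tools' read_fmt reads big-endian, hence ">d".
    import io
    import struct
    buf = io.BytesIO(struct.pack(">d", 1.5))
    decode = get_ostype_decode_func(OSType.DOUBLE)
    print(decode(None, buf))  # expected: Double(value=1.5)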
|
{
"content_hash": "77edfefc5df8cb60370a5854132bce97",
"timestamp": "",
"source": "github",
"line_count": 262,
"max_line_length": 78,
"avg_line_length": 31.66030534351145,
"alnum_prop": 0.6335141651597348,
"repo_name": "tommo/gii",
"id": "4030300e77856e78c2fee96798f4d29a8d2de65c",
"size": "8319",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/3rdparty/common/psd_tools/decoder/actions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "398"
},
{
"name": "C",
"bytes": "1118982"
},
{
"name": "C++",
"bytes": "743466"
},
{
"name": "CSS",
"bytes": "5956"
},
{
"name": "HTML",
"bytes": "126233"
},
{
"name": "JavaScript",
"bytes": "129855"
},
{
"name": "Lua",
"bytes": "1290198"
},
{
"name": "Makefile",
"bytes": "652"
},
{
"name": "Objective-C",
"bytes": "28896"
},
{
"name": "Objective-C++",
"bytes": "129214"
},
{
"name": "Python",
"bytes": "2676186"
},
{
"name": "Shell",
"bytes": "11215"
}
],
"symlink_target": ""
}
|
""""""
from __future__ import division # 1/2 == 0.5, as in Py3
from __future__ import absolute_import # avoid hiding global modules with locals
from __future__ import print_function # force use of print("hello")
from __future__ import unicode_literals # force unadorned strings "" to be unicode without prepending u""
import subprocess
import unittest
import os
FIXTURE0 = """ 0.100167119 3,183 cache-misses """
ANSWER0 = 3183
FIXTURE1 = """# time counts events\n 0.100167119 3,183 cache-misses \n 0.200354348 4,045 cache-misses \n """
ANSWER1 = [3183, 4045]
FIXTURE2 = """ 3.501390851 471,219,787 stalled-cycles-frontend\n 14.005319456 2,249,115 stalled-cycles-frontend """
ANSWER2 = [471219787, 2249115]
EVENT_TYPE_CM = "cache-misses"
EVENT_TYPE_SCF = "stalled-cycles-frontend"
EVENT_TYPE_I = "instructions"
EVENT_TYPES = set([EVENT_TYPE_CM, EVENT_TYPE_SCF, EVENT_TYPE_I])
EVENT_TYPE = EVENT_TYPE_CM
def process_line(line):
"""Process a single output line from perf-stat, extract only a value (skip help lines)"""
line_bits = line.split()
#print(line_bits)
try:
value = float(line_bits[1].replace(',', ''))
    except ValueError:
        # Header/help lines carry a non-numeric token (e.g. an event name)
        # in the value column; report no value so callers skip them.
        value = None
except IndexError:
value = None
return value
def process_lines(lines):
"""Process many lines of perf-stat output, extract the values"""
# we're assuming we have \n as line endings in this long string
values = []
for line in lines.split('\n'):
value = process_line(line)
if value:
values.append(value)
return values
class Test(unittest.TestCase):
def test1(self):
answer0 = process_line(FIXTURE0)
self.assertEqual(ANSWER0, answer0)
def test_process_lines(self):
values = process_lines(FIXTURE0)
self.assertEqual(values, [ANSWER0])
def test_process_lines2(self):
# check we can process the cache-misses messages
values = process_lines(FIXTURE1)
self.assertEqual(values, ANSWER1)
# check that if we have repeated help messages, we still extract the
# values we expect
values = process_lines(FIXTURE1+FIXTURE1)
self.assertEqual(values, ANSWER1+ANSWER1)
def test_process_lines3(self):
# check we can process stalled-cycles-frontend messages
values = process_lines(FIXTURE2)
self.assertEqual(values, ANSWER2)
def run_capture_perf(pid):
"""Start a perf stat process monitoring pid every 100ms"""
cmd = "perf stat --pid {pid} --event {event_type} -I 100".format(pid=pid, event_type=EVENT_TYPE)
#print("run_capture_perf running:", cmd) # debug message
proc = subprocess.Popen(cmd.split(), stderr=subprocess.PIPE)
return proc
def finish_perf(proc):
"""Finish collecting data, parse and return"""
# once the job has finished, kill recording
proc.kill()
# now block to gather all output data
(stdoutdata, stderrdata) = proc.communicate()
# example stderrdata output:
# # time counts events
# 0.100173796 2,761 cache-misses
# 0.200387519 4,232 cache-misses
# 0.300540762 5,277 cache-misses
# 0.400778748 3,916 cache-misses
stderrdata = stderrdata.decode('ascii') # assume ascii
values = process_lines(stderrdata)
return values
if __name__ == "__main__":
    # simple demo: monitor our own pid, gathering samples for 0.5 seconds
pid = os.getpid()
print("Using pid:", pid)
proc = run_capture_perf(pid)
import time
time.sleep(0.5)
values = finish_perf(proc)
print(values)
|
{
"content_hash": "65fda8decc403805e0b8d7a4ff721c7a",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 192,
"avg_line_length": 35.559633027522935,
"alnum_prop": 0.6289989680082559,
"repo_name": "ianozsvald/ipython_memory_usage",
"id": "bf77f9989b5470b3ae5df9159a922628fb288b97",
"size": "3876",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/ipython_memory_usage/perf_process.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "51"
},
{
"name": "Python",
"bytes": "15834"
},
{
"name": "Shell",
"bytes": "25"
}
],
"symlink_target": ""
}
|
import json
import os
import sys
import time
from . import junit_output
ABS_PATH_PREFIX = os.getcwd() + os.sep
def EscapeCommand(command):
parts = []
for part in command:
if ' ' in part:
# Escape spaces. We may need to escape more characters for this
# to work properly.
parts.append('"%s"' % part)
else:
parts.append(part)
return " ".join(parts)
class ProgressIndicator(object):
def __init__(self):
self.runner = None
def Starting(self):
pass
def Done(self):
pass
def AboutToRun(self, test):
pass
def HasRun(self, test, has_unexpected_output):
pass
def Heartbeat(self):
pass
def PrintFailureHeader(self, test):
if test.suite.IsNegativeTest(test):
negative_marker = '[negative] '
else:
negative_marker = ''
print "=== %(label)s %(negative)s===" % {
'label': test.GetLabel(),
'negative': negative_marker
}
class SimpleProgressIndicator(ProgressIndicator):
"""Abstract base class for {Verbose,Dots}ProgressIndicator"""
def Starting(self):
print 'Running %i tests' % self.runner.total
def Done(self):
print
for failed in self.runner.failed:
self.PrintFailureHeader(failed)
if failed.output.stderr:
print "--- stderr ---"
print failed.output.stderr.strip()
if failed.output.stdout:
print "--- stdout ---"
print failed.output.stdout.strip()
print "Command: %s" % EscapeCommand(self.runner.GetCommand(failed))
if failed.output.HasCrashed():
print "exit code: %d" % failed.output.exit_code
print "--- CRASHED ---"
if failed.output.HasTimedOut():
print "--- TIMEOUT ---"
if len(self.runner.failed) == 0:
print "==="
print "=== All tests succeeded"
print "==="
else:
print
print "==="
print "=== %i tests failed" % len(self.runner.failed)
if self.runner.crashed > 0:
print "=== %i tests CRASHED" % self.runner.crashed
print "==="
class VerboseProgressIndicator(SimpleProgressIndicator):
def AboutToRun(self, test):
print 'Starting %s...' % test.GetLabel()
sys.stdout.flush()
def HasRun(self, test, has_unexpected_output):
if has_unexpected_output:
if test.output.HasCrashed():
outcome = 'CRASH'
else:
outcome = 'FAIL'
else:
outcome = 'pass'
print 'Done running %s: %s' % (test.GetLabel(), outcome)
def Heartbeat(self):
print 'Still working...'
sys.stdout.flush()
class DotsProgressIndicator(SimpleProgressIndicator):
def HasRun(self, test, has_unexpected_output):
total = self.runner.succeeded + len(self.runner.failed)
if (total > 1) and (total % 50 == 1):
sys.stdout.write('\n')
if has_unexpected_output:
if test.output.HasCrashed():
sys.stdout.write('C')
sys.stdout.flush()
elif test.output.HasTimedOut():
sys.stdout.write('T')
sys.stdout.flush()
else:
sys.stdout.write('F')
sys.stdout.flush()
else:
sys.stdout.write('.')
sys.stdout.flush()
class CompactProgressIndicator(ProgressIndicator):
"""Abstract base class for {Color,Monochrome}ProgressIndicator"""
def __init__(self, templates):
super(CompactProgressIndicator, self).__init__()
self.templates = templates
self.last_status_length = 0
self.start_time = time.time()
def Done(self):
self.PrintProgress('Done')
print "" # Line break.
def AboutToRun(self, test):
self.PrintProgress(test.GetLabel())
def HasRun(self, test, has_unexpected_output):
if has_unexpected_output:
self.ClearLine(self.last_status_length)
self.PrintFailureHeader(test)
stdout = test.output.stdout.strip()
if len(stdout):
print self.templates['stdout'] % stdout
stderr = test.output.stderr.strip()
if len(stderr):
print self.templates['stderr'] % stderr
print "Command: %s" % EscapeCommand(self.runner.GetCommand(test))
if test.output.HasCrashed():
print "exit code: %d" % test.output.exit_code
print "--- CRASHED ---"
if test.output.HasTimedOut():
print "--- TIMEOUT ---"
def Truncate(self, string, length):
if length and (len(string) > (length - 3)):
return string[:(length - 3)] + "..."
else:
return string
def PrintProgress(self, name):
self.ClearLine(self.last_status_length)
elapsed = time.time() - self.start_time
progress = 0 if not self.runner.total else (
((self.runner.total - self.runner.remaining) * 100) //
self.runner.total)
status = self.templates['status_line'] % {
'passed': self.runner.succeeded,
'progress': progress,
'failed': len(self.runner.failed),
'test': name,
'mins': int(elapsed) / 60,
'secs': int(elapsed) % 60
}
status = self.Truncate(status, 78)
self.last_status_length = len(status)
print status,
sys.stdout.flush()
class ColorProgressIndicator(CompactProgressIndicator):
def __init__(self):
templates = {
'status_line': ("[%(mins)02i:%(secs)02i|"
"\033[34m%%%(progress) 4d\033[0m|"
"\033[32m+%(passed) 4d\033[0m|"
"\033[31m-%(failed) 4d\033[0m]: %(test)s"),
'stdout': "\033[1m%s\033[0m",
'stderr': "\033[31m%s\033[0m",
}
super(ColorProgressIndicator, self).__init__(templates)
def ClearLine(self, last_line_length):
print "\033[1K\r",
class MonochromeProgressIndicator(CompactProgressIndicator):
def __init__(self):
templates = {
'status_line': ("[%(mins)02i:%(secs)02i|%%%(progress) 4d|"
"+%(passed) 4d|-%(failed) 4d]: %(test)s"),
'stdout': '%s',
'stderr': '%s',
}
super(MonochromeProgressIndicator, self).__init__(templates)
def ClearLine(self, last_line_length):
print ("\r" + (" " * last_line_length) + "\r"),
class JUnitTestProgressIndicator(ProgressIndicator):
def __init__(self, progress_indicator, junitout, junittestsuite):
self.progress_indicator = progress_indicator
self.outputter = junit_output.JUnitTestOutput(junittestsuite)
if junitout:
self.outfile = open(junitout, "w")
else:
self.outfile = sys.stdout
def Starting(self):
self.progress_indicator.runner = self.runner
self.progress_indicator.Starting()
def Done(self):
self.progress_indicator.Done()
self.outputter.FinishAndWrite(self.outfile)
if self.outfile != sys.stdout:
self.outfile.close()
def AboutToRun(self, test):
self.progress_indicator.AboutToRun(test)
def HasRun(self, test, has_unexpected_output):
self.progress_indicator.HasRun(test, has_unexpected_output)
fail_text = ""
if has_unexpected_output:
stdout = test.output.stdout.strip()
if len(stdout):
fail_text += "stdout:\n%s\n" % stdout
stderr = test.output.stderr.strip()
if len(stderr):
fail_text += "stderr:\n%s\n" % stderr
fail_text += "Command: %s" % EscapeCommand(self.runner.GetCommand(test))
if test.output.HasCrashed():
fail_text += "exit code: %d\n--- CRASHED ---" % test.output.exit_code
if test.output.HasTimedOut():
fail_text += "--- TIMEOUT ---"
self.outputter.HasRunTest(
[test.GetLabel()] + self.runner.context.mode_flags + test.flags,
test.duration,
fail_text)
class JsonTestProgressIndicator(ProgressIndicator):
def __init__(self, progress_indicator, json_test_results, arch, mode):
self.progress_indicator = progress_indicator
self.json_test_results = json_test_results
self.arch = arch
self.mode = mode
self.results = []
self.tests = []
def Starting(self):
self.progress_indicator.runner = self.runner
self.progress_indicator.Starting()
def Done(self):
self.progress_indicator.Done()
complete_results = []
if os.path.exists(self.json_test_results):
with open(self.json_test_results, "r") as f:
# Buildbot might start out with an empty file.
complete_results = json.loads(f.read() or "[]")
# Sort tests by duration.
timed_tests = [t for t in self.tests if t.duration is not None]
timed_tests.sort(lambda a, b: cmp(b.duration, a.duration))
slowest_tests = [
{
"name": test.GetLabel(),
"flags": test.flags,
"command": EscapeCommand(self.runner.GetCommand(test)).replace(
ABS_PATH_PREFIX, ""),
"duration": test.duration,
} for test in timed_tests[:20]
]
complete_results.append({
"arch": self.arch,
"mode": self.mode,
"results": self.results,
"slowest_tests": slowest_tests,
})
with open(self.json_test_results, "w") as f:
f.write(json.dumps(complete_results))
def AboutToRun(self, test):
self.progress_indicator.AboutToRun(test)
def HasRun(self, test, has_unexpected_output):
self.progress_indicator.HasRun(test, has_unexpected_output)
# Buffer all tests for sorting the durations in the end.
self.tests.append(test)
if not has_unexpected_output:
      # Omit tests that ran as expected. Passing reruns of tests that failed
      # earlier still count as unexpected output and are reported here as well.
return
self.results.append({
"name": test.GetLabel(),
"flags": test.flags,
"command": EscapeCommand(self.runner.GetCommand(test)).replace(
ABS_PATH_PREFIX, ""),
"run": test.run,
"stdout": test.output.stdout,
"stderr": test.output.stderr,
"exit_code": test.output.exit_code,
"result": test.suite.GetOutcome(test),
"expected": list(test.outcomes or ["PASS"]),
"duration": test.duration,
})
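# Registry of the selectable base indicators; the test runner presumably
# looks one up by name and may wrap it in the JUnit/JSON indicators above.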
PROGRESS_INDICATORS = {
'verbose': VerboseProgressIndicator,
'dots': DotsProgressIndicator,
'color': ColorProgressIndicator,
'mono': MonochromeProgressIndicator
}
|
{
"content_hash": "d22cef68f2b6691a399fac76ecac2ea5",
"timestamp": "",
"source": "github",
"line_count": 344,
"max_line_length": 79,
"avg_line_length": 28.99127906976744,
"alnum_prop": 0.6237842173869448,
"repo_name": "guorendong/iridium-browser-ubuntu",
"id": "a02e9842b1ea3181851dc98cc59791905a58cc2d",
"size": "11546",
"binary": false,
"copies": "1",
"ref": "refs/heads/ubuntu/precise",
"path": "v8/tools/testrunner/local/progress.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "8402"
},
{
"name": "Assembly",
"bytes": "256197"
},
{
"name": "Batchfile",
"bytes": "34966"
},
{
"name": "C",
"bytes": "15445429"
},
{
"name": "C++",
"bytes": "276628399"
},
{
"name": "CMake",
"bytes": "27829"
},
{
"name": "CSS",
"bytes": "867238"
},
{
"name": "Emacs Lisp",
"bytes": "3348"
},
{
"name": "Go",
"bytes": "13628"
},
{
"name": "Groff",
"bytes": "7777"
},
{
"name": "HTML",
"bytes": "20250399"
},
{
"name": "Java",
"bytes": "9950308"
},
{
"name": "JavaScript",
"bytes": "13873772"
},
{
"name": "LLVM",
"bytes": "1169"
},
{
"name": "Logos",
"bytes": "6893"
},
{
"name": "Lua",
"bytes": "16189"
},
{
"name": "Makefile",
"bytes": "179129"
},
{
"name": "Objective-C",
"bytes": "1871766"
},
{
"name": "Objective-C++",
"bytes": "9674498"
},
{
"name": "PHP",
"bytes": "42038"
},
{
"name": "PLpgSQL",
"bytes": "163248"
},
{
"name": "Perl",
"bytes": "63937"
},
{
"name": "Protocol Buffer",
"bytes": "474121"
},
{
"name": "Python",
"bytes": "11646662"
},
{
"name": "Ragel in Ruby Host",
"bytes": "104923"
},
{
"name": "Scheme",
"bytes": "10604"
},
{
"name": "Shell",
"bytes": "1151673"
},
{
"name": "Standard ML",
"bytes": "5034"
},
{
"name": "VimL",
"bytes": "4075"
},
{
"name": "nesC",
"bytes": "18347"
}
],
"symlink_target": ""
}
|
"""Device-related support functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.framework import device as tf_device
from tensorflow.python.framework import ops
def canonicalize(d, default=None):
"""Canonicalize device string.
If d has missing components, the rest would be deduced from the `default`
argument or from '/replica:0/task:0/device:CPU:0'. For example:
If d = '/cpu:0', default='/job:worker/task:1', it returns
'/job:worker/replica:0/task:1/device:CPU:0'.
If d = '/cpu:0', default='/job:worker', it returns
'/job:worker/replica:0/task:0/device:CPU:0'.
If d = '/gpu:0', default=None, it returns
'/replica:0/task:0/device:GPU:0'.
Note: This uses "job:localhost" as the default if executing eagerly.
Args:
d: a device string or tf.config.LogicalDevice
default: a string for default device if d doesn't have all components.
Returns:
a canonicalized device string.
"""
if isinstance(d, context.LogicalDevice):
d = tf_device.DeviceSpec.from_string(d.name)
else:
d = tf_device.DeviceSpec.from_string(d)
assert d.device_type is None or d.device_type == d.device_type.upper(), (
"Device type '%s' must be all-caps." % (d.device_type,))
# Fill in missing device fields using defaults.
result = tf_device.DeviceSpec(
replica=0, task=0, device_type="CPU", device_index=0)
if ops.executing_eagerly_outside_functions():
# The default job is localhost if eager execution is enabled
result = result.replace(job="localhost")
if default:
# Overrides any defaults with values from the default device if given.
result = result.make_merged_spec(
tf_device.DeviceSpec.from_string(default))
  # Apply `d` last, so that its values take precedence over the defaults.
result = result.make_merged_spec(d)
return result.to_string()
def resolve(d):
"""Canonicalize `d` with current device as default."""
return canonicalize(d, default=current())
class _FakeNodeDef(object):
"""A fake NodeDef for _FakeOperation."""
def __init__(self):
self.op = ""
self.name = ""
class _FakeOperation(object):
"""A fake Operation object to pass to device functions."""
def __init__(self):
self.device = ""
self.type = ""
self.name = ""
self.node_def = _FakeNodeDef()
def _set_device(self, device):
self.device = ops._device_string(device) # pylint: disable=protected-access
def _set_device_from_string(self, device_str):
self.device = device_str
def current():
"""Return a string (not canonicalized) for the current device."""
# TODO(josh11b): Work out how this function interacts with ops.colocate_with.
if ops.executing_eagerly_outside_functions():
d = context.context().device_name
else:
op = _FakeOperation()
ops.get_default_graph()._apply_device_functions(op) # pylint: disable=protected-access
d = op.device
return d
def get_host_for_device(device):
"""Returns the corresponding host device for the given device."""
spec = tf_device.DeviceSpec.from_string(device)
return tf_device.DeviceSpec(
job=spec.job, replica=spec.replica, task=spec.task,
device_type="CPU", device_index=0).to_string()
def local_devices_from_num_gpus(num_gpus):
"""Returns device strings for local GPUs or CPU."""
return (tuple("/device:GPU:%d" % i for i in range(num_gpus)) or
("/device:CPU:0",))
|
{
"content_hash": "b6ca73201d39a13cf98c7dd959aab4ad",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 91,
"avg_line_length": 32.7037037037037,
"alnum_prop": 0.687995469988675,
"repo_name": "adit-chandra/tensorflow",
"id": "db6009d1a45e7fa2f436ebd3ffa6625425e7a52a",
"size": "4221",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tensorflow/python/distribute/device_util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "5003"
},
{
"name": "Batchfile",
"bytes": "45988"
},
{
"name": "C",
"bytes": "773694"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "76734263"
},
{
"name": "CMake",
"bytes": "6545"
},
{
"name": "Dockerfile",
"bytes": "81136"
},
{
"name": "Go",
"bytes": "1679107"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "952944"
},
{
"name": "Jupyter Notebook",
"bytes": "567243"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "1299322"
},
{
"name": "Makefile",
"bytes": "61397"
},
{
"name": "Objective-C",
"bytes": "104706"
},
{
"name": "Objective-C++",
"bytes": "297753"
},
{
"name": "PHP",
"bytes": "24055"
},
{
"name": "Pascal",
"bytes": "3752"
},
{
"name": "Pawn",
"bytes": "17546"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "38764318"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "7459"
},
{
"name": "Shell",
"bytes": "643787"
},
{
"name": "Smarty",
"bytes": "34727"
},
{
"name": "Swift",
"bytes": "62814"
}
],
"symlink_target": ""
}
|
from setuptools import setup, find_packages
setup(name='fnordstalk',
version='0.0.4',
description='A tool to save beanstalk stats to fnordmetric',
author='Stephen Holiday',
author_email='stephen.holiday@gmail.com',
url='https://github.com/sholiday/fnordstalk',
install_requires=(
'pyfnordmetric',
'beanstalkc'
),
scripts=['fnordstalk.py'],
entry_points={
'console_scripts': [
'fnordstalk = fnordstalk:main',
]
},
py_modules=['stalk'],
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Operating System :: Unix',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python',
'Topic :: System :: Monitoring',
],
long_description = """
A tool to save beanstalk stats to fnordmetric
"""
)
|
{
"content_hash": "d61ba09cc5e52ec5610702cf7833185e",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 64,
"avg_line_length": 30.833333333333332,
"alnum_prop": 0.5765765765765766,
"repo_name": "sholiday/fnordstalk",
"id": "6bad95c551b7c926f9b3baa040449fb0870ba19b",
"size": "1132",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "7066"
}
],
"symlink_target": ""
}
|
import json
import pika
import socket
import sys
import time
import traceback
from dfa.common import dfa_logger as logging
LOG = logging.getLogger(__name__)
class DCNMListener(object):
"""This AMQP client class listens to DCNM's AMQP notification and
interacts with openstack for further tenant and network information.
It also communicates with CPNR to populate DHCP data.
"""
def __init__(self, name, ip, user, password, pqueue=None,
c_pri=100, d_pri=100):
"""Create a new instance of AMQP client.
:param dict params: AMQP configuration parameters,
e.g. AMQP server ip, port, user name, password, name of AMQP
exchange and queue for DCNM notification.
"""
# extract from input params
self._server_ip = ip
self._user = user
self._pwd = password
self._pq = pqueue
self._create_pri = c_pri
self._delete_pri = d_pri
# exchange, queue name for receiving vCD events.
self._port = 5672
self._dcnm_exchange_name = 'DCNMExchange'
self._dcnm_queue_name = socket.gethostname()
# specify the key of interest.
key = ('success.cisco.dcnm.event.auto-config.organization.'
'partition.network.*')
self._conn = None
self.consume_channel = None
try:
credentials = None
if self._user:
credentials = pika.PlainCredentials(self._user, self._pwd)
# create connection, channel
self._conn = pika.BlockingConnection(
pika.ConnectionParameters(
host=self._server_ip,
port=self._port,
credentials=credentials))
# create channels for consuming
self.consume_channel = self._conn.channel()
# declare vCD exchange
self.consume_channel.exchange_declare(
exchange=self._dcnm_exchange_name,
exchange_type='topic',
durable=True,
auto_delete=False)
result = self.consume_channel.queue_declare(
queue=self._dcnm_queue_name,
durable=True,
auto_delete=False)
self._dcnm_queue_name = result.method.queue
self.consume_channel.queue_bind(exchange=self._dcnm_exchange_name,
queue=self._dcnm_queue_name,
routing_key=key)
# for info only
msg_count = result.method.message_count
LOG.debug('The exchange %(exch)s queue %(que)s has totally '
' %(count)s messages.', {
'exch': self._dcnm_exchange_name,
'que': self._dcnm_queue_name,
'count': msg_count})
LOG.debug('DCNM Listener initialization done....')
        except Exception:
LOG.exception('Failed to initialize DCNMListener.')
def _cb_dcnm_msg(self, method, body):
""" Callback function to process DCNM network creation/update/deletion
message received by AMQP.
It also communicates with DCNM to extract info for CPNR record
insertion/deletion.
        :param method: the delivery frame, which carries the routing key.
:param str body: The message body.
"""
LOG.debug('Routing_key: %s, body: %s.' % (method.routing_key, body))
partition_keyword = 'auto-config.organization.partition'
network_keyword = partition_keyword + '.network'
network_create_key = network_keyword + '.create'
network_update_key = network_keyword + '.update'
msg = json.loads(body)
LOG.debug('_cb_dcnm_msg: RX message: %s' % msg)
if not msg:
LOG.debug("error, return")
return
url = msg['link']
url_fields = url.split('/')
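        # Positional indexing below assumes DCNM's auto-config link layout,
        # where path segments 4, 6 and 9 carry the organization (project),
        # partition and segmentation id respectively.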
pre_project_name = url_fields[4]
pre_partition_name = url_fields[6]
pre_seg_id = url_fields[9]
data = {"project_name": pre_project_name,
"partition_name": pre_partition_name,
"segmentation_id": pre_seg_id}
if network_create_key in method.routing_key or (
network_update_key in method.routing_key):
pri = self._create_pri
event_type = 'dcnm.network.create'
else:
pri = self._delete_pri
event_type = 'dcnm.network.delete'
if self._pq is not None:
payload = (event_type, data)
            self._pq.put((pri, time.ctime(), payload))
def process_amqp_msgs(self):
"""Process AMQP queue messages.
It connects to AMQP server and calls callbacks to process DCNM events,
i.e. routing key containing '.cisco.dcnm.', once they arrive in the
queue.
"""
LOG.info('Starting process_amqp_msgs...')
while True:
(mtd_fr, hdr_fr, body) = (None, None, None)
try:
if self.consume_channel:
(mtd_fr, hdr_fr, body) = self.consume_channel.basic_get(
self._dcnm_queue_name)
if mtd_fr:
# Queue has messages.
LOG.info('RX message: %s' % body)
self._cb_dcnm_msg(mtd_fr, body)
self.consume_channel.basic_ack(mtd_fr.delivery_tag)
else:
# Queue is empty.
try:
self._conn.sleep(1)
except AttributeError:
time.sleep(1)
except Exception:
exc_type, exc_value, exc_tb = sys.exc_info()
tb_str = traceback.format_exception(exc_type,
exc_value, exc_tb)
LOG.exception(("Failed to read from queue: %(queue)s "
"%(exc_type)s, %(exc_value)s, %(exc_tb)s."), {
'queue': self._dcnm_queue_name,
'exc_type': exc_type,
'exc_value': exc_value,
'exc_tb': tb_str})
|
{
"content_hash": "e29e1838cd7ca7bd42059d9811590090",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 78,
"avg_line_length": 39.096385542168676,
"alnum_prop": 0.5274268104776579,
"repo_name": "CiscoSystems/fabric_enabler",
"id": "87aa8fecfc11871c278a2197e3cbb685e6f49f22",
"size": "7128",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dfa/server/dfa_listen_dcnm.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "920145"
},
{
"name": "Shell",
"bytes": "8220"
}
],
"symlink_target": ""
}
|
'''A Python package that handles serialization/deserialization of Tanium's
XML SOAP requests/responses to/from Python objects.
'''
from .object_types.all_objects import *
# from .session import Session
# from .question_asker import QuestionAsker
from .object_types.base import BaseType
from .object_types.result_set import ResultSet
from .object_types.result_info import ResultInfo
|
{
"content_hash": "ea63b2197cb054ec035128cf1d623fa8",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 78,
"avg_line_length": 39,
"alnum_prop": 0.8076923076923077,
"repo_name": "tanium/pytan",
"id": "436c33a3bfa71852087a6821b956cff07dec9435",
"size": "390",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/taniumpy/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "13251"
},
{
"name": "CSS",
"bytes": "32442"
},
{
"name": "HTML",
"bytes": "1232764"
},
{
"name": "JavaScript",
"bytes": "375167"
},
{
"name": "Makefile",
"bytes": "4287"
},
{
"name": "Python",
"bytes": "2541262"
},
{
"name": "Shell",
"bytes": "3194"
}
],
"symlink_target": ""
}
|
import mock
from rally import consts
from rally import exceptions
from rally import objects
from rally.plugins.openstack.context.keystone import users
from tests.unit import test
CTX = "rally.plugins.openstack.context.keystone.users"
class UserGeneratorTestCase(test.TestCase):
tenants_num = 1
users_per_tenant = 5
users_num = tenants_num * users_per_tenant
threads = 10
@property
def context(self):
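        # Rebuilt on every access, so each test mutates a fresh context dict.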
return {
"config": {
"users": {
"tenants": self.tenants_num,
"users_per_tenant": self.users_per_tenant,
"resource_management_workers": self.threads,
}
},
"admin": {"endpoint": mock.MagicMock()},
"task": {"uuid": "task_id"}
}
def setUp(self):
super(UserGeneratorTestCase, self).setUp()
self.osclients_patcher = mock.patch("%s.osclients" % CTX)
self.osclients = self.osclients_patcher.start()
def tearDown(self):
self.osclients_patcher.stop()
super(UserGeneratorTestCase, self).tearDown()
@mock.patch("%s.network.wrap" % CTX)
def test__remove_default_security_group_not_needed(self, mock_wrap):
services = {"compute": consts.Service.NOVA}
self.osclients.Clients().services.return_value = services
user_generator = users.UserGenerator(self.context)
user_generator._remove_default_security_group()
self.assertFalse(mock_wrap.called)
@mock.patch("%s.network.wrap" % CTX)
def test__remove_default_security_group_neutron_no_sg(self, mock_wrap):
net_wrapper = mock.Mock(SERVICE_IMPL=consts.Service.NEUTRON)
net_wrapper.supports_security_group.return_value = (False, None)
mock_wrap.return_value = net_wrapper
user_generator = users.UserGenerator(self.context)
admin_clients = mock.Mock()
admin_clients.services.return_value = {
"compute": consts.Service.NOVA,
"neutron": consts.Service.NEUTRON}
user_clients = [mock.Mock(), mock.Mock()]
self.osclients.Clients.side_effect = [admin_clients] + user_clients
user_generator._remove_default_security_group()
mock_wrap.assert_called_once_with(admin_clients)
net_wrapper.supports_security_group.assert_called_once_with()
@mock.patch("rally.common.utils.iterate_per_tenants")
@mock.patch("%s.network" % CTX)
@mock.patch("rally.task.utils.check_service_status",
return_value=False)
def test__remove_default_security_group(
self, mock_check_service_status, mock_network,
mock_iterate_per_tenants):
net_wrapper = mock.Mock(SERVICE_IMPL=consts.Service.NEUTRON)
net_wrapper.supports_security_group.return_value = (True, None)
mock_network.wrap.return_value = net_wrapper
user_generator = users.UserGenerator(self.context)
admin_clients = mock.Mock()
admin_clients.services.return_value = {
"compute": consts.Service.NOVA,
"neutron": consts.Service.NEUTRON}
user_clients = [mock.Mock(), mock.Mock()]
self.osclients.Clients.side_effect = [admin_clients] + user_clients
mock_iterate_per_tenants.return_value = [
(mock.MagicMock(), "t1"),
(mock.MagicMock(), "t2")]
user_generator._remove_default_security_group()
mock_network.wrap.assert_called_once_with(admin_clients)
mock_iterate_per_tenants.assert_called_once_with(
user_generator.context["users"])
expected = [mock.call(user_generator.endpoint)] + [
mock.call(u["endpoint"])
for u, t in mock_iterate_per_tenants.return_value]
self.osclients.Clients.assert_has_calls(expected, any_order=True)
expected_deletes = []
for clients in user_clients:
user_nova = clients.nova.return_value
user_nova.security_groups.find.assert_called_once_with(
name="default")
expected_deletes.append(
mock.call(user_nova.security_groups.find.return_value.id))
        neutron_admin = admin_clients.neutron.return_value
        neutron_admin.delete_security_group.assert_has_calls(expected_deletes,
                                                             any_order=True)
@mock.patch("rally.task.utils.check_service_status",
return_value=True)
def test__remove_associated_networks(self, mock_check_service_status):
def fake_get_network(req_network):
for network in networks:
if network.project_id == req_network.project_id:
return network
networks = [mock.MagicMock(project_id="t1"),
mock.MagicMock(project_id="t4")]
nova_admin = mock.MagicMock()
clients = mock.MagicMock()
self.osclients.Clients.return_value = clients
clients.services.return_value = {"compute": "nova"}
clients.nova.return_value = nova_admin
nova_admin.networks.list.return_value = networks
nova_admin.networks.get = fake_get_network
user_generator = users.UserGenerator(self.context)
user_generator.context["tenants"] = {"t1": dict(id="t1", name="t1"),
"t2": dict(id="t2", name="t2")}
user_generator._remove_associated_networks()
mock_check_service_status.assert_called_once_with(mock.ANY,
"nova-network")
nova_admin.networks.disassociate.assert_called_once_with(networks[0])
@mock.patch("rally.task.utils.check_service_status",
return_value=True)
def test__remove_associated_networks_failure(self,
mock_check_service_status):
def fake_get_network(req_network):
for network in networks:
if network.project_id == req_network.project_id:
return network
networks = [mock.MagicMock(project_id="t1"),
mock.MagicMock(project_id="t4")]
nova_admin = mock.MagicMock()
clients = mock.MagicMock()
self.osclients.Clients.return_value = clients
clients.services.return_value = {"compute": "nova"}
clients.nova.return_value = nova_admin
nova_admin.networks.list.return_value = networks
nova_admin.networks.get = fake_get_network
nova_admin.networks.disassociate.side_effect = Exception()
user_generator = users.UserGenerator(self.context)
user_generator.context["tenants"] = {"t1": dict(id="t1", name="t1"),
"t2": dict(id="t2", name="t2")}
user_generator._remove_associated_networks()
mock_check_service_status.assert_called_once_with(mock.ANY,
"nova-network")
nova_admin.networks.disassociate.assert_called_once_with(networks[0])
@mock.patch("%s.broker.time.sleep" % CTX)
@mock.patch("%s.keystone" % CTX)
def test__create_tenants(self, mock_keystone, mock_sleep):
user_generator = users.UserGenerator(self.context)
user_generator.config["tenants"] = 1
tenants = user_generator._create_tenants()
self.assertEqual(1, len(tenants))
id, tenant = tenants.popitem()
self.assertIn("name", tenant)
@mock.patch("%s.broker.time.sleep" % CTX)
@mock.patch("%s.keystone" % CTX)
def test__create_users(self, mock_keystone, mock_sleep):
user_generator = users.UserGenerator(self.context)
user_generator.context["tenants"] = {"t1": dict(id="t1", name="t1"),
"t2": dict(id="t2", name="t2")}
user_generator.config["users_per_tenant"] = 2
users_ = user_generator._create_users()
self.assertEqual(4, len(users_))
for user in users_:
self.assertIn("id", user)
self.assertIn("endpoint", user)
@mock.patch("%s.keystone" % CTX)
def test__delete_tenants(self, mock_keystone):
user_generator = users.UserGenerator(self.context)
user_generator.context["tenants"] = {"t1": dict(id="t1", name="t1"),
"t2": dict(id="t2", name="t2")}
user_generator._delete_tenants()
self.assertEqual(len(user_generator.context["tenants"]), 0)
@mock.patch("%s.keystone" % CTX)
def test__delete_tenants_failure(self, mock_keystone):
wrapped_keystone = mock_keystone.wrap.return_value
wrapped_keystone.delete_project.side_effect = Exception()
user_generator = users.UserGenerator(self.context)
user_generator.context["tenants"] = {"t1": dict(id="t1", name="t1"),
"t2": dict(id="t2", name="t2")}
user_generator._delete_tenants()
self.assertEqual(len(user_generator.context["tenants"]), 0)
@mock.patch("%s.keystone" % CTX)
def test__delete_users(self, mock_keystone):
user_generator = users.UserGenerator(self.context)
user1 = mock.MagicMock()
user2 = mock.MagicMock()
user_generator.context["users"] = [user1, user2]
user_generator._delete_users()
self.assertEqual(len(user_generator.context["users"]), 0)
@mock.patch("%s.keystone" % CTX)
def test__delete_users_failure(self, mock_keystone):
wrapped_keystone = mock_keystone.wrap.return_value
wrapped_keystone.delete_user.side_effect = Exception()
user_generator = users.UserGenerator(self.context)
user1 = mock.MagicMock()
user2 = mock.MagicMock()
user_generator.context["users"] = [user1, user2]
user_generator._delete_users()
self.assertEqual(len(user_generator.context["users"]), 0)
@mock.patch("%s.keystone" % CTX)
def test_setup_and_cleanup(self, mock_keystone):
wrapped_keystone = mock.MagicMock()
mock_keystone.wrap.return_value = wrapped_keystone
with users.UserGenerator(self.context) as ctx:
ctx.setup()
self.assertEqual(len(ctx.context["users"]),
self.users_num)
self.assertEqual(len(ctx.context["tenants"]),
self.tenants_num)
        # Cleanup (called by context manager)
self.assertEqual(len(ctx.context["users"]), 0)
self.assertEqual(len(ctx.context["tenants"]), 0)
@mock.patch("%s.keystone" % CTX)
def test_setup_and_cleanup_failure(self, mock_keystone):
wrapped_keystone = mock_keystone.wrap.return_value
wrapped_keystone.create_user.side_effect = Exception()
with users.UserGenerator(self.context) as ctx:
self.assertRaises(exceptions.ContextSetupFailure, ctx.setup)
# Ensure that tenants get deleted anyway
self.assertEqual(len(ctx.context["tenants"]), 0)
@mock.patch("%s.keystone" % CTX)
def test_users_and_tenants_in_context(self, mock_keystone):
wrapped_keystone = mock.MagicMock()
mock_keystone.wrap.return_value = wrapped_keystone
task = {"uuid": "abcdef"}
config = {
"config": {
"users": {
"tenants": 1,
"users_per_tenant": 2,
"resource_management_workers": 1
}
},
"admin": {"endpoint": mock.MagicMock()},
"task": task
}
user_list = [mock.MagicMock(id="id_%d" % i)
for i in range(self.users_num)]
wrapped_keystone.create_user.side_effect = user_list
with users.UserGenerator(config) as ctx:
ctx.setup()
create_tenant_calls = []
for i, t in enumerate(ctx.context["tenants"]):
pattern = users.UserGenerator.PATTERN_TENANT
create_tenant_calls.append(
mock.call(pattern % {"task_id": task["uuid"], "iter": i},
ctx.config["project_domain"]))
for user in ctx.context["users"]:
self.assertEqual(set(["id", "endpoint", "tenant_id"]),
set(user.keys()))
tenants_ids = []
for t in ctx.context["tenants"].keys():
tenants_ids.append(t)
for (user, tenant_id, orig_user) in zip(ctx.context["users"],
tenants_ids, user_list):
self.assertEqual(user["id"], orig_user.id)
self.assertEqual(user["tenant_id"], tenant_id)
@mock.patch("%s.keystone" % CTX)
def test_users_contains_correct_endpoint_type(self, mock_keystone):
endpoint = objects.Endpoint("foo_url", "foo", "foo_pass",
endpoint_type=consts.EndpointType.INTERNAL)
config = {
"config": {
"users": {
"tenants": 1,
"users_per_tenant": 2,
"resource_management_workers": 1
}
},
"admin": {"endpoint": endpoint},
"task": {"uuid": "task_id"}
}
user_generator = users.UserGenerator(config)
users_ = user_generator._create_users()
for user in users_:
self.assertEqual("internal", user["endpoint"].endpoint_type)
@mock.patch("%s.keystone" % CTX)
def test_users_contains_default_endpoint_type(self, mock_keystone):
endpoint = objects.Endpoint("foo_url", "foo", "foo_pass")
config = {
"config": {
"users": {
"tenants": 1,
"users_per_tenant": 2,
"resource_management_workers": 1
}
},
"admin": {"endpoint": endpoint},
"task": {"uuid": "task_id"}
}
user_generator = users.UserGenerator(config)
users_ = user_generator._create_users()
for user in users_:
self.assertEqual("public", user["endpoint"].endpoint_type)
|
{
"content_hash": "928a4539d7ef6c54f6001b4a670569a9",
"timestamp": "",
"source": "github",
"line_count": 343,
"max_line_length": 79,
"avg_line_length": 41.48396501457726,
"alnum_prop": 0.576779815869,
"repo_name": "shdowofdeath/rally",
"id": "d3fb3c640803ad33430e15c75670c1da5138e330",
"size": "14859",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/plugins/openstack/context/keystone/test_users.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "46737"
},
{
"name": "Python",
"bytes": "2421750"
},
{
"name": "Shell",
"bytes": "36795"
}
],
"symlink_target": ""
}
|
"""
An object for managing IPython profile directories.
Authors:
* Brian Granger
* Fernando Perez
* Min RK
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
import shutil
import errno
import time
from IPython.config.configurable import LoggingConfigurable
from IPython.utils.path import get_ipython_package_dir, expand_path
from IPython.utils.traitlets import Unicode, Bool
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Module errors
#-----------------------------------------------------------------------------
class ProfileDirError(Exception):
pass
#-----------------------------------------------------------------------------
# Class for managing profile directories
#-----------------------------------------------------------------------------
class ProfileDir(LoggingConfigurable):
"""An object to manage the profile directory and its resources.
The profile directory is used by all IPython applications, to manage
configuration, logging and security.
This object knows how to find, create and manage these directories. This
should be used by any code that wants to handle profiles.
"""
security_dir_name = Unicode('security')
log_dir_name = Unicode('log')
startup_dir_name = Unicode('startup')
pid_dir_name = Unicode('pid')
security_dir = Unicode(u'')
log_dir = Unicode(u'')
startup_dir = Unicode(u'')
pid_dir = Unicode(u'')
location = Unicode(u'', config=True,
help="""Set the profile location directly. This overrides the logic used by the
`profile` option.""",
)
    _location_isset = Bool(False)  # detects the location being set more than once
def _location_changed(self, name, old, new):
if self._location_isset:
raise RuntimeError("Cannot set profile location more than once.")
self._location_isset = True
num_tries = 0
max_tries = 5
while not os.path.isdir(new):
try:
os.makedirs(new)
except OSError:
if num_tries > max_tries:
raise
num_tries += 1
time.sleep(0.5)
# ensure config files exist:
self.security_dir = os.path.join(new, self.security_dir_name)
self.log_dir = os.path.join(new, self.log_dir_name)
self.startup_dir = os.path.join(new, self.startup_dir_name)
self.pid_dir = os.path.join(new, self.pid_dir_name)
self.check_dirs()
def _log_dir_changed(self, name, old, new):
self.check_log_dir()
def _mkdir(self, path, mode=None):
"""ensure a directory exists at a given path
This is a version of os.mkdir, with the following differences:
- returns True if it created the directory, False otherwise
- ignores EEXIST, protecting against race conditions where
the dir may have been created in between the check and
the creation
- sets permissions if requested and the dir already exists
"""
if os.path.exists(path):
if mode and os.stat(path).st_mode != mode:
try:
os.chmod(path, mode)
except OSError:
self.log.warn(
"Could not set permissions on %s",
path
)
return False
try:
if mode:
os.mkdir(path, mode)
else:
os.mkdir(path)
except OSError as e:
if e.errno == errno.EEXIST:
return False
else:
raise
return True
def check_log_dir(self):
self._mkdir(self.log_dir)
def _startup_dir_changed(self, name, old, new):
self.check_startup_dir()
def check_startup_dir(self):
self._mkdir(self.startup_dir)
readme = os.path.join(self.startup_dir, 'README')
src = os.path.join(get_ipython_package_dir(), u'config', u'profile', u'README_STARTUP')
if not os.path.exists(src):
self.log.warn("Could not copy README_STARTUP to startup dir. Source file %s does not exist.", src)
if os.path.exists(src) and not os.path.exists(readme):
shutil.copy(src, readme)
def _security_dir_changed(self, name, old, new):
self.check_security_dir()
def check_security_dir(self):
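        # 0o40700 == S_IFDIR | 0o700: a directory accessible by the owner
        # only; _mkdir above compares st_mode against this full value.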
self._mkdir(self.security_dir, 0o40700)
def _pid_dir_changed(self, name, old, new):
self.check_pid_dir()
def check_pid_dir(self):
self._mkdir(self.pid_dir, 0o40700)
def check_dirs(self):
self.check_security_dir()
self.check_log_dir()
self.check_pid_dir()
self.check_startup_dir()
def copy_config_file(self, config_file, path=None, overwrite=False):
"""Copy a default config file into the active profile directory.
Default configuration files are kept in :mod:`IPython.config.default`.
This function moves these from that location to the working profile
directory.
"""
dst = os.path.join(self.location, config_file)
if os.path.isfile(dst) and not overwrite:
return False
if path is None:
path = os.path.join(get_ipython_package_dir(), u'config', u'profile', u'default')
src = os.path.join(path, config_file)
shutil.copy(src, dst)
return True
@classmethod
def create_profile_dir(cls, profile_dir, config=None):
"""Create a new profile directory given a full path.
Parameters
----------
profile_dir : str
The full path to the profile directory. If it does exist, it will
be used. If not, it will be created.
"""
return cls(location=profile_dir, config=config)
@classmethod
def create_profile_dir_by_name(cls, path, name=u'default', config=None):
"""Create a profile dir by profile name and path.
Parameters
----------
path : unicode
The path (directory) to put the profile directory in.
name : unicode
The name of the profile. The name of the profile directory will
be "profile_<profile>".
"""
if not os.path.isdir(path):
raise ProfileDirError('Directory not found: %s' % path)
profile_dir = os.path.join(path, u'profile_' + name)
return cls(location=profile_dir, config=config)
@classmethod
def find_profile_dir_by_name(cls, ipython_dir, name=u'default', config=None):
"""Find an existing profile dir by profile name, return its ProfileDir.
This searches through a sequence of paths for a profile dir. If it
is not found, a :class:`ProfileDirError` exception will be raised.
The search path algorithm is:
1. ``os.getcwdu()``
2. ``ipython_dir``
Parameters
----------
ipython_dir : unicode or str
The IPython directory to use.
name : unicode or str
The name of the profile. The name of the profile directory
will be "profile_<profile>".
"""
dirname = u'profile_' + name
paths = [os.getcwdu(), ipython_dir]
for p in paths:
profile_dir = os.path.join(p, dirname)
if os.path.isdir(profile_dir):
return cls(location=profile_dir, config=config)
else:
raise ProfileDirError('Profile directory not found in paths: %s' % dirname)
@classmethod
def find_profile_dir(cls, profile_dir, config=None):
"""Find/create a profile dir and return its ProfileDir.
This will create the profile directory if it doesn't exist.
Parameters
----------
profile_dir : unicode or str
The path of the profile directory. This is expanded using
:func:`IPython.utils.genutils.expand_path`.
"""
profile_dir = expand_path(profile_dir)
if not os.path.isdir(profile_dir):
raise ProfileDirError('Profile directory not found: %s' % profile_dir)
return cls(location=profile_dir, config=config)
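if __name__ == "__main__":
    # A minimal usage sketch of the classmethods above; not part of the
    # original module. Assumes a Python 2 / IPython 1.x-era environment
    # (u'' literals, os.getcwdu) in which this module is importable.
    import tempfile
    ipython_dir = tempfile.mkdtemp()
    # Creates <ipython_dir>/profile_demo plus its security/log/startup/pid
    # subdirectories via the trait-change handlers and check_dirs().
    pd = ProfileDir.create_profile_dir_by_name(ipython_dir, u'demo')
    # The finder searches os.getcwdu() first, then the given ipython_dir,
    # raising ProfileDirError if neither contains profile_demo.
    pd2 = ProfileDir.find_profile_dir_by_name(ipython_dir, u'demo')
    assert pd2.location == pd.location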
|
{
"content_hash": "0c44a835623991579964161c99c2909b",
"timestamp": "",
"source": "github",
"line_count": 258,
"max_line_length": 110,
"avg_line_length": 34.46124031007752,
"alnum_prop": 0.5490945900348667,
"repo_name": "noslenfa/tdjangorest",
"id": "03726c41c855dacea14df195268dcf0ed43b1bc8",
"size": "8909",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "uw/lib/python2.7/site-packages/IPython/core/profiledir.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "189930"
},
{
"name": "Groff",
"bytes": "7138"
},
{
"name": "HTML",
"bytes": "279754"
},
{
"name": "JavaScript",
"bytes": "1017625"
},
{
"name": "Makefile",
"bytes": "7062"
},
{
"name": "Python",
"bytes": "11886731"
},
{
"name": "Shell",
"bytes": "3741"
},
{
"name": "Smarty",
"bytes": "20972"
}
],
"symlink_target": ""
}
|
"""
Plots the 2D velocity magnitude from an IBAMR simulation at saved time-steps
using the visualization software VisIt.
"""
from snake.ibamr.simulation import IBAMRSimulation
simulation = IBAMRSimulation()
body_name = 'flyingSnake2dAoA35ds004filledInside' # file name (no extension)
simulation.plot_field_contours_visit('velocity-magnitude', (0.0, 2.0),
body=body_name,
solution_folder='numericalSolution',
view=(-0.75, -1.0, 1.50, 1.0),
width=800)
|
{
"content_hash": "93ae3428abdfdae8b423868960bcef71",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 77,
"avg_line_length": 37.625,
"alnum_prop": 0.5764119601328903,
"repo_name": "mesnardo/snake",
"id": "63f0b3128d6710f75a7aaddf5f7374bcd3db23b2",
"size": "602",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/ibamr/plotVelocityMagnitude.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3886"
},
{
"name": "PLSQL",
"bytes": "38"
},
{
"name": "Python",
"bytes": "285760"
}
],
"symlink_target": ""
}
|
from rest_framework.views import APIView
from rest_framework.exceptions import APIException, NotFound
from rest_framework.response import Response
from rest_framework.authentication import TokenAuthentication
from rest_framework.permissions import IsAuthenticated
from station.models import *
from clinic.models import *
from clinicstation.models import *
from queue.models import *
from datetime import *
import sys
from django.core import serializers
from django.http import HttpResponse, HttpResponseForbidden, HttpResponseBadRequest, HttpResponseServerError, HttpResponseNotFound
import json
class QueueView(APIView):
authentication_classes = (TokenAuthentication,)
permission_classes = (IsAuthenticated,)
def __init__(self):
super(QueueView, self).__init__()
def get(self, request, format=None):
badRequest = False
notFound = False
aClinic = None
aStation = None
queues = None
queueStatus = None
internalError = False
ret = {}
clinicid = request.GET.get('clinic', '')
stationid = request.GET.get('station', '')
clinicstation = request.GET.get('clinicstation', '')
if not clinicid == '':
try:
clinicid = int(clinicid)
try:
aClinic = Clinic.objects.get(id=clinicid)
except:
aClinic = None
notFound = True
except:
badRequest = True
else:
badRequest = True # required arg
if not stationid == '':
try:
stationid = int(stationid)
try:
aStation = Station.objects.get(id=stationid)
except:
aStation = None
notFound = True
except:
pass
if not notFound and not badRequest:
kwargs = {}
kwargs["clinic"] = aClinic
if aStation:
kwargs["station"] = aStation
try:
queues = Queue.objects.filter(**kwargs)
if not queues or len(queues) == 0:
notFound = True
except:
notFound = True
if not notFound and not badRequest:
try:
queueStatus = QueueStatus.objects.filter(clinic=aClinic)
if not queueStatus or len(queueStatus) == 0:
notFound = True
elif queueStatus and len(queueStatus) > 1:
internalError = True
else:
queueStatus = queueStatus[0]
except:
internalError = True
if not notFound and not badRequest and not internalError:
ret["status"] = {"numwaiting": queueStatus.numwaiting,
"minq": queueStatus.minq,
"maxq": queueStatus.maxq,
"avgq": queueStatus.avgq,
"minwait": queueStatus.minwait,
"maxwait": queueStatus.maxwait,
"avgwait": queueStatus.avgwait}
ret["queues"] = []
for x in queues:
if not clinicstation == '' and int(clinicstation) != x.clinicstation_id:
# clinicstation does not match
continue
queueData = {}
aClinicStation = None
try:
aClinicStation = ClinicStation.objects.get(id=x.clinicstation_id)
except:
aClinicStation = None
if not aClinicStation:
internalError = True
break
queueData["name"] = aClinicStation.name
queueData["name_es"] = aClinicStation.name_es
queueData["clinicstation"] = aClinicStation.id
queueData["avgservicetime"] = x.avgservicetime
queueData["entries"] = []
try:
queueEntries = QueueEntry.objects.filter(queue=x.id).order_by("timein")
if not queueEntries:
queueEntries = []
except:
internalError = True
if internalError:
break
for y in queueEntries:
entryData = {}
entryData["id"] = y.id
entryData["patient"] = y.patient_id
entryData["timein"] = str(y.timein)
entryData["waittime"] = str(y.waittime)
entryData["estwaittime"] = str(y.estwaittime)
entryData["routingslip"] = y.routingslip_id
entryData["routingslipentry"] = y.routingslipentry_id
queueData["entries"].append(entryData)
ret["queues"].append(queueData)
if badRequest:
return HttpResponseBadRequest()
if notFound:
return HttpResponseNotFound()
if internalError:
return HttpResponseServerError()
return Response(ret)
class QueueEntryView(APIView):
authentication_classes = (TokenAuthentication,)
permission_classes = (IsAuthenticated,)
def __init__(self):
super(QueueEntryView, self).__init__()
def delete(self, request, queueentry_id=None, format=None):
        queueentry = None
        # see if the queue entry exists
try:
queueentry = QueueEntry.objects.get(id=queueentry_id)
except:
queueentry = None
if not queueentry:
raise NotFound
else:
queueentry.delete()
return Response({})
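# A client-side usage sketch; not part of the original file. Assumes these
# views are routed at the hypothetical URL /tscharts/v1/queue/ and that a
# DRF auth token was obtained beforehand.
#
#   import requests
#   resp = requests.get(
#       "http://localhost:8000/tscharts/v1/queue/",
#       params={"clinic": 1, "station": 2},  # 'clinic' is required
#       headers={"Authorization": "Token <token>"})
#   if resp.status_code == 200:
#       data = resp.json()
#       print(data["status"]["avgwait"])     # clinic-wide queue statistics
#       for q in data["queues"]:             # one entry per clinic station
#           print(q["name"], len(q["entries"]))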
|
{
"content_hash": "9fbfdd6f369e4c6fd7bd1973eb11cd33",
"timestamp": "",
"source": "github",
"line_count": 173,
"max_line_length": 130,
"avg_line_length": 33.884393063583815,
"alnum_prop": 0.5214943705220061,
"repo_name": "slogan621/tscharts",
"id": "33cf0dc30926246d2d96a12eb223388e238e9992",
"size": "6483",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "queue/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "763"
},
{
"name": "Python",
"bytes": "1690774"
},
{
"name": "Shell",
"bytes": "2706"
}
],
"symlink_target": ""
}
|
from __future__ import print_function, unicode_literals
from django.core.management.base import CommandError
from django.db.models import Q
from django_evolution.compat.commands import BaseCommand
from django_evolution.compat.six.moves import input
from django_evolution.compat.translation import gettext as _
from django_evolution.models import Evolution
class Command(BaseCommand):
"""Wipes an evolutions from the history.
This is a very dangerous operation, and should only be done after a
full database backup.
"""
def add_arguments(self, parser):
"""Add arguments to the command.
Args:
parser (object):
The argument parser to add to.
"""
parser.add_argument(
'args',
metavar='EVOLUTION_LABEL',
nargs='+',
help=_('One or more evolution labels to wipe.'))
parser.add_argument(
'--noinput',
action='store_false',
dest='interactive',
default=True,
help='Tells Django to NOT prompt the user for input of any kind.')
parser.add_argument(
'--app-label',
action='store',
dest='app_label',
help='The app label the evolution label applies to.')
def handle(self, *evolution_labels, **options):
if not evolution_labels:
raise CommandError(
'One or more evolution labels must be provided.')
# Sanity-check each app to make sure it exists only once, and is
# in the given app (if specified).
to_wipe_ids = []
app_label = options['app_label']
for evolution_label in evolution_labels:
q = Q(label=evolution_label)
if app_label:
q = q & Q(app_label=app_label)
evolutions = list(Evolution.objects.filter(q).values('pk'))
if len(evolutions) == 0:
if app_label:
raise CommandError(
"Unable to find evolution '%s' for app label '%s'" %
(evolution_label, app_label))
else:
raise CommandError(
"Unable to find evolution '%s'" % evolution_label)
if len(evolutions) > 1:
if app_label:
raise CommandError(
"Too many evolutions named '%s' for app label '%s'" %
(evolution_label, app_label))
else:
raise CommandError(
"Too many evolutions named '%s'" % evolution_label)
to_wipe_ids.append(evolutions[0]['pk'])
if to_wipe_ids:
if options['interactive']:
confirm = input("""
You have requested to delete %s evolution(s). This may cause permanent
problems, and should only be done after a FULL BACKUP and under direct
guidance.
Are you sure you want to wipe these evolutions from the database?
Type 'yes' to continue, or 'no' to cancel: """ % len(to_wipe_ids))
else:
confirm = 'yes'
if confirm == 'yes':
Evolution.objects.filter(pk__in=to_wipe_ids).delete()
print('%s evolution(s) have been deleted.' % len(to_wipe_ids))
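# Example invocations of this management command (named after this file,
# wipe-evolution); not part of the original file. The app and evolution
# labels below are hypothetical.
#
#   python manage.py wipe-evolution my_evolution_label
#   python manage.py wipe-evolution my_evolution_label --app-label myapp --noinput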
|
{
"content_hash": "abf5d0108a5252f96988b2ee8e22b528",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 78,
"avg_line_length": 34.6875,
"alnum_prop": 0.5570570570570571,
"repo_name": "beanbaginc/django-evolution",
"id": "82fb95f257c935e0dba7da748aabe6c229c8b4a5",
"size": "3330",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_evolution/management/commands/wipe-evolution.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1892034"
}
],
"symlink_target": ""
}
|
from django.test import TestCase
|
{
"content_hash": "a0ee12cdc1f3aa194074be570b99435a",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 32,
"avg_line_length": 32,
"alnum_prop": 0.875,
"repo_name": "designcc/django-ccstartapp",
"id": "0fabb9f5311f015961b207d7d46bcb26acb37960",
"size": "32",
"binary": false,
"copies": "13",
"ref": "refs/heads/master",
"path": "app_name/tests/test_models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1161"
}
],
"symlink_target": ""
}
|
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import optparse
import os
import shutil
from StringIO import StringIO
import sys
import urlparse
import build_server
# Copy all the files necessary to run the server. These are cleaned up when the
# server quits.
build_server.main()
from fake_fetchers import ConfigureFakeFetchers
class _Response(object):
def __init__(self):
self.status = 200
self.out = StringIO()
self.headers = {}
def set_status(self, status):
self.status = status
class _Request(object):
def __init__(self, path):
self.headers = {}
self.path = path
self.url = 'http://localhost' + path
def _Render(path, local_path):
response = _Response()
Handler(_Request(urlparse.urlparse(path).path),
response,
local_path=local_path).get()
content = response.out.getvalue()
if isinstance(content, unicode):
content = content.encode('utf-8', 'replace')
return (content, response.status)
def _GetLocalPath():
if os.sep in sys.argv[0]:
return os.path.join(sys.argv[0].rsplit(os.sep, 1)[0], os.pardir, os.pardir)
return os.path.join(os.pardir, os.pardir)
class RequestHandler(BaseHTTPRequestHandler):
"""A HTTPRequestHandler that outputs the docs page generated by Handler.
"""
def do_GET(self):
(content, status) = _Render(self.path, RequestHandler.local_path)
self.send_response(status)
self.end_headers()
self.wfile.write(content)
if __name__ == '__main__':
parser = optparse.OptionParser(
description='Runs a server to preview the extension documentation.',
usage='usage: %prog [option]...')
parser.add_option('-p', '--port', default='8000',
help='port to run the server on')
parser.add_option('-d', '--directory', default=_GetLocalPath(),
help='extensions directory to serve from - '
'should be chrome/common/extensions within a Chromium checkout')
parser.add_option('-r', '--render', default='',
help='statically render a page and print to stdout rather than starting '
'the server, e.g. apps/storage.html. The path may optionally end '
'with #n where n is the number of times to render the page before '
'printing it, e.g. apps/storage.html#50, to use for profiling.')
(opts, argv) = parser.parse_args()
if (not os.path.isdir(opts.directory) or
not os.path.isdir(os.path.join(opts.directory, 'docs')) or
not os.path.isdir(os.path.join(opts.directory, 'api'))):
print('Specified directory does not exist or does not contain extension '
'docs.')
exit()
ConfigureFakeFetchers(os.path.join(opts.directory, 'docs'))
from handler import Handler
if opts.render:
if opts.render.find('#') >= 0:
(path, iterations) = opts.render.rsplit('#', 1)
extra_iterations = int(iterations) - 1
else:
path = opts.render
extra_iterations = 0
(content, status) = _Render(path, opts.directory)
if status != 200:
print('Error status: %s' % status)
exit(1)
for _ in range(extra_iterations):
_Render(path, opts.directory)
# Static paths will show up as /stable/static/foo but this only makes sense
# from a webserver.
print(content.replace('/stable/static', 'static'))
exit()
print('Starting previewserver on port %s' % opts.port)
print('Reading from %s' % opts.directory)
print('')
print('The extension documentation can be found at:')
print('')
print(' http://localhost:%s/extensions/' % opts.port)
print('')
print('The apps documentation can be found at:')
print('')
print(' http://localhost:%s/apps/' % opts.port)
print('')
RequestHandler.local_path = opts.directory
server = HTTPServer(('', int(opts.port)), RequestHandler)
try:
server.serve_forever()
finally:
server.socket.close()
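# Example invocations using the options defined above; not part of the
# original file. The directory path is hypothetical.
#
#   python preview.py --port 8080 --directory /path/to/chrome/common/extensions
#   python preview.py --render apps/storage.html#50  # render 50x for profiling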
|
{
"content_hash": "0dec79a783ec80e564158f4902f948b8",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 79,
"avg_line_length": 31.991666666666667,
"alnum_prop": 0.6663193539984371,
"repo_name": "leiferikb/bitpop-private",
"id": "ff6585cd25d2da5731cbc9c9fa111f5564dc53cd",
"size": "4967",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "chrome/common/extensions/docs/server2/preview.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "1871"
},
{
"name": "C",
"bytes": "1800028"
},
{
"name": "C++",
"bytes": "76499582"
},
{
"name": "CSS",
"bytes": "803682"
},
{
"name": "Java",
"bytes": "1234788"
},
{
"name": "JavaScript",
"bytes": "21793252"
},
{
"name": "Objective-C",
"bytes": "5358744"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "Perl",
"bytes": "64410"
},
{
"name": "Python",
"bytes": "3017857"
},
{
"name": "Ruby",
"bytes": "650"
},
{
"name": "Shell",
"bytes": "322362"
},
{
"name": "XSLT",
"bytes": "418"
},
{
"name": "nesC",
"bytes": "12138"
}
],
"symlink_target": ""
}
|
"""TensorFlow Debugger (tfdbg) Stepper Module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import os
import shutil
import tempfile
import time
import six
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.debug import debug_data
from tensorflow.python.debug import debug_utils
from tensorflow.python.framework import ops
from tensorflow.python.ops import session_ops
# TODO(cais): Use nest.flatten once it handles nest Dicts correctly.
def _flatten_fetches(fetches):
"""Flatten list, tuple of fetches, or a single fetch into a list of fetches.
Args:
fetches: The fetches to flatten: Can be a single Tensor, Op, or a
potentially nested list, tuple or dict of such individual fetches.
Returns:
The fetches flattened to a list.
"""
flattened = []
if isinstance(fetches, (list, tuple)):
for fetch in fetches:
flattened.extend(_flatten_fetches(fetch))
elif isinstance(fetches, dict):
for key in fetches:
flattened.extend(_flatten_fetches(fetches[key]))
else:
flattened.append(fetches)
return flattened
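# (Illustrative example, not in the original: given tensors t1, t2, t3,
#   _flatten_fetches([[t1, t2], {"x": t3}])  ==>  [t1, t2, t3]
#  with dict values appended in key-iteration order.)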
class NodeStepper(object):
"""TensorFlow Debugger (tfdbg) stepper.
  The stepper provides the ability to perform "continue to" actions on a graph,
given fetch and feeds. The stepper calculates the transitive closure of the
fetch. cont() (continue to) calls can only be performed on members of the
transitive closure.
On a cont() call, the stepper performs depth-first tracing of the input
tree of the target. When it reaches an input where one of the following is
available, it will supply the available value to the feed_dict of the cont()
call:
(1) Overriding (injected) values from the client.
(2) TensorHandles from previous cont() calls.
(3) Dumped intermediate Tensors from previous cont() calls.
(4) Feeds supplied during the construction of the stepper instance.
During the cont() call, intermediate Tensors are dumped to temporary
directories. The dumped Tensor values will be used in subsequent cont() calls
when they are required as data dependencies.
  The temporary directories are automatically cleaned up when the NodeStepper
  instance exits as a context manager.
Once the tracing is complete, it will issue a run() call on the
underlying session, using the aforementioned feed_dict prepared by the input
tracing, to achieve the "continue-to" action. The above process takes into
account whether the transitive closure of an input contains Variables that
are updated during previous cont() calls on this stepper instance. If such
updates exist, we say the transitive closure is "dirty" and the stepper
can restore the "clean" state of the Variable and avoid using the
TensorHandle.
Example of basic usage:
a = tf.Variable(1.0, name="a")
  b = tf.Variable(2.0, name="b")
c = tf.add(a, b, name="c")
d = tf.multiply(a, c, name="d")
sess = tf.Session()
  sess.run(tf.initialize_all_variables())
stepper = NodeStepper(sess, d)
stepper.cont(c) # Caches the handle to Tensor c:0.
stepper.cont(d) # Uses handle to Tensor c:0, avoiding recomputing c.
"""
# Possible types of feed used during cont() calls.
FEED_TYPE_CLIENT = "client"
FEED_TYPE_HANDLE = "handle"
FEED_TYPE_OVERRIDE = "override"
FEED_TYPE_DUMPED_INTERMEDIATE = "dumped_intermediate"
def __init__(self, sess, fetches, feed_dict=None):
"""Constructor for Debugger.
Args:
sess: (Session) the TensorFlow Session to step in.
fetches: Same as the fetches input argument to `Session.run()`.
feed_dict: Same as the feed_dict input argument to `Session.run()`.
"""
self._sess = sess
self._fetches = fetches
flattened_fetches = _flatten_fetches(fetches)
self._fetch_names, self._fetch_list = self._get_fetch_and_name_lists(
flattened_fetches)
# A map from Variable name to initializer op.
self._variable_initializers = {}
# A map from Variable name to initial value, used when overriding or
# restoring Variable values.
self._variable_initial_values = {}
# Initialize the map for output recipients (targets).
self._output_targets = {}
# Sorted transitive closure of the fetched node.
# We also collect the list of the names of the reference-type Tensors,
# because we later need to avoid using intermediate dumps for such Tensors.
(self._sorted_nodes,
self._closure_elements,
self._ref_tensor_names) = self._dfs_visit(self._sess.graph,
self._fetch_list)
self._transitive_closure_set = set(self._sorted_nodes)
# A map from Variable name to the old values (before any cont() calls).
self._cached_variable_values = {}
# A cache map from tensor name to what variables may invalidate the tensor
self._cached_invalidation_path = {}
# Keep track of which variables are in a dirty state.
self._dirty_variables = set()
# Variables updated in the last cont() call.
self._last_updated = None
# Cached tensor handles: a dict with keys as tensor names and values as
# tensor handles.
self._tensor_handles = {}
# Cached intermediate tensor values: a dict mapping tensor names to
# DebugTensorDatum.
self._dumped_intermediate_tensors = {}
self._dump_session_root = tempfile.mkdtemp(prefix="tfdbg_stepper_")
# Feed dict from the client.
self._client_feed_dict = {}
if feed_dict:
for key in feed_dict:
if isinstance(key, ops.Tensor):
self._client_feed_dict[key.name] = feed_dict[key]
else:
self._client_feed_dict[key] = feed_dict[key]
# Overriding tensor values.
self._override_tensors = {}
# What the feed types were used by the last cont() call.
self._last_feed_types = {}
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
if os.path.isdir(self._dump_session_root):
shutil.rmtree(self._dump_session_root)
def _get_fetch_and_name_lists(self, flattened_fetches):
"""Get the lists of fetches and their names.
Args:
flattened_fetches: A list of fetches or their names. Can mix fetches and
names.
Returns:
(list of str): A list of the names of the fetches.
(list): A list of the fetches.
"""
fetch_names = []
fetch_list = []
for fetch in flattened_fetches:
if isinstance(fetch, six.string_types):
fetch_names.append(fetch)
fetch_list.append(self._sess.graph.as_graph_element(fetch))
else:
fetch_names.append(fetch.name)
fetch_list.append(fetch)
return fetch_names, fetch_list
def _dfs_visit(self, graph, elem_list):
"""Trace back the input of a graph element, using depth-first search.
Uses non-recursive implementation to prevent stack overflow for deep
graphs.
Also performs the following action(s):
1) When encountering a Variable, obtain its initializer op, to
facilitate possible subsequent restoration / overriding of variable
value.
Args:
graph: A TF graph instance.
elem_list: list of graph elements: a Tensor or an Operation.
Returns:
(list of str) A topologically-sorted list of all nodes (not tensors)
in the transitive closure of elem_list. Obviously, the topological sort
is not unique in general. The return value here is just an arbitrary
one of potentially many possible topological sorts.
      (list of str) A list of all graph elements (nodes and/or tensors) in the
      transitive closure.
      (set of str) A set of the names of the reference-type Tensors in the
      transitive closure.
"""
    # This set should hold only strings, i.e., names of the nodes.
done = set() # Keep track of visited graph elements.
    # Input map of the nodes in the transitive closure; consumed by the
    # topological sort below.
    node_inputs = dict()
elem_stack = copy.copy(elem_list)
# Graph elements in the transitive closure, including the nodes and tensors.
closure_elements = [elem.name for elem in elem_list]
ref_tensor_names = set()
for element in elem_list:
if isinstance(element, ops.Tensor) and element.dtype._is_ref_dtype: # pylint: disable=protected-access
ref_tensor_names.add(element.name)
while elem_stack:
curr_elem = elem_stack.pop()
curr_node = self._get_node(curr_elem)
done.add(curr_node.name)
non_control_inputs = [inp for inp in curr_node.inputs]
control_inputs = [inp for inp in curr_node.control_inputs]
all_inputs = set(non_control_inputs + control_inputs)
if curr_node.name not in node_inputs:
all_input_nodes = set()
for inp in all_inputs:
all_input_nodes.add(self._get_node(inp).name)
node_inputs[curr_node.name] = all_input_nodes
# Iterate through the (non-control) inputs.
for inp in all_inputs:
        # Set up the output-target map for this input.
if inp.name not in self._output_targets:
self._output_targets[inp.name] = set([curr_elem.name])
else:
self._output_targets[inp.name].add(curr_elem.name)
if (isinstance(inp, ops.Tensor) and
inp.op.type in ["Variable", "VariableV2"] and
inp.name not in self._variable_initializers):
# Obtain the initializer op of the variable, in case the Variable's
# value needs to be restored later.
initializer = graph.as_graph_element(inp.op.name + "/Assign")
self._variable_initializers[inp.name] = initializer
self._variable_initial_values[inp.name] = initializer.inputs[1]
inp_node = self._get_node(inp)
if inp_node.name in done:
# Already visited.
continue
elem_stack.append(inp)
closure_elements.append(inp.name)
if isinstance(inp, ops.Tensor) and inp.dtype._is_ref_dtype: # pylint: disable=protected-access
ref_tensor_names.add(inp.name)
# Now that we have traversed the transitive closure and obtained the
# node-input map, we can topologically sort them.
sorted_nodes = []
stack = []
for node in node_inputs:
if not node_inputs[node]:
stack.append(node)
for node in stack:
del node_inputs[node]
while stack:
curr_node = stack.pop()
sorted_nodes.append(curr_node)
# Iterate through the node-input map and remove the child.
pushes = []
for node in node_inputs:
if curr_node in node_inputs[node]:
node_inputs[node].remove(curr_node)
if not node_inputs[node]:
pushes.append(node)
# Delete new pushes from node-input map.
for node in pushes:
del node_inputs[node]
stack.extend(pushes)
return sorted_nodes, closure_elements, ref_tensor_names
def sorted_nodes(self):
"""Get a topologically-sorted list of node names of the stepper.
These are the names of the nodes (i.e., not Tensors) in the transitive
closure of the stepper, in a topologically-sorted order.
Returns:
(list of str): Sorted transitive inputs to the fetch of the stepper
instance. The fetch itself is included in the list.
"""
return self._sorted_nodes
def closure_elements(self):
"""Get a name list of the graph elements of the stepper.
Returns:
(list of str): names of the graph elements (i.e., nodes and tensors) in
the transitive closure of the stepper, in a random order.
"""
return self._closure_elements
def output_slots_in_closure(self, node_name):
"""Get the output tensors in the transitive closure from node.
Args:
node_name: (str) Name of the node in question.
Returns:
(list of int) Output slots of the output tensors of the node that are in
the transitive closure of the stepper.
"""
node = self._sess.graph.as_graph_element(node_name)
tensor_slots = []
for i, _ in enumerate(node.outputs):
tensor_name = node_name + ":%d" % i
if tensor_name in self._closure_elements:
tensor_slots.append(i)
return tensor_slots
def is_feedable(self, name):
"""Determine if a graph element if feedable.
Args:
name: (str) name of the graph element (Tensor or Operation)
Returns:
(bool) whether the graph element is feedable.
"""
if not isinstance(name, six.string_types):
raise TypeError("Expected type str; got type %s" % type(name))
elem = self._sess.graph.as_graph_element(name)
return self._sess.graph.is_feedable(elem)
def override_tensor(self, tensor_name, overriding_val):
"""Override the value of a tensor.
Args:
tensor_name: (str) Name of the tensor to override.
overriding_val: (numpy.ndarray) Overriding tensor value.
Raises:
ValueError: If tensor_name does not correspond to a tensor in the input
tree to the fetched graph element of this stepper instance.
"""
if not isinstance(tensor_name, six.string_types):
raise TypeError("Expected type str; got type %s" % type(tensor_name))
node_name = self._get_node_name(tensor_name)
if node_name not in self._transitive_closure_set:
raise ValueError(
"Cannot override tensor \"%s\" because it does not exist in the "
"input tree to the fetch \"%s\"" %
(tensor_name, repr(self._fetch_names)))
self._override_tensors[tensor_name] = overriding_val
# Invalidate cache by tracing outputs.
self._invalidate_transitively_outgoing_cache(tensor_name)
def remove_override(self, tensor_name):
"""Remove the overriding value on a tensor.
Args:
tensor_name: (str) name of the tensor to remove the overriding value
from.
Raises:
ValueError: If no overriding value exists for tensor_name.
"""
if tensor_name not in self._override_tensors:
raise ValueError("No overriding value exists for tensor \"%s\"." %
tensor_name)
del self._override_tensors[tensor_name]
# Invalidate cache by tracing outputs.
self._invalidate_transitively_outgoing_cache(tensor_name)
def last_feed_types(self):
"""Obtain information about the feed in the last cont() call.
Returns:
(dict) A dict mapping tensor names to feed types.
"""
return self._last_feed_types
def cont(self,
target,
use_tensor_handles=True,
use_dumped_intermediates=True,
use_overrides=True,
invalidate_from_updated_variables=False,
restore_variable_values=False):
"""Continue till the completion of the specified target tensor.
Args:
target: A single fetched Tensor or Op, or a name (str) representing the
Tensor or Op. In the case of a name str, the graph will be searched
to find the corresponding Tensor or Op.
# TODO(cais): Support multiple fetches as in Session.run() interface.
use_tensor_handles: (bool) Whether this cont() run will use cached tensor
handles to avoid recomputation. Default: True.
use_dumped_intermediates: (bool) Whether this cont() call will use dumped
intermediate tensors to avoid recomputation.
use_overrides: (bool) Whether the overriding tensor values supplied by
the client are to be used in this cont() call. Default: True.
invalidate_from_updated_variables: (bool) Whether to invalidate the
tensor handles and intermediate tensor handles affected by the
Variable updates that happen in this cont() call.
restore_variable_values: (bool) Whether the old values of the variables
(before any cont() calls in this object) are to be restored.
Returns:
Value from Session.run() of the target.
Raises:
ValueError: If the target is specified as a string and the string does
not correspond to any tensors in the Session graph.
Or if the target of this cont() is not in the input list of the Stepper
object's target.
Or if target is a Placeholder.
"""
self._last_feed_types = {}
if isinstance(target, six.string_types):
# Fetch target is a string. Assume it is the name of the Tensor or Op and
# will attempt to find it in the Session's graph.
target_name = target
else:
target_name = target.name
graph_element = self._sess.graph.as_graph_element(target_name)
# Any additional tensor handles to obtain in this cont() action.
additional_handle_requests = []
if (isinstance(graph_element, ops.Tensor) and
graph_element.op.type == "Placeholder"):
self._last_feed_types[graph_element.name] = self.FEED_TYPE_CLIENT
return self._client_feed_dict[graph_element.name]
elif (isinstance(graph_element, ops.Operation) and
graph_element.type == "Placeholder"):
tensor_name = graph_element.name + ":0"
self._last_feed_types[tensor_name] = self.FEED_TYPE_CLIENT
return self._client_feed_dict[tensor_name]
if isinstance(graph_element, ops.Operation) and graph_element.outputs:
# Check if this op has any output tensors that also fall into this
# stepper's transitive closure.
node_outputs = [
output.name for output in graph_element.outputs
if output.name in self._closure_elements
]
if node_outputs:
# The target is an op with at least one output within the transitive
# closure. The cont() action will amount to using the 0-th
# output Tensor as the target, as well as obtaining handles to it
# and to the rest of the outputs tensors in the transitive closure
# (if any).
target_name = node_outputs[0]
additional_handle_requests = node_outputs[1:]
# Verify that the target is in the transitive closure of the stepper's
# fetch.
target_node_name = self._get_node_name(target_name)
if target_node_name not in self._transitive_closure_set:
raise ValueError(
"Target \"%s\" is not in the transitive closure for the fetch of the "
"stepper: \"%s\"." % (target_name, repr(self._fetch_names)))
# Check if a cached tensor handle can be used on the fetch directly.
if use_tensor_handles and target_name in self._tensor_handles:
self._last_feed_types[target_name] = self.FEED_TYPE_HANDLE
return self._tensor_handles[target_name].eval()
# Check if a dumped intermediate tensor can be used on the fetch directly.
if (use_dumped_intermediates and
target_name in self._dumped_intermediate_tensors):
self._last_feed_types[target_name] = self.FEED_TYPE_DUMPED_INTERMEDIATE
return self._dumped_intermediate_tensors[target_name].get_tensor()
# Check if an overriding tensor value can be used directly.
if use_overrides and target_name in self._override_tensors:
# Override is available. Return the value right away.
self._last_feed_types[target_name] = self.FEED_TYPE_OVERRIDE
return self._override_tensors[target_name]
# Keep track of which variables are restored in this cont() call.
restored_variables = set()
# Keep track of which variables are "touched" (i.e., possibly updated) in
# this cont() call.
self._last_updated = set()
# =========================================================================
# Use a non-recursive method to trace the inputs from the node and set up
# the feeds.
feeds = {} # The feeds to be used in the Session.run() call.
fetched = self._sess.graph.as_graph_element(target_name)
elem_stack = [fetched]
done = set()
while elem_stack:
curr_elem = elem_stack.pop()
curr_node = self._get_node(curr_elem)
done.add(curr_node.name)
non_control_inputs = [inp for inp in curr_node.inputs]
control_inputs = [inp for inp in curr_node.control_inputs]
all_inputs = set(non_control_inputs + control_inputs)
# Iterate through the (non-control) inputs.
for inp in all_inputs:
# Determine whether the input is feedable. Reference-type tensors,
# e.g., Variables, should not be fed, because they can change.
if isinstance(inp, ops.Tensor):
is_inp_ref = inp.dtype._is_ref_dtype # pylint: disable=protected-access
can_feed = self._sess.graph.is_feedable(inp) and not is_inp_ref
else:
is_inp_ref = False
can_feed = False
if (restore_variable_values and inp.name in self._dirty_variables and
inp.name not in restored_variables and
inp.name not in self._last_updated):
# Do not restore Variables touched or restored previously in this
# cont() call.
initializer_op = self._variable_initializers[inp.name]
initial_value_tensor = self._variable_initial_values[inp.name]
self._sess.run(initializer_op,
feed_dict={
initial_value_tensor:
self._cached_variable_values[inp.name]
})
# Mark the variable as restored.
restored_variables.add(inp.name)
# Determine if this is a reference-type input from a variable, and
# the recipient node is not Identity. In that case, the Variable
# needs to be marked as dirty and its current value recorded, due to
# the fact that the receiving op may mutate the value of the Variable.
if (is_inp_ref and inp.op.type in ["Variable", "VariableV2"] and
curr_node.type != "Identity"):
# Mark the variable as dirty.
self._last_updated.add(inp.name)
# Obtain the old value of the variable and cache it.
if inp.name not in self._cached_variable_values:
old_value = self._sess.run(inp)
self._cached_variable_values[inp.name] = old_value
# N.B.: The order of the logical branches matters. For example,
# _client_feed_dict comes after _tensor_handles, so that tensor
# handles stored in cont() calls can override the original client
        # feeds. Also, _override_tensors comes first, so that a manual
        # override, if present, always takes effect.
if use_overrides and can_feed and inp.name in self._override_tensors:
# Use client-supplied overriding tensor value.
feeds[inp] = self._override_tensors[inp.name]
self._last_feed_types[inp.name] = self.FEED_TYPE_OVERRIDE
elif (can_feed and inp not in feeds and
use_tensor_handles and inp.name in self._tensor_handles):
# Tensor handle found in cache.
feeds[inp] = self._tensor_handles[inp.name].eval()
self._last_feed_types[inp.name] = self.FEED_TYPE_HANDLE
elif (can_feed and inp not in feeds and
use_dumped_intermediates and
inp.name in self._dumped_intermediate_tensors):
# Dumped intermediate Tensor found.
feeds[inp] = self._dumped_intermediate_tensors[inp.name].get_tensor()
self._last_feed_types[inp.name] = self.FEED_TYPE_DUMPED_INTERMEDIATE
elif inp.name in self._client_feed_dict:
# This input is available in the client feed_dict.
feeds[inp] = self._client_feed_dict[inp.name]
self._last_feed_types[inp.name] = self.FEED_TYPE_CLIENT
else:
# There is no feed available for this input. So keep tracing its
# input(s).
inp_node = self._get_node(inp)
if inp_node.name in done:
# Already visited.
continue
elem_stack.append(inp)
done.add(inp_node.name)
# =========================================================================
if self._last_updated:
self._dirty_variables.update(self._last_updated)
for variable in restored_variables:
self._dirty_variables.remove(variable)
(dump_path,
run_options) = self._prepare_cont_call_dump_path_and_run_options()
if isinstance(fetched, ops.Operation):
      # The fetch is an Operation: it will not produce a tensor handle.
self._sess.run(fetched, feed_dict=feeds, options=run_options)
return_value = None
else:
# This is a Tensor: Will get tensor handle and cache it.
# Will also get the additional requested tensor handles (if any).
tensors_to_get_handles_for = [fetched]
handle_names = [target_name]
tensors_to_get_handles_for.extend([
self._sess.graph.as_graph_element(h)
for h in additional_handle_requests
])
handle_names.extend(additional_handle_requests)
handles = self._sess.run(
[session_ops.get_session_handle(tensor) for tensor in
tensors_to_get_handles_for],
feed_dict=feeds,
options=run_options)
for handle_name, handle in zip(handle_names, handles):
self._tensor_handles[handle_name] = handle
return_value = self._tensor_handles[target_name].eval()
self._load_dumped_intermediate_tensors(dump_path, target_name)
if invalidate_from_updated_variables:
# Invalidate caches at the end.
for last_updated_variable in self._last_updated:
self._invalidate_transitively_outgoing_cache(last_updated_variable)
return return_value
def _prepare_cont_call_dump_path_and_run_options(self):
"""Prepare the dump path and RunOptions for next cont() call.
Returns:
dump_path: (str) Directory path to which the intermediate tensor will be
dumped.
run_options: (config_pb2.RunOptions) The RunOptions containing the tensor
watch options for this graph.
"""
run_options = config_pb2.RunOptions()
dump_path = self._cont_call_dump_path()
for element_name in self._closure_elements:
if ":" in element_name:
debug_utils.add_debug_tensor_watch(
run_options,
debug_data.get_node_name(element_name),
output_slot=debug_data.get_output_slot(element_name),
debug_urls=["file://" + dump_path])
return dump_path, run_options
def _cont_call_dump_path(self):
return os.path.join(self._dump_session_root,
"cont_%d" % int(time.time() * 1e6))
def _load_dumped_intermediate_tensors(self, dump_path, target_name):
dump_dir = debug_data.DebugDumpDir(dump_path, validate=False)
for dump in dump_dir.dumped_tensor_data:
if (dump.tensor_name not in self._ref_tensor_names and
dump.tensor_name not in self._tensor_handles and
dump.tensor_name not in self._override_tensors and
dump.tensor_name != target_name):
self._dumped_intermediate_tensors[dump.tensor_name] = dump
def _get_node_name(self, graph_element_name):
return graph_element_name.split(":")[0]
def _invalidate_transitively_outgoing_cache(self, source_element):
"""Invalidate the cached tensor handles by tracing output.
This method is used to invalidate caches such as cached TensorHandles
and intermediate tensor values when Variable mutation happens or when
client overrides tensor values.
Uses non-recursive implementation to avoid stack overflow on deep networks.
Args:
source_element: The source graph element (e.g., a Variable output slot)
to trace the output from.
"""
if not self._tensor_handles and not self._dumped_intermediate_tensors:
return
# First, use cached invalidation paths to eliminate some cached tensor
# handles and intermediate tensors.
to_delete_handles = []
for handle_name in self._tensor_handles:
if (handle_name in self._cached_invalidation_path and
source_element in self._cached_invalidation_path[handle_name]):
to_delete_handles.append(handle_name)
for handle_name in to_delete_handles:
del self._tensor_handles[handle_name]
to_delete_intermediates = []
for intm_tensor_name in self._dumped_intermediate_tensors:
if (intm_tensor_name in self._cached_invalidation_path and
source_element in self._cached_invalidation_path[intm_tensor_name]):
to_delete_intermediates.append(intm_tensor_name)
for intermediate in to_delete_intermediates:
del self._dumped_intermediate_tensors[intermediate]
if not self._tensor_handles and not self._dumped_intermediate_tensors:
return
stack = [source_element]
done = set()
while stack:
curr_element = stack.pop()
done.add(curr_element)
if (curr_element in self._tensor_handles or
curr_element in self._dumped_intermediate_tensors):
# Cache the invalidation path for potential future use.
if curr_element not in self._cached_invalidation_path:
self._cached_invalidation_path[curr_element] = set([source_element])
else:
self._cached_invalidation_path[curr_element].add(source_element)
if curr_element in self._tensor_handles:
del self._tensor_handles[curr_element]
else:
del self._dumped_intermediate_tensors[curr_element]
targets = self._output_targets.get(curr_element, [])
for target in targets:
if target in done:
continue
else:
stack.append(target)
def finalize(self):
"""Run the final fetch(es).
Restore the dirty variables; ignore the client-supplied overriding tensor
values.
Returns:
The same return value as self.cont() as called on the final fetch.
"""
self.restore_variable_values()
return self._sess.run(self._fetches, feed_dict=self._client_feed_dict)
def restore_variable_values(self):
"""Restore variables to the initial values.
"Initial value" refers to the value when this NodeStepper instance was
first constructed.
"""
for var_name in self._dirty_variables:
self._sess.run(self._variable_initializers[var_name],
feed_dict={
self._variable_initial_values[var_name]:
self._cached_variable_values[var_name]
})
def handle_names(self):
"""Return names of the TensorHandles that the debugger is holding.
Returns:
(list of str) Name of the tensors for which TensorHandle is available.
"""
return [name for name in self._tensor_handles]
def handle_node_names(self):
"""Get list of names of the nodes for which handles are available.
Returns:
(set of str) List of names of the nodes.
"""
return set([self._get_node_name(name) for name in self._tensor_handles])
def intermediate_tensor_names(self):
"""Get list of the names of the Tensors for which dumps are available.
Returns:
(list of str) List of the names of the Tensors for which intermediate
dumps are available.
"""
return self._dumped_intermediate_tensors.keys()
def last_updated(self):
"""Get the names of the variables updated in the last cont() call.
Returns:
A set of the variable names updated in the previous cont() call.
If no cont() call has occurred before, returns None.
"""
return self._last_updated
def dirty_variables(self):
"""Get the set of variables that are currently "dirty".
"dirty" means:
previous cont() calls have updated the value of the Variable,
and the Variable's old value (the value before any cont() calls
happened) was not restored.
Returns:
(set) A set of dirty variables.
"""
return self._dirty_variables
def is_placeholder(self, graph_element_name):
"""Check whether a graph element is a Placeholder, by name.
Args:
graph_element_name: (str) Name of the tensor or op to be tested.
Returns:
(bool) Whether the graph element of the specified name is a Placeholder
op or the output Tensor of a Placeholder op.
Raises:
ValueError: If graph_element_name is not in the transitive closure of the
stepper instance.
"""
node_name = self._get_node_name(graph_element_name)
if node_name not in self.sorted_nodes():
raise ValueError(
"%s is not in the transitive closure of this NodeStepper "
"instance" % graph_element_name)
graph_element = self._sess.graph.as_graph_element(graph_element_name)
if not isinstance(graph_element, ops.Operation):
graph_element = graph_element.op
return graph_element.type == "Placeholder"
def placeholders(self):
"""Get the list of Placeholder Tensors in the transitive closure.
Returns:
(list of str) A list of Placeholder Tensors or ops in the transitive
closure.
"""
placeholders = []
for item in self.sorted_nodes():
if self.is_placeholder(item):
placeholders.append(item)
return placeholders
def get_tensor_value(self, tensor_name):
"""Get the value of a tensor that the stepper has access to.
Args:
tensor_name: (str) Name of the tensor.
Returns:
Value of the tensor, from overriding values or cached tensor handles.
Raises:
ValueError: If the value is not available as an overriding value
or through a TensorHandle.
"""
if self.is_placeholder(tensor_name):
if ":" not in tensor_name:
tensor_name += ":0"
return self._client_feed_dict[tensor_name]
elif tensor_name in self._override_tensors:
return self._override_tensors[tensor_name]
elif tensor_name in self._tensor_handles:
return self._tensor_handles[tensor_name].eval()
elif tensor_name in self._dumped_intermediate_tensors:
return self._dumped_intermediate_tensors[tensor_name].get_tensor()
else:
raise ValueError(
"This stepper instance does not have access to the value of "
"tensor \"%s\"" % tensor_name)
def override_names(self):
"""Return names of the TensorHandles that the debugger is holding.
Returns:
(list of str) Name of the tensor for which overriding tensor values are
available.
"""
return [name for name in self._override_tensors]
def _get_node(self, element):
"""Get the node of a graph element.
Args:
element: A graph element (Op, Tensor or Node)
Returns:
The node associated with element in the graph.
"""
node_name, _ = debug_data.parse_node_or_tensor_name(element.name)
return self._sess.graph.as_graph_element(node_name)
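# A cont()/override/finalize round-trip sketch following the class
# docstring's example; not part of the original module. Assumes a TF-1.x
# graph-mode session; tensor names match the graph built below.
#
#   import tensorflow as tf
#   a = tf.Variable(1.0, name="a")
#   b = tf.Variable(2.0, name="b")
#   c = tf.add(a, b, name="c")
#   d = tf.multiply(a, c, name="d")
#   sess = tf.Session()
#   sess.run(tf.global_variables_initializer())
#   with NodeStepper(sess, d) as stepper:
#       print(stepper.cont("c:0"))            # 3.0; handle to c:0 is cached
#       stepper.override_tensor("c:0", 10.0)  # invalidates downstream caches
#       print(stepper.cont("d:0"))            # 10.0, via the override on c:0
#       print(stepper.finalize())             # 3.0; finalize ignores overrides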
|
{
"content_hash": "73b71879612d68b64c74e025533c3668",
"timestamp": "",
"source": "github",
"line_count": 951,
"max_line_length": 109,
"avg_line_length": 36.44689800210305,
"alnum_prop": 0.6593000778973486,
"repo_name": "kchodorow/tensorflow",
"id": "ab500f52e358e521c62345797030dc706917e75e",
"size": "35350",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tensorflow/python/debug/stepper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "6963"
},
{
"name": "C",
"bytes": "170011"
},
{
"name": "C++",
"bytes": "20341504"
},
{
"name": "CMake",
"bytes": "118925"
},
{
"name": "CSS",
"bytes": "774"
},
{
"name": "Go",
"bytes": "103930"
},
{
"name": "HTML",
"bytes": "539783"
},
{
"name": "Java",
"bytes": "241385"
},
{
"name": "JavaScript",
"bytes": "13406"
},
{
"name": "Jupyter Notebook",
"bytes": "1833646"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "31823"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "64592"
},
{
"name": "Protocol Buffer",
"bytes": "183131"
},
{
"name": "Python",
"bytes": "16539392"
},
{
"name": "Shell",
"bytes": "318751"
},
{
"name": "TypeScript",
"bytes": "761732"
}
],
"symlink_target": ""
}
|
"""
TANTIGEN: Tumor T-cell Antigen Database from Dana Farber CVC
http://cvc.dfci.harvard.edu/tadb/index.html
"""
from os.path import join
import pandas as pd
import numpy as np
from static_data import DATA_DIR
from common import bad_amino_acids
from features import make_unlabeled_ngram_dataset_from_args
# NOTE: _load_dataframe below calls make_alphabet_transformer, which the
# original file never imported; the sibling module providing it is assumed.
from reduced_alphabet import make_alphabet_transformer
def _load_dataframe(
        path,
        epitope_column_name,
        mhc_class=None,
        hla_type=None,
        exclude_hla_type=None,  # regex pattern, e.g. '(HLA-A2)|(HLA-A\*02)'
        peptide_length=None,
        reduced_alphabet=None,
        nrows=None):
    df = pd.read_csv(path, skipinitialspace=True, nrows=nrows)
epitopes = df[epitope_column_name]
hla = df['HLA allele']
mask = ~(epitopes.str.contains(bad_amino_acids, na=False).astype('bool'))
if mhc_class == 1:
a = hla.str.startswith('A')
b = hla.str.startswith('B')
c = hla.str.startswith('C')
mask &= (a | b | c)
elif mhc_class == 2:
mask &= hla.str.startswith('D')
if hla_type:
mask &= hla.str.contains(hla_type, na=False).astype('bool')
if exclude_hla_type:
mask &= ~(hla.str.contains(exclude_hla_type, na=True).astype('bool'))
if peptide_length:
mask &= epitopes.str.len() == peptide_length
df = df[mask]
if reduced_alphabet:
epitopes = df[epitope_column_name]
df[epitope_column_name] = \
epitopes.map(make_alphabet_transformer(reduced_alphabet))
return df
def load_tcell(*args, **kwargs):
"""
Return a dataframe with accession IDs, peptide sequence, and MHC allele
for T-cell responsive tumor antigens
"""
tcell_path = join(DATA_DIR, 'tantigen_tcell.csv')
return _load_dataframe(tcell_path, 'Epitope sequence', *args, **kwargs)
def load_tcell_set(*args, **kwargs):
df = load_tcell(*args, **kwargs)
return set(df['Epitope sequence'])
def load_tcell_ngrams(*args, **kwargs):
return make_unlabeled_ngram_dataset_from_args(
load_tcell_set, *args, **kwargs)
def load_mhc(*args, **kwargs):
"""
Return a dataframe with accession IDs, peptide sequence, and MHC allele
for MHC-binding tumor antigens
"""
mhc_path = join(DATA_DIR, 'tantigen_mhc.csv')
return _load_dataframe(mhc_path, 'Ligand sequence', *args, **kwargs)
def load_mhc_set(*args, **kwargs):
df = load_mhc(*args, **kwargs)
return set(df['Ligand sequence'])
def load_mhc_ngrams(*args, **kwargs):
return make_unlabeled_ngram_dataset_from_args(load_mhc_set, *args, **kwargs)
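# A filtering sketch for the T-cell table; not part of the original module.
# Assumes DATA_DIR contains the tantigen_tcell.csv loaded above.
#
#   df = load_tcell(mhc_class=1, peptide_length=9,
#                   exclude_hla_type='(HLA-A2)|(HLA-A\*02)')
#   peptides = load_tcell_set(mhc_class=1, peptide_length=9)
#   print(len(df), len(peptides))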
|
{
"content_hash": "f161a04f075a24c7dfd8d19264a2b1a2",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 80,
"avg_line_length": 33.05194805194805,
"alnum_prop": 0.6385068762278978,
"repo_name": "cpcloud/pepdata",
"id": "3fcf8b1d956527baa3fb05c5765592983f1c624a",
"size": "3145",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pepdata/tantigen.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "114025"
}
],
"symlink_target": ""
}
|
"""Constants for use in the keystone.conf package.
These constants are shared by more than one module in the keystone.conf
package.
"""
_DEFAULT_AUTH_METHODS = ['external', 'password', 'token', 'oauth1', 'mapped']
_CERTFILE = '/etc/keystone/ssl/certs/signing_cert.pem'
_KEYFILE = '/etc/keystone/ssl/private/signing_key.pem'
|
{
"content_hash": "f796c7e53237e32fccd403945d3b3cfe",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 77,
"avg_line_length": 27.416666666666668,
"alnum_prop": 0.723404255319149,
"repo_name": "rajalokan/keystone",
"id": "8304e769a4eda03b68e508573da2d2d21f1134f8",
"size": "874",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "keystone/conf/constants.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "665"
},
{
"name": "Python",
"bytes": "3865941"
},
{
"name": "Shell",
"bytes": "4861"
}
],
"symlink_target": ""
}
|
from neutron_lib.api.definitions import portbindings
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib import constants as n_const
from neutron_lib.plugins import constants as plugin_constants
from neutron_lib.plugins import directory
from oslo_log import log as logging
from sqlalchemy import or_
from neutron.common import constants as l3_consts
from neutron.common import utils as n_utils
from neutron.db import agentschedulers_db
from neutron.db import l3_agentschedulers_db as l3agent_sch_db
from neutron.db import models_v2
from neutron.objects import l3agent as rb_obj
from neutron.plugins.ml2 import db as ml2_db
from neutron.plugins.ml2 import models as ml2_models
LOG = logging.getLogger(__name__)
class L3_DVRsch_db_mixin(l3agent_sch_db.L3AgentSchedulerDbMixin):
"""Mixin class for L3 DVR scheduler.
DVR currently supports the following use cases:
- East/West (E/W) traffic between VMs: this is handled in a
distributed manner across Compute Nodes without a centralized element.
This includes E/W traffic between VMs on the same Compute Node.
- North/South traffic for Floating IPs (FIP N/S): this is supported on the
distributed routers on Compute Nodes when there is external network
connectivity and on centralized nodes when the port is not bound or when
the agent is configured as 'dvr_no_external'.
- North/South traffic for SNAT (SNAT N/S): this is supported via a
centralized element that handles the SNAT traffic.
    To support these use cases, DVR routers rely on an L3 agent that runs on a
    central node (also known as a Network Node or Service Node), as well as L3
    agents that run individually on each Compute Node of an OpenStack cloud.
Each L3 agent creates namespaces to route traffic according to the use
cases outlined above. The mechanism adopted for creating and managing
these namespaces is via (Router, Agent) binding and Scheduling in general.
The main difference between distributed routers and centralized ones is
that in the distributed case, multiple bindings will exist, one for each
of the agents participating in the routed topology for the specific router.
These bindings are created in the following circumstances:
- A subnet is added to a router via router-interface-add, and that subnet
has running VM's deployed in it. A binding will be created between the
router and any L3 agent whose Compute Node is hosting the VM(s).
- An external gateway is set to a router via router-gateway-set. A binding
will be created between the router and the L3 agent running centrally
on the Network Node.
Therefore, any time a router operation occurs (create, update or delete),
scheduling will determine whether the router needs to be associated to an
L3 agent, just like a regular centralized router, with the difference that,
in the distributed case, the bindings required are established based on
the state of the router and the Compute Nodes.
"""
def dvr_handle_new_service_port(self, context, port,
dest_host=None, unbound_migrate=False):
"""Handle new dvr service port creation.
        When a new DVR service port is created, this function will
        schedule a DVR router to the new compute node if needed and notify
        the L3 agent on that node.
The 'dest_host' will provide the destination host of the port in
case of service port migration.
If an unbound port migrates and becomes a bound port, send
notification to the snat_agents and to the bound host.
"""
port_host = dest_host or port[portbindings.HOST_ID]
l3_agent_on_host = (self.get_l3_agents(
context, filters={'host': [port_host]}) or [None])[0]
if not l3_agent_on_host:
return
if dest_host:
# Make sure we create the floatingip agent gateway port
# for the destination node if fip is associated with this
# fixed port
l3plugin = directory.get_plugin(plugin_constants.L3)
(
l3plugin.
check_for_fip_and_create_agent_gw_port_on_host_if_not_exists(
context, port, dest_host))
subnet_ids = [ip['subnet_id'] for ip in port['fixed_ips']]
router_ids = self.get_dvr_routers_by_subnet_ids(context, subnet_ids)
if not router_ids:
return
agent_port_host_match = False
if unbound_migrate:
            # This might be a case where the port is migrating from unbound
            # to bound. In that case, forward the notification to the SNAT
            # nodes hosting the routers.
snat_agent_list = self.get_dvr_snat_agent_list(context)
for agent in snat_agent_list:
LOG.debug('DVR: Handle new unbound migration port, '
'host %(host)s, router_ids %(router_ids)s',
{'host': agent.host, 'router_ids': router_ids})
self.l3_rpc_notifier.routers_updated_on_host(
context, router_ids, agent.host)
if agent.host == port_host:
agent_port_host_match = True
if not agent_port_host_match:
LOG.debug('DVR: Handle new service port, host %(host)s, '
'router ids %(router_ids)s',
{'host': port_host, 'router_ids': router_ids})
self.l3_rpc_notifier.routers_updated_on_host(
context, router_ids, port_host)
def get_dvr_snat_agent_list(self, context):
agent_filters = {'agent_modes': [n_const.L3_AGENT_MODE_DVR_SNAT]}
state = agentschedulers_db.get_admin_state_up_filter()
return self.get_l3_agents(context, active=state,
filters=agent_filters)
def get_dvr_routers_by_subnet_ids(self, context, subnet_ids):
"""Gets the dvr routers on vmport subnets."""
if not subnet_ids:
return set()
router_ids = set()
filter_sub = {'fixed_ips': {'subnet_id': subnet_ids},
'device_owner':
[n_const.DEVICE_OWNER_DVR_INTERFACE]}
subnet_ports = self._core_plugin.get_ports(
context, filters=filter_sub)
for subnet_port in subnet_ports:
router_ids.add(subnet_port['device_id'])
return router_ids
def get_subnet_ids_on_router(self, context, router_id):
"""Return subnet IDs for interfaces attached to the given router."""
subnet_ids = set()
filter_rtr = {'device_id': [router_id]}
int_ports = self._core_plugin.get_ports(context, filters=filter_rtr)
for int_port in int_ports:
int_ips = int_port['fixed_ips']
if int_ips:
int_subnet = int_ips[0]['subnet_id']
subnet_ids.add(int_subnet)
else:
LOG.debug('DVR: Could not find a subnet id '
'for router %s', router_id)
return subnet_ids
def get_dvr_routers_to_remove(self, context, deleted_port):
"""Returns info about which routers should be removed
In case dvr serviceable port was deleted we need to check
if any dvr routers should be removed from l3 agent on port's host
"""
if not n_utils.is_dvr_serviced(deleted_port['device_owner']):
return []
admin_context = context.elevated()
port_host = deleted_port[portbindings.HOST_ID]
subnet_ids = [ip['subnet_id'] for ip in deleted_port['fixed_ips']]
router_ids = self.get_dvr_routers_by_subnet_ids(admin_context,
subnet_ids)
if not router_ids:
LOG.debug('No DVR routers for this DVR port %(port)s '
'on host %(host)s', {'port': deleted_port['id'],
'host': port_host})
return []
agent = self._get_agent_by_type_and_host(
context, n_const.AGENT_TYPE_L3, port_host)
removed_router_info = []
for router_id in router_ids:
if rb_obj.RouterL3AgentBinding.objects_exist(context,
router_id=router_id,
l3_agent_id=agent.id):
# not removing from the agent hosting SNAT for the router
continue
subnet_ids = self.get_subnet_ids_on_router(admin_context,
router_id)
if self._check_dvr_serviceable_ports_on_host(
admin_context, port_host, subnet_ids):
continue
filter_rtr = {'device_id': [router_id],
'device_owner':
[n_const.DEVICE_OWNER_DVR_INTERFACE]}
int_ports = self._core_plugin.get_ports(
admin_context, filters=filter_rtr)
for port in int_ports:
dvr_binding = (ml2_db.
get_distributed_port_binding_by_host(
context, port['id'], port_host))
if dvr_binding:
# unbind this port from router
dvr_binding['router_id'] = None
dvr_binding.update(dvr_binding)
info = {'router_id': router_id, 'host': port_host,
'agent_id': str(agent.id)}
removed_router_info.append(info)
LOG.debug('Router %(router_id)s on host %(host)s to be deleted',
info)
return removed_router_info
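    # Illustrative shape of the list returned above (values are hypothetical):
    #   [{'router_id': '<router uuid>', 'host': 'compute-1',
    #     'agent_id': '<l3 agent uuid>'}]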
def _get_active_l3_agent_routers_sync_data(self, context, host, agent,
router_ids):
if n_utils.is_extension_supported(self, n_const.L3_HA_MODE_EXT_ALIAS):
return self.get_ha_sync_data_for_host(context, host, agent,
router_ids=router_ids,
active=True)
return self._get_dvr_sync_data(context, host, agent,
router_ids=router_ids, active=True)
def get_hosts_to_notify(self, context, router_id):
"""Returns all hosts to send notification about router update"""
hosts = super(L3_DVRsch_db_mixin, self).get_hosts_to_notify(
context, router_id)
router = self.get_router(context, router_id)
if router.get('distributed', False):
dvr_hosts = self._get_dvr_hosts_for_router(context, router_id)
dvr_hosts = set(dvr_hosts) - set(hosts)
state = agentschedulers_db.get_admin_state_up_filter()
agents = self.get_l3_agents(context, active=state,
filters={'host': dvr_hosts})
hosts += [a.host for a in agents]
return hosts
def _get_dvr_hosts_for_router(self, context, router_id):
"""Get a list of hosts where specified DVR router should be hosted
It will first get IDs of all subnets connected to the router and then
get a set of hosts where all dvr serviceable ports on those subnets
are bound
"""
subnet_ids = self.get_subnet_ids_on_router(context, router_id)
hosts = self._get_dvr_hosts_for_subnets(context, subnet_ids)
LOG.debug('Hosts for router %s: %s', router_id, hosts)
return hosts
def _get_dvr_hosts_for_subnets(self, context, subnet_ids):
"""Get a list of hosts with DVR servicable ports on subnet_ids."""
Binding = ml2_models.PortBinding
Port = models_v2.Port
IPAllocation = models_v2.IPAllocation
query = context.session.query(Binding.host).distinct()
query = query.join(Binding.port)
query = query.join(Port.fixed_ips)
query = query.filter(IPAllocation.subnet_id.in_(subnet_ids))
owner_filter = or_(
Port.device_owner.startswith(n_const.DEVICE_OWNER_COMPUTE_PREFIX),
Port.device_owner.in_(
n_utils.get_other_dvr_serviced_device_owners()))
query = query.filter(owner_filter)
hosts = [item[0] for item in query]
return hosts
def _get_dvr_subnet_ids_on_host_query(self, context, host):
query = context.session.query(
models_v2.IPAllocation.subnet_id).distinct()
query = query.join(models_v2.IPAllocation.port)
query = query.join(models_v2.Port.port_binding)
query = query.filter(ml2_models.PortBinding.host == host)
owner_filter = or_(
models_v2.Port.device_owner.startswith(
n_const.DEVICE_OWNER_COMPUTE_PREFIX),
models_v2.Port.device_owner.in_(
n_utils.get_other_dvr_serviced_device_owners()))
query = query.filter(owner_filter)
return query
def _get_dvr_router_ids_for_host(self, context, host):
subnet_ids_on_host_query = self._get_dvr_subnet_ids_on_host_query(
context, host)
query = context.session.query(models_v2.Port.device_id).distinct()
query = query.filter(
models_v2.Port.device_owner == n_const.DEVICE_OWNER_DVR_INTERFACE)
query = query.join(models_v2.Port.fixed_ips)
query = query.filter(
models_v2.IPAllocation.subnet_id.in_(subnet_ids_on_host_query))
router_ids = [item[0] for item in query]
LOG.debug('DVR routers on host %s: %s', host, router_ids)
return router_ids
def _get_router_ids_for_agent(self, context, agent_db, router_ids):
result_set = set(super(L3_DVRsch_db_mixin,
self)._get_router_ids_for_agent(
context, agent_db, router_ids))
router_ids = set(router_ids or [])
if router_ids and result_set == router_ids:
# no need for extra dvr checks if requested routers are
# explicitly scheduled to the agent
return list(result_set)
# dvr routers are not explicitly scheduled to agents on hosts with
# dvr serviceable ports, so need special handling
if (self._get_agent_mode(agent_db) in
[n_const.L3_AGENT_MODE_DVR,
l3_consts.L3_AGENT_MODE_DVR_NO_EXTERNAL,
n_const.L3_AGENT_MODE_DVR_SNAT]):
if not router_ids:
result_set |= set(self._get_dvr_router_ids_for_host(
context, agent_db['host']))
else:
for router_id in (router_ids - result_set):
subnet_ids = self.get_subnet_ids_on_router(
context, router_id)
if (subnet_ids and
self._check_dvr_serviceable_ports_on_host(
context, agent_db['host'],
list(subnet_ids))):
result_set.add(router_id)
return list(result_set)
def _check_dvr_serviceable_ports_on_host(self, context, host, subnet_ids):
"""Check for existence of dvr serviceable ports on host
:param context: request context
:param host: host to look ports on
:param subnet_ids: IDs of subnets to look ports on
:return: return True if dvr serviceable port exists on host,
otherwise return False
"""
# db query will return ports for all subnets if subnet_ids is empty,
# so need to check first
if not subnet_ids:
return False
Binding = ml2_models.PortBinding
IPAllocation = models_v2.IPAllocation
Port = models_v2.Port
query = context.session.query(Binding)
query = query.join(Binding.port)
query = query.join(Port.fixed_ips)
query = query.filter(
IPAllocation.subnet_id.in_(subnet_ids))
device_filter = or_(
models_v2.Port.device_owner.startswith(
n_const.DEVICE_OWNER_COMPUTE_PREFIX),
models_v2.Port.device_owner.in_(
n_utils.get_other_dvr_serviced_device_owners()))
query = query.filter(device_filter)
host_filter = or_(
ml2_models.PortBinding.host == host,
ml2_models.PortBinding.profile.contains(host))
query = query.filter(host_filter)
return query.first() is not None
def _dvr_handle_unbound_allowed_addr_pair_add(
plugin, context, port, allowed_address_pair):
plugin.update_arp_entry_for_dvr_service_port(context, port)
def _dvr_handle_unbound_allowed_addr_pair_del(
plugin, context, port, allowed_address_pair):
aa_fixed_ips = plugin._get_allowed_address_pair_fixed_ips(context, port)
if aa_fixed_ips:
plugin.delete_arp_entry_for_dvr_service_port(
context, port, fixed_ips_to_delete=aa_fixed_ips)
def _notify_l3_agent_new_port(resource, event, trigger, **kwargs):
LOG.debug('Received %(resource)s %(event)s', {
'resource': resource,
'event': event})
port = kwargs.get('port')
if not port:
return
if n_utils.is_dvr_serviced(port['device_owner']):
l3plugin = directory.get_plugin(plugin_constants.L3)
context = kwargs['context']
l3plugin.dvr_handle_new_service_port(context, port)
l3plugin.update_arp_entry_for_dvr_service_port(context, port)
def _notify_port_delete(event, resource, trigger, **kwargs):
context = kwargs['context']
port = kwargs['port']
l3plugin = directory.get_plugin(plugin_constants.L3)
if port:
port_host = port.get(portbindings.HOST_ID)
allowed_address_pairs_list = port.get('allowed_address_pairs')
if allowed_address_pairs_list and port_host:
for address_pair in allowed_address_pairs_list:
_dvr_handle_unbound_allowed_addr_pair_del(
l3plugin, context, port, address_pair)
l3plugin.delete_arp_entry_for_dvr_service_port(context, port)
removed_routers = l3plugin.get_dvr_routers_to_remove(context, port)
for info in removed_routers:
l3plugin.l3_rpc_notifier.router_removed_from_agent(
context, info['router_id'], info['host'])
def _notify_l3_agent_port_update(resource, event, trigger, **kwargs):
new_port = kwargs.get('port')
original_port = kwargs.get('original_port')
if new_port and original_port:
l3plugin = directory.get_plugin(plugin_constants.L3)
context = kwargs['context']
is_bound_port_moved = (
original_port[portbindings.HOST_ID] and
original_port[portbindings.HOST_ID] !=
new_port[portbindings.HOST_ID])
if is_bound_port_moved:
removed_routers = l3plugin.get_dvr_routers_to_remove(
context,
original_port)
if removed_routers:
removed_router_args = {
'context': context,
'port': original_port,
'removed_routers': removed_routers,
}
_notify_port_delete(
event, resource, trigger, **removed_router_args)
fips = l3plugin._get_floatingips_by_port_id(
context, port_id=original_port['id'])
fip = fips[0] if fips else None
if fip and not (removed_routers and
fip['router_id'] in removed_routers):
l3plugin.l3_rpc_notifier.routers_updated_on_host(
context, [fip['router_id']],
original_port[portbindings.HOST_ID])
is_new_port_binding_changed = (
new_port[portbindings.HOST_ID] and
(original_port[portbindings.HOST_ID] !=
new_port[portbindings.HOST_ID]))
dest_host = None
new_port_profile = new_port.get(portbindings.PROFILE)
if new_port_profile:
dest_host = new_port_profile.get('migrating_to')
# This check is required to prevent an arp update
# of the allowed_address_pair port.
if new_port_profile.get('original_owner'):
return
# If dest_host is set, then the port profile has changed
# and this port is in migration. The call below will
# pre-create the router on the new host
# No need to check for device_owner since we are scheduling
# the routers without checking for device_owner.
        # If the original_port has no host binding, then this is a
        # migration from unbound to bound.
if (is_new_port_binding_changed or dest_host):
if original_port[portbindings.HOST_ID] is None:
l3plugin.dvr_handle_new_service_port(context, new_port,
unbound_migrate=True)
else:
l3plugin.dvr_handle_new_service_port(context, new_port,
dest_host=dest_host)
l3plugin.update_arp_entry_for_dvr_service_port(
context, new_port)
return
# Check for allowed_address_pairs and port state
new_port_host = new_port.get(portbindings.HOST_ID)
allowed_address_pairs_list = new_port.get('allowed_address_pairs')
if allowed_address_pairs_list and new_port_host:
new_port_state = new_port.get('admin_state_up')
original_port_state = original_port.get('admin_state_up')
if new_port_state:
                # Case where we activate the port from the inactive state,
                # or the same port has additional address_pairs added.
for address_pair in allowed_address_pairs_list:
_dvr_handle_unbound_allowed_addr_pair_add(
l3plugin, context, new_port, address_pair)
return
elif original_port_state:
                # Case where we deactivate the port from the active state.
for address_pair in allowed_address_pairs_list:
_dvr_handle_unbound_allowed_addr_pair_del(
l3plugin, context, original_port, address_pair)
return
is_fixed_ips_changed = (
'fixed_ips' in new_port and
'fixed_ips' in original_port and
new_port['fixed_ips'] != original_port['fixed_ips'])
if kwargs.get('mac_address_updated') or is_fixed_ips_changed:
l3plugin.update_arp_entry_for_dvr_service_port(
context, new_port)
def subscribe():
registry.subscribe(
_notify_l3_agent_port_update, resources.PORT, events.AFTER_UPDATE)
registry.subscribe(
_notify_l3_agent_new_port, resources.PORT, events.AFTER_CREATE)
registry.subscribe(
_notify_port_delete, resources.PORT, events.AFTER_DELETE)
|
{
"content_hash": "f26c8e00a030f1db453709f503fdcb06",
"timestamp": "",
"source": "github",
"line_count": 502,
"max_line_length": 79,
"avg_line_length": 46.211155378486055,
"alnum_prop": 0.5949650831968273,
"repo_name": "eayunstack/neutron",
"id": "a68c758a4f53c1a81b6feaec8ead1a0fe9247a6e",
"size": "23865",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neutron/db/l3_dvrscheduler_db.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "10593193"
},
{
"name": "Shell",
"bytes": "8804"
}
],
"symlink_target": ""
}
|
import os
import subprocess
def execute_command(command):
"""Executes the specified command using a shell"""
print(command)
exitcode = subprocess.call(command, shell=True)
if exitcode != 0:
print("Exit code returned:", exitcode)
exit(exitcode)
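# A shell-free variant sketch (an alternative, not what this script does):
# passing an argument list avoids shell interpolation of the URL, e.g.
#
#   subprocess.call(["wget", "-q", site + file])
#   subprocess.call(["unzip", "-o", file])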
def main():
site = "https://mapcruzin.com/fcc-wireless-shapefiles/"
file = "airports.zip"
execute_command("wget -q " + site + file)
execute_command("unzip -o " + file)
os.unlink(file)
if __name__ == "__main__":
main()
|
{
"content_hash": "f4585f2420ac5130b7c49ca4abd3a3df",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 59,
"avg_line_length": 26.05,
"alnum_prop": 0.6314779270633397,
"repo_name": "mbudiu-vmw/hiero",
"id": "640e96e089cc577a78b348f0ea0bdb05cb972ea6",
"size": "1275",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "data/geo/airports/download.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4105"
},
{
"name": "HTML",
"bytes": "6846"
},
{
"name": "Java",
"bytes": "586139"
},
{
"name": "JavaScript",
"bytes": "1281"
},
{
"name": "Protocol Buffer",
"bytes": "601"
},
{
"name": "Shell",
"bytes": "4969"
},
{
"name": "TypeScript",
"bytes": "150704"
}
],
"symlink_target": ""
}
|
import zmq
context = zmq.Context()
# This is where the weather server sits
frontend = context.socket(zmq.SUB)
frontend.connect("tcp://192.168.55.210:5556")
# This is our public endpoint for subscribers
backend = context.socket(zmq.PUB)
backend.bind("tcp://10.1.1.0:8100")
# Subscribe on everything
frontend.setsockopt(zmq.SUBSCRIBE, '')
# Shunt messages out to our own subscribers
while True:
# Process all parts of the message
message = frontend.recv()
more = frontend.getsockopt(zmq.RCVMORE)
if more:
backend.send(message, zmq.SNDMORE)
else:
backend.send(message) # last message part
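# A minimal downstream-subscriber sketch: "tcp://10.1.1.0:8100" is this
# proxy's own public endpoint from above; everything else is illustrative.
#
#   import zmq
#   ctx = zmq.Context()
#   sub = ctx.socket(zmq.SUB)
#   sub.connect("tcp://10.1.1.0:8100")
#   sub.setsockopt(zmq.SUBSCRIBE, '')
#   while True:
#       print sub.recv()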
|
{
"content_hash": "a8a8ebc965503fd12ebece9cabd213f5",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 50,
"avg_line_length": 26.166666666666668,
"alnum_prop": 0.7117834394904459,
"repo_name": "krattai/noo-ebs",
"id": "249aa176280e0167d79c7c42607ddcc29ca4747e",
"size": "700",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/zeroMQ-guide2/examples/Python/wuproxy.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "ActionScript",
"bytes": "2384"
},
{
"name": "Assembly",
"bytes": "4590201"
},
{
"name": "Awk",
"bytes": "396"
},
{
"name": "Batchfile",
"bytes": "19241"
},
{
"name": "C",
"bytes": "15563482"
},
{
"name": "C#",
"bytes": "265955"
},
{
"name": "C++",
"bytes": "691846"
},
{
"name": "CMake",
"bytes": "104078"
},
{
"name": "CSS",
"bytes": "72772"
},
{
"name": "DTrace",
"bytes": "1258"
},
{
"name": "Erlang",
"bytes": "4424888"
},
{
"name": "GAP",
"bytes": "1517"
},
{
"name": "HTML",
"bytes": "65461"
},
{
"name": "Haxe",
"bytes": "6282"
},
{
"name": "Java",
"bytes": "6899"
},
{
"name": "JavaScript",
"bytes": "494026"
},
{
"name": "Lua",
"bytes": "274783"
},
{
"name": "M4",
"bytes": "107581"
},
{
"name": "Makefile",
"bytes": "143161"
},
{
"name": "NSIS",
"bytes": "27658"
},
{
"name": "Objective-C",
"bytes": "13321"
},
{
"name": "PHP",
"bytes": "43263"
},
{
"name": "PLpgSQL",
"bytes": "80625"
},
{
"name": "Perl",
"bytes": "344546"
},
{
"name": "Python",
"bytes": "500718"
},
{
"name": "QML",
"bytes": "150"
},
{
"name": "QMake",
"bytes": "3028"
},
{
"name": "Ragel",
"bytes": "46210"
},
{
"name": "Roff",
"bytes": "120721"
},
{
"name": "Ruby",
"bytes": "121530"
},
{
"name": "Shell",
"bytes": "293349"
},
{
"name": "TeX",
"bytes": "788237"
},
{
"name": "XSLT",
"bytes": "1459"
},
{
"name": "Yacc",
"bytes": "5139"
}
],
"symlink_target": ""
}
|
"""MSA class to store MSA sequence.
"""
__author__ = 'Wenzhi Mao'
__all__ = ['MSA']
class MSA(object):
"""A MSA class to store sequences indexed by labels."""
def __init__(self, msa, labels=None, **kwargs):
from numpy import dtype, array
if type(msa) == list:
msa = [list(i) for i in msa]
msa = array(msa, dtype='|S1')
if not set(['ndim', 'shape']).issubset(set(dir(msa))):
raise TypeError('MSA must be numpy array or list')
if msa.ndim != 2:
            raise ValueError('MSA must be 2-dimensional.')
(numseq, numres) = msa.shape
        if labels is None:
labels = array(['None'] * numseq, dtype='|S4')
if type(labels) == list:
labels = array(labels)
if len(labels) != numseq:
            raise ValueError(
                'Label count mismatch: {0} sequences but {1} labels'.format(
                    numseq, len(labels)))
self.seq = msa
self.label = labels
self._dict = {}
        for i in xrange(numseq):
            if labels[i] not in self._dict:
                self._dict[labels[i]] = [i]
            else:
                self._dict[labels[i]].append(i)
self.numseq = numseq
self.numres = numres
def __repr__(self):
return "MSA = {0} sequences with {1} residues.".format(self.numseq, self.numres)
def __getitem__(self, index):
from numpy import array
if type(index) == int:
return self.seq[index]
if type(index) == str:
if index in self._dict.keys():
return array([self.seq[i] for i in self._dict[index]])
raise ValueError('Cannot index.')
def __iter__(self):
for i in xrange(self.numseq):
yield self.seq[i]
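if __name__ == '__main__':
    # Minimal usage sketch with made-up data: build a small alignment, then
    # index it by position and by label.
    m = MSA([['A', 'C', 'G'], ['A', 'C', 'T']], labels=['seq1', 'seq2'])
    print m           # MSA = 2 sequences with 3 residues.
    print m[0]        # first sequence as a numpy '|S1' array
    print m['seq2']   # all sequences stored under the label 'seq2'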
|
{
"content_hash": "10878c45548b888d6b4655fdd5487c4f",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 95,
"avg_line_length": 30.627118644067796,
"alnum_prop": 0.5229662423907028,
"repo_name": "wzmao/mbio",
"id": "191d2d2287d1a1d6cdf351e31342242abd05841f",
"size": "1831",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mbio/Sequence/msa.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "204223"
},
{
"name": "Makefile",
"bytes": "483"
},
{
"name": "Python",
"bytes": "191328"
},
{
"name": "Shell",
"bytes": "404"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
result = Building()
result.template = "object/building/poi/shared_tatooine_hutt_assassin_camp_large1.iff"
result.attribute_template_id = -1
result.stfName("poi_n","base_poi_building")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
{
"content_hash": "ccf25ee86debe9c65f613d9ca68e3ab9",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 86,
"avg_line_length": 24.615384615384617,
"alnum_prop": 0.7,
"repo_name": "anhstudios/swganh",
"id": "d4950f6706e3b227a31f8f61a7f33c1b79a32156",
"size": "465",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/building/poi/shared_tatooine_hutt_assassin_camp_large1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
}
|
import proto # type: ignore
__protobuf__ = proto.module(
package="google.cloud.vision.v1p4beta1",
manifest={
"WebDetection",
},
)
class WebDetection(proto.Message):
r"""Relevant information for the image from the Internet.
Attributes:
web_entities (Sequence[google.cloud.vision_v1p4beta1.types.WebDetection.WebEntity]):
Deduced entities from similar images on the
Internet.
full_matching_images (Sequence[google.cloud.vision_v1p4beta1.types.WebDetection.WebImage]):
Fully matching images from the Internet.
Can include resized copies of the query image.
partial_matching_images (Sequence[google.cloud.vision_v1p4beta1.types.WebDetection.WebImage]):
Partial matching images from the Internet.
Those images are similar enough to share some
key-point features. For example an original
image will likely have partial matching for its
crops.
pages_with_matching_images (Sequence[google.cloud.vision_v1p4beta1.types.WebDetection.WebPage]):
Web pages containing the matching images from
the Internet.
visually_similar_images (Sequence[google.cloud.vision_v1p4beta1.types.WebDetection.WebImage]):
The visually similar image results.
best_guess_labels (Sequence[google.cloud.vision_v1p4beta1.types.WebDetection.WebLabel]):
The service's best guess as to the topic of
the request image. Inferred from similar images
on the open web.
"""
class WebEntity(proto.Message):
r"""Entity deduced from similar images on the Internet.
Attributes:
entity_id (str):
Opaque entity ID.
score (float):
Overall relevancy score for the entity.
Not normalized and not comparable across
different image queries.
description (str):
Canonical description of the entity, in
English.
"""
entity_id = proto.Field(
proto.STRING,
number=1,
)
score = proto.Field(
proto.FLOAT,
number=2,
)
description = proto.Field(
proto.STRING,
number=3,
)
class WebImage(proto.Message):
r"""Metadata for online images.
Attributes:
url (str):
The result image URL.
score (float):
(Deprecated) Overall relevancy score for the
image.
"""
url = proto.Field(
proto.STRING,
number=1,
)
score = proto.Field(
proto.FLOAT,
number=2,
)
class WebPage(proto.Message):
r"""Metadata for web pages.
Attributes:
url (str):
The result web page URL.
score (float):
(Deprecated) Overall relevancy score for the
web page.
page_title (str):
Title for the web page, may contain HTML
markups.
full_matching_images (Sequence[google.cloud.vision_v1p4beta1.types.WebDetection.WebImage]):
Fully matching images on the page.
Can include resized copies of the query image.
partial_matching_images (Sequence[google.cloud.vision_v1p4beta1.types.WebDetection.WebImage]):
Partial matching images on the page.
Those images are similar enough to share some
key-point features. For example an original
image will likely have partial matching for its
crops.
"""
url = proto.Field(
proto.STRING,
number=1,
)
score = proto.Field(
proto.FLOAT,
number=2,
)
page_title = proto.Field(
proto.STRING,
number=3,
)
full_matching_images = proto.RepeatedField(
proto.MESSAGE,
number=4,
message="WebDetection.WebImage",
)
partial_matching_images = proto.RepeatedField(
proto.MESSAGE,
number=5,
message="WebDetection.WebImage",
)
class WebLabel(proto.Message):
r"""Label to provide extra metadata for the web detection.
Attributes:
label (str):
Label for extra metadata.
language_code (str):
The BCP-47 language code for ``label``, such as "en-US" or
"sr-Latn". For more information, see
http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
"""
label = proto.Field(
proto.STRING,
number=1,
)
language_code = proto.Field(
proto.STRING,
number=2,
)
web_entities = proto.RepeatedField(
proto.MESSAGE,
number=1,
message=WebEntity,
)
full_matching_images = proto.RepeatedField(
proto.MESSAGE,
number=2,
message=WebImage,
)
partial_matching_images = proto.RepeatedField(
proto.MESSAGE,
number=3,
message=WebImage,
)
pages_with_matching_images = proto.RepeatedField(
proto.MESSAGE,
number=4,
message=WebPage,
)
visually_similar_images = proto.RepeatedField(
proto.MESSAGE,
number=6,
message=WebImage,
)
best_guess_labels = proto.RepeatedField(
proto.MESSAGE,
number=8,
message=WebLabel,
)
__all__ = tuple(sorted(__protobuf__.manifest))
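# Minimal construction sketch (synthetic values, not a real API response),
# showing how the nested message types above compose:
#
#   detection = WebDetection(
#       web_entities=[WebDetection.WebEntity(
#           entity_id="/m/xxxxx", score=0.9, description="example")],
#       best_guess_labels=[WebDetection.WebLabel(
#           label="example", language_code="en-US")],
#   )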
|
{
"content_hash": "3cd32d4a27786ceb2ecac35e11af6f88",
"timestamp": "",
"source": "github",
"line_count": 186,
"max_line_length": 106,
"avg_line_length": 31.026881720430108,
"alnum_prop": 0.5598683070525039,
"repo_name": "googleapis/python-vision",
"id": "55d0b826dc636f43e37ba87ffe0813ff8d09c32f",
"size": "6371",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/cloud/vision_v1p4beta1/types/web_detection.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "3254393"
},
{
"name": "Shell",
"bytes": "30660"
}
],
"symlink_target": ""
}
|
from datetime import date, datetime
from stdnet import QuerySetError, odm
from stdnet.utils import test, zip, range
from examples.models import (SportAtDate, SportAtDate2, Person,
TestDateModel, Group)
class SortGenerator(test.DataGenerator):
def generate(self, **kwargs):
self.dates = self.populate('date', start=date(2005,6,1),
end=date(2012,6,6))
self.groups = self.populate('choice',
choice_from=['football', 'rugby', 'swimming',
'running', 'cycling'])
self.persons = self.populate('choice',
choice_from=['pippo', 'pluto', 'saturn', 'luca', 'josh',
'carl', 'paul'])
class TestSort(test.TestCase):
'''Base class for sorting'''
desc = False
data_cls = SortGenerator
@classmethod
def after_setup(cls):
d = cls.data
with cls.session().begin() as t:
for p, n, d in zip(d.persons, d.groups, d.dates):
t.add(cls.model(person=p, name=n, dt=d))
return t.on_result
def checkOrder(self, qs, attr, desc=None):
if hasattr(qs, 'all'):
all = yield qs.all()
else:
all = qs
self.assertTrue(all)
desc = desc if desc is not None else self.desc
at0 = all[0].get_attr_value(attr)
for obj in all[1:]:
at1 = obj.get_attr_value(attr)
            if desc:
                self.assertTrue(at1 <= at0)
            else:
                self.assertTrue(at1 >= at0)
at0 = at1
class ExplicitOrderingMixin(object):
def test_size(self):
qs = self.query()
yield self.async.assertEqual(qs.count(), len(self.data.dates))
def testDateSortBy(self):
return self.checkOrder(self.query().sort_by('dt'), 'dt')
def testDateSortByReversed(self):
return self.checkOrder(self.query().sort_by('-dt'),'dt',True)
def testNameSortBy(self):
return self.checkOrder(self.query().sort_by('name'),'name')
def testNameSortByReversed(self):
return self.checkOrder(self.query().sort_by('-name'),'name',True)
def testSimpleSortError(self):
qs = self.query()
self.assertRaises(QuerySetError, qs.sort_by, 'whaaaa')
def testFilter(self):
qs = self.query().filter(name='rugby').sort_by('dt')
yield self.checkOrder(qs, 'dt')
for v in qs:
self.assertEqual(v.name, 'rugby')
def _slicingTest(self, attr, desc, start=0, stop=10, expected_len=10):
p = '-' if desc else ''
qs = self.query().sort_by(p+attr)
qs1 = yield qs[start:stop]
self.assertEqual(len(qs1), expected_len)
self.checkOrder(qs1, attr, desc)
def testDateSlicing(self):
return self._slicingTest('dt',False)
def testDateSlicingDesc(self):
return self._slicingTest('dt',True)
class TestSortBy(TestSort, ExplicitOrderingMixin):
'''Test the sort_by in a model without ordering meta attribute.
Pure explicit ordering.'''
model = TestDateModel
class TestSortByForeignKeyField(TestSort):
model = Person
models = (Person, Group)
@classmethod
def after_setup(cls):
d = cls.data
session = cls.session()
with session.begin() as t:
for g in d.groups:
t.add(Group(name=g))
yield t.on_result
groups = yield session.query(Group).all()
gps = test.populate('choice', d.size, choice_from=groups)
with session.begin() as t:
for p, g in zip(d.persons, gps):
t.add(cls.model(name=p, group=g))
yield t.on_result
def test_size(self):
qs = self.query()
return self.async.assertEqual(qs.count(), len(self.data.dates))
def testNameSortBy(self):
return self.checkOrder(self.query().sort_by('name'),'name')
def testNameSortByReversed(self):
return self.checkOrder(self.query().sort_by('-name'),'name',True)
def testSortByFK(self):
qs = self.query()
qs = qs.sort_by('group__name')
ordering = qs.ordering
self.assertEqual(ordering.name, 'group_id')
self.assertEqual(ordering.nested.name, 'name')
self.assertEqual(ordering.model, qs.model)
self.checkOrder(qs, 'group__name')
class TestOrderingModel(TestSort):
'''Test a model which is always sorted by the ordering meta attribute.'''
model = SportAtDate
def testMeta(self):
model = self.model
self.assertTrue(model._meta.ordering)
ordering = model._meta.ordering
self.assertEqual(ordering.name, 'dt')
self.assertEqual(ordering.field.name, 'dt')
self.assertEqual(ordering.desc, self.desc)
def testSimple(self):
yield self.checkOrder(self.query(), 'dt')
def testFilter(self):
qs = self.query().filter(name=('football','rugby'))
return self.checkOrder(qs,'dt')
def testExclude(self):
qs = self.query().exclude(name='rugby')
return self.checkOrder(qs, 'dt')
class TestOrderingModelDesc(TestOrderingModel):
model = SportAtDate2
desc = True
|
{
"content_hash": "f533677a816bf653bb08296f1872fcea",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 77,
"avg_line_length": 32.08383233532934,
"alnum_prop": 0.578200821201941,
"repo_name": "lsbardel/python-stdnet",
"id": "b729d2bdb988336bd26b4b2857690d3c866edba3",
"size": "5358",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/all/query/sorting.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Lua",
"bytes": "130655"
},
{
"name": "Python",
"bytes": "747745"
},
{
"name": "Shell",
"bytes": "3200"
}
],
"symlink_target": ""
}
|
from google.appengine.ext import vendor
#https://cloud.google.com/appengine/docs/python/tools/using-libraries-python-27#installing_a_library
# pip install -t lib gcloud
# Add any libraries installed in the "lib" folder.
vendor.add('lib')
|
{
"content_hash": "e7b4fad337d3024098c3885e2bd71fb5",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 100,
"avg_line_length": 47.6,
"alnum_prop": 0.7857142857142857,
"repo_name": "tdliu/hoop-picks",
"id": "11a7706ec550ddd6c67151d96d4d9a694bd50979",
"size": "238",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "appengine_config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "98"
},
{
"name": "HTML",
"bytes": "2610"
},
{
"name": "JavaScript",
"bytes": "3073204"
},
{
"name": "Makefile",
"bytes": "888"
},
{
"name": "Python",
"bytes": "2158488"
}
],
"symlink_target": ""
}
|
from __future__ import division, unicode_literals
import os
import glob
import numpy as np
from monty.json import jsanitize
from monty.json import MSONable
scipy_old_piecewisepolynomial = True
try:
from scipy.interpolate import PiecewisePolynomial
except ImportError:
from scipy.interpolate import CubicSpline
scipy_old_piecewisepolynomial = False
from pymatgen.util.plotting import pretty_plot
from pymatgen.io.vasp import Poscar, Outcar
from pymatgen.analysis.structure_matcher import StructureMatcher
import warnings
"""
Some reimplementation of Henkelman's Transition State Analysis utilities,
which were originally written in Perl. Additional features beyond those
offered by Henkelman's utilities will be added.
This allows the usage and customization in Python.
"""
__author__ = 'Shyue Ping Ong'
__copyright__ = 'Copyright 2013, The Materials Virtual Lab'
__version__ = '0.1'
__maintainer__ = 'Shyue Ping Ong'
__email__ = 'ongsp@ucsd.edu'
__date__ = '6/1/15'
class NEBAnalysis(MSONable):
"""
An NEBAnalysis class.
"""
def __init__(self, r, energies, forces, structures, spline_options=None):
"""
Initializes an NEBAnalysis from the cumulative root mean squared distances
between structures, the energies, the forces, the structures and the
interpolation_order for the analysis.
Args:
r: Root mean square distances between structures
energies: Energies of each structure along reaction coordinate
forces: Tangent forces along the reaction coordinate.
structures ([Structure]): List of Structures along reaction
coordinate.
spline_options (dict): Options for cubic spline. For example,
{"saddle_point": "zero_slope"} forces the slope at the saddle to
be zero.
"""
self.r = np.array(r)
self.energies = np.array(energies)
self.forces = np.array(forces)
self.structures = structures
self.spline_options = spline_options if spline_options is not None \
else {}
# We do a piecewise interpolation between the points. Each spline (
# cubic by default) is constrained by the boundary conditions of the
# energies and the tangent force, i.e., the derivative of
# the energy at each pair of points.
self.setup_spline(spline_options=self.spline_options)
def setup_spline(self, spline_options=None):
"""
Setup of the options for the spline interpolation
Args:
spline_options (dict): Options for cubic spline. For example,
{"saddle_point": "zero_slope"} forces the slope at the saddle to
be zero.
"""
self.spline_options = spline_options
relative_energies = self.energies - self.energies[0]
if scipy_old_piecewisepolynomial:
if self.spline_options:
                raise RuntimeError('Option for saddle point not available '
                                   'with old scipy implementation')
self.spline = PiecewisePolynomial(
self.r, np.array([relative_energies, -self.forces]).T,
orders=3)
else:
# New scipy implementation for scipy > 0.18.0
if self.spline_options.get('saddle_point', '') == 'zero_slope':
imax = np.argmax(relative_energies)
self.spline = CubicSpline(x=self.r[:imax + 1],
y=relative_energies[:imax + 1],
bc_type=((1, 0.0), (1, 0.0)))
cspline2 = CubicSpline(x=self.r[imax:], y=relative_energies[imax:],
bc_type=((1, 0.0), (1, 0.0)))
self.spline.extend(c=cspline2.c, x=cspline2.x[1:])
else:
self.spline = CubicSpline(x=self.r, y=relative_energies,
bc_type=((1, 0.0), (1, 0.0)))
@classmethod
def from_outcars(cls, outcars, structures, **kwargs):
"""
Initializes an NEBAnalysis from Outcar and Structure objects. Use
the static constructors, e.g., :class:`from_dir` instead if you
prefer to have these automatically generated from a directory of NEB
calculations.
Args:
outcars ([Outcar]): List of Outcar objects. Note that these have
to be ordered from start to end along reaction coordinates.
structures ([Structure]): List of Structures along reaction
coordinate. Must be same length as outcar.
interpolation_order (int): Order of polynomial to use to
interpolate between images. Same format as order parameter in
                scipy.interpolate.PiecewisePolynomial.
"""
if len(outcars) != len(structures):
raise ValueError("# of Outcars must be same as # of Structures")
# Calculate cumulative root mean square distance between structures,
# which serves as the reaction coordinate. Note that these are
# calculated from the final relaxed structures as the coordinates may
# have changed from the initial interpolation.
r = [0]
prev = structures[0]
for st in structures[1:]:
dists = np.array([s2.distance(s1) for s1, s2 in zip(prev, st)])
r.append(np.sqrt(np.sum(dists ** 2)))
prev = st
r = np.cumsum(r)
energies = []
forces = []
for i, o in enumerate(outcars):
o.read_neb()
energies.append(o.data["energy"])
if i in [0, len(outcars) - 1]:
forces.append(0)
else:
forces.append(o.data["tangent_force"])
forces = np.array(forces)
r = np.array(r)
return cls(r=r, energies=energies, forces=forces,
structures=structures, **kwargs)
def get_extrema(self, normalize_rxn_coordinate=True):
"""
Returns the positions of the extrema along the MEP. Both local
minimums and maximums are returned.
Args:
normalize_rxn_coordinate (bool): Whether to normalize the
reaction coordinate to between 0 and 1. Defaults to True.
Returns:
(min_extrema, max_extrema), where the extrema are given as
[(x1, y1), (x2, y2), ...].
"""
x = np.arange(0, np.max(self.r), 0.01)
y = self.spline(x) * 1000
scale = 1 if not normalize_rxn_coordinate else 1 / self.r[-1]
min_extrema = []
max_extrema = []
for i in range(1, len(x) - 1):
if y[i] < y[i-1] and y[i] < y[i+1]:
min_extrema.append((x[i] * scale, y[i]))
elif y[i] > y[i-1] and y[i] > y[i+1]:
max_extrema.append((x[i] * scale, y[i]))
return min_extrema, max_extrema
def get_plot(self, normalize_rxn_coordinate=True, label_barrier=True):
"""
Returns the NEB plot. Uses Henkelman's approach of spline fitting
each section of the reaction path based on tangent force and energies.
Args:
normalize_rxn_coordinate (bool): Whether to normalize the
reaction coordinate to between 0 and 1. Defaults to True.
label_barrier (bool): Whether to label the maximum barrier.
Returns:
matplotlib.pyplot object.
"""
plt = pretty_plot(12, 8)
scale = 1 if not normalize_rxn_coordinate else 1 / self.r[-1]
x = np.arange(0, np.max(self.r), 0.01)
y = self.spline(x) * 1000
relative_energies = self.energies - self.energies[0]
plt.plot(self.r * scale, relative_energies * 1000, 'ro',
x * scale, y, 'k-', linewidth=2, markersize=10)
plt.xlabel("Reaction coordinate")
plt.ylabel("Energy (meV)")
plt.ylim((np.min(y) - 10, np.max(y) * 1.02 + 20))
if label_barrier:
data = zip(x * scale, y)
barrier = max(data, key=lambda d: d[1])
plt.plot([0, barrier[0]], [barrier[1], barrier[1]], 'k--')
plt.annotate('%.0f meV' % (np.max(y) - np.min(y)),
xy=(barrier[0] / 2, barrier[1] * 1.02),
xytext=(barrier[0] / 2, barrier[1] * 1.02),
horizontalalignment='center')
plt.tight_layout()
return plt
@classmethod
def from_dir(cls, root_dir, relaxation_dirs=None, **kwargs):
"""
Initializes a NEBAnalysis object from a directory of a NEB run.
Note that OUTCARs must be present in all image directories. For the
terminal OUTCARs from relaxation calculations, you can specify the
locations using relaxation_dir. If these are not specified, the code
will attempt to look for the OUTCARs in 00 and 0n directories,
followed by subdirs "start", "end" or "initial", "final" in the
root_dir. These are just some typical conventions used
preferentially in Shyue Ping's MAVRL research group. For the
non-terminal points, the CONTCAR is read to obtain structures. For
terminal points, the POSCAR is used. The image directories are
assumed to be the only directories that can be resolved to integers.
E.g., "00", "01", "02", "03", "04", "05", "06". The minimum
sub-directory structure that can be parsed is of the following form (
a 5-image example is shown):
00:
- POSCAR
- OUTCAR
01, 02, 03, 04, 05:
- CONTCAR
- OUTCAR
06:
- POSCAR
- OUTCAR
Args:
root_dir (str): Path to the root directory of the NEB calculation.
relaxation_dirs (tuple): This specifies the starting and ending
relaxation directories from which the OUTCARs are read for the
terminal points for the energies.
Returns:
NEBAnalysis object.
"""
neb_dirs = []
for d in os.listdir(root_dir):
pth = os.path.join(root_dir, d)
if os.path.isdir(pth) and d.isdigit():
i = int(d)
neb_dirs.append((i, pth))
neb_dirs = sorted(neb_dirs, key=lambda d: d[0])
outcars = []
structures = []
# Setup the search sequence for the OUTCARs for the terminal
# directories.
terminal_dirs = []
if relaxation_dirs is not None:
terminal_dirs.append(relaxation_dirs)
terminal_dirs.append((neb_dirs[0][1], neb_dirs[-1][1]))
terminal_dirs.append([os.path.join(root_dir, d)
for d in ["start", "end"]])
terminal_dirs.append([os.path.join(root_dir, d)
for d in ["initial", "final"]])
for i, d in neb_dirs:
outcar = glob.glob(os.path.join(d, "OUTCAR*"))
contcar = glob.glob(os.path.join(d, "CONTCAR*"))
poscar = glob.glob(os.path.join(d, "POSCAR*"))
terminal = i == 0 or i == neb_dirs[-1][0]
if terminal:
for ds in terminal_dirs:
od = ds[0] if i == 0 else ds[1]
outcar = glob.glob(os.path.join(od, "OUTCAR*"))
if outcar:
outcar = sorted(outcar)
outcars.append(Outcar(outcar[-1]))
break
else:
raise ValueError("OUTCAR cannot be found for terminal "
"point %s" % d)
structures.append(Poscar.from_file(poscar[0]).structure)
else:
outcars.append(Outcar(outcar[0]))
structures.append(Poscar.from_file(contcar[0]).structure)
return NEBAnalysis.from_outcars(outcars, structures, **kwargs)
def as_dict(self):
"""
Dict representation of NEBAnalysis.
Returns:
JSON serializable dict representation.
"""
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
'r': jsanitize(self.r),
'energies': jsanitize(self.energies),
'forces': jsanitize(self.forces),
'structures': [s.as_dict() for s in self.structures]}
def combine_neb_plots(neb_analyses, arranged_neb_analyses=False,
reverse_plot=False):
"""
neb_analyses: a list of NEBAnalysis objects
arranged_neb_analyses: The code connects two end points with the
smallest-energy difference. If all end points have very close energies, it's
likely to result in an inaccurate connection. Manually arrange neb_analyses
if the combined plot is not as expected compared with all individual plots.
E.g., if there are two NEBAnalysis objects to combine, arrange in such a
way that the end-point energy of the first NEBAnalysis object is the
start-point energy of the second NEBAnalysis object.
    Note that the barrier labeled on the y-axis of the combined plot might
    differ from that in the individual plots due to the reference energy used.
reverse_plot: reverse the plot or percolation direction.
return: a NEBAnalysis object
"""
x = StructureMatcher()
for neb_index in range(len(neb_analyses)):
if neb_index == 0:
neb1 = neb_analyses[neb_index]
neb1_energies = list(neb1.energies)
neb1_structures = neb1.structures
neb1_forces = neb1.forces
neb1_r = neb1.r
continue
neb2 = neb_analyses[neb_index]
neb2_energies = list(neb2.energies)
matching = 0
for neb1_s in [neb1_structures[0], neb1_structures[-1]]:
if x.fit(neb1_s, neb2.structures[0]) or \
x.fit(neb1_s, neb2.structures[-1]):
matching += 1
break
if matching == 0:
raise ValueError("no matched structures for connection!")
neb1_start_e, neb1_end_e = neb1_energies[0], neb1_energies[-1]
neb2_start_e, neb2_end_e = neb2_energies[0], neb2_energies[-1]
min_e_diff = min(([abs(neb1_start_e - neb2_start_e),
abs(neb1_start_e - neb2_end_e),
abs(neb1_end_e - neb2_start_e),
abs(neb1_end_e - neb2_end_e)]))
if arranged_neb_analyses:
neb1_energies = neb1_energies[0:len(neb1_energies) - 1] \
+ [(neb1_energies[-1] + neb2_energies[0]) / 2] \
+ neb2_energies[
1:]
neb1_structures = neb1_structures + neb2.structures[1:]
neb1_forces = list(neb1_forces) + list(neb2.forces)[1:]
neb1_r = list(neb1_r) + [i + neb1_r[-1] for i in
list(neb2.r)[1:]]
elif abs(neb1_start_e - neb2_start_e) == min_e_diff:
neb1_energies = list(reversed(neb1_energies[1:])) + neb2_energies
neb1_structures = list(
reversed((neb1_structures[1:]))) + neb2.structures
neb1_forces = list(reversed(list(neb1_forces)[1:])) + list(
neb2.forces)
neb1_r = list(reversed(
[i * -1 - neb1_r[-1] * -1 for i in list(neb1_r)[1:]])) + [
i + neb1_r[-1] for i in list(neb2.r)]
elif abs(neb1_start_e - neb2_end_e) == min_e_diff:
neb1_energies = neb2_energies + neb1_energies[1:]
neb1_structures = neb2.structures + neb1_structures[1:]
neb1_forces = list(neb2.forces) + list(neb1_forces)[1:]
neb1_r = [i for i in list(neb2.r)] + \
[i + list(neb2.r)[-1] for i in list(neb1_r)[1:]]
elif abs(neb1_end_e - neb2_start_e) == min_e_diff:
neb1_energies = neb1_energies + neb2_energies[1:]
neb1_structures = neb1_structures + neb2.structures[1:]
neb1_forces = list(neb1_forces) + list(neb2.forces)[1:]
neb1_r = [i for i in list(neb1_r)] + \
[i + neb1_r[-1] for i in list(neb2.r)[1:]]
else:
neb1_energies = neb1_energies + list(reversed(neb2_energies))[1:]
neb1_structures = neb1_structures + list(
reversed((neb2.structures)))[1:]
neb1_forces = list(neb1_forces) + \
list(reversed(list(neb2.forces)))[1:]
neb1_r = list(neb1_r) + list(
reversed([i * -1 - list(neb2.r)[-1] * -1 + list(neb1_r)[-1]
for i in list(neb2.r)[:-1]]))
if reverse_plot:
na = NEBAnalysis(
list(reversed([i * -1 - neb1_r[-1] * -1 for i in list(neb1_r)])),
list(reversed(neb1_energies)),
list(reversed(neb1_forces)), list(reversed(neb1_structures)))
else:
na = NEBAnalysis(neb1_r, neb1_energies, neb1_forces, neb1_structures)
return na
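if __name__ == "__main__":
    # Minimal usage sketch: "./neb_run" is a hypothetical directory laid out
    # as described in NEBAnalysis.from_dir (00..0n image subdirectories).
    neb = NEBAnalysis.from_dir("./neb_run")
    minima, maxima = neb.get_extrema()
    print("Barrier candidates (meV): %s" % (maxima,))
    neb.get_plot().savefig("neb_plot.png")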
|
{
"content_hash": "8c3976f9454a7f9b05d5110b0b6d9ce3",
"timestamp": "",
"source": "github",
"line_count": 403,
"max_line_length": 83,
"avg_line_length": 42.53846153846154,
"alnum_prop": 0.5641369655252872,
"repo_name": "czhengsci/pymatgen",
"id": "d6c4bb446bb3652ea37876112c72d99c7138847b",
"size": "17253",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "pymatgen/analysis/transition_state.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "5938"
},
{
"name": "CSS",
"bytes": "7550"
},
{
"name": "Common Lisp",
"bytes": "3029065"
},
{
"name": "HTML",
"bytes": "827"
},
{
"name": "Makefile",
"bytes": "5573"
},
{
"name": "Perl",
"bytes": "229104"
},
{
"name": "Propeller Spin",
"bytes": "4026362"
},
{
"name": "Python",
"bytes": "6706935"
},
{
"name": "Roff",
"bytes": "1135003"
}
],
"symlink_target": ""
}
|
"""
cli:
Generic Abstract class for the Stasipy CLI.
Author: Corwin Brown
Date: 05/02/2016
"""
from abc import ABCMeta, abstractmethod
import argparse
class StasipyCLI(object):
__metaclass__ = ABCMeta
_description = None
def __init__(self, args):
"""
Constructor.
Args:
args (list): Args from the command line. I expect this to
basically be: sys.argv[2:]
"""
self.args = args
self.parser = self._setup_base_parser()
def _setup_base_parser(self):
"""
Construct a base parser object that has options that I expect
every subcommand to want access to.
Returns:
ArgumentParser object.
"""
        parser = argparse.ArgumentParser(description=self.description)
parser.add_argument('site_path',
type=str,
metavar='SITE-PATH',
help='The path to the site you wish to reference.')
parser.add_argument('-v', '--verbose',
action='store_true',
default=False,
help='Toggle verbose mode.')
parser.add_argument('-y',
dest='skip_confirm',
action='store_true',
default=False,
help='Answer yes to all confirm dialogs.')
return parser
def parse(self):
"""
Parse out command line args.
"""
self.parsed_args = self.parser.parse_args(self.args)
@property
def description(self):
"""
Get the description of the commandline tool.
"""
if self._description is None:
raise NotImplementedError('Description not implemented.')
return self._description
@abstractmethod
def run(self):
"""
Execute the command line task.
"""
raise NotImplementedError()
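if __name__ == '__main__':
    # Minimal concrete-subcommand sketch (hypothetical; not part of Stasipy):
    # a subclass only needs a _description and a run() implementation.
    import sys

    class EchoCLI(StasipyCLI):
        _description = 'Echo the site path passed on the command line.'

        def run(self):
            self.parse()
            print('site_path: {0}'.format(self.parsed_args.site_path))

    EchoCLI(sys.argv[1:]).run()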
|
{
"content_hash": "31aefd0d6ce932d2ba7c63833ba2e0b6",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 79,
"avg_line_length": 26.192307692307693,
"alnum_prop": 0.5090553108174254,
"repo_name": "blakfeld/stasipy",
"id": "a0a0e43915d00717705aea2c58b55544aec733e3",
"size": "2043",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stasipy/cli/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15112"
}
],
"symlink_target": ""
}
|
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class FilterDatasetTest(test_base.DatasetTestBase):
def testFilterDataset(self):
components = (
np.arange(7, dtype=np.int64),
np.array([[1, 2, 3]], dtype=np.int64) * np.arange(
7, dtype=np.int64)[:, np.newaxis],
np.array(37.0, dtype=np.float64) * np.arange(7)
)
count = array_ops.placeholder(dtypes.int64, shape=[])
modulus = array_ops.placeholder(dtypes.int64)
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
iterator = (
dataset_ops.Dataset.from_tensor_slices(components).map(_map_fn)
.repeat(count)
.filter(lambda x, _y, _z: math_ops.equal(math_ops.mod(x, modulus), 0))
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
self.assertEqual([c.shape[1:] for c in components],
[t.shape for t in get_next])
with self.cached_session() as sess:
# Test that we can dynamically feed a different modulus value for each
# iterator.
def do_test(count_val, modulus_val):
sess.run(init_op, feed_dict={count: count_val, modulus: modulus_val})
for _ in range(count_val):
for i in [x for x in range(7) if x**2 % modulus_val == 0]:
result = sess.run(get_next)
for component, result_component in zip(components, result):
self.assertAllEqual(component[i]**2, result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
do_test(14, 2)
do_test(4, 18)
# Test an empty dataset.
do_test(0, 1)
def testFilterRange(self):
dataset = dataset_ops.Dataset.range(100).filter(
lambda x: math_ops.not_equal(math_ops.mod(x, 3), 2))
iterator = dataset.make_one_shot_iterator()
get_next = iterator.get_next()
with self.cached_session() as sess:
self.assertEqual(0, sess.run(get_next))
self.assertEqual(1, sess.run(get_next))
self.assertEqual(3, sess.run(get_next))
def testFilterDict(self):
iterator = (dataset_ops.Dataset.range(10)
.map(lambda x: {"foo": x * 2, "bar": x ** 2})
.filter(lambda d: math_ops.equal(d["bar"] % 2, 0))
.map(lambda d: d["foo"] + d["bar"])
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op)
for i in range(10):
if (i ** 2) % 2 == 0:
self.assertEqual(i * 2 + i ** 2, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testUseStepContainerInFilter(self):
input_data = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int64)
# Define a predicate that returns true for the first element of
# the sequence and not the second, and uses `tf.map_fn()`.
def _predicate(xs):
squared_xs = functional_ops.map_fn(lambda x: x * x, xs)
summed = math_ops.reduce_sum(squared_xs)
return math_ops.equal(summed, 1 + 4 + 9)
iterator = (
dataset_ops.Dataset.from_tensor_slices([[1, 2, 3], [4, 5, 6]])
.filter(_predicate)
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op)
self.assertAllEqual(input_data[0], sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testSparse(self):
def _map_fn(i):
return sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0]]),
values=(i * np.array([1])),
dense_shape=np.array([1, 1])), i
def _filter_fn(_, i):
return math_ops.equal(i % 2, 0)
iterator = (
dataset_ops.Dataset.range(10).map(_map_fn).filter(_filter_fn).map(
lambda x, i: x).make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op)
for i in range(5):
actual = sess.run(get_next)
self.assertTrue(isinstance(actual, sparse_tensor.SparseTensorValue))
self.assertSparseValuesEqual(actual, _map_fn(i * 2)[0])
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testShortCircuit(self):
iterator = (
dataset_ops.Dataset.zip(
(dataset_ops.Dataset.range(10),
dataset_ops.Dataset.from_tensors(True).repeat(None)))
.filter(lambda x, y: y).make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op)
for i in range(10):
self.assertEqual((i, True), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testParallelFilters(self):
dataset = dataset_ops.Dataset.range(10).filter(
lambda x: math_ops.equal(x % 2, 0))
iterators = [dataset.make_one_shot_iterator() for _ in range(10)]
next_elements = [iterator.get_next() for iterator in iterators]
with self.cached_session() as sess:
self.assertEqual([0 for _ in range(10)], sess.run(next_elements))
class FilterDatasetBenchmark(test.Benchmark):
def _benchmark(self, predicate, name):
with ops.Graph().as_default():
dataset = (
dataset_ops.Dataset.from_tensors(True).repeat(None).filter(predicate))
iterator = dataset.make_one_shot_iterator()
next_element = iterator.get_next()
with session.Session() as sess:
for _ in range(5):
sess.run(next_element.op)
deltas = []
for _ in range(100):
start = time.time()
for _ in range(100):
sess.run(next_element.op)
end = time.time()
deltas.append(end - start)
median_wall_time = np.median(deltas) / 100
print("Filter dataset using %s. Median wall time: %f" %
(name, median_wall_time))
self.report_benchmark(
iters=100,
wall_time=median_wall_time,
name="benchmark_filter_dataset_%s" % name)
def benchmarkSimpleFunction(self):
self._benchmark(array_ops.identity, "simple_function")
def benchmarkReturnComponentOptimization(self):
self._benchmark(lambda x: x, "return_component")
if __name__ == "__main__":
test.main()
|
{
"content_hash": "cd334e1311160be1ee2470f686aa1f2b",
"timestamp": "",
"source": "github",
"line_count": 206,
"max_line_length": 80,
"avg_line_length": 35.092233009708735,
"alnum_prop": 0.6309309724719878,
"repo_name": "seanli9jan/tensorflow",
"id": "a0c6b37a6dc0c7f4cec829efb26bec08899b8b34",
"size": "7918",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tensorflow/python/data/kernel_tests/filter_dataset_op_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3301"
},
{
"name": "Batchfile",
"bytes": "10132"
},
{
"name": "C",
"bytes": "446293"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "50950243"
},
{
"name": "CMake",
"bytes": "198845"
},
{
"name": "Dockerfile",
"bytes": "36908"
},
{
"name": "Go",
"bytes": "1285854"
},
{
"name": "HTML",
"bytes": "4681865"
},
{
"name": "Java",
"bytes": "869263"
},
{
"name": "Jupyter Notebook",
"bytes": "2611125"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "62216"
},
{
"name": "Objective-C",
"bytes": "15634"
},
{
"name": "Objective-C++",
"bytes": "101475"
},
{
"name": "PHP",
"bytes": "5191"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "40335927"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "553"
},
{
"name": "Shell",
"bytes": "487251"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
}
|
import operator
import argparse
import praw
parser = argparse.ArgumentParser()
parser.add_argument("--subreddit", help="which subreddit to use", default="funny")
parser.add_argument("-n","--num", help="How many posts to test", default=10)
parser.add_argument("--type", help="What type of posts, hot/new/top", default='hot')
args = parser.parse_args()
glob_words = {}
def gather_words(text,score):
global glob_words
words = text.split(' ')
for word in words:
        if word in glob_words:
            glob_words[word] += score / len(words)
        else:
            glob_words[word] = score / len(words)
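# Note: without "from __future__ import division", the per-word score above
# uses integer (floor) division on Python 2, truncating fractional values.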
r = praw.Reddit(user_agent='my_cool_application')
if args.type == 'new':
submissions = r.get_subreddit(args.subreddit).get_new(limit=int(args.num))
elif args.type == 'top':
submissions = r.get_subreddit(args.subreddit).get_top(limit=int(args.num))
else:
submissions = r.get_subreddit(args.subreddit).get_hot(limit=int(args.num))
for x in submissions:
score = x.score
text = x.selftext
gather_words(text,score)
sorted_x = sorted(glob_words.items(), key=operator.itemgetter(1))
print sorted_x[-20:]
|
{
"content_hash": "37512c3fc6175cce56e46f1815221733",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 84,
"avg_line_length": 28.725,
"alnum_prop": 0.6753698868581375,
"repo_name": "dannyvai/reddit_crawlers",
"id": "9914119f6c847b86a40205f02712382c72289eb3",
"size": "1149",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "most_used_words_subreddit.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "4455"
},
{
"name": "HTML",
"bytes": "9432"
},
{
"name": "JavaScript",
"bytes": "8894"
},
{
"name": "Python",
"bytes": "28944"
},
{
"name": "Shell",
"bytes": "121"
}
],
"symlink_target": ""
}
|
'''
Windows auditpol audit module
'''
from __future__ import absolute_import
import copy
import csv
import fnmatch
import logging
import salt.utils
import salt.utils.platform
log = logging.getLogger(__name__)
__virtualname__ = 'win_auditpol'
def __virtual__():
if not salt.utils.platform.is_windows():
        return False, 'This audit module only runs on Windows'
return True
def apply_labels(__data__, labels):
'''
    Filter out tests whose labels do not match the labels passed to the audit
    run, and return a new data structure containing only the labelled tests.
'''
labelled_data = {}
if labels:
labelled_data[__virtualname__] = {}
for topkey in ('blacklist', 'whitelist'):
if topkey in __data__.get(__virtualname__, {}):
labelled_test_cases=[]
for test_case in __data__[__virtualname__].get(topkey, []):
                    # Each test case is a dict with a single key-value pair:
                    # key = test name, value = test data (description etc.).
if isinstance(test_case, dict) and test_case:
test_case_body = test_case.get(next(iter(test_case)))
if set(labels).issubset(set(test_case_body.get('labels',[]))):
labelled_test_cases.append(test_case)
labelled_data[__virtualname__][topkey]=labelled_test_cases
else:
labelled_data = __data__
return labelled_data
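# A minimal sketch of the __data__ shape filtered by apply_labels above (the
# test name, labels and tag below are hypothetical):
#     {'win_auditpol': {'whitelist': [
#         {'credential_validation': {'labels': ['CIS'], 'tag': 'CIS-17.1'}}]}}
# With labels=['CIS'] that test case is kept; with labels=['STIG'] it is
# dropped, because ['STIG'] is not a subset of the case's labels.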
def audit(data_list, tags, labels, debug=False, **kwargs):
'''
Runs auditpol on the local machine and audits the return data
with the CIS yaml processed by __virtual__
'''
__data__ = {}
__auditdata__ = _auditpol_import()
for profile, data in data_list:
_merge_yaml(__data__, data, profile)
__data__ = apply_labels(__data__, labels)
__tags__ = _get_tags(__data__)
if debug:
log.debug('auditpol audit __data__:')
log.debug(__data__)
log.debug('auditpol audit __tags__:')
log.debug(__tags__)
ret = {'Success': [], 'Failure': [], 'Controlled': []}
for tag in __tags__:
if fnmatch.fnmatch(tag, tags):
for tag_data in __tags__[tag]:
if 'control' in tag_data:
ret['Controlled'].append(tag_data)
continue
name = tag_data['name']
audit_type = tag_data['type']
match_output = tag_data['match_output'].lower()
# Blacklisted audit (do not include)
if 'blacklist' in audit_type:
if name not in __auditdata__:
ret['Success'].append(tag_data)
else:
                        tag_data['failure_reason'] = "Value of blacklisted attribute '{0}' is " \
"configured on your system. It should not " \
"be configured".format(name)
ret['Failure'].append(tag_data)
# Whitelisted audit (must include)
if 'whitelist' in audit_type:
if name in __auditdata__:
audit_value = __auditdata__[name].lower()
tag_data['found_value'] = audit_value
secret = _translate_value_type(audit_value, tag_data['value_type'], match_output)
if secret:
ret['Success'].append(tag_data)
else:
tag_data['failure_reason'] = "Value of attribute '{0}' is currently" \
" set as '{1}'. Expected value is '{2}({3})'" \
.format(name,
audit_value,
match_output,
tag_data['value_type'])
ret['Failure'].append(tag_data)
else:
log.debug('When trying to audit the advanced auditpol section,'
' the yaml contained incorrect data for the key')
return ret
def _merge_yaml(ret, data, profile=None):
'''
Merge two yaml dicts together at the secedit:blacklist and
secedit:whitelist level
'''
if __virtualname__ not in ret:
ret[__virtualname__] = {}
for topkey in ('blacklist', 'whitelist'):
if topkey in data.get(__virtualname__, {}):
if topkey not in ret[__virtualname__]:
ret[__virtualname__][topkey] = []
for key, val in data[__virtualname__][topkey].iteritems():
if profile and isinstance(val, dict):
val['nova_profile'] = profile
ret[__virtualname__][topkey].append({key: val})
return ret
def _get_tags(data):
'''
Retrieve all the tags for this distro from the yaml
'''
ret = {}
distro = __grains__.get('osfullname')
for toplist, toplevel in data.get(__virtualname__, {}).iteritems():
# secedit:whitelist
for audit_dict in toplevel:
for audit_id, audit_data in audit_dict.iteritems():
# secedit:whitelist:PasswordComplexity
tags_dict = audit_data.get('data', {})
# secedit:whitelist:PasswordComplexity:data
tags = None
for osfinger in tags_dict:
if osfinger == '*':
continue
osfinger_list = [finger.strip() for finger in osfinger.split(',')]
for osfinger_glob in osfinger_list:
if fnmatch.fnmatch(distro, osfinger_glob):
tags = tags_dict.get(osfinger)
break
if tags is not None:
break
# If we didn't find a match, check for a '*'
if tags is None:
tags = tags_dict.get('*', [])
# secedit:whitelist:PasswordComplexity:data:Windows 2012
if isinstance(tags, dict):
# malformed yaml, convert to list of dicts
tmp = []
for name, tag in tags.iteritems():
tmp.append({name: tag})
tags = tmp
for item in tags:
for name, tag in item.iteritems():
tag_data = {}
# Whitelist could have a dictionary, not a string
if isinstance(tag, dict):
tag_data = copy.deepcopy(tag)
tag = tag_data.pop('tag')
if tag not in ret:
ret[tag] = []
formatted_data = {'name': name,
'tag': tag,
'module': 'win_auditpol',
'type': toplist}
formatted_data.update(tag_data)
formatted_data.update(audit_data)
formatted_data.pop('data')
ret[tag].append(formatted_data)
return ret
def _auditpol_export():
try:
dump = __salt__['cmd.run']('auditpol /get /category:* /r')
if dump:
dump = dump.split('\n')
return dump
else:
log.error('Nothing was returned from the auditpol command.')
except StandardError:
log.error('An error occurred running the auditpol command.')
def _auditpol_import():
dict_return = {}
export = _auditpol_export()
auditpol_csv = csv.DictReader(export)
for row in auditpol_csv:
if row:
dict_return[row['Subcategory']] = row['Inclusion Setting']
return dict_return
def _translate_value_type(current, value, evaluator):
    # Only the 'equal' comparison type is implemented; any other value
    # type falls through and implicitly returns None.
    if 'equal' in value:
        return current == evaluator
|
{
"content_hash": "468a62ba5233c7303858427434de3023",
"timestamp": "",
"source": "github",
"line_count": 204,
"max_line_length": 146,
"avg_line_length": 40.48039215686274,
"alnum_prop": 0.48050375393557765,
"repo_name": "HubbleStack/Hubble",
"id": "b46c4a6c9884d194513ca74b178c11006a1102a5",
"size": "8284",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "hubblestack/files/hubblestack_nova/win_auditpol.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
from setuptools import setup
import swiftly
setup(
name='swiftly', version=swiftly.VERSION, description='Client for Swift',
author='Gregory Holt', author_email='swiftly@brim.net',
url='http://gholt.github.com/swiftly/',
packages=['swiftly', 'swiftly.cli', 'swiftly.client'],
scripts=['bin/swiftly'], install_requires=['six'])
|
{
"content_hash": "21272dfac8889397997927f790e71ed1",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 76,
"avg_line_length": 31.727272727272727,
"alnum_prop": 0.6962750716332379,
"repo_name": "dpgoetz/swiftly",
"id": "4ae113f6827e5a116fccc6194c9dd24f4a81e746",
"size": "948",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "291131"
}
],
"symlink_target": ""
}
|
import argparse
import binascii
import getpass
import os
import sys
sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..')))
import king_phisher.color as color
import king_phisher.find as find
import king_phisher.security_keys as security_keys
import king_phisher.serializers as serializers
import king_phisher.utilities as utilities
import ecdsa
def action_display(arguments):
if arguments.file is None:
print('must specify a key file to display')
return
signing_key = security_keys.SigningKey.from_file(
arguments.file,
password=(getpass.getpass('password: ') if arguments.file.endswith('.enc') else None)
)
print('Key Information:')
print('ID: ' + signing_key.id)
print('Curve: ' + signing_key.curve.name)
def action_generate(arguments):
curve = ecdsa.NIST521p
	color.print_status('generating a new ecdsa signing key')
signing_key = ecdsa.SigningKey.generate(curve=curve)
verifying_key = signing_key.get_verifying_key()
signing_key = binascii.b2a_base64(signing_key.to_string()).decode('utf-8').strip()
verifying_key = binascii.b2a_base64(verifying_key.to_string()).decode('utf-8').strip()
print('public key information for inclusion in security.json:')
key_info = {
'id': arguments.id,
'verifying-key': {
'data': verifying_key,
'type': curve.openssl_name
}
}
print(serializers.JSON.dumps(key_info))
key_info['signing-key'] = {
'data': signing_key,
'type': curve.openssl_name
}
serializers.JSON.dump(key_info, arguments.file)
def main():
parser = argparse.ArgumentParser(description='King Phisher Signing-Key Generation Utility', conflict_handler='resolve')
utilities.argp_add_args(parser)
subparsers = parser.add_subparsers(dest='subcommand')
subparsers.required = True
parser_display = subparsers.add_parser('display')
parser_display.set_defaults(action=action_display)
parser_display.add_argument('file', default=os.getenv('KING_PHISHER_DEV_KEY'), nargs='?', help='the key file to display')
parser_generate = subparsers.add_parser('generate')
parser_generate.set_defaults(action=action_generate)
parser_generate.add_argument('id', help='this key\'s identifier')
parser_generate.add_argument('file', type=argparse.FileType('w'), help='the destination to write the key to')
arguments = parser.parse_args()
find.init_data_path()
arguments.action(arguments)
if __name__ == '__main__':
sys.exit(main())
|
{
"content_hash": "c8af6d0400209a394dc7668a45da8aa6",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 122,
"avg_line_length": 32.554054054054056,
"alnum_prop": 0.7359900373599004,
"repo_name": "wolfthefallen/king-phisher",
"id": "fa32172098561488b1d39e05755bda4d750f380b",
"size": "3990",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tools/development/key_tool.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1680"
},
{
"name": "CSS",
"bytes": "10384"
},
{
"name": "Dockerfile",
"bytes": "496"
},
{
"name": "HTML",
"bytes": "25790"
},
{
"name": "JavaScript",
"bytes": "1328"
},
{
"name": "Jupyter Notebook",
"bytes": "11394"
},
{
"name": "Mako",
"bytes": "574"
},
{
"name": "Python",
"bytes": "1384398"
},
{
"name": "Ruby",
"bytes": "7629"
},
{
"name": "SCSS",
"bytes": "12455"
},
{
"name": "Shell",
"bytes": "28115"
}
],
"symlink_target": ""
}
|
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('reservations', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='queue',
name='difficulty',
field=models.PositiveSmallIntegerField(default=3, validators=[django.core.validators.MaxValueValidator(5)]),
),
migrations.AddField(
model_name='queue',
name='out_of_order',
field=models.BooleanField(default=False),
),
]
|
{
"content_hash": "495821aa2d4c08e415881d84eb0eb0ee",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 120,
"avg_line_length": 26.863636363636363,
"alnum_prop": 0.6040609137055838,
"repo_name": "hackerspace-ntnu/website",
"id": "983c998d6eed2ba9330d2e0f88237c51419154a1",
"size": "640",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reservations/migrations/0002_auto_20191030_1851.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "16771"
},
{
"name": "HTML",
"bytes": "235369"
},
{
"name": "JavaScript",
"bytes": "43249"
},
{
"name": "Python",
"bytes": "323186"
}
],
"symlink_target": ""
}
|
import requests
import json
def retrieveCnnNews(search, num, filepath):
    """Query the CNN search API and append url/headline pairs to a JSON-lines file."""
    r = requests.get("https://search.api.cnn.io/content?q=" + search + "&size=" + str(num))
response = r.json()["result"]
with open(filepath, 'a') as outfile:
print("CRAWLING RESULT")
for newsitem in response:
aux = dict()
aux["url"] = newsitem["url"]
aux["headline"] = newsitem["headline"]
print(aux)
json.dump(aux, outfile)
outfile.write('\n')
|
{
"content_hash": "aa1416ada32616e54a3b042dbdf645d4",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 93,
"avg_line_length": 26.72222222222222,
"alnum_prop": 0.6008316008316008,
"repo_name": "gsi-upm/gsicrawler",
"id": "ea655f320797212a7a6a82977d6d98775a2d1113",
"size": "481",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scrapers/tutorial2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "6390"
},
{
"name": "HTML",
"bytes": "64958"
},
{
"name": "Makefile",
"bytes": "9531"
},
{
"name": "Python",
"bytes": "60416"
},
{
"name": "Roff",
"bytes": "296"
},
{
"name": "Ruby",
"bytes": "166977"
},
{
"name": "Shell",
"bytes": "1864"
}
],
"symlink_target": ""
}
|
import rhc.tcpsocket as network
PORT = 12345
class EchoServer(network.BasicHandler):
def on_data(self, data):
self.send(data) # step 2: on server recv, send data back
class EchoClient(network.BasicHandler):
def on_ready(self):
self.test_data = b'test_data'
self.send(self.test_data) # step 1: on client connect, send data
def on_data(self, data): # step 3: on client recv, assert and close
assert data == self.test_data
self.close()
def test_echo():
n = network.Server()
n.add_server(PORT, EchoServer)
c = n.add_connection(('localhost', PORT), EchoClient)
while c.is_open: # keep going until the client closes
n.service()
n.close()
|
{
"content_hash": "b4ea1dcbda0b42cf70628bbe0aa52fa1",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 77,
"avg_line_length": 24.733333333333334,
"alnum_prop": 0.628032345013477,
"repo_name": "robertchase/rhc",
"id": "05bf659feadf114bbb99acb29b3572f4872a742e",
"size": "742",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_echo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "686"
},
{
"name": "Jupyter Notebook",
"bytes": "42516"
},
{
"name": "Makefile",
"bytes": "333"
},
{
"name": "Python",
"bytes": "236043"
},
{
"name": "Shell",
"bytes": "183"
},
{
"name": "TSQL",
"bytes": "833"
}
],
"symlink_target": ""
}
|
"""The service for gating features.
This service provides different interfaces to access the feature flag values
for clients and the backend respectively as they have different context for
evaluation of feature flag values.
For clients, please use 'evaluate_all_feature_flag_values_for_client' from
request handlers with client context.
For the backend, please directly call 'is_feature_enabled' with the name of
the feature.
For more details of the usage of these two methods, please refer their
docstrings in this file.
"""
from __future__ import annotations
from core import platform_feature_list
from core.constants import constants
from core.domain import platform_parameter_domain
from core.domain import platform_parameter_registry as registry
ALL_FEATURES_LIST = (
platform_feature_list.DEV_FEATURES_LIST +
platform_feature_list.TEST_FEATURES_LIST +
platform_feature_list.PROD_FEATURES_LIST
)
ALL_FEATURES_NAMES_SET = set(feature.value for feature in ALL_FEATURES_LIST)
class FeatureFlagNotFoundException(Exception):
"""Exception thrown when an unknown feature flag is requested."""
pass
def create_evaluation_context_for_client(client_context_dict):
"""Returns context instance for evaluation, using the information
provided by clients.
Args:
client_context_dict: dict. The client side context.
Returns:
EvaluationContext. The context for evaluation.
"""
return platform_parameter_domain.EvaluationContext.from_dict(
client_context_dict,
{
'server_mode': _get_server_mode()
}
)
def get_all_feature_flag_dicts():
"""Returns dict representations of all feature flags. This method is used
for providing detailed feature flags information to the admin panel.
Returns:
list(dict). A list containing the dict mappings of all fields of the
feature flags.
"""
return [
registry.Registry.get_platform_parameter(_feature.value).to_dict()
for _feature in ALL_FEATURES_LIST
]
def evaluate_all_feature_flag_values_for_client(context):
"""Evaluates and returns the values for all feature flags.
Args:
context: EvaluationContext. The context used for evaluation.
Returns:
dict. The keys are the feature names and the values are boolean
results of corresponding flags.
"""
return _evaluate_feature_flag_values_for_context(
ALL_FEATURES_NAMES_SET, context)
def is_feature_enabled(feature_name):
"""A short-form method for server-side usage. This method evaluates and
    returns the value of the feature flag, using context from the server only.
Args:
feature_name: str. The name of the feature flag that needs to
be evaluated.
Returns:
bool. The value of the feature flag, True if it's enabled.
"""
return _evaluate_feature_flag_value_for_server(feature_name)
def update_feature_flag_rules(
feature_name, committer_id, commit_message, new_rules
):
"""Updates the feature flag's rules.
Args:
feature_name: str. The name of the feature to update.
committer_id: str. ID of the committer.
commit_message: str. The commit message.
new_rules: list(PlatformParameterRule). A list of PlatformParameterRule
objects to update.
Raises:
FeatureFlagNotFoundException. The feature_name is not registered in
core/platform_feature_list.py.
"""
if feature_name not in ALL_FEATURES_NAMES_SET:
raise FeatureFlagNotFoundException(
'Unknown feature flag: %s.' % feature_name)
registry.Registry.update_platform_parameter(
feature_name, committer_id, commit_message, new_rules)
# TODO(#10211): Currently Oppia runs in either of the two modes:
# dev or prod. There should be another mode 'test' added for QA testing,
# once it is added, this function needs to be updated to take that into
# consideration.
def _get_server_mode():
"""Returns the running mode of Oppia.
Returns:
Enum(SERVER_MODES). The server mode of Oppia, dev if Oppia is running
in development mode, prod if in production mode.
"""
return (
platform_parameter_domain.ServerModes.DEV
if constants.DEV_MODE
else platform_parameter_domain.ServerModes.PROD
)
def _create_evaluation_context_for_server():
"""Returns evaluation context with information of the server.
Returns:
EvaluationContext. The context for evaluation.
"""
# TODO(#11208): Properly set app version below using GAE app version as
# part of the server & client context.
return platform_parameter_domain.EvaluationContext.from_dict(
{
'platform_type': 'Backend',
'app_version': None,
},
{
'server_mode': _get_server_mode()
}
)
def _evaluate_feature_flag_values_for_context(feature_names_set, context):
"""Evaluates and returns the values for specified feature flags.
Args:
feature_names_set: set(str). The set of names of feature flags that need
to be evaluated.
context: EvaluationContext. The context used for evaluation.
Returns:
dict. The keys are the feature names and the values are boolean
results of corresponding flags.
Raises:
FeatureFlagNotFoundException. Some names in 'feature_names_set' are not
registered in core/platform_feature_list.py.
"""
unknown_feature_names = list(feature_names_set - ALL_FEATURES_NAMES_SET)
if len(unknown_feature_names) > 0:
raise FeatureFlagNotFoundException(
'Unknown feature flag(s): %s.' % unknown_feature_names)
result_dict = {}
for feature_name in feature_names_set:
param = registry.Registry.get_platform_parameter(
feature_name)
result_dict[feature_name] = param.evaluate(context)
return result_dict
def _evaluate_feature_flag_value_for_server(feature_name):
"""Evaluates and returns the values of the feature flag, using context
from the server only.
Args:
feature_name: str. The name of the feature flag that needs to
be evaluated.
Returns:
bool. The value of the feature flag, True if it's enabled.
"""
context = _create_evaluation_context_for_server()
values_dict = _evaluate_feature_flag_values_for_context(
set([feature_name]), context)
return values_dict[feature_name]
|
{
"content_hash": "85a06a35f988b33b32435468eb974b9a",
"timestamp": "",
"source": "github",
"line_count": 203,
"max_line_length": 80,
"avg_line_length": 32.17733990147783,
"alnum_prop": 0.6887630128597673,
"repo_name": "brianrodri/oppia",
"id": "571ceee976b74a153bb50bf7932ea4d926a8464a",
"size": "7155",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "core/domain/platform_feature_services.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "487903"
},
{
"name": "HTML",
"bytes": "1748056"
},
{
"name": "JavaScript",
"bytes": "1176446"
},
{
"name": "PEG.js",
"bytes": "71377"
},
{
"name": "Python",
"bytes": "14169091"
},
{
"name": "Shell",
"bytes": "2239"
},
{
"name": "TypeScript",
"bytes": "13316709"
}
],
"symlink_target": ""
}
|
# Modify the print statement according to
# the mini-project instructions
print "We want... a shrubbery!"
|
{
"content_hash": "125470e157c4b823ff58532127b1c2d1",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 42,
"avg_line_length": 22.4,
"alnum_prop": 0.7232142857142857,
"repo_name": "mr-ice/pipython",
"id": "8c9dc3ce5e48c8291e6679bc6d476fedade4447a",
"size": "151",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "InteractiveProgramming/user39_VpaZAK7n4p_0.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "71641"
}
],
"symlink_target": ""
}
|
from decimal import Decimal as D
from django.test import TestCase
from accounts.checkout.allocation import Allocations
class TestAllocations(TestCase):
def setUp(self):
self.allocations = Allocations()
def test_have_default_total_of_zero(self):
self.assertEqual(D('0.00'), self.allocations.total)
def test_has_items_interface(self):
self.allocations.add('A', D('10.00'))
for code, amount in self.allocations.items():
self.assertEqual('A', code)
self.assertEqual(D('10.00'), amount)
def test_allow_items_to_be_removed(self):
self.allocations.add('A', D('10.00'))
self.assertEqual(D('10.00'), self.allocations.total)
self.allocations.remove('A')
self.assertEqual(D('0.00'), self.allocations.total)
|
{
"content_hash": "ae9fe57fe6b43d99b8c4ff3b63ce7e91",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 60,
"avg_line_length": 29.962962962962962,
"alnum_prop": 0.6551297898640297,
"repo_name": "carver/django-account-balances",
"id": "485cc3a2450501b152eac9c99a063fd076c17851",
"size": "809",
"binary": false,
"copies": "6",
"ref": "refs/heads/remove-oscar",
"path": "tests/unit/allocation_tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "149608"
}
],
"symlink_target": ""
}
|
"""Test result objects."""
__all__ = [
'ExtendedToOriginalDecorator',
'MultiTestResult',
'TestResult',
'TextTestResult',
'ThreadsafeForwardingResult',
]
from testtools.testresult.real import (
ExtendedToOriginalDecorator,
MultiTestResult,
TestResult,
TextTestResult,
ThreadsafeForwardingResult,
)
|
{
"content_hash": "52ecf9719eae9fd0f83616e3acd6b20e",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 39,
"avg_line_length": 20.41176470588235,
"alnum_prop": 0.6829971181556196,
"repo_name": "postfix/pth-toolkit",
"id": "19f88bc8a34e77c0c7ff3b424d7da9f4a6325ddc",
"size": "416",
"binary": false,
"copies": "50",
"ref": "refs/heads/master",
"path": "lib/python2.7/site-packages/samba/external/testtools/testresult/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Perl",
"bytes": "5113"
},
{
"name": "Python",
"bytes": "1294855"
},
{
"name": "Shell",
"bytes": "1105"
}
],
"symlink_target": ""
}
|
"""
Metric index of a reference database
"""
import scripts
from amquery.core.distance import distances, FFP_JSD
from amquery.core.preprocessing.kmer_counter import KmerCounter
from amquery.core.sample import Sample
from amquery.core.storage import VpTree
from amquery.utils.config import get_sample_dir
class ReferenceTree:
def __init__(self, distance, preprocessor, storage, reference_files):
"""
:param distance: SampleDistance
:param preprocessor: Preprocessor
:param storage: MetricIndexStorage
"""
self._distance = distance
self._preprocessor = preprocessor
self._storage = storage
self._reference_files = reference_files
@staticmethod
def create(database_config):
"""
:param database_config: dict
:return: ReferenceTree
"""
assert 'kmer_size' in database_config
assert 'rep_tree' in database_config
kmer_size = database_config['kmer_size']
reference_files = database_config['rep_tree']
distance = distances[FFP_JSD]
preprocessor = KmerCounter(kmer_size)
storage = VpTree()
return ReferenceTree(distance, preprocessor, storage, reference_files)
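    # A sketch of the database_config expected by create(); the values shown
    # are hypothetical:
    #     {'kmer_size': 15, 'rep_tree': ['reference.fasta']}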
@staticmethod
def load(database_config):
"""
:param database_config: dict
:return: ReferenceTree
"""
return None
def build(self, input_file):
"""
:param input_file: str
:return: None
"""
sample_files = scripts.split_fasta(input_file, get_sample_dir())
samples = [Sample(sample_file) for sample_file in sample_files]
processed_samples = [self._preprocessor(sample) for sample in samples]
self._distance.add_samples(processed_samples)
self._storage.build(self._distance, processed_samples)
|
{
"content_hash": "1c5c59d66c71a291c1d7c9cdd64947df",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 78,
"avg_line_length": 30.360655737704917,
"alnum_prop": 0.6490280777537797,
"repo_name": "arriam-lab2/amquery",
"id": "a352580a654e922c33089b1e742f609159c5d374",
"size": "1852",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "amquery/core/refindex.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "3789"
},
{
"name": "Python",
"bytes": "52699"
}
],
"symlink_target": ""
}
|
bind = "0.0.0.0:8080"
workers = 3
daemon = False
loglevel = "debug"
proc_name = "Residency"
pidfile = "/tmp/Residency.pid"
worker_class = "gevent"
debug = True
django_settings = "Residency.settings"
|
{
"content_hash": "e8102e05e34a59db691ee579d5731718",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 38,
"avg_line_length": 22.11111111111111,
"alnum_prop": 0.7035175879396985,
"repo_name": "Kbman99/CodeWarriors",
"id": "0de252095cd2475ce8c7a3eb368e1ec28ee35c0d",
"size": "199",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "conf/gunicorn/local.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "319"
},
{
"name": "HTML",
"bytes": "2392"
},
{
"name": "JavaScript",
"bytes": "217"
},
{
"name": "Python",
"bytes": "13444"
},
{
"name": "Ruby",
"bytes": "165"
},
{
"name": "Shell",
"bytes": "7384"
}
],
"symlink_target": ""
}
|
import bz2
import collections
import os
import re
import tarfile
import time
try:
import lzma
except ImportError:
lzma = None
import arpy
import six
from .utils import gzip_compress
from .version import __version__
class AptMeta(collections.OrderedDict):
def __init__(self, data):
super(AptMeta, self).__init__()
self._data = data # For testing/debugging
last_key = None
for line in data.splitlines() if hasattr(data, 'splitlines') else data.readlines():
line = line.rstrip('\n')
if line[0].isspace():
if last_key:
self[last_key] += '\n' + line
else:
raise ValueError('Can not parse line: {0}'.format(repr(line)))
else:
last_key, value = line.split(':', 1)
value = value.lstrip(' ')
self[last_key] = value
def __str__(self):
return '\n'.join('{0}:{1}{2}'.format(key, '' if value[0] == '\n' else ' ', value) for key, value in six.iteritems(self) if value)
class AptPackage(AptMeta):
def __init__(self, filename, fileobj=None, data=None, pool_path=None):
self.name = filename
self._pool_path = pool_path # Used for manual pool path overrides
if not data:
if fileobj:
self.ar = arpy.Archive(filename or getattr(fileobj, 'name', None), fileobj)
else:
self.ar = arpy.Archive(filename)
self.ar.read_all_headers()
self.control_tar = tarfile.open('control.tar.gz', 'r:gz', fileobj=self.ar.archived_files['control.tar.gz'])
data = self.control_tar.extractfile('./control')
super(AptPackage, self).__init__(data)
@property
def pool_path(self):
return self._pool_path or self['Filename'] or 'pool/{}'.format(self.name)
class AptPackages(object):
def __init__(self, storage, data):
self.storage = storage
self.packages = {}
self._data = data # For testing/debugging
for buf in data.split('\n\n'):
if not buf.strip():
continue
pkg = AptPackage(None, data=buf)
self.packages[(pkg['Package'], pkg['Version'])] = pkg
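    # The data parsed above is a series of Debian control stanzas separated
    # by blank lines, e.g. (field values hypothetical):
    #     Package: mypkg
    #     Version: 1.0
    #     Architecture: amd64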
def add(self, pkg):
hashes = self.storage.hashes(pkg.pool_path)
pkg['Filename'] = pkg.pool_path
pkg['Size'] = str(hashes['size'].size)
pkg['MD5sum'] = hashes['md5'].hexdigest()
pkg['SHA1'] = hashes['sha1'].hexdigest()
pkg['SHA256'] = hashes['sha256'].hexdigest()
self.packages[(pkg['Package'], pkg['Version'])] = pkg
def __str__(self, extra_fn=None):
return '\n\n'.join(str(pkg) for key, pkg in sorted(six.iteritems(self.packages), key=lambda k: k[0]))
class AptRelease(AptMeta):
def __init__(self, storage, codename, *args, **kwargs):
self.storage = storage
self.codename = codename
super(AptRelease, self).__init__(*args, **kwargs)
if 'Components' not in self:
# Need to setup some defaults
self['Origin'] = 'Depot {0}'.format(__version__)
self['Date'] = '' # Will be regenerated, but lock the order
self['Codename'] = self.codename
# These are filled in using add_metadata()
self['Architectures'] = ''
self['Components'] = ''
self.hashes = {
'md5': self._parse_hashes('MD5Sum'),
'sha1': self._parse_hashes('SHA1'),
'sha256': self._parse_hashes('SHA256'),
}
def _parse_hashes(self, key):
hashes = collections.OrderedDict()
if key in self:
for line in self[key].splitlines():
line = line.strip()
if not line:
continue
hash, size, path = line.split(' ', 2)
hashes[path] = (hash, size)
return hashes
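    # The lines parsed above follow the apt Release checksum-block format,
    # e.g. (hash and size hypothetical):
    #     d41d8cd98f00b204e9800998ecf8427e 1234 main/binary-amd64/Packages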
def _compile_hashes(self, key):
return '\n' + '\n'.join(' {0} {1} {2}'.format(hash, size, path) for path, (hash, size) in six.iteritems(self.hashes[key]))
def update_hash(self, path):
hashes = self.storage.hashes('dists/{0}/{1}'.format(self.codename, path))
for hash_type in list(six.iterkeys(self.hashes)):
self.hashes[hash_type][path] = (hashes[hash_type].hexdigest(), str(hashes['size'].size))
def add_metadata(self, component, architecture):
components = set(s for s in self['Components'].split() if s)
components.add(component)
self['Components'] = ' '.join(sorted(components))
architectures = set(s for s in self['Architectures'].split() if s)
architectures.add(architecture)
self['Architectures'] = ' '.join(sorted(architectures))
def __str__(self):
self['MD5Sum'] = self._compile_hashes('md5')
self['SHA1'] = self._compile_hashes('sha1')
self['SHA256'] = self._compile_hashes('sha256')
if not self.get('Date'):
now = time.gmtime()
# The debian standard (Policy 4.4) really does specify the English labels
day_of_week = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'][now.tm_wday]
month = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'][now.tm_mon - 1]
self['Date'] = time.strftime('{0}, %d {1} %Y %H:%M:%S UTC'.format(day_of_week, month), now)
return super(AptRelease, self).__str__()
class AptRepository(object):
# ex. mypgk@1.0
COPY_SPEC_RE = re.compile(r'^([\w_-]+)@(.+?)$')
def __init__(self, storage, gpg, codename, component='main', architecture=None):
self.storage = storage
self.gpg = gpg
self.codename = codename
self.component = component
self.architecture = architecture
self.dirty_packages = {} # arch: [pkg,+]
self.dirty_sources = False
def add_package(self, path, fileobj=None, force=False, pool_path=None):
fileobj = fileobj or open(path, 'rb')
path = os.path.basename(path)
pkg = AptPackage(path, fileobj, pool_path=pool_path)
# Check that we have an arch if needed
arch = pkg['Architecture']
if pkg['Architecture'] == 'any':
arch = self.architecture
if not arch:
                raise ValueError('Architecture required when adding packages for "any"')
# Check that the package doesn't already exist
if not force and pkg.pool_path in self.storage:
return False
# Stream up the actual package file
fileobj.seek(0, 0)
self.storage.upload(pkg.pool_path, fileobj)
self.dirty_packages.setdefault(arch, []).append(pkg)
return True
def copy_package(self, package):
md = self.COPY_SPEC_RE.match(package)
if not md:
            raise ValueError('Unable to parse {0}'.format(repr(package)))
#package_name = md.group(1)
#package_version = md.group(2)
#package_component = md.group(3)
raise NotImplementedError('TODO finish this')
def commit_package_metadata(self, arch, pkgs):
# Update the Packages file
packages_path = 'dists/{0}/{1}/binary-{2}/Packages'.format(self.codename, self.component, arch)
packages = AptPackages(self.storage, self.storage.download(packages_path, skip_hash=True) or '')
for pkg in pkgs:
packages.add(pkg)
packages_raw = str(packages)
self.storage.upload(packages_path, packages_raw)
self.storage.upload(packages_path+'.gz', gzip_compress(packages_raw))
self.storage.upload(packages_path+'.bz2', bz2.compress(packages_raw))
if lzma:
self.storage.upload(packages_path+'.lzma', lzma.compress(packages_raw))
def commit_sources_metadata(self):
# Update the Sources file
sources_path = 'dists/{0}/{1}/source/Sources'.format(self.codename, self.component)
if sources_path in self.storage:
return
sources_content = ''
self.storage.upload(sources_path, sources_content)
self.storage.upload(sources_path+'.gz', gzip_compress(sources_content))
self.storage.upload(sources_path+'.bz2', bz2.compress(sources_content))
if lzma:
self.storage.upload(sources_path+'.lzma', lzma.compress(sources_content))
self.dirty_sources = True
def commit_release_metadata(self, archs):
# Update Release
release_path = 'dists/{0}/Release'.format(self.codename)
release = AptRelease(self.storage, self.codename, self.storage.download(release_path, skip_hash=True) or '')
for arch in archs:
release.add_metadata(self.component, arch)
release_packages_path = '{0}/binary-{1}/Packages'.format(self.component, arch)
release.update_hash(release_packages_path)
release.update_hash(release_packages_path+'.gz')
release.update_hash(release_packages_path+'.bz2')
if lzma:
release.update_hash(release_packages_path+'.lzma')
if self.dirty_sources:
release_sources_path = '{0}/source/Sources'.format(self.component)
release.update_hash(release_sources_path)
release.update_hash(release_sources_path+'.gz')
release.update_hash(release_sources_path+'.bz2')
if lzma:
release.update_hash(release_sources_path+'.lzma')
self.dirty_sources = False
# Force the date to regenerate
release['Date'] = None
release_raw = str(release)
self.storage.upload(release_path, release_raw)
# GPG signing
if self.gpg:
# Fun fact, even debian's own tools don't seem to support this InRelease file
in_release_path = 'dists/{0}/InRelease'.format(self.codename)
self.storage.upload(in_release_path, self.gpg.sign(release_raw))
self.storage.upload(release_path+'.gpg', self.gpg.sign(release_raw, detach=True))
# Upload the pubkey to be nice
self.storage.upload('pubkey.gpg', self.gpg.public_key())
def commit_metadata(self):
for arch, packages in six.iteritems(self.dirty_packages):
self.commit_package_metadata(arch, packages)
self.commit_sources_metadata()
self.commit_release_metadata(six.iterkeys(self.dirty_packages))
self.dirty_packages = {}
|
{
"content_hash": "0d92a217fb9c17f43ef3dbf8161fb255",
"timestamp": "",
"source": "github",
"line_count": 253,
"max_line_length": 137,
"avg_line_length": 41.3399209486166,
"alnum_prop": 0.5882015489052491,
"repo_name": "coderanger/depot",
"id": "52059a94f8a69484ff6c1b989d5fe2ab42e7c873",
"size": "10473",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "depot/apt.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "48821"
}
],
"symlink_target": ""
}
|
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'aino-utkik'
copyright = u'2011, Mikko Hellsing'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.7.1'
# The full version, including alpha/beta/rc tags.
release = '0.7.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'aino-utkikdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'aino-utkik.tex', u'aino-utkik Documentation',
u'Mikko Hellsing', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'aino-utkik', u'aino-utkik Documentation',
[u'Mikko Hellsing'], 1)
]
|
{
"content_hash": "60b4b676aeaac058db4a395d5d8e714a",
"timestamp": "",
"source": "github",
"line_count": 203,
"max_line_length": 80,
"avg_line_length": 32.62068965517241,
"alnum_prop": 0.7053760193295077,
"repo_name": "aino/aino-utkik",
"id": "bf7765e8a28e4eafc676de8356ca60f0d5c3218c",
"size": "7043",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "18969"
}
],
"symlink_target": ""
}
|
'''
@author: Frank
'''
import ConfigParser
class Parser(ConfigParser.SafeConfigParser):
def get(self, section, option, default=None):
try:
return ConfigParser.SafeConfigParser.get(self, section, option)
except ConfigParser.NoOptionError:
return default
|
{
"content_hash": "a955360817737be290bfa8171ba6b1b4",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 75,
"avg_line_length": 25.75,
"alnum_prop": 0.656957928802589,
"repo_name": "SoftwareKing/zstack-utility",
"id": "6c1255e72674dce6a5b02f66098c82e62e6fa104",
"size": "309",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "zstacklib/zstacklib/utils/misc.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "AGS Script",
"bytes": "1147"
},
{
"name": "Puppet",
"bytes": "10604"
},
{
"name": "Python",
"bytes": "1379076"
},
{
"name": "Shell",
"bytes": "116426"
}
],
"symlink_target": ""
}
|
import optparse
import os
import pprint
import re
import shlex
import subprocess
import sys
import shutil
import string
# gcc and g++ as defaults matches what GYP's Makefile generator does,
# except on OS X.
CC = os.environ.get('CC', 'cc' if sys.platform == 'darwin' else 'gcc')
CXX = os.environ.get('CXX', 'c++' if sys.platform == 'darwin' else 'g++')
root_dir = os.path.dirname(__file__)
sys.path.insert(0, os.path.join(root_dir, 'tools', 'gyp', 'pylib'))
from gyp.common import GetFlavor
# parse our options
parser = optparse.OptionParser()
valid_os = ('win', 'mac', 'solaris', 'freebsd', 'openbsd', 'linux', 'android')
valid_arch = ('arm', 'arm64', 'ia32', 'mips', 'mipsel', 'x32', 'x64', 'x86')
valid_arm_float_abi = ('soft', 'softfp', 'hard')
valid_mips_arch = ('loongson', 'r1', 'r2', 'r6', 'rx')
valid_mips_fpu = ('fp32', 'fp64', 'fpxx')
valid_mips_float_abi = ('soft', 'hard')
valid_intl_modes = ('none', 'small-icu', 'full-icu', 'system-icu')
parser.add_option('--debug',
action='store_true',
dest='debug',
help='also build debug build')
parser.add_option('--dest-cpu',
action='store',
dest='dest_cpu',
choices=valid_arch,
help='CPU architecture to build for ({0})'.format(', '.join(valid_arch)))
parser.add_option('--xcode',
action='store_true',
dest='use_xcode',
help='Generate xcode files')
parser.add_option('--dest-os',
action='store',
dest='dest_os',
choices=valid_os,
help='operating system to build for ({0})'.format(', '.join(valid_os)))
(options, args) = parser.parse_args()
def warn(msg):
warn.warned = True
prefix = '\033[1m\033[93mWARNING\033[0m' if os.isatty(1) else 'WARNING'
print('%s: %s' % (prefix, msg))
# track if warnings occurred
warn.warned = False
def b(value):
"""Returns the string 'true' if value is truthy, 'false' otherwise."""
if value:
return 'true'
else:
return 'false'
def cc_macros():
"""Checks predefined macros using the CC command."""
try:
p = subprocess.Popen(shlex.split(CC) + ['-dM', '-E', '-'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except OSError:
print '''jc3-handling-editor configure error: No acceptable C compiler found!
Please make sure you have a C compiler installed on your system and/or
consider adjusting the CC environment variable if you installed
it in a non-standard prefix.
'''
sys.exit()
p.stdin.write('\n')
out = p.communicate()[0]
out = str(out).split('\n')
k = {}
for line in out:
lst = shlex.split(line)
if len(lst) > 2:
key = lst[1]
val = lst[2]
k[key] = val
return k
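# A typical line of `cc -dM -E -` output parsed by cc_macros() looks like:
#   #define __x86_64__ 1
# which shlex-splits into ['#define', '__x86_64__', '1'].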
def host_arch_cc():
"""Host architecture check using the CC command."""
k = cc_macros()
matchup = {
'__aarch64__' : 'arm64',
'__i386__' : 'ia32',
'__x86_64__' : 'x64',
}
rtn = 'ia32' # default
for i in matchup:
if i in k and k[i] != '0':
rtn = matchup[i]
break
return rtn
def host_arch_win():
"""Host architecture check using environ vars (better way to do this?)"""
observed_arch = os.environ.get('PROCESSOR_ARCHITECTURE', 'x86')
arch = os.environ.get('PROCESSOR_ARCHITEW6432', observed_arch)
matchup = {
'AMD64' : 'x64',
'x86' : 'ia32',
'arm' : 'arm',
'mips' : 'mips',
}
return matchup.get(arch, 'ia32')
def write(filename, data):
filename = os.path.join(root_dir, filename)
print 'creating ', filename
f = open(filename, 'w+')
f.write(data)
do_not_edit = '# Do not edit. Generated by the configure script.\n'
output = {
'variables': { 'python': sys.executable,
'deps_path': os.path.relpath('deps/') },
'include_dirs': [],
'libraries': [],
'defines': [],
'cflags': [],
}
host_arch = 'x64'
target_arch = options.dest_cpu or host_arch
# ia32 is preferred by the build tools (GYP) over x86 even if we prefer the latter
# the Makefile resets this to x86 afterward
if target_arch == 'x86':
target_arch = 'ia32'
output['variables']['host_arch'] = host_arch
output['variables']['target_arch'] = target_arch
# Should we add a compiler check here?
# determine the "flavor" (operating system) we're building for,
# leveraging gyp's GetFlavor function
flavor_params = {}
if (options.dest_os):
flavor_params['flavor'] = options.dest_os
flavor = GetFlavor(flavor_params)
# variables should be a root level element,
# move everything else to target_defaults
variables = output['variables']
del output['variables']
# make_global_settings should be a root level element too
if 'make_global_settings' in output:
make_global_settings = output['make_global_settings']
del output['make_global_settings']
else:
make_global_settings = False
output = {
'variables': variables,
'target_defaults': output,
}
if make_global_settings:
output['make_global_settings'] = make_global_settings
pprint.pprint(output, indent=2)
write('config.gypi', do_not_edit +
pprint.pformat(output, indent=2) + '\n')
config = {
'BUILDTYPE': 'Debug' if options.debug else 'Release',
# 'USE_XCODE': str(int(options.use_xcode or 0)),
'PYTHON': sys.executable,
}
config = '\n'.join(map('='.join, config.iteritems())) + '\n'
write('config.mk',
'# Do not edit. Generated by the configure script.\n' + config)
gyp_args = [sys.executable, 'tools/gyp_jc3_handling_editor.py', '--no-parallel']
if options.use_xcode:
gyp_args += ['-f', 'xcode']
elif flavor == 'win' and sys.platform != 'msys':
gyp_args += ['-f', 'msvs', '-G', 'msvs_version=auto']
else:
gyp_args += ['-f', 'make-' + flavor]
gyp_args += args
if warn.warned:
warn('warnings were emitted in the configure phase')
sys.exit(subprocess.call(gyp_args))
|
{
"content_hash": "a27d4bf70c28778af4dcf756048be3aa",
"timestamp": "",
"source": "github",
"line_count": 224,
"max_line_length": 84,
"avg_line_length": 25.830357142857142,
"alnum_prop": 0.6310058762530245,
"repo_name": "xforce/jc3-handling-editor",
"id": "f34ec77758a3462ac877d4037fcf2388f2aedac0",
"size": "5808",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "configure.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "611497"
},
{
"name": "C++",
"bytes": "149269"
},
{
"name": "Makefile",
"bytes": "115"
},
{
"name": "Python",
"bytes": "67046"
}
],
"symlink_target": ""
}
|
from datetime import datetime
import pytz
import arrow
from airy import settings
timezone = pytz.timezone(settings.TIMEZONE)
def tz_now() -> datetime:
return arrow.now(settings.TIMEZONE).datetime
def day_beginning(dt: datetime) -> datetime:
return arrow.get(dt).floor('day').datetime
def week_beginning(dt: datetime) -> datetime:
return arrow.get(dt).floor('week').datetime
def is_day_beginning(dt: datetime) -> bool:
dt_ = arrow.get(dt)
return dt_ == dt_.to(settings.TIMEZONE).floor('day')
def is_day_end(dt: datetime) -> bool:
dt_ = arrow.get(dt)
return (dt_.replace(microsecond=0) ==
dt_.to(settings.TIMEZONE).ceil('day').replace(microsecond=0))
def localize(dt: datetime) -> datetime:
return dt.astimezone(tz=timezone)
|
{
"content_hash": "6baf10a36c065274220e200656e277f0",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 73,
"avg_line_length": 22.34285714285714,
"alnum_prop": 0.6867007672634271,
"repo_name": "xuhcc/airy",
"id": "70dc624f2edd98cf9d8653d97003bcf0e7863a08",
"size": "782",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "airy/utils/date.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5531"
},
{
"name": "HTML",
"bytes": "15388"
},
{
"name": "JavaScript",
"bytes": "98979"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "97742"
},
{
"name": "SaltStack",
"bytes": "1572"
}
],
"symlink_target": ""
}
|
"""
dfsbuild.py
单Git仓库多Dockerfile构建工具,提高了构建效率
快速使用:
chmod +x ./dfsbuild.py
只构建Git最近一次修改的Dockerfile
./dfsbuild.py -a auto -r registry.cn-shanghai.aliyuncs.com/userename
构建所有的Dockerfile
./dfsbuild.py -a all -r registry.cn-shanghai.aliyuncs.com/userename
构建特定的Dockerfile
./dfsbuild.py -a dfs -r registry.cn-shanghai.aliyuncs.com/userename nginx
解决的问题:
通常我们用大量的基础Dockerfile需要维护
很多时候这些大量的Dockerfile会放在同一个Git仓库当中
当Git push时Git server的webhook功能去触发CI(Jenkins等)系统
CI系统会去自动docker build镜像
产生的问题是每次都会docker build全部的Dockerfile文件
构建的过程中虽然会使用缓存,但实际的构建时间还是不能接受的
本工具可以自动处理只构建Git最近一次修改的Dockerfile
从而大大提高了单Git仓库多Dockerfile的docker build构建速度
关键点:
git最近一次修改的Dockerfile
git --no-pager whatchanged --name-only --oneline -1
参看gitLastDockerFiles函数实现
"""
import os
import argparse
import datetime
def walkDockerfiles(path, splitFirt=True):
    """ Walk a directory and collect all Dockerfiles.
    Arguments:
        path {string} -- directory path
    Keyword Arguments:
        splitFirt {bool} -- strip the leading path prefix from file names (default: {True})
    Returns:
        array -- list of Dockerfile paths
    """
files_list = []
    if not os.path.exists(path):
        # Return an empty list (not -1) so callers can safely len() the result.
        return files_list
for root, sub_dirs, files in os.walk(path):
for filename in files:
if isDockerfile(filename):
fullFileName = os.path.join(root, filename)
if splitFirt:
fullFileName = fullFileName.replace(path,"")
                files_list.append(fullFileName)  # root dir and file name joined into a full path
return files_list
def isDockerfile(filename):
dockerfileStr = "Dockerfile"
if dockerfileStr in filename:
return True
return False
def gitLastDockerFiles():
    """ Dockerfiles changed by the most recent Git commit.
    Returns:
        array -- Dockerfiles changed by the last commit
    """
gitlastcmd = "git --no-pager whatchanged --name-only --oneline -1"
os.chdir(os.path.dirname(os.path.realpath(__file__)))
process = os.popen(gitlastcmd) # return file
gitlastOut = process.read()
process.close()
lines = gitlastOut.split('\n')
last_files = []
for line in lines:
line = line.strip('\n')
if isDockerfile(line):
last_files.append(line)
return last_files
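# Sample output of the git command above (commit hash and message are
# hypothetical); only lines containing "Dockerfile" are kept:
#   1a2b3c4 update nginx image
#   nginx/alpine/Dockerfile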
def dockerDo(df="", action="build", registry=""):
if df == "" or registry == "":
printMsg("E","输入的参数不完整")
"""tag生成策略
nginx/Dockerfile >> registry/nginx:latest
nginx/alpine/Dockerfile >> registry/nginx:alpine
php/7.2-fpm-alpine/Dockerfile >> registry/php:7.2-fpm-alpine
目前只支持两级目录
"""
dfpath = df.replace('/Dockerfile','')
tagArr = dfpath.split('/')
tagArrLen = len(tagArr)
if 1 == tagArrLen:
tag = registry + "/" + tagArr[0] + ":latest"
elif 2 <= tagArrLen:
tag = registry + "/" + tagArr[0] + ":" + tagArr[1]
cmd = "docker info"
if action == "build":
cmd = 'docker build -t ' + tag + ' ./' + dfpath
elif action == "push":
cmd = 'docker push ' + tag
os.system(cmd)
def scan_files(directory,prefix=None,postfix=None):
files_list=[]
for root, sub_dirs, files in os.walk(directory):
for special_file in files:
if postfix:
if special_file.endswith(postfix):
files_list.append(os.path.join(root,special_file))
elif prefix:
if special_file.startswith(prefix):
files_list.append(os.path.join(root,special_file))
else:
files_list.append(os.path.join(root,special_file))
return files_list
def _parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
'dfs',
nargs='*',
        help='relative Dockerfile paths; multiple values allowed, separated by spaces',
metavar='dfs'
)
parser.add_argument(
'-a', '--action',
default='auto',
help="设置build Dockerfile的范围 \
auto(默认)为自动模式取git最后一次修改的Dockerfile \
all全部的Dockerfile \
dfs指定的Dockerfile",
metavar='action',
)
parser.add_argument(
'-r', '--registry',
default='index.docker.io',
help="定义docker仓库地址",
metavar='registry',
)
parser.add_argument(
'-p', '--push',
default=True,
help="build完成是否运行docker push",
metavar='push',
)
parser.add_argument(
'-v', '--version',
action='version',
version='%(prog)s 1.0.0',
)
return parser.parse_args()
def printMsg(level="I",msg=""):
print(datetime.datetime.now().isoformat() + " ["+level+"] "+msg)
def main():
parser = _parse_args()
dfs = parser.dfs
registry = parser.registry
push = parser.push
action = parser.action
if action == "auto":
dfs = gitLastDockerFiles()
if len(dfs) < 1:
printMsg("I", "最近1次无Dockerfile修改")
elif action == "all":
dfs = walkDockerfiles("./")
elif action == "dfs":
pass
else:
printMsg("E","-a 错误,输入的参数,未定义")
if len(dfs) > 0:
for df in dfs:
dockerDo(df, 'build', registry)
if True == push:
dockerDo(df, 'push', registry)
else:
printMsg("E", "Dockerfile未找到")
if __name__ == '__main__':
main()
|
{
"content_hash": "2bfda485292e1eb633c59a17b3044b9e",
"timestamp": "",
"source": "github",
"line_count": 198,
"max_line_length": 74,
"avg_line_length": 26.38888888888889,
"alnum_prop": 0.5915789473684211,
"repo_name": "wwek/docker",
"id": "9cd838a8b432de90cda224e48d3114010f1e16b0",
"size": "5955",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dfsbuild.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "54431"
},
{
"name": "Python",
"bytes": "5955"
},
{
"name": "Shell",
"bytes": "366"
}
],
"symlink_target": ""
}
|
import threading, MySQLdb
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from config import healthy_check_server_port as port
mysql_host = 'localhost'
mysql_port = 3306
mysql_user = 'mysql'
mysql_passwd = ''
# Galera/wsrep node state query; the handler below only checks that this
# query can be executed successfully.
CHECK_QUERY = "SHOW GLOBAL STATUS WHERE variable_name='wsrep_local_state';"
class HealthyCheckHTTPHandler(BaseHTTPRequestHandler):
def response_OK(self):
self.send_response(200)
#self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write('MySQL is available.')
def response_NG(self, error):
self.send_response(503)
self.end_headers()
self.wfile.write('MySQL is unavailable.' + error)
def check_stat(self):
conn = None
error = None
try:
conn = MySQLdb.connect(host=mysql_host, port=mysql_port,
user=mysql_user, passwd=mysql_passwd)
cursor = conn.cursor()
cursor.execute(CHECK_QUERY)
except Exception, e:
error = str(e)
if conn:
conn.close()
return error
def do_GET(self):
error = self.check_stat()
if self.path == '/state':
if error:
self.response_NG(error)
else:
self.response_OK()
else:
self.response_NG('wrong path')
class HealthyCheckServer(threading.Thread):
def run(self):
self._running = True
server_address = ('', port)
httpd = HTTPServer(server_address, HealthyCheckHTTPHandler)
while self._running:
httpd.handle_request()
def mysql_info(self, host, port, user, passwd):
global mysql_host,mysql_port,mysql_user,mysql_passwd
mysql_host = host
mysql_port = port
mysql_user = user
mysql_passwd = passwd
def cancel(self):
self._running = False
if __name__ == '__main__':
import time
sv = HealthyCheckServer()
sv.mysql_info('localhost', 3306, 'sst', 'sst')
sv.start()
while True:
exit = raw_input('exit?y/n:')
if exit == 'y':
sv.cancel()
break
time.sleep(100)
|
{
"content_hash": "b06561a8032ff9a2617abc1ce74e3d62",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 73,
"avg_line_length": 28.813333333333333,
"alnum_prop": 0.5881536325775104,
"repo_name": "kuipertan/rdbs",
"id": "6183a5cefe3f23be33f36de83ee6227eb4a40d58",
"size": "2161",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/healthyCheck.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "12821"
},
{
"name": "Shell",
"bytes": "650"
}
],
"symlink_target": ""
}
|
import argparse
import time
import lib.asciicanvas as canvas
import itertools
import os
import tempfile
from sys import stderr
from lib.constants import *
from lib.encoding import *
from lib.vectorizer import *
from lib.statements import *
from lib.constants import ConstantsInvoker
from lib.extended_math import ExtendedMathInvoker
from behaviour.status import Status
from behaviour.environment import Environment, GlobalEnvironment
ExtendedMath = ExtendedMathInvoker()
Constants = ConstantsInvoker()
recent_inputs = []
global_env = GlobalEnvironment()
def get_input():
a = input()
if a[:3] == "\"\"\"":
a = a[3:]
while a[-3:] != "\"\"\"":
a += "\n" + input()
a = a[:-3]
if is_array(a):
a = apply_safe(ast.literal_eval, a)
recent_inputs.append(a)
return a
def opt_input():
try:
return get_input()
except:
return recent_inputs[-1]
def is_array(array):
if not array:
return False
array = str(array)
if array[0] == "[" and array[-1] == "]":
return True
return False
class Osabie:
def __init__(self,
commands: str,
debug: bool=False,
safe_mode: bool=False,
suppress_print: bool=False,
environment: Environment=None,
stack: list=None,
depth: int=0):
# Params
self.commands = commands
self.debug = debug
self.safe_mode = safe_mode
self.suppress_print = suppress_print
self.depth = depth
# Pointer position
self.pointer_position = -1
# Configurable parameters
self.stack = stack if stack else []
self.environment = environment if environment else Environment() # type: Environment
def reset(self):
global global_env
global_env = GlobalEnvironment()
self.environment = Environment()
def interp(self) -> (list, Status):
while self.pointer_position < len(self.commands) - 1:
try:
status = self.step()
if status == Status.BREAK or status == Status.EXIT:
if self.debug:
print("Status was", status)
return self.stack, status
except Exception as e:
if self.debug:
print(e)
return self.stack, Status.OK
def run(self) -> (list, Status):
self.interp()
if not global_env.has_printed and not self.suppress_print:
            if self.stack:
                print(self.stack[-1])
elif ".\u02c6" in self.commands:
if len(recent_inputs) == 0:
try:
a = int(get_input())
except:
a = -1
print(global_env.global_array[a])
elif ".^" in self.commands:
if len(recent_inputs) == 0:
try:
a = int(get_input())
except:
a = -1
print(global_env.global_array[a])
elif "\u00b5" in self.commands:
print(self.environment.range_variable)
elif "\u02c6" in self.commands:
print(global_env.global_array)
elif "\u039b" in self.commands:
print(canvas.canvas_dict_to_string(self.environment.current_canvas))
elif "\u00bc" in self.commands:
print(global_env.counter_variable)
if self.debug:
print("stack > " + str(self.stack))
def pop_stack(self, default=None):
if self.stack:
return self.stack.pop()
try:
a = opt_input()
except:
if default is None:
raise
a = default
if is_array(a):
a = ast_int_eval(a)
return a
def __run_subprogram(self, commands: str, environment: Environment=None, stack: list=None) -> (list, Status):
env = environment if environment else self.environment
stk = stack if stack else self.stack
sub_program = Osabie(commands, debug=self.debug, safe_mode=self.safe_mode,
environment=env, stack=stk, depth=self.depth + 1)
return sub_program.interp()
def value(self, commands: str, stack: list):
curr_stack, _ = self.__run_subprogram(commands, stack=stack)
return curr_stack[-1] if curr_stack else None
def step(self):
global global_env
self.pointer_position += 1
current_command = self.commands[self.pointer_position]
status = Status.OK
if current_command == ".":
self.pointer_position += 1
current_command += self.commands[self.pointer_position]
if current_command == "\u017e":
self.pointer_position += 1
current_command += self.commands[self.pointer_position]
if current_command == "\u00c5":
self.pointer_position += 1
current_command += self.commands[self.pointer_position]
if self.debug:
try:
print()
print("----------------------------------")
print("Depth:", self.depth)
print("Stack:", self.stack)
print("Current command:", current_command)
except:
pass
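        # Command: h
        # pop a
        # push hex(a)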
if current_command == "h":
a = self.pop_stack(default="")
self.stack.append(vectorized_evaluation(a, 16, convert_to_base))
# Command: b
# pop a
# push bin(a)
elif current_command == "b":
a = self.pop_stack(default="")
self.stack.append(vectorized_evaluation(a, 2, convert_to_base))
# Command: B
# pop a,b
# push base(a, b)
elif current_command == "B":
b = self.pop_stack(default="")
a = self.pop_stack(default="")
self.stack.append(vectorized_evaluation(a, b, convert_to_base))
# Command: в
# pop a,b
# push a converted to base b (arbitrary)
elif current_command == "\u0432":
b = self.pop_stack(default="")
a = self.pop_stack(default="")
self.stack.append(vectorized_evaluation(a, b, convert_to_base_arbitrary))
        # Command: [0-9]+
        # Push the corresponding digit value onto the stack,
# as a single number if multiple digits are consecutive
elif is_digit_value(current_command):
temp_number = ""
temp_number += current_command
temp_position = self.pointer_position
while temp_position < len(self.commands) - 1:
temp_position += 1
try:
current_command = self.commands[temp_position]
except:
break
if is_digit_value(current_command):
temp_number += current_command
self.pointer_position += 1
else:
break
self.stack.append(temp_number)
# Command: "
# Start/end string literal
elif current_command == "\"":
temp_string = ""
temp_string_2 = ""
temp_position = self.pointer_position
while temp_position < len(self.commands) - 1:
temp_position += 1
try:
current_command = self.commands[temp_position]
except:
break
if current_command == "\"":
break
# String interpolation (command: ÿ)
elif current_command == "\u00ff":
temp_string += str(self.pop_stack(default=""))
self.pointer_position += 1
else:
temp_string += current_command
self.pointer_position += 1
self.pointer_position += 1
self.stack.append(temp_string)
# Command: ’
# start/end of a compressed string (no implicit space)
elif current_command == "\u2019":
temp_string = ""
temp_string_2 = ""
temp_index = ""
temp_position = self.pointer_position
while temp_position < len(self.commands) - 1:
temp_position += 1
try:
current_command = self.commands[temp_position]
if dictionary.unicode_index.__contains__(
current_command):
temp_index += str(dictionary.unicode_index.index(
current_command)).rjust(2, "0")
temp_position += 1
self.pointer_position += 2
current_command = self.commands[temp_position]
temp_index += str(dictionary.unicode_index.index(
current_command)).rjust(2, "0")
temp_string += dictionary.dictionary[
int(temp_index)]
temp_index = ""
elif current_command == "\u2019":
self.pointer_position += 1
break
# String interpolation (command: ÿ)
elif current_command == "\u00ff":
temp_string += str(self.pop_stack(default=""))
self.pointer_position += 1
else:
temp_string += current_command
self.pointer_position += 1
except:
self.pointer_position += 1
break
if self.debug:
print("{} with {}".format(
self.pointer_position, hex(ord(current_command))
))
self.stack.append(temp_string)
# Command: ‘
# Start/end of a compressed string (upper)
elif current_command == "\u2018":
temp_string = ""
temp_string_2 = ""
temp_index = ""
temp_position = self.pointer_position
while temp_position < len(self.commands) - 1:
temp_position += 1
try:
current_command = self.commands[temp_position]
if dictionary.unicode_index.__contains__(
current_command):
temp_index += str(dictionary.unicode_index.index(
current_command)).rjust(2, "0")
temp_position += 1
self.pointer_position += 2
current_command = self.commands[temp_position]
temp_index += str(dictionary.unicode_index.index(
current_command)).rjust(2, "0")
if temp_string == "":
temp_string += dictionary.dictionary[
int(temp_index)].upper()
else:
temp_string += " " + dictionary.dictionary[
int(temp_index)].upper()
temp_index = ""
elif current_command == "\u2018":
self.pointer_position += 1
break
# String interpolation (command: ÿ)
elif current_command == "\u00ff":
temp_string += str(self.pop_stack(default=""))
self.pointer_position += 1
else:
temp_string += current_command
self.pointer_position += 1
except:
self.pointer_position += 1
break
if self.debug:
print("{} with {}".format(
self.pointer_position, hex(ord(current_command))
))
self.stack.append(temp_string)
# Command: “
# Start/end of a compressed string (normal)
elif current_command == "\u201c":
temp_string = ""
temp_string_2 = ""
temp_index = ""
temp_position = self.pointer_position
while temp_position < len(self.commands) - 1:
temp_position += 1
try:
current_command = self.commands[temp_position]
if dictionary.unicode_index.__contains__(
current_command):
temp_index += str(dictionary.unicode_index.index(
current_command)).rjust(2, "0")
temp_position += 1
self.pointer_position += 2
current_command = self.commands[temp_position]
temp_index += str(dictionary.unicode_index.index(
current_command)).rjust(2, "0")
if temp_string == "":
temp_string += dictionary.dictionary[
int(temp_index)]
else:
temp_string += " " + dictionary.dictionary[
int(temp_index)]
temp_index = ""
elif current_command == "\u201c":
self.pointer_position += 1
break
# String interpolation (command: ÿ)
elif current_command == "\u00ff":
temp_string += str(self.pop_stack(default=""))
self.pointer_position += 1
else:
temp_string += current_command
self.pointer_position += 1
except:
self.pointer_position += 1
break
if self.debug:
print("{} with {}".format(
self.pointer_position, hex(ord(current_command))
))
self.stack.append(temp_string)
# Command: ”
# Start/end of a compressed string (title)
elif current_command == "\u201d":
temp_string = ""
temp_string_2 = ""
temp_index = ""
temp_position = self.pointer_position
while temp_position < len(self.commands) - 1:
temp_position += 1
try:
current_command = self.commands[temp_position]
if dictionary.unicode_index.__contains__(
current_command):
temp_index += str(dictionary.unicode_index.index(
current_command)).rjust(2, "0")
temp_position += 1
self.pointer_position += 2
current_command = self.commands[temp_position]
temp_index += str(dictionary.unicode_index.index(
current_command)).rjust(2, "0")
if temp_string == "":
temp_string += dictionary.dictionary[
int(temp_index)].title()
else:
temp_string += " " + dictionary.dictionary[
int(temp_index)].title()
temp_index = ""
elif current_command == "\u201d":
self.pointer_position += 1
break
# String interpolation (command: ÿ)
elif current_command == "\u00ff":
temp_string += str(self.pop_stack(default=""))
self.pointer_position += 1
else:
temp_string += current_command
self.pointer_position += 1
except:
self.pointer_position += 1
break
if self.debug:
print("{} with {}".format(
self.pointer_position, hex(ord(current_command))
))
self.stack.append(temp_string)
# Command: ª
# pop a
# push sentence_cased(a)
elif current_command == "\u00aa":
a = self.pop_stack(default="")
self.stack.append(single_vectorized_evaluation(a, sentence_case, str))
elif current_command == ".Œ":
b = self.pop_stack(default=2)
a = self.pop_stack(default=[])
if type(a) is not list:
a = str(a)
self.stack.append(divide_into(a[::-1], ast_int_eval(b)))
# Command: ù
# pop a,b
# push a with elements of length b
elif current_command == "\u00f9":
b = self.pop_stack(default=0)
a = self.pop_stack(default=[])
if type(b) is not list:
temp_list = []
try:
b_length = int(ast_int_eval(b))
except:
b_length = len(b)
for Q in a:
lenQ = len(Q if type(Q) is list else str(Q))
if lenQ == b_length:
temp_list.append(Q)
self.stack.append(temp_list)
else:
temp_list_2 = []
for R in b:
temp_list = []
try:
R_length = int(ast_int_eval(R))
except:
R_length = len(R)
for Q in a:
lenQ = len(Q if type(Q) is list else str(Q))
if lenQ == R_length:
temp_list.append(Q)
temp_list_2.append(temp_list)
self.stack.append(temp_list_2)
# Command: Λ
# pop a,b,c
# store a canvas with {a: num, b: filler, c: pattern}
elif current_command == "\u039B":
pattern = self.pop_stack()
filler = self.pop_stack()
number_pattern = self.pop_stack()
self.environment.current_canvas, self.environment.current_cursor = canvas.canvas_code_to_string(
number_pattern, pattern, filler, self.environment.current_canvas, self.environment.current_cursor
)
# Command: .Λ
# pop a,b,c
# store a canvas with {a: num, b: filler, c: pattern}
# and push the string to the self.stack
elif current_command == ".\u039B":
pattern = self.pop_stack()
filler = self.pop_stack()
number_pattern = self.pop_stack()
self.environment.current_canvas, self.environment.current_cursor = canvas.canvas_code_to_string(
number_pattern, pattern, filler, self.environment.current_canvas, self.environment.current_cursor
)
self.stack.append(canvas.canvas_dict_to_string(self.environment.current_canvas))
# Command: ∊
# pop a
# push vertically mirrored a
elif current_command == "\u220A":
a = self.pop_stack(default="")
self.stack.append(apply_safe(vertical_mirror, a))
# Command: .∊
# pop a
# push intersected vertical mirror a
elif current_command == ".\u220A":
a = self.pop_stack(default="")
self.stack.append(apply_safe(vertical_intersected_mirror, a))
# Command: ∍
# pop a,b
# push a extended/shortened to length b
elif current_command == "\u220D":
b = self.pop_stack(default=0)
a = self.pop_stack(default="")
self.stack.append(apply_safe(shape_like, a, b))
# Command: ā
# get a
# push range(1, len(a) + 1)
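        # e.g. "abc" -> pushes "abc" back, then [1, 2, 3]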
elif current_command == "\u0101":
a = self.pop_stack(default="")
self.stack.append(a)
if type(a) is not list:
a = str(a)
self.stack.append(apply_safe(lambda a: list(range(1, len(a) + 1)), a))
# Command: Ā
# pop a
# push truthified a:
# if a can be converted to a number: 1 if != 0, 0 otherwise
# if a is a string: 1 if non-empty, 0 otherwise
elif current_command == "\u0100":
a = self.pop_stack(default=0)
self.stack.append(single_vectorized_evaluation(
a, lambda a: int(not not a), float
))
# Command: ∞
# pop a
# push mirrored a
elif current_command == "\u221e":
a = self.pop_stack(default="")
self.stack.append(apply_safe(mirror, a))
# Command: .∞
# pop a
# push intersected mirror a
elif current_command == ".\u221e":
a = self.pop_stack(default="")
self.stack.append(apply_safe(intersected_mirror, a))
# Command: н
# pop a
# push a[0]
elif current_command == "\u043D":
a = self.pop_stack(default="")
try:
if type(a) is list:
self.stack.append(a[0])
else:
self.stack.append(str(a)[0])
except:
self.stack.append(a)
# Command: θ
# pop a
# push a[-1]
elif current_command == "\u03B8":
a = self.pop_stack(default="")
if type(a) is list:
self.stack.append(a[-1])
else:
self.stack.append(str(a)[-1])
# Command: ζ
# pop a,(b):
# if a is list, zip with spaces
# otherwise: pop b, zip a with b
elif current_command == "\u03B6":
b = self.pop_stack(default="")
if type(b) is list:
a, b = b, " "
self.stack.append(apply_safe(zip_with, a, b))
else:
b = str(b)
a = self.pop_stack(default="")
self.stack.append(apply_safe(zip_with, a, b))
# Command: ε
# Usage: εCODE}
# pop a
# apply each on a
elif current_command == "\u03B5":
a = self.pop_stack(default="")
if type(a) is not list:
a = str(a)
statement, remaining = get_statements(self.commands[self.pointer_position + 1:])
result = []
for element in a:
value = self.value(statement, stack=[element])
result.append(value)
self.pointer_position += len(statement) + 1
self.stack.append(result)
# Command: !
# pop a
# push factorial(a)
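        # e.g. 5 -> 120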
elif current_command == "!":
a = self.pop_stack(default="")
self.stack.append(single_vectorized_evaluation(a, math.factorial, int))
# Command: +
# pop a,b
# push a+b
elif current_command == "+":
b = self.pop_stack(default="")
a = self.pop_stack(default="")
self.stack.append(vectorized_evaluation(
a, b, lambda a, b: a + b if type(a) is not str and type(b) is not str else str(a) + str(b), ast_int_eval
))
# Command: -
# pop a,b
# push a-b
elif current_command == "-":
b = self.pop_stack(default="")
a = self.pop_stack(default="")
self.stack.append(vectorized_evaluation(
a, b, lambda a, b: a - b, ast_int_eval
))
# Command: *
# pop a,b
# push a*b
elif current_command == "*":
b = self.pop_stack(default="")
a = self.pop_stack(default="")
self.stack.append(vectorized_evaluation(
a, b, lambda a, b: a * b, ast_int_eval
))
# Command: /
# pop a,b
# push a / b
elif current_command == "/":
b = self.pop_stack(default="")
a = self.pop_stack(default="")
self.stack.append(vectorized_evaluation(
a, b, lambda a, b: a / b, ast_int_eval
))
# Command: %
# pop a,b
# push a % b
elif current_command == "%":
b = self.pop_stack(default="")
a = self.pop_stack(default="")
self.stack.append(vectorized_evaluation(
a, b, lambda a, b: a % b, ast_int_eval
))
# Command: D
# pop a
# push a, a
elif current_command == "D":
a = self.pop_stack()
self.stack.append(a)
self.stack.append(a)
# Command: R
# pop a
# push reversed a
elif current_command == "R":
a = self.pop_stack(default="")
if type(a) is list:
self.stack.append(a[::-1])
else:
self.stack.append(str(a)[::-1])
# Command: I
# push input()
elif current_command == "I":
a = opt_input()
self.stack.append(a)
# Command: $
# push 1, input()
elif current_command == "$":
self.stack.append(1)
try:
a = get_input()
self.stack.append(a)
recent_inputs.append(a)
except:
self.stack.append("")
# Command: H
# pop a
# push int(a, 16)
elif current_command == "H":
a = self.pop_stack(default="")
self.stack.append(single_vectorized_evaluation(
a, lambda a: int(a, 16), str
))
# Command: C
# pop a
# push int(a, 2)
# Error: push ""
elif current_command == "C":
a = self.pop_stack(default="")
self.stack.append(single_vectorized_evaluation(
a, lambda a: int(a, 2), str
))
# Command: a
# pop a
# push isAlpha(a)
elif current_command == "a":
a = self.pop_stack(default=0)
self.stack.append(single_vectorized_evaluation(
a, lambda a: is_alpha_value(a), str
))
# Command: d
# pop a
# push isNumber(a)
elif current_command == "d":
a = self.pop_stack(default="")
self.stack.append(single_vectorized_evaluation(
a, lambda a: int(type(a) is not str), ast_int_eval
))
# Command: p
# pop a
# push isPrime(a)
elif current_command == "p":
a = self.pop_stack(default=0)
self.stack.append(single_vectorized_evaluation(
a, lambda a: is_prime(a), ast_int_eval
))
# Command: u
# pop a
# push uppercase(a)
elif current_command == "u":
a = self.pop_stack(default="")
self.stack.append(single_vectorized_evaluation(
a, lambda a: a.upper(), str
))
# Command: l
# pop a
# push lowercase(a)
elif current_command == "l":
a = self.pop_stack(default="")
self.stack.append(single_vectorized_evaluation(
a, lambda a: a.lower(), str
))
# Command: _
# pop a
# push negative bool of a:
# if a can be converted to a number: 1 if == 0, 0 otherwise
# if a is a string: 1 if empty, 0 otherwise
# Error: push 0
elif current_command == "_":
a = self.pop_stack(default=1)
self.stack.append(single_vectorized_evaluation(
a, lambda a: int(not a), float
))
# Command: s
# pop a,b
# push b,a
elif current_command == "s":
b = self.pop_stack(default="")
a = self.pop_stack(default="")
self.stack.append(b)
self.stack.append(a)
# Command: |
# push the rest of input as an array with strings
elif current_command == "|":
temp_list = []
try:
while True:
a = get_input()
if a == "":
break
temp_list.append(a)
except:
pass
self.stack.append(temp_list)
# Command: ≠
# pop a
# push 05AB1E falsified a (a != 1)
elif current_command == "\u2260":
a = self.pop_stack(default=0)
self.stack.append(single_vectorized_evaluation(
a, lambda a: int(a != 1), ast_int_eval
))
# Command: Θ
# pop a
# push 05AB1E truthified a (a == 1)
elif current_command == "\u0398":
a = self.pop_stack(default=0)
self.stack.append(single_vectorized_evaluation(
a, lambda a: int(a == 1), ast_int_eval
))
# Command: м
# pop a,b
# push a.remove(all elements of b)
elif current_command == "\u043C":
b = self.pop_stack(default="")
a = self.pop_stack(default="")
if type(b) is list:
b = [str(x) for x in deep_flatten(b)]
else:
b = [x for x in str(b)]
for i in b:
a = single_vectorized_evaluation(
a, lambda a: a.replace(i, ''), str
)
self.stack.append(a)
# Command: L
# pop a
# push [1 .. a]
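        # e.g. 5 -> [1, 2, 3, 4, 5]; -3 -> [-1, -2, -3]; 0 -> [0]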
elif current_command == "L":
temp_list = []
a = self.pop_stack(default=1)
if type(a) is list:
for Q in a:
Q = apply_safe(int, Q)
if type(Q) is not int:
temp_list.append(Q)
elif Q > 0:
for X in range(1, Q + 1):
temp_list.append(X)
elif Q < 0:
for X in range(1, (Q * -1) + 1):
temp_list.append(X * -1)
else:
temp_list.append(0)
else:
a = apply_safe(int, a)
if type(a) is not int:
temp_list.append(a)
elif a > 0:
for X in range(1, a + 1):
temp_list.append(X)
elif a < 0:
for X in range(1, (a * -1) + 1):
temp_list.append(X * -1)
else:
temp_list.append(0)
self.stack.append(temp_list)
# Command: r
        # reverse the stack
elif current_command == "r":
self.stack.reverse()
# Command: i
# if...}else...} statement
elif current_command == "i":
statement, else_statement, remaining = get_statements(self.commands[self.pointer_position + 1:], True)
a = apply_safe(ast_int_eval, self.pop_stack(default=0))
if a == 1:
self.stack, status = self.__run_subprogram(statement)
elif else_statement:
self.stack, status = self.__run_subprogram(else_statement)
self.pointer_position += len(statement) + len(else_statement) + 1
if status == Status.BREAK:
return Status.OK
elif status == Status.EXIT:
return Status.EXIT
# Command: \
# pop a
elif current_command == "\\":
self.pop_stack()
# Command: `
# pop a
        # push all items of a onto the stack
elif current_command == "`":
a = self.pop_stack()
if type(a) is not list:
a = str(a)
for x in a:
self.stack.append(x)
# Command: x
# pop a
# push a, a * 2
elif current_command == "x":
a = self.pop_stack(default="")
self.stack.append(a)
self.stack.append(single_vectorized_evaluation(a, lambda a: a * 2, ast_int_eval))
# Command: F
# pop a
        # for N in range(0, a): usage F(code)} / N = loop variable
elif current_command == "F":
statement, remaining = get_statements(self.commands[self.pointer_position + 1:])
a = apply_safe(int, self.pop_stack(default=0))
self.pointer_position += len(statement) + 1
if type(a) is int and a != 0:
for range_variable in range(0, a):
new_env = self.environment
new_env.range_variable = range_variable
self.stack, status = self.__run_subprogram(statement, environment=new_env)
if status == Status.BREAK:
return Status.OK
elif status == Status.EXIT:
return Status.EXIT
# Command: G
# pop a
        # for N in range(1, a): usage G(code)} / N = loop variable
elif current_command == "G":
statement, remaining = get_statements(self.commands[self.pointer_position + 1:])
a = apply_safe(int, self.pop_stack(default=0))
self.pointer_position += len(statement) + 1
if type(a) is int and a > 1:
for range_variable in range(1, a):
new_env = self.environment
new_env.range_variable = range_variable
self.stack, status = self.__run_subprogram(statement, environment=new_env)
if status == Status.BREAK:
return Status.OK
elif status == Status.EXIT:
return Status.EXIT
# Command: µ
# pop a
# while counter_variable != a, do...
elif current_command == "\u00b5":
statement, remaining = get_statements(self.commands[self.pointer_position + 1:])
a = apply_safe(int, self.pop_stack(default=0))
self.pointer_position += len(statement) + 1
if type(a) is int:
range_variable = 0
if '\u00bc' not in statement and '\u00bd' not in statement:
statement += '\u00bd'
self.pointer_position -= 1
while global_env.counter_variable != a:
range_variable += 1
new_env = self.environment
new_env.range_variable = range_variable
self.stack, status = self.__run_subprogram(statement, environment=new_env)
if status == Status.BREAK:
return Status.OK
elif status == Status.EXIT:
return Status.EXIT
# Command: Ë
# pop a
# push 1 if all equal else 0
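        # e.g. [1, "1.0", 1] -> 1 (integral floats compare equal to ints)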
elif current_command == "\u00cb":
a = self.pop_stack(default="")
if type(a) is not list:
a = str(a)
if not len(a):
self.stack.append(1)
return
# try to convert floats to int first so that
# 1.0 and 1 are considered equal
converted = []
for item in a:
item = str(item)
try:
item = float(item)
except:
pass
else:
if int(item) == item:
item = int(item)
converted.append(item)
compare = converted[0]
result = True
for item in converted:
result = result and item == compare
self.stack.append(int(result))
# Command: ƒ
# pop a
# push for N in range(0, a + 1)
elif current_command == "\u0192":
statement, remaining = get_statements(self.commands[self.pointer_position + 1:])
a = apply_safe(int, self.pop_stack(default=-1))
self.pointer_position += len(statement) + 1
if type(a) is int and a > -1:
for range_variable in range(0, a + 1):
new_env = self.environment
new_env.range_variable = range_variable
self.stack, status = self.__run_subprogram(statement, environment=new_env)
if status == Status.BREAK:
return Status.OK
elif status == Status.EXIT:
return Status.EXIT
# Command: N
# Push iteration counter
elif current_command == "N":
self.stack.append(self.environment.range_variable)
# Command: T
# Push 10
elif current_command == "T":
self.stack.append(10)
# Command: S
# pop a
        # push all chars of a separately
elif current_command == "S":
a = self.pop_stack(default="")
if type(a) is not list:
self.stack.append([x for x in str(a)])
else:
self.stack.append(vectorized_aggregator(
a, lambda acc, val: acc + [x for x in val], str, []
))
# Command: ^
# pop a,b
# push a XOR b
elif current_command == "^":
b = self.pop_stack(default=0)
a = self.pop_stack(default=0)
self.stack.append(vectorized_evaluation(
a, b, lambda a, b: a ^ b, int
))
# Command: ~
# pop a,b
# push a OR b
elif current_command == "~":
b = self.pop_stack(default=0)
a = self.pop_stack(default=0)
self.stack.append(vectorized_evaluation(
a, b, lambda a, b: a | b, int
))
# Command: &
# pop a,b
# push a AND b
elif current_command == "&":
b = self.pop_stack(default=0)
a = self.pop_stack(default=0)
self.stack.append(vectorized_evaluation(
a, b, lambda a, b: a & b, int
))
# Command: c
# pop a,b
# push a nCr b
elif current_command == "c":
b = self.pop_stack(default=1)
a = self.pop_stack(default=0)
self.stack.append(vectorized_evaluation(
a, b, lambda a, b: combinations(a, b), int
))
# Command: e
# pop a,b
# push a nPr b
elif current_command == "e":
b = self.pop_stack(default=1)
a = self.pop_stack(default=0)
self.stack.append(vectorized_evaluation(
a, b, lambda a, b: permutations(a, b), int
))
# Command: >
# pop a
# push a + 1
elif current_command == ">":
a = self.pop_stack(default="")
self.stack.append(single_vectorized_evaluation(
a, lambda a: a + 1, ast_int_eval
))
# Command: <
# pop a
# push a - 1
elif current_command == "<":
a = self.pop_stack(default="")
self.stack.append(single_vectorized_evaluation(
a, lambda a: a - 1, ast_int_eval
))
# Command: '
# push char ( 'a pushes "a" )
elif current_command == "'":
temp_string = ""
temp_index = ""
self.pointer_position += 1
temp_position = self.pointer_position
current_command = self.commands[self.pointer_position]
if dictionary.unicode_index.__contains__(current_command):
temp_index += str(dictionary.unicode_index.index(
current_command)).rjust(2, "0")
temp_position += 1
self.pointer_position += 1
current_command = self.commands[temp_position]
temp_index += str(dictionary.unicode_index.index(
current_command)).rjust(2, "0")
if temp_string == "":
temp_string += dictionary.dictionary[int(temp_index)]
else:
temp_string += " " + dictionary.dictionary[
int(temp_index)]
temp_index = ""
self.stack.append(temp_string)
else:
temp_string = self.commands[self.pointer_position]
self.stack.append(temp_string)
# Command: „
# 2 char string / can also be used for 2 compressed strings
elif current_command == "\u201e":
temp_string = ""
temp_index = ""
word_count = 0
while word_count != 2:
self.pointer_position += 1
temp_position = self.pointer_position
current_command = self.commands[self.pointer_position]
if dictionary.unicode_index.__contains__(current_command):
temp_index += str(dictionary.unicode_index.index(
current_command)).rjust(2, "0")
temp_position += 1
self.pointer_position += 1
current_command = self.commands[temp_position]
temp_index += str(dictionary.unicode_index.index(
current_command)).rjust(2, "0")
if temp_string == "":
temp_string += dictionary.dictionary[
int(temp_index)]
else:
temp_string += " " + dictionary.dictionary[
int(temp_index)]
temp_index = ""
word_count += 1
# String interpolation (command: ÿ)
elif current_command == "\u00ff":
temp_string += str(self.pop_stack(default=""))
word_count += 1
else:
temp_string += self.commands[self.pointer_position]
word_count += 1
self.stack.append(temp_string)
# Command: …
# 3 char string / can also be used for 3 compressed strings
elif current_command == "\u2026":
temp_string = ""
temp_index = ""
word_count = 0
while word_count != 3:
self.pointer_position += 1
temp_position = self.pointer_position
current_command = self.commands[self.pointer_position]
if dictionary.unicode_index.__contains__(current_command):
temp_index += str(dictionary.unicode_index.index(
current_command)).rjust(2, "0")
temp_position += 1
self.pointer_position += 1
current_command = self.commands[temp_position]
temp_index += str(dictionary.unicode_index.index(
current_command)).rjust(2, "0")
if temp_string == "":
temp_string += dictionary.dictionary[
int(temp_index)]
else:
temp_string += " " + dictionary.dictionary[
int(temp_index)]
temp_index = ""
word_count += 1
# String interpolation (command: ÿ)
elif current_command == "\u00ff":
temp_string += str(self.pop_stack(default=""))
word_count += 1
else:
temp_string += self.commands[self.pointer_position]
word_count += 1
self.stack.append(temp_string)
# Command: ö
# pop a,b
# push int(a, b)
elif current_command == "\u00f6":
b = self.pop_stack(default="")
a = self.pop_stack(default="")
self.stack.append(vectorized_evaluation(
a, b, lambda a, b: convert_from_base(a, b)
))
# Command: ¸
# pop a
# push [a]
elif current_command == "\u00b8":
a = self.pop_stack(default="")
self.stack.append([a])
# Command: .S
# pop a,b
# push 1 if a > b, -1 if a < b, 0 if a == b
elif current_command == ".S":
b = self.pop_stack(default="")
a = self.pop_stack(default="")
if type(a) is list:
if type(b) is list:
temp_list = []
for Q in range(0, len(a)):
aieAq = apply_safe(ast_int_eval, str(a[Q]))
aieBq = apply_safe(ast_int_eval, str(b[Q]))
if type(aieAq) is not int or type(aieBq) is not int:
temp_list.append(aieAq)
elif aieAq > aieBq:
temp_list.append(1)
elif aieAq < aieBq:
temp_list.append(-1)
elif aieAq == aieBq:
temp_list.append(0)
self.stack.append(temp_list)
else:
temp_list = []
aieBq = apply_safe(ast_int_eval, str(b))
for Q in a:
aieAq = apply_safe(ast_int_eval, str(Q))
if type(aieAq) is not int or type(aieBq) is not int:
temp_list.append(aieAq)
elif aieAq > aieBq:
temp_list.append(1)
elif aieAq < aieBq:
temp_list.append(-1)
elif aieAq == aieBq:
temp_list.append(0)
self.stack.append(temp_list)
else:
if type(b) is list:
temp_list = []
aieAq = apply_safe(ast_int_eval, str(a))
for Q in b:
aieBq = apply_safe(ast_int_eval, str(Q))
if type(aieAq) is not int or type(aieBq) is not int:
temp_list.append(aieAq)
elif aieAq > aieBq:
temp_list.append(1)
elif aieAq < aieBq:
temp_list.append(-1)
elif aieAq == aieBq:
temp_list.append(0)
self.stack.append(temp_list)
else:
aieAq = apply_safe(ast_int_eval, str(a))
aieBq = apply_safe(ast_int_eval, str(b))
if type(aieAq) is not int or type(aieBq) is not int:
self.stack.append(aieAq)
elif aieAq > aieBq:
self.stack.append(1)
elif aieAq < aieBq:
self.stack.append(-1)
elif aieAq == aieBq:
self.stack.append(0)
# Command: [
# Infinite loop start
elif current_command == "[":
statement, remaining = get_statements(self.commands[self.pointer_position + 1:])
range_variable = -1
self.pointer_position += len(statement) + 1
while True:
range_variable += 1
new_env = self.environment
new_env.range_variable = range_variable
self.stack, status = self.__run_subprogram(statement, environment=new_env)
if status == Status.BREAK:
break
# Command: #
# pop a
# if contains spaces, split on spaces
# else if 1, break/end
elif current_command == "#":
a = self.pop_stack(default=0)
if " " in str(a):
self.stack.append(str(a).split(" "))
else:
try:
if ast_int_eval(a) == 1:
return Status.BREAK
except:
pass
# Command: é
# pop a
# push sorted a (key=length)
elif current_command == "\u00e9":
a = self.pop_stack(default="")
if type(a) is not list:
a = str(a)
temp_list = []
for Q in a:
if type(Q) is not list:
temp_list.append(str(Q))
else:
temp_list.append(Q)
self.stack.append(sorted(temp_list, key=len))
# Command: =
# print last item
elif current_command == "=":
try:
a = self.pop_stack()
self.stack.append(a)
except:
a = ""
print(a)
global_env.has_printed = True
# Command: Q
# pop a,b
# push a == b (bool)
elif current_command == "Q":
b = self.pop_stack(default=0)
a = self.pop_stack(default=1)
if type(a) is list and type(b) is list:
self.stack.append(int(
str([str(apply_safe(ast_int_eval, x)) for x in a]) == str(
[str(apply_safe(ast_int_eval, x)) for x in b])
))
else:
self.stack.append(vectorized_evaluation(
a, b, lambda a, b: int(a == b), ast_int_eval
))
# Command: Ê
# pop a,b
# push a != b (bool)
elif current_command == "\u00ca":
b = self.pop_stack(default=0)
a = self.pop_stack(default=0)
if type(a) is list and type(b) is list:
self.stack.append(int(
str([str(apply_safe(ast_int_eval, x)) for x in a]) != str(
[str(apply_safe(ast_int_eval, x)) for x in b])
))
else:
self.stack.append(vectorized_evaluation(
a, b, lambda a, b: int(a != b), ast_int_eval
))
# Command: (
# pop a
# push -a
elif current_command == "(":
a = self.pop_stack(default="")
self.stack.append(single_vectorized_evaluation(
a, lambda a: ast_int_eval(a) * -1
))
# Command: A
# push [a-z]
elif current_command == "A":
self.stack.append('abcdefghijklmnopqrstuvwxyz')
# Command: ™
# pop a
# push title_cased(a)
elif current_command == "\u2122":
a = self.pop_stack(default="")
self.stack.append(single_vectorized_evaluation(
a, lambda a: a.title(), str
))
# Command: E
# get input
elif current_command == "E":
self.stack.append(single_vectorized_evaluation(get_input(), ast_int_eval))
# Command: )
        # wrap the whole stack into an array
elif current_command == ")":
temp_list = []
if self.stack:
temp_list = list(self.stack)
self.stack.clear()
self.stack.append(temp_list)
# Command: P
# pop a
# if a is list, push total product of a
        # else: push total product of the whole stack
elif current_command == "P":
a = self.pop_stack(default=[])
if type(a) is not list:
a = self.stack + [a]
self.stack.clear()
self.stack.append(vectorized_aggregator(
a, lambda acc, val: acc * val, ast_int_eval, 1
))
# Command: O
# pop a
# if a is list, push total sum of a
        # else: push total sum of the whole stack
elif current_command == "O":
a = self.pop_stack(default=[])
if type(a) is not list:
a = self.stack + [a]
self.stack.clear()
self.stack.append(vectorized_aggregator(
a, lambda acc, val: acc + val, ast_int_eval, 0
))
# Command: ;
# pop a
# push a / 2
elif current_command == ";":
a = self.pop_stack(default="")
self.stack.append(single_vectorized_evaluation(
a, lambda a: a / 2, ast_int_eval))
# Command: w
# wait one second
elif current_command == "w":
time.sleep(1)
# Command: m
# pop a,b
# push a**b
elif current_command == "m":
b = self.pop_stack(default="")
a = self.pop_stack(default="")
self.stack.append(vectorized_evaluation(
a, b, lambda a, b: a ** b, ast_int_eval))
# Command: X
# Push variable X
elif current_command == "X":
self.stack.append(self.environment.x)
# Command: Y
# Push variable Y
elif current_command == "Y":
self.stack.append(self.environment.y)
# Command: z
# pop a
# push 1 / a
elif current_command == "z":
a = self.pop_stack(default="")
self.stack.append(single_vectorized_evaluation(
a, lambda a: 1 / a, ast_int_eval))
# Command: U
# pop a
# stores a in variable X
elif current_command == "U":
a = self.pop_stack()
self.environment.x = a
# Command: V
# pop a
# stores a in variable Y
elif current_command == "V":
a = self.pop_stack()
self.environment.y = a
# Command: W
# push min(a) without popping
elif current_command == "W":
a = self.pop_stack(default="")
self.stack.append(a)
a = str(a) if type(a) is not list else deep_flatten(a)
minval = apply_safe(ast_int_eval, a[0])
for i in a:
if type(minval) is str or type(i) is str:
minval = min(str(minval), str(i))
                    # fall back to number comparison if possible
minval = apply_safe(ast_int_eval, minval)
else:
minval = min(minval, i)
self.stack.append(minval)
# Command: Z
# push max(a) without popping
elif current_command == "Z":
a = self.pop_stack(default="")
self.stack.append(a)
a = str(a) if type(a) is not list else deep_flatten(a)
maxval = apply_safe(ast_int_eval, a[0])
for i in a:
if type(maxval) is str or type(i) is str:
maxval = max(str(maxval), str(i))
                    # fall back to number comparison if possible
maxval = apply_safe(ast_int_eval, maxval)
else:
maxval = max(maxval, i)
self.stack.append(maxval)
# Command: q
# exit the program
elif current_command == "q":
return Status.EXIT
# Command: g
# pop a
# push length of a
elif current_command == "g":
a = self.pop_stack(default="")
if type(a) is not list:
self.stack.append(len(str(a)))
else:
self.stack.append(len(a))
# Command: J
# pop a
        # push ''.join(a) if a is list / if not, push ''.join(stack)
elif current_command == "J":
a = self.pop_stack(default="")
if type(a) is not list:
a = self.stack + [a]
self.stack.clear()
self.stack.append(vectorized_aggregator(
a, lambda acc, val: acc + val, str, ""
))
# Command: :
# pop a,b,c
# a.replace(b, c) / infinite replacement
elif current_command == ":":
c = self.pop_stack()
b = self.pop_stack()
a = self.pop_stack()
self.stack.append(infinite_replace(a, b, c))
# Command: j
# pop a,b
        # push ''.join(a) if a is list / if not, push ''.join(stack)
# Each joined string has a min size of b, and is right justified
elif current_command == "j":
b = apply_safe(int, self.pop_stack(default=""))
a = self.pop_stack(default="")
if type(b) is not int:
self.stack.append(a)
return
if type(a) is not list:
a = self.stack + [a]
self.stack.clear()
self.stack.append(vectorized_aggregator(
a, lambda acc, val: acc + val.rjust(b), str, ""
))
# Command: .j
# (deprecated)
elif current_command == ".j":
a = self.pop_stack(default="")
a = int(a)
temp_string = ""
for Q in range(0, len(self.stack) + 1):
temp_string += str(Q).rjust(a)
print(temp_string)
temp_number = 0
for Q in self.stack:
temp_number += 1
temp_string = ""
if type(Q) is list:
for R in Q:
temp_string += str(R).rjust(a)
print(str(temp_number).rjust(a) + temp_string)
else:
print(str(Q).rjust(a), end="")
global_env.has_printed = True
# Command: .J
# (deprecated)
elif current_command == ".J":
a = self.pop_stack(default="")
a = int(a)
temp_string = ""
for Q in range(1, len(self.stack) + 2):
temp_string += str(Q).rjust(a)
print(temp_string)
temp_number = 1
for Q in self.stack:
temp_number += 1
temp_string = ""
if type(Q) is list:
for R in Q:
temp_string += str(R).rjust(a)
print(str(temp_number).rjust(a) + temp_string)
else:
print(str(Q).rjust(a), end="")
global_env.has_printed = True
# Command: .b
# pop a
# push letterified(a)
elif current_command == ".b":
a = self.pop_stack(default="")
self.stack.append(single_vectorized_evaluation(
a, lambda a: get_letter(a), int))
# Command: @
# pop a
        # pop and push the element at index a in the stack (leftmost element = index 0)
elif current_command == "@":
a = apply_safe(int, self.pop_stack())
if type(a) is int:
self.stack.append(self.stack.pop(a))
# Command: M
        # push the largest number in the stack
elif current_command == "M":
temp_list = []
temp_list.append(self.stack)
temp_list = deep_flatten(temp_list)
max_int = -float("inf")
for Q in temp_list:
try:
if ast_int_eval(Q) > max_int:
max_int = ast_int_eval(Q)
except:
                    pass
self.stack.append(max_int)
# Command: t
# pop a
# push sqrt(a)
elif current_command == "t":
a = self.pop_stack(default="")
self.stack.append(single_vectorized_evaluation(
a, lambda a: math.sqrt(a), ast_int_eval))
# Command: n
# pop a
# push a ** 2
elif current_command == "n":
a = self.pop_stack(default="")
self.stack.append(single_vectorized_evaluation(
a, lambda a: a ** 2, ast_int_eval))
# Command: o
# pop a
# push 2 ** a
elif current_command == "o":
a = self.pop_stack(default="")
self.stack.append(single_vectorized_evaluation(
a, lambda a: 2 ** a, ast_int_eval))
# Command: k
# pop a,b
# push 0-indexed index of b in a (-1 when not found)
elif current_command == "k":
b = self.pop_stack(default="")
a = self.pop_stack(default="")
try:
if type(a) is list:
temp_list = []
for Q in a:
temp_list.append(str(Q))
if type(b) is list:
self.stack.append([temp_list.index(str(c)) for c in b])
else:
self.stack.append(temp_list.index(str(b)))
else:
if type(b) is list:
self.stack.append([str(a).index(str(c)) for c in b])
else:
self.stack.append(str(a).index(str(b)))
except:
self.stack.append(-1)
# Command: {
# pop a
# push sorted a
elif current_command == "{":
a = self.pop_stack(default="")
if type(a) is list:
self.stack.append(apply_safe(sorted, a))
else:
self.stack.append(''.join(apply_safe(sorted, str(a))))
# Command: °
# pop a
# push 10 ** a
elif current_command == "\u00b0":
a = self.pop_stack(default="")
self.stack.append(single_vectorized_evaluation(
a, lambda a: 10 ** a, ast_int_eval
))
# Command: º
# push len(self.stack) > 0
elif current_command == "\u00ba":
if len(self.stack) > 0:
self.stack.append(1)
else:
self.stack.append(0)
# Command: å
# pop a,b
# push a in b
elif current_command == "\u00e5":
b = self.pop_stack(default=0)
a = self.pop_stack(default="")
if type(a) is list:
a = [str(c) for c in deep_flatten(a)]
else:
a = str(a)
if type(b) is list:
self.stack.append([int(str(c) in a) for c in deep_flatten(b)])
else:
self.stack.append(int(str(b) in a))
# Command: .å
# pop a,b
# push a in b (vectorized)
elif current_command == ".\u00e5":
b = self.pop_stack(default=0)
a = self.pop_stack(default="")
if type(b) is list:
a = [str(x) for x in deep_flatten(a)] if type(a) is list else str(a)
self.stack.append(single_vectorized_evaluation(
b, lambda b: int(b in a), str
))
else:
self.stack.append(vectorized_evaluation(
a, b, lambda a, b: int(b in a), str
))
# Command: v
# pop a
# range loop: for y in a (y = string, N = index)
elif current_command == "v":
statement, remaining = get_statements(self.commands[self.pointer_position + 1:])
a = self.pop_stack()
self.pointer_position += len(statement) + 1
range_variable = -1
if type(a) is not list:
a = str(a)
for string_variable in a:
range_variable += 1
if self.debug:
print("N = " + str(range_variable))
new_env = self.environment
new_env.range_variable = range_variable
new_env.string_variable = string_variable
self.stack, status = self.__run_subprogram(statement, environment=new_env)
if status == Status.BREAK:
return Status.OK
elif status == Status.EXIT:
return Status.EXIT
# Command: y
# push string variable (used in mapping loops)
elif current_command == "y":
self.stack.append(self.environment.string_variable)
# Command: ,
# pop a
# print a
elif current_command == ",":
a = self.pop_stack()
print(str(a))
global_env.has_printed = True
# Command: f
# pop a
# push list of prime factors (no duplicates)
elif current_command == "f":
a = self.pop_stack(default="")
self.stack.append(single_vectorized_evaluation(
a, lambda a: prime_factorization(a), int
))
# Command: Ò
# pop a
# push list of prime factors (with duplicates)
elif current_command == "\u00d2":
a = self.pop_stack(default="")
self.stack.append(single_vectorized_evaluation(
a, lambda a: prime_factorization_duplicates(a), int
))
# Command: Ó
# pop a
# push list of exponents of prime factors (2^a, 3^b, 5^c, 7^d, etc.)
elif current_command == "\u00d3":
a = self.pop_stack(default="")
self.stack.append(single_vectorized_evaluation(
a, lambda a: prime_factorization_powers(a), int
))
# Command: ú
# pop a,b
# push a padded with b spaces in the front
elif current_command == "\u00fa":
b = self.pop_stack(default="")
a = self.pop_stack(default="")
# backward compatibility: swap a<=>b if necessary
try:
[int(c) for c in deep_flatten([b])]
except:
a, b = b, a
self.stack.append(vectorized_evaluation(
a, b, lambda a, b: " " * int(b) + str(a)
))
# Command: þ
# pop a
# push only digits of a
elif current_command == "\u00fe":
a = self.pop_stack(default="")
self.stack.append(vectorized_filter(a, is_digit_value, str))
# Command: á
# pop a
# push only letters of a
elif current_command == "\u00e1":
a = self.pop_stack(default="")
self.stack.append(vectorized_filter(a, is_alpha_value, str))
# Command: .u
# pop a
# push is_upper(a)
elif current_command == ".u":
a = self.pop_stack(default="")
self.stack.append(single_vectorized_evaluation(
a, lambda a: int(is_alpha_value(a) and str(a).upper() == str(a))
))
# Command: .l
# pop a
# push is_lower(a)
elif current_command == ".l":
a = self.pop_stack(default="")
self.stack.append(single_vectorized_evaluation(
a, lambda a: int(is_alpha_value(a) and str(a).lower() == str(a))
))
# Command: ê
# pop a
# push sorted_uniquified(a)
elif current_command == "\u00ea":
a = self.pop_stack(default="")
a = apply_safe(uniquify, a)
sorted_a = apply_safe(sorted, a)
self.stack.append(sorted_a if type(a) is list else ''.join(sorted_a))
# Command: Ç
# pop a
# push ASCII value of a
elif current_command == "\u00c7":
a = self.pop_stack("")
self.stack.append(single_vectorized_evaluation(
a, lambda a: ord(a) if len(a) == 1 else [ord(c) for c in a], str
))
# Command: ç
# pop a
# push char a
elif current_command == "\u00e7":
a = self.pop_stack(default="")
self.stack.append(single_vectorized_evaluation(
a, lambda a: chr(a), int
))
# Command: ˜
# pop a
# push deep flattened a
elif current_command == "\u02dc":
a = self.pop_stack(default=[])
self.stack.append(deep_flatten(a))
# Command: ô
# pop a,b
# push a split in pieces of b
elif current_command == "\u00f4":
b = self.pop_stack(default="")
a = self.pop_stack(default="")
if type(b) is list and type(a) is not list:
a, b = b, a
self.stack.append(apply_safe(even_divide, a, b))
# Command: í
# pop a
# push [reversed Q for Q in a] (short for €R)
elif current_command == "\u00ed":
a = self.pop_stack(default="")
temp_list = []
for Q in a:
if type(Q) is not list:
Q = str(Q)
temp_list.append(Q[::-1])
self.stack.append(temp_list)
# Command: ÷
# pop a,b
# push a // b
elif current_command == "\u00f7":
b = self.pop_stack(default="")
a = self.pop_stack(default="")
self.stack.append(vectorized_evaluation(
a, b, lambda a, b: a // b, ast_int_eval
))
# Command: ±
# pop a
# push bitwise not a
elif current_command == "\u00b1":
a = self.pop_stack(default="")
self.stack.append(single_vectorized_evaluation(
a, lambda a: ~a, int
))
# Command: Æ
# pop a
        # push reduced subtraction of a
elif current_command == "\u00c6":
a = self.pop_stack(default=[])
if type(a) is not list:
a = self.stack + [a]
self.stack.clear()
self.stack.append(vectorized_aggregator(
a, lambda acc, val: acc - val, ast_int_eval
))
# Command: Ù
# pop a
# push uniquified a
elif current_command == "\u00d9":
a = self.pop_stack(default="")
self.stack.append(uniquify(a))
# Command: ø
# pop (a,)b
# push zipped b if b is list, else zipped a with b
elif current_command == "\u00f8":
b = self.pop_stack(default="")
if type(b) is not list:
b = str(b)
a = self.pop_stack(default="")
if type(a) is not list:
a = str(a)
result = [list(x) for x in zip(*[a, b])]
if type(a) is str:
self.stack.append([''.join(x) for x in result])
else:
self.stack.append(result)
else:
if max([type(x) is list for x in b]):
result = [list(x) for x in zip(*b)]
self.stack.append(result)
elif max([len(x) for x in b]) > 1:
result = [list(x) for x in zip(*b)]
self.stack.append([''.join(x) for x in result])
else:
a = self.pop_stack(default="")
if type(a) is not list:
a = str(a)
result = [list(x) for x in zip(*[a, b])]
self.stack.append(result)
# Command: Ú
# pop a
# push reverse uniquified a
elif current_command == "\u00da":
a = self.pop_stack(default="")
if type(a) is not list:
a = str(a)
self.stack.append(uniquify(a[::-1])[::-1])
# Command: Û
# pop a,b
# push a with leading b's trimmed off
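        # e.g. [0, 0, 1, 2, 0] with b=0 -> [1, 2, 0]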
elif current_command == "\u00db":
b = self.pop_stack(default="")
a = self.pop_stack(default=[])
if type(a) is not list:
a = str(a)
if type(b) is not list:
b = [b]
for i in b:
while a and str(a[0]) == str(i):
a = a[1:]
self.stack.append(a)
# Command: ¥
# pop a
# push delta's a
elif current_command == "\u00a5":
a = self.pop_stack(default=[])
self.stack.append(deltaify(a))
# Command: ©
# store a in register_c without popping
elif current_command == "\u00a9":
a = self.pop_stack()
self.stack.append(a)
self.environment.c = a
# Command: ®
# push the last item from register_c
elif current_command == "\u00ae":
self.stack.append(self.environment.c)
# Command: Ü
# pop a,b
# push a with trailing b's trimmed off
elif current_command == "\u00dc":
b = self.pop_stack(default="")
a = self.pop_stack(default=[])
if type(a) is not list:
a = str(a)
if type(b) is not list:
b = [b]
for i in b:
while a and str(a[-1]) == str(i):
a = a[:-1]
self.stack.append(a)
# Command: È
# pop a
# push a % 2 == 0 (is even)
elif current_command == "\u00c8":
a = self.pop_stack(default="")
self.stack.append(single_vectorized_evaluation(
a, lambda a: int(a % 2 == 0), ast_int_eval
))
# Command: ¿
# pop (a,)b
# push gcd(b) if b is list, else push gcd([b, a])
elif current_command == "\u00bf":
a = self.pop_stack(default=[])
if type(a) is not list:
a = [a, self.pop_stack(default="")]
self.stack.append(vectorized_aggregator(
a, lambda acc, val: fractions.gcd(acc, val) if acc and val else 0, ast_int_eval
))
# Command: É
# pop a
        # push a % 2 == 1 (is odd)
elif current_command == "\u00c9":
a = self.pop_stack(default="")
self.stack.append(single_vectorized_evaluation(
a, lambda a: int(a % 2 == 1), ast_int_eval
))
# Command: ü
# pairwise command (vectorizes if the first element is a list)
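        # e.g. [1, 2, 3] with command '+' -> [3, 5] (adjacent pairs summed)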
elif current_command == "\u00fc":
a = self.pop_stack()
self.pointer_position += 1
for_each_command = self.commands[self.pointer_position]
if for_each_command == ".":
self.pointer_position += 1
for_each_command += self.commands[self.pointer_position]
if type(a) is not list:
a = str(a)
zipper = a if type(a[0]) is list else zip(*[a, a[1:]])
results = []
for element in zipper:
results.append(self.value(for_each_command, stack=[element[0], element[1]]))
self.stack.append(results)
# Command: ¡
# pop a,b
# push a.split(b)
elif current_command == "\u00a1":
b = self.pop_stack(default="")
a = self.pop_stack(default=[])
self.stack.append(
single_vectorized_evaluation(a, lambda a: multi_split(a, b), str)
)
# Command: γ
# pop a
# push a split into chunks of consecutive equal elements
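        # e.g. "aabbbc" -> ["aa", "bbb", "c"]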
elif current_command == "\u03b3":
a = self.pop_stack(default="")
if type(a) is not list:
a = str(a)
is_list = type(a) is list
temp_list = []
inner_str = ""
inner_list = []
i = 0
while i < len(a):
if is_list:
inner_list.append(a[i])
else:
inner_str += a[i]
if i == len(a) - 1 or a[i] != a[i + 1]:
if is_list:
temp_list.append(inner_list)
else:
temp_list.append(inner_str)
inner_list = []
inner_str = ""
i += 1
self.stack.append(temp_list)
# Command: ï
# pop a
# push int(a)
elif current_command == "\u00ef":
a = self.pop_stack(default="")
self.stack.append(single_vectorized_evaluation(a, lambda a: int(ast_int_eval(a))))
# Command: Þ
# pop a
# push float(a)
elif current_command == "\u00de":
a = self.pop_stack(default="")
self.stack.append(single_vectorized_evaluation(a, lambda a: floatify(a)))
# Command: Ñ
# pop a
# push divisors(a)
elif current_command == "\u00d1":
a = self.pop_stack(default="")
self.stack.append(single_vectorized_evaluation(
a, lambda a: divisors_of_number(a)
))
# Command: Î
# push 0 and input
elif current_command == "\u00ce":
self.stack.append(0)
self.stack.append(get_input())
# Command: §
# pop a
# push str(a)
elif current_command == "\u00a7":
a = self.pop_stack(default="")
self.stack.append(single_vectorized_evaluation(a, lambda a: str(a)))
# Command: ¦
# pop a
# push a[1:]
elif current_command == "\u00a6":
a = self.pop_stack(default="")
if type(a) is not list:
self.stack.append(str(a)[1:])
else:
self.stack.append(a[1:])
# Command: š
# pop a
# push switch_cased(a)
elif current_command == "\u0161":
a = self.pop_stack(default="")
self.stack.append(single_vectorized_evaluation(a, lambda a: a.swapcase(), str))
# Command: £
# pop a,b
# push a[0:b]
elif current_command == "\u00a3":
b = self.pop_stack(default=0)
a = self.pop_stack(default="")
try:
b = [int(x) for x in b] if type(b) is list else int(b)
except:
a, b = b, a
try:
if type(a) is not list:
a = str(a)
if type(b) is list:
temp_list = []
temp_element = a
for Q in b:
temp_list.append(temp_element[0:int(Q)])
temp_element = temp_element[int(Q):]
self.stack.append(temp_list)
else:
b = int(b)
self.stack.append(a[0:b])
except:
self.stack.append(a)
# Command: K
# pop a,b
# push a with no b's
elif current_command == "K":
b = self.pop_stack(default="")
a = self.pop_stack(default="")
if type(b) is not list:
b = [b]
for i in b:
if type(a) is not list:
a = str(a).replace(str(i), "")
else:
a = vectorized_filter(a, lambda a: a != str(i), str)
self.stack.append(a)
# Command: ß
# extract smallest element of list
elif current_command == "\u00df":
a = self.pop_stack("")
has_skipped = False
result = []
if type(a) is not list:
a = str(a)
if not a:
self.stack.append(a)
self.stack.append('')
return
for element in a:
if str(element) == str(min(a)) and not has_skipped:
has_skipped = True
else:
result.append(element)
if type(a) is not list:
self.stack.append(''.join([str(x) for x in result]))
else:
self.stack.append(result)
self.stack.append(min(a))
# Command: à
# extract greatest element of list
elif current_command == "\u00e0":
a = self.pop_stack(default="")
has_skipped = False
result = []
if type(a) is not list:
a = str(a)
if not a:
self.stack.append(a)
self.stack.append('')
return
for element in a:
if str(element) == str(max(a)) and not has_skipped:
has_skipped = True
else:
result.append(element)
if type(a) is not list:
self.stack.append(''.join([str(x) for x in result]))
else:
self.stack.append(result)
self.stack.append(max(a))
# Command: ¤
# get a
# push tail(a)
elif current_command == "\u00a4":
if self.stack:
a = self.stack[-1]
else:
a = self.pop_stack(default="")
self.stack.append(a)
if type(a) is not list:
self.stack.append(str(a)[-1])
else:
self.stack.append(a[-1])
# Command: ‹
# pop a,b
# push a < b
elif current_command == "\u2039":
b = self.pop_stack(default="")
a = self.pop_stack(default="")
self.stack.append(vectorized_evaluation(
a, b, lambda a, b: int(str(a) < str(b) if type(a) is str or type(b) is str else a < b), ast_int_eval
))
# Command: ʒ
# pop a
# filter a when the result of code == 1: usage ʒCODE}
elif current_command == "\u0292":
a = self.pop_stack(default="")
if type(a) is not list:
a = str(a)
statement, remaining = get_statements(self.commands[self.pointer_position + 1:])
filtered = []
for element in a:
value = self.value(statement, stack=[element])
if value == 1 or ast_int_eval(value) == "1":
filtered.append(element)
if type(a) is str:
filtered = ''.join(str(x) for x in filtered)
self.stack.append(filtered)
self.pointer_position += len(statement) + 1
# Command: Σ
# pop a
# sort a by the result of code: usage ΣCODE}
elif current_command == "\u03A3":
a = self.pop_stack(default="")
if type(a) is not list:
a = str(a)
statement, remaining = get_statements(self.commands[self.pointer_position + 1:])
results = []
for element in a:
value = self.value(statement, stack=[element])
results.append([value if value else float('inf'), element])
results = sorted(results, key=lambda element: element[0])
if type(a) is list:
self.stack.append([x[1] for x in results])
else:
self.stack.append(''.join([x[1] for x in results]))
self.pointer_position += len(statement) + 1
# Command: ›
# pop a,b
# push a > b
elif current_command == "\u203A":
b = self.pop_stack(default="")
a = self.pop_stack(default="")
self.stack.append(vectorized_evaluation(
a, b, lambda a, b: int(str(a) > str(b) if type(a) is str or type(b) is str else a > b), ast_int_eval
))
# Command: À
# pop a
# push a rotated 1 left
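        # e.g. "abcd" -> "bcda"; [1, 2, 3] -> [2, 3, 1]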
elif current_command == "\u00c0":
a = self.pop_stack(default="")
if type(a) is list:
if len(a):
b = a[0]
a = a[1:]
a.append(b)
else:
a = str(a)
if len(a):
a += a[0]
a = a[1:]
self.stack.append(a)
# Command: Á
# pop a
# push a rotated 1 right
elif current_command == "\u00c1":
a = self.pop_stack(default="")
if type(a) is list:
if len(a):
b = []
b.append(a[-1])
for Q in a:
b.append(Q)
a = b[:-1]
else:
a = str(a)
if len(a):
a = a[-1] + a
a = a[:-1]
self.stack.append(a)
# Command: Ø
# pop a
# push ath prime (zero-indexed)
elif current_command == "\u00d8":
a = self.pop_stack(default="")
self.stack.append(single_vectorized_evaluation(
a, lambda a: get_nth_prime(a), int
))
# Command: .Ø
# pop a
# push 0-index number of the greatest prime <= a
elif current_command == ".\u00d8":
a = self.pop_stack(default="")
self.stack.append(
single_vectorized_evaluation(
a, lambda a: get_index_of_prime(a), int
)
)
# Command: ¢
# pop a,b
# push a.count(b)
elif current_command == "\u00a2":
b = self.pop_stack(default=0)
a = self.pop_stack(default="")
if type(a) is list:
a = [str(x) for x in deep_flatten(a)]
self.stack.append(single_vectorized_evaluation(
b, lambda b: a.count(b), str
))
# Command: ¨
# pop a
# push a[0:-1]
elif current_command == "\u00a8":
a = self.pop_stack(default="")
if type(a) is not list:
self.stack.append(str(a)[0:-1])
else:
self.stack.append(a[0:-1])
# Command: æ
# pop a
# push powerset(a)
elif current_command == "\u00e6":
a = self.pop_stack(default="")
b = None
if type(a) is not list:
b = list(str(a))
else:
b = [str(x) if type(x) is int else x for x in a]
s = list(b)
s = list(itertools.chain.from_iterable(
itertools.combinations(s, r) for r in range(len(s) + 1)
))
list_of_lists = [list(elem) for elem in s]
if type(a) is not list:
self.stack.append([''.join(x) for x in list_of_lists])
else:
self.stack.append(list_of_lists)
# Command: œ
# pop a
# push permutations(a)
elif current_command == "\u0153":
a = self.pop_stack(default="")
if type(a) is not list:
b = list(str(a))
else:
b = a
b = list(itertools.permutations(list(b)))
list_of_lists = [list(elem) for elem in b]
if type(a) is not list:
self.stack.append([''.join(x) for x in list_of_lists])
else:
self.stack.append(list_of_lists)
# Command: Œ
# pop a
# push substrings(a)
elif current_command == "\u0152":
a = self.pop_stack(default="")
self.stack.append(apply_safe(get_all_substrings, a))
# Command: Ð
# pop a
# triplicate (push a, push a, push a)
elif current_command == "\u00d0":
a = self.pop_stack(default="")
self.stack.append(a)
self.stack.append(a)
self.stack.append(a)
# Command: Ä
# pop a
# push abs(a)
elif current_command == "\u00c4":
a = self.pop_stack(default="")
self.stack.append(single_vectorized_evaluation(
a, lambda a: abs(a), ast_int_eval
))
# Command: Ý
# pop a
# push [0..a]
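# e.g. 3 -> [0, 1, 2, 3]; -2 -> [0, -1, -2]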
elif current_command == "\u00dd":
a = self.pop_stack(default="")
incr = lambda i: -1 if i < 0 else 1
self.stack.append(single_vectorized_evaluation(
a, lambda a: list(range(0, a + incr(a), incr(a))), int
))
# Command: û
# pop a
# push palindromized(a)
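# e.g. "abc" -> "abcba" (mirrored without doubling the last char)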
elif current_command == "\u00fb":
a = self.pop_stack(default="")
if type(a) is not list:
a = str(a)
self.stack.append(a + a[::-1][1:])
# Command: ¶
# push a newline character
elif current_command == "\u00b6":
self.stack.append("\n")
# Command: ý
# pop (a,)b
# push b.join(a) if a is list, else b.join(self.stack)
elif current_command == "\u00fd":
b = str(self.pop_stack(default=""))
if self.stack and type(self.stack[-1]) is list:
a = self.pop_stack()
else:
a = list(self.stack)
self.stack.clear()
if a:
self.stack.append(vectorized_aggregator(
a, lambda acc, val: acc + b + val, str
))
else:
self.stack.append("")
# Command: Ÿ
# pop (a,)b
# push [a, ..., b] if b not a list, otherwise push [b[0],...,b[1],...,b[n]]
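# e.g. 1, 4 -> [1, 2, 3, 4]; [1, 4, 2] -> [1, 2, 3, 4, 3, 2]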
elif current_command == "\u0178":
b = self.pop_stack(default="")
if type(b) is not list:
b = [self.pop_stack(default=""), b]
milestones = []
for i in b:
try:
milestones.append(int(ast_int_eval(i)))
except:
pass
ranges = []
for i in range(len(milestones) - 1):
x = milestones[i]
y = milestones[i + 1]
if x == y:
ranges.append(x)
else:
incr = lambda: 1 if x < y else -1
# do not repeat a milestone
if i > 0:
x += incr()
ranges += list(range(x, y + incr(), incr()))
self.stack.append(ranges)
# Command: Š
# pop a,b,c
# push c,a,b
elif current_command == "\u0160":
c = self.pop_stack()
# defaulting to prevent losing a value if there is
# at least one on the stack, but no input is available
b = self.pop_stack(default="")
a = self.pop_stack(default="")
# a b c -> c a b
self.stack.append(c)
self.stack.append(a)
self.stack.append(b)
# Command: Ö
# pop a,b
# push a % b == 0
elif current_command == "\u00d6":
b = self.pop_stack(default="")
a = self.pop_stack(default="")
self.stack.append(vectorized_evaluation(
a, b, lambda a, b: int(a % b == 0), ast_int_eval
))
# Command: ¬
# get a
# push head(a)
elif current_command == "\u00ac":
a = self.pop_stack(default="")
self.stack.append(a)
if type(a) is not list:
self.stack.append(str(a)[0])
else:
self.stack.append(self.stack[-1][0])
# Command: Ž
# break/end if self.stack is empty
elif current_command == "\u017d":
if not self.stack:
return True
# Command: »
# pop (a)
# if list, join list by newlines, else join self.stack by newlines
elif current_command == "\u00bb":
if self.stack and type(self.stack[-1]) is list:
a = self.pop_stack()
else:
a = list(self.stack)
self.stack.clear()
result = []
for Q in a:
if type(Q) is list:
result.append(' '.join([str(x) for x in Q]))
else:
result.append(str(Q))
self.stack.append("\n".join(result))
# Command: «
# pop a,b
# push concatenated(a, b)
elif current_command == "\u00ab":
b = self.pop_stack(default="")
a = self.pop_stack(default="")
if type(a) is list and type(b) is list:
self.stack.append(a + b)
elif type(a) is list:
self.stack.append(single_vectorized_evaluation(a, lambda a: a + str(b), str))
elif type(b) is list:
self.stack.append(single_vectorized_evaluation(b, lambda b: str(a) + b, str))
else:
self.stack.append(str(a) + str(b))
# Command: ì
# pop a,b
# push a.prepend(b)
elif current_command == "\u00ec":
b = self.pop_stack(default="")
a = self.pop_stack(default="")
if type(a) is list and type(b) is list:
self.stack.append(b + a)
elif type(a) is list:
self.stack.append(single_vectorized_evaluation(a, lambda a: str(b) + a, str))
elif type(b) is list:
self.stack.append(single_vectorized_evaluation(b, lambda b: b + str(a), str))
else:
self.stack.append(str(b) + str(a))
# Command: ×
# pop a,b
# push a x b (strings)
elif current_command == "\u00d7":
b = self.pop_stack(default="")
a = self.pop_stack(default="")
self.stack.append(string_multiplication(a, b))
# Command: .×
# Command: и
# pop a,b
# push a n-repeat (list-multiply) b
elif current_command == ".\u00d7" or current_command == "\u0438":
b = self.pop_stack(default=0)
a = self.pop_stack(default=[])
self.stack.append(list_multiply(a, b))
# Command: ò
# pop a
# push a rounded to nearest integer
elif current_command == "\u00f2":
a = self.pop_stack(default="")
self.stack.append(single_vectorized_evaluation(a, round, ast_int_eval))
# Command: .ò
# pop a,b
# round a with b digits precision (bankers rounding)
elif current_command == ".\u00f2":
b = self.pop_stack(default="")
a = self.pop_stack(default="")
self.stack.append(vectorized_evaluation(
a, b, lambda a, b: round(a, b), ast_int_eval
))
# Command: ð
# push a space character
elif current_command == "\u00f0":
self.stack.append(" ")
# Command: ƶ
# pop a
# push lifted a, each element is multiplied by its index (1-indexed)
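# e.g. "abc" -> ["a", "bb", "ccc"]; [1, 2, 3] -> [1, 4, 9]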
elif current_command == "\u01b6":
a = self.pop_stack(default="")
if type(a) is not list:
a = str(a)
result = []
for n in range(len(a)):
result.append(a[n] * (n + 1))
self.stack.append(result)
else:
result = []
for n in range(len(a)):
result.append(vectorized_evaluation(
a[n], n + 1, lambda a, b: a * b, ast_int_eval
))
self.stack.append(result)
# Command: .M
# pop a
# push most frequent in a
elif current_command == ".M":
a = self.pop_stack(default="")
if type(a) is list:
buf = []
for item in a:
if type(item) is list:
# stringify sublists to make them hashable
buf.append(str(item))
else:
buf.append(apply_safe(ast_int_eval, item))
a = buf
else:
a = list(str(a))
result = []
if a:
uniques = list(set(a))
counts = list(map(lambda i: a.count(i), uniques))
max_count = max(counts)
for Q in range(len(counts)):
if counts[Q] == max_count:
result.append(uniques[Q])
# un-stringify the sublists stringified above, if necessary
self.stack.append([apply_safe(ast_int_eval, x) for x in result])
# Command: .m
# pop a
# push least frequent in a
elif current_command == ".m":
a = self.pop_stack(default="")
if type(a) is list:
buf = []
for item in a:
if type(item) is list:
# stringify sublists to make them hashable
buf.append(str(item))
else:
buf.append(apply_safe(ast_int_eval, item))
a = buf
else:
a = list(str(a))
result = []
if a:
uniques = list(set(a))
counts = list(map(lambda i: a.count(i), uniques))
min_count = min(counts)
for Q in range(len(counts)):
if counts[Q] == min_count:
result.append(uniques[Q])
# un-stringify the sublists stringified above, if necessary
self.stack.append([apply_safe(ast_int_eval, x) for x in result])
# Command: Ì
# pop a
# push a + 2
elif current_command == "\u00cc":
a = self.pop_stack(default="")
self.stack.append(single_vectorized_evaluation(a, lambda a: a + 2, ast_int_eval))
# Command: Í
# pop a
# push a - 2
elif current_command == "\u00cd":
a = self.pop_stack(default="")
self.stack.append(single_vectorized_evaluation(a, lambda a: a - 2, ast_int_eval))
# Command: †
# pop a,b
# push a with b filtered to the front
elif current_command == "\u2020":
b = self.pop_stack(default="")
a = self.pop_stack(default="")
self.stack.append(filtered_to_the_front(a, b))
# Command: ¼
# counter_variable++
elif current_command == "\u00bc":
global_env.counter_variable += 1
# Command: .¼
# pop a
# push tan(a)
elif current_command == ".\u00bc":
a = self.pop_stack(default="")
self.stack.append(single_vectorized_evaluation(
a, lambda a: math.tan(ast_int_eval(a))
))
# Command: ½
# pop a
# if 1, then counter_variable++
elif current_command == "\u00bd":
if str(ast_int_eval(self.pop_stack(default=""))) == "1":
global_env.counter_variable += 1
# Command: .½
# pop a
# push sin(a)
elif current_command == ".\u00bd":
a = self.pop_stack(default="")
self.stack.append(single_vectorized_evaluation(
a, lambda a: math.sin(ast_int_eval(a))
))
# Command: .x
# pop a,b
# push the element in a closest to b
elif current_command == ".x":
b = self.pop_stack(default="")
a = self.pop_stack(default="")
self.stack.append(apply_safe(closest_to, a, b))
# Command: .¥
# pop a
# push undelta a
elif current_command == ".\u00a5":
a = self.pop_stack(default="")
self.stack.append(vectorized_aggregator(
a, lambda acc, val: acc + [acc[-1] + ast_int_eval(val)], start=[0]
))
# Command: ¾
# push counter_variable
elif current_command == "\u00be":
self.stack.append(global_env.counter_variable)
# Command: .¾
# pop a
# push cos(a)
elif current_command == ".\u00be":
a = self.pop_stack(default="")
self.stack.append(single_vectorized_evaluation(
a, lambda a: math.cos(ast_int_eval(a))
))
# Command: ó
# pop a
# push round_down(a)
elif current_command == "\u00f3":
a = self.pop_stack(default="")
self.stack.append(single_vectorized_evaluation(
a, math.floor, ast_int_eval
))
# Command: ?
# pop a
# push a no newline
elif current_command == "?":
a = self.pop_stack(default="")
print(a, end="")
global_env.has_printed = True
# Command: .o
# pop a,b
# push overlap(a, b) (deprecated)
elif current_command == ".o":
b = self.pop_stack(default="")
a = self.pop_stack(default="")
a = " " + str(a)
b = str(b)
temp_string = ""
stop = False
for Q in b:
if stop:
temp_string += Q
else:
if a[0] == Q:
temp_string += Q
a = a[1:]
else:
while a[0] != Q:
a = a[1:]
if a == "":
stop = True
temp_string += Q
break
if a[0] == Q or Q == "\u00f0":
temp_string += Q
break
else:
temp_string += " "
self.stack.append(temp_string)
# Command: .O
# pop a,b
# push connected_overlap(a, b) (deprecated)
elif current_command == ".O":
b = str(self.pop_stack(default=""))
a = str(self.pop_stack(default="")) + b
temp_string = b
while True:
is_substring = True
for Q in range(0, len(b)):
if a[Q] != b[Q] and a[Q] != "\u00f0":
is_substring = False
break
if is_substring:
break
else:
temp_string = " " + temp_string
a = a[1:]
self.stack.append(temp_string)
# Command: .N
# pop a
# push hashed(a)
elif current_command == ".N":
a = self.pop_stack(default="")
self.stack.append(single_vectorized_evaluation(a, lambda a: get_hash(a)))
# Command: ‡
# pop a,b,c
# push a.transliterate(b -> c)
elif current_command == "\u2021":
c = self.pop_stack(default="")
b = self.pop_stack(default="")
a = self.pop_stack(default="")
if type(b) is not list:
b = str(b)
if type(c) is not list:
c = str(c)
self.stack.append(single_vectorized_evaluation(
a, lambda a: transliterate(a, b, c), str
))
# Command: Ï
# pop a,b
# push the elements of a whose corresponding element in b is 1
elif current_command == "\u00cf":
b = self.pop_stack(default="")
a = self.pop_stack(default="")
if type(b) is not list:
b = str(b)
if type(a) is not list:
a = str(a)
filtered = []
for i in range(min(len(a), len(b))):
if apply_safe(ast_int_eval, b[i]) == 1:
filtered.append(a[i])
if type(a) is str:
filtered = ''.join(filtered)
self.stack.append(filtered)
# Command: ñ
# pop a,b,c
# push a + b merged with c as merge character
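# e.g. a="a--", b="-b-", c="-" -> "ab-" (non-filler chars of a win)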
elif current_command == "\u00f1":
c = str(self.pop_stack(default=""))
b = str(self.pop_stack(default=""))[::-1]
a = str(self.pop_stack(default=""))[::-1]
if len(b) > len(a):
a = str(a).ljust(len(b), c)
if len(a) > len(b):
b = str(b).ljust(len(a), c)
temp_string = ""
for Q in range(len(a)):
if a[Q] == c and b[Q] != c:
temp_string += b[Q]
else:
temp_string += a[Q]
self.stack.append(temp_string[::-1])
# Command: .ï
# pop a
# push is_int(a)
elif current_command == ".\u00ef":
a = self.pop_stack(default="")
self.stack.append(single_vectorized_evaluation(
a, lambda a: int(type(a) is float and int(a) == a), float
))
# Command: .¿
# pop (a,)b
# push lcm(b) if b is list, else push lcm(b, a)
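# e.g. [4, 6] -> 12 (fold lcm over the list)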
elif current_command == ".\u00bf":
a = self.pop_stack(default=[])
if type(a) is not list:
a = [a, self.pop_stack(default="")]
self.stack.append(vectorized_aggregator(
a, lambda acc, val: abs(acc) * abs(val) // fractions.gcd(acc, val) if acc and val else 0, ast_int_eval
))
# Command: .ø
# pop a,b
# surround a with b
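# e.g. "bc", "a" -> "abca"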
elif current_command == ".\u00f8":
b = self.pop_stack(default="")
a = self.pop_stack(default="")
if type(b) is not list:
b = [b]
if type(a) is not list:
self.stack.append(vectorized_aggregator(
b, lambda acc, val: val + acc + val, str, str(a)
))
else:
self.stack.append(vectorized_aggregator(
b, lambda acc, val: [val] + acc + [val], str, a
))
# Command: –
# pop a
# if 1, print N (used in loops)
elif current_command == "\u2013":
a = self.pop_stack(default=0)
if apply_safe(ast_int_eval, a) == 1:
print(self.environment.range_variable)
global_env.has_printed = True
# Command: —
# pop a
# if 1, print y (used in loops)
elif current_command == "\u2014":
a = self.pop_stack(default=0)
if apply_safe(ast_int_eval, a) == 1:
print(self.environment.string_variable)
global_env.has_printed = True
# Command: .e
# pop a
# run with experimental python evaluation (does not work in safe mode)
elif current_command == ".e":
if self.safe_mode:
print("exec self.commands are ignored in safe mode")
else:
temp_string = str(self.pop_stack(default=""))
if len(temp_string):
temp_string = temp_string.replace("#", "self.stack")
temp_string = temp_string.replace(";", "\n")
exec(temp_string)
# Command: .E
# pop a
# run with experimental batch evaluation (does not work in safe mode)
elif current_command == ".E":
if self.safe_mode:
print("exec self.commands are ignored in safe mode")
else:
a = str(self.pop_stack(default=""))
if len(a):
f = tempfile.NamedTemporaryFile()
f.write(bytes(str(a), "cp1252"))
os.system(f.name)
f.close()
# Command: .V
# pop a
# evaluate a as 05AB1E code
elif current_command == ".V":
a = self.pop_stack(default="")
self.stack, status = self.__run_subprogram(str(a))
if status == Status.BREAK:
return Status.OK
elif status == Status.EXIT:
return Status.EXIT
# Command: .R
# Command: Ω
# pop a
# push random_pick(a)
elif current_command == ".R" or current_command == "\u03A9":
a = self.pop_stack(default="")
if type(a) is not list:
a = str(a)
if len(a):
self.stack.append(random.choice(a))
else:
self.stack.append(a)
# Command: .r
# pop a
# push random_shuffle(a)
elif current_command == ".r":
a = self.pop_stack(default="")
b = a if type(a) is list else list(str(a))
random.shuffle(b)
self.stack.append(b if type(a) is list else ''.join(b))
# Command: ¹
# push the first item from the input history
elif current_command == "\u00b9":
if len(recent_inputs) > 0:
self.stack.append(recent_inputs[0])
else:
while len(recent_inputs) <= 0:
get_input()
self.stack.append(recent_inputs[0])
# Command: ²
# push the second item from the input history
elif current_command == "\u00b2":
if len(recent_inputs) > 1:
self.stack.append(recent_inputs[1])
else:
while len(recent_inputs) <= 1:
get_input()
self.stack.append(recent_inputs[1])
# Command: ³
# push the third item from the input history
elif current_command == "\u00b3":
if len(recent_inputs) > 2:
self.stack.append(recent_inputs[2])
else:
while len(recent_inputs) <= 2:
get_input()
self.stack.append(recent_inputs[2])
# Command: •
# start/end a base-255 compressed number
elif current_command == "\u2022":
temp_string = ""
temp_string_2 = ""
temp_position = self.pointer_position
while temp_position < len(self.commands) - 1:
temp_position += 1
try:
current_command = self.commands[temp_position]
except:
break
if current_command == "\u2022":
break
else:
temp_string += current_command
self.pointer_position += 1
self.pointer_position += 1
self.stack.append(apply_safe(convert_from_base, temp_string, 255))
# Command: .•
# start/end a compressed alphabet string (base 255 -> base 27: a-z and space)
elif current_command == ".\u2022":
temp_string = ""
temp_string_2 = ""
temp_position = self.pointer_position
while temp_position < len(self.commands) - 1:
temp_position += 1
try:
current_command = self.commands[temp_position]
except:
break
if current_command == "\u2022":
break
else:
temp_string += current_command
self.pointer_position += 1
self.pointer_position += 1
processed_value = convert_from_base(temp_string, 255)
processed_value = convert_to_base_arbitrary(
processed_value, 27)
self.stack.append(''.join(
[chr(x + 96) if x > 0 else " " for x in processed_value]
))
# Command: β
# pop a,b
# push a converted from base b (arbitrary)
elif current_command == "\u03B2":
b = self.pop_stack(default="")
a = self.pop_stack(default="")
self.stack.append(single_vectorized_evaluation(
b, lambda b: convert_from_base_arbitrary(a, b), int
))
# Command: .L
# pop a,b
# push levenshtein(a, b)
elif current_command == ".L":
b = self.pop_stack(default="")
a = self.pop_stack(default="")
self.stack.append(vectorized_evaluation(a, b, minimum_edit_distance, str))
# Command: â
# pop a,b
# push cartesian product
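# e.g. "ab", "12" -> ["a1", "a2", "b1", "b2"]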
elif current_command == "\u00e2":
b = self.pop_stack(default="")
a = self.pop_stack(default="")
if type(a) is not list:
a = str(a)
if type(b) is not list:
b = str(b)
c = list(itertools.product(a, b))
if type(a) is list or type(b) is list:
self.stack.append([list(Q) for Q in c])
else:
self.stack.append([''.join(str(y) for y in x) for x in c])
# Command: ã
# pop a,b
# push a choose b (cartesian product repeat)
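# e.g. "ab", 2 -> ["aa", "ab", "ba", "bb"]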
elif current_command == "\u00e3":
b = self.pop_stack(default="")
a = None
if type(b) is list:
a, b = b[:], 2
else:
a = self.pop_stack(default="")
try:
b = int(ast_int_eval(b))
except:
self.stack.append(a)
else:
if type(a) is not list:
a = str(a)
c = list(itertools.product(a, repeat=b))
if type(a) is list:
self.stack.append([list(Q) for Q in c])
else:
self.stack.append([''.join(str(y) for y in x) for x in c])
# Command: è
# pop a,b
# push a[b]
elif current_command == "\u00e8":
b = self.pop_stack(default=0)
a = self.pop_stack(default="")
if type(a) is not list:
a = str(a)
if not len(a):
self.stack.append(a)
return
if type(b) is list:
temp_list = []
for Q in b:
try:
Q = int(Q)
except:
pass
else:
temp_list.append(a[Q % len(a)])
self.stack.append(temp_list)
else:
try:
b = int(b)
except:
self.stack.append(a)
else:
self.stack.append(a[b % len(a)])
# Command: .p
# Command: η
# pop a
# push prefixes(a)
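# e.g. "abc" -> ["a", "ab", "abc"]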
elif current_command == ".p" or current_command == "\u03B7":
a = self.pop_stack(default="")
if type(a) is not list:
a = str(a)
temp_list = []
for Q in range(1, len(a) + 1):
temp_list.append(a[0:Q])
self.stack.append(temp_list)
# Command: .s
# pop a
# push suffixes(a)
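# e.g. "abc" -> ["c", "bc", "abc"]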
elif current_command == ".s":
a = self.pop_stack(default="")
if type(a) is not list:
a = str(a)
temp_list = []
for Q in range(1, len(a) + 1):
temp_list.append(a[-Q:])
self.stack.append(temp_list)
# Command: .À
# rotate self.stack 1 left
elif current_command == ".\u00C0":
temp_stack = self.stack[:]
self.stack.clear()
for Q in temp_stack[1:]:
self.stack.append(Q)
self.stack.append(temp_stack[0])
# Command: .Á
# rotate self.stack 1 right
elif current_command == ".\u00C1":
temp_stack = self.stack[:]
self.stack.clear()
self.stack.append(temp_stack[-1])
for Q in temp_stack[:-1]:
self.stack.append(Q)
# Command: Ć
# pop a
# push enclosed a: a + a[0]
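# e.g. "abc" -> "abca"; [1, 2, 3] -> [1, 2, 3, 1]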
elif current_command == "\u0106":
a = self.pop_stack(default="")
if type(a) is not list:
a = str(a)
if not len(a):
self.stack.append(a)
else:
self.stack.append(a + [a[0]] if type(a) is list else a + a[0])
# Command: ć
# pop a
# push head_extracted a: a[1:], a[0]
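# e.g. "abc" -> pushes "bc", then "a" (head on top)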
elif current_command == "\u0107":
a = self.pop_stack(default="")
if type(a) is not list:
a = str(a)
if not len(a):
self.stack.append(a)
else:
self.stack.append(a[1:])
self.stack.append(a[0])
# Command: €
# pop a
elif current_command == "\u20AC":
a = self.pop_stack(default="")
if type(a) is not list:
a = str(a)
self.pointer_position += 1
for_each_command = self.commands[self.pointer_position]
# Two-char commands: ones starting with '.' or 'Å',
# pairwise 'ü', or a nested for-each '€'
if for_each_command in ".\u00c5\u20AC\u00fc":
self.pointer_position += 1
for_each_command += self.commands[self.pointer_position]
result = []
for element in a:
result.append(self.value(for_each_command, stack=[element]))
self.stack.append(result)
# Command: α
# pop a,b
# push absolute difference of a and b
elif current_command == "\u03b1":
b = self.pop_stack(default="")
a = self.pop_stack(default="")
self.stack.append(vectorized_evaluation(
a, b, lambda a, b: abs(a - b), ast_int_eval
))
# Command: .B
# pop a
# push squarified(a)
elif current_command == ".B":
a = self.pop_stack(default="")
if type(a) is not list:
a = str(a).split("\n")
max_length = 0
for Q in a:
if len(str(Q)) > max_length:
max_length = len(str(Q))
temp_list = []
for Q in a:
temp_list.append(
str(Q) + ((max_length - len(str(Q))) * " ")
)
self.stack.append(temp_list)
# Command: .«
# foldr
# folds a dyadic command between each element in a list from right to left
#
# Command: .»
# foldl
# folds a dyadic command between each element in a list from left to right
# (the accumulator is the left operand)
elif current_command == ".\u00AB" or current_command == ".\u00BB":
self.pointer_position += 1
fold_command = self.commands[self.pointer_position]
if self.stack and type(self.stack[-1]) is list and len(self.stack[-1]) > 1:
a = self.pop_stack()
if current_command == ".\u00AB":
result = a[-1]
for element in a[:-1][::-1]:
result = self.value(fold_command, stack=[element, result])
else:
result = a[0]
for element in a[1:]:
result = self.value(fold_command, stack=[result, element])
self.stack.append(result)
# Command: .h
# pop a,b
# bijectively convert a from base 10 to base b
elif current_command == ".h":
b = self.pop_stack(default="")
a = self.pop_stack(default="")
try:
b = int(ast_int_eval(b))
# throw if b = 0
1 / b
except:
self.stack.append(a)
else:
self.stack.append(single_vectorized_evaluation(
a, lambda a: bijective_base_conversion(a, b)
))
# Command: .H
# pop a,b
# bijectively convert a from base b to base 10
elif current_command == ".H":
b = self.pop_stack(default="")
a = self.pop_stack(default="")
try:
b = int(ast_int_eval(b))
except:
self.stack.append(a)
else:
self.stack.append(single_vectorized_evaluation(
a, lambda a: bijective_decimal_conversion(a, b)
))
# Command: .D
# pop a,b
# push b copies of a if b is int, else push len(b) copies of a
elif current_command == ".D":
b = self.pop_stack(default="")
a = self.pop_stack(default="")
L = []
try:
L = int(ast_int_eval(b)) or 1
except:
L = len(b) or 1
for Q in range(L):
self.stack.append(a)
# Command: Â
# pop a
# push a, reversed(a)
elif current_command == "\u00c2":
a = self.pop_stack(default="")
if type(a) is not list:
a = str(a)
self.stack.append(a)
self.stack.append(a[::-1])
# Command: õ
# push empty string
elif current_command == "\u00f5":
self.stack.append("")
# Command: Ô
# pop a
# push connected uniquified a
elif current_command == "\u00d4":
a = self.pop_stack(default="")
self.stack.append(apply_safe(uniquify, a, True))
# Command: ‚
# pop a,b
# push [a, b]
elif current_command == "\u201A":
b = self.pop_stack(default="")
a = self.pop_stack(default="")
self.stack.append([a, b])
# Command: .€
# pop a
# debug printer (default encoding, falls back to cp1252)
elif current_command == ".\u20AC":
a = self.pop_stack(default="")
if type(a) is not list:
a = str(a)
for Q in a:
try:
print(Q, end="")
except:
print(str(Q).encode("cp1252"), end="")
print()
global_env.has_printed = True
# Command: Õ
# pop a
# push euler_totient(a)
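# e.g. 10 -> 4 (1, 3, 7, 9 are coprime to 10)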
elif current_command == "\u00d5":
a = self.pop_stack(default="")
if type(a) is list:
self.stack.append(single_vectorized_evaluation(
a, euler_totient, int
))
else:
self.stack.append(apply_safe(euler_totient, a))
# Command: .ä
# pop a
# debug printer (cp1252)
elif current_command == ".\u00e4":
a = self.pop_stack(default="")
print(str(a).encode("cp1252"))
global_env.has_printed = True
# Command: .c
# pop a
# push centralized(a) focused to the left
elif current_command == ".c":
a = self.pop_stack(default="")
if type(a) is not list:
a = str(a).split("\n")
max_length = 0
for Q in a:
if len(str(Q)) > max_length:
max_length = len(str(Q))
temp_list = []
for Q in a:
Q = str(Q)
space_length = (max_length - len(Q)) // 2
if space_length > 0:
temp_list.append(space_length * " " + Q)
else:
temp_list.append(Q)
self.stack.append('\n'.join(temp_list))
# Command: .C
# pop a
# push centralized(a) focused to the right
elif current_command == ".C":
a = self.pop_stack(default="")
if type(a) is not list:
a = str(a).split("\n")
max_length = 0
for Q in a:
if len(str(Q)) > max_length:
max_length = len(str(Q))
temp_list = []
for Q in a:
Q = str(Q)
space_length = (max_length - len(Q) + 1) // 2
if space_length > 0:
temp_list.append(space_length * " " + Q)
else:
temp_list.append(Q)
self.stack.append('\n'.join(temp_list))
# Command: Ã
# pop a, b
# push a.keep(b)
elif current_command == "\u00c3":
b = self.pop_stack(default="")
a = self.pop_stack(default="")
if type(b) is list:
b = [str(x) for x in deep_flatten(b)]
else:
b = str(b)
self.stack.append(vectorized_filter(a, lambda a: a in b, str))
# Command: ˆ
# pop a
# add to global array
elif current_command == "\u02c6":
a = self.pop_stack()
global_env.global_array.append(a)
# Command: .ˆ
# pop a
# insert a into global array and after quit, print array[input_1]
elif current_command == "\u02c6":
a = self.pop_stack()
global_env.global_array.append(a)
# Command: .^
# pop a
# insert a into global array with immediate sorting and
# after quit, print array[input_1]
elif current_command == ".^":
a = self.pop_stack()
global_env.global_array.append(a)
temp_list = sorted(global_env.global_array)
global_env.global_array.clear()
for x in temp_list:
global_env.global_array.append(x)
# Command: ¯
# push global array
elif current_command == "\u00af":
self.stack.append(global_env.global_array)
# Command: ´
# clear global array
elif current_command == "\u00b4":
global_env.global_array.clear()
# Command: ‰
# pop a, b
# push a divmod b
elif current_command == "\u2030":
b = self.pop_stack(default="")
a = self.pop_stack(default="")
self.stack.append(vectorized_evaluation(
a, b, lambda a, b: list(divmod(a, b)), ast_int_eval
))
# Command: ·
# pop a
# push 2 * a
elif current_command == "\u00b7":
a = self.pop_stack(default="")
self.stack.append(single_vectorized_evaluation(
a, lambda a: 2 * a, ast_int_eval
))
# Command: .n
# pop a,b
# push log_b(a)
elif current_command == ".n":
b = self.pop_stack(default="")
a = self.pop_stack(default="")
self.stack.append(vectorized_evaluation(
a, b, lambda a, b: math.log(a, b), ast_int_eval
))
# Command: .w
# pop a
# push a.readall()
# (internet access, doesn't work in safe mode)
# (returns 0 on error)
elif current_command == ".w":
if self.safe_mode:
print("internet access is prohibited in safe mode")
else:
try:
a = self.pop_stack(default="")
import urllib.request as req
f = req.urlopen("http://" + str(a))
self.stack.append(f.read())
except:
self.stack.append(0)
# Command: .W
# pop a
# wait a milliseconds
elif current_command == ".W":
a = ast_int_eval(self.pop_stack())
time.sleep(a / 1000)
# Command: ä
# pop a,b
# push a sliced into b pieces
elif current_command == "\u00e4":
b = self.pop_stack(default="")
a = self.pop_stack(default="")
self.stack.append(apply_safe(chunk_divide, a, b))
# Command: Ƶ
# convert the next char from base 255 to base 10 and add 101
elif current_command == "\u01B5":
self.pointer_position += 1
current_command = self.commands[self.pointer_position]
self.stack.append(convert_from_base(current_command, 255) + 101)
# Command: δ
# pop a, b
# get the next command, push double vectorized command
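# e.g. [1, 2], [10, 20] with command '+' -> [[11, 21], [12, 22]]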
elif current_command == "\u03B4":
self.pointer_position += 1
current_program = self.commands[self.pointer_position]
while current_program[-1] in ".\u20AC":
self.pointer_position += 1
current_program += self.commands[self.pointer_position]
b = self.pop_stack(default="")
a = self.pop_stack(default="")
if type(b) is not list:
b = str(b)
if type(a) is not list:
a = str(a)
result = []
for outer_element in a:
inner_result = []
for inner_element in b:
value = self.value(current_program, stack=[outer_element, inner_element])
inner_result.append(value)
result.append(inner_result)
self.stack.append(result)
# Command: .g
# push length of self.stack
elif current_command == ".g":
self.stack.append(len(self.stack))
# Command: .ǝ
# pop a
# print a to STDERR
elif current_command == ".\u01DD":
a = self.pop_stack(default="")
print(a, file=stderr)
# Command: .0
# throw a division by zero error
elif current_command == ".0":
global_env.zero_division = True
# Command: .£
# pop a, b
# push a[-b:]
elif current_command == ".\u00a3":
b = self.pop_stack(default=0)
a = self.pop_stack(default="")
try:
b = [int(x) for x in b] if type(b) is list else int(b)
except:
a, b = b, a
try:
if type(a) is not list:
a = str(a)
if type(b) is list:
temp_list = []
temp_element = a
for Q in b:
temp_list.append(temp_element[-int(Q):])
temp_element = temp_element[:-int(Q)]
self.stack.append(temp_list)
else:
b = int(b)
self.stack.append(a[-b:])
except:
self.stack.append(a)
# Command: .œ
# pop a
# push partitions(a)
elif current_command == ".œ":
a = self.pop_stack(default=[])
result = partitions(a)
self.stack.append(result)
# Command: .æ
# Permute by function
elif current_command == ".æ":
a = self.pop_stack(default="")
if type(a) is not list:
a = str(a)
statement, remaining = get_statements(self.commands[self.pointer_position + 1:])
results = []
for element in a:
sub_result = [element]
while True:
value = self.value(statement, stack=[element])
if value in sub_result:
break
sub_result.append(value)
results.append(sub_result)
self.pointer_position += len(statement) + 1
result = list_permutations(results)
self.stack.append(result)
#
# Extended commands
#
elif current_command in ExtendedMath.commands_list:
arity = ExtendedMath.commands_list.get(current_command).arity
arguments = [self.pop_stack(default="") for _ in range(0, arity)]
self.stack.append(ExtendedMath.invoke_command(current_command, *arguments))
# Command: î
# pop a
# push round_up(a)
elif current_command == "\u00ee":
a = self.pop_stack(default="")
self.stack.append(single_vectorized_evaluation(
a, math.ceil, ast_int_eval
))
# Command: ǝ
# pop a,b,c
# insert b into a on location c
elif current_command == "\u01dd":
c = self.pop_stack(default="")
b = self.pop_stack(default="")
a = self.pop_stack(default="")
if type(c) is list:
for Q in c:
a = apply_safe(insert, a, b, Q)
self.stack.append(a)
else:
self.stack.append(apply_safe(insert, a, b, c))
#
# CONSTANTS
#
elif current_command in Constants.commands_list:
arity = Constants.commands_list.get(current_command).arity
arguments = [self.pop_stack(default="") for _ in range(0, arity)]
self.stack.append(Constants.invoke_command(current_command, *arguments))
# Command: .:
# pop a,b,c
# push a.replace(b, c)
elif current_command == ".:":
c = self.pop_stack(default="")
b = self.pop_stack(default="")
a = self.pop_stack(default="")
self.stack.append(apply_safe(single_replace, a, b, c))
# Command: .;
# pop a,b,c
# push a.replace_first(b, c)
elif current_command == ".;":
c = self.pop_stack(default="")
b = self.pop_stack(default="")
a = self.pop_stack(default="")
self.stack.append(apply_safe(first_replace, a, b, c))
# Command: .A
# pop a
# push acronymified a
elif current_command == ".A":
a = self.pop_stack(default="")
if type(a) is not list:
a = str(a).split(" ")
self.stack.append(vectorized_aggregator(
a, lambda acc, val: acc + [val[0]], str, []
))
# Command: Δ
# pop a
# Repeat CODE until a doesn't change
elif current_command == "\u0394":
a = self.pop_stack()
statement, remaining = get_statements(self.commands[self.pointer_position + 1:])
curr_value, prev_value = a, None
range_variable = -1
while curr_value != prev_value:
range_variable += 1
curr_value, prev_value = self.value(statement, stack=[curr_value]), curr_value
self.stack.append(curr_value)
self.pointer_position += len(statement) + 1
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'-d', '--debug', help="Debug mode", action="store_true")
parser.add_argument('-s', '--safe', help="Safe mode", action="store_true")
parser.add_argument(
'-c', '--osabie', help="Encode from osabie", action="store_true")
parser.add_argument(
'-t', '--time', help="Time the program", action="store_true")
parser.add_argument(
'-e', '--eval', help="Evaluate as 05AB1E code", action="store",
type=str, nargs="?", default=argparse.SUPPRESS)
parser.add_argument(
"program_path", help="Program path", action="store", type=str,
nargs="?")
args = parser.parse_args()
filename = args.program_path
DEBUG = args.debug
SAFE_MODE = args.safe
ENCODE_OSABIE = args.osabie
TIME_IT = args.time
EVAL = None
if not filename:
try:
EVAL = args.eval
except:
parser.error("program_path is required if not using -e flag")
else:
# If EVAL is still None and there was no error
# then it was called without arguments
if not EVAL:
parser.error("no code passed to -e")
if EVAL:
code = EVAL
# Do not load from file if just eval'ing
elif ENCODE_OSABIE:
code = open(filename, "rb").read()
code = osabie_to_utf8(code)
else:
code = open(filename, "r", encoding="utf-8").read()
if code == "":
code = zero_byte_code
if TIME_IT:
start_time = time.time()
Osabie(code, debug=DEBUG, safe_mode=SAFE_MODE).run()
end_time = time.time()
print()
print("Elapsed: " + str(end_time - start_time) + " seconds")
else:
Osabie(code, debug=DEBUG, safe_mode=SAFE_MODE).run()
|
{
"content_hash": "e9dc3fe3dae7a421833cffa94c5e7e07",
"timestamp": "",
"source": "github",
"line_count": 4176,
"max_line_length": 120,
"avg_line_length": 32.713840996168585,
"alnum_prop": 0.45421006785591417,
"repo_name": "Emigna/05AB1E",
"id": "ec32690e4741d3466cb7dda685f040404cbfacb1",
"size": "136829",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "osabie.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "423753"
}
],
"symlink_target": ""
}
|
if 1:
from google.appengine.ext import testbed
from google.appengine.api.app_identity import app_identity_stub
from google.appengine.api.app_identity import app_identity_keybased_stub
import local_config
email_address = local_config.SERVICE_EMAIL
private_key_path = local_config.SERVICE_KEY_FILE
stub = app_identity_keybased_stub.KeyBasedAppIdentityServiceStub(email_address=email_address,
private_key_path=private_key_path)
testbed = testbed.Testbed()
APP_IDENTITY_SERVICE_NAME = 'app_identity_service'
testbed.activate()
#testbed._register_stub(testbed.APP_IDENTITY_SERVICE_NAME, stub)
testbed._register_stub(APP_IDENTITY_SERVICE_NAME, stub)
testbed.init_datastore_v3_stub()
testbed.init_memcache_stub()
testbed.init_urlfetch_stub()
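# NOTE: the fresh Testbed created below rebinds the name and does not
# re-register the key-based identity stub configured above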
from google.appengine.ext import testbed
testbed = testbed.Testbed()
testbed.activate()
testbed.init_datastore_v3_stub()
testbed.init_memcache_stub()
from bqutil import *
|
{
"content_hash": "20829074fc69dbfce7275e27f28ecbdb",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 103,
"avg_line_length": 40.69230769230769,
"alnum_prop": 0.6947069943289225,
"repo_name": "mitodl/xanalytics",
"id": "0c9925223ef20d6c61f0424babd47f1a73ef7919",
"size": "1078",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_bqutil.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "93098"
},
{
"name": "HTML",
"bytes": "148637"
},
{
"name": "JavaScript",
"bytes": "4636968"
},
{
"name": "Python",
"bytes": "2675424"
}
],
"symlink_target": ""
}
|
from datetime import datetime
from sqlalchemy.ext.associationproxy import AssociationProxy
from sqlalchemy.orm.attributes import InstrumentedAttribute
from sqlalchemy.orm.properties import RelationshipProperty
from werkzeug.exceptions import BadRequest
import iso8601
import sqlalchemy
from ggrc import db
from ggrc.login import get_current_user_id
from ggrc.models.reflection import AttributeInfo
from ggrc.models.types import JsonType
from ggrc.utils import url_for
from ggrc.utils import view_url_for
import ggrc.builder
import ggrc.models
import ggrc.services
"""JSON resource state representation handler for gGRC models."""
def get_json_builder(obj):
"""Instantiate or retrieve a JSON representation builder for the given
object.
"""
if type(obj) is type:
cls = obj
else:
cls = obj.__class__
# Lookup the builder instance in the builder module
builder = getattr(ggrc.builder, cls.__name__, None)
if not builder:
# Create the builder and cache it in the builder module
builder = Builder(cls)
setattr(ggrc.builder, cls.__name__, builder)
return builder
def publish_base_properties(obj):
ret = {}
self_url = url_for(obj)
if self_url:
ret['selfLink'] = self_url
view_url = view_url_for(obj)
if view_url:
ret['viewLink'] = view_url
return ret
def publish(obj, inclusions=(), inclusion_filter=None):
"""Translate ``obj`` into a valid JSON value. Objects with properties are
translated into a ``dict`` object representing a JSON object while simple
values are returned unchanged or specially formatted if needed.
"""
if inclusion_filter is None:
def inclusion_filter(x):
return True
publisher = get_json_builder(obj)
if publisher and getattr(publisher, '_publish_attrs', False):
ret = publish_base_properties(obj)
ret.update(publisher.publish_contribution(
obj, inclusions, inclusion_filter))
return ret
# Otherwise, just return the value itself by default
return obj
def publish_stub(obj, inclusions=(), inclusion_filter=None):
publisher = get_json_builder(obj)
if publisher:
ret = {}
self_url = url_for(obj)
if self_url:
ret['href'] = self_url
ret['type'] = obj.__class__.__name__
ret['context_id'] = obj.context_id
if hasattr(publisher, '_stub_attrs') and publisher._stub_attrs:
ret.update(publisher.publish_stubs(obj, inclusions, inclusion_filter))
return ret
# Otherwise, just return the value itself by default
return obj
def update(obj, json_obj):
"""Translate the state represented by ``json_obj`` into update actions
performed upon the model object ``obj``. After performing the update ``obj``
and ``json_obj`` should be equivalent representations of the model state.
"""
updater = get_json_builder(obj)
if updater:
updater.update(obj, json_obj)
# FIXME what to do if no updater??
# Nothing, perhaps log, assume omitted by design
def create(obj, json_obj):
"""Translate the state represented by ``json_obj`` into update actions
performed upon the new model object ``obj``. After performing the update
``obj`` and ``json_obj`` should be equivalent representations of the model
state.
"""
creator = get_json_builder(obj)
if creator:
creator.create(obj, json_obj)
class UpdateAttrHandler(object):
"""Performs the translation of a JSON state representation into update
actions performed on a model object instance.
"""
@classmethod
def do_update_attr(cls, obj, json_obj, attr):
"""Perform the update to ``obj`` required to make the attribute attr
equivalent in ``obj`` and ``json_obj``.
"""
class_attr = getattr(obj.__class__, attr)
if (hasattr(attr, '__call__')):
# The attribute has been decorated with a callable, grab the name and
# invoke the callable to get the value
attr_name = attr.attr_name
value = attr(cls, obj, json_obj)
elif not hasattr(cls, class_attr.__class__.__name__):
# The attribute is a function on the obj like custom_attributes in
# CustomAttributable mixin
attr_name = attr
value = class_attr(obj, json_obj)
else:
# Lookup the method to use to perform the update. Use reflection to
# key off of the type of the attribute and invoke the method of the
# same name.
attr_name = attr
method = getattr(cls, class_attr.__class__.__name__)
value = method(obj, json_obj, attr_name, class_attr)
if isinstance(value, (set, list)) \
and (
not hasattr(class_attr, 'property') or not
hasattr(class_attr.property, 'columns') or not isinstance(
class_attr.property.columns[0].type,
JsonType)
):
# SQLAlchemy instrumentation botches up if we replace entire collections
# It works if we update them with changes
new_set = set(value)
old_set = set(getattr(obj, attr_name))
coll_class_attr = getattr(obj.__class__, attr_name)
coll_attr = getattr(obj, attr_name)
# Join table objects require special handling so that we can be sure to
# set the modified_by_id correctly
if isinstance(coll_class_attr, AssociationProxy):
current_user_id = get_current_user_id()
proxied_attr = coll_class_attr.local_attr
proxied_property = coll_class_attr.remote_attr
proxied_set_map = dict([(getattr(i, proxied_property.key), i)
for i in getattr(obj, proxied_attr.key)])
coll_attr = getattr(obj, proxied_attr.key)
for item in new_set - old_set:
new_item = coll_class_attr.creator(item)
new_item.modified_by_id = current_user_id
coll_attr.append(new_item)
for item in old_set - new_set:
coll_attr.remove(proxied_set_map[item])
else:
for item in new_set - old_set:
coll_attr.append(item)
for item in old_set - new_set:
coll_attr.remove(item)
else:
setattr(obj, attr_name, value)
@classmethod
def InstrumentedAttribute(cls, obj, json_obj, attr_name, class_attr):
"""Translate the JSON value for an ``InstrumentedAttribute``"""
method = getattr(cls, class_attr.property.__class__.__name__)
return method(obj, json_obj, attr_name, class_attr)
@classmethod
def ColumnProperty(cls, obj, json_obj, attr_name, class_attr):
"""Translate the JSON value for a ``ColumnProperty``"""
method = getattr(
cls,
class_attr.property.expression.type.__class__.__name__,
cls.default_column_handler)
return method(obj, json_obj, attr_name, class_attr)
@classmethod
def default_column_handler(cls, obj, json_obj, attr_name, class_attr):
"""Translate the JSON value for a simple value column"""
return json_obj.get(attr_name)
@classmethod
def DateTime(cls, obj, json_obj, attr_name, class_attr):
"""Translate the JSON value for a ``Datetime`` column."""
value = json_obj.get(attr_name)
try:
if value:
d = iso8601.parse_date(value)
d = d.replace(tzinfo=None)
else:
d = None
return d
except iso8601.ParseError as e:
raise BadRequest(
'Malformed DateTime {0} for parameter {1}. '
'Error message was: {2}'.format(value, attr_name, e.message)
)
@classmethod
def Date(cls, obj, json_obj, attr_name, class_attr):
"""Translate the JSON value for a ``Date`` column."""
value = json_obj.get(attr_name)
try:
return datetime.strptime(value, "%Y-%m-%d") if value else None
except ValueError as e:
try:
return datetime.strptime(value, "%m/%d/%Y") if value else None
except ValueError as e:
raise BadRequest(
'Malformed Date {0} for parameter {1}. '
'Error message was: {2}'.format(value, attr_name, e.message)
)
@classmethod
def query_for(cls, rel_class, json_obj, attr_name, uselist):
"""Resolve the model object instance referred to by the JSON value."""
if uselist:
# The value is a collection of links, resolve the collection of objects
value = json_obj.get(attr_name)
rel_ids = [o[u'id'] for o in value] if value else []
if rel_ids:
return db.session.query(rel_class).filter(
rel_class.id.in_(rel_ids)).all()
else:
return []
else:
rel_obj = json_obj.get(attr_name)
if rel_obj:
try:
# FIXME: Should this be .one() instead of .first() ?
return db.session.query(rel_class).filter(
rel_class.id == rel_obj[u'id']).first()
except TypeError:
raise TypeError(''.join(['Failed to convert attribute ', attr_name]))
return None
@classmethod
def RelationshipProperty(cls, obj, json_obj, attr_name, class_attr):
"""Translate the JSON value for a ``RelationshipProperty``."""
rel_class = class_attr.property.mapper.class_
return cls.query_for(
rel_class, json_obj, attr_name, class_attr.property.uselist)
@classmethod
def AssociationProxy(cls, obj, json_obj, attr_name, class_attr):
"""Translate the JSON value for an ``AssociationProxy``."""
if getattr(class_attr, "publish_raw", False):
return json_obj.get(attr_name, {})
rel_class = class_attr.remote_attr.property.mapper.class_
return cls.query_for(rel_class, json_obj, attr_name, True)
@classmethod
def property(cls, obj, json_obj, attr_name, class_attr):
"""Translate the JSON value for an object method decorated as a
``property``.
"""
# FIXME need a way to decide this. Require link? Use URNs?
# reflective approaches won't work as this is used for polymorphic
# properties
# rel_class = None
# return cls.query_for(rel_class, json_obj, attr_name, True)
attr_value = json_obj.get(attr_name, None)
if attr_value:
import ggrc.models
rel_class_name = json_obj[attr_name]['type']
rel_class = ggrc.models.get_model(rel_class_name)
return cls.query_for(rel_class, json_obj, attr_name, False)
return None
@classmethod
def simple_property(cls, obj, json_obj, attr_name, class_attr):
return json_obj.get(attr_name)
"""
Builder strategy:
* For each non-present attribute, return an "AttributeBuilder" instance
which describes the objects needed to complete the representation.
* Maintain a set of requested (type, condition, stub_only?) tuples
* stub_only? tuples can be requested en-masse via UNION queries
* non-stub_only? tuples can override stub_only? tuples with identical
conditions
* When all available object representations have been built:
Query types:
(type, id) -> explicit link
(type, { keyX: valX, ... }) -> an actual query
note, keyX row/object values must be included in response to distinguish
results from aggregated queries on the same type
(type, [id, { keyX: valX, ... }, id2, { keyY: valY }, ...]) ->
an already-aggregated list of conditions
Query combination:
(type, id) -> aggregate into "id IN (...)"
(type, {}) -> aggregate into "...OR keyX = valX OR keyY = valY"
Query construction:
pass
Result storing:
{ type: { id: <result>, (keyX, ...): { (valX, ...): [<result>, ...] } } }
Result dispatch:
singles = { (type, id): [<Builder>, ...] }
"""
def build_type_query(type, result_spec):
model = ggrc.models.get_model(type)
mapper = model._sa_class_manager.mapper
columns = []
columns_indexes = {}
if len(list(mapper.self_and_descendants)) == 1:
type_column = sqlalchemy.literal(mapper.class_.__name__)
else:
# Handle polymorphic types with CASE
type_column = sqlalchemy.case(
value=mapper.polymorphic_on,
whens={
val: mapper.class_.__name__
for val, mapper in mapper.polymorphic_map.items()
})
columns.append(type_column)
columns_indexes['type'] = 0
columns.append(model.id)
columns_indexes['id'] = 1
columns.append(mapper.c.context_id)
columns_indexes['context_id'] = 2
columns.append(mapper.c.updated_at)
columns_indexes['updated_at'] = 3
conditions = {}
for keys, vals in result_spec.items():
for key in keys:
if key not in columns_indexes:
columns_indexes[key] = len(columns)
columns.append(mapper.c[key])
conditions.setdefault(keys, []).extend(vals.keys())
where_clauses = []
for keys, vals in conditions.items():
if len(keys) == 1:
# If the key is singular, use `IN (...)`
where_clauses.append(
columns[columns_indexes[keys[0]]].in_([v[0] for v in vals]))
else:
# If multiple keys, build `OR` of multiple `AND` clauses
clauses = []
cols = [columns[columns_indexes[k]] for k in keys]
for val in vals:
# Now build OR clause with (key, val) pairs
clause = []
for i, v in enumerate(val):
clause.append(cols[i] == val[i])
clauses.append(sqlalchemy.and_(*clause))
where_clauses.append(sqlalchemy.or_(*clauses))
where_clause = sqlalchemy.or_(*where_clauses)
query = db.session.query(*columns).filter(where_clause)
return columns_indexes, query
def build_stub_union_query(queries):
results = {}
for (type, conditions) in queries:
if isinstance(conditions, (int, long, str, unicode)):
# Assume `id` query
keys, vals = ('id',), (conditions,)
results.setdefault(type, {}).setdefault(keys, {}).setdefault(vals, [])
elif isinstance(conditions, dict):
keys, vals = zip(*sorted(conditions.items()))
results.setdefault(type, {}).setdefault(keys, {}).setdefault(vals, [])
else:
# FIXME: Handle aggregated conditions recursively
pass
column_count = 0
type_column_indexes = {}
type_queries = {}
for (type, result_spec) in results.items():
columns_indexes, query = build_type_query(type, result_spec)
type_column_indexes[type] = columns_indexes
type_queries[type] = query
if len(columns_indexes) > column_count:
column_count = len(columns_indexes)
for (type, query) in type_queries.items():
for _ in range(column_count - len(type_column_indexes[type])):
query = query.add_column(sqlalchemy.literal(None))
type_queries[type] = query
queries_for_union = type_queries.values()
if len(queries_for_union) == 0:
query = None
elif len(queries_for_union) == 1:
query = queries_for_union[0]
else:
query = db.session.query(
sqlalchemy.sql.expression.union(
*[q for q in type_queries.values()]).alias('union_query'))
return results, type_column_indexes, query
def _render_stub_from_match(match, type_columns):
type = match[type_columns['type']]
id = match[type_columns['id']]
return {
'type': type,
'id': id,
'context_id': match[type_columns['context_id']],
'href': url_for(type, id=id),
}
class LazyStubRepresentation(object):
def __init__(self, type, conditions):
self.type = type
if isinstance(conditions, (int, long)):
conditions = {'id': conditions}
self.conditions = conditions
self.condition_key, self.condition_val = zip(*sorted(conditions.items()))
def get_matches(self, results):
return results\
.get(self.type, {})\
.get(self.condition_key, {})\
.get(self.condition_val, [])
def render(self, results, type_columns):
matches = self.get_matches(results)
assert len(matches) <= 1, (results, self.type, self.condition_key,
self.condition_val)
if len(matches) == 1:
return _render_stub_from_match(matches[0], type_columns[self.type])
else:
return None
def walk_representation(obj): # noqa
if isinstance(obj, dict):
for k, v in obj.items():
if isinstance(v, dict):
for x in walk_representation(v):
yield x
elif isinstance(v, (list, tuple)):
for x in walk_representation(v):
yield x
else:
yield v, k, obj
elif isinstance(obj, (list, tuple)):
for i, v in enumerate(obj):
if isinstance(v, dict):
for x in walk_representation(v):
yield x
elif isinstance(v, (list, tuple)):
for x in walk_representation(v):
yield x
else:
yield v, i, obj
def gather_queries(resource):
queries = []
for val, key, obj in walk_representation(resource):
if isinstance(val, LazyStubRepresentation):
queries.append((val.type, val.conditions))
return queries
def reify_representation(resource, results, type_columns):
for val, key, obj in walk_representation(resource):
if isinstance(val, LazyStubRepresentation):
obj[key] = val.render(results, type_columns)
return resource
def publish_representation(resource):
queries = gather_queries(resource)
if len(queries) == 0:
return resource
else:
results, type_columns, query = build_stub_union_query(queries)
rows = query.all()
for row in rows:
type = row[0]
for columns, matches in results[type].items():
vals = tuple(row[type_columns[type][c]] for c in columns)
if vals in matches:
matches[vals].append(row)
return reify_representation(resource, results, type_columns)
class Builder(AttributeInfo):
"""JSON Dictionary builder for ggrc.models.* objects and their mixins."""
def generate_link_object_for_foreign_key(self, id, type, context_id=None):
"""Generate a link object for this object reference."""
return {'id': id, 'type': type, 'href': url_for(type, id=id),
'context_id': context_id}
def generate_link_object_for(
self, obj, inclusions, include, inclusion_filter):
"""Generate a link object for this object. If there are property paths
to be included specified in the ``inclusions`` parameter, those properties
will be added to the object representation. If the ``include`` parameter
is ``True`` the entire object will be represented in the result.
"""
if include and ((not inclusion_filter) or inclusion_filter(obj)):
return publish(obj, inclusions, inclusion_filter)
result = {
'id': obj.id, 'type': type(obj).__name__, 'href': url_for(obj),
'context_id': obj.context_id}
for path in inclusions:
if type(path) is not str and type(path) is not unicode:
attr_name, remaining_path = path[0], path[1:]
else:
attr_name, remaining_path = path, ()
result[attr_name] = self.publish_attr(
obj, attr_name, remaining_path, include, inclusion_filter)
return result
def publish_link_collection(
self, join_objects, inclusions, include, inclusion_filter):
"""The ``attr_name`` attribute is a collection of object references;
translate the collection of object references into a collection of link
objects for the JSON dictionary representation.
"""
# FIXME: Remove the "if o is not None" when we can guarantee referential
# integrity
return [
self.generate_link_object_for(o, inclusions, include, inclusion_filter)
for o in join_objects if o is not None]
def publish_link(
self, obj, attr_name, inclusions, include, inclusion_filter):
"""The ``attr_name`` attribute is an object reference; translate the object
reference into a link object for the JSON dictionary representation.
"""
attr_value = getattr(obj, attr_name)
if attr_value:
return self.generate_link_object_for(
attr_value, inclusions, include, inclusion_filter)
else:
return None
def publish_association_proxy(
self, obj, attr_name, class_attr, inclusions, include,
inclusion_filter):
# `associationproxy` uses another table as a join table, and context
# filtering must be done on the join table, or information leakage will
# result.
join_objects = []
for join_object in getattr(obj, class_attr.local_attr.key):
if (not inclusion_filter) or inclusion_filter(join_object):
join_objects.append(join_object)
if include:
target_objects = [
getattr(join_object, class_attr.remote_attr.key)
for join_object in join_objects]
return self.publish_link_collection(
target_objects, inclusions, include, inclusion_filter)
else:
if isinstance(class_attr.remote_attr, property):
target_name = class_attr.value_attr + '_id'
target_type = class_attr.value_attr + '_type'
return [
LazyStubRepresentation(
getattr(o, target_type), getattr(o, target_name))
for o in join_objects]
else:
target_mapper = class_attr.remote_attr.property.mapper
# Handle inheritance -- we must check the object itself for the type
if len(list(target_mapper.self_and_descendants)) > 1:
target_attr = class_attr.remote_attr.property.key
return [
self.generate_link_object_for(
getattr(o, target_attr),
inclusions,
include,
inclusion_filter)
for o in join_objects]
else:
target_name = list(
class_attr.remote_attr.property.local_columns)[0].key
target_type = class_attr.remote_attr.property.mapper.class_.__name__
return [
LazyStubRepresentation(
target_type, getattr(o, target_name))
for o in join_objects]
def publish_relationship(
self, obj, attr_name, class_attr, inclusions, include,
inclusion_filter):
uselist = class_attr.property.uselist
if uselist:
join_objects = getattr(obj, attr_name)
return self.publish_link_collection(
join_objects, inclusions, include, inclusion_filter)
elif include or class_attr.property.backref:
return self.publish_link(
obj, attr_name, inclusions, include, inclusion_filter)
else:
if class_attr.property.mapper.class_.__mapper__.polymorphic_on \
is not None:
target = getattr(obj, attr_name)
target_type = target.__class__.__name__
else:
target_type = class_attr.property.mapper.class_.__name__
target_name = list(class_attr.property.local_columns)[0].key
attr_value = getattr(obj, target_name)
if attr_value is not None:
return LazyStubRepresentation(target_type, attr_value)
else:
return None
def publish_attr(
self, obj, attr_name, inclusions, include, inclusion_filter):
class_attr = getattr(obj.__class__, attr_name)
if isinstance(class_attr, AssociationProxy):
if getattr(class_attr, 'publish_raw', False):
published_attr = getattr(obj, attr_name)
if hasattr(published_attr, "copy"):
return published_attr.copy()
else:
return published_attr
else:
return self.publish_association_proxy(
obj, attr_name, class_attr, inclusions, include, inclusion_filter)
elif isinstance(class_attr, InstrumentedAttribute) and \
isinstance(class_attr.property, RelationshipProperty):
return self.publish_relationship(
obj, attr_name, class_attr, inclusions, include, inclusion_filter)
elif class_attr.__class__.__name__ == 'property':
if not inclusions or include:
if (getattr(obj, '{0}_id'.format(attr_name))):
return LazyStubRepresentation(
getattr(obj, '{0}_type'.format(attr_name)),
getattr(obj, '{0}_id'.format(attr_name)))
else:
return self.publish_link(
obj, attr_name, inclusions, include, inclusion_filter)
else:
return getattr(obj, attr_name)
def _publish_attrs_for(
self, obj, attrs, json_obj, inclusions=[], inclusion_filter=None):
for attr in attrs:
if hasattr(attr, '__call__'):
attr_name = attr.attr_name
else:
attr_name = attr
local_inclusion = ()
for inclusion in inclusions:
if inclusion[0] == attr_name:
local_inclusion = inclusion
break
json_obj[attr_name] = self.publish_attr(
obj, attr_name, local_inclusion[1:], len(local_inclusion) > 0,
inclusion_filter)
def publish_attrs(self, obj, json_obj, extra_inclusions, inclusion_filter):
"""Translate the state represented by ``obj`` into the JSON dictionary
``json_obj``.
The ``inclusions`` parameter can specify a tree of property paths to be
inlined into the representation. Leaf attributes will be inlined completely
if they are links to other objects. The inclusions data structure is a
list where the first segment of a path is a string and the next segment
is a list of segment paths. Here are some examples:
..
('directives')
[('directives'),('cycles')]
[('directives', ('audit_frequency','organization')),('cycles')]
"""
inclusions = tuple((attr,) for attr in self._include_links)
inclusions = tuple(set(inclusions).union(set(extra_inclusions)))
return self._publish_attrs_for(
obj, self._publish_attrs, json_obj, inclusions, inclusion_filter)
@classmethod
def do_update_attrs(cls, obj, json_obj, attrs):
"""Translate every attribute in ``attrs`` from the JSON dictionary value
to a value or model object instance for references set for the attribute
in ``obj``.
"""
for attr_name in attrs:
UpdateAttrHandler.do_update_attr(obj, json_obj, attr_name)
def update_attrs(self, obj, json_obj):
"""Translate the state representation given by ``json_obj`` into the
model object ``obj``.
"""
self.do_update_attrs(obj, json_obj, self._update_attrs)
def create_attrs(self, obj, json_obj):
"""Translate the state representation given by ``json_obj`` into the new
model object ``obj``.
"""
self.do_update_attrs(obj, json_obj, self._create_attrs)
def publish_contribution(self, obj, inclusions, inclusion_filter):
"""Translate the state represented by ``obj`` into a JSON dictionary"""
json_obj = {}
self.publish_attrs(obj, json_obj, inclusions, inclusion_filter)
return json_obj
def publish_stubs(self, obj, inclusions, inclusion_filter):
"""Translate the state represented by ``obj`` into a JSON dictionary
containing an abbreviated representation.
"""
json_obj = {}
self._publish_attrs_for(
obj, self._stub_attrs, json_obj, inclusions, inclusion_filter)
return json_obj
def update(self, obj, json_obj):
"""Update the state represented by ``obj`` to be equivalent to the state
represented by the JSON dictionary ``json_obj``.
"""
self.update_attrs(obj, json_obj)
def create(self, obj, json_obj):
"""Update the state of the new model object ``obj`` to be equivalent to the
state represented by the JSON dictionary ``json_obj``.
"""
self.create_attrs(obj, json_obj)
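# A minimal usage sketch (assumed names: `builder` is an instance of the
# builder class defined earlier in this module, `directive` an instance of
# the corresponding model class):
#
#   json_obj = builder.publish_contribution(directive, (), None)
#   stub_obj = builder.publish_stubs(directive, (), None)
#
# publish_contribution emits the full attribute dictionary, while
# publish_stubs emits only the abbreviated stub attributes.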
|
{
"content_hash": "af2ac9a2e82e20620ece1acae8287532",
"timestamp": "",
"source": "github",
"line_count": 742,
"max_line_length": 79,
"avg_line_length": 36.30053908355795,
"alnum_prop": 0.6477816966771858,
"repo_name": "prasannav7/ggrc-core",
"id": "651997917e2de8ecf76541f355d583eee3eeed53",
"size": "27177",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "src/ggrc/builder/json.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "167445"
},
{
"name": "Cucumber",
"bytes": "139629"
},
{
"name": "HTML",
"bytes": "1098331"
},
{
"name": "JavaScript",
"bytes": "1447363"
},
{
"name": "Makefile",
"bytes": "6225"
},
{
"name": "Mako",
"bytes": "2559"
},
{
"name": "Python",
"bytes": "2370461"
},
{
"name": "Shell",
"bytes": "33089"
}
],
"symlink_target": ""
}
|
"""
This module contains all the classes that handle the intermediate
representation language. It is basically the REIL language with minor
changes. Below there is an overview of the REIL language and its
instruction format. For full details see "REIL: A platform-independent
intermediate representation of disassembled code for static code
analysis."
All algorithms within the framework are designed to operate on the
intermediate representation. This provides great flexibility when it
comes to implement a cross-platform framework.
Instruction Format
------------------
mnemonic oprnd1, oprnd2, oprnd3
Instructions
------------
Arithmetic : ADD, SUB, MUL, DIV, MOD, BSH
Bitwise : AND, OR, XOR
Data Transfer : LDM, STM, STR
Conditional : BISZ, JCC
Other : UNDEF, UNKN, NOP
"""
# Whether to display operand sizes when printing instructions.
show_size = True
# TODO: Create module util and move this function there.
def split_address(address):
return address >> 0x08, address & 0xff
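# Illustrative example of the address packing assumed here: the REIL
# instruction at index 0x02 of native address 0x401000 has REIL address
# (0x401000 << 8) | 0x02, and split_address of that value returns
# (0x401000, 0x02).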
class ReilMnemonic(object):
"""Enumeration of IR mnemonics.
"""
# Arithmetic Instructions
ADD = 1
SUB = 2
MUL = 3
DIV = 4
MOD = 5
BSH = 6
# Bitwise Instructions
AND = 7
OR = 8
XOR = 9
# Data Transfer Instructions
LDM = 10
STM = 11
STR = 12
# Conditional Instructions
BISZ = 13
JCC = 14
# Other Instructions
UNKN = 15
UNDEF = 16
NOP = 17
# Added Instructions
RET = 18
# Extensions
SEXT = 19
@staticmethod
def to_string(mnemonic):
"""Return the string representation of the given mnemonic.
"""
strings = {
# Arithmetic Instructions
ReilMnemonic.ADD : "add",
ReilMnemonic.SUB : "sub",
ReilMnemonic.MUL : "mul",
ReilMnemonic.DIV : "div",
ReilMnemonic.MOD : "mod",
ReilMnemonic.BSH : "bsh",
# Bitwise Instructions
ReilMnemonic.AND : "and",
ReilMnemonic.OR : "or",
ReilMnemonic.XOR : "xor",
# Data Transfer Instructions
ReilMnemonic.LDM : "ldm",
ReilMnemonic.STM : "stm",
ReilMnemonic.STR : "str",
# Conditional Instructions
ReilMnemonic.BISZ : "bisz",
ReilMnemonic.JCC : "jcc",
# Other Instructions
ReilMnemonic.UNKN : "unkn" ,
ReilMnemonic.UNDEF : "undef" ,
ReilMnemonic.NOP : "nop" ,
# Added Instructions
ReilMnemonic.RET : "ret",
# Extensions
ReilMnemonic.SEXT : "sext",
}
return strings[mnemonic]
@staticmethod
def from_string(string):
"""Return the mnemonic represented by the given string.
"""
mnemonics = {
# Arithmetic Instructions
"add" : ReilMnemonic.ADD,
"sub" : ReilMnemonic.SUB,
"mul" : ReilMnemonic.MUL,
"div" : ReilMnemonic.DIV,
"mod" : ReilMnemonic.MOD,
"bsh" : ReilMnemonic.BSH,
# Bitwise Instructions
"and" : ReilMnemonic.AND,
"or" : ReilMnemonic.OR,
"xor" : ReilMnemonic.XOR,
# Data Transfer Instructions
"ldm" : ReilMnemonic.LDM,
"stm" : ReilMnemonic.STM,
"str" : ReilMnemonic.STR,
# Conditional Instructions
"bisz" : ReilMnemonic.BISZ,
"jcc" : ReilMnemonic.JCC,
# Other Instructions
"unkn" : ReilMnemonic.UNKN,
"undef" : ReilMnemonic.UNDEF,
"nop" : ReilMnemonic.NOP,
# Added Instructions
"ret" : ReilMnemonic.RET,
# Extensions
"sext" : ReilMnemonic.SEXT,
}
return mnemonics[string]
REIL_MNEMONICS = (
# Arithmetic Instructions
ReilMnemonic.ADD,
ReilMnemonic.SUB,
ReilMnemonic.MUL,
ReilMnemonic.DIV,
ReilMnemonic.MOD,
ReilMnemonic.BSH,
# Bitwise Instructions
ReilMnemonic.AND,
ReilMnemonic.OR,
ReilMnemonic.XOR,
# Data Transfer Instructions
ReilMnemonic.LDM,
ReilMnemonic.STM,
ReilMnemonic.STR,
# Conditional Instructions
ReilMnemonic.BISZ,
ReilMnemonic.JCC,
# Other Instructions
ReilMnemonic.UNKN,
ReilMnemonic.UNDEF,
ReilMnemonic.NOP,
# Added Instructions
ReilMnemonic.RET,
# Extensions
ReilMnemonic.SEXT,
)
class ReilInstruction(object):
"""Representation of a REIL instruction.
"""
__slots__ = [
'_mnemonic',
'_operands',
'_comment',
'_address',
]
def __init__(self):
# A REIL mnemonic
self._mnemonic = None
# A list of operands. Exactly 3.
self._operands = [ReilEmptyOperand()] * 3
# Optionally, a comment for the instruction.
self._comment = None
# A REIL address for the instruction.
self._address = None
@property
def mnemonic(self):
"""Get instruction mnemonic.
"""
return self._mnemonic
@property
def mnemonic_str(self):
"""Get instruction mnemonic as string.
"""
return ReilMnemonic.to_string(self._mnemonic)
@mnemonic.setter
def mnemonic(self, value):
"""Set instruction mnemonic.
"""
if value not in REIL_MNEMONICS:
raise Exception("Invalid instruction mnemonic : %s" % str(value))
self._mnemonic = value
@property
def operands(self):
"""Get instruction operands.
"""
return self._operands
@operands.setter
def operands(self, value):
"""Set instruction operands.
"""
if len(value) != 3:
raise Exception("Invalid instruction operands : %s" % str(value))
self._operands = value
@property
def address(self):
"""Get instruction address.
"""
return self._address
@address.setter
def address(self, value):
"""Set instruction address.
"""
self._address = value
@property
def comment(self):
"""Get instruction comment.
"""
return self._comment
@comment.setter
def comment(self, value):
"""Set instruction comment.
"""
self._comment = value
def __str__(self):
def print_oprnd(oprnd):
oprnd_str = str(oprnd)
size_str = str(oprnd.size) if oprnd.size else ""
sizes = {
256 : "DDQWORD",
128 : "DQWORD",
72 : "POINTER",
64 : "QWORD",
40 : "POINTER",
32 : "DWORD",
16 : "WORD",
8 : "BYTE",
1 : "BIT",
"" : "UNK",
}
if isinstance(oprnd, ReilEmptyOperand):
return "%s" % (oprnd_str)
else:
return "%s %s" % (sizes[oprnd.size if oprnd.size else ""], oprnd_str)
mnemonic_str = ReilMnemonic.to_string(self._mnemonic)
if show_size:
operands_str = ", ".join(map(print_oprnd, self._operands))
else:
operands_str = ", ".join(map(str, self._operands))
return "%-5s [%s]" % (mnemonic_str, operands_str)
def __hash__(self):
return hash(str(self))
def __getstate__(self):
state = {}
state['_mnemonic'] = self._mnemonic
state['_operands'] = self._operands
state['_comment'] = self._comment
state['_address'] = self._address
return state
def __setstate__(self, state):
self._mnemonic = state['_mnemonic']
self._operands = state['_operands']
self._comment = state['_comment']
self._address = state['_address']
class ReilOperand(object):
"""Representation of an IR instruction's operand.
"""
__slots__ = [
'_size',
]
def __init__(self, size):
# Size of the operand, in bits.
self._size = size
@property
def size(self):
"""Get operand size.
"""
return self._size
@size.setter
def size(self, value):
"""Set operand size.
"""
self._size = value
def __eq__(self, other):
return type(other) is type(self) and \
self._size == other._size
def __ne__(self, other):
return not self.__eq__(other)
def __getstate__(self):
state = {}
state['_size'] = self._size
return state
def __setstate__(self, state):
self._size = state['_size']
class ReilImmediateOperand(ReilOperand):
"""Representation of a REIL instruction immediate operand.
"""
__slots__ = [
'_immediate',
]
def __init__(self, immediate, size=None):
super(ReilImmediateOperand, self).__init__(size)
assert type(immediate) in [int, long], "Invalid immediate value type."
self._immediate = immediate
@property
def immediate(self):
"""Get immediate.
"""
if not self._size:
raise Exception("Operand size missing.")
return self._immediate & (2 ** self._size - 1)
def __str__(self):
if not self._size:
raise Exception("Operand size missing.")
string = hex(self._immediate & (2 ** self._size - 1))
return string[:-1] if string[-1] == 'L' else string
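# Illustrative: in Python 2, hex(-1 & 2 ** 64 - 1) yields
# '0xffffffffffffffffL'; the trailing long-literal 'L' is stripped above.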
def __eq__(self, other):
return type(other) is type(self) and \
self._size == other._size and \
self._immediate == other._immediate
def __getstate__(self):
state = super(ReilImmediateOperand, self).__getstate__()
state['_immediate'] = self._immediate
return state
def __setstate__(self, state):
super(ReilImmediateOperand, self).__setstate__(state)
self._immediate = state['_immediate']
class ReilRegisterOperand(ReilOperand):
"""Representation of a REIL instruction register operand.
"""
__slots__ = [
'_name',
]
def __init__(self, name, size=None):
super(ReilRegisterOperand, self).__init__(size)
# Register name.
self._name = name
@property
def name(self):
"""Get IR register operand name.
"""
return self._name
def __str__(self):
return self._name
def __eq__(self, other):
return type(other) is type(self) and \
self._size == other._size and \
self._name == other._name
def __getstate__(self):
state = super(ReilRegisterOperand, self).__getstate__()
state['_name'] = self._name
return state
def __setstate__(self, state):
super(ReilRegisterOperand, self).__setstate__(state)
self._name = state['_name']
class ReilEmptyOperand(ReilRegisterOperand):
"""Representation of an IR instruction's empty operand.
"""
def __init__(self):
super(ReilEmptyOperand, self).__init__("EMPTY", size=None)
class ReilInstructionBuilder(object):
"""REIL Instruction Builder. Generate REIL instructions, easily.
"""
# Arithmetic Instructions
# ======================================================================== #
def gen_add(self, src1, src2, dst):
"""Return an ADD instruction.
"""
return self.build(ReilMnemonic.ADD, src1, src2, dst)
def gen_sub(self, src1, src2, dst):
"""Return a SUB instruction.
"""
return self.build(ReilMnemonic.SUB, src1, src2, dst)
def gen_mul(self, src1, src2, dst):
"""Return a MUL instruction.
"""
return self.build(ReilMnemonic.MUL, src1, src2, dst)
def gen_div(self, src1, src2, dst):
"""Return a DIV instruction.
"""
return self.build(ReilMnemonic.DIV, src1, src2, dst)
def gen_mod(self, src1, src2, dst):
"""Return a MOD instruction.
"""
return self.build(ReilMnemonic.MOD, src1, src2, dst)
def gen_bsh(self, src1, src2, dst):
"""Return a BSH instruction.
"""
return self.build(ReilMnemonic.BSH, src1, src2, dst)
# Bitwise Instructions
# ======================================================================== #
def gen_and(self, src1, src2, dst):
"""Return an AND instruction.
"""
return self.build(ReilMnemonic.AND, src1, src2, dst)
def gen_or(self, src1, src2, dst):
"""Return an OR instruction.
"""
return self.build(ReilMnemonic.OR, src1, src2, dst)
def gen_xor(self, src1, src2, dst):
"""Return a XOR instruction.
"""
return self.build(ReilMnemonic.XOR, src1, src2, dst)
# Data Transfer Instructions
# ======================================================================== #
def gen_ldm(self, src, dst):
"""Return a LDM instruction.
"""
return self.build(ReilMnemonic.LDM, src, ReilEmptyOperand(), dst)
def gen_stm(self, src, dst):
"""Return a STM instruction.
"""
return self.build(ReilMnemonic.STM, src, ReilEmptyOperand(), dst)
def gen_str(self, src, dst):
"""Return a STR instruction.
"""
return self.build(ReilMnemonic.STR, src, ReilEmptyOperand(), dst)
# Conditional Instructions
# ======================================================================== #
def gen_bisz(self, src, dst):
"""Return a BISZ instruction.
"""
return self.build(ReilMnemonic.BISZ, src, ReilEmptyOperand(), dst)
def gen_jcc(self, src, dst):
"""Return a JCC instruction.
"""
return self.build(ReilMnemonic.JCC, src, ReilEmptyOperand(), dst)
# Other Instructions
# ======================================================================== #
def gen_unkn(self):
"""Return an UNKN instruction.
"""
empty_reg = ReilEmptyOperand()
return self.build(ReilMnemonic.UNKN, empty_reg, empty_reg, empty_reg)
def gen_undef(self):
"""Return an UNDEF instruction.
"""
empty_reg = ReilEmptyOperand()
return self.build(ReilMnemonic.UNDEF, empty_reg, empty_reg, empty_reg)
def gen_nop(self):
"""Return a NOP instruction.
"""
empty_reg = ReilEmptyOperand()
return self.build(ReilMnemonic.NOP, empty_reg, empty_reg, empty_reg)
# Ad hoc Instructions
# ======================================================================== #
def gen_ret(self):
"""Return a RET instruction.
"""
empty_reg = ReilEmptyOperand()
return self.build(ReilMnemonic.RET, empty_reg, empty_reg, empty_reg)
# Extensions
# ======================================================================== #
def gen_sext(self, src, dst):
"""Return a SEXT instruction.
"""
empty_reg = ReilEmptyOperand()
return self.build(ReilMnemonic.SEXT, src, empty_reg, dst)
# Auxiliary functions
# ======================================================================== #
def build(self, mnemonic, oprnd1, oprnd2, oprnd3):
"""Return the specified instruction.
"""
ins = ReilInstruction()
ins.mnemonic = mnemonic
ins.operands = [oprnd1, oprnd2, oprnd3]
return ins
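# A minimal usage sketch for the builder above (operand values assumed):
#
#   builder = ReilInstructionBuilder()
#   t0 = ReilRegisterOperand("t0", size=32)
#   t1 = ReilRegisterOperand("t1", size=32)
#   imm = ReilImmediateOperand(0x10, size=32)
#   instr = builder.gen_add(t0, imm, t1)
#   print(instr)   # add   [DWORD t0, DWORD 0x10, DWORD t1]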
class DualInstruction(object):
"""Represents an assembler instruction paired with its IR
representation.
"""
__slots__ = [
'_address',
'_asm_instr',
'_ir_instrs',
]
def __init__(self, address, asm_instr, ir_instrs):
# Address of the assembler instruction.
self._address = address
# Assembler instruction.
self._asm_instr = asm_instr
# REIL translation of the assembler instruction. Note that one
# assembler instruction may map to several REIL instructions.
self._ir_instrs = ir_instrs
@property
def address(self):
"""Get instruction address.
"""
return self._address
@property
def asm_instr(self):
"""Get assembly instruction.
"""
return self._asm_instr
@property
def ir_instrs(self):
"""Get IR representation of the assembly instruction.
"""
return self._ir_instrs
def __eq__(self, other):
return self.address == other.address and \
self.asm_instr == other.asm_instr
def __ne__(self, other):
return not self.__eq__(other)
def __getstate__(self):
state = {}
state['_address'] = self._address
state['_asm_instr'] = self._asm_instr
state['_ir_instrs'] = self._ir_instrs
return state
def __setstate__(self, state):
self._address = state['_address']
self._asm_instr = state['_asm_instr']
self._ir_instrs = state['_ir_instrs']
class ReilSequence(object):
"""Reil instruction sequence.
"""
def __init__(self):
self.__sequence = []
self.__next_seq_address = None
def append(self, instruction):
self.__sequence.append(instruction)
def get(self, index):
return self.__sequence[index]
def dump(self):
for instr in self.__sequence:
base_addr, index = split_address(instr.address)
print("{:08x}:{:02x}\t{}".format(base_addr, index, instr))
@property
def address(self):
return self.__sequence[0].address if self.__sequence else None
@property
def next_sequence_address(self):
return self.__next_seq_address
@next_sequence_address.setter
def next_sequence_address(self, address):
self.__next_seq_address = address
def __len__(self):
return len(self.__sequence)
def __iter__(self):
for instr in self.__sequence:
yield instr
class ReilContainerInvalidAddressError(Exception):
pass
class ReilContainer(object):
"""Reil instruction container.
"""
def __init__(self):
self.__container = {}
def add(self, sequence):
base_addr, _ = split_address(sequence.address)
if base_addr in self.__container.keys():
raise Exception("Invalid sequence")
self.__container[base_addr] = sequence
def fetch(self, address):
base_addr, index = split_address(address)
if base_addr not in self.__container.keys():
raise ReilContainerInvalidAddressError()
return self.__container[base_addr].get(index)
def get_next_address(self, address):
base_addr, index = split_address(address)
if base_addr not in self.__container.keys():
raise Exception("Invalid address.")
addr = address
if index < len(self.__container[base_addr]) - 1:
addr += 1
else:
addr = self.__container[base_addr].next_sequence_address
return addr
def dump(self):
for base_addr in sorted(self.__container.keys()):
self.__container[base_addr].dump()
print("-" * 80)
def __iter__(self):
for addr in sorted(self.__container.keys()):
for instr in self.__container[addr]:
yield instr
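# A minimal usage sketch for the container types above (addresses and the
# nop instruction are assumed values):
#
#   seq = ReilSequence()
#   nop = ReilInstructionBuilder().gen_nop()
#   nop.address = 0x401000 << 8
#   seq.append(nop)
#   seq.next_sequence_address = 0x401002 << 8
#
#   container = ReilContainer()
#   container.add(seq)
#   container.fetch(0x401000 << 8)             # -> the nop instruction
#   container.get_next_address(0x401000 << 8)  # -> 0x401002 << 8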
|
{
"content_hash": "2d6b67ef6ebef511b4a8aaf977f4b36f",
"timestamp": "",
"source": "github",
"line_count": 768,
"max_line_length": 85,
"avg_line_length": 25.31640625,
"alnum_prop": 0.542251710127038,
"repo_name": "bj7/barf-project",
"id": "b34e8d2e536dbe267c98c85d0a13aaa43df7de15",
"size": "20789",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "barf/barf/core/reil/reil.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Assembly",
"bytes": "6758"
},
{
"name": "Batchfile",
"bytes": "6697"
},
{
"name": "C",
"bytes": "20497"
},
{
"name": "Makefile",
"bytes": "9830"
},
{
"name": "Python",
"bytes": "798435"
},
{
"name": "Shell",
"bytes": "1708"
}
],
"symlink_target": ""
}
|