code
stringlengths 3
1.05M
| repo_name
stringlengths 5
104
| path
stringlengths 4
251
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 3
1.05M
|
|---|---|---|---|---|---|
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from .study import StudyHandler, StudyCreatorHandler, StudyStatusHandler
from .study_samples import (StudySamplesHandler, StudySamplesInfoHandler,
StudySamplesCategoriesHandler)
from .study_person import StudyPersonHandler
from .study_preparation import (StudyPrepCreatorHandler,
StudyPrepArtifactCreatorHandler)
# Public API of this handler package.
__all__ = ['StudyHandler', 'StudySamplesHandler', 'StudySamplesInfoHandler',
           'StudySamplesCategoriesHandler', 'StudyPersonHandler',
           'StudyCreatorHandler', 'StudyPrepCreatorHandler',
           'StudyPrepArtifactCreatorHandler', 'StudyStatusHandler']

# Tornado URL routing table for the REST API (pattern, handler) pairs.
# NOTE: Tornado implicitly appends a "$" anchor to patterns that lack one,
# so the un-anchored routes below still match the full request path.
ENDPOINTS = (
    (r"/api/v1/study$", StudyCreatorHandler),
    (r"/api/v1/study/([0-9]+)$", StudyHandler),
    # the category list is encoded in the path itself ("categories=<csv>"),
    # not as a query string
    (r"/api/v1/study/([0-9]+)/samples/categories=([a-zA-Z\-0-9\.:,_]*)",
     StudySamplesCategoriesHandler),
    (r"/api/v1/study/([0-9]+)/samples", StudySamplesHandler),
    (r"/api/v1/study/([0-9]+)/samples/info", StudySamplesInfoHandler),
    (r"/api/v1/person(.*)", StudyPersonHandler),
    (r"/api/v1/study/([0-9]+)/preparation/([0-9]+)/artifact",
     StudyPrepArtifactCreatorHandler),
    (r"/api/v1/study/([0-9]+)/preparation(.*)", StudyPrepCreatorHandler),
    (r"/api/v1/study/([0-9]+)/status$", StudyStatusHandler)
)
|
ElDeveloper/qiita
|
qiita_pet/handlers/rest/__init__.py
|
Python
|
bsd-3-clause
| 1,643
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
__title__ = ''
__author__ = 'HaiFeng'
__mtime__ = '2017/11/13'
"""
import time
from .structs import IntervalType
from .bar import Bar
from .data import Data
from .order import OrderItem
class Strategy(object):
    """Strategy base class.

    Wraps one or more Data series; the convenience properties below all
    delegate to the primary series, ``self.Datas[0]``.
    """

    def __init__(self, dict_cfg):
        """Initialize from a config dict; pass '' for an empty strategy.

        Expected keys: ``ID``, ``Params``, ``BeginDate``, ``Datas`` (list of
        dicts with Instrument/Interval/IntervalType); optional ``TickTest``.
        """
        '''策略标识'''  # strategy identifier
        self.ID = 0
        '''数据序列'''  # list of Data series
        self.Datas = []
        """起始测试时间
        格式:yyyyMMdd[%Y%m%d]
        默认:20170101"""  # backtest start date, format %Y%m%d, default 20170101
        self.BeginDate = '20170101'
        '''参数'''  # strategy parameters
        self.Params = []
        '''分笔测试'''  # tick-by-tick backtest flag
        self.TickTest = False
        if dict_cfg == '':
            return
        else:
            self.ID = dict_cfg['ID']
            self.Params = dict_cfg['Params']
            self.BeginDate = str(dict_cfg['BeginDate'])
            if 'TickTest' in dict_cfg:
                self.TickTest = dict_cfg['TickTest']
            for data in dict_cfg['Datas']:
                # each Data gets our private callbacks for bar/order events
                newdata = Data(self.__BarUpdate, self.__OnOrder)
                newdata.Instrument = data['Instrument']
                newdata.Interval = data['Interval']
                newdata.IntervalType = IntervalType[data['IntervalType']]
                self.Datas.append(newdata)

    @property
    def Bars(self):
        '''K-line (bar) series'''
        return self.Datas[0].Bars

    @property
    def Instrument(self):
        '''instrument (contract) id'''
        return self.Datas[0].Instrument

    @property
    def Interval(self):
        '''bar interval'''
        return self.Datas[0].Interval

    @property
    def IntervalType(self):
        '''bar interval type'''
        return self.Datas[0].IntervalType

    @property
    def Orders(self):
        '''trade signals (orders)'''
        return self.Datas[0].Orders

    @property
    def IndexDict(self):
        '''Indicator dictionary.
        Indicators used by the strategy are stored here so the host
        program can display and process them.'''
        return self.Datas[0].IndexDict

    @property
    def D(self):
        '''time series'''
        return self.Datas[0].D

    @property
    def H(self):
        '''high price series'''
        return self.Datas[0].H

    @property
    def L(self):
        '''low price series'''
        return self.Datas[0].L

    @property
    def O(self):
        '''open price series'''
        return self.Datas[0].O

    @property
    def C(self):
        '''close price series'''
        return self.Datas[0].C

    @property
    def V(self):
        '''volume series'''
        return self.Datas[0].V

    @property
    def I(self):
        '''open interest series'''
        return self.Datas[0].I

    @property
    def AvgEntryPriceShort(self):
        '''average entry price - short'''
        return self.Datas[0].AvgEntryPriceShort

    @property
    def AvgEntryPriceLong(self):
        '''average entry price - long'''
        return self.Datas[0].AvgEntryPriceLong

    @property
    def PositionLong(self):
        '''position - long'''
        return self.Datas[0].PositionLong

    @property
    def PositionShort(self):
        '''position - short'''
        return self.Datas[0].PositionShort

    @property
    def EntryDateLong(self):
        '''entry time - long'''
        return self.Datas[0].EntryDateLong

    @property
    def EntryPriceLong(self):
        '''entry price - long'''
        return self.Datas[0].EntryPriceLong

    @property
    def ExitDateShort(self):
        '''exit time - short'''
        return self.Datas[0].ExitDateShort

    @property
    def ExitPriceShort(self):
        '''exit price - short'''
        return self.Datas[0].ExitPriceShort

    @property
    def EntryDateShort(self):
        '''entry time - short'''
        return self.Datas[0].EntryDateShort

    @property
    def EntryPriceShort(self):
        '''entry price - short'''
        return self.Datas[0].EntryPriceShort

    @property
    def ExitDateLong(self):
        '''exit time - long'''
        return self.Datas[0].ExitDateLong

    @property
    def ExitPriceLong(self):
        '''exit price - long'''
        return self.Datas[0].ExitPriceLong

    @property
    def LastEntryDateShort(self):
        '''last entry time - short'''
        return self.Datas[0].LastEntryDateShort

    @property
    def LastEntryPriceShort(self):
        '''last entry price - short'''
        return self.Datas[0].LastEntryPriceShort

    @property
    def LastEntryDateLong(self):
        '''last entry time - long'''
        return self.Datas[0].LastEntryDateLong

    @property
    def LastEntryPriceLong(self):
        '''last entry price - long'''
        return self.Datas[0].LastEntryPriceLong

    @property
    def IndexEntryLong(self):
        '''bars since entry - long'''
        return self.Datas[0].IndexEntryLong

    @property
    def IndexEntryShort(self):
        '''bars since entry - short'''
        return self.Datas[0].IndexEntryShort

    @property
    def IndexLastEntryLong(self):
        # NOTE(review): original doc said 最后平仓 ("last exit"), but the
        # name says last *entry* — confirm against Data's implementation.
        '''bars since last entry - long'''
        return self.Datas[0].IndexLastEntryLong

    @property
    def IndexLastEntryShort(self):
        # NOTE(review): same naming mismatch as IndexLastEntryLong.
        '''bars since last entry - short'''
        return self.Datas[0].IndexLastEntryShort

    @property
    def IndexExitLong(self):
        '''bars since exit - long'''
        return self.Datas[0].IndexExitLong

    @property
    def IndexExitShort(self):
        '''bars since exit - short'''
        return self.Datas[0].IndexExitShort

    @property
    def Position(self):
        '''net position'''
        return self.Datas[0].Position

    @property
    def CurrentBar(self):
        '''current bar index (0-based)'''
        return self.Datas[0].CurrentBar

    def Buy(self, price: float, volume: int, remark: str = ''):
        """Buy to open (open a long position)."""
        self.Datas[0].Buy(price, volume, remark)

    def Sell(self, price, volume, remark):
        """Close a long position.

        (Original doc said 买平/"buy to close"; given the name this is
        presumably sell-to-close — confirm.)
        """
        self.Datas[0].Sell(price, volume, remark)

    def SellShort(self, price, volume, remark):
        """Sell to open (open a short position)."""
        self.Datas[0].SellShort(price, volume, remark)

    def BuyToCover(self, price, volume, remark):
        """Buy to close (close a short position)."""
        self.Datas[0].BuyToCover(price, volume, remark)

    def OnBarUpdate(self, data: Data, bar: Bar):
        """Bar-update callback; override in strategy subclasses.

        Historical data: fired once per minute.
        Real-time data: fired once per minute.
        """
        pass

    def __BarUpdate(self, data: Data, bar: Bar):
        """Invoke the strategy logic (primary-interval bars only)."""
        # self.OnBarUpdate(data, bar)
        if data.Interval == self.Interval and data.IntervalType == self.IntervalType:
            self.OnBarUpdate(data, bar)

    def __OnOrder(self, data: Data, order: OrderItem):
        """Forward an order event to the external interface's reqorder."""
        # keep enabled so the interface also places the order
        self._data_order(self, data, order)

    # called by the outer (host) interface
    def _data_order(self, stra, data: Data, order: OrderItem):
        """Implemented by the host subclass; called when a strategy
        signal is produced."""
        pass
|
haifengat/hf_at_py
|
hfpy/strategy.py
|
Python
|
apache-2.0
| 6,908
|
# -*- coding: utf-8 -*-
from south.db import db
from south.v2 import SchemaMigration
from django.conf import settings
class Migration(SchemaMigration):
    """Data migration: re-home ContentType rows after several models moved
    from the ``land`` app to the ``zoning`` app (no schema changes)."""

    def forwards(self, orm):
        """Point the moved models' ContentType rows at the new app label."""
        if not db.dry_run:
            # For permissions to work properly after migrating
            moved = ['restrictedareatype', 'restrictedarea', 'restrictedareaedge',
                     'city', 'cityedge', 'district', 'districtedge']
            for model in moved:
                existing = orm['contenttypes.contenttype'].objects.filter(app_label='land', model=model)
                existing.update(app_label='zoning')

    def backwards(self, orm):
        # No reverse operation: the forwards step is not undone.
        pass

    # Frozen ORM model state auto-generated by South; do not edit by hand.
    models = {
        u'authent.structure': {
            'Meta': {'ordering': "['name']", 'object_name': 'Structure'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '256'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'core.comfort': {
            'Meta': {'ordering': "['comfort']", 'object_name': 'Comfort', 'db_table': "'l_b_confort'"},
            'comfort': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_column': "'confort'"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'structure': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authent.Structure']", 'db_column': "'structure'"})
        },
        u'core.datasource': {
            'Meta': {'ordering': "['source']", 'object_name': 'Datasource', 'db_table': "'l_b_source'"},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'source': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'structure': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authent.Structure']", 'db_column': "'structure'"})
        },
        u'core.network': {
            'Meta': {'ordering': "['network']", 'object_name': 'Network', 'db_table': "'l_b_reseau'"},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'network': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_column': "'reseau'"}),
            'structure': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authent.Structure']", 'db_column': "'structure'"})
        },
        u'core.path': {
            'Meta': {'object_name': 'Path', 'db_table': "'l_t_troncon'"},
            'arrival': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250', 'null': 'True', 'db_column': "'arrivee'", 'blank': 'True'}),
            'ascent': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'db_column': "'denivelee_positive'", 'blank': 'True'}),
            'comfort': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'paths'", 'null': 'True', 'db_column': "'confort'", 'to': u"orm['core.Comfort']"}),
            'comments': ('django.db.models.fields.TextField', [], {'null': 'True', 'db_column': "'remarques'", 'blank': 'True'}),
            'datasource': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'paths'", 'null': 'True', 'db_column': "'source'", 'to': u"orm['core.Datasource']"}),
            'date_insert': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_column': "'date_insert'", 'blank': 'True'}),
            'date_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_column': "'date_update'", 'blank': 'True'}),
            'departure': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250', 'null': 'True', 'db_column': "'depart'", 'blank': 'True'}),
            'descent': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'db_column': "'denivelee_negative'", 'blank': 'True'}),
            'geom': ('django.contrib.gis.db.models.fields.LineStringField', [], {'srid': '%s' % settings.SRID, 'spatial_index': 'False'}),
            'geom_3d': ('django.contrib.gis.db.models.fields.GeometryField', [], {'default': 'None', 'dim': '3', 'spatial_index': 'False', 'null': 'True', 'srid': '%s' % settings.SRID}),
            'geom_cadastre': ('django.contrib.gis.db.models.fields.LineStringField', [], {'srid': '%s' % settings.SRID, 'null': 'True', 'spatial_index': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'length': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'null': 'True', 'db_column': "'longueur'", 'blank': 'True'}),
            'max_elevation': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'db_column': "'altitude_maximum'", 'blank': 'True'}),
            'min_elevation': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'db_column': "'altitude_minimum'", 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'db_column': "'nom'", 'blank': 'True'}),
            'networks': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'paths'", 'to': u"orm['core.Network']", 'db_table': "'l_r_troncon_reseau'", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
            'slope': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'null': 'True', 'db_column': "'pente'", 'blank': 'True'}),
            'stake': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'paths'", 'null': 'True', 'db_column': "'enjeu'", 'to': u"orm['core.Stake']"}),
            'structure': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authent.Structure']", 'db_column': "'structure'"}),
            'usages': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'paths'", 'to': u"orm['core.Usage']", 'db_table': "'l_r_troncon_usage'", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
            'valid': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_column': "'valide'"})
        },
        u'core.pathaggregation': {
            'Meta': {'ordering': "['id']", 'object_name': 'PathAggregation', 'db_table': "'e_r_evenement_troncon'"},
            'end_position': ('django.db.models.fields.FloatField', [], {'db_column': "'pk_fin'", 'db_index': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'order': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'db_column': "'ordre'", 'blank': 'True'}),
            'path': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'aggregations'", 'on_delete': 'models.DO_NOTHING', 'db_column': "'troncon'", 'to': u"orm['core.Path']"}),
            'start_position': ('django.db.models.fields.FloatField', [], {'db_column': "'pk_debut'", 'db_index': 'True'}),
            'topo_object': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'aggregations'", 'db_column': "'evenement'", 'to': u"orm['core.Topology']"})
        },
        u'core.stake': {
            'Meta': {'ordering': "['id']", 'object_name': 'Stake', 'db_table': "'l_b_enjeu'"},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'stake': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_column': "'enjeu'"}),
            'structure': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authent.Structure']", 'db_column': "'structure'"})
        },
        u'core.topology': {
            'Meta': {'object_name': 'Topology', 'db_table': "'e_t_evenement'"},
            'ascent': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'db_column': "'denivelee_positive'", 'blank': 'True'}),
            'date_insert': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_column': "'date_insert'", 'blank': 'True'}),
            'date_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_column': "'date_update'", 'blank': 'True'}),
            'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_column': "'supprime'"}),
            'descent': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'db_column': "'denivelee_negative'", 'blank': 'True'}),
            'geom': ('django.contrib.gis.db.models.fields.GeometryField', [], {'default': 'None', 'srid': '%s' % settings.SRID, 'null': 'True', 'spatial_index': 'False'}),
            'geom_3d': ('django.contrib.gis.db.models.fields.GeometryField', [], {'default': 'None', 'dim': '3', 'spatial_index': 'False', 'null': 'True', 'srid': '%s' % settings.SRID}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'kind': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'length': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'null': 'True', 'db_column': "'longueur'", 'blank': 'True'}),
            'max_elevation': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'db_column': "'altitude_maximum'", 'blank': 'True'}),
            'min_elevation': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'db_column': "'altitude_minimum'", 'blank': 'True'}),
            'offset': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'db_column': "'decallage'"}),
            'paths': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['core.Path']", 'through': u"orm['core.PathAggregation']", 'db_column': "'troncons'", 'symmetrical': 'False'}),
            'slope': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'null': 'True', 'db_column': "'pente'", 'blank': 'True'})
        },
        u'core.usage': {
            'Meta': {'ordering': "['usage']", 'object_name': 'Usage', 'db_table': "'l_b_usage'"},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'structure': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authent.Structure']", 'db_column': "'structure'"}),
            'usage': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_column': "'usage'"})
        },
        u'zoning.city': {
            'Meta': {'ordering': "['name']", 'object_name': 'City', 'db_table': "'l_commune'"},
            'code': ('django.db.models.fields.CharField', [], {'max_length': '6', 'primary_key': 'True', 'db_column': "'insee'"}),
            'geom': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {'srid': '%s' % settings.SRID, 'spatial_index': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'commune'"})
        },
        u'zoning.cityedge': {
            'Meta': {'object_name': 'CityEdge', 'db_table': "'f_t_commune'", '_ormbases': [u'core.Topology']},
            'city': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['zoning.City']", 'db_column': "'commune'"}),
            'topo_object': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['core.Topology']", 'unique': 'True', 'primary_key': 'True', 'db_column': "'evenement'"})
        },
        u'zoning.district': {
            'Meta': {'ordering': "['name']", 'object_name': 'District', 'db_table': "'l_secteur'"},
            'geom': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {'srid': '%s' % settings.SRID, 'spatial_index': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'secteur'"})
        },
        u'zoning.districtedge': {
            'Meta': {'object_name': 'DistrictEdge', 'db_table': "'f_t_secteur'", '_ormbases': [u'core.Topology']},
            'district': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['zoning.District']", 'db_column': "'secteur'"}),
            'topo_object': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['core.Topology']", 'unique': 'True', 'primary_key': 'True', 'db_column': "'evenement'"})
        },
        u'zoning.restrictedarea': {
            'Meta': {'ordering': "['area_type', 'name']", 'object_name': 'RestrictedArea', 'db_table': "'l_zonage_reglementaire'"},
            'area_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['zoning.RestrictedAreaType']", 'db_column': "'type'"}),
            'geom': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {'srid': '%s' % settings.SRID, 'spatial_index': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '250', 'db_column': "'zonage'"})
        },
        u'zoning.restrictedareaedge': {
            'Meta': {'object_name': 'RestrictedAreaEdge', 'db_table': "'f_t_zonage'", '_ormbases': [u'core.Topology']},
            'restricted_area': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['zoning.RestrictedArea']", 'db_column': "'zone'"}),
            'topo_object': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['core.Topology']", 'unique': 'True', 'primary_key': 'True', 'db_column': "'evenement'"})
        },
        u'zoning.restrictedareatype': {
            'Meta': {'object_name': 'RestrictedAreaType', 'db_table': "'f_b_zonage'"},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_column': "'nom'"})
        }
    }

    complete_apps = ['contenttypes', 'zoning']
|
johan--/Geotrek
|
geotrek/zoning/migrations/0002_fix_content_types.py
|
Python
|
bsd-2-clause
| 14,474
|
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import ctypes
import Queue
import pyglet
from pyglet.app import windows, BaseEventLoop
from pyglet.window.carbon import carbon, types, constants, _oscheck
EventHandlerProcPtr = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_int,
ctypes.c_void_p, ctypes.c_void_p)
EventLoopTimerProc = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p)
carbon.CreateEvent.argtypes = (ctypes.c_void_p,
ctypes.c_uint32, ctypes.c_uint32,
ctypes.c_double,
ctypes.c_int, ctypes.c_void_p)
kEventDurationForever = ctypes.c_double(constants.kEventDurationForever)
kEventPriorityStandard = 1
kEventAttributeNone = 0
kEventAttributeUserEvent = 1
kEventParamPostTarget = constants._name('ptrg')
typeEventTargetRef = constants._name('etrg')
POST_EVENT_CLASS = constants._name('PYGL')
POST_EVENT_KIND = 0
# TODO when no windows are open Ctrl+C doesn't kill event loop. Install a sig
# handler?
class MTCarbonEventLoop(BaseEventLoop):
    """Carbon (Mac OS X) event loop that also supports posting pyglet
    events from other threads, by queueing them and waking the Carbon
    loop with a custom user event (class 'PYGL')."""

    _running = False  # True while run() is executing

    def __init__(self):
        # (dispatcher, event, args) tuples posted from any thread
        self._post_event_queue = Queue.Queue()

    def post_event(self, dispatcher, event, *args):
        """Queue an event; if the loop is running, also post a Carbon
        user event so the queue is drained promptly."""
        self._post_event_queue.put((dispatcher, event, args))
        if not self._running:
            # run() drains the queue on startup; no need to wake anything.
            return
        event_class = POST_EVENT_CLASS
        event_kind = POST_EVENT_KIND
        event_ref = ctypes.c_void_p()
        _oscheck(
            carbon.CreateEvent(None,
                               event_class, event_kind, 0,
                               kEventAttributeUserEvent,
                               ctypes.byref(event_ref))
        )
        _oscheck(
            carbon.SetEventParameter(event_ref,
                                     kEventParamPostTarget,
                                     typeEventTargetRef,
                                     ctypes.sizeof(ctypes.c_void_p),
                                     ctypes.byref(self._post_event_target))
        )
        _oscheck(
            carbon.PostEventToQueue(self._event_queue, event_ref,
                                    kEventPriorityStandard)
        )
        carbon.ReleaseEvent(event_ref)

    def _setup_post_event_handler(self):
        """Install the Carbon handler for PYGL user events."""
        # Handler for PYGL events (interrupt from post_event)
        # TODO remove later?
        application_target = carbon.GetApplicationEventTarget()
        self._post_event_target = ctypes.c_void_p(application_target)
        proc = EventHandlerProcPtr(self._post_event_handler)
        # keep a reference so the ctypes callback isn't garbage collected
        self._proc = proc
        upp = carbon.NewEventHandlerUPP(proc)
        event_types = types.EventTypeSpec()
        event_types.eventClass = POST_EVENT_CLASS
        event_types.eventKind = POST_EVENT_KIND
        handler_ref = types.EventHandlerRef()
        _oscheck(
            carbon.InstallEventHandler(application_target,
                                       upp,
                                       1,
                                       ctypes.byref(event_types),
                                       ctypes.c_void_p(),
                                       ctypes.byref(handler_ref))
        )

    def _post_event_handler(self, next_handler, ev, data):
        """Drain the posted-event queue and dispatch each event."""
        while True:
            try:
                dispatcher, event, args = self._post_event_queue.get(False)
            except Queue.Empty:
                break
            dispatcher.dispatch_event(event, *args)
        return constants.noErr

    def run(self):
        """Run the Carbon event loop until has_exit is set."""
        self._setup()
        e = ctypes.c_void_p()
        event_dispatcher = carbon.GetEventDispatcherTarget()
        self._event_loop = event_loop = carbon.GetMainEventLoop()
        self._event_queue = carbon.GetMainEventQueue()
        # Create timer
        self._timer = timer = ctypes.c_void_p()
        idle_event_proc = EventLoopTimerProc(self._timer_proc)
        carbon.InstallEventLoopTimer(event_loop,
                                     ctypes.c_double(0.1),  # ?
                                     kEventDurationForever,
                                     idle_event_proc,
                                     None,
                                     ctypes.byref(timer))
        # TODO only once
        self._setup_post_event_handler()
        self._force_idle = False
        self._allow_polling = True
        self.dispatch_event('on_enter')
        # Dispatch events posted before we entered the run loop
        self._running = True  # XXX consolidate
        self._post_event_handler(None, None, None)
        while not self.has_exit:
            if self._force_idle:
                duration = 0
            else:
                duration = kEventDurationForever
            if carbon.ReceiveNextEvent(0, None, duration,
                                       True, ctypes.byref(e)) == 0:
                carbon.SendEventToEventTarget(e, event_dispatcher)
                carbon.ReleaseEvent(e)
            # Manual idle event
            if (carbon.GetNumEventsInQueue(self._event_queue) == 0 or
                    self._force_idle):
                self._force_idle = False
                self._timer_proc(timer, None, False)
        carbon.RemoveEventLoopTimer(self._timer)
        self.dispatch_event('on_exit')

    def _stop_polling(self):
        # Fire the idle timer immediately so the loop rescheduling runs.
        carbon.SetEventLoopTimerNextFireTime(self._timer, ctypes.c_double(0.0))

    def _enter_blocking(self):
        carbon.SetEventLoopTimerNextFireTime(self._timer, ctypes.c_double(0.0))
        self._allow_polling = False

    def _exit_blocking(self):
        self._allow_polling = True

    def _timer_proc(self, timer, data, in_events=True):
        """Idle handler: service live resize/drag/recreate, call idle(),
        and reschedule the timer (or switch to polling mode)."""
        allow_polling = True
        for window in windows:
            # Check for live resizing
            if window._resizing is not None:
                allow_polling = False
                old_width, old_height = window._resizing
                rect = types.Rect()
                carbon.GetWindowBounds(window._window,
                                       constants.kWindowContentRgn,
                                       ctypes.byref(rect))
                width = rect.right - rect.left
                height = rect.bottom - rect.top
                if width != old_width or height != old_height:
                    window._resizing = width, height
                    window.switch_to()
                    window.dispatch_event('on_resize', width, height)
            # Check for live dragging
            if window._dragging:
                allow_polling = False
            # Check for deferred recreate
            if window._recreate_deferred:
                if in_events:
                    # Break out of ReceiveNextEvent so it can be processed
                    # in next iteration.
                    carbon.QuitEventLoop(self._event_loop)
                    self._force_idle = True
                else:
                    # Do it now.
                    window._recreate_immediate()
        sleep_time = self.idle()
        if sleep_time is None:
            sleep_time = constants.kEventDurationForever
        elif sleep_time < 0.01 and allow_polling and self._allow_polling:
            # Switch event loop to polling.
            if in_events:
                carbon.QuitEventLoop(self._event_loop)
            self._force_idle = True
            sleep_time = constants.kEventDurationForever
        carbon.SetEventLoopTimerNextFireTime(timer, ctypes.c_double(sleep_time))


# Install this implementation as pyglet's event loop.
pyglet.app.EventLoop = MTCarbonEventLoop
|
sangh/LaserShow
|
pyglet-hg/experimental/mt_media/mt_app_carbon.py
|
Python
|
bsd-3-clause
| 9,254
|
"""
@author: Seven Lju
@date: 2016.04.27
"""
# Default set of characters that terminate a token.
constStops = [
    '\n', '\t', ' ', '~', '!', '#', '$', '%',
    '@', '&', '*', '(', ')', '-', '=', '+', '[',
    ']', '{', '}', '\\', '|', '\'', '"', ';',
    ':', ',', '<', '.', '>', '/', '?', '^', '`'
]


class TextWalker(object):
    """Iterate over `text`, yielding (token, stop_char) tuples, where
    stop_char is the delimiter that ended the token ('\\0' at end of
    input)."""

    def __init__(self, text, stops=constStops):
        # `stops` defaults to a shared module-level list; it is only read
        # (skip* methods rebind self.stops, never mutate it in place).
        self.text = text
        self.cursor = 0    # index of the next unread character
        self.n = len(text)
        self.stops = stops
        self.token = ""    # last token produced
        self.stop = '\n'   # last stop character seen

    def __iter__(self):
        return self

    def __next__(self):
        """Return the next (token, stop) pair; raise StopIteration at end."""
        if self.cursor >= self.n:
            raise StopIteration()
        i = self.cursor
        while True:
            if i >= self.n:
                self.stop = '\0'  # ran off the end: synthetic stop char
                break
            self.stop = self.text[i]
            if self.stop in self.stops:
                break
            i += 1
        self.token = self.text[self.cursor:i]
        self.cursor = i + 1  # consume the stop character as well
        return (self.token, self.stop)

    def next(self):
        # Python 2 iterator protocol alias.
        return self.__next__()

    def skipString(self, pair=None, markEscape='\\'):
        """Consume a quoted string whose opening quote was the last stop.

        `pair` is the closing quote character (defaults to the last stop
        seen); `markEscape` keeps the following character literal.
        Returns (token, stop).
        """
        _stops = self.stops  # save caller's stop set
        if pair is None:
            pair = self.stop
        self.stops = [pair, markEscape]  # only quote/escape end a chunk
        i = self.cursor  # NOTE(review): unused
        token = ""
        while True:
            string = self.__next__()
            token += string[0]
            stop = string[1]
            if stop == pair:
                break
            elif stop == markEscape:
                # keep the escape char and the escaped char verbatim
                token += stop
                token += self.text[self.cursor]
                self.cursor += 1
            if stop == '\0':
                break
            if self.cursor >= self.n or self.cursor <= 0:
                token += self.stop
                break
        self.stops = _stops  # restore caller's stop set
        self.token = token
        return (self.token, self.stop)

    def skipLongString(self, start, end=None, markEscape='\\'):
        """Consume a multi-character-delimited string (e.g. triple quotes).

        `start`/`end` are the opening/closing delimiter strings (`end`
        defaults to `start`); assumes the last stop seen was start[0].
        Returns (token, stop), or ("", stop) if preconditions fail.
        """
        if end is None:
            end = start
        if self.cursor >= self.n:
            return ("", '\0')
        if not (start and end):
            return ("", self.stop)
        if self.stop != start[0]:
            return ("", self.stop)
        # remaining chars of the opening delimiter must follow the cursor
        if self.text[self.cursor:self.cursor+len(start)-1] != start[1:]:
            return ("", self.stop)
        _stops = self.stops
        token = ""
        end1st = end[0]
        endlen = len(end)
        self.stops = [end1st, markEscape]
        self.cursor += len(start) - 1  # skip rest of the opening delimiter
        while True:
            string = self.__next__()
            token += string[0]
            stop = string[1]
            if stop == end1st and self.cursor + endlen - 1 <= self.n:
                if self.text[self.cursor:self.cursor+endlen-1] == end[1:]:
                    self.stop = end  # full closing delimiter matched
                    break
                else:
                    token += stop  # only the first char matched; keep it
            elif stop == markEscape:
                token += stop
                token += self.text[self.cursor]
                self.cursor += 1
            if stop == '\0':
                break
            if self.cursor >= self.n or self.cursor <= 0:
                token += self.stop
                break
        self.cursor += endlen - 1  # skip rest of the closing delimiter
        self.stops = _stops
        self.token = token
        return (self.token, self.stop)

    def seeForward(self, n):
        """Peek at the next `n` characters without consuming them."""
        return self.text[self.cursor:self.cursor + n]

    def seeBackward(self, n):
        """Return the `n` characters just before the cursor."""
        start = self.cursor - n
        return self.text[start:self.cursor]
|
dna2github/dna2oldmemory
|
PyLangParser/source/walker.py
|
Python
|
mit
| 2,919
|
import xml.etree.ElementTree as ET
import time
from objects.cube.rubiks_cube import RubiksCube
from objects.xml import xml_step
from objects.xml.abstact_xml import AbstractXml
from objects.xml.xml_cube import XmlCube
from xml_step import Step
from helper import Helper
class XmlObject(AbstractXml):
    """Serialize a cube "move" (start cube, result cube, solution steps)
    to and from an XML <Move> element."""

    def __init__(self, author="Secret", size="3", set_date=True):
        self._author = author
        if set_date:
            self._date = time.strftime("%Y-%m-%d %H:%M:%S")
        else:
            # NOTE(review): stores the raw falsy flag (e.g. False) as the
            # date; preserved as-is — confirm callers rely on this.
            self._date = set_date
        self._size = size
        self._start_cube = None
        self._start_cube_turn = None
        self._result_cube = None
        self._result_cube_turn = None
        self._codes = []

    def set_size(self, size):
        """Set the cube size; values of 2 or less are silently ignored."""
        if size > 2:
            self._size = size

    def set_start(self, rubikscube, turn_cube=None):
        """Record the starting cube (and optional turn object)."""
        self._start_cube = rubikscube
        self._start_cube_turn = turn_cube

    def set_result(self, rubikscube, turn_cube=None):
        """Record the result cube (and optional turn object)."""
        self._result_cube = rubikscube
        self._result_cube_turn = turn_cube

    def add_code(self, code):
        """Append a solution Step.

        Raises:
            ValueError: if `code` is not a Step instance.
        """
        if isinstance(code, Step):
            self._codes.append(code)
        else:
            raise ValueError("Input isn't an instance of Step")

    def set_code(self, codes):
        """Replace the whole list of steps."""
        self._codes = codes

    def from_xml(self, xml):
        """Populate this object from a parsed <Move> element."""
        self._author = xml.find("Author").text
        self._date = xml.find("Date").text
        self._size = int(xml.find("Size").text)
        self._start_cube = XmlCube.from_xml(xml.find("Cubes/Start"), self._size)
        self._result_cube = XmlCube.from_xml(xml.find("Cubes/Result"), self._size)
        for element in list(xml.find("Steps")):
            step = Step()
            step.from_xml(element)
            self._codes.append(step)

    def get_xml(self):
        """Build and return the <Move> element tree.

        Raises:
            ValueError: if the start cube, result cube or steps are missing.
        """
        # BUG FIX: original used `len(self._codes) is 0` — identity
        # comparison with an int literal is unreliable (and a
        # SyntaxWarning on modern CPython); use equality instead.
        if self._start_cube is None or \
                self._result_cube is None or \
                len(self._codes) == 0:
            raise ValueError("Not all needed values have been set")
        move = ET.Element('Move')
        author = ET.Element('Author')
        author.text = self._author
        move.append(author)
        date = ET.Element('Date')
        date.text = self._date
        move.append(date)
        size = ET.Element('Size')
        size.text = str(self._size)
        move.append(size)
        cubes = ET.Element('Cubes')
        cubes.append(XmlCube.get_xml(self._start_cube, "Start"))
        if self._start_cube_turn is None:
            start_cube_turn = ET.Element("Start-Turn")
            start_cube_turn.text = "False"
            cubes.append(start_cube_turn)
        else:
            # NOTE(review): the stored turn object is *called* with the tag
            # name — presumably a factory returning an Element; confirm.
            cubes.append(self._start_cube_turn("Start-Turn"))
        cubes.append(XmlCube.get_xml(self._result_cube, "Result"))
        if self._result_cube_turn is None:
            result_cube_turn = ET.Element("Result-Turn")
            result_cube_turn.text = "False"
            cubes.append(result_cube_turn)
        else:
            cubes.append(self._result_cube_turn("Result-Turn"))
        move.append(cubes)
        steps = ET.Element("Steps")
        for i in self._codes:
            steps.append(i.get_xml())
        move.append(steps)
        return move
|
Willempie/Artificial_Intelligence_Cube
|
objects/xml/xml_move.py
|
Python
|
apache-2.0
| 3,237
|
import pytest
from fastapi.testclient import TestClient
from ...utils import needs_py310
openapi_schema = {
"openapi": "3.0.2",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/items/": {
"get": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
"summary": "Read Items",
"operationId": "read_items_items__get",
"parameters": [
{
"required": False,
"schema": {"title": "Q", "type": "string"},
"name": "q",
"in": "query",
},
{
"required": False,
"schema": {"title": "Skip", "type": "integer", "default": 0},
"name": "skip",
"in": "query",
},
{
"required": False,
"schema": {"title": "Limit", "type": "integer", "default": 100},
"name": "limit",
"in": "query",
},
],
}
},
"/users/": {
"get": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
"summary": "Read Users",
"operationId": "read_users_users__get",
"parameters": [
{
"required": False,
"schema": {"title": "Q", "type": "string"},
"name": "q",
"in": "query",
},
{
"required": False,
"schema": {"title": "Skip", "type": "integer", "default": 0},
"name": "skip",
"in": "query",
},
{
"required": False,
"schema": {"title": "Limit", "type": "integer", "default": 100},
"name": "limit",
"in": "query",
},
],
}
},
},
"components": {
"schemas": {
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {"type": "string"},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
}
},
}
@pytest.fixture(name="client")
def get_client():
    """Build a TestClient around the tutorial app, imported lazily."""
    from docs_src.dependencies.tutorial001_py310 import app

    return TestClient(app)
@needs_py310
def test_openapi_schema(client: TestClient):
    """The generated OpenAPI document must match the expected schema."""
    resp = client.get("/openapi.json")
    assert resp.status_code == 200, resp.text
    assert resp.json() == openapi_schema
@needs_py310
@pytest.mark.parametrize(
    "path,expected_status,expected_response",
    [
        ("/items", 200, {"q": None, "skip": 0, "limit": 100}),
        ("/items?q=foo", 200, {"q": "foo", "skip": 0, "limit": 100}),
        ("/items?q=foo&skip=5", 200, {"q": "foo", "skip": 5, "limit": 100}),
        ("/items?q=foo&skip=5&limit=30", 200, {"q": "foo", "skip": 5, "limit": 30}),
        ("/users", 200, {"q": None, "skip": 0, "limit": 100}),
        ("/openapi.json", 200, openapi_schema),
    ],
)
def test_get(path, expected_status, expected_response, client: TestClient):
    """Every path must return the expected status code and JSON body."""
    result = client.get(path)
    assert result.status_code == expected_status
    assert result.json() == expected_response
|
tiangolo/fastapi
|
tests/test_tutorial/test_dependencies/test_tutorial001_py310.py
|
Python
|
mit
| 5,538
|
from .autocomplete import *
|
vinoth3v/In_addon_thodar
|
thodar/page/__init__.py
|
Python
|
apache-2.0
| 28
|
# (c) Copyright 2014, University of Manchester
#
# This file is part of PyNSim.
#
# PyNSim is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyNSim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyNSim. If not, see <http://www.gnu.org/licenses/>.
from pynsim import Node
class Junction(Node):
    # Simple pass-through node with a fixed maximum capacity.
    type = 'junction'
    _properties = dict(
        max_capacity=1000,
    )
class IrrigationNode(Node):
    """
    A general irrigation node, with properties demand, max and deficit.

    At each timestep, this node will use its internal seasonal
    water requirements to set a demand figure. The allocator will then
    calculate deficit based on its max allowed and demand.
    """
    type = 'irrigation'
    _properties = dict(
        #percentage
        max_allowed = 100,
        demand = 0,
        deficit=0,
    )

    def setup(self, timestamp):
        """
        Get water requirements for this timestep based on my internal
        requirements dictionary.
        """
        # Subclasses supply _seasonal_water_req, keyed by timestamp string.
        self.demand = self._seasonal_water_req[timestamp]
class CitrusFarm(IrrigationNode):
    # Monthly water demand, keyed by date string; read by
    # IrrigationNode.setup at each timestep.
    _seasonal_water_req = {
        "2014-01-01": 100,
        "2014-02-01": 200,
        "2014-03-01": 300,
        "2014-04-01": 400,
        "2014-05-01": 500,
    }
class VegetableFarm(IrrigationNode):
    # Monthly water demand, keyed by date string; read by
    # IrrigationNode.setup at each timestep.
    _seasonal_water_req = {
        "2014-01-01": 150,
        "2014-02-01": 250,
        "2014-03-01": 350,
        "2014-04-01": 450,
        "2014-05-01": 550,
    }
class SurfaceReservoir(Node):
    """
    Node from which all the other nodes get their water. This reservoir
    is given its water allocation from its institution -- the ministry of water.
    """
    type = 'surface reservoir'
    _properties = dict(
        release = 0,
        capacity = 1000,
        max_release = 1000,
    )

    def setup(self, timestamp):
        """
        The ministry of water has given me my release details, so there
        is no need for me to set anything up myself.
        """
        pass
|
UMWRG/pynsim
|
examples/trivial_demo/components/node.py
|
Python
|
gpl-3.0
| 2,510
|
#!/usr/bin/env python
"""Flask-script management script."""
import dateutil.parser
import datetime
import database
import math
from server_common import database_session, get_mturk_connection
from sqlalchemy import func, desc
from flask.ext.script import Manager
from admin_site import APP
MANAGER = Manager(APP)
def overlap(geom_a, geom_b):
    """Return the intersection-over-union of two bounding boxes.

    Builds a SQLAlchemy expression (not a plain number), matching
    the sibling intersection() helper.
    """
    inter = intersection(geom_a, geom_b)
    area_a = (geom_a.x2 - geom_a.x1) * (geom_a.y2 - geom_a.y1)
    area_b = (geom_b.y2 - geom_b.y1) * (geom_b.x2 - geom_b.x1)
    return inter / (area_a + area_b - inter)
def intersection(geom_a, geom_b):
    """Computes the intersection of two bounding boxes.

    Built from SQLAlchemy ``func`` calls, so the result is a SQL
    expression suitable for use inside queries, not a Python number.
    """
    # width * height of the overlap region, each clamped at zero so
    # disjoint boxes yield 0 instead of a negative product.
    intersection_score = func.greatest(0,
                                       (func.least(geom_a.x2, geom_b.x2) -
                                        func.greatest(geom_a.x1, geom_b.x1))) * \
                         func.greatest(0,
                                       (func.least(geom_a.y2, geom_b.y2) -
                                        func.greatest(geom_a.y1, geom_b.y1)))
    return intersection_score
def union(geom_a, geom_b):
    """Return the union area of two bounding boxes (SQL expression)."""
    inter = intersection(geom_a, geom_b)
    area_a = (geom_a.x2 - geom_a.x1) * (geom_a.y2 - geom_a.y1)
    area_b = (geom_b.y2 - geom_b.y1) * (geom_b.x2 - geom_b.x1)
    return area_a + area_b - inter
def delete_mturk(session, obj, allow):
    """Delete *obj*'s MTurk assignment via *session*, if allowed.

    No-op when there is no assignment; raises when one exists but
    deletion is not allowed.
    """
    if obj.assignment is None:
        return
    if not allow:
        raise Exception('Cannot delete user - Has MTurk assignments')
    session.delete(obj.assignment)
def map_vehicles(session, a, b):
"""Computes association between vehicle annotations."""
mapping = {}
for vehicle in a.vehicles:
overlap_score = overlap(vehicle, database.Vehicle)
if len(b.vehicles) == 0:
mapping[vehicle.id] = None
continue
selected = session.query(database.Vehicle) \
.filter(database.Vehicle.id.in_([elt.id for elt in b.vehicles])) \
.filter(overlap_score > 0.7) \
.filter(database.Vehicle.type == vehicle.type) \
.filter(func.acos(func.cos((database.Vehicle.theta - vehicle.theta)/180*math.pi)) < math.radians(10)) \
.order_by(desc(overlap_score)) \
.first()
mapping[vehicle.id] = None if not selected else selected.id
return mapping
@MANAGER.command
@database_session(fetch_user=False, return_random_error=False)
def pair_vehicles(session):
"""Build annotation-to-annotation correspondence."""
photos = session.query(database.Photo) \
.join(database.Annotation) \
.join(database.Revision) \
.filter(database.Revision.final == True) \
.group_by(database.Photo.id) \
.having(func.count(database.Annotation.id) == 2)
for photo in photos:
revisions = session.query(database.Revision) \
.join(database.Annotation) \
.filter(database.Annotation.pid == photo.id) \
.filter(database.Revision.final == True) \
.all()
assert(len(revisions) == 2)
a_to_b = map_vehicles(session, revisions[0], revisions[1])
b_to_a = map_vehicles(session, revisions[1], revisions[0])
for a in a_to_b:
b = a_to_b[a]
if not b:
continue
newa = b_to_a[b]
vehicle_a = session.query(database.Vehicle) \
.filter(database.Vehicle.id == a) \
.one()
if vehicle_a.partner_id:
assert vehicle_a.partner_id == b
else:
assert vehicle_a.partner_id is None
vehicle_a.partner_id = b
vehicle_b = session.query(database.Vehicle) \
.filter(database.Vehicle.id == b) \
.one()
if vehicle_b.partner_id:
assert vehicle_b.partner_id == a
else:
assert vehicle_b.partner_id is None
vehicle_b.partner_id = a
session.commit()
@MANAGER.option('--username', type=str)
@MANAGER.option('--allow-mturk', action='store_true')
@database_session(fetch_user=False, return_random_error=False)
def delete_user(session, username, allow_mturk):
"""Deletes a user from the system."""
user = session.query(database.User) \
.filter_by(username=username) \
.one()
for bbox_session in user.bbox_sessions:
delete_mturk(session, bbox_session, allow_mturk)
session.delete(bbox_session)
for annotation in user.annotations:
for revision in annotation.revisions:
delete_mturk(session, revision, allow_mturk)
session.delete(revision)
session.delete(annotation)
for problem in user.problems:
session.delete(problem)
for daynight in user.daynights:
delete_mturk(session, daynight, allow_mturk)
session.delete(daynight)
for click_session in user.clicksessions:
for click in click_session.clicks:
session.delete(click)
delete_mturk(session, click_session, allow_mturk)
session.delete(click_session)
for approve_pair_session in user.approve_pair_sessions:
delete_mturk(session, approve_pair_session, allow_mturk)
session.delete(approve_pair_session)
for occlusion_session in user.occlusionsessions:
for occlusion_ranking in occlusion_session.occlusions:
session.delete(occlusion_ranking)
delete_mturk(session, occlusion_session, allow_mturk)
session.delete(occlusion_session)
session.delete(user)
session.flush()
session.commit()
@MANAGER.command
@database_session(fetch_user=False, return_random_error=False)
def print_users(session):
    """Print the username of every user in the system."""
    for user in session.query(database.User):
        print(user.username)
@MANAGER.option('--sandbox', action='store_true')
@database_session(fetch_user=False, return_random_error=False)
def gc_pending(session, sandbox):
    """Go through our DB and flag non-existent pending assignments.

    Any 'Pending' assignment that MTurk no longer knows about is marked
    abandoned.
    """
    connection = get_mturk_connection(sandbox)
    # pylint: disable-msg=E1101
    labeler_assignments = session.query(database.Assignment) \
        .join(database.HIT) \
        .join(database.HITType) \
        .filter(database.Assignment.status == 'Pending') \
        .filter(database.HITType.sandbox == sandbox)
    # pylint: enable-msg=E1101
    for labeler_assignment in labeler_assignments:
        try:
            connection.get_assignment(labeler_assignment.assignmentid)
        # Fix: was a bare `except:`, which also swallows SystemExit and
        # KeyboardInterrupt; any API failure still marks it abandoned.
        except Exception:
            labeler_assignment.abandoned = True
    session.flush()
    session.commit()
@MANAGER.option('--sandbox', action='store_true')
@database_session(fetch_user=False, return_random_error=False)
def sync_mturk(session, sandbox):
"""Imports the current state of mturk to make sure we are in sync."""
connection = get_mturk_connection(sandbox)
mturk_hits = connection.get_all_hits()
for mturk_hit in mturk_hits:
labeler_hittype = session.query(database.HITType) \
.filter_by(hittypeid=mturk_hit.HITTypeId) \
.first()
if labeler_hittype is None:
print('Creating HITType ' + mturk_hit.HITTypeId)
labeler_hittype = database.HITType(
hittypeid=mturk_hit.HITTypeId,
title=mturk_hit.Title,
description=mturk_hit.Description,
reward=float(mturk_hit.Amount),
duration=datetime.timedelta(
seconds=int(mturk_hit.AssignmentDurationInSeconds)
),
keywords=mturk_hit.Keywords,
approval_delay=datetime.timedelta(
seconds=int(mturk_hit.AutoApprovalDelayInSeconds)
),
sandbox=sandbox,
)
session.add(labeler_hittype)
labeler_hit = session.query(database.HIT) \
.filter_by(hitid=mturk_hit.HITId) \
.first()
if labeler_hit is None:
print('Creating HIT ' + mturk_hit.HITId)
creation = dateutil.parser.parse(mturk_hit.CreationTime)
expiration = dateutil.parser.parse(mturk_hit.Expiration)
labeler_hit = database.HIT(
hitid=mturk_hit.HITId,
creation=creation,
lifetime=expiration - creation,
max_assignments=mturk_hit.MaxAssignments,
HITType=labeler_hittype,
)
session.add(labeler_hit)
mturk_assignments = connection.get_assignments(mturk_hit.HITId)
for mturk_assigment in mturk_assignments:
labeler_assignment = session.query(database.Assignment) \
.filter_by(assignmentid=mturk_assigment.AssignmentId) \
.first()
if labeler_assignment is None:
labeler_user = session.query(database.User) \
.filter_by(username=mturk_assigment.WorkerId) \
.first()
if labeler_user is None:
print('Creating user ' + mturk_assigment.WorkerId)
labeler_user = database.User(
username=mturk_assigment.WorkerId,
lastactivity=datetime.datetime.now(),
)
session.add(labeler_user)
print('Creating Assignment ' + mturk_assigment.AssignmentId)
labeler_assignment = database.Assignment(
user=labeler_user,
assignmentid=mturk_assigment.AssignmentId,
HIT=labeler_hit,
status=mturk_assigment.AssignmentStatus,
)
session.add(labeler_assignment)
if labeler_assignment.status != mturk_assigment.AssignmentStatus:
print('Recorded Status: %s, Actual Status: %s' % (
labeler_assignment.status,
mturk_assigment.AssignmentStatus
))
labeler_assignment.status = mturk_assigment.AssignmentStatus
session.flush()
session.commit()
if __name__ == '__main__':
MANAGER.run()
|
kmatzen/nyc3dcars-labeler
|
src/labeler/manage.py
|
Python
|
bsd-3-clause
| 10,532
|
# coding = utf-8
from selenium import webdriver
import os
import time  # used for the sleeps below

# Open Baidu in Firefox, search for "selenium" and print the page title.
browser = webdriver.Firefox()
browser.get("http://www.baidu.com")
time.sleep(0.3)  # sleep 0.3 seconds
browser.find_element_by_id("kw").send_keys("selenium")
browser.find_element_by_id("su").click()
time.sleep(3)  # sleep 3 seconds for the results page to load
# Fix: was `print(driver.title)` — `driver` is undefined (NameError);
# the WebDriver instance is named `browser`.
print(browser.title)
browser.quit()
|
xiaoxiaoyao/PythonApplication1
|
PythonApplication1/爬虫练习/FIREFOX/StartFirefox.py
|
Python
|
unlicense
| 382
|
# -*- coding: utf-8 -*-
#
# Licensed under the terms of the PyQwt License
# Copyright (C) 2003-2009 Gerard Vermeulen, for the original PyQwt example
# Copyright (c) 2015 Pierre Raybaut, for the PyQt5/PySide port and further
# developments (e.g. ported to python-qwt API)
# (see LICENSE file for more details)
import sys
import numpy as np
from qwt.qt.QtGui import QApplication, QPen
from qwt.qt.QtCore import Qt
from qwt import QwtPlot, QwtScaleDraw, QwtPlotGrid, QwtPlotCurve, QwtPlotItem
class CartesianAxis(QwtPlotItem):
"""Supports a coordinate system similar to
http://en.wikipedia.org/wiki/Image:Cartesian-coordinate-system.svg"""
def __init__(self, masterAxis, slaveAxis):
"""Valid input values for masterAxis and slaveAxis are QwtPlot.yLeft,
QwtPlot.yRight, QwtPlot.xBottom, and QwtPlot.xTop. When masterAxis is
an x-axis, slaveAxis must be an y-axis; and vice versa."""
QwtPlotItem.__init__(self)
self.__axis = masterAxis
if masterAxis in (QwtPlot.yLeft, QwtPlot.yRight):
self.setAxes(slaveAxis, masterAxis)
else:
self.setAxes(masterAxis, slaveAxis)
self.scaleDraw = QwtScaleDraw()
self.scaleDraw.setAlignment((QwtScaleDraw.LeftScale,
QwtScaleDraw.RightScale,
QwtScaleDraw.BottomScale,
QwtScaleDraw.TopScale)[masterAxis])
def draw(self, painter, xMap, yMap, rect):
"""Draw an axis on the plot canvas"""
xtr = xMap.transform
ytr = yMap.transform
if self.__axis in (QwtPlot.yLeft, QwtPlot.yRight):
self.scaleDraw.move(round(xtr(0.0)), yMap.p2())
self.scaleDraw.setLength(yMap.p1()-yMap.p2())
elif self.__axis in (QwtPlot.xBottom, QwtPlot.xTop):
self.scaleDraw.move(xMap.p1(), round(ytr(0.0)))
self.scaleDraw.setLength(xMap.p2()-xMap.p1())
self.scaleDraw.setScaleDiv(self.plot().axisScaleDiv(self.__axis))
self.scaleDraw.draw(painter, self.plot().palette())
class CartesianPlot(QwtPlot):
"""Creates a coordinate system similar system
http://en.wikipedia.org/wiki/Image:Cartesian-coordinate-system.svg"""
def __init__(self, *args):
QwtPlot.__init__(self, *args)
self.setTitle('Cartesian Coordinate System Demo')
# create a plot with a white canvas
self.setCanvasBackground(Qt.white)
# set plot layout
self.plotLayout().setCanvasMargin(0)
self.plotLayout().setAlignCanvasToScales(True)
# attach a grid
grid = QwtPlotGrid()
grid.attach(self)
grid.setPen(QPen(Qt.black, 0, Qt.DotLine))
# attach a x-axis
xaxis = CartesianAxis(QwtPlot.xBottom, QwtPlot.yLeft)
xaxis.attach(self)
self.enableAxis(QwtPlot.xBottom, False)
# attach a y-axis
yaxis = CartesianAxis(QwtPlot.yLeft, QwtPlot.xBottom)
yaxis.attach(self)
self.enableAxis(QwtPlot.yLeft, False)
# calculate 3 NumPy arrays
x = np.arange(-2*np.pi, 2*np.pi, 0.01)
y = np.pi*np.sin(x)
z = 4*np.pi*np.cos(x)*np.cos(x)*np.sin(x)
# attach a curve
curve = QwtPlotCurve('y = pi*sin(x)')
curve.attach(self)
curve.setPen(QPen(Qt.green, 2))
curve.setData(x, y)
# attach another curve
curve = QwtPlotCurve('y = 4*pi*sin(x)*cos(x)**2')
curve.attach(self)
curve.setPen(QPen(Qt.black, 2))
curve.setData(x, z)
self.replot()
def make():
    """Create, size and show the demo plot; return it to keep it alive."""
    plot = CartesianPlot()
    plot.resize(400, 300)
    plot.show()
    return plot
if __name__ == '__main__':
app = QApplication(sys.argv)
demo = make()
sys.exit(app.exec_())
|
mindw/python-qwt
|
examples/CartesianDemo.py
|
Python
|
lgpl-2.1
| 3,786
|
from __future__ import division
import patterny.synthesize.topic as _tsynthesize
import patterny.ml.similarity as _similarity
import requests
class AnalysisRecommendation(object):
def __init__(self, k, delta, n_topics, tmodel, problem_similarity_vector, analysis_similarity_map=None):
self.k = k
self.delta = delta
self.tmodel = tmodel
self.n_topics = n_topics
if not analysis_similarity_map:
analysis_similarity_map = {}
self.analysis_similarity_map = analysis_similarity_map
self.problem_similarity_vector = problem_similarity_vector
def metrics_for_top_topic(self, bug_id, analysis, top_topics):
if not top_topics:
return None, None
t = next(iter(top_topics))
if t['topic'] in self.tmodel.topics:
t_ids = self.tmodel.topics[t['topic']].ids
t_analyses = self.tmodel.topics[t['topic']].analyses
t_sim_matrix = self.tmodel.topics[t['topic']].sim_matrix
self.update_analysis_similarity_map(t_ids, t_analyses)
topic_synthesized_analysis = _tsynthesize.synthesize(
t_ids, t_analyses, t_sim_matrix,
threshold=self.delta
)
recommended_analysis = topic_synthesized_analysis[:self.k]
precision = self.precision_for_top_topic(bug_id, analysis, recommended_analysis, self.delta)
likelihood = self.likelihood_for(precision)
return precision, likelihood
return None, None
def precision_for_top_topic(self, bug_id, analysis, recommended_analysis, threshold):
tp = 0
total = len(recommended_analysis)
session = requests.Session()
for recommended_analysis in recommended_analysis:
key = (bug_id, recommended_analysis.id)
if self.analysis_similarity_map and key in self.analysis_similarity_map:
similarity = self.analysis_similarity_map[key]['similarity']
else:
similarity = _similarity.sss_similarity(session, analysis, recommended_analysis.analysis)
self.analysis_similarity_map[key] = dict(similarity=similarity)
if similarity >= threshold:
tp += 1
precision = tp / total
return precision
def likelihood_for(self, precision):
if precision > 0.0:
return 1
return 0
def metrics_for_n_topics(self, bug_id, analysis, topic_indexes):
if not topic_indexes:
return None, None
threshold = self.delta
tp = 0
total = len(topic_indexes)
session = requests.Session()
for tidx in topic_indexes:
topic_id = tidx[0]
recommendation_idx = tidx[1]
if topic_id in self.tmodel.topics:
t_ids = self.tmodel.topics[topic_id].ids
t_analyses = self.tmodel.topics[topic_id].analyses
self.update_analysis_similarity_map(t_ids, t_analyses)
topic_synthesized_analysis = _tsynthesize.synthesize(
t_ids, t_analyses,
threshold=threshold,
similarity_pairs=self.analysis_similarity_map
)
if recommendation_idx < len(topic_synthesized_analysis):
recommended = topic_synthesized_analysis[recommendation_idx]
key = (bug_id, recommended.id)
if self.analysis_similarity_map and key in self.analysis_similarity_map:
similarity = self.analysis_similarity_map[key]['similarity']
else:
similarity = _similarity.sss_similarity(session, analysis, recommended.analysis)
self.analysis_similarity_map[key] = dict(similarity=similarity)
if similarity >= threshold:
tp += 1
precision = tp / total
likelihood = self.likelihood_for(precision)
return precision, likelihood
def topics_for_similar_bugs(self, bugs):
bug_ids = self.ids_for_similar_problems(bugs)
return self.tmodel.topics_of(bug_ids)
def bugs_with_similar_problems(self, bug_id, problem, problem_similarity_threshold, max_k=10):
bugs_with_similar_problems = self.problem_similarity_vector.similarity(
bug_id,
problem,
threshold=problem_similarity_threshold
)
bugs_with_similar_problems = bugs_with_similar_problems[0: min(
max_k, len(bugs_with_similar_problems)
)]
return bugs_with_similar_problems
def ids_for_similar_problems(self, bugs):
return [s['id'] for s in bugs]
def top_topic_index_topic(self, topics_for_similar_bugs, bugs_with_similar_problems):
result = []
topic_count = {}
topic_index = {}
topic_index_list = []
for bug, topics in topics_for_similar_bugs.iteritems():
for topic_id in topics:
self.__process_current_topic_index(
list(filter(lambda j: j['id'] == bug, bugs_with_similar_problems)),
topic_count,
topic_id,
topic_index,
topic_index_list
)
for topic, data in topic_count.iteritems():
result.append(dict(topic=topic, count=data['count'], highest_sim=data['highest_sim']))
if result:
result = sorted(result, key=lambda k: (k['count'], k['highest_sim']), reverse=True)
if topic_index_list:
topic_index_list = topic_index_list[:self.k]
return result, topic_index_list
def __process_current_topic_index(self, bugs, topic_count, topic_id, topic_index,
topic_index_list):
if topic_id not in topic_count:
topic_count[topic_id] = dict(count=0, highest_sim=0)
if topic_id not in topic_index:
topic_index[topic_id] = 0
# Update map with topic and i-th index in that topic
topic_index_list.append((topic_id, topic_index[topic_id]))
topic_index[topic_id] += 1
# Update map with topic count
topic_count[topic_id]['count'] += 1
current_sim = list(
map(lambda i: i['prob'], bugs)
)
# Define the highest similarity inside a topic
if current_sim:
current_sim = next(iter(current_sim))
if current_sim >= topic_count[topic_id]['highest_sim']:
topic_count[topic_id]['highest_sim'] = current_sim
def update_analysis_similarity_map(self, t_ids, t_analyses):
session = requests.Session()
for i in xrange(0, len(t_ids)):
for j in xrange(0, len(t_ids)):
id_i = t_ids[i]
id_j = t_ids[j]
analysis_i = t_analyses[i]
analysis_j = t_analyses[j]
key = (id_i, id_j)
if key not in self.analysis_similarity_map:
similarity = _similarity.sss_similarity(session, analysis_i, analysis_j)
self.analysis_similarity_map[key] = dict(similarity=similarity)
|
marquesarthur/BugAnalysisRecommender
|
patterny/patterny/recommender/analysis.py
|
Python
|
mit
| 7,272
|
from __future__ import with_statement
from fabric.api import task
@task
def locales():
    """
    Check locales configuration
    """
    from fabtools import require

    # Require each locale in the same order as before.
    for locale_name in ('en_US.UTF-8', 'fr_FR.UTF-8'):
        require.system.locale(locale_name)
|
juanantoniofm/accesible-moodle
|
fabtools/tests/fabfiles/system.py
|
Python
|
gpl-2.0
| 255
|
import unittest
from oscleanup.utils import *
class TestUtils(unittest.TestCase):

    def test_fff(self):
        """fff() should return a non-None value (requires sourced creds)."""
        result = fff()
        self.assertIsNotNone(result)
|
sureshkvl/os-cleanup
|
oscleanup/tests/test_utils.py
|
Python
|
apache-2.0
| 198
|
import datetime
from sqlalchemy import Column, Date, DateTime, ForeignKey, Integer, String, func
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import backref, relationship
Base = declarative_base()
class User(Base):
"""Define a User."""
__tablename__ = "users"
id = Column(Integer, primary_key=True)
name = Column(String, unique=True)
created_at = Column(DateTime, default=datetime.datetime.utcnow)
birthday = Column(Date)
address = relationship("Address", uselist=False, backref=backref("user"))
def __unicode__(self):
"""Give a readable representation of an instance."""
return "%s" % self.name
def __repr__(self):
"""Give a unambiguous representation of an instance."""
return "<%s#%s>" % (self.__class__.__name__, self.id)
@hybrid_property
def dummy(self):
"""Create a dummy hybrid property."""
return self.name[0:3]
@dummy.expression
def dummy(cls):
"""Create a dummy expression."""
return func.substr(cls.name, 0, 3)
class Address(Base):
"""Define an Address."""
__tablename__ = "addresses"
id = Column(Integer, primary_key=True)
description = Column(String, unique=True)
user_id = Column(Integer, ForeignKey("users.id"))
def __unicode__(self):
"""Give a readable representation of an instance."""
return "%s" % (self.id)
def __repr__(self):
"""Give a unambiguous representation of an instance."""
return "<%s#%s>" % (self.__class__.__name__, self.id)
|
Pegase745/sqlalchemy-datatables
|
tests/models.py
|
Python
|
mit
| 1,630
|
import os
import tarfile
from configobj import ConfigObj
from twisted.internet.defer import inlineCallbacks
from Tribler.Core.CacheDB.SqliteCacheDBHandler import (BasicDBHandler, LimitedOrderedDict)
from Tribler.Core.CacheDB.sqlitecachedb import SQLiteCacheDB
from Tribler.Core.Config.tribler_config import TriblerConfig, CONFIG_SPEC_PATH
from Tribler.Core.Session import Session
from Tribler.Test.Core.base_test import TriblerCoreTest
from Tribler.Test.common import TESTS_DATA_DIR
from Tribler.dispersy.util import blocking_call_on_reactor_thread
BUSYTIMEOUT = 5000
class TestLimitedOrderedDict(TriblerCoreTest):

    def test_limited_ordered_dict(self):
        """The dict must never grow beyond its configured size limit."""
        limited = LimitedOrderedDict(3)
        limited['foo'] = 'bar'
        limited['bar'] = 'foo'
        limited['foobar'] = 'foobar'
        self.assertEqual(len(limited), 3)
        # A fourth insert must evict rather than grow.
        limited['another'] = 'another'
        self.assertEqual(len(limited), 3)
class AbstractDB(TriblerCoreTest):
    """Base test case that unpacks a bundled sqlite database and wires it
    into a minimal, mostly-disabled Tribler session."""

    def setUpPreSession(self):
        # Disable every subsystem the DB-handler tests do not need.
        self.config = TriblerConfig(ConfigObj(configspec=CONFIG_SPEC_PATH))
        self.config.set_state_dir(self.getStateDir())
        self.config.set_torrent_checking_enabled(False)
        self.config.set_megacache_enabled(False)
        self.config.set_dispersy_enabled(False)
        self.config.set_mainline_dht_enabled(False)
        self.config.set_torrent_collecting_enabled(False)
        self.config.set_libtorrent_enabled(False)
        self.config.set_video_server_enabled(False)
        self.config.set_torrent_store_enabled(False)

    @blocking_call_on_reactor_thread
    @inlineCallbacks
    def setUp(self):
        yield super(AbstractDB, self).setUp()
        self.setUpPreSession()
        self.session = Session(self.config, ignore_singleton=True)
        # Unpack the reference database shipped with the test data.
        tar = tarfile.open(os.path.join(TESTS_DATA_DIR, 'bak_new_tribler.sdb.tar.gz'), 'r|gz')
        tar.extractall(self.session_base_dir)
        db_path = os.path.join(self.session_base_dir, 'bak_new_tribler.sdb')
        self.sqlitedb = SQLiteCacheDB(db_path, busytimeout=BUSYTIMEOUT)
        self.sqlitedb.initialize()
        self.session.sqlite_db = self.sqlitedb

    def tearDown(self):
        self.sqlitedb.close()
        self.sqlitedb = None
        self.session.del_instance()
        self.session = None
        # Fix: was super(AbstractDB, self).tearDown(self) — passing the
        # instance a second time as a positional argument.
        super(AbstractDB, self).tearDown()
class TestSqliteBasicDBHandler(AbstractDB):
@blocking_call_on_reactor_thread
@inlineCallbacks
def setUp(self):
yield super(TestSqliteBasicDBHandler, self).setUp()
self.db = BasicDBHandler(self.session, u"Peer")
@blocking_call_on_reactor_thread
def test_size(self):
size = self.db.size() # there are 3995 peers in the table, however the upgrade scripts remove 8 superpeers
assert size == 3987, size
|
vandenheuvel/tribler
|
Tribler/Test/Core/test_sqlitecachedbhandler.py
|
Python
|
lgpl-3.0
| 2,757
|
# Author: Gorka Muñoz
# Example of use of the confusion scheme introduced in https://arxiv.org/abs/1610.02048
# to differenciate phases. In this case, we will try to differentiate between two numbers
# of the MNIST database. The value we get from the Linear Discriminant Analysis will make
# the form of the order parameter. The goal is to find the critical point, e.g. in which
# value of the order parameter there is a phase transition. In the context of the MNIST,
# we want to find the value of the LDA which differentiates between the two chosen numbers.
import numpy as np
import matplotlib.pyplot as plt
from sklearn import discriminant_analysis
from keras.datasets import mnist
from scipy.signal import argrelextrema
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D
from keras.layers import Conv2D, MaxPooling2D
from keras.models import Sequential
from keras import backend as K
#-----------------------------------------------------------------------------
# Data preparation
(x_train, y_train), (x_test, y_test) = mnist.load_data()
number_1 = 1
number_2 = 0
## Train set
x_train = x_train[(y_train == number_1) | (y_train == number_2)]
y_train = y_train[(y_train == number_1) | (y_train == number_2)]
X_m = x_train.astype(float)
y_m = y_train.astype(float)
X_m_CNN = X_m
X_m = X_m.reshape(X_m.shape[0], X_m.shape[1]**2)
# X_m = X_m[1:1000,:] # we truncate the data to have a faster program. Feel free to test with hole database
# y_m = y_m[1:1000]
## Test set
x_test = x_test[(y_test == number_1) | (y_test == number_2)]
y_test = y_test[(y_test == number_1) | (y_test == number_2)]
x_test = x_test.astype(float)
y_test = y_test.astype(float)
x_test_CNN = x_test
x_test = x_test.reshape(x_test.shape[0], x_test.shape[1]**2)
# Reshaping Images for CNN: add the channel axis where the backend expects it.
img_rows, img_cols = 28, 28

if K.image_data_format() == 'channels_first':
    X_m_CNN = X_m_CNN.reshape(X_m_CNN.shape[0], 1, img_rows, img_cols)
    # Fix: was x_test_CNN[0] (the first image array) where the sample
    # count belongs — reshape would raise in the channels_first branch.
    x_test_CNN = x_test_CNN.reshape(x_test_CNN.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    X_m_CNN = X_m_CNN.reshape(X_m_CNN.shape[0], img_rows, img_cols, 1)
    x_test_CNN = x_test_CNN.reshape(x_test_CNN.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)

# Normalize pixel values to [0, 1].
X_m_CNN = X_m_CNN.astype('float32')
x_test_CNN = x_test_CNN.astype('float32')
X_m_CNN /= 255
x_test_CNN /= 255
#-----------------------------------------------------------------------------
# Projection on to the first 2 linear discriminant components
print("Computing Linear Discriminant Analysis projection")
# Train
X2 = X_m.copy()
X2.flat[::X_m.shape[1] + 1] += 0.01 # Make X invertible
X_lda = discriminant_analysis.LinearDiscriminantAnalysis(n_components=2).fit_transform(X2, y_m)
plt.figure()
plt.title('Linear Discriminant Analysis')
plt.scatter(X_lda[(y_m == number_1)], y_m[y_m == number_1])
plt.scatter(X_lda[(y_m == number_2)], y_m[y_m == number_2])
# Test
X3 = x_test.copy()
X3.flat[::x_test.shape[1] + 1] += 0.01 # Make X invertible
X_lda_test = discriminant_analysis.LinearDiscriminantAnalysis(n_components=2).fit_transform(X3, y_test)
#-----------------------------------------------------------------------------
# Neural Network
def build_model():
model = Sequential()
model.add(Dense(50, input_dim=X_m.shape[1], init='uniform', activation='sigmoid'))
model.add(Dense(50, init='uniform', activation = 'sigmoid'))
model.add(Dense(50, init='uniform', activation = 'sigmoid'))
model.add(Dense(50, init='uniform', activation = 'sigmoid'))
model.add(Dense(2, init='uniform', activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
def CNN_build_model():
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), init='uniform',
activation='relu',
input_shape=input_shape))
model.add(Conv2D(64, (3, 3), init='uniform', activation='relu'))
model.add(Conv2D(64, (3, 3), init='uniform', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
# model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, init='uniform', activation='relu'))
model.add(Dense(128, init='uniform', activation='relu'))
model.add(Dense(128, init='uniform', activation='relu'))
# model.add(Dropout(0.5))
model.add(Dense(2, init='uniform', activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy'])
return model
#-----------------------------------------------------------------------------
# Confusion scheme
# Sweep candidate critical values c along the LDA axis; at each c, relabel the
# data by thresholding the LDA projection, train the CNN on those labels, and
# record how badly it disagrees with the thresholded test labels.
values_c = np.linspace(min(X_lda)[0], max(X_lda)[0], 15)
#values_c = [values_c[3]]
acc = []
count = 0  # NOTE(review): unused — left for compatibility, consider removing.
for c in values_c:
    # Progress indicator: index of the current threshold in the sweep.
    print(np.where(values_c == c)[0][0])
    # Binary labels from thresholding the 1-D LDA projection at c.
    # (The "= 0" assignment is redundant after zeros_like but kept explicit.)
    y_nn = np.zeros_like(y_m)
    y_nn[(X_lda[:,0] < c)] = 0
    y_nn[(X_lda[:,0] > c)] = 1
    y_train_2D = keras.utils.to_categorical(y_nn, 2)
#    y_train_2D = y_nn
    #model = build_model()
    model = CNN_build_model()
    model.fit(X_m_CNN, y_train_2D, epochs=5, batch_size=256, verbose=1)
    predictions = model.predict(x_test_CNN)
    # Same thresholding applied to the test-set LDA projection.
    y_nn_test = np.zeros_like(y_test)
    y_nn_test[(X_lda_test[:,0] < c)] = 0
    y_nn_test[(X_lda_test[:,0] > c)] = 1
    y_test_2D = keras.utils.to_categorical(y_nn_test, 2)  # NOTE(review): unused below.
    # Despite the name, `acc` accumulates total absolute error (lower = better):
    # the sum of |P(class 1) - thresholded label| over the test set.
    acc.append(sum(abs(predictions[:,1] - y_nn_test)))
    #acc.append(sum(abs(predictions[:,0] - y_nn_test)))
print(acc)
# The class boundary is taken as the first local minimum of the error curve.
critical_value = values_c[argrelextrema(np.array(acc), np.less)[0][0]]
#-----------------------------------------------------------------------------
# Results
# Figure 1: total absolute prediction error of the NN as a function of the
# LDA threshold c (the "W"-shaped curve used to locate the class boundary).
plt.figure()
plt.title('W_shaped performance')
plt.xlabel('LDA value')
# BUG FIX: this was a second plt.xlabel() call, which overwrote the x-axis
# label and left the y-axis unlabeled.
plt.ylabel('Accuracy of the NN')
plt.plot(values_c, acc)
plt.savefig('W_shape')
# Figure 2: test-set LDA projection per class, with the learned critical point.
plt.figure()
plt.title('Linear Discriminant Analysis')  # typo fix: was "Discrimant"
plt.ylabel('Number of MNIST')
plt.xlabel('LDA value')
plt.scatter(X_lda_test, y_test,)
plt.plot((critical_value, critical_value), (number_1, number_2), 'r-', label = 'Critical point')
plt.legend()
|
peterwittek/qml-rg
|
Archiv_Session_Spring_2018/Coding_Exercises/week1_confusion_CNN.py
|
Python
|
gpl-3.0
| 6,200
|
import pytest
from vcr.persisters.filesystem import FilesystemPersister
from vcr.serializers import jsonserializer, yamlserializer
@pytest.mark.parametrize("cassette_path, serializer", [
    ('tests/fixtures/migration/old_cassette.json', jsonserializer),
    ('tests/fixtures/migration/old_cassette.yaml', yamlserializer),
])
def test_load_cassette_with_old_cassettes(cassette_path, serializer):
    """Loading a pre-migration cassette must raise ValueError whose message
    points the user at the migration script, for both serializers."""
    with pytest.raises(ValueError) as excinfo:
        FilesystemPersister.load_cassette(cassette_path, serializer)
    assert "run the migration script" in excinfo.exconly()
@pytest.mark.parametrize("cassette_path, serializer", [
    ('tests/fixtures/migration/not_cassette.txt', jsonserializer),
    ('tests/fixtures/migration/not_cassette.txt', yamlserializer),
])
def test_load_cassette_with_invalid_cassettes(cassette_path, serializer):
    """A file that is not a cassette at all must still fail, but NOT with the
    misleading 'run the migration script' advice."""
    with pytest.raises(Exception) as excinfo:
        FilesystemPersister.load_cassette(cassette_path, serializer)
    assert "run the migration script" not in excinfo.exconly()
|
poussik/vcrpy
|
tests/unit/test_persist.py
|
Python
|
mit
| 1,021
|
# This example is meant to be used from within the CadQuery module of FreeCAD.
# It builds a parametric two-part box enclosure (body + lid) joined by
# screwposts, and shows the combined result in FreeCAD.
import cadquery
import Part
# Parameter definitions
p_outerWidth = 100.0  # Outer width of box enclosure
p_outerLength = 150.0  # Outer length of box enclosure
p_outerHeight = 50.0  # Outer height of box enclosure
p_thickness = 3.0  # Thickness of the box walls
p_sideRadius = 10.0  # Radius for the curves around the sides of the box
p_topAndBottomRadius = 2.0  # Radius for the curves on the top and bottom edges of the box
p_screwpostInset = 12.0  # How far in from the edges the screwposts should be placed
p_screwpostID = 4.0  # Inner diameter of the screwpost holes, should be roughly screw diameter not including threads
p_screwpostOD = 10.0  # Outer diameter of the screwposts. Determines overall thickness of the posts
p_boreDiameter = 8.0  # Diameter of the counterbore hole, if any
p_boreDepth = 1.0  # Depth of the counterbore hole, if any
p_countersinkDiameter = 0.0  # Outer diameter of countersink. Should roughly match the outer diameter of the screw head
p_countersinkAngle = 90.0  # Countersink angle (complete angle between opposite sides, not from center to one side)
p_flipLid = True  # Whether to place the lid with the top facing down or not.
p_lipHeight = 1.0  # Height of lip on the underside of the lid. Sits inside the box body for a snug fit.
# Outer shell
oshell = cadquery.Workplane("XY").rect(p_outerWidth, p_outerLength).extrude(p_outerHeight + p_lipHeight)
# Weird geometry happens if we make the fillets in the wrong order:
# always fillet the larger radius first.
if p_sideRadius > p_topAndBottomRadius:
    oshell.edges("|Z").fillet(p_sideRadius)
    oshell.edges("#Z").fillet(p_topAndBottomRadius)
else:
    oshell.edges("#Z").fillet(p_topAndBottomRadius)
    oshell.edges("|Z").fillet(p_sideRadius)
# Inner shell, offset inward by the wall thickness.
ishell = oshell.faces("<Z").workplane(p_thickness, True)\
    .rect((p_outerWidth - 2.0 * p_thickness),(p_outerLength - 2.0 * p_thickness))\
    .extrude((p_outerHeight - 2.0 * p_thickness), False) # Set combine false to produce just the new boss
ishell.edges("|Z").fillet(p_sideRadius - p_thickness)
# Make the box outer box: subtract the cavity from the outer solid.
box = oshell.cut(ishell)
# Make the screwposts: rectangle of construction geometry whose four corners
# give the post centers.
POSTWIDTH = (p_outerWidth - 2.0 * p_screwpostInset)
POSTLENGTH = (p_outerLength - 2.0 * p_screwpostInset)
postCenters = box.faces(">Z").workplane(-p_thickness)\
    .rect(POSTWIDTH, POSTLENGTH, forConstruction=True)\
    .vertices()
for v in postCenters.all():
    # Annular post: outer cylinder minus through-hole, extruded downward.
    v.circle(p_screwpostOD / 2.0).circle(p_screwpostID / 2.0)\
        .extrude((-1.0) * ((p_outerHeight + p_lipHeight) - (2.0 * p_thickness)), True)
# Split lid into top and bottom parts
(lid, bottom) = box.faces(">Z").workplane(-p_thickness - p_lipHeight).split(keepTop=True, keepBottom=True).all()
# Translate the lid, and subtract the bottom from it to produce the lid inset
lowerLid = lid.translate((0, 0, -p_lipHeight))
cutlip = lowerLid.cut(bottom).translate((p_outerWidth + p_thickness, 0, p_thickness - p_outerHeight + p_lipHeight))
# Compute centers for counterbore/countersink or counterbore
topOfLidCenters = cutlip.faces(">Z").workplane().rect(POSTWIDTH, POSTLENGTH, forConstruction=True).vertices()
# Add holes of the desired type: counterbored, countersunk, or plain,
# chosen by which parameters are non-zero (counterbore wins if both set).
if p_boreDiameter > 0 and p_boreDepth > 0:
    topOfLid = topOfLidCenters.cboreHole(p_screwpostID, p_boreDiameter, p_boreDepth, (2.0) * p_thickness)
elif p_countersinkDiameter > 0 and p_countersinkAngle > 0:
    topOfLid = topOfLidCenters.cskHole(p_screwpostID, p_countersinkDiameter, p_countersinkAngle, (2.0) * p_thickness)
else:
    topOfLid= topOfLidCenters.hole(p_screwpostID, 2.0 * p_thickness)
# Flip lid upside down if desired
if p_flipLid:
    topOfLid.rotateAboutCenter((1, 0, 0), 180)
# Return the combined result
result = topOfLid.combineSolids(bottom)
# Boiler plate code to render our solid in FreeCAD's GUI
Part.show(result.toFreecad())
|
dcowden/cadquery-freecad-module
|
CadQuery/Examples/Ex023_Parametric_Enclosure.py
|
Python
|
lgpl-3.0
| 3,817
|
"""
AST nodes for C constructs.
"""
import os
import types
import subprocess
import logging
log = logging.getLogger(__name__)
from ctypes import CFUNCTYPE
from ctree.nodes import CtreeNode, File
import ctree
from ctree.util import singleton, highlight, truncate
from ctree.types import get_ctype, get_common_ctype
import hashlib
import ctypes
class CNode(CtreeNode):
    """Base class for all C nodes in ctree.

    Python operators on nodes build new AST nodes (e.g. ``a + b`` returns
    an ``Add`` BinaryOp), so expression trees can be written naturally.
    """
    def codegen(self, indent=0):
        """Return the C source for this subtree via CCodeGen."""
        from ctree.c.codegen import CCodeGen
        # NOTE(review): DeclarationFiller is imported but unused here —
        # possibly kept for an import side effect; confirm before removing.
        from ctree.transforms import DeclarationFiller
        return CCodeGen(indent).visit(self)
    def label(self):
        """Return the DOT-graph label for this node."""
        from ctree.c.dotgen import CDotGenLabeller
        return CDotGenLabeller().visit(self)
    def __add__(self, other):
        return Add(self, other)
    def __neg__(self):
        # NOTE(review): arithmetic negation maps to *bitwise* NOT (~), not
        # unary minus (SubUnary) — confirm this is intended.
        return BitNot(self)
    def __sub__(self, other):
        return Sub(self, other)
    def __or__(self, other):
        return BitOr(self, other)
    def __and__(self, other):
        return BitAnd(self, other)
    def __xor__(self, other):
        return BitXor(self, other)
    def __lshift__(self, other):
        # Plain Python ints are wrapped in Constant nodes for convenience.
        if isinstance(other, int):
            return BitShL(self, Constant(other))
        return BitShL(self, other)
    def __rshift__(self, other):
        if isinstance(other, int):
            return BitShR(self, Constant(other))
        return BitShR(self, other)
    def __mul__(self, other):
        return Mul(self, other)
    def __div__(self, other):
        return Div(self, other)
    # Python 3 uses __truediv__; alias it so '/' works on both versions.
    __truediv__ = __div__
    def __mod__(self, other):
        return Mod(self, other)
class CFile(CNode, File):
    """Represents a .c file that can be written out and compiled to a .so."""
    _ext = "c"
    def __init__(self, name="generated", body=None, config_target='c', path = None):
        # config_target selects which section of ctree.CONFIG supplies
        # CC/CFLAGS/LDFLAGS in _compile().
        CNode.__init__(self)
        File.__init__(self, name, body, path)
        self.config_target = config_target
    def get_bc_filename(self):
        """Name of the LLVM bitcode file for this module (currently unused)."""
        return "%s.bc" % self.name
    def get_so_filename(self):
        """Name of the shared object produced by _compile()."""
        return "{}.so".format(self.name)
    def _compile(self, program_text):
        """Write the C source (if changed) and (re)build the shared object.

        Caching: a SHA-512 of the stripped source is compared against the
        previous hash so unchanged programs skip both the write and the
        recompile. Returns the path to the .so file.

        NOTE(review): ``self.program_hash`` and ``self.empty`` are read
        before being set here — presumably initialized by the File base
        class; confirm.
        """
        # print(repr(self.path), repr(self.get_filename()))
        c_src_file = os.path.join(self.path, self.get_filename())
        so_file = os.path.join(self.path, self.get_so_filename())
        program_hash = hashlib.sha512(program_text.strip().encode()).hexdigest()
        so_file_exists = os.path.exists(so_file)
        old_hash = self.program_hash
        hash_match = old_hash == program_hash
        log.debug("Old hash: %s \n New hash: %s", old_hash, program_hash)
        recreate_c_src = program_text and program_text != self.empty and not hash_match
        recreate_so = recreate_c_src or not so_file_exists
        log.debug("RECREATE_C_SRC: %s \t RECREATE_so: %s \t HASH_MATCH: %s",
                  recreate_c_src, recreate_so, hash_match)
        if not program_text:
            log.debug("Program not found. Attempting to use cached version")
        # Write the C source only when it actually changed.
        if recreate_c_src:
            with open(c_src_file, 'w') as c_file:
                c_file.write(program_text)
            log.info("file for generated C: %s", c_src_file)
            # syntax-highlight and print C program
            highlighted = highlight(program_text, 'c')
            log.info("generated C program: (((\n%s\n)))", highlighted)
            self.program_hash = program_hash
        # Rebuild the shared object when the source changed or no .so exists.
        if recreate_so:
            # call clang to generate LLVM bitcode file
            log.debug('Regenerating so.')
            CC = ctree.CONFIG.get(self.config_target, 'CC')
            CFLAGS = ctree.CONFIG.get(self.config_target, 'CFLAGS')
            LDFLAGS = ctree.CONFIG.get(self.config_target, 'LDFLAGS')
            compile_cmd = "%s -shared %s -o %s %s %s" % (CC, CFLAGS, so_file,
                                                         c_src_file, LDFLAGS)
            log.info("compilation command: %s", compile_cmd)
            subprocess.check_call(compile_cmd, shell=True)
            # log.info("file for generated so: %s", so_file)
        # NOTE(review): this branch is unreachable — when so_file_exists is
        # False, recreate_so is True by construction, so the condition can
        # never hold. Dead code kept as-is.
        if not (so_file_exists or recreate_so):
            raise NotImplementedError('No Cached version found')
        # load llvm bitcode
        # import llvm.core
        # import llvmlite.binding as llvm
        # with open(ll_bc_file, 'rb') as bc:
        #     ll_module = llvm.module.parse_bitcode(bc.read())
        # syntax-highlight and print LLVM program
        # preserve_src_drhighlighted = highlight(str(ll_module), 'llvm')
        # log.debug("generated LLVM Program: (((\n%s\n)))", highlighted)
        return so_file
class Statement(CNode):
    """Base class for C statement nodes (C99 Section B.2.3 6.6)."""
    pass
class Expression(CNode):
    """Base class for C expression nodes."""
class Return(Statement):
    """A C return statement (C99 Section B.2.3 6.6.6 line 4)."""
    _fields = ['value']
    def __init__(self, value=None):
        # value is None for a bare 'return;'.
        self.value = value
        super(Return, self).__init__()
class If(Statement):
    """A C if/else statement. 'elze' avoids shadowing the Python keyword."""
    _fields = ['cond', 'then', 'elze']
    def __init__(self, cond=None, then=None, elze=None):
        self.cond = cond
        self.then = then
        self.elze = elze
        super(If, self).__init__()
class While(Statement):
    """A C while loop."""
    _fields = ['cond', 'body']
    # The code generator emits the braces/body itself, so no trailing ';'.
    _requires_semicolon = lambda self: False
    def __init__(self, cond=None, body=None):
        self.cond = cond
        self.body = body if body else []
        super(While, self).__init__()
class DoWhile(Statement):
    """A C do { ... } while (cond); loop."""
    _fields = ['body', 'cond']
    def __init__(self, body=None, cond=None):
        self.body = body if body else []
        self.cond = cond
        super(DoWhile, self).__init__()
class For(Statement):
    """A C for loop; 'pragma' optionally attaches a #pragma above it."""
    _fields = ['init', 'test', 'incr', 'body']
    def __init__(self, init=None, test=None, incr=None, body=None, pragma=None):
        self.init = init
        self.test = test
        self.incr = incr
        if body is None:
            body = []
        self.body = body
        self.pragma = pragma
        super(For, self).__init__()
# class Define(Statement):
#     mbd: deprecated. see ctree.cpp.nodes.CppDefine
class FunctionCall(Expression):
    """A C function-call expression: func(args...)."""
    _fields = ['func', 'args']
    def __init__(self, func=None, args=None):
        self.func = func
        self.args = args if args else []
        super(FunctionCall, self).__init__()
class Literal(Expression):
    """Base class for literal expression nodes."""
    pass
class Constant(Literal):
    """A C constant (C99 Section B.1.4 6.1.3); wraps a Python value."""
    _fields = ['value']
    def __init__(self, value=None):
        self.value = value
        super(Constant, self).__init__()
    def get_type(self):
        # Delegates Python-value -> ctypes mapping to ctree.types.
        return get_ctype(self.value)
class Hex(Constant):
    # Presumably rendered in hexadecimal by the code generator — see
    # ctree.c.codegen (TODO confirm).
    pass
class Block(Statement):
    """A braced compound statement: { ... }."""
    _fields = ['body']
    def __init__(self, body=None):
        self.body = body if body else []
        super(Block, self).__init__()
    def _requires_semicolon(self):
        return False
class MultiNode(Block):
    """
    Some Python nodes need to be translated to a block of nodes but Visitors can't do that.
    """
class String(Literal):
    """A C string literal; accepts multiple values.

    NOTE(review): 'values' is not listed in a _fields declaration, so it is
    likely invisible to generic tree traversal — confirm intended.
    """
    def __init__(self, *values):
        self.values = values
        super(String, self).__init__()
class SymbolRef(Literal):
    """A reference to a named symbol, optionally typed as a declaration.

    The set_* helpers return self so qualifiers can be chained fluently,
    e.g. SymbolRef('x', t).set_const().set_static().
    """
    # Class-wide counter backing unique(); shared by all instances.
    _next_id = 0
    _fields = ['name','type']
    def __init__(self, name=None, sym_type=None, _global=False,
                 _local=False, _const=False, _static=False):
        """
        Create a new symbol with the given name. If a declaration
        type is specified, the symbol is considered a declaration
        and unparsed with the type.

        sym_type must be a ctypes *instance*, never a class (the assert
        below guards against passing e.g. ctypes.c_int itself).
        """
        self.name = name
        if sym_type is not None:
            assert not isinstance(sym_type, type)
        self.type = sym_type
        self._global = _global
        self._local = _local
        self._const = _const
        self._static = _static
        super(SymbolRef, self).__init__()
    def set_global(self, value=True):
        self._global = value
        return self
    def set_local(self, value=True):
        self._local = value
        return self
    def set_const(self, value=True):
        self._const = value
        return self
    def set_static(self, value=True):
        self._static = value
        return self
    @classmethod
    def unique(cls, name="name", sym_type=None):
        """
        Factory for making unique symbols.
        """
        # Appends a monotonically increasing id, e.g. "name_0", "name_1", ...
        sym = SymbolRef("%s_%d" % (name, cls._next_id), sym_type)
        cls._next_id += 1
        return sym
    def copy(self, declare=False):
        # declare=True preserves type and the _global qualifier only; the
        # _local/_const/_static qualifiers are intentionally(?) dropped —
        # TODO confirm.
        if declare:
            return SymbolRef(self.name, self.type, self._global)
        else:
            return SymbolRef(self.name)
class FunctionDecl(Statement):
    """A C function declaration/definition.

    'defn' holds the body statements (empty list => declaration only).
    The set_* helpers return self for fluent chaining.
    """
    _fields = ['params', 'defn']
    def __init__(self, return_type=None, name=None, params=None, defn=None):
        self.return_type = return_type
        self.name = name
        self.params = params if params else []
        self.defn = defn if defn else []
        self.inline = False
        self.static = False
        self.kernel = False
        super(FunctionDecl, self).__init__()
    def get_type(self):
        """Return the ctypes CFUNCTYPE matching this signature.

        return_type/param types are ctypes *instances*; their classes are
        what CFUNCTYPE wants. A None return_type is passed through
        (CFUNCTYPE treats None as void).
        """
        type_sig = []
        # return type
        if self.return_type is None:
            type_sig.append(self.return_type)
        else:
            assert not isinstance(self.return_type, type), \
                "Expected a ctypes instance or None, got %s (%s)." % \
                (self.return_type, type(self.return_type))
            type_sig.append( type(self.return_type) )
        # parameter types
        for param in self.params:
            assert not isinstance(param.type, type), \
                "Expected a ctypes instance or None, got %s (%s)." % \
                (param.type, type(param.type))
            type_sig.append( type(param.type) )
        return CFUNCTYPE(*type_sig)
    def set_inline(self, value=True):
        self.inline = value
        return self
    def set_static(self, value=True):
        self.static = value
        return self
    def set_kernel(self, value=True):
        self.kernel = value
        return self
class UnaryOp(Expression):
    """A unary operator expression; 'op' is an Op._Op token instance."""
    _fields = ['arg']
    def __init__(self, op=None, arg=None):
        self.op = op
        self.arg = arg
        super(UnaryOp, self).__init__()
class BinaryOp(Expression):
    """A binary operator expression; 'op' is an Op._Op token instance."""
    _fields = ['left', 'op', 'right']
    def __init__(self, left=None, op=None, right=None):
        self.left = left
        self.op = op
        self.right = right
        super(BinaryOp, self).__init__()
    def get_type(self, env=None):
        """Infer the operand type: try get_type(), then an env lookup for
        SymbolRefs, then a .type attribute; combine via get_common_ctype."""
        # FIXME: integer promotions and stuff like that
        if hasattr(self.left, 'get_type'):
            left_type = self.left.get_type()
        elif isinstance(self.left, SymbolRef) and env is not None \
                and env._has_key(self.left.name):
            left_type = env._lookup(self.left.name)
        elif hasattr(self.left, 'type'):
            left_type = self.left.type
        else:
            left_type = None
        if hasattr(self.right, 'get_type'):
            right_type = self.right.get_type()
        elif isinstance(self.right, SymbolRef) and env is not None \
                and env._has_key(self.right.name):
            right_type = env._lookup(self.right.name)
        elif hasattr(self.right, 'type'):
            right_type = self.right.type
        else:
            right_type = None
        # None entries (unknown types) are filtered out before combining.
        return get_common_ctype(filter(lambda x: x is not None, [right_type,
                                                                 left_type]))
class AugAssign(Expression):
    """An augmented assignment, e.g. target += value."""
    _fields = ['target', 'value']
    def __init__(self, target=None, op=None, value=None):
        self.target = target
        self.op = op
        self.value = value
        super(AugAssign, self).__init__()
class TernaryOp(Expression):
    """The conditional expression cond ? then : elze."""
    _fields = ['cond', 'then', 'elze']
    def __init__(self, cond=None, then=None, elze=None):
        self.cond = cond
        self.then = then
        self.elze = elze
        super(TernaryOp, self).__init__()
class Cast(Expression):
    """A C-style cast expression: (type) value."""
    _fields = ['value']
    def __init__(self, sym_type=None, value=None):
        self.type = sym_type
        self.value = value
        super(Cast, self).__init__()
class ArrayDef(Expression):
    """An array definition: target, size, and initializer body."""
    _fields = ['target', 'size', 'body']
    def __init__(self, target=None, size=None, body=None):
        self.target = target
        self.size = size
        self.body = body if body else []
        super(ArrayDef, self).__init__()
class Array(Expression):
    """An array initializer; size defaults to len(body) when omitted."""
    _fields = ['type', 'size', 'body']
    def __init__(self, type=None, size = None, body = None):
        self.body = body or []
        self.size = size or len(self.body)
        self.type = type
        super(Array, self).__init__()
    def get_type(self):
        return self.type
class Break(Statement):
    """A C break statement."""
    _requires_semicolon = lambda self : True
class Continue(Statement):
    """A C continue statement."""
    _requires_semicolon = lambda self : True
class Pass(Statement):
    """A no-op statement that emits nothing (hence no semicolon)."""
    _requires_semicolon = lambda self: False
# Op is a namespace of operator-token classes. The @singleton decorator
# (from ctree.util) replaces the class with a single shared instance, so
# tokens are constructed as Op.Add(), Op.Assign(), etc.
@singleton
class Op:
    class _Op(object):
        """Base operator token; __str__ yields the C spelling (_c_str)."""
        def __init__(self):
            # Set by the code generator when explicit parentheses are
            # required around the containing expression (TODO confirm).
            self._force_parentheses = False
        def __str__(self):
            return self._c_str
    class PreInc(_Op):
        _c_str = "++"
    class PreDec(_Op):
        _c_str = "--"
    class PostInc(_Op):
        _c_str = "++"
    class PostDec(_Op):
        _c_str = "--"
    class Ref(_Op):
        _c_str = "&"
    class Deref(_Op):
        _c_str = "*"
    class SizeOf(_Op):
        _c_str = "sizeof"
    class Add(_Op):
        _c_str = "+"
    class AddUnary(_Op):
        _c_str = "+"
    class Sub(_Op):
        _c_str = "-"
    class SubUnary(_Op):
        _c_str = "-"
    class Mul(_Op):
        _c_str = "*"
    class Div(_Op):
        _c_str = "/"
    class Mod(_Op):
        _c_str = "%"
    class Gt(_Op):
        _c_str = ">"
    class Lt(_Op):
        _c_str = "<"
    class GtE(_Op):
        _c_str = ">="
    class LtE(_Op):
        _c_str = "<="
    class Eq(_Op):
        _c_str = "=="
    class NotEq(_Op):
        _c_str = "!="
    class BitAnd(_Op):
        _c_str = "&"
    class BitOr(_Op):
        _c_str = "|"
    class BitNot(_Op):
        _c_str = "~"
    class BitShL(_Op):
        _c_str = "<<"
    class BitShR(_Op):
        _c_str = ">>"
    class BitXor(_Op):
        _c_str = "^"
    class And(_Op):
        _c_str = "&&"
    class Or(_Op):
        _c_str = "||"
    class Not(_Op):
        _c_str = "!"
    class Comma(_Op):
        _c_str = ","
    class Dot(_Op):
        _c_str = "."
    class Arrow(_Op):
        _c_str = "->"
    class Assign(_Op):
        _c_str = "="
    class ArrayRef(_Op):
        _c_str = "[]"
# ---------------------------------------------------------------------------
# factory routines for building UnaryOps, BinaryOps, etc.
# Each helper pairs the matching Op token with its operands. Add/Sub
# double as unary or binary constructors depending on whether 'b' is given.
def PreInc(a):
    return UnaryOp(Op.PreInc(), a)
def PreDec(a):
    return UnaryOp(Op.PreDec(), a)
def PostInc(a):
    return UnaryOp(Op.PostInc(), a)
def PostDec(a):
    return UnaryOp(Op.PostDec(), a)
def BitNot(a):
    return UnaryOp(Op.BitNot(), a)
def Not(a):
    return UnaryOp(Op.Not(), a)
def Ref(a):
    # Address-of: &a
    return UnaryOp(Op.Ref(), a)
def Deref(a):
    # Dereference: *a
    return UnaryOp(Op.Deref(), a)
def SizeOf(a):
    return UnaryOp(Op.SizeOf(), a)
def Add(a, b=None):
    if b is not None:
        return BinaryOp(a, Op.Add(), b)
    else:
        return UnaryOp(Op.AddUnary(), a)
def Sub(a, b=None):
    if b is not None:
        return BinaryOp(a, Op.Sub(), b)
    else:
        return UnaryOp(Op.SubUnary(), a)
def Mul(a, b):
    return BinaryOp(a, Op.Mul(), b)
def Div(a, b):
    return BinaryOp(a, Op.Div(), b)
def Mod(a, b):
    return BinaryOp(a, Op.Mod(), b)
def Gt(a, b):
    return BinaryOp(a, Op.Gt(), b)
def Lt(a, b):
    return BinaryOp(a, Op.Lt(), b)
def GtE(a, b):
    return BinaryOp(a, Op.GtE(), b)
def LtE(a, b):
    return BinaryOp(a, Op.LtE(), b)
def Eq(a, b):
    return BinaryOp(a, Op.Eq(), b)
def NotEq(a, b):
    return BinaryOp(a, Op.NotEq(), b)
def BitAnd(a, b):
    return BinaryOp(a, Op.BitAnd(), b)
def BitOr(a, b):
    return BinaryOp(a, Op.BitOr(), b)
def BitShL(a, b):
    return BinaryOp(a, Op.BitShL(), b)
def BitShR(a, b):
    return BinaryOp(a, Op.BitShR(), b)
def BitXor(a, b):
    return BinaryOp(a, Op.BitXor(), b)
def And(a, b):
    return BinaryOp(a, Op.And(), b)
def Or(a, b):
    return BinaryOp(a, Op.Or(), b)
def Comma(a, b):
    return BinaryOp(a, Op.Comma(), b)
def Dot(a, b):
    # Member access: a.b
    return BinaryOp(a, Op.Dot(), b)
def Arrow(a, b):
    # Pointer member access: a->b
    return BinaryOp(a, Op.Arrow(), b)
def Assign(a, b):
    return BinaryOp(a, Op.Assign(), b)
def ArrayRef(a, b):
    # Array subscript: a[b]
    return BinaryOp(a, Op.ArrayRef(), b)
# Augmented assignments (a op= b):
def AddAssign(a, b):
    return AugAssign(a, Op.Add(), b)
def SubAssign(a, b):
    return AugAssign(a, Op.Sub(), b)
def MulAssign(a, b):
    return AugAssign(a, Op.Mul(), b)
def DivAssign(a, b):
    return AugAssign(a, Op.Div(), b)
def ModAssign(a, b):
    return AugAssign(a, Op.Mod(), b)
def BitXorAssign(a, b):
    return AugAssign(a, Op.BitXor(), b)
def BitAndAssign(a, b):
    return AugAssign(a, Op.BitAnd(), b)
def BitOrAssign(a, b):
    return AugAssign(a, Op.BitOr(), b)
def BitShLAssign(a, b):
    return AugAssign(a, Op.BitShL(), b)
def BitShRAssign(a, b):
    return AugAssign(a, Op.BitShR(), b)
|
mbdriscoll/ctree
|
ctree/c/nodes.py
|
Python
|
bsd-2-clause
| 17,455
|
# coding: utf-8
"""
An API to insert and retrieve metadata on cloud artifacts.
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v1alpha1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class DiscoveryDiscoveredDetails(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'operation': 'GooglelongrunningOperation'
}
attribute_map = {
'operation': 'operation'
}
def __init__(self, operation=None): # noqa: E501
"""DiscoveryDiscoveredDetails - a model defined in Swagger""" # noqa: E501
self._operation = None
self.discriminator = None
if operation is not None:
self.operation = operation
@property
def operation(self):
"""Gets the operation of this DiscoveryDiscoveredDetails. # noqa: E501
Output only. An operation that indicates the status of the current scan. # noqa: E501
:return: The operation of this DiscoveryDiscoveredDetails. # noqa: E501
:rtype: GooglelongrunningOperation
"""
return self._operation
@operation.setter
def operation(self, operation):
"""Sets the operation of this DiscoveryDiscoveredDetails.
Output only. An operation that indicates the status of the current scan. # noqa: E501
:param operation: The operation of this DiscoveryDiscoveredDetails. # noqa: E501
:type: GooglelongrunningOperation
"""
self._operation = operation
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(DiscoveryDiscoveredDetails, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DiscoveryDiscoveredDetails):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
grafeas/client-python
|
grafeas/models/discovery_discovered_details.py
|
Python
|
apache-2.0
| 3,542
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Embedding layers for ReadTwice."""
import math
from typing import Optional, Text
import tensorflow as tf
from readtwice.layers import tensor_utils
class EmbeddingLookup(tf.keras.layers.Layer):
  """Embedding lookup layer for id tensor input.

  This layer functions very similarly to `tf.keras.layers.Embedding` except:
    1. The `use_one_hot_lookup` option can enable potentially faster TPU lookup
      via one-hot multiplication.
    2. The optional `input_mask` argument will ensure that all masked embedding
      vectors are 0.
    3. The optional `projection_size` argument makes it easy to project the
      embedding size to a different output size as done by ALBERT.
  """

  def __init__(self,
               vocab_size: int,
               embedding_size: int,
               projection_size: int = 0,
               initializer_range: float = 0.02,
               use_one_hot_lookup: bool = False,
               name: Text = 'embedding_lookup',
               **kwargs):
    """Init.

    Args:
      vocab_size: Size of the embedding vocabulary. Must be positive and larger
        than the maximum input id.
      embedding_size: Width of the embedding table. Must be positive.
      projection_size: If positive and different from embedding_size, the output
        from the embedding table lookup will be projected via a dense layer to
        this size.
      initializer_range: The standard deviation of the truncated normal
        initializer for initializing the embedding table.
      use_one_hot_lookup: Whether to use tf.one_hot for embedding lookup instead
        of tf.gather. Default is False, but setting to True may be more
        efficient on TPUs for vocab sizes that aren't too large.
      name: Name of the layer.
      **kwargs: Forwarded to super.
    """
    super(EmbeddingLookup, self).__init__(name=name, **kwargs)
    self.vocab_size = vocab_size
    self.embedding_size = embedding_size
    self.projection_size = projection_size
    self.initializer_range = initializer_range
    self.use_one_hot_lookup = use_one_hot_lookup

  def build(self, input_shape: tf.TensorShape):
    """Keras build function.

    Args:
      input_shape: TensorShape of the input; unused.
    """
    self.embedding_table = self.add_weight(
        name='embedding_table',
        shape=[self.vocab_size, self.embedding_size],
        dtype=tf.float32,
        initializer=tf.keras.initializers.TruncatedNormal(
            stddev=self.initializer_range),
        trainable=True)
    # The projection layer only exists when it would actually change width.
    if self.projection_size > 0 and self.embedding_size != self.projection_size:
      self.embedding_projection = tf.keras.layers.Dense(
          units=self.projection_size,
          activation=None,
          use_bias=True,
          kernel_initializer=tf.keras.initializers.TruncatedNormal(
              stddev=1.0 / math.sqrt(self.embedding_size)),
          bias_initializer='zeros',
          name='projection')
    super(EmbeddingLookup, self).build(input_shape)

  def call(self,
           input_ids: tf.Tensor,
           input_mask: Optional[tf.Tensor] = None) -> tf.Tensor:
    """Calls the layer.

    Args:
      input_ids: <int>[batch_size, ...] Tensor of ids to look up. All ids must
        be between 0 (inclusive) and `self.vocab_size` (exclusive).
      input_mask: <int>[batch_size, ...] Tensor of the same shape as
        `input_ids`. Should have only 0 and 1 values, with 0 for ids to mask and
        1 otherwise. The returned embeddings for all masked ids will be 0, so
        the corresponding ids in `input_ids` are ignored.

    Returns:
      <float32>[input_ids.shape, embedding_size] Tensor of embeddings.
    """
    if input_mask is not None:
      # Make all masked ids 0 since their embeddings will be set to 0 later.
      input_ids *= input_mask
    output = (
        tensor_utils.gather_by_one_hot(self.embedding_table, input_ids) if
        self.use_one_hot_lookup else tf.gather(self.embedding_table, input_ids))
    if self.projection_size > 0 and self.embedding_size != self.projection_size:
      output = self.embedding_projection(output)
    # Zero out embeddings for masked ids if any. Generally, should be
    # the last step as some of the previous ops (e.g. projection that ends up
    # adding a bias) could potentially have altered the tensors for masked ids.
    if input_mask is not None:
      output *= tf.expand_dims(tf.cast(input_mask, dtype=output.dtype), axis=-1)
    return output
|
google-research/google-research
|
readtwice/layers/embedding.py
|
Python
|
apache-2.0
| 4,969
|
# sample documentation build configuration file, created by
# sphinx-quickstart on Mon Apr 16 21:22:43 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'academictorrents'
copyright = u'academictorrents'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'v0.0.1'
# The full version, including alpha/beta/rc tags.
release = 'v0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
# NOTE(review): 'sampledoc' looks like a sphinx-quickstart template leftover;
# the man/texinfo sections below use 'academictorrents' -- confirm and align.
htmlhelp_basename = 'sampledoc'
# -- Options for LaTeX output --------------------------------------------------
# All keys commented out: fall back to the Sphinx LaTeX defaults.
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
# NOTE(review): title/author ('sample', 'Kenneth Reitz') appear to be template
# leftovers; man_pages/texinfo_documents below use 'academictorrents' -- verify.
latex_documents = [
  ('index', 'sample.tex', u'sample Documentation',
   u'Kenneth Reitz', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# One manual page, installed in man section 1.
man_pages = [
    ('index', 'academictorrents', u'academictorrents Documentation',
     [u'academictorrents'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
# Single Texinfo output document for the whole doc tree.
texinfo_documents = [
  ('index', 'academictorrents', u'academictorrents',
   u'academictorrents', 'academictorrents', 'Academic Torrents Python and R APIs',
   'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
|
AcademicTorrents/python-r-api
|
docs/conf.py
|
Python
|
mit
| 7,748
|
#
# FishPi - An autonomous drop in the ocean
#
# Main View classes for POCV UI.
#
import tkFont
from Tkinter import *
from PIL import Image, ImageTk
class MainView(Frame, object):
    """ MainView class for POCV UI.

    Root layout frame: a top row holding the map and camera frames, and a
    bottom row holding the route, info and controls frames.
    """
    def __init__(self, master, view_controller):
        # master: parent Tk widget; view_controller: mediator supplying model
        # data and command callbacks to every sub-frame.
        super(MainView, self).__init__(master, bd=1, relief=GROOVE)
        self.pack()
        self.create_widgets(master, view_controller)
    def create_widgets(self, master, view_controller):
        """ Create widgets for view. """
        # top frame
        self.top_frame = Frame(master, bd=1, relief=GROOVE)
        self.top_frame.pack(fill=X)
        # map frame (in top sub-frame)
        self.map_frame = MapFrame(self.top_frame, view_controller)
        self.map_frame.pack(side=LEFT, fill=X)
        # camera frame (in top sub-frame)
        self.camera_frame = CameraFrame(self.top_frame, view_controller)
        self.camera_frame.pack(side=LEFT, fill=X, expand=True)
        # bottom sub-frame (in main frame)
        self.bottom_frame = Frame(master, bd=1, relief=GROOVE)
        self.bottom_frame.pack(fill=BOTH, expand=True)
        # route frame (in bottom sub-frame)
        self.route_frame = RouteFrame(self.bottom_frame, view_controller)
        self.route_frame.pack(side=LEFT, fill=BOTH, padx=5, pady=5, expand=True)
        # info frame (in bottom sub-frame)
        self.info_frame = InfoFrame(self.bottom_frame, view_controller)
        self.info_frame.pack(side=LEFT, fill=BOTH, pady=5, expand=True)
        # controls frame (in bottom sub-frame)
        self.controls_frame = ControlsFrame(self.bottom_frame, view_controller)
        self.controls_frame.pack(side=LEFT, fill=BOTH, padx=5, pady=5, expand=True)
    def update_callback(self):
        """ Callback for any view objects that need to requery (rather than observe a model). """
        # Only the camera frame polls; the other frames observe Tk variables.
        self.camera_frame.update_callback()
class MapFrame(Frame, object):
    """ UI Frame displaying map.

    Renders a down-scaled map image on a canvas with a circular "H" marker
    overlay, and logs mouse clicks/drags to stdout.
    """
    def __init__(self, master, view_controller):
        super(MapFrame, self).__init__(master, bd=1, relief=GROOVE)
        self._view_controller = view_controller
        # get map image
        image = view_controller.get_current_map()
        # scale and display image
        width, height = image.size
        scale = .12
        image_resized = image.resize((int(width*scale), int(height*scale)), Image.ANTIALIAS)
        photo = ImageTk.PhotoImage(image_resized)
        # add overlay
        self.top = Canvas(self, width=480, height=240)
        self.top.create_image((25,0), image=photo, anchor=NW)
        # "H" marker -- presumably Home position; TODO confirm semantics
        self.top.create_oval((35,190,75,230), width=2, fill="white")
        self.top.create_text((55,210), text="H", font=14)
        # keep a reference so the PhotoImage is not garbage-collected
        self.image=photo
        self.top.bind("<Button-1>", self.click_callback)
        self.top.bind("<B1-Motion>", self.move_callback)
        self.top.pack(fill=X)
    def click_callback(self, event):
        # debug output of canvas-relative click coordinates (Python 2 print)
        print "clicked at", event.x, event.y
    def move_callback(self, event):
        # debug output while dragging with button 1 held
        print event.x, event.y
class CameraFrame(Frame, object):
    """ UI Frame displaying camera image.

    Shows the controller's latest captured image on a canvas; refreshed via
    update_callback().
    """
    def __init__(self, master, view_controller):
        super(CameraFrame, self).__init__(master, bd=1, relief=SUNKEN)
        self._view_controller = view_controller
        # display image
        self.cnvs_camera = Canvas(self, width=320, height=240)
        self.update_image()
        self.cnvs_camera.pack(fill=BOTH)
    def update_image(self):
        # get latest image
        image = self._view_controller.last_img
        photo = ImageTk.PhotoImage(image)
        # display it
        # NOTE(review): a new canvas image item is created on every refresh
        # without deleting the previous one -- items may accumulate; verify.
        self.cnvs_camera.create_image((0,0), image=photo, anchor=NW)
        #self.cnvs_camera.configure(image = photo)
        # keep a reference so the PhotoImage is not garbage-collected
        self.image = photo
    def update_callback(self):
        # polled by MainView.update_callback
        self.update_image()
class InfoFrame(Frame, object):
    """ UI Frame displaying information and status.

    Grid of read-only labels bound to the model's Tk variables: GPS location,
    heading, speed, fix status, time/date and temperature.
    """
    def __init__(self, master, view_controller):
        super(InfoFrame, self).__init__(master, bd=1, relief=SUNKEN)
        self._view_controller = view_controller
        Label(self, text = "Location Info:", pady=6, anchor=W, justify=LEFT).grid(row=0, columnspan=2, sticky=W)
        # latitude
        Label(self, text = "Latitude:", padx=3, anchor=W, justify=LEFT).grid(row=1, sticky=W)
        Label(self, textvariable=view_controller.model.GPS_latitude).grid(row=1, column=1)
        # longitude
        Label(self, text = "Longitude:", padx=3, anchor=W, justify=LEFT).grid(row=2, sticky=W)
        Label(self, textvariable=view_controller.model.GPS_longitude).grid(row=2, column=1)
        # compass heading info
        Label(self, text = "Compass Heading:", padx=3, anchor=W, justify=LEFT).grid(row=3, sticky=W)
        Label(self, textvariable=view_controller.model.compass_heading).grid(row=3, column=1)
        # GPS heading info
        Label(self, text = "GPS Heading:", padx=3, anchor=W, justify=LEFT).grid(row=4, sticky=W)
        Label(self, textvariable=view_controller.model.GPS_heading).grid(row=4, column=1)
        Label(self, text = "GPS Speed (knots):", padx=3, anchor=W, justify=LEFT).grid(row=5, sticky=W)
        Label(self, textvariable=view_controller.model.GPS_speed).grid(row=5, column=1)
        Label(self, text = "GPS Altitude:", padx=3, anchor=W, justify=LEFT).grid(row=6, sticky=W)
        Label(self, textvariable=view_controller.model.GPS_altitude).grid(row=6, column=1)
        # GPS status
        # BUG FIX: label previously read "GPX fix?" -- the checkbutton is bound
        # to model.GPS_fix (GPS fix status), not the GPX file format.
        Checkbutton(self, text="GPS fix?", font=tkFont.Font(weight="bold"), state=DISABLED, variable=view_controller.model.GPS_fix).grid(row=7, column=0, columnspan=2, sticky=E)
        Label(self, text = "# satellites:", padx=3, anchor=W, justify=LEFT).grid(row=8, sticky=W)
        Label(self, textvariable=view_controller.model.GPS_satellite_count).grid(row=8, column=1)
        Label(self, text = "Other Info:", pady=6, anchor=W, justify=LEFT).grid(row=9, columnspan=2, sticky=W)
        # date and time
        Label(self, text = "Time:", padx=3, anchor=W, justify=LEFT).grid(row=10, sticky=W)
        Label(self, textvariable=view_controller.model.time).grid(row=10, column=1)
        Label(self, text = "Date:", padx=3, anchor=W, justify=LEFT).grid(row=11, sticky=W)
        Label(self, textvariable=view_controller.model.date).grid(row=11, column=1)
        Label(self, text = "Temperature:", padx=3, anchor=W, justify=LEFT).grid(row=12, sticky=W)
        Label(self, textvariable=view_controller.model.temperature).grid(row=12, column=1)
class ControlsFrame(Frame, object):
    """ UI Frame displaying controls for heading and throttle.

    Three mode buttons (Manual / Pause / AutoPilot) plus two sliders.  In
    manual mode the sliders drive rudder angle and throttle directly; in
    auto-pilot mode the same sliders set desired heading and speed.
    """
    def __init__(self, master, view_controller):
        super(ControlsFrame, self).__init__(master, bd=1, relief=SUNKEN)
        self._view_controller = view_controller
        Label(self, text = "Control Mode:", pady=6, bd=1, anchor=W, justify=LEFT).pack(fill=X, padx=2, expand=True)
        # top frame
        self.top_frame = Frame(self)
        self.top_frame.pack(fill=X)
        # mode buttons (manual starts depressed = active mode)
        self.btn_manual = Button(self.top_frame, text="Manual", command=self.on_set_manual_mode)
        self.btn_manual.config(relief=SUNKEN)
        self.btn_manual.pack(side=LEFT, padx=3)
        self.btn_pause = Button(self.top_frame, text="Pause", command=self.on_pause)
        self.btn_pause.pack(side=LEFT)
        self.btn_auto = Button(self.top_frame, text="AutoPilot", command=self.on_set_auto_pilot_mode)
        self.btn_auto.pack(side=LEFT, padx=3)
        # centre frame
        self.lbl_heading = Label(self, text = "Steering (Manual)", pady=6, bd=1, anchor=W, justify=LEFT)
        self.lbl_heading.pack(fill=X, padx=2, expand=True)
        # rudder heading: -45..45 degrees in manual mode
        self.scl_rudder = Scale(self, orient=HORIZONTAL, from_=-45, to=45, command=self.on_rudder)
        self.scl_rudder.set(0)
        self.scl_rudder.pack(fill=X, expand=True, padx=5)
        self.btn_zero_heading = Button(self, text="Centre Rudder", command=self.on_zero_heading)
        self.btn_zero_heading.pack(padx=3)
        # throttle level: -100..100 percent, vertical slider
        self.lbl_throttle = Label(self, text = "Throttle (Manual):", pady=6, bd=1, anchor=W, justify=LEFT)
        self.lbl_throttle.pack(fill=X, padx=2, expand=True)
        self.btn_zero_throttle = Button(self, text="Zero Throttle", command=self.on_zero_throttle)
        self.btn_zero_throttle.pack(side=RIGHT, padx=3)
        self.scl_speed_controller = Scale(self, length=200, from_=100, to=-100, command=self.on_throttle)
        self.scl_speed_controller.set(0)
        self.scl_speed_controller.pack(fill=Y, pady=5)
    def on_set_manual_mode(self):
        """ event handler for mode change """
        # set ui: reset sliders, relabel for manual semantics,
        # show Manual button depressed
        self.scl_speed_controller.set(0)
        self.scl_speed_controller.config(from_=100, to=-100)
        self.lbl_throttle.config(text="Throttle (Manual):")
        self.btn_zero_throttle.config(text="Zero Throttle")
        self.scl_rudder.set(0)
        self.scl_rudder.config(from_=-45, to=45)
        self.lbl_heading.config(text="Steering (Manual):")
        self.btn_zero_heading.config(text="Centre Rudder")
        self.btn_manual.config(relief=SUNKEN)
        self.btn_auto.config(relief=RAISED)
        # set controller
        self._view_controller.set_manual_mode()
    def on_pause(self):
        """ event handler for mode change """
        # set ui: zero both sliders, raise both mode buttons
        self.scl_speed_controller.set(0)
        self.scl_rudder.set(0)
        self.btn_manual.config(relief=RAISED)
        self.btn_auto.config(relief=RAISED)
        # set controller
        self._view_controller.halt()
    def on_set_auto_pilot_mode(self):
        """ event handler for mode change """
        # set ui: rudder slider becomes a -180..180 heading selector,
        # throttle slider becomes a speed setpoint
        self.scl_speed_controller.set(0)
        self.scl_speed_controller.config(from_=100, to=-100)
        self.lbl_throttle.config(text="Speed (Auto):")
        self.btn_zero_throttle.config(text="Zero Speed")
        self.scl_rudder.set(0)
        self.scl_rudder.config(from_=-180, to=180)
        self.lbl_heading.config(text="Heading (Auto):")
        self.btn_zero_heading.config(text="Centre Heading")
        self.btn_manual.config(relief=RAISED)
        self.btn_auto.config(relief=SUNKEN)
        # set controller
        self._view_controller.set_auto_pilot_mode()
    def on_zero_throttle(self):
        """ event handler for throttle change """
        # setting the scale fires on_throttle, which notifies the controller
        self.scl_speed_controller.set(0)
    def on_zero_heading(self):
        """ event handler for heading change """
        # only apply in manual mode
        self.scl_rudder.set(0)
    def on_rudder(self, value):
        """ event handler for heading change """
        if self._view_controller.auto_mode_enabled:
            # auto mode
            self._view_controller.set_heading(value)
        else:
            # manual mode
            self._view_controller.set_steering(value)
    def on_throttle(self, value):
        """ event handler for throttle change """
        if self._view_controller.auto_mode_enabled:
            # auto mode
            self._view_controller.set_speed(value)
        else:
            # manual mode
            self._view_controller.set_throttle(value)
class RouteFrame(Frame, object):
    """ UI Frame with buttons for user interactions.

    Route-planning panel: a waypoint listbox, load/save GPX buttons (save and
    planner are placeholders, disabled), and an image-capture toggle.
    """
    def __init__(self, master, view_controller):
        super(RouteFrame, self).__init__(master, bd=1, relief=SUNKEN)
        self._view_controller = view_controller
        Label(self, text = "Route Planning:", pady=6, anchor=W, justify=LEFT).grid(row=0, columnspan=4, sticky=W)
        # waypoints list
        Label(self, text = "Waypoint", pady=3, anchor=W, justify=LEFT).grid(row=1, column=0, sticky=W)
        Label(self, text = "Latitude", pady=3, anchor=W, justify=LEFT).grid(row=1, column=1, sticky=W)
        Label(self, text = "Longitude", pady=3, anchor=W, justify=LEFT).grid(row=1, column=2, sticky=W)
        Label(self, text = "Remove", pady=3, anchor=W, justify=LEFT).grid(row=1, column=3, sticky=W)
        self.lstbx_waypoints = Listbox(self, height=10)
        self.lstbx_waypoints.grid(row=2, rowspan=10, columnspan=4, padx=3, sticky=NSEW)
        # load / save route
        self.btn_load = Button(self, text="Load GPX", command=self.on_load_gpx)
        self.btn_load.grid(row=13, column=0)
        self.btn_save = Button(self, text="Save GPX", state=DISABLED, command=self.on_save_gpx)
        self.btn_save.grid(row=13, column=1)
        # BUG FIX: button label typo "Plannner" corrected to "Planner"
        self.btn_route = Button(self, pady=4, text="Open Planner", state=DISABLED, command=self.on_open_planner)
        self.btn_route.grid(row=13, column=2, columnspan=2)
        # misc
        Checkbutton(self, text = "Capture Images", variable=view_controller.model.capture_img_enabled).grid(row=15, column=2, columnspan=2, sticky=E)
    def on_load_gpx(self):
        """ event handler for loading gpx file """
        # controller loads the file into model.waypoints; refresh the listbox
        self._view_controller.load_gpx()
        self.lstbx_waypoints.delete(0,END)
        for item in self._view_controller.model.waypoints:
            self.lstbx_waypoints.insert(END, item)
    def on_save_gpx(self):
        """ event handler for save gpx file """
        self._view_controller.save_gpx()
    def on_open_planner(self):
        """ event handler for planner window """
        # not implemented yet (button is disabled)
        pass
|
FishPi/FishPi-POCV---Command---Control
|
fishpi/ui/main_view_tk.py
|
Python
|
bsd-2-clause
| 13,484
|
# -*- coding: utf-8 -*-
from ccxt.async.base.exchange import Exchange
import base64
import hashlib
import math
import json
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import NotSupported
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import InvalidOrder
class gdax (Exchange):
    def describe(self):
        """Return the static description/metadata for the GDAX exchange.

        Deep-merges this exchange's id, capabilities, timeframes, URLs,
        endpoint map, required credentials and fee schedule over the base
        Exchange description.
        """
        return self.deep_extend(super(gdax, self).describe(), {
            'id': 'gdax',
            'name': 'GDAX',
            'countries': 'US',
            'rateLimit': 1000,
            'userAgent': self.userAgents['chrome'],
            'hasCORS': True,
            'hasFetchOHLCV': True,
            'hasDeposit': True,
            'hasWithdraw': True,
            'hasFetchOrder': True,
            'hasFetchOrders': True,
            'hasFetchOpenOrders': True,
            'hasFetchClosedOrders': True,
            # timeframe label -> candle granularity in seconds
            'timeframes': {
                '1m': 60,
                '5m': 300,
                '15m': 900,
                '30m': 1800,
                '1h': 3600,
                '2h': 7200,
                '4h': 14400,
                '12h': 43200,
                '1d': 86400,
                '1w': 604800,
                '1M': 2592000,
                '1y': 31536000,
            },
            'urls': {
                'test': 'https://api-public.sandbox.gdax.com',
                'logo': 'https://user-images.githubusercontent.com/1294454/27766527-b1be41c6-5edb-11e7-95f6-5b496c469e2c.jpg',
                'api': 'https://api.gdax.com',
                'www': 'https://www.gdax.com',
                'doc': 'https://docs.gdax.com',
                'fees': [
                    'https://www.gdax.com/fees',
                    'https://support.gdax.com/customer/en/portal/topics/939402-depositing-and-withdrawing-funds/articles',
                ],
            },
            'requiredCredentials': {
                'apiKey': True,
                'secret': True,
                'password': True,
            },
            # REST endpoint map; names become camelCased methods,
            # e.g. publicGetProductsIdBook, privatePostOrders
            'api': {
                'public': {
                    'get': [
                        'currencies',
                        'products',
                        'products/{id}/book',
                        'products/{id}/candles',
                        'products/{id}/stats',
                        'products/{id}/ticker',
                        'products/{id}/trades',
                        'time',
                    ],
                },
                'private': {
                    'get': [
                        'accounts',
                        'accounts/{id}',
                        'accounts/{id}/holds',
                        'accounts/{id}/ledger',
                        'coinbase-accounts',
                        'fills',
                        'funding',
                        'orders',
                        'orders/{id}',
                        'payment-methods',
                        'position',
                        'reports/{id}',
                        'users/self/trailing-volume',
                    ],
                    'post': [
                        'deposits/coinbase-account',
                        'deposits/payment-method',
                        'funding/repay',
                        'orders',
                        'position/close',
                        'profiles/margin-transfer',
                        'reports',
                        'withdrawals/coinbase',
                        'withdrawals/crypto',
                        'withdrawals/payment-method',
                    ],
                    'delete': [
                        'orders',
                        'orders/{id}',
                    ],
                },
            },
            'fees': {
                'trading': {
                    'tierBased': True,  # complicated tier system per coin
                    'percentage': True,
                    'maker': 0.0,
                    'taker': 0.30 / 100,  # worst-case scenario: https://www.gdax.com/fees/BTC-USD
                },
                'funding': {
                    'tierBased': False,
                    'percentage': False,
                    'withdraw': {
                        'BCH': 0,
                        'BTC': 0,
                        'LTC': 0,
                        'ETH': 0,
                        'EUR': 0.15,
                        'USD': 25,
                    },
                    'deposit': {
                        'BCH': 0,
                        'BTC': 0,
                        'LTC': 0,
                        'ETH': 0,
                        'EUR': 0.15,
                        'USD': 10,
                    },
                },
            },
        })
async def fetch_markets(self):
markets = await self.publicGetProducts()
result = []
for p in range(0, len(markets)):
market = markets[p]
id = market['id']
base = market['base_currency']
quote = market['quote_currency']
symbol = base + '/' + quote
amountLimits = {
'min': market['base_min_size'],
'max': market['base_max_size'],
}
priceLimits = {
'min': market['quote_increment'],
'max': None,
}
costLimits = {
'min': priceLimits['min'],
'max': None,
}
limits = {
'amount': amountLimits,
'price': priceLimits,
'cost': costLimits,
}
precision = {
'amount': -math.log10(float(amountLimits['min'])),
'price': -math.log10(float(priceLimits['min'])),
}
taker = self.fees['trading']['taker']
if (base == 'ETH') or (base == 'LTC'):
taker = 0.003
result.append(self.extend(self.fees['trading'], {
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'info': market,
'precision': precision,
'limits': limits,
'taker': taker,
}))
return result
async def fetch_balance(self, params={}):
await self.load_markets()
balances = await self.privateGetAccounts()
result = {'info': balances}
for b in range(0, len(balances)):
balance = balances[b]
currency = balance['currency']
account = {
'free': float(balance['available']),
'used': float(balance['hold']),
'total': float(balance['balance']),
}
result[currency] = account
return self.parse_balance(result)
    async def fetch_order_book(self, symbol, params={}):
        """Fetch the level-2 (aggregated) order book for *symbol*."""
        await self.load_markets()
        orderbook = await self.publicGetProductsIdBook(self.extend({
            'id': self.market_id(symbol),
            'level': 2,  # 1 best bidask, 2 aggregated, 3 full
        }, params))
        return self.parse_order_book(orderbook)
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
request = self.extend({
'id': market['id'],
}, params)
ticker = await self.publicGetProductsIdTicker(request)
timestamp = self.parse8601(ticker['time'])
bid = None
ask = None
if 'bid' in ticker:
bid = float(ticker['bid'])
if 'ask' in ticker:
ask = float(ticker['ask'])
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': None,
'low': None,
'bid': bid,
'ask': ask,
'vwap': None,
'open': None,
'close': None,
'first': None,
'last': self.safe_float(ticker, 'price'),
'change': None,
'percentage': None,
'average': None,
'baseVolume': float(ticker['volume']),
'quoteVolume': None,
'info': ticker,
}
def parse_trade(self, trade, market=None):
timestamp = self.parse8601(trade['time'])
side = 'sell' if (trade['side'] == 'buy') else 'buy'
symbol = None
if market:
symbol = market['symbol']
fee = None
if 'fill_fees' in trade:
fee = {
'cost': float(trade['fill_fees']),
'currency': market['quote'],
}
return {
'id': str(trade['trade_id']),
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'type': None,
'side': side,
'price': float(trade['price']),
'amount': float(trade['size']),
'fee': fee,
}
    async def fetch_trades(self, symbol, since=None, limit=None, params={}):
        """Fetch recent public trades for *symbol*.

        *since* and *limit* are applied client-side by parse_trades.
        """
        await self.load_markets()
        market = self.market(symbol)
        response = await self.publicGetProductsIdTrades(self.extend({
            'id': market['id'],  # fixes issue #2
        }, params))
        return self.parse_trades(response, market, since, limit)
def parse_ohlcv(self, ohlcv, market=None, timeframe='1m', since=None, limit=None):
return [
ohlcv[0] * 1000,
ohlcv[3],
ohlcv[2],
ohlcv[1],
ohlcv[4],
ohlcv[5],
]
    async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
        """Fetch historic candles for *symbol*.

        When *since* is given, an explicit [start, end] window is computed
        from *limit* candles of the requested granularity; GDAX serves at
        most 200 candles per request.
        """
        await self.load_markets()
        market = self.market(symbol)
        granularity = self.timeframes[timeframe]  # candle width in seconds
        request = {
            'id': market['id'],
            'granularity': granularity,
        }
        if since:
            request['start'] = self.iso8601(since)
            if not limit:
                limit = 200  # max = 200
            # end = since + limit candles (granularity is s, since is ms)
            request['end'] = self.iso8601(limit * granularity * 1000 + since)
        response = await self.publicGetProductsIdCandles(self.extend(request, params))
        return self.parse_ohlcvs(response, market, timeframe, since, limit)
async def fetch_time(self):
response = self.publicGetTime()
return self.parse8601(response['iso'])
def parse_order_status(self, status):
statuses = {
'pending': 'open',
'active': 'open',
'open': 'open',
'done': 'closed',
'canceled': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
timestamp = self.parse8601(order['created_at'])
symbol = None
if not market:
if order['product_id'] in self.markets_by_id:
market = self.markets_by_id[order['product_id']]
status = self.parse_order_status(order['status'])
price = self.safe_float(order, 'price')
amount = self.safe_float(order, 'size')
filled = self.safe_float(order, 'filled_size')
remaining = amount - filled
cost = self.safe_float(order, 'executed_value')
if market:
symbol = market['symbol']
return {
'id': order['id'],
'info': order,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'status': status,
'symbol': symbol,
'type': order['type'],
'side': order['side'],
'price': price,
'cost': cost,
'amount': amount,
'filled': filled,
'remaining': remaining,
'fee': None,
}
    async def fetch_order(self, id, symbol=None, params={}):
        """Fetch a single order by exchange id (symbol is not required by GDAX)."""
        await self.load_markets()
        response = await self.privateGetOrdersId(self.extend({
            'id': id,
        }, params))
        return self.parse_order(response)
    async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
        """Fetch orders in any status, optionally filtered by *symbol*."""
        await self.load_markets()
        request = {
            'status': 'all',
        }
        market = None
        if symbol:
            market = self.market(symbol)
            request['product_id'] = market['id']
        response = await self.privateGetOrders(self.extend(request, params))
        return self.parse_orders(response, market, since, limit)
    async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
        """Fetch open orders (GDAX default status filter), optionally by *symbol*."""
        await self.load_markets()
        request = {}
        market = None
        if symbol:
            market = self.market(symbol)
            request['product_id'] = market['id']
        response = await self.privateGetOrders(self.extend(request, params))
        return self.parse_orders(response, market, since, limit)
    async def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
        """Fetch completed ('done') orders, optionally filtered by *symbol*."""
        await self.load_markets()
        request = {
            'status': 'done',
        }
        market = None
        if symbol:
            market = self.market(symbol)
            request['product_id'] = market['id']
        response = await self.privateGetOrders(self.extend(request, params))
        return self.parse_orders(response, market, since, limit)
    async def create_order(self, market, type, side, amount, price=None, params={}):
        """Place an order.

        NOTE(review): the first parameter is a symbol string despite being
        named *market* -- it is passed straight to self.market_id().
        *price* is only sent for 'limit' orders.
        """
        await self.load_markets()
        # oid = str(self.nonce())
        order = {
            'product_id': self.market_id(market),
            'side': side,
            'size': amount,
            'type': type,
        }
        if type == 'limit':
            order['price'] = price
        response = await self.privatePostOrders(self.extend(order, params))
        return {
            'info': response,
            'id': response['id'],
        }
async def cancel_order(self, id, symbol=None, params={}):
await self.load_markets()
return await self.privateDeleteOrdersId({'id': id})
async def get_payment_methods(self):
response = await self.privateGetPaymentMethods()
return response
    async def deposit(self, currency, amount, address, params={}):
        """Deposit funds into the GDAX account.

        Requires exactly one of the extra params 'payment_method_id' (bank
        account) or 'coinbase_account_id' (linked Coinbase account); *address*
        is accepted for interface symmetry but not used here.
        """
        await self.load_markets()
        request = {
            'currency': currency,
            'amount': amount,
        }
        method = 'privatePostDeposits'
        if 'payment_method_id' in params:
            # deposit from a payment_method, like a bank account
            method += 'PaymentMethod'
        elif 'coinbase_account_id' in params:
            # deposit into GDAX account from a Coinbase account
            method += 'CoinbaseAccount'
        else:
            # otherwise we did not receive a supported deposit location
            # relevant docs link for the Googlers
            # https://docs.gdax.com/#deposits
            raise NotSupported(self.id + ' deposit() requires one of `coinbase_account_id` or `payment_method_id` extra params')
        response = await getattr(self, method)(self.extend(request, params))
        if not response:
            raise ExchangeError(self.id + ' deposit() error: ' + self.json(response))
        return {
            'info': response,
            'id': response['id'],
        }
    async def withdraw(self, currency, amount, address, params={}):
        """Withdraw funds from the GDAX account.

        Routes to a payment method or Coinbase account when the matching id
        is in *params*; otherwise performs a crypto withdrawal to *address*.
        """
        await self.load_markets()
        request = {
            'currency': currency,
            'amount': amount,
        }
        method = 'privatePostWithdrawals'
        if 'payment_method_id' in params:
            method += 'PaymentMethod'
        elif 'coinbase_account_id' in params:
            method += 'CoinbaseAccount'
        else:
            # default: on-chain withdrawal to the given crypto address
            method += 'Crypto'
            request['crypto_address'] = address
        response = await getattr(self, method)(self.extend(request, params))
        if not response:
            raise ExchangeError(self.id + ' withdraw() error: ' + self.json(response))
        return {
            'info': response,
            'id': response['id'],
        }
    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Build the signed request for a public or private endpoint.

        Private requests carry the CB-ACCESS-* headers: an HMAC-SHA256 of
        nonce + method + path + body, keyed with the base64-decoded API
        secret and base64-encoded back into the signature header.
        """
        request = '/' + self.implode_params(path, params)
        query = self.omit(params, self.extract_params(path))
        if method == 'GET':
            if query:
                request += '?' + self.urlencode(query)
        url = self.urls['api'] + request
        if api == 'private':
            self.check_required_credentials()
            nonce = str(self.nonce())
            payload = ''
            if method != 'GET':
                if query:
                    body = self.json(query)
                    payload = body
            # payload = body if (body) else ''
            what = nonce + method + request + payload
            secret = base64.b64decode(self.secret)
            signature = self.hmac(self.encode(what), secret, hashlib.sha256, 'base64')
            headers = {
                'CB-ACCESS-KEY': self.apiKey,
                'CB-ACCESS-SIGN': self.decode(signature),
                'CB-ACCESS-TIMESTAMP': nonce,
                'CB-ACCESS-PASSPHRASE': self.password,
                'Content-Type': 'application/json',
            }
        return {'url': url, 'method': method, 'body': body, 'headers': headers}
    def handle_errors(self, code, reason, url, method, headers, body):
        """Translate HTTP 400 responses into typed ccxt exceptions."""
        if code == 400:
            if body[0] == "{":
                response = json.loads(body)
                message = response['message']
                if message.find('price too small') >= 0:
                    raise InvalidOrder(self.id + ' ' + message)
                elif message.find('price too precise') >= 0:
                    raise InvalidOrder(self.id + ' ' + message)
                elif message == 'Invalid API Key':
                    raise AuthenticationError(self.id + ' ' + message)
                # unrecognized JSON error message: generic exchange error
                raise ExchangeError(self.id + ' ' + self.json(response))
            # non-JSON 400 body
            raise ExchangeError(self.id + ' ' + body)
    async def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Issue a request and raise ExchangeError on any 'message' payload.

        Errors that slipped past handle_errors (non-400 codes) are caught by
        inspecting the decoded response for a 'message' key.
        """
        response = await self.fetch2(path, api, method, params, headers, body)
        if 'message' in response:
            raise ExchangeError(self.id + ' ' + self.json(response))
        return response
|
tritoanst/ccxt
|
python/ccxt/async/gdax.py
|
Python
|
mit
| 18,421
|
"""Stemgraphic.graphic.
Stemgraphic provides a complete set of functions to handle everything related to stem-and-leaf plots.
Stemgraphic.graphic is a module implementing a graphical stem-and-leaf plot function and a stem-and-leaf heatmap plot
function for numerical data. It also provides a density_plot
"""
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
import numpy as np
import pandas as pd
try:
from plotly.offline import iplot_mpl
plotly_module = True
except ModuleNotFoundError:
plotly_module = False
import seaborn as sns
from warnings import warn
from .helpers import jitter, key_calc, legend, min_max_count, dd
from .text import quantize, stem_data
def density_plot(
df,
var=None,
ax=None,
bins=None,
box=None,
density=True,
density_fill=True,
display=1000,
fig_only=True,
fit=None,
hist=None,
hues=None,
hue_labels=None,
jitter=None,
kind=None,
leaf_order=1,
legend=True,
limit_var=False,
norm_hist=None,
random_state=None,
rug=None,
scale=None,
singular=True,
strip=None,
swarm=None,
title=None,
violin=None,
x_min=0,
x_max=None,
y_axis_label=True,
):
"""density_plot.
Various density and distribution plots conveniently packaged into one function. Density plot normally forces
tails at each end which might go beyond the data. To force min/max to be driven by the data, use limit_var.
To specify min and max use x_min and x_max instead. Nota Bene: defaults to _decimation_ and _quantization_ mode.
See density_plot notebook for examples of the different combinations of plots.
Why this instead of seaborn:
Stem-and-leaf plots naturally quantize data. The amount of loss is based on scale and leaf_order and on the data
itself. This function which wraps several seaborn distribution plots was added in order to compare various
measures of density and distributions based on various levels of decimation (sampling, set through display)
and of quantization (set through scale and leaf_order). Also, there is no option in seaborn to fill the area
under the curve...
:param df: list, numpy array, time series, pandas or dask dataframe
:param var: variable to plot, required if df is a dataframe
:param ax: matplotlib axes instance, usually from a figure or other plot
:param bins: Specification of hist bins, or None to use Freedman-Diaconis rule
:param box: bool, if True plots a box plot. Similar to using violin, use one or the other
:param density: bool, if True (default) plots a density plot
:param density_fill: bool, if True (default) fill the area under the density curve
:param display: maximum number rows to use (1000 default) for calculations, forces sampling if < len(df)
:param fig_only: bool, if True (default) returns fig, ax, else returns fix, ax, max_peak, true_min, true_max
:param fit: object with fit method, returning a tuple that can be passed to a pdf method
:param hist: bool, if True plot a histogram
:param hues: optional, a categorical variable for multiple plots
:param hue_labels: optional, if using a column that is an object and/or categorical needing translation
:param jitter: for strip plots only, add jitter. strip + jitter is similar to using swarm, use one or the other
:param leaf_order: the order of magnitude of the leaf. The higher the order, the less quantization.
:param legend: bool, if True plots a legend
:param limit_var: use min / max from the data, not density plot
:param norm_hist: bool, if True histogram will be normed
:param random_state: initial random seed for the sampling process, for reproducible research
:param rug: bool, if True plot a rug plot
:param scale: force a specific scale for building the plot. Defaults to None (automatic).
:param singular: force display of a density plot using a singular value, by simulating values of each side
:param strip: bool, if True displays a strip plot
:param swarm: swarm plot, similar to strip plot. use one or the other
:param title: if present, adds a title to the plot
:param violin: bool, if True plots a violin plot. Similar to using box, use one or the other
:param x_min: force X axis minimum value. See also limit_var
:param x_max: force Y axis minimum value. See also limit_var
:param y_axis_label: bool, if True displays y axis ticks and label
:return: see fig_only
"""
if kind:
if "box" in kind:
box = True
if "hist" in kind:
hist = True
if "rug" in kind:
rug = True
if "strip" in kind:
strip = True
if "swarm" in kind:
swarm = True
if "violin" in kind:
violin = True
max_peak = 0
peak_y = 0
true_min = "nan"
true_max = "nan"
if ax is None:
fig, ax = plt.subplots(figsize=(20, 16))
else:
fig = ax.get_figure()
if title:
ax.set_title(title)
hue_categories = sorted(df[hues].dropna().unique()) if hues else ["all"]
hue_labels = hue_labels if hue_labels else hue_categories
for i, hue_val in enumerate(hue_categories):
ignore = False
if hue_val == "all":
to_plot = df[var] if var else df
else:
to_plot = df[var][df[hues] == hue_val] if var else df
if leaf_order:
to_plot = quantize(
to_plot,
display=display,
leaf_order=leaf_order,
random_state=random_state,
)
elif display:
to_plot = to_plot.sample(n=display)
if density and len(to_plot) == 1:
if singular:
try:
to_plot = pd.Series(
[to_plot.values[0] * 0.995, to_plot.values[0] * 1.005]
)
except AttributeError:
to_plot = pd.Series([to_plot[0] * 0.995, to_plot[0] * 1.005])
else:
warn(
"Cannot plot a density plot using a singular value. Use singular=True to simulate extra data points.."
)
return None
if density or hist or rug or fit:
import warnings
warnings.filterwarnings("ignore")
sns.distplot(
x=to_plot,
ax=ax,
bins=bins,
fit=fit,
hist=hist,
kde=density,
norm_hist=norm_hist,
rug=rug,
)
try:
line = ax.lines[i]
except IndexError:
# facet has eliminated some conditions, continue iteration
continue
x = line.get_xydata()[:, 0]
y = line.get_xydata()[:, 1]
true_min = min(x)
true_max = max(x)
peak_y = max(y)
if density and density_fill:
ax.fill_between(x, y, alpha=0.2)
if peak_y > max_peak and not ignore:
max_peak = peak_y
if strip and swarm:
warn(
"Cannot plot a strip and swarm plot, they share the same space. Choose one."
)
return None
if box and violin:
warn(
"Cannot plot a box and violin plot, they share the same space. Choose one."
)
return None
if box or strip or swarm or violin:
ax2 = ax.twinx()
all = df[var].dropna()
if hue_val == "all":
if strip:
if jitter:
sns.stripplot(x=all, jitter=jitter, ax=ax2)
else:
sns.stripplot(x=all, ax=ax2)
elif swarm:
sns.swarmplot(x=all, ax=ax2)
if box:
sns.boxplot(x=all, ax=ax2)
elif violin:
sns.violinplot(x=all, ax=ax2)
else:
# outside the visible area, for legend
ax.scatter(
0, max_peak + 1, marker="s", c="C{}".format(len(hues)), label="all"
)
if strip:
if jitter:
sns.stripplot(
x=all, jitter=jitter, ax=ax2, color="C{}".format(len(hues))
)
else:
sns.stripplot(x=all, ax=ax2, color="C{}".format(len(hues)))
elif swarm:
sns.swarmplot(x=all, ax=ax2, color="C{}".format(len(hues)))
if box:
sns.boxplot(x=all, ax=ax2, color="C{}".format(len(hues)))
elif violin:
sns.violinplot(x=all, ax=ax2, color="C{}".format(len(hues)))
hue_labels += ["all"]
ax2.set(ylim=(-0.01 if violin else -0.3, 10 if (box or violin) else 4))
if limit_var:
true_min = min(to_plot)
true_max = max(to_plot)
ax.set_xlim(true_min, true_max)
elif x_max:
ax.set_xlim(x_min, x_max)
if density or hist or rug:
if swarm or (strip and jitter):
ax.set_ylim(-0.006, max_peak + 0.006)
else:
ax.set_ylim(0, max_peak + 0.006)
if legend:
ax.legend(
hue_labels, ncol=3, loc="upper right", fontsize="medium", frameon=False
)
if not y_axis_label:
ax.axes.get_yaxis().set_visible(False)
ax.axes.set_xlabel("")
plt.box(False)
sns.despine(left=True, bottom=True, top=True, right=True)
if fig_only:
return fig, ax
else:
return fig, ax, max_peak, true_min, true_max
def heatmap(
    df,
    annotate=False,
    asFigure=False,
    ax=None,
    caps=None,
    column=None,
    compact=False,
    display=900,
    flip_axes=False,
    interactive=True,
    leaf_order=1,
    persistence=None,
    random_state=None,
    scale=None,
    trim=False,
    trim_blank=True,
    unit="",
    zoom=None,
):
    """heatmap.

    The heatmap displays the same underlying data as the stem-and-leaf plot, but instead of stacking the leaves,
    they are left in their respective columns. Row '42' and Column '7' would have the count of numbers starting
    with '427' of the given scale. by opposition to the text heatmap, the graphical heatmap does not remove
    empty rows by default. To activate this feature, use compact=True.

    The heatmap is useful to look at patterns. For distribution, stem_graphic is better suited.

    :param df: list, numpy array, time series, pandas or dask dataframe
    :param annotate: display annotations (Z) on heatmap
    :param asFigure: return plot as plotly figure (for web applications)
    :param ax: matplotlib axes instance, usually from a figure or other plot
    :param caps: for compatibility
    :param column: specify which column (string or number) of the dataframe to use,
                   else the first numerical is selected
    :param compact: do not display empty stem rows (with no leaves), defaults to False
    :param display: maximum number of data points to display, forces sampling if smaller than len(df)
    :param flip_axes: bool, default is False
    :param interactive: if cufflinks is loaded, renders as interactive plot in notebook
    :param leaf_order: how many leaf digits per data point to display, defaults to 1
    :param persistence: filename. save sampled data to disk, either as pickle (.pkl) or csv (any other extension)
    :param random_state: initial random seed for the sampling process, for reproducible research
    :param scale: force a specific scale for building the plot. Defaults to None (automatic).
    :param trim: ranges from 0 to 0.5 (50%) to remove from each end of the data set, defaults to None
    :param trim_blank: remove the blank between the delimiter and the first leaf, defaults to True
    :param unit: specify a string for the unit ('$', 'Kg'...). Used for outliers and for legend, defaults to ''
    :param zoom: zoom level, on top of calculated scale (+1, -1 etc)
    :return: count matrix, scale and matplotlib ax or figure if interactive and asFigure are True
    """
    try:
        cols = len(df.columns)
    except AttributeError:
        # wasn't a multi column data frame, might be a list
        cols = 1
    if cols > 1:
        if column is None:
            # We have to figure out the first numerical column on our own
            start_at = 1 if df.columns[0] == "id" else 0
            for i in range(start_at, len(df.columns)):
                if df.dtypes[i] in ("int64", "float64"):
                    column = i
                    break
        if dd:
            df = df[df.columns.values[column]]
        else:
            df = df.loc[:, column]
    min_val, max_val, total_rows = min_max_count(df)

    scale_factor, pair, rows = stem_data(
        df,
        break_on=None,
        column=column,
        compact=compact,
        display=display,
        leaf_order=leaf_order,
        omin=min_val,
        omax=max_val,
        outliers=False,
        persistence=persistence,
        random_state=random_state,
        scale=scale,
        total_rows=total_rows,
        trim=trim,
        zoom=zoom,
    )

    max_leaves = len(max(rows, key=len))

    if max_leaves > display / 3:
        # more than 1/3 on a single stem, let's try one more time
        # with a different random seed to get a better distribution
        if random_state:
            random_state += 1
        scale_factor2, pair2, rows2 = stem_data(
            df,
            break_on=None,
            column=column,
            compact=compact,
            display=display,
            leaf_order=1,
            omin=min_val,
            omax=max_val,
            outliers=False,
            persistence=persistence,
            random_state=random_state,
            scale=scale,
            total_rows=total_rows,
            trim=trim,
            zoom=zoom,
        )
        max_leaves2 = len(max(rows2, key=len))
        if max_leaves2 < max_leaves:
            # the second sampling produced a flatter plot, keep it
            max_leaves = max_leaves2
            scale_factor = scale_factor2
            pair = pair2
            rows = rows2

    split_rows = [i.split("|") for i in rows]

    # redo the leaves in a matrix form
    # this should be refactored as an option for stem_data, like rows_only for ngram_data
    matrix = []
    for stem, leaves in split_rows:
        row_count = [stem]
        for num in "0123456789":
            row_count.append(leaves.count(num))
        matrix.append(row_count)

    num_matrix = pd.DataFrame(
        matrix, columns=["stem", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
    )
    num_matrix.set_index("stem", inplace=True)
    if flip_axes:
        num_matrix = num_matrix.T
    if trim_blank:
        # BUG FIX: applymap returns a new DataFrame; the result was previously
        # discarded, which made trim_blank a silent no-op.
        num_matrix = num_matrix.applymap(lambda x: x.strip() if type(x) is str else x)

    title = "Stem-and-leaf heatmap ({} x {} {})".format(
        pair.replace("|", "."), scale_factor, unit
    )
    if interactive:
        try:
            fig = num_matrix.iplot(kind="heatmap", asFigure=asFigure, title=title)
        except AttributeError:
            # cufflinks not loaded, fall back to a static seaborn heatmap
            if ax is None:
                fig, ax = plt.subplots(figsize=(9, 9))
            else:
                # BUG FIX: fig was previously left undefined on this path,
                # raising NameError at return when asFigure is True.
                fig = ax.get_figure()
            plt.yticks(rotation=0)
            ax.set_title(title)
            sns.heatmap(num_matrix, annot=annotate, ax=ax)
    else:
        if ax is None:
            fig, ax = plt.subplots(figsize=(12, 12))
        else:
            # BUG FIX: see above — keep fig defined for the asFigure return.
            fig = ax.get_figure()
        plt.yticks(rotation=0)
        ax.set_title(title)
        sns.heatmap(num_matrix, annot=annotate, ax=ax)
    return num_matrix, scale_factor, fig if asFigure else ax
def leaf_scatter(
    df,
    alpha=0.1,
    asc=True,
    ax=None,
    break_on=None,
    column=None,
    compact=False,
    delimiter_color="C3",
    display=900,
    figure_only=True,
    flip_axes=False,
    font_kw=None,
    grid=False,
    interactive=True,
    leaf_color="k",
    leaf_jitter=False,
    leaf_order=1,
    legend_pos="best",
    mirror=False,
    persistence=None,
    primary_kw=None,
    random_state=None,
    scale=None,
    scaled_leaf=True,
    zoom=None,
):
    """leaf_scatter.

    Scatter for numerical values based on leaf for X axis (scaled or not) and stem for Y axis.

    :param df: list, numpy array, time series, pandas or dask dataframe
    :param alpha: opacity of the dots, defaults to 10%
    :param asc: stem (Y axis) sorted in ascending order, defaults to True
    :param ax: matplotlib axes instance, usually from a figure or other plot
    :param break_on: force a break of the leaves at x in (5, 10), defaults to 10
    :param column: specify which column (string or number) of the dataframe to use,
                   else the first numerical is selected
    :param compact: do not display empty stem rows (with no leaves), defaults to False
    :param delimiter_color: color of the line between aggregate and stem and stem and leaf
    :param display: maximum number of data points to display, forces sampling if smaller than len(df)
    :param figure_only: bool if True (default) returns matplotlib (fig,ax), False returns (fig,ax,df)
    :param flip_axes: X becomes Y and Y becomes X
    :param font_kw: keyword dictionary, font parameters
    :param grid: show grid
    :param interactive: if plotly is available, renders as interactive plot in notebook. False to render image.
    :param leaf_color: font color of the leaves
    :param leaf_jitter: add jitter to see density of each specific stem/leaf combo
    :param leaf_order: how many leaf digits per data point to display, defaults to 1
    :param legend_pos: One of 'top', 'bottom', 'best' or None, defaults to 'best'.
    :param mirror: mirror the plot in the axis of the delimiters
    :param persistence: filename. save sampled data to disk, either as pickle (.pkl) or csv (any other extension)
    :param primary_kw: stem-and-leaf plot additional arguments
    :param random_state: initial random seed for the sampling process, for reproducible research
    :param scale: force a specific scale for building the plot. Defaults to None (automatic).
    :param scaled_leaf: scale leafs, bool
    :param zoom: zoom level, on top of calculated scale (+1, -1 etc)
    :return: interactive plot if plotly is loaded, else (fig, ax), or (fig, ax, df) if figure_only is False
    """
    try:
        cols = len(df.columns)
    except AttributeError:
        # wasn't a multi column data frame, might be a list
        cols = 1
    if cols > 1:
        if column is None:
            # We have to figure out the first numerical column on our own
            start_at = 1 if df.columns[0] == "id" else 0
            for i in range(start_at, len(df.columns)):
                if df.dtypes[i] in ("int64", "float64"):
                    column = i
                    break
        df = df.loc[:, column].dropna()
    if font_kw is None:
        font_kw = {}
    if primary_kw is None:
        primary_kw = {}
    base_fontsize = font_kw.get("fontsize", 12)
    min_val, max_val, total_rows = min_max_count(df)
    fig = None
    # BUG FIX: leaf_alpha was only assigned inside the `leaf_color is None`
    # branch, so the default path (leaf_color="k") raised NameError at the
    # annotate call below. Default to fully opaque, matching stem_graphic.
    leaf_alpha = 1
    if leaf_color is None:
        leaf_color = "k"
        leaf_alpha = 0

    if total_rows == 0:
        warn("No data to plot")
        return None, None

    scale_factor, pair, rows, sorted_data, stems = stem_data(
        df,
        break_on=break_on,
        column=column,
        compact=compact,
        display=display,
        full=True,
        leaf_order=leaf_order,
        omin=min_val,
        omax=max_val,
        outliers=False,
        persistence=persistence,
        random_state=random_state,
        scale=scale,
        total_rows=total_rows,
        zoom=zoom,
    )
    st, lf = pair.split("|")
    if scaled_leaf:
        # leaves as single scaled digits (0-9)
        x = [abs(int(leaf * 10)) for leaf, stem in sorted_data]
    else:
        x = [abs(leaf) for leaf, stem in sorted_data]
    text_data = x
    if leaf_jitter:
        x = jitter(x, scale=1 if scaled_leaf else scale_factor)
    if total_rows <= display:
        # no sampling occurred, the raw sorted values are exact
        y = sorted(df)
    else:
        y = [(stem + leaf) * scale_factor for leaf, stem in sorted_data]
    if ax is None:
        fig, ax = plt.subplots(figsize=(10, 8))
    else:
        fig = ax.get_figure()
    ax.scatter(x, y, alpha=alpha, label="Ylab + X" if scaled_leaf else "Ylab + X * 10")
    for i, text in enumerate(text_data):
        ax.annotate(text, (x[i], y[i]), fontsize=base_fontsize, color=leaf_color, alpha=leaf_alpha)
    plt.box(on=None)
    # vertical delimiter between the stem axis and the leaves
    ax.axes.axvline(
        x=-0.5 if scaled_leaf else -0.5 / scale_factor, color=delimiter_color
    )
    ax.axes.get_xaxis().set_visible(False)
    if mirror:
        ax.set_ylim(ax.get_ylim()[::-1]) if flip_axes else ax.set_xlim(
            ax.get_xlim()[::-1]
        )
    if not asc:
        ax.set_xlim(ax.get_xlim()[::-1]) if flip_axes else ax.set_ylim(
            ax.get_ylim()[::-1]
        )
    if grid:
        plt.grid(axis="y")
    if legend_pos is not None:
        ax.legend()
    if plotly_module and interactive:
        return iplot_mpl(fig)
    elif figure_only:
        return fig, ax
    else:
        return fig, ax, df
def stem_graphic(
    df,
    df2=None,
    aggregation=True,
    alpha=0.1,
    asc=True,
    ax=None,
    ax2=None,
    bar_color="C0",
    bar_outline=None,
    break_on=None,
    column=None,
    combined=None,
    compact=False,
    delimiter_color="C3",
    display=900,
    figure_only=True,
    flip_axes=False,
    font_kw=None,
    leaf_color="k",
    leaf_order=1,
    legend_pos="best",
    median_alpha=0.25,
    median_color="C4",
    mirror=False,
    outliers=None,
    outliers_color="C3",
    persistence=None,
    primary_kw=None,
    random_state=None,
    scale=None,
    secondary=False,
    secondary_kw=None,
    secondary_plot=None,
    show_stem=True,
    title=None,
    trim=False,
    trim_blank=True,
    underline_color=None,
    unit="",
    zoom=None,
):
    """stem_graphic.

    A graphical stem and leaf plot. `stem_graphic` provides horizontal, vertical or mirrored layouts, sorted in
    ascending or descending order, with sane default settings for the visuals, legend, median and outliers.

    :param df: list, numpy array, time series, pandas or dask dataframe
    :param df2: string, filename, url, list, numpy array, time series, pandas or dask dataframe (optional).
                for back 2 back stem-and-leaf plots
    :param aggregation: Boolean for sum, else specify function
    :param alpha: opacity of the bars, median and outliers, defaults to 10%
    :param asc: stem sorted in ascending order, defaults to True
    :param ax: matplotlib axes instance, usually from a figure or other plot
    :param ax2: matplotlib axes instance, usually from a figure or other plot for back to back
    :param bar_color: the fill color of the bar representing the leaves
    :param bar_outline: the outline color of the bar representing the leaves
    :param break_on: force a break of the leaves at x in (5, 10), defaults to 10
    :param column: specify which column (string or number) of the dataframe to use,
                   else the first numerical is selected
    :param combined: list (specific subset to automatically include, say, for comparisons), or None
    :param compact: do not display empty stem rows (with no leaves), defaults to False
    :param delimiter_color: color of the line between aggregate and stem and stem and leaf
    :param display: maximum number of data points to display, forces sampling if smaller than len(df)
    :param figure_only: bool if True (default) returns matplotlib (fig,ax), False returns (fig,ax,df)
    :param flip_axes: X becomes Y and Y becomes X
    :param font_kw: keyword dictionary, font parameters
    :param leaf_color: font color of the leaves
    :param leaf_order: how many leaf digits per data point to display, defaults to 1
    :param legend_pos: One of 'top', 'bottom', 'best' or None, defaults to 'best'.
    :param median_alpha: opacity of median and outliers, defaults to 25%
    :param median_color: color of the box representing the median
    :param mirror: mirror the plot in the axis of the delimiters
    :param outliers: display outliers - these are from the full data set, not the sample. Defaults to Auto
    :param outliers_color: background color for the outlier boxes
    :param persistence: filename. save sampled data to disk, either as pickle (.pkl) or csv (any other extension)
    :param primary_kw: stem-and-leaf plot additional arguments
    :param random_state: initial random seed for the sampling process, for reproducible research
    :param scale: force a specific scale for building the plot. Defaults to None (automatic).
    :param secondary: bool if True, this is a secondary plot - mostly used for back-to-back plots
    :param secondary_kw: any matplotlib keyword supported by .plot(), for the secondary plot
    :param secondary_plot: One or more of 'dot', 'kde', 'margin_kde', 'rug' in a comma delimited string or None
    :param show_stem: bool if True (default) displays the stems
    :param title: string to display as title
    :param trim: ranges from 0 to 0.5 (50%) to remove from each end of the data set, defaults to None
    :param trim_blank: remove the blank between the delimiter and the first leaf, defaults to True
    :param underline_color: color of the horizontal line under the leaves, None for no display
    :param unit: specify a string for the unit ('$', 'Kg'...). Used for outliers and for legend, defaults to ''
    :param zoom: zoom level, on top of calculated scale (+1, -1 etc)
    :return: matplotlib figure and axes instance
    """
    try:
        cols = len(df.columns)
    except AttributeError:
        # wasn't a multi column data frame, might be a list
        cols = 1
    if cols > 1:
        if column is None:
            # We have to figure out the first numerical column on our own
            start_at = 1 if df.columns[0] == "id" else 0
            for i in range(start_at, len(df.columns)):
                if df.dtypes[i] in ("int64", "float64"):
                    column = i
                    break
        # if dd:
        #    df = df[df.columns.values[column]]
        # else:
        df = df.loc[:, column].dropna()
    if font_kw is None:
        font_kw = {}
    if primary_kw is None:
        primary_kw = {}
    # font settings, with sensible fall-backs for the aggregate and stem columns
    base_fontsize = font_kw.get("fontsize", 12)
    aggr_fontsize = font_kw.get("aggr_fontsize", base_fontsize - 2)
    aggr_fontweight = font_kw.get("aggr_fontweight", "normal")
    aggr_facecolor = font_kw.get("aggr_facecolor", None)
    aggr_fontcolor = font_kw.get("aggr_color", "k")

    stem_fontsize = font_kw.get("stem_fontsize", base_fontsize)
    stem_fontweight = font_kw.get("stem_fontweight", "normal")
    stem_facecolor = font_kw.get("stem_facecolor", None)
    stem_fontcolor = font_kw.get("stem_color", "k")

    pad = primary_kw.get("pad", 1.5)
    if outliers is None:
        # "Auto" mode: show outliers, but drop them again later if they
        # duplicate the first/last plotted values
        outliers = True
        remove_duplicate = True
    else:
        outliers = outliers
        remove_duplicate = False

    leaf_alpha = 1
    if leaf_color is None:
        leaf_color = "k"
        leaf_alpha = 0

    min_val, max_val, total_rows = min_max_count(df)

    if total_rows == 0:
        warn("No data to plot")
        return None, None

    scale_factor, pair, rows, _, stems = stem_data(
        df,
        break_on=break_on,
        column=column,
        compact=compact,
        display=display,
        full=True,
        leaf_order=leaf_order,
        omin=min_val,
        omax=max_val,
        outliers=False,
        persistence=persistence,
        random_state=random_state,
        scale=scale,
        total_rows=total_rows,
        trim=trim,
        zoom=zoom,
    )

    max_leaves = len(max(rows, key=len))

    if max_leaves > display / 3:
        # more than 1/3 on a single stem, let's try one more time
        if random_state:
            random_state += 1
        scale_factor2, pair2, rows2, _, stems2 = stem_data(
            df,
            break_on=break_on,
            column=column,
            compact=compact,
            display=display,
            full=True,
            leaf_order=leaf_order,
            omin=min_val,
            omax=max_val,
            outliers=False,
            persistence=persistence,
            random_state=random_state,
            scale=scale,
            total_rows=total_rows,
            trim=trim,
            zoom=zoom,
        )
        max_leaves2 = len(max(rows2, key=len))
        if max_leaves2 < max_leaves:
            max_leaves = max_leaves2
            scale_factor = scale_factor2
            pair = pair2
            rows = rows2
            stems = stems2

    st, lf = pair.split("|")
    n = display if total_rows > display else total_rows
    fig = None
    ax1 = None
    if flip_axes:
        height = max_leaves
        if height < 20:
            height = 20
        width = len(rows) + 3
    else:
        height = len(rows) + 3
        width = max_leaves / (max_leaves / 30)
        if width < 20:
            width = 20

    if combined is None:
        combined = stems

    aggr_offset = -0.5
    aggr_line_offset = 1
    if df2 is not None:
        # back-to-back: build the secondary plot on a mirrored left axes
        if flip_axes:
            warn(
                "Error: flip_axes is not available with back to back stem-and-leaf plots."
            )
            return None
        if ax2:
            scale = scale_factor
        min_val_df2, max_val_df2, total_rows = min_max_count(df2)

        scale_factor_df2, _, _, rows_df2, stems_df2 = stem_data(
            df2,
            break_on=break_on,
            column=column,
            compact=compact,
            display=display,
            full=True,
            leaf_order=leaf_order,
            omin=min_val_df2,
            omax=max_val_df2,
            outliers=outliers,
            persistence=persistence,
            random_state=random_state,
            scale=scale,
            total_rows=total_rows,
            trim=trim,
            zoom=zoom,
        )

        compact_combined = list(set(stems + stems_df2))
        combined_min = min(compact_combined)
        combined_max = max(compact_combined)
        if compact:
            combined = compact_combined
        else:
            combined = list(
                np.arange(combined_min, combined_max, 0.5 if break_on == 5 else 1)
            )
        cnt_offset_df2 = 0
        # BUG FIX: bounds check must come before the index, otherwise this
        # loop raises IndexError instead of stopping at the end of stems.
        while cnt_offset_df2 < len(stems) and stems[cnt_offset_df2] not in stems_df2:
            cnt_offset_df2 += 1

        max_leaves_df2 = len(max(rows_df2, key=len))

        total_width = max_leaves + max_leaves_df2  # / 2.2 + 3
        if total_width < 20:
            total_width = 20
        total_height = (
            combined_max + 1 - combined_min
        )  # cnt_offset_df2 + len(stems_df2)

        if ax2 is None:
            fig, (ax1, ax) = plt.subplots(
                1, 2, sharey=True, figsize=((total_width / 4), (total_height / 4))
            )
        else:
            ax1 = ax2
        ax1.set_xlim((-1, width + 0.05))
        ax1.set_ylim((-1, height + 0.05))
        plt.box(on=None)
        ax1.axes.get_yaxis().set_visible(False)
        ax1.axes.get_xaxis().set_visible(False)

        # recursive call renders the left-hand (mirrored) plot
        _ = stem_graphic(
            df2,  # NOQA
            alpha=alpha,
            ax=ax1,
            aggregation=mirror and aggregation,
            asc=asc,
            bar_color=bar_color,
            bar_outline=bar_outline,
            break_on=break_on,
            column=column,
            combined=combined,
            compact=compact,
            delimiter_color=delimiter_color,
            display=display,
            flip_axes=False,
            legend_pos=None,
            median_alpha=median_alpha,
            median_color=median_color,
            mirror=not mirror,
            outliers=outliers,
            random_state=random_state,
            secondary=True,
            secondary_kw=secondary_kw,
            secondary_plot=secondary_plot,
            show_stem=True,
            trim=trim,
            trim_blank=trim_blank,
            underline_color=underline_color,
            unit=unit,
            zoom=zoom,
        )
    else:
        total_width = width
        total_height = height

    if ax is None:
        fig = plt.figure(figsize=((width / 4), (total_height / 4)))
        ax = fig.add_axes(
            (0.05, 0.05, 0.9, 0.9),
            aspect="equal",
            frameon=False,
            xlim=(-1, width + 0.05),
            ylim=(-1, height + 0.05),
        )
    else:
        ax.set_xlim((-1, width + 0.05))
        ax.set_ylim((-1, height + 0.05))
        fig = ax.get_figure()
    plt.box(on=True)
    ax.axis("off")
    ax.axes.get_yaxis().set_visible(False)
    ax.axes.get_xaxis().set_visible(False)

    # Title
    if df2 is not None or secondary:
        title_offset = -2 if mirror else 4
    else:
        title_offset = 0 if mirror else 2
    if title:
        if flip_axes:
            ax.set_title(title, y=title_offset)
        else:
            ax.set_title(title, loc="center")

    # Offsets
    offset = 0
    if ax1 is not None:
        aggr_offset = -3.8
        aggr_line_offset = -0.5
    if mirror:
        ax.set_ylim(ax.get_ylim()[::-1]) if flip_axes else ax.set_xlim(
            ax.get_xlim()[::-1]
        )
        offset = -2 if secondary else 0.5
    if not asc:
        ax.set_xlim(ax.get_xlim()[::-1]) if flip_axes else ax.set_ylim(
            ax.get_ylim()[::-1]
        )

    tot = 0
    min_s = 99999999
    med = None
    first_val = None

    cnt_offset = 0
    # BUG FIX: guard both list lengths before indexing; the original checked
    # the bound after evaluating combined[cnt_offset], risking IndexError.
    while (cnt_offset < len(stems) and cnt_offset < len(combined)
           and combined[cnt_offset] not in stems):
        cnt_offset += 1
    for item_num, item in enumerate(rows):
        cnt = item_num + cnt_offset
        stem, leaf = item.split("|")
        if trim_blank:
            leaf = leaf.strip()
        if stem != " ":
            stem = stem.strip()
            last_stem = int(stem)
            if int(stem) < min_s:
                min_s = last_stem
        if first_val is None:
            first_val = key_calc(stem, leaf[0 if asc else -1], scale_factor)

        tot += int(len(leaf.strip()))  # TODO: currently only valid if leaf order is 1
        if tot > n / 2 and med is None and median_color is not None:
            med = abs(tot - n / 2 - len(leaf.strip()))
            if flip_axes:
                ax.text(
                    cnt,
                    2.5 + med / 2.23,
                    "_",
                    fontsize=base_fontsize,
                    color=leaf_color,
                    bbox={"facecolor": median_color, "alpha": median_alpha, "pad": pad},
                    alpha=leaf_alpha,
                    ha="left",
                    va="top" if mirror else "bottom",
                    rotation=90,
                )
            else:
                ax.text(
                    2.5 + med / 2.23,
                    cnt + (asc is False),
                    "_",
                    fontsize=base_fontsize,
                    color=leaf_color,  # NOQA
                    bbox={"facecolor": median_color, "alpha": median_alpha, "pad": pad},
                    alpha=leaf_alpha,
                    ha="left",
                    va="bottom",
                )
        if flip_axes:
            if (aggregation and secondary and not mirror) or (
                aggregation and not secondary
            ):
                ax.text(
                    cnt + offset,
                    0,
                    tot,
                    fontsize=aggr_fontsize,
                    rotation=90,
                    color=aggr_fontcolor,
                    bbox={"facecolor": aggr_facecolor, "alpha": alpha, "pad": pad}
                    if aggr_facecolor is not None
                    else {"alpha": 0},
                    fontweight=aggr_fontweight,
                    va="center",
                    ha="right" if mirror else "left",
                )
            # STEM
            if show_stem:
                ax.text(
                    cnt + offset,
                    1.5,
                    stem,
                    fontweight=stem_fontweight,
                    color=stem_fontcolor,
                    bbox={"facecolor": stem_facecolor, "alpha": alpha, "pad": pad}
                    if stem_facecolor is not None
                    else {"alpha": 0},
                    fontsize=stem_fontsize,
                    va="center",
                    ha="right" if mirror else "left",
                )
            # LEAF
            ax.text(
                cnt,
                2.1,
                leaf[::-1] if mirror else leaf,
                fontsize=base_fontsize,
                color=leaf_color,
                ha="left",
                va="top" if mirror else "bottom",
                rotation=90,
                alpha=leaf_alpha,
                bbox={
                    "facecolor": bar_color,
                    "edgecolor": bar_outline,
                    "alpha": alpha,
                    "pad": pad,
                },
            )
        else:
            if (aggregation and secondary and not mirror) or (
                aggregation and not secondary
            ):
                ax.text(
                    aggr_offset,
                    cnt + 0.5,
                    tot,
                    fontsize=aggr_fontsize,
                    color=aggr_fontcolor,
                    bbox={"facecolor": aggr_facecolor, "alpha": alpha, "pad": pad}
                    if aggr_facecolor is not None
                    else {"alpha": 0},
                    fontweight=aggr_fontweight,
                    va="center",
                    ha="right" if mirror else "left",
                )
            # STEM
            if show_stem:
                ax.text(
                    2.4,
                    cnt + 0.5,
                    stem,
                    fontweight=stem_fontweight,
                    color=stem_fontcolor,
                    bbox={"facecolor": stem_facecolor, "alpha": alpha, "pad": pad}
                    if stem_facecolor is not None
                    else {"alpha": 0},
                    fontsize=stem_fontsize,
                    va="center",
                    ha="left" if mirror else "right",
                )
            # LEAF
            ax.text(
                2.6,
                cnt + 0.5,
                leaf[::-1] if mirror else leaf,
                fontsize=base_fontsize,
                va="center",
                ha="right" if mirror else "left",
                color=leaf_color,
                alpha=leaf_alpha,
                bbox={
                    "facecolor": bar_color,
                    "edgecolor": bar_outline,
                    "alpha": alpha,
                    "pad": pad,
                },
            )
            if underline_color:
                ax.hlines(cnt, 2.6, 2.6 + len(leaf) / 2, color=underline_color)
    last_val = key_calc(last_stem, leaf, scale_factor)
    if (
        remove_duplicate
        and (np.isclose(first_val, min_val) or np.isclose(first_val, max_val))
        and (np.isclose(last_val, min_val) or np.isclose(last_val, max_val))
    ):  # NOQA
        outliers = False
    cur_font = FontProperties()
    if flip_axes:
        ax.hlines(2, min_s, min_s + 1 + cnt, color=delimiter_color, alpha=0.7)
        if (aggregation and secondary and not mirror) or (
            aggregation and not secondary
        ):
            ax.hlines(1, min_s, min_s + 1 + cnt, color=delimiter_color, alpha=0.7)
        if outliers:
            ax.text(
                min_s - 1.5,
                1.5,
                "{} {}".format(min_val, unit),
                fontsize=base_fontsize,
                rotation=90,
                va="center",
                ha="left" if asc else "right",
                bbox={"facecolor": "red", "alpha": alpha, "pad": 2},
            )
            ax.text(
                min_s + cnt + 1.6,
                1.5,
                "{} {}".format(max_val, unit),
                fontsize=base_fontsize,
                rotation=90,
                va="center",
                ha="left" if asc else "right",
                bbox={"facecolor": "red", "alpha": alpha, "pad": 2},
            )
            ax.hlines(1.5, min_s, min_s - 0.5, color=delimiter_color, alpha=0.7)
            ax.hlines(
                1.5,
                min_s + 1 + cnt,
                min_s + 1.5 + cnt,
                color=delimiter_color,
                alpha=0.7,
            )
        legend(
            ax,
            width,
            min_s + cnt,
            asc,
            flip_axes,
            mirror,
            st,
            lf,
            scale_factor,
            delimiter_color,
            aggregation,
            cur_font,
            n,
            legend_pos,
            unit,
        )
    else:
        line_length = 1 + cnt if (ax1 is None) or df2 is not None else 1 + max(stems)
        if (aggregation and secondary and not mirror) or (
            aggregation and not secondary
        ):
            ax.vlines(
                aggr_line_offset,
                cnt_offset,
                line_length,
                color=delimiter_color,
                alpha=0.7,
            )
        if show_stem:
            ax.vlines(2.4, cnt_offset, line_length, color=delimiter_color, alpha=0.7)
        if outliers:
            ax.text(
                1.5,
                -1,
                "{} {}".format(min_val, unit),
                fontsize=base_fontsize,
                va="center",
                ha="center",
                bbox={"facecolor": outliers_color, "alpha": alpha, "pad": 2},
            )
            ax.text(
                1.5,
                cnt + 2,
                "{} {}".format(max_val, unit),
                # CONSISTENCY FIX: was hard-coded 12; use base_fontsize like
                # the matching min_val label above.
                fontsize=base_fontsize,
                va="center",
                ha="center",
                bbox={"facecolor": outliers_color, "alpha": alpha, "pad": 2},
            )
            ax.vlines(1.5, -0.5, 0, color=delimiter_color, alpha=0.7)
            ax.vlines(1.5, 1 + cnt, 1.5 + cnt, color=delimiter_color, alpha=0.7)
        legend(
            ax,
            width,
            cnt,
            asc,
            flip_axes,
            mirror,
            st,
            lf,
            scale_factor,
            delimiter_color,
            aggregation and not secondary,
            cur_font,
            n,
            legend_pos,
            unit,
        )
    if secondary_plot is not None:
        secondary_kw = secondary_kw or {"alpha": 0.5}
        start_at = 1.5
        from scipy.stats import gaussian_kde

        try:
            y = df.dropna()
        except AttributeError:
            y = df
        gkde = gaussian_kde(y)
        ind = np.linspace(
            min_val, int((int(lf) / 10 + int(st)) * int(scale_factor)), len(rows) * 10
        )
        if "overlay_kde" in secondary_plot:
            if flip_axes:
                ax.plot(
                    (ind / scale_factor) + 0.01 if asc else -1,
                    0.9 + start_at + gkde.evaluate(ind) * scale_factor * width * 6,
                    **secondary_kw
                )
            else:
                ax.plot(
                    0.9 + start_at + gkde.evaluate(ind) * scale_factor * width * 6,
                    (ind / scale_factor) + 0.01 if asc else -1,
                    **secondary_kw
                )
        elif "kde" in secondary_plot:
            if flip_axes:
                ax.plot(
                    (ind / scale_factor) + 0.01 if asc else -1,
                    start_at + gkde.evaluate(ind) * scale_factor * width * 6 / width,
                    **secondary_kw
                )
            else:
                ax.plot(
                    start_at + gkde.evaluate(ind) * scale_factor * width * 6 / width,
                    (ind / scale_factor) + 0.01 if asc else -1,
                    **secondary_kw
                )
        if "rug" in secondary_plot:
            y = df.sample(frac=display / total_rows).dropna()

            if flip_axes:
                ax.plot(
                    (y / scale_factor) + 0.01 if asc else -1,
                    y * 0 + 1.2,
                    "|",
                    color="k",
                    **secondary_kw
                )
            else:
                ax.plot(
                    y * 0 + 1.2,
                    (y / scale_factor) + 0.01 if asc else -1,
                    "_",
                    color="k",
                    **secondary_kw
                )
        elif secondary_plot == "dot":
            y = df.sample(frac=display / total_rows).dropna()

            if flip_axes:
                ax.plot(
                    (y / scale_factor) + 0.01 if asc else -1,
                    y * 0 + 1.2,
                    "o",
                    markeredgewidth=1,
                    markerfacecolor="None",
                    markeredgecolor="k",
                    **secondary_kw
                )
            else:
                ax.plot(
                    y * 0 + 1.2,
                    (y / scale_factor) + 0.01 if asc else -1,
                    "o",
                    markeredgewidth=1,
                    markerfacecolor="None",
                    markeredgecolor="k",
                    **secondary_kw
                )
    if flip_axes:
        ax.plot(total_height, 0)
        # ax.plot(0, total_width)
    else:
        # ax.plot(0, total_height)
        ax.plot(total_width, 0)

    # This is needed due to a warning we have no control over.
    import warnings

    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        fig.tight_layout()
    if figure_only:
        return fig, ax
    else:
        return fig, ax, df
|
fdion/stemgraphic
|
stemgraphic/graphic.py
|
Python
|
mit
| 45,609
|
##
# Copyright (c) 2010-2014 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from StringIO import StringIO
from zope.interface import implements
from twisted.internet.protocol import Protocol
from twisted.internet.defer import Deferred, succeed
from twisted.web.iweb import IBodyProducer
from twisted.internet.interfaces import IConsumer
class _BufferReader(Protocol):
    """Body-delivery protocol that buffers everything it receives.

    When the connection closes, the complete buffered body is passed to
    the Deferred supplied at construction time.
    """

    def __init__(self, finished):
        # Deferred fired with the full body once the connection closes.
        self.finished = finished
        self.received = StringIO()

    def dataReceived(self, bytes):
        # Append each delivered chunk to the in-memory buffer.
        self.received.write(bytes)

    def connectionLost(self, reason):
        # Body delivery is complete; hand the accumulated data over.
        self.finished.callback(self.received.getvalue())
def readBody(response):
    """Collect the entire body of an HTTP response.

    Returns a Deferred that fires with the body as a string, or with
    None immediately when the response declares a zero-length body.
    """
    if response.length != 0:
        done = Deferred()
        response.deliverBody(_BufferReader(done))
        return done
    return succeed(None)
class StringProducer(object):
    """An IBodyProducer which writes a fixed in-memory string as a request body."""
    implements(IBodyProducer)

    def __init__(self, body):
        # Reject non-str bodies up front rather than failing mid-request.
        if not isinstance(body, str):
            raise TypeError(
                "StringProducer body must be str, not %r" % (type(body),))
        self._body = body
        self.length = len(body)

    def startProducing(self, consumer):
        # The whole body is written in one shot; production is done immediately.
        consumer.write(self._body)
        return succeed(None)

    # Production happens synchronously in startProducing, so the flow-control
    # hooks below have nothing to do.
    def stopProducing(self):
        pass

    def pauseProducing(self):
        pass

    def resumeProducing(self):
        pass
class MemoryConsumer(object):
    """An IConsumer which accumulates every written chunk in memory."""
    implements(IConsumer)

    def __init__(self):
        self._buffer = []

    def write(self, bytes):
        self._buffer.append(bytes)

    def value(self):
        """Return everything written so far as a single string."""
        joined = "".join(self._buffer)
        if len(self._buffer) > 1:
            # Cache the collapsed form so repeated calls don't re-join
            # the same chunks.
            self._buffer = [joined]
        return joined
|
trevor/calendarserver
|
contrib/performance/httpclient.py
|
Python
|
apache-2.0
| 2,220
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-06 04:34
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Squashed initial migration for the ``coding`` app.

    Replaces migrations 0001-0003 and creates the Assignment, Code,
    CodeScheme, CommentCodeInstance and SubmissionCodeInstance models.
    The ``b'...'`` prefixes are Python 2 artefacts of the squash and are
    harmless.
    """

    # Django treats this squash as already applied when every replaced
    # migration has run.
    replaces = [(b'coding', '0001_initial'), (b'coding', '0002_auto_20160506_0424'), (b'coding', '0003_auto_20160506_0427')]
    initial = True
    dependencies = [
        ('main', '0001_squashed_0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        # An assignment pairs a coder with the comments/submissions to code.
        migrations.CreateModel(
            name='Assignment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_date', models.DateTimeField(auto_now_add=True, null=True)),
                ('modified_date', models.DateTimeField(auto_now=True, null=True)),
                ('deleted_date', models.DateTimeField(blank=True, null=True)),
                ('name', models.CharField(max_length=64)),
                ('description', models.TextField(blank=True, null=True)),
                ('assigned_comments', models.ManyToManyField(blank=True, to=b'main.Comment')),
                ('assigned_submissions', models.ManyToManyField(blank=True, to=b'main.Submission')),
            ],
            options={
                'abstract': False,
            },
        ),
        # A single code (label) within a coding scheme.
        migrations.CreateModel(
            name='Code',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_date', models.DateTimeField(auto_now_add=True, null=True)),
                ('modified_date', models.DateTimeField(auto_now=True, null=True)),
                ('deleted_date', models.DateTimeField(blank=True, null=True)),
                ('name', models.CharField(max_length=64)),
                ('description', models.TextField(blank=True, null=True)),
                ('css_class', models.CharField(blank=True, max_length=64, null=True)),
                ('key', models.CharField(blank=True, max_length=1, null=True)),
                ('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='code_created_by', to=settings.AUTH_USER_MODEL)),
                ('deleted_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='code_deleted_by', to=settings.AUTH_USER_MODEL)),
                ('modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='code_modified_by', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'abstract': False,
            },
        ),
        # A named collection of codes; mutually_exclusive constrains usage.
        migrations.CreateModel(
            name='CodeScheme',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_date', models.DateTimeField(auto_now_add=True, null=True)),
                ('modified_date', models.DateTimeField(auto_now=True, null=True)),
                ('deleted_date', models.DateTimeField(blank=True, null=True)),
                ('name', models.CharField(max_length=64)),
                ('description', models.TextField()),
                ('mutually_exclusive', models.BooleanField(default=False)),
                ('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='codescheme_created_by', to=settings.AUTH_USER_MODEL)),
                ('deleted_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='codescheme_deleted_by', to=settings.AUTH_USER_MODEL)),
                ('modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='codescheme_modified_by', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'abstract': False,
            },
        ),
        # Application of a code to a comment, within an (optional) assignment.
        migrations.CreateModel(
            name='CommentCodeInstance',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_date', models.DateTimeField(auto_now_add=True, null=True)),
                ('modified_date', models.DateTimeField(auto_now=True, null=True)),
                ('deleted_date', models.DateTimeField(blank=True, null=True)),
                ('assignment', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='coding.Assignment')),
                ('code', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='coding.Code')),
                ('comment', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.Comment')),
                ('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='commentcodeinstance_created_by', to=settings.AUTH_USER_MODEL)),
                ('deleted_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='commentcodeinstance_deleted_by', to=settings.AUTH_USER_MODEL)),
                ('modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='commentcodeinstance_modified_by', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'abstract': False,
            },
        ),
        # Application of a code to a submission, mirroring CommentCodeInstance.
        migrations.CreateModel(
            name='SubmissionCodeInstance',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_date', models.DateTimeField(auto_now_add=True, null=True)),
                ('modified_date', models.DateTimeField(auto_now=True, null=True)),
                ('deleted_date', models.DateTimeField(blank=True, null=True)),
                ('assignment', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='coding.Assignment')),
                ('code', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='coding.Code')),
                ('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='submissioncodeinstance_created_by', to=settings.AUTH_USER_MODEL)),
                ('deleted_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='submissioncodeinstance_deleted_by', to=settings.AUTH_USER_MODEL)),
                ('modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='submissioncodeinstance_modified_by', to=settings.AUTH_USER_MODEL)),
                ('submission', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.Submission')),
            ],
            options={
                'abstract': False,
            },
        ),
        # Remaining FKs added after model creation (squash ordering artefact).
        migrations.AddField(
            model_name='code',
            name='scheme',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='coding.CodeScheme'),
        ),
        migrations.AddField(
            model_name='assignment',
            name='code_schemes',
            field=models.ManyToManyField(to=b'coding.CodeScheme'),
        ),
        migrations.AddField(
            model_name='assignment',
            name='coder',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='assignment',
            name='created_by',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='assignment_created_by', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='assignment',
            name='deleted_by',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='assignment_deleted_by', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='assignment',
            name='modified_by',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='assignment_modified_by', to=settings.AUTH_USER_MODEL),
        ),
    ]
|
geosoco/reddit_coding
|
coding/migrations/0001_squashed_0003_auto_20160506_0427.py
|
Python
|
bsd-3-clause
| 8,587
|
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
ExtractorError,
get_element_by_attribute,
parse_duration,
try_get,
update_url_query,
)
from ..compat import compat_str
class USATodayIE(InfoExtractor):
    """Extractor for usatoday.com video pages.

    The page embeds a JSON blob in a ``ui-video-data`` element; extraction
    is delegated to the BrightcoveNew extractor via a url_transparent
    result built from the Brightcove account and video ids found there.
    """
    _VALID_URL = r'https?://(?:www\.)?usatoday\.com/(?:[^/]+/)*(?P<id>[^?/#]+)'
    _TESTS = [{
        # Brightcove Partner ID = 29906170001
        'url': 'http://www.usatoday.com/media/cinematic/video/81729424/us-france-warn-syrian-regime-ahead-of-new-peace-talks/',
        'md5': '033587d2529dc3411a1ab3644c3b8827',
        'info_dict': {
            'id': '4799374959001',
            'ext': 'mp4',
            'title': 'US, France warn Syrian regime ahead of new peace talks',
            'timestamp': 1457891045,
            'description': 'md5:7e50464fdf2126b0f533748d3c78d58f',
            'uploader_id': '29906170001',
            'upload_date': '20160313',
        }
    }, {
        # ui-video-data[asset_metadata][items][brightcoveaccount] = 28911775001
        'url': 'https://www.usatoday.com/story/tech/science/2018/08/21/yellowstone-supervolcano-eruption-stop-worrying-its-blow/973633002/',
        'info_dict': {
            'id': '5824495846001',
            'ext': 'mp4',
            'title': 'Yellowstone more likely to crack rather than explode',
            'timestamp': 1534790612,
            'description': 'md5:3715e7927639a4f16b474e9391687c62',
            'uploader_id': '28911775001',
            'upload_date': '20180820',
        }
    }]
    BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/%s/default_default/index.html?videoId=%s'

    def _real_extract(self, url):
        display_id = self._match_id(url)
        # The ajax=true variant of the page carries the embedded video JSON.
        page = self._download_webpage(
            update_url_query(url, {'ajax': 'true'}), display_id)
        raw_data = get_element_by_attribute('class', 'ui-video-data', page)
        if not raw_data:
            raise ExtractorError('no video on the webpage', expected=True)
        video_data = self._parse_json(raw_data, display_id)
        meta = try_get(video_data, lambda x: x['asset_metadata']['items'], dict) or {}
        # Fall back to the historical partner account when none is embedded.
        account_id = meta.get('brightcoveaccount', '29906170001')
        brightcove_id = meta.get('brightcoveid') or video_data['brightcove_id']
        return {
            '_type': 'url_transparent',
            'url': self.BRIGHTCOVE_URL_TEMPLATE % (account_id, brightcove_id),
            'id': compat_str(video_data['id']),
            'title': video_data['title'],
            'thumbnail': video_data.get('thumbnail'),
            'description': video_data.get('description'),
            'duration': parse_duration(video_data.get('length')),
            'ie_key': 'BrightcoveNew',
        }
|
vinegret/youtube-dl
|
youtube_dl/extractor/usatoday.py
|
Python
|
unlicense
| 2,703
|
# coding: utf-8
# NOTE(review): this file is an IPython-notebook export (hence the
# "# In[ ]:" cell markers) written for Python 2 -- the bare ``print``
# statements below will not run under Python 3.
## Random blink
# This program uses three different libraries which we must "import" before we can use them:
# In[ ]:
import pibrella, random, time
# Next we have the program itself:
# In[ ]:
mychoices = ( pibrella.light.red, pibrella.light.yellow, pibrella.light.green )
for i in range(50):
    light = random.choice(mychoices)
    light.toggle()
    time.sleep((random.random() / 2) + 0.1)
## Breaking it apart
### Line 1
# In[ ]:
mychoices = ( pibrella.light.red, pibrella.light.yellow, pibrella.light.green )
# `mychoices` is a "tuple" (a group of objects) containing three things, the red light object, the yellow light object and the green light object.
#
# Commands to try to help you understand `mychoices`:
# In[ ]:
mychoices
# In[ ]:
mychoices[0].on()
# In[ ]:
mychoices[0].off()
# In[ ]:
mychoices[2].toggle()
### Line 2
# In[ ]:
for i in range(50):
    print "An example indented line"
# Makes the indented command or commands run 50 times. This is called a loop. The first line is called the loop condition and the indented lines are called the loop body.
#
# Commands to try:
# In[ ]:
range(50)
# In[ ]:
range(5)
# In[ ]:
for i in range(5):
    print i
### Line 3
# In[ ]:
light = random.choice(mychoices)
# Selects a random value from mychoices and stores it in a "variable" called light.
#
# Commands to try (remember to repeat these, randomness means that they don't do the same thing every time):
# In[ ]:
random.choice(mychoices)
# In[ ]:
random.choice(mychoices).toggle()
### Line 4
# In[ ]:
light.toggle()
# Makes the object held in the variable called light toggle its state (on becomes off and off becomes on).
#
# Commands to try:
# In[ ]:
light = mychoices[0]
# In[ ]:
light.toggle()
# In[ ]:
light.on()
# In[ ]:
light.off()
# In[ ]:
light = pibrella.light.green
# In[ ]:
light.toggle()
### Line 5
# In[ ]:
time.sleep((random.random() / 2) + 0.1)
# Makes the program do nothing for a short but randomly selected period of time.
# Commands to try (repeat these, random things don't do the same thing every time):
# In[ ]:
random.random()
# In[ ]:
random.random() / 2
# In[ ]:
(random.random() / 2) + 0.1
# In[ ]:
(random.random() / 10) + 0.9
# In[ ]:
time.sleep(5)
## Challenges
# 1. Make the random blinking go faster (change both numbers).
#
# 2. Make the random blinking happen as fast as possible.
#
# 3. Make the random blinking happen at the speed you think is nicest!
#
# 4. Make the random blinking run forever.
#
# As a hint find out what this command does (and when you find out, press
# Control and C to stop the program):
#
#     while True:
#         print("Why not try moving the scroll bar?")
#
# 5. Make four more of the lights on the pibrella join in the blinking.
#
# Hint:
#
#     pibrella.output.e.toggle()
#     pibrella.output.h.toggle()
daniel-thompson/pibrella-examples
|
five_liners/random_blink.py
|
Python
|
mit
| 2,937
|
from django.conf import settings
from django.core.exceptions import ValidationError, ObjectDoesNotExist
from rest_framework import serializers
from rest_framework.fields import CharField
from rest_framework.generics import get_object_or_404
from apis.betterself.v1.supplements.serializers import SupplementReadSerializer
from apis.betterself.v1.constants import DAILY_FREQUENCY, MONTHLY_FREQUENCY
from apis.betterself.v1.users.serializers import PhoneNumberDetailsSerializer
from betterself.utils.date_utils import get_current_date_months_ago, get_current_utc_time_and_tz
from config.settings.constants import TESTING, LOCAL
from events.models import INPUT_SOURCES_TUPLES, UserActivity, SupplementReminder, WEB_INPUT_SOURCE, SupplementLog
from supplements.models import Supplement, UserSupplementStack
def valid_daily_max_minutes(value):
    """Field validator: reject negatives and values exceeding one day's minutes.

    Valid inputs (0 .. 1440 inclusive) pass silently; anything else raises
    a DRF ValidationError.
    """
    MINUTES_PER_DAY = 24 * 60
    if value < 0:
        raise serializers.ValidationError('Less than 1 is not allowed.')
    if value > MINUTES_PER_DAY:
        raise serializers.ValidationError('Error - More than minutes in a day.')
class SupplementLogCreateUpdateSerializer(serializers.Serializer):
    """Write serializer for supplement log entries.

    ``supplement_uuid`` is accepted on input (sourced to ``supplement.uuid``)
    and resolved to a Supplement instance in create()/update().
    """
    supplement_uuid = serializers.UUIDField(source='supplement.uuid')
    quantity = serializers.FloatField(default=1)
    time = serializers.DateTimeField()
    source = serializers.ChoiceField(INPUT_SOURCES_TUPLES)
    uuid = serializers.UUIDField(required=False, read_only=True)
    supplement_name = CharField(source='supplement.name', read_only=True, required=False)
    duration_minutes = serializers.IntegerField(default=0)
    notes = serializers.CharField(default='', max_length=1000, trim_whitespace=True, required=False, allow_blank=True)
    @classmethod
    def validate_supplement_uuid(cls, value):
        # serializers check if these are valid uuid fields, but they don't
        # check that these objects should actually exist. do it here!
        try:
            Supplement.objects.get(uuid=value)
        except Supplement.DoesNotExist:
            raise ValidationError('Supplement UUID {} does not exist'.format(value))
        return value
    def create(self, validated_data):
        """Upsert a log entry keyed on (user, time, supplement)."""
        user = self.context['request'].user
        create_model = self.context['view'].model
        supplement_uuid = validated_data.pop('supplement')['uuid']
        # this is a lame hack, but I don't want to rewrite the generic posts
        # essentially what happens is we share a supplement across different users
        # which should never happen in production
        if settings.DJANGO_ENVIRONMENT in (TESTING, LOCAL):
            supplement = Supplement.objects.get(uuid=supplement_uuid)
        else:
            supplement = Supplement.objects.get(uuid=supplement_uuid, user=user)
        time = validated_data.pop('time')
        # pop() above removed the lookup keys, so validated_data now holds
        # only the updatable defaults.
        obj, _ = create_model.objects.update_or_create(
            user=user,
            time=time,
            supplement=supplement,
            defaults=validated_data
        )
        return obj
    def update(self, instance, validated_data):
        """Partially update an existing log entry; absent fields keep old values."""
        if 'supplement' in validated_data:
            supplement_uuid = validated_data.get('supplement')['uuid']
            supplement = get_object_or_404(Supplement, uuid=supplement_uuid)
            instance.supplement = supplement
        instance.source = validated_data.get('source', instance.source)
        instance.duration_minutes = validated_data.get('duration_minutes', instance.duration_minutes)
        instance.quantity = validated_data.get('quantity', instance.quantity)
        instance.time = validated_data.get('time', instance.time)
        instance.notes = validated_data.get('notes', instance.notes)
        instance.save()
        return instance
class SupplementLogReadOnlySerializer(serializers.ModelSerializer):
    """Read-only representation of a SupplementLog, flattening supplement info."""
    supplement_name = CharField(source='supplement.name')
    supplement_uuid = CharField(source='supplement.uuid')
    quantity = serializers.FloatField()
    class Meta:
        model = SupplementLog
        fields = (
            'supplement_name', 'supplement_uuid', 'quantity', 'time', 'source', 'uuid', 'duration_minutes',
            'notes'
        )
class ProductivityLogReadSerializer(serializers.Serializer):
    """Read-only view of one day's productivity-minute buckets."""
    very_productive_time_minutes = serializers.IntegerField(required=False)
    productive_time_minutes = serializers.IntegerField(required=False)
    neutral_time_minutes = serializers.IntegerField(required=False)
    distracting_time_minutes = serializers.IntegerField(required=False)
    very_distracting_time_minutes = serializers.IntegerField(required=False)
    date = serializers.DateField()
    uuid = serializers.UUIDField()
class ProductivityLogCreateSerializer(serializers.Serializer):
    """Write serializer for daily productivity logs.

    Each minute bucket is capped to one day's worth of minutes by
    valid_daily_max_minutes; entries upsert on (user, date).
    """
    uuid = serializers.UUIDField(required=False, read_only=True)
    very_productive_time_minutes = serializers.IntegerField(required=False, validators=[valid_daily_max_minutes])
    productive_time_minutes = serializers.IntegerField(required=False, validators=[valid_daily_max_minutes])
    neutral_time_minutes = serializers.IntegerField(required=False, validators=[valid_daily_max_minutes])
    distracting_time_minutes = serializers.IntegerField(required=False, validators=[valid_daily_max_minutes])
    very_distracting_time_minutes = serializers.IntegerField(required=False, validators=[valid_daily_max_minutes])
    date = serializers.DateField()
    def create(self, validated_data):
        # Upsert keyed on (user, date); remaining validated fields become
        # the updatable defaults.
        user = self.context['request'].user
        create_model = self.context['view'].model
        date = validated_data.pop('date')
        obj, created = create_model.objects.update_or_create(
            user=user,
            date=date,
            defaults=validated_data)
        return obj
class UserActivitySerializer(serializers.Serializer):
    """Create/read serializer for user-defined activities (upserts on name)."""
    uuid = serializers.UUIDField(required=False, read_only=True)
    name = serializers.CharField()
    is_significant_activity = serializers.BooleanField(required=False)
    is_negative_activity = serializers.BooleanField(required=False)
    is_all_day_activity = serializers.BooleanField(required=False)
    def create(self, validated_data):
        # Upsert keyed on (user, name) so re-posting the same activity name
        # updates the existing record instead of duplicating it.
        create_model = self.context['view'].model
        user = self.context['request'].user
        name = validated_data.pop('name')
        obj, created = create_model.objects.update_or_create(
            user=user,
            name=name,
            defaults=validated_data)
        return obj
class UserActivityUpdateSerializer(serializers.Serializer):
    """
    The create and update serializers "could" be combined, but I rather
    be explicit separation for now, I can combine them later -- just don't want to build
    tests that assume they're nested.
    """
    uuid = serializers.UUIDField()
    name = serializers.CharField(required=False)
    is_significant_activity = serializers.BooleanField(required=False)
    is_negative_activity = serializers.BooleanField(required=False)
    is_all_day_activity = serializers.BooleanField(required=False)
    def update(self, instance, validated_data):
        # Partial update: any field not supplied keeps its current value.
        # Maybe you're doing this wrong ... don't think you should need to do all of this
        instance.name = validated_data.get('name', instance.name)
        instance.is_significant_activity = validated_data.get('is_significant_activity',
                                                              instance.is_significant_activity)
        instance.is_negative_activity = validated_data.get('is_negative_activity', instance.is_negative_activity)
        instance.is_all_day_activity = validated_data.get('is_all_day_activity', instance.is_all_day_activity)
        instance.save()
        return instance
class UserActivityLogCreateSerializer(serializers.Serializer):
    """Write serializer for activity log events, upserting on
    (user, user_activity, time)."""
    uuid = serializers.UUIDField(required=False, read_only=True)
    # We send back user_activity_uuid after an event is created to serialize correctly
    user_activity = UserActivitySerializer(required=False, read_only=True)
    user_activity_uuid = serializers.UUIDField(source='user_activity.uuid')
    source = serializers.ChoiceField(INPUT_SOURCES_TUPLES)
    duration_minutes = serializers.IntegerField(default=0)
    time = serializers.DateTimeField()
    def create(self, validated_data):
        # Resolve the activity by uuid, then upsert the log event.
        create_model = self.context['view'].model
        user = self.context['request'].user
        activity_uuid = validated_data.pop('user_activity')['uuid']
        user_activity = UserActivity.objects.get(uuid=activity_uuid)
        time = validated_data.pop('time')
        obj, created = create_model.objects.update_or_create(
            user=user,
            user_activity=user_activity,
            time=time,
            defaults=validated_data)
        return obj
    def update(self, instance, validated_data):
        # Only re-resolve the activity if the caller supplied one; scope the
        # lookup to the instance's owner so users can't attach others' activities.
        if 'user_activity' in validated_data:
            try:
                user_activity_uuid = validated_data['user_activity']['uuid']
                user_activity = UserActivity.objects.get(uuid=user_activity_uuid, user=instance.user)
            except ObjectDoesNotExist:
                raise ValidationError('Invalid User Activity UUID Entered')
            instance.user_activity = user_activity
        # NOTE(review): this assigns a dict (or the current uuid) to an
        # attribute named ``user_activity_uuid`` on the model instance --
        # looks like a leftover; confirm it has any effect before relying on it.
        instance.user_activity_uuid = validated_data.get('user_activity', instance.user_activity.uuid)
        instance.duration_minutes = validated_data.get('duration_minutes', instance.duration_minutes)
        instance.time = validated_data.get('time', instance.time)
        instance.source = validated_data.get('source', instance.source)
        instance.save()
        return instance
class UserActivityLogReadSerializer(serializers.Serializer):
    """Read-only view of an activity log event with its nested activity."""
    uuid = serializers.UUIDField()
    user_activity = UserActivitySerializer()
    source = serializers.ChoiceField(INPUT_SOURCES_TUPLES)
    duration_minutes = serializers.IntegerField()
    time = serializers.DateTimeField()
class SleepLogReadSerializer(serializers.Serializer):
    """Read-only view of one sleep interval."""
    uuid = serializers.UUIDField()
    start_time = serializers.DateTimeField()
    end_time = serializers.DateTimeField()
    source = serializers.ChoiceField(INPUT_SOURCES_TUPLES)
class SleepLogCreateSerializer(serializers.Serializer):
    """Write serializer for sleep intervals; rejects inverted or
    overlapping periods."""
    uuid = serializers.UUIDField(required=False, read_only=True)
    start_time = serializers.DateTimeField()
    end_time = serializers.DateTimeField()
    source = serializers.ChoiceField(INPUT_SOURCES_TUPLES)
    def validate(self, data):
        """
        Check that start/end times are valid and don't overlap an existing log.
        """
        if data['start_time'] >= data['end_time']:
            raise serializers.ValidationError('End time must occur after start')
        create_model = self.context['view'].model
        user = self.context['request'].user
        start_time = data['start_time']
        end_time = data['end_time']
        # Any existing interval touching [start_time, end_time] is an overlap.
        if create_model.objects.filter(user=user).filter(end_time__gte=start_time, start_time__lte=end_time).exists():
            raise ValidationError('Overlapping Periods Found')
        return data
    def create(self, validated_data):
        # Upsert keyed on (user, start_time, end_time).
        create_model = self.context['view'].model
        user = self.context['request'].user
        obj, created = create_model.objects.update_or_create(
            user=user,
            start_time=validated_data['start_time'],
            end_time=validated_data['end_time'],
            defaults=validated_data)
        return obj
class ProductivityLogRequestParametersSerializer(serializers.Serializer):
    """Query-parameter serializer for productivity log aggregation requests."""
    start_date = serializers.DateField(default=get_current_date_months_ago(3))
    # Rolling-window size in days, capped at three years.
    cumulative_window = serializers.IntegerField(default=1, min_value=1, max_value=365 * 3)
    complete_date_range_in_daily_frequency = serializers.BooleanField(default=False)
class SupplementLogRequestParametersSerializer(serializers.Serializer):
    """Query-parameter serializer for supplement log aggregation requests."""
    start_date = serializers.DateField(default=get_current_date_months_ago(3))
    frequency = serializers.ChoiceField([DAILY_FREQUENCY, MONTHLY_FREQUENCY, None], default=None)
    # this is a bit tricky to explain, but if true it means to always have the results for any daily frequencies
    # to include the entire date_range from start end date range, which will result in a lot of null/empty data
    complete_date_range_in_daily_frequency = serializers.BooleanField(default=False)
    def validate(self, validated_data):
        # Filling the full date range only makes sense with a frequency set.
        if not validated_data['frequency'] and validated_data['complete_date_range_in_daily_frequency']:
            raise ValidationError('If there is no frequency, results should not enclose all date ranges between start '
                                  'and ending periods')
        return validated_data
class SupplementReminderReadSerializer(serializers.ModelSerializer):
    """Read-only view of a supplement reminder with nested supplement and
    the owner's phone-number details."""
    supplement = SupplementReadSerializer()
    phone_number_details = PhoneNumberDetailsSerializer(source='user.userphonenumberdetails')
    class Meta:
        fields = ['supplement', 'reminder_time', 'quantity', 'last_sent_reminder_time', 'phone_number_details', 'uuid']
        model = SupplementReminder
class SupplementReminderCreateSerializer(serializers.ModelSerializer):
    """Write serializer for supplement reminders, capped at five per user."""
    supplement_uuid = serializers.UUIDField(source='supplement.uuid')
    class Meta:
        model = SupplementReminder
        fields = ('supplement_uuid', 'reminder_time', 'quantity')
    @classmethod
    def validate_supplement_uuid(cls, value):
        # serializers check if these are valid uuid fields, but they don't
        # check that these objects should actually exist. do it here!
        try:
            Supplement.objects.get(uuid=value)
        except Supplement.DoesNotExist:
            raise ValidationError('Supplement UUID {} does not exist'.format(value))
        return value
    def validate(self, data):
        # Hard cap on reminders per user.
        create_model = self.context['view'].model
        user = self.context['request'].user
        if create_model.objects.filter(user=user).count() >= 5:
            raise ValidationError('Error: Limit of 5 Supplement Reminders A Day')
        return data
    def create(self, validated_data):
        """Upsert a reminder keyed on (user, supplement, reminder_time)."""
        create_model = self.context['view'].model
        user = self.context['request'].user
        supplement_uuid = validated_data.pop('supplement')['uuid']
        # this is a lame hack, but I don't want to rewrite the generic posts
        # essentially what happens is we share a supplement across different users
        # which should never happen in production
        if settings.DJANGO_ENVIRONMENT in (TESTING, LOCAL):
            supplement = Supplement.objects.get(uuid=supplement_uuid)
        else:
            supplement = Supplement.objects.get(uuid=supplement_uuid, user=user)
        reminder_time = validated_data.pop('reminder_time')
        obj, created = create_model.objects.update_or_create(
            user=user,
            supplement=supplement,
            reminder_time=reminder_time,
            defaults=validated_data)
        return obj
class SupplementStackLogSerializer(serializers.Serializer):
    """Input serializer for logging an entire supplement stack at once."""
    stack_uuid = serializers.UUIDField()
    # Callable default: evaluated per request, so 'now' is always current.
    time = serializers.DateTimeField(default=get_current_utc_time_and_tz)
    source = serializers.ChoiceField(INPUT_SOURCES_TUPLES, default=WEB_INPUT_SOURCE)
    @classmethod
    def validate_stack_uuid(cls, value):
        # UUID syntax is already validated; confirm the stack actually exists.
        check_if_exists = UserSupplementStack.objects.filter(uuid=value).exists()
        if not check_if_exists:
            raise ValidationError('Supplement Stack UUID {} does not exist'.format(value))
        return value
|
jeffshek/betterself
|
apis/betterself/v1/events/serializers.py
|
Python
|
mit
| 15,381
|
"""
Django settings for issuebox project.
Generated by 'django-admin startproject' using Django 1.9.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
from getenv import env
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
from django.conf.global_settings import LOGIN_URL
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '4^x&#lfmx-^c2_omd_3^c5xshp%o_isg8ifu)!a1mmk59x7ylj'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'website',
# added for template tweaks
'widget_tweaks',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'issuebox.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'issuebox.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'issuebox_db',
'USER': 'root',
'PASSWORD': 'root',
'HOST': env('DATABASE_HOST', ''),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
AUTHENTICATION_BACKENDS = ('website.auth.backends.CustomAuthentication', 'django.contrib.auth.backends.ModelBackend')
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "website", "static", "assets"),
# added to store and serve profile images, good enough for development phase
# for production phase see: https://docs.djangoproject.com/en/dev/howto/static-files/deployment/
os.path.join(BASE_DIR, "website", "static", "profile_images")
]
|
UKS-Tim3/Issuebox
|
issuebox/issuebox/settings/development.py
|
Python
|
mit
| 3,844
|
########################################################################
# $Id$
########################################################################
__RCSID__ = "$Id$"
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.DataManagementSystem.DB.FileCatalogComponents.FileManagerBase import FileManagerBase
from DIRAC.Core.Utilities.List import stringListToString, \
intListToString, \
breakListIntoChunks
import datetime
import os
from types import ListType, TupleType, StringTypes
# The logic of some methods is basically a copy/paste from the FileManager class,
# so I could have inherited from it. However, I did not want to depend on it
class FileManagerPs( FileManagerBase ):
  def __init__(self, database = None ):
    """ Initialise the stored-procedure based file manager.

        :param database: FileCatalogDB instance passed on to FileManagerBase
    """
    super( FileManagerPs, self ).__init__( database )
######################################################
#
# The all important _findFiles and _getDirectoryFiles methods
#
  def _findFiles( self, lfns, metadata = ['FileID'], allStatus = False, connection = False ):
    """ Returns the information for the given lfns
        The logic works nicely in the FileManager, so I pretty much copied it.
        :param lfns: list of lfns
        :param metadata: list of params that we want to get for each lfn
        :param allStatus: consider all file status or only those defined in db.visibleFileStatus
        :return successful/failed convention. successful is a dict < lfn : dict of metadata >
    """
    connection = self._getConnection( connection )
    # Group the requested lfns by their parent directory.
    dirDict = self._getFileDirectories( lfns )
    # Resolve each directory path to its directory ID.
    result = self.db.dtree.findDirs( dirDict.keys() )
    if not result['OK']:
      return result
    directoryIDs = result['Value']
    failed = {}
    successful = {}
    # Fetch the metadata one directory at a time.
    for dirPath in directoryIDs:
      fileNames = dirDict[dirPath]
      res = self._getDirectoryFiles( directoryIDs[dirPath], fileNames, metadata,
                                     allStatus = allStatus, connection = connection )
      for fileName, fileDict in res.get( 'Value', {} ).items():
        fname = os.path.join( dirPath, fileName )
        successful[fname] = fileDict
    # The lfns that are not in successful nor failed don't exist
    for failedLfn in ( set( lfns ) - set( successful ) ):
      failed.setdefault( failedLfn, "No such file or directory" )
    return S_OK( {"Successful":successful, "Failed":failed} )
  def _findFileIDs( self, lfns, connection = False ):
    """ Find lfn <-> FileID correspondence

        :param lfns: list (or dict keyed by lfn) of lfns to resolve
        :return successful/failed convention: successful maps lfn -> FileID
    """
    connection = self._getConnection(connection)
    failed = {}
    successful = {}
    # If there is only one lfn, we might as well make a direct query
    if len(lfns) == 1:
      lfn = list( lfns )[0] # if lfns is a dict, list(lfns) returns lfns.keys()
      pathPart, filePart = os.path.split( lfn )
      result = self.db.executeStoredProcedure( 'ps_get_file_id_from_lfn', ( pathPart, filePart, 'ret1' ), outputIds = [2] )
      if not result['OK']:
        return result
      fileId = result['Value'][0]
      if not fileId:
        failed[lfn] = "No such file"
      else:
        successful[lfn] = fileId
    else:
      # We separate the files by directory
      filesInDirDict = self._getFileDirectories( lfns )
      # We get the directory ids
      result = self.db.dtree.findDirs( filesInDirDict.keys() )
      if not result['OK']:
        return result
      directoryPathToIds = result['Value']
      # For each directory, we get the file ids of the files we want
      for dirPath in directoryPathToIds:
        fileNames = filesInDirDict[dirPath]
        dirID = directoryPathToIds[dirPath]
        # Format the names for use in the stored procedure's IN clause.
        formatedFileNames = stringListToString( fileNames )
        result = self.db.executeStoredProcedureWithCursor( 'ps_get_file_ids_from_dir_id', ( dirID, formatedFileNames ) )
        if not result['OK']:
          return result
        for fileID, fileName in result['Value']:
          fname = os.path.join( dirPath, fileName )
          successful[fname] = fileID
    # The lfns that are not in successful dont exist
    for failedLfn in ( set( lfns ) - set( successful ) ):
      failed[failedLfn] = "No such file"
    return S_OK({"Successful":successful,"Failed":failed})
  def _getDirectoryFiles(self,dirID,fileNames,metadata_input,allStatus=False,connection=False):
    """ For a given directory, and eventually given file, returns all the desired metadata

       :param dirID : directory ID
       :param fileNames : the list of filenames, or [] for all the files of the directory
       :param metadata_input: list of desired metadata.
              It can be anything from (FileName, DirID, FileID, Size, UID, Owner,
              GID, OwnerGroup, Status, GUID, Checksum, ChecksumType, Type, CreationDate, ModificationDate, Mode)
       :param allStatus : if False, only displays the files whose status is in db.visibleFileStatus

       :returns S_OK(files), where files is a dictionary indexed on filename, and values are dictionary of metadata
    """
    connection = self._getConnection( connection )
    # Work on a copy: extra fields are appended below
    metadata = list( metadata_input )
    # NOTE(review): Owner/OwnerGroup appear to be needed whenever UID/GID are
    # requested (presumably for name resolution downstream) -- confirm
    if "UID" in metadata:
      metadata.append( "Owner" )
    if "GID" in metadata:
      metadata.append( "OwnerGroup" )
    if "FileID" not in metadata:
      metadata.append( "FileID" )
    # Format the filenames and status to be used in a IN clause in the sotred procedure
    formatedFileNames = stringListToString( fileNames )
    fStatus = stringListToString( self.db.visibleFileStatus )
    # An empty file list means "all the files of the directory"
    specificFiles = True if len( fileNames ) else False
    result = self.db.executeStoredProcedureWithCursor( 'ps_get_all_info_for_files_in_dir',
                                                           ( dirID, specificFiles,
                                                             formatedFileNames, allStatus, fStatus ) )
    if not result['OK']:
      return result
    # Column names in the order returned by the stored procedure
    fieldNames = ["FileName", "DirID", "FileID", "Size", "UID", "Owner",
                  "GID", "OwnerGroup", "Status", "GUID", "Checksum",
                  "ChecksumType", "Type", "CreationDate", "ModificationDate", "Mode"]
    rows = result['Value']
    files = {}
    for row in rows:
      rowDict = dict( zip( fieldNames, row ) )
      fileName = rowDict['FileName']
      # Returns only the required metadata
      files[fileName] = dict( ( key, rowDict.get( key, "Unknown metadata field" ) ) for key in metadata )
    return S_OK( files )
  def _getFileMetadataByID( self, fileIDs, connection=False ):
    """ Get standard file metadata for a list of files specified by FileID

       :param fileIDs : list of file Ids
       :returns S_OK(files), where files is a dictionary indexed on fileID
               and the values dictionaries containing the following info:
               ["FileID", "Size", "UID", "GID", "s.Status", "GUID", "CreationDate"]
    """
    # Format the filenames and status to be used in a IN clause in the sotred procedure
    formatedFileIds = intListToString( fileIDs )
    result = self.db.executeStoredProcedureWithCursor( 'ps_get_all_info_for_file_ids', ( formatedFileIds, ) )
    if not result['OK']:
      return result
    rows = result['Value']
    # NOTE(review): "s.Status" looks like a leaked SQL table alias from the
    # procedure's SELECT -- confirm this is the key the callers expect
    fieldNames = ["FileID", "Size", "UID", "GID", "s.Status", "GUID", "CreationDate"]
    resultDict = {}
    for row in rows:
      rowDict = dict( zip( fieldNames, row ) )
      # Force the numeric fields to plain int
      rowDict["Size"] = int( rowDict["Size"] )
      rowDict["UID"] = int( rowDict["UID"] )
      rowDict["GID"] = int( rowDict["GID"] )
      resultDict[rowDict["FileID"]] = rowDict
    return S_OK( resultDict )
def __insertMultipleFiles ( self, allFileValues, wantedLfns ):
""" Insert multiple files in one query. However, if there is a problem
with one file, all the query is rolled back.
:param allFileValues : dictionary of tuple with all the information about possibly more
files than we want to insert
:param wantedLfns : list of lfn that we want to insert
"""
fileValuesStrings = []
fileDescStrings = []
for lfn in wantedLfns:
dirID, size, s_uid, s_gid, statusID, fileName, guid, checksum, checksumtype, mode = allFileValues[lfn]
utcNow = datetime.datetime.utcnow().replace( microsecond = 0 )
fileValuesStrings.append( "(%s, %s, %s, %s, %s, '%s', '%s', '%s', '%s', '%s', '%s', %s)" % ( dirID,
size, s_uid, s_gid, statusID, fileName, guid,
checksum, checksumtype, utcNow, utcNow, mode ) )
fileDescStrings.append( "(DirID = %s AND FileName = '%s')" % ( dirID, fileName ) )
fileValuesStr = ",".join( fileValuesStrings )
fileDescStr = " OR ".join( fileDescStrings )
result = self.db.executeStoredProcedureWithCursor( 'ps_insert_multiple_file', ( fileValuesStr, fileDescStr ) )
return result
def __chunks( self, l, n ):
""" Yield successive n-sized chunks from l.
"""
for i in xrange( 0, len( l ), n ):
yield l[i:i + n]
  def _insertFiles( self, lfns, uid, gid, connection = False ):
    """ Insert new files. lfns is a dictionary indexed on lfn, the values are
        mandatory: DirID, Size, Checksum, GUID
        optional : Owner (dict with username and group), ChecksumType (Adler32 by default), Mode (db.umask by default)

       :param lfns : lfns and info to insert
       :param uid : user id, overwriten by Owner['username'] if defined
       :param gid : user id, overwriten by Owner['group'] if defined

       :returns: S_OK with {'Successful': {lfn: info + FileID}, 'Failed': {lfn: reason}}
    """
    connection = self._getConnection(connection)
    failed = {}
    successful = {}
    # Every new file gets the AprioriGood status
    res = self._getStatusInt( 'AprioriGood', connection = connection )
    if res['OK']:
      statusID = res['Value']
    else:
      return res
    lfnsToRetry = []
    fileValues = {}
    fileDesc = {}
    # Prepare each file separately
    for lfn in lfns:
      # Get all the info
      fileInfo = lfns[lfn]
      dirID = fileInfo['DirID']
      fileName = os.path.basename( lfn )
      size = fileInfo['Size']
      ownerDict = fileInfo.get( 'Owner', None )
      checksum = fileInfo['Checksum']
      checksumtype = fileInfo.get( 'ChecksumType', 'Adler32' )
      guid = fileInfo['GUID']
      mode = fileInfo.get( 'Mode', self.db.umask )
      s_uid = uid
      s_gid = gid
      # overwrite the s_uid and s_gid if defined in the lfn info
      # (a failed lookup silently keeps the caller-supplied uid/gid)
      if ownerDict:
        result = self.db.ugManager.getUserAndGroupID( ownerDict )
        if result['OK']:
          s_uid, s_gid = result['Value']
      fileValues[lfn] = ( dirID, size, s_uid, s_gid,
                          statusID, fileName, guid,
                          checksum, checksumtype, mode )
      fileDesc[( dirID, fileName )] = lfn
    # Insert by chunks: either a whole chunk goes in at once, or it is
    # retried below file by file
    chunkSize = 200
    allChunks = list( self.__chunks( lfns.keys(), chunkSize ) )
    for lfnChunk in allChunks:
      result = self.__insertMultipleFiles( fileValues, lfnChunk )
      if result['OK']:
        allIds = result['Value']
        # The procedure returns one ( dirID, fileName, fileID ) per inserted row
        for dirId, fileName, fileID in allIds:
          lfn = fileDesc[ ( dirId, fileName ) ]
          successful[lfn] = lfns[lfn]
          successful[lfn]['FileID'] = fileID
      else:
        lfnsToRetry.extend( lfnChunk )
    # If we are here, that means that the multiple insert failed, so we do one by one
    for lfn in lfnsToRetry:
      dirID, size, s_uid, s_gid, statusID, fileName, guid, checksum, checksumtype, mode = fileValues[lfn]
      # insert
      result = self.db.executeStoredProcedureWithCursor( 'ps_insert_file', ( dirID, size, s_uid, s_gid,
                                                                             statusID, fileName, guid,
                                                                             checksum, checksumtype, mode ) )
      if not result['OK']:
        failed[lfn] = result['Message']
      else:
        fileID = result['Value'][0][0]
        successful[lfn] = lfns[lfn]
        successful[lfn]['FileID'] = fileID
    return S_OK( { 'Successful' : successful, 'Failed' : failed} )
def _getFileIDFromGUID( self, guids, connection = False ):
""" Returns the file ids from list of guids
:param guids : list of guid
:returns dictionary < guid : fileId >
"""
connection = self._getConnection(connection)
if not guids:
return S_OK({})
if type( guids ) not in [ListType, TupleType]:
guids = [guids]
# formatedGuids = ','.join( [ '"%s"' % guid for guid in guids ] )
formatedGuids = stringListToString( guids )
result = self.db.executeStoredProcedureWithCursor( 'ps_get_file_ids_from_guids', ( formatedGuids, ) )
if not result['OK']:
return result
guidDict = dict( ( guid, fileID ) for guid, fileID in result['Value'] )
return S_OK(guidDict)
  def getLFNForGUID( self, guids, connection = False ):
    """ Returns the lfns matching given guids

       :param guids : a single guid or a list/tuple of guids
       :returns: S_OK with {'Successful': {guid: lfn}, 'Failed': {guid: reason}}
    """
    connection = self._getConnection( connection )
    if not guids:
      return S_OK( {} )
    # Accept a single guid as well as a sequence
    if type( guids ) not in [ListType, TupleType]:
      guids = [guids]
    # The guids are passed as one quoted, comma separated string (IN clause)
    formatedGuids = stringListToString( guids )
    result = self.db.executeStoredProcedureWithCursor( 'ps_get_lfns_from_guids', ( formatedGuids, ) )
    if not result['OK']:
      return result
    guidDict = dict( ( guid, lfn ) for guid, lfn in result['Value'] )
    # Any guid absent from the result set does not exist
    failedGuid = set( guids ) - set( guidDict )
    failed = dict.fromkeys( failedGuid, "GUID does not exist" ) if failedGuid else {}
    return S_OK( {"Successful" : guidDict, "Failed" : failed} )
######################################################
#
# _deleteFiles related methods
#
def _deleteFiles( self, fileIDs, connection = False ):
""" Delete a list of files and the associated replicas
:param fileIDS : list of fileID
:returns S_OK() or S_ERROR(msg)
"""
connection = self._getConnection(connection)
replicaPurge = self.__deleteFileReplicas(fileIDs)
filePurge = self.__deleteFiles(fileIDs,connection=connection)
if not replicaPurge['OK']:
return replicaPurge
if not filePurge['OK']:
return filePurge
return S_OK()
  def __deleteFileReplicas( self, fileIDs, connection = False ):
    """ Delete all the replicas from the file ids

       :param fileIDs : list of file ids
       :returns S_OK() or S_ERROR(msg)
    """
    connection = self._getConnection(connection)
    # Deleting nothing is a success
    if not fileIDs:
      return S_OK()
    # The ids are passed as one comma separated string (IN clause)
    formatedFileIds = intListToString( fileIDs )
    result = self.db.executeStoredProcedureWithCursor( 'ps_delete_replicas_from_file_ids', ( formatedFileIds, ) )
    if not result['OK']:
      return result
    # The procedure returns a ( errno, message ) pair; non-zero errno means failure
    errno, msg = result['Value'][0]
    if errno:
      return S_ERROR( msg )
    return S_OK()
def __deleteFiles(self,fileIDs,connection=False):
""" Delete the files from their ids
:param fileIDs list of file ids
:returns S_OK() or S_ERROR(msg)
"""
connection = self._getConnection(connection)
formatedFileIds = intListToString( fileIDs )
result = self.db.executeStoredProcedureWithCursor( 'ps_delete_files', ( formatedFileIds, ) )
if not result['OK']:
return result
errno, msg = result['Value'][0]
if errno:
return S_ERROR( msg )
return S_OK()
def __insertMultipleReplicas ( self, allReplicaValues, lfnsChunk ):
""" Insert multiple replicas in one query. However, if there is a problem
with one replica, all the query is rolled back.
:param allReplicaValues : dictionary of tuple with all the information about possibly more
replica than we want to insert
:param lfnsChunk : list of lfn that we want to insert
"""
repValuesStrings = []
repDescStrings = []
for lfn in lfnsChunk:
fileID, seID, statusID, replicaType, pfn = allReplicaValues[lfn]
utcNow = datetime.datetime.utcnow().replace( microsecond = 0 )
repValuesStrings.append( "(%s,%s,'%s','%s','%s','%s','%s')" % ( fileID,
seID, statusID, replicaType, utcNow, utcNow, pfn ) )
repDescStrings.append( "(r.FileID = %s AND SEID = %s)" % ( fileID, seID ) )
repValuesStr = ",".join( repValuesStrings )
repDescStr = " OR ".join( repDescStrings )
result = self.db.executeStoredProcedureWithCursor( 'ps_insert_multiple_replica', ( repValuesStr, repDescStr ) )
return result
def _insertReplicas( self, lfns, master = False, connection = False ):
""" Insert new replicas. lfns is a dictionary with one entry for each file. The keys are lfns, and values are dict
with mandatory attributes : FileID, SE (the name), PFN
:param lfns: lfns and info to insert
:param master: true if they are master replica, otherwise they will be just 'Replica'
:return successful/failed convention, with successful[lfn] = true
"""
chunkSize = 200
connection = self._getConnection(connection)
# Add the files
failed = {}
successful = {}
# Get the status id of AprioriGood
res = self._getStatusInt( 'AprioriGood', connection = connection )
if not res['OK']:
return res
statusID = res['Value']
lfnsToRetry = []
repValues = {}
repDesc = {}
# treat each file after each other
for lfn in lfns.keys():
fileID = lfns[lfn]['FileID']
seName = lfns[lfn]['SE']
if type(seName) in StringTypes:
seList = [seName]
elif type(seName) == ListType:
seList = seName
else:
return S_ERROR('Illegal type of SE list: %s' % str( type( seName ) ) )
replicaType = 'Master' if master else 'Replica'
pfn = lfns[lfn]['PFN']
# treat each replica of a file after the other
# (THIS CANNOT WORK... WE ARE ONLY CAPABLE OF DOING ONE REPLICA PER FILE AT THE TIME)
for seName in seList:
# get the SE id
res = self.db.seManager.findSE(seName)
if not res['OK']:
failed[lfn] = res['Message']
continue
seID = res['Value']
# This is incompatible with adding multiple replica at the time for a given file
repValues[lfn] = ( fileID, seID, statusID, replicaType, pfn )
repDesc[( fileID, seID )] = lfn
allChunks = list( self.__chunks( lfns.keys(), chunkSize ) )
for lfnChunk in allChunks:
result = self.__insertMultipleReplicas( repValues, lfnChunk )
if result['OK']:
allIds = result['Value']
for fileId, seId, repId in allIds:
lfn = repDesc[ ( fileId, seId ) ]
successful[lfn] = True
lfns[lfn]['RepID'] = repId
else:
lfnsToRetry.extend( lfnChunk )
for lfn in lfnsToRetry:
fileID, seID, statusID, replicaType, pfn = repValues[lfn]
# insert the replica and its info
result = self.db.executeStoredProcedureWithCursor( 'ps_insert_replica',
( fileID, seID, statusID, replicaType, pfn ) )
if not result['OK']:
failed[lfn] = result['Message']
else:
replicaID = result['Value'][0][0]
lfns[lfn]['RepID'] = replicaID
successful[lfn] = True
return S_OK({'Successful':successful,'Failed':failed})
def _getRepIDsForReplica( self, replicaTuples, connection = False ):
""" Get the Replica IDs for (fileId, SEID) couples
:param repliacTuples : list of (fileId, SEID) couple
:returns { fileID : { seID : RepID } }
"""
connection = self._getConnection(connection)
replicaDict = {}
for fileID,seID in replicaTuples:
result = self.db.executeStoredProcedure( 'ps_get_replica_id', ( fileID, seID, 'repIdOut' ), outputIds = [2] )
if not result['OK']:
return result
repID = result['Value'][0]
# if the replica exists, we add it to the dict
if repID:
replicaDict.setdefault( fileID, {} ).setdefault( seID, repID )
return S_OK( replicaDict )
######################################################
#
# _deleteReplicas related methods
#
def _deleteReplicas(self,lfns,connection=False):
""" Deletes replicas. The deletion of replicas that do not exist is successful
:param lfns : dictinary with lfns as key, and the value is a dict with a mandatory "SE" key,
corresponding to the SE name or SE ID
:returns successful/failed convention, with successful[lfn] = True
"""
connection = self._getConnection(connection)
failed = {}
successful = {}
# First we get the fileIds from our lfns
res = self._findFiles( lfns.keys(), ['FileID'], connection = connection )
# If the file does not exist we consider the deletion successful
for lfn, error in res['Value']['Failed'].items():
if error == 'No such file or directory':
successful[lfn] = True
else:
failed[lfn] = error
lfnFileIDDict = res['Value']['Successful']
for lfn,fileDict in lfnFileIDDict.items():
fileID = fileDict['FileID']
# Then we get our StorageElement Id (cached in seManager)
se = lfns[lfn]['SE']
# if se is already the se id, findSE will return it
res = self.db.seManager.findSE( se )
if not res['OK']:
return res
seID = res['Value']
# Finally remove the replica
result = self.db.executeStoredProcedureWithCursor( 'ps_delete_replica_from_file_and_se_ids', ( fileID, seID ) )
if not result['OK']:
failed[lfn] = result['Message']
continue
errno, errMsg = result['Value'][0]
if errno:
failed[lfn] = errMsg
else:
successful[lfn] = True
return S_OK( {"Successful" : successful, "Failed" : failed} )
######################################################
#
# _setReplicaStatus _setReplicaHost _setReplicaParameter methods
# _setFileParameter method
#
  def _setReplicaStatus( self, fileID, se, status, connection = False ):
    """ Set the status of a replica

       :param fileID : file id
       :param se : se name or se id
       :param status : status to be applied (must be in db.validReplicaStatus)

       :returns S_OK() or S_ERROR(msg)
    """
    # Refuse unknown statuses upfront
    if not status in self.db.validReplicaStatus:
      return S_ERROR( 'Invalid replica status %s' % status )
    connection = self._getConnection(connection)
    # Statuses are stored as integer ids
    res = self._getStatusInt(status,connection=connection)
    if not res['OK']:
      return res
    statusID = res['Value']
    # Then we get our StorageElement Id (cached in seManager)
    res = self.db.seManager.findSE( se )
    if not res['OK']:
      return res
    seID = res['Value']
    result = self.db.executeStoredProcedureWithCursor( 'ps_set_replica_status', ( fileID, seID, statusID ) )
    if not result['OK']:
      return result
    affected = result['Value'][0][0] # Affected is the number of raws updated
    # No row updated means there was no such replica
    if not affected:
      return S_ERROR( "Replica does not exist" )
    else:
      return S_OK()
def _setReplicaHost( self, fileID, se, newSE, connection = False ):
""" Move a replica from one SE to another (I don't think this should be called
:param fileID : file id
:param se : se name or se id of the previous se
:param newSE : se name or se id of the new se
:returns S_OK() or S_ERROR(msg)
"""
connection = self._getConnection(connection)
# Get the new se id
res = self.db.seManager.findSE(newSE)
if not res['OK']:
return res
newSEID = res['Value']
# Get the old se id
res = self.db.seManager.findSE( se )
if not res['OK']:
return res
oldSEID = res['Value']
# update
result = self.db.executeStoredProcedureWithCursor( 'ps_set_replica_host', ( fileID, oldSEID, newSEID ) )
if not result['OK']:
return result
affected = result['Value'][0][0]
if not affected:
return S_ERROR( "Replica does not exist" )
else:
return S_OK()
  def _setFileParameter( self, fileID, paramName, paramValue, connection = False ):
    """ Generic method to set a file parameter

       :param fileID : id of the file
       :param paramName : the file parameter you want to change
                          It should be one of [ UID, GID, Status, Mode]. However, in case of
                          unexpected parameter, and to stay compatible with the other Manager,
                          there is a manual request done.
       :param paramValue : the value (raw, or id) to insert

       :returns S_OK() or S_ERROR
    """
    connection = self._getConnection(connection)
    # The PS associated with a given parameter
    psNames = {'UID' : 'ps_set_file_uid',
               'GID' : 'ps_set_file_gid',
               'Status' : 'ps_set_file_status',
               'Mode' : 'ps_set_file_mode',
               }
    psName = psNames.get(paramName, None)
    # If there is an associated procedure, we go for it
    if psName:
      result = self.db.executeStoredProcedureWithCursor( psName, ( fileID, paramValue ) )
      if not result['OK']:
        return result
      _affected = result['Value'][0][0]
      # If affected = 0, the file does not exist, but who cares...
    # In case this is a 'new' parameter, we have a failback solution, but we should add a specific ps for it
    else:
      # NOTE(review): this fallback interpolates paramName/paramValue straight
      # into the SQL, and passes the scalar fileID to intListToString --
      # confirm that helper accepts a scalar
      req = "UPDATE FC_Files SET %s='%s', ModificationDate=UTC_TIMESTAMP() WHERE FileID IN (%s)"\
            % ( paramName, paramValue, intListToString( fileID ) )
      return self.db._update( req, connection )
    return S_OK()
######################################################
#
# _getFileReplicas related methods
#
  def _getFileReplicas( self, fileIDs, fields_input = ['PFN'], allStatus = False, connection = False ):
    """ Get replicas for the given list of files specified by their fileIDs

       :param fileIDs : list of file ids
       :param fields_input : metadata of the Replicas we are interested in
       :param allStatus : if True, all the Replica statuses will be considered, otherwise, only the db.visibleReplicaStatus

       :returns S_OK with a dict { fileID : { SE name : dict of metadata } }
               (a file without any visible replica gets an empty dict)
    """
    connection = self._getConnection( connection )
    # Work on a copy since the Status field is appended below
    fields = list( fields_input )
    # The Status is always fetched
    if 'Status' not in fields:
      fields.append( 'Status' )
    replicas = {}
    # Format the status to be used in a IN clause in the stored procedure
    fStatus = stringListToString( self.db.visibleReplicaStatus )
    # Column names in the order returned by the stored procedure
    fieldNames = [ "FileID", "SE", "Status", "RepType", "CreationDate", "ModificationDate", "PFN"]
    # One procedure call per file id
    for fileID in fileIDs:
      result = self.db.executeStoredProcedureWithCursor( 'ps_get_all_info_of_replicas',
                                                         ( fileID, allStatus, fStatus ) )
      if not result['OK']:
        return result
      rows = result['Value']
      # Keep an (empty) entry for a file without replicas
      if not rows:
        replicas[fileID] = {}
      for row in rows:
        rowDict = dict( zip( fieldNames, row ) )
        # Returns only the required metadata
        se = rowDict["SE"]
        repForFile = replicas.setdefault( fileID, {} )
        repForFile[se] = dict( ( key, rowDict.get( key, "Unknown metadata field" ) ) for key in fields )
    return S_OK(replicas)
def countFilesInDir( self, dirId ):
""" Count how many files there is in a given Directory
:param dirID : directory id
:returns S_OK(value) or S_ERROR
"""
result = self.db.executeStoredProcedure( 'ps_count_files_in_dir', ( dirId, 'ret1' ), outputIds = [1] )
if not result['OK']:
return result
res = S_OK( result['Value'][0] )
return res
########################################################################################################
#
# We overwrite some methods from the base class because of the new DB constraints or perf reasons
#
# Some methods could be inherited in the future if we have perf problems. For example
# * setFileGroup
# * setFileOwner
# * setFileMode
# * changePath*
#
########################################################################################################
  def _updateDirectoryUsage( self, directorySEDict, change, connection = False ):
    """ This updates the directory usage, but is now done by triggers in the DB.
        Kept as a no-op so the base class interface is honoured.
    """
    return S_OK()
def _computeStorageUsageOnRemoveFile( self, lfns, connection = False ):
"""Again nothing to compute, all done by the triggers"""
directorySESizeDict = {}
return S_OK( directorySESizeDict )
# "REMARQUE : THIS IS STILL TRUE, BUT YOU MIGHT WANT TO CHECK FOR A GIVEN GUID ANYWAY
# def _checkUniqueGUID( self, lfns, connection = False ):
# """ The GUID unicity is ensured at the DB level, so we will have similar message if the insertion fails"""
#
# failed = {}
# return failed
  def getDirectoryReplicas( self, dirID, path, allStatus = False, connection = False ):
    """
      This is defined in the FileManagerBase but it relies on the SEManager to get the SE names.
      It is good practice in software, but since the SE and Replica tables are bound together in the DB,
      I might as well resolve the name in the query

      Get the replicas for all the Files in the given Directory

      :param dirID : ID of the directory
      :param path : useless
      :param allStatus : whether all replicas and file status are considered
                         If False, take the visibleFileStatus and visibleReplicaStatus values from the configuration

      :returns S_OK( { fileName : { seName : [ pfn, ... ] } } )
    """
    # We format the visible file/replica satus so we can give it as argument to the ps
    # It is used in an IN clause, so it looks like --'"AprioriGood","Trash"'--
    fStatus = stringListToString( self.db.visibleFileStatus )
    rStatus = stringListToString( self.db.visibleReplicaStatus )
    result = self.db.executeStoredProcedureWithCursor( 'ps_get_replicas_for_files_in_dir', ( dirID, allStatus, fStatus, rStatus ) )
    if not result['OK']:
      return result
    resultDict = {}
    # Group the pfns by file name and SE name (a file may well have
    # several replicas on several SEs)
    for fileName, _fileID, seName, pfn in result['Value']:
      resultDict.setdefault( fileName, {} ).setdefault( seName, [] ).append( pfn )
    return S_OK( resultDict )
|
coberger/DIRAC
|
DataManagementSystem/DB/FileCatalogComponents/WithFkAndPs/FileManagerPs.py
|
Python
|
gpl-3.0
| 30,500
|
# !/usr/bin/env python
# -*- coding:utf-8 -*-
"""
@File : `$NAME.py`
@Author : `long`
@Date : `2016-01-08`
@About : 'Loads project.conf (sections "db" and "redis") into module-level dicts.'
"""
import ConfigParser
import os

# Read the configuration file located next to this module
config_parser = ConfigParser.ConfigParser()
# Directory containing this file (symlinks resolved)
project_path = os.path.split(os.path.realpath(__file__))[0]
file_path = project_path + '/project.conf'
config_parser.read(file_path)

# Plain dicts of the [db] and [redis] sections; raises NoSectionError if a
# section is missing from project.conf
DB_CONFIG = dict(config_parser.items("db"))
REDIS_CONFIG = dict(config_parser.items("redis"))
|
ST9527/django_game_server
|
configs/__init__.py
|
Python
|
gpl-2.0
| 470
|
"""Module for generating candidate k-itemsets."""
from itertools import combinations
def generate_candidate_one_itemset(transactions):
    """Generates the candidate 1-itemsets.

    Args:
        transactions: a non-empty iterable of sets of items.

    Returns:
        A set of singleton frozensets, one per distinct item appearing in
        any transaction.
    """
    # frozenset([item, item]) in the original carried a redundant duplicate:
    # a frozenset de-duplicates its elements, so one occurrence is enough.
    return {frozenset([item]) for item in set.union(*transactions)}
def generate_candidate_itemsets(itemsets):
    """Generates candidate k-itemsets by joining pairs of (k-1)-itemsets."""
    candidates = set()
    for left, right in combinations(itemsets, 2):
        # The symmetric difference holds the items the two sets do NOT share;
        # exactly two unshared items means the sets agree on all the others.
        unshared = list(left ^ right)
        if len(unshared) != 2:
            continue
        # NOTE(review): the join additionally requires the two unshared items
        # to start with different first elements -- presumably a
        # domain-specific pruning condition; verify against the callers.
        if unshared[0][0] != unshared[1][0]:
            candidates.add(left | right)
    return candidates
|
christian-stephen/apriori-algorithm
|
apriori/generate_candidates.py
|
Python
|
mit
| 727
|
#!/bin/env python
# vim: expandtab:tabstop=4:shiftwidth=4
# Copyright 2016 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script is used to test basic SSO services and report to Zabbix.
It tries to get a temporary token from the IDP host, and use it to authenticate to each
of our ops-manged AWS accounts as found in ~/etc/openshift_tools/aws_accounts.txt.
It will run an IAM command and report an issue to Zabbix if any received HTTP status codes != 200.
"""
from __future__ import print_function
import os
import yaml
# Reason: disable pylint import-error because our modules aren't loaded on jenkins.
# pylint: disable=import-error
import boto3
import botocore.exceptions
from openshift_tools import saml_aws_creds
from openshift_tools.monitoring.zagg_sender import ZaggSender
class CheckIam(object):
    """ Class to check HTTP return codes of IAM commands. """

    def __init__(self):
        # Placeholder for a boto3 client; kept for interface compatibility
        self.client = None

    @staticmethod
    def check_accounts(path):
        """ Retrieves a list of the config-managed ops AWS accounts.

            Args:
                path: path of the accounts file, one "name:number" entry per line.

            Returns:
                A list containing each of the lines found in the aws accounts file
                (trailing whitespace stripped).

            Raises:
                A ValueError if the path does not exist.
        """
        if not os.path.isfile(path):
            raise ValueError(path + ' does not exist.')
        with open(path) as open_file:
            return [line.rstrip() for line in open_file.readlines()]

    @staticmethod
    def get_token(aws_account, ops_idp_host):
        """ Generate temporary SSO access credentials.
            Requires the config file containing the IDP hostname.

            Returns:
                A temporary boto3 IAM client created with a session token provided
                by the IDP host, or None when the account metadata is missing.
        """
        ssh_args = None
        # if running in a container (like the monitoring container)
        # use alternate ssh key and known host file
        if 'CONTAINER' in os.environ:
            ssh_args = \
                ['-i', '/secrets/ssh-id-rsa', '-o', 'UserKnownHostsFile=/configdata/ssh_known_hosts']

        try:
            creds = saml_aws_creds.get_temp_credentials(
                metadata_id='urn:amazon:webservices:%s' % aws_account,
                idp_host=ops_idp_host,
                ssh_args=ssh_args
            )
            return boto3.client(
                'iam',
                aws_access_key_id=creds['AccessKeyId'],
                aws_secret_access_key=creds['SecretAccessKey'],
                aws_session_token=creds['SessionToken']
            )
        except ValueError as client_exception:
            # Py3 fix: exception objects no longer carry a .message attribute
            message = str(client_exception)
            if 'Error retrieving SAML token' in message and \
               'Metadata not found' in message:
                print('Metadata for %s missing or misconfigured, skipping' % aws_account)
            # Any other ValueError is also swallowed (returns None), matching
            # the original best-effort behaviour

    def main(self):
        """ Main function: checks each ops-managed AWS account with an IAM call
            and reports the number of failures to Zabbix. """
        yaml_config = {}
        config_path = '/etc/openshift_tools/sso-config.yaml'
        if os.path.isfile(config_path):
            with open(config_path, 'r') as sso_config:
                yaml_config = yaml.load(sso_config)

        zag = ZaggSender()
        ops_accounts = self.check_accounts(yaml_config["aws_account_file"])
        zabbix_key = "sso.iam.not.reachable"
        key_value = 0

        for account in ops_accounts:
            account_name, account_number = account.split(':')
            # Bug fix: temp_client must be reset every iteration; it used to be
            # unbound (NameError) when get_token raised on the very first
            # account, and stale from the previous account on later failures.
            temp_client = None
            try:
                temp_client = self.get_token(account_number, yaml_config["idp_host"])
            except botocore.exceptions.ClientError as client_error:
                # Py3 fix: str() instead of the Py2-only .message attribute
                if 'Not authorized to perform sts:AssumeRoleWithSAML' in str(client_error):
                    print('Error: not authorized to use SSO tokens with %s' % account_name)
                key_value += 1
            if not temp_client:
                continue

            try:
                acc_status = temp_client.get_role(RoleName='iam_monitoring')
                if acc_status['ResponseMetadata']['HTTPStatusCode'] != 200:
                    print("HTTP request failed on account %s (%s)" \
                        % (account_name, account_number))
                    key_value += 1
                if not acc_status['Role']['AssumeRolePolicyDocument']:
                    print("No policy document returned for account %s (%s)" \
                        % (account_name, account_number))
                    key_value += 1
            except botocore.exceptions.ClientError as boto_exception:
                print("Failed on account %s (%s) due to exception: %s" \
                    %(account_name, account_number, str(boto_exception)))
                key_value += 1

        zag.add_zabbix_keys({zabbix_key: key_value})
        zag.send_metrics()
if __name__ == '__main__':
    # Run the IAM reachability check when executed as a script
    CheckIam().main()
|
rhdedgar/openshift-tools
|
scripts/iam-tools/check_sso_service.py
|
Python
|
apache-2.0
| 5,581
|
from django.contrib.admin import site
from girox.core.admin import CustomModelAdmin
from girox.advertising.models import Advertiser
class AdvertiserModelAdmin(CustomModelAdmin):
    """Admin for Advertiser: shows the title, the link and a banner thumbnail."""

    list_display = ('title_tags', 'link_tags', 'admin_thumbnail')

    def title_tags(self, obj):
        """Plain advertiser title."""
        return obj.title
    title_tags.short_description = 'Título'
    title_tags.allow_tags = True

    def link_tags(self, obj):
        """Clickable anchor pointing at the advertiser URL."""
        return u'<a href="{0}">{0}</a>'.format(obj.link)
    link_tags.short_description = 'Link'
    link_tags.allow_tags = True

    def admin_thumbnail(self, obj):
        """300x250 crop of the banner rendered as an <img> tag."""
        cropped = obj.banner.crop['300x250']
        return u'<img src="%s" />' % cropped.url
    admin_thumbnail.short_description = 'Thumbnail'
    admin_thumbnail.allow_tags = True


site.register(Advertiser, AdvertiserModelAdmin)
|
sandrofolk/girox
|
girox/advertising/admin.py
|
Python
|
gpl-3.0
| 789
|
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import contextlib
import glob
import os
import os.path as osp
import re
import shlex
import shutil
import sys
from Cython.Distutils import build_ext as _build_ext
import Cython
import pkg_resources
from setuptools import setup, Extension, Distribution
from os.path import join as pjoin
from distutils.command.clean import clean as _clean
from distutils.util import strtobool
from distutils import sysconfig
# Check if we're running 64-bit Python
is_64_bit = sys.maxsize > 2**32

# Bug fix: comparing version strings lexicographically is wrong -- e.g.
# '0.9' < '0.29' is False as a string although 0.9 is the older release.
# Compare parsed versions instead (pkg_resources is already imported).
if pkg_resources.parse_version(Cython.__version__) < \
        pkg_resources.parse_version('0.29'):
    raise Exception('Please upgrade to Cython 0.29 or newer')

setup_dir = os.path.abspath(os.path.dirname(__file__))

# Suffix of compiled extension modules; very old Pythons expose it under
# the 'SO' config variable instead of 'EXT_SUFFIX'
ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
if ext_suffix is None:
    # https://bugs.python.org/issue19555
    ext_suffix = sysconfig.get_config_var('SO')
@contextlib.contextmanager
def changed_dir(dirname):
    """Context manager: chdir into *dirname*, restoring the previous working
    directory on exit, even when the body raises."""
    previous = os.getcwd()
    os.chdir(dirname)
    try:
        yield
    finally:
        os.chdir(previous)
class clean(_clean):
    """Extends distutils' clean command to also delete generated files
    (the list is currently empty)."""

    def run(self):
        _clean.run(self)
        # Placeholder list of generated files to remove; missing files
        # are silently ignored.
        for leftover in []:
            try:
                os.remove(leftover)
            except OSError:
                pass
class build_ext(_build_ext):
_found_names = ()
def build_extensions(self):
numpy_incl = pkg_resources.resource_filename('numpy', 'core/include')
self.extensions = [ext for ext in self.extensions
if ext.name != '__dummy__']
for ext in self.extensions:
if (hasattr(ext, 'include_dirs') and
numpy_incl not in ext.include_dirs):
ext.include_dirs.append(numpy_incl)
_build_ext.build_extensions(self)
def run(self):
self._run_cmake()
_build_ext.run(self)
# adapted from cmake_build_ext in dynd-python
# github.com/libdynd/dynd-python
description = "Build the C-extensions for arrow"
user_options = ([('cmake-generator=', None, 'CMake generator'),
('extra-cmake-args=', None, 'extra arguments for CMake'),
('build-type=', None,
'build type (debug or release), default release'),
('boost-namespace=', None,
'namespace of boost (default: boost)'),
('with-cuda', None, 'build the Cuda extension'),
('with-flight', None, 'build the Flight extension'),
('with-parquet', None, 'build the Parquet extension'),
('with-static-parquet', None, 'link parquet statically'),
('with-static-boost', None, 'link boost statically'),
('with-plasma', None, 'build the Plasma extension'),
('with-tensorflow', None,
'build pyarrow with TensorFlow support'),
('with-orc', None, 'build the ORC extension'),
('with-gandiva', None, 'build the Gandiva extension'),
('generate-coverage', None,
'enable Cython code coverage'),
('bundle-boost', None,
'bundle the (shared) Boost libraries'),
('bundle-arrow-cpp', None,
'bundle the Arrow C++ libraries')] +
_build_ext.user_options)
    def initialize_options(self):
        """Seed every build option from its PYARROW_* environment variable."""
        _build_ext.initialize_options(self)
        self.cmake_generator = os.environ.get('PYARROW_CMAKE_GENERATOR')
        if not self.cmake_generator and sys.platform == 'win32':
            self.cmake_generator = 'Visual Studio 14 2015 Win64'
        self.extra_cmake_args = os.environ.get('PYARROW_CMAKE_OPTIONS', '')
        self.build_type = os.environ.get('PYARROW_BUILD_TYPE',
                                         'release').lower()
        self.boost_namespace = os.environ.get('PYARROW_BOOST_NAMESPACE',
                                              'boost')
        self.cmake_cxxflags = os.environ.get('PYARROW_CXXFLAGS', '')
        if sys.platform == 'win32':
            # Cannot do debug builds in Windows unless Python itself is a debug
            # build
            if not hasattr(sys, 'gettotalrefcount'):
                self.build_type = 'release'
        # Optional components, all off by default ('0').
        self.with_s3 = strtobool(
            os.environ.get('PYARROW_WITH_S3', '0'))
        self.with_cuda = strtobool(
            os.environ.get('PYARROW_WITH_CUDA', '0'))
        self.with_flight = strtobool(
            os.environ.get('PYARROW_WITH_FLIGHT', '0'))
        self.with_parquet = strtobool(
            os.environ.get('PYARROW_WITH_PARQUET', '0'))
        self.with_static_parquet = strtobool(
            os.environ.get('PYARROW_WITH_STATIC_PARQUET', '0'))
        self.with_static_boost = strtobool(
            os.environ.get('PYARROW_WITH_STATIC_BOOST', '0'))
        self.with_plasma = strtobool(
            os.environ.get('PYARROW_WITH_PLASMA', '0'))
        self.with_tensorflow = strtobool(
            os.environ.get('PYARROW_WITH_TENSORFLOW', '0'))
        self.with_orc = strtobool(
            os.environ.get('PYARROW_WITH_ORC', '0'))
        self.with_gandiva = strtobool(
            os.environ.get('PYARROW_WITH_GANDIVA', '0'))
        self.generate_coverage = strtobool(
            os.environ.get('PYARROW_GENERATE_COVERAGE', '0'))
        self.bundle_arrow_cpp = strtobool(
            os.environ.get('PYARROW_BUNDLE_ARROW_CPP', '0'))
        self.bundle_boost = strtobool(
            os.environ.get('PYARROW_BUNDLE_BOOST', '0'))
CYTHON_MODULE_NAMES = [
'lib',
'_fs',
'_csv',
'_json',
'_cuda',
'_flight',
'_parquet',
'_orc',
'_plasma',
'_s3fs',
'gandiva']
    def _run_cmake(self):
        """Configure and run the CMake build in ``build_temp``, then move
        the built Cython extensions (and, when bundling, the shared
        libraries) into the Python build tree expected by distutils."""
        # check if build_type is correctly passed / set
        if self.build_type.lower() not in ('release', 'debug'):
            raise ValueError("--build-type (or PYARROW_BUILD_TYPE) needs to "
                             "be 'release' or 'debug'")
        # The directory containing this setup.py
        source = osp.dirname(osp.abspath(__file__))
        # The staging directory for the module being built
        build_temp = pjoin(os.getcwd(), self.build_temp)
        build_lib = os.path.join(os.getcwd(), self.build_lib)
        saved_cwd = os.getcwd()
        if not os.path.isdir(self.build_temp):
            self.mkpath(self.build_temp)
        # Change to the build directory
        with changed_dir(self.build_temp):
            # Detect if we built elsewhere
            if os.path.isfile('CMakeCache.txt'):
                cachefile = open('CMakeCache.txt', 'r')
                cachedir = re.search('CMAKE_CACHEFILE_DIR:INTERNAL=(.*)',
                                     cachefile.read()).group(1)
                cachefile.close()
                # A cache pointing elsewhere means this tree was configured
                # for a different location; bail out rather than rebuild.
                if (cachedir != build_temp):
                    return
            static_lib_option = ''
            # Translate the option flags into -D definitions for CMake.
            cmake_options = [
                '-DPYTHON_EXECUTABLE=%s' % sys.executable,
                static_lib_option,
            ]
            if self.cmake_generator:
                cmake_options += ['-G', self.cmake_generator]
            if self.with_s3:
                cmake_options.append('-DPYARROW_BUILD_S3=on')
            if self.with_cuda:
                cmake_options.append('-DPYARROW_BUILD_CUDA=on')
            if self.with_flight:
                cmake_options.append('-DPYARROW_BUILD_FLIGHT=on')
            if self.with_parquet:
                cmake_options.append('-DPYARROW_BUILD_PARQUET=on')
            if self.with_static_parquet:
                cmake_options.append('-DPYARROW_PARQUET_USE_SHARED=off')
            if not self.with_static_boost:
                cmake_options.append('-DPYARROW_BOOST_USE_SHARED=on')
            else:
                cmake_options.append('-DPYARROW_BOOST_USE_SHARED=off')
            if self.with_plasma:
                cmake_options.append('-DPYARROW_BUILD_PLASMA=on')
            if self.with_tensorflow:
                cmake_options.append('-DPYARROW_USE_TENSORFLOW=on')
            if self.with_orc:
                cmake_options.append('-DPYARROW_BUILD_ORC=on')
            if self.with_gandiva:
                cmake_options.append('-DPYARROW_BUILD_GANDIVA=on')
            if len(self.cmake_cxxflags) > 0:
                cmake_options.append('-DPYARROW_CXXFLAGS={0}'
                                     .format(self.cmake_cxxflags))
            if self.generate_coverage:
                cmake_options.append('-DPYARROW_GENERATE_COVERAGE=on')
            if self.bundle_arrow_cpp:
                cmake_options.append('-DPYARROW_BUNDLE_ARROW_CPP=ON')
                # ARROW-1090: work around CMake rough edges
                if 'ARROW_HOME' in os.environ and sys.platform != 'win32':
                    pkg_config = pjoin(os.environ['ARROW_HOME'], 'lib',
                                       'pkgconfig')
                    os.environ['PKG_CONFIG_PATH'] = pkg_config
                    del os.environ['ARROW_HOME']
            if self.bundle_boost:
                cmake_options.append('-DPYARROW_BUNDLE_BOOST=ON')
            cmake_options.append('-DCMAKE_BUILD_TYPE={0}'
                                 .format(self.build_type.lower()))
            if self.boost_namespace != 'boost':
                cmake_options.append('-DBoost_NAMESPACE={}'
                                     .format(self.boost_namespace))
            extra_cmake_args = shlex.split(self.extra_cmake_args)
            build_tool_args = []
            if sys.platform == 'win32':
                if not is_64_bit:
                    raise RuntimeError('Not supported on 32-bit Windows')
            else:
                # Arguments after '--' are passed to the native build tool.
                build_tool_args.append('--')
                if os.environ.get('PYARROW_BUILD_VERBOSE', '0') == '1':
                    cmake_options.append('-DCMAKE_VERBOSE_MAKEFILE=ON')
                if os.environ.get('PYARROW_PARALLEL'):
                    build_tool_args.append(
                        '-j{0}'.format(os.environ['PYARROW_PARALLEL']))
            # Generate the build files
            print("-- Running cmake for pyarrow")
            self.spawn(['cmake'] + extra_cmake_args + cmake_options + [source])
            print("-- Finished cmake for pyarrow")
            # Do the build
            print("-- Running cmake --build for pyarrow")
            self.spawn(['cmake', '--build', '.', '--config', self.build_type]
                       + build_tool_args)
            print("-- Finished cmake --build for pyarrow")
            if self.inplace:
                # a bit hacky
                build_lib = saved_cwd
            # Move the libraries to the place expected by the Python build
            try:
                os.makedirs(pjoin(build_lib, 'pyarrow'))
            except OSError:
                # Directory already exists.
                pass
            if sys.platform == 'win32':
                build_prefix = ''
            else:
                build_prefix = self.build_type
            print('Bundling includes: ' + pjoin(build_prefix, 'include'))
            if os.path.exists(pjoin(build_lib, 'pyarrow', 'include')):
                shutil.rmtree(pjoin(build_lib, 'pyarrow', 'include'))
            shutil.move(pjoin(build_prefix, 'include'),
                        pjoin(build_lib, 'pyarrow'))
            # Move the built C-extension to the place expected by the Python
            # build
            self._found_names = []
            for name in self.CYTHON_MODULE_NAMES:
                built_path = self.get_ext_built(name)
                if not os.path.exists(built_path):
                    print('Did not find {0}'.format(built_path))
                    if self._failure_permitted(name):
                        print('Cython module {0} failure permitted'
                              .format(name))
                        continue
                    raise RuntimeError('pyarrow C-extension failed to build:',
                                       os.path.abspath(built_path))
                cpp_generated_path = self.get_ext_generated_cpp_source(name)
                if not os.path.exists(cpp_generated_path):
                    raise RuntimeError('expected to find generated C++ file '
                                       'in {0!r}'.format(cpp_generated_path))
                # The destination path to move the generated C++ source to
                # (for Cython source coverage)
                cpp_path = pjoin(build_lib, self._get_build_dir(),
                                 os.path.basename(cpp_generated_path))
                if os.path.exists(cpp_path):
                    os.remove(cpp_path)
                # The destination path to move the built C extension to
                ext_path = pjoin(build_lib, self._get_cmake_ext_path(name))
                if os.path.exists(ext_path):
                    os.remove(ext_path)
                self.mkpath(os.path.dirname(ext_path))
                print('Moving generated C++ source', cpp_generated_path,
                      'to build path', cpp_path)
                shutil.move(cpp_generated_path, cpp_path)
                print('Moving built C-extension', built_path,
                      'to build path', ext_path)
                shutil.move(built_path, ext_path)
                self._found_names.append(name)
                if os.path.exists(self.get_ext_built_api_header(name)):
                    shutil.move(self.get_ext_built_api_header(name),
                                pjoin(os.path.dirname(ext_path),
                                      name + '_api.h'))
            if self.bundle_arrow_cpp:
                print(pjoin(build_lib, 'pyarrow'))
                move_shared_libs(build_prefix, build_lib, "arrow")
                move_shared_libs(build_prefix, build_lib, "arrow_python")
                if self.with_cuda:
                    move_shared_libs(build_prefix, build_lib, "arrow_cuda")
                if self.with_flight:
                    move_shared_libs(build_prefix, build_lib, "arrow_flight")
                    move_shared_libs(build_prefix, build_lib,
                                     "arrow_python_flight")
                if self.with_plasma:
                    move_shared_libs(build_prefix, build_lib, "plasma")
                if self.with_gandiva:
                    move_shared_libs(build_prefix, build_lib, "gandiva")
                if self.with_parquet and not self.with_static_parquet:
                    move_shared_libs(build_prefix, build_lib, "parquet")
                if not self.with_static_boost and self.bundle_boost:
                    move_shared_libs(
                        build_prefix, build_lib,
                        "{}_filesystem".format(self.boost_namespace),
                        implib_required=False)
                    move_shared_libs(
                        build_prefix, build_lib,
                        "{}_system".format(self.boost_namespace),
                        implib_required=False)
                    move_shared_libs(
                        build_prefix, build_lib,
                        "{}_regex".format(self.boost_namespace),
                        implib_required=False)
                if sys.platform == 'win32':
                    # zlib uses zlib.dll for Windows
                    zlib_lib_name = 'zlib'
                    move_shared_libs(build_prefix, build_lib, zlib_lib_name,
                                     implib_required=False)
                    if self.with_flight:
                        # DLL dependencies for gRPC / Flight
                        for lib_name in ['cares', 'libprotobuf',
                                         'libcrypto-1_1-x64',
                                         'libssl-1_1-x64']:
                            move_shared_libs(build_prefix, build_lib, lib_name,
                                             implib_required=False)
            if self.with_plasma:
                # Move the plasma store
                source = os.path.join(self.build_type, "plasma-store-server")
                target = os.path.join(build_lib,
                                      self._get_build_dir(),
                                      "plasma-store-server")
                shutil.move(source, target)
def _failure_permitted(self, name):
if name == '_parquet' and not self.with_parquet:
return True
if name == '_plasma' and not self.with_plasma:
return True
if name == '_orc' and not self.with_orc:
return True
if name == '_flight' and not self.with_flight:
return True
if name == '_s3fs' and not self.with_s3:
return True
if name == '_cuda' and not self.with_cuda:
return True
if name == 'gandiva' and not self.with_gandiva:
return True
return False
    def _get_build_dir(self):
        # Ask distutils' build_py where the 'pyarrow' package lives so
        # build artifacts land next to the Python sources.
        build_py = self.get_finalized_command('build_py')
        return build_py.get_package_dir('pyarrow')
    def _get_cmake_ext_path(self, name):
        # This is the name of the arrow C-extension: the module name plus
        # the platform-specific suffix (e.g. '.cpython-37m-...so').
        filename = name + ext_suffix
        return pjoin(self._get_build_dir(), filename)
def get_ext_generated_cpp_source(self, name):
if sys.platform == 'win32':
head, tail = os.path.split(name)
return pjoin(head, tail + ".cpp")
else:
return pjoin(name + ".cpp")
def get_ext_built_api_header(self, name):
if sys.platform == 'win32':
head, tail = os.path.split(name)
return pjoin(head, tail + "_api.h")
else:
return pjoin(name + "_api.h")
    def get_ext_built(self, name):
        # Where CMake wrote the built extension module for *name*.
        if sys.platform == 'win32':
            head, tail = os.path.split(name)
            # Visual Studio seems to differ from other generators in
            # where it places output files.
            if self.cmake_generator.startswith('Visual Studio'):
                return pjoin(head, self.build_type, tail + ext_suffix)
            else:
                return pjoin(head, tail + ext_suffix)
        else:
            return pjoin(self.build_type, name + ext_suffix)
    def get_names(self):
        # Names of the Cython modules that were actually built
        # (populated by _run_cmake).
        return self._found_names
def get_outputs(self):
# Just the C extensions
# regular_exts = _build_ext.get_outputs(self)
return [self._get_cmake_ext_path(name)
for name in self.get_names()]
def move_shared_libs(build_prefix, build_lib, lib_name,
                     implib_required=True):
    """Move shared library *lib_name* from the CMake output directory
    into the pyarrow package under *build_lib*.
    On Windows this moves the .dll (and the .lib import library unless
    *implib_required* is False); elsewhere it defers to the Unix helper.
    """
    if sys.platform == 'win32':
        # Move all .dll and .lib files
        libs = [lib_name + '.dll']
        if implib_required:
            libs.append(lib_name + '.lib')
        for filename in libs:
            shutil.move(pjoin(build_prefix, filename),
                        pjoin(build_lib, 'pyarrow', filename))
    else:
        _move_shared_libs_unix(build_prefix, build_lib, lib_name)
def _move_shared_libs_unix(build_prefix, build_lib, lib_name):
    """Move the most-versioned shared library file for *lib_name* into
    the pyarrow package and symlink the shorter aliases to it."""
    shared_library_prefix = 'lib'
    if sys.platform == 'darwin':
        shared_library_suffix = '.dylib'
    else:
        shared_library_suffix = '.so'
    lib_filename = (shared_library_prefix + lib_name +
                    shared_library_suffix)
    # Also copy libraries with ABI/SO version suffix
    if sys.platform == 'darwin':
        lib_pattern = (shared_library_prefix + lib_name +
                       ".*" + shared_library_suffix[1:])
        libs = glob.glob(pjoin(build_prefix, lib_pattern))
    else:
        libs = glob.glob(pjoin(build_prefix, lib_filename) + '*')
    if not libs:
        raise Exception('Could not find library:' + lib_filename +
                        ' in ' + build_prefix)
    # Longest suffix library should be copied, all others symlinked
    libs.sort(key=lambda s: -len(s))
    print(libs, libs[0])
    lib_filename = os.path.basename(libs[0])
    shutil.move(pjoin(build_prefix, lib_filename),
                pjoin(build_lib, 'pyarrow', lib_filename))
    for lib in libs[1:]:
        filename = os.path.basename(lib)
        link_name = pjoin(build_lib, 'pyarrow', filename)
        if not os.path.exists(link_name):
            os.symlink(lib_filename, link_name)
# In the event of not running from a git clone (e.g. from a git archive
# or a Python sdist), see if we can set the version number ourselves
default_version = '1.0.0-SNAPSHOT'
if (not os.path.exists('../.git')
        and not os.environ.get('SETUPTOOLS_SCM_PRETEND_VERSION')):
    if os.path.exists('PKG-INFO'):
        # We're probably in a Python sdist, setuptools_scm will handle fine
        pass
    else:
        # Pretend-version: '-SNAPSHOT' becomes PEP 440 pre-release 'a0'.
        os.environ['SETUPTOOLS_SCM_PRETEND_VERSION'] = \
            default_version.replace('-SNAPSHOT', 'a0')
# See https://github.com/pypa/setuptools_scm#configuration-parameters
scm_version_write_to_prefix = os.environ.get(
    'SETUPTOOLS_SCM_VERSION_WRITE_TO_PREFIX', setup_dir)
def parse_git(root, **kwargs):
    """
    Parse function for setuptools_scm that ignores tags for non-C++
    subprojects, e.g. apache-arrow-js-XXX tags.
    """
    from setuptools_scm.git import parse
    # Restrict `git describe` to apache-arrow-<version> tags only.
    kwargs['describe_command'] =\
        'git describe --dirty --tags --long --match "apache-arrow-[0-9].*"'
    return parse(root, **kwargs)
# The PyPI long description comes straight from the README.
with open('README.md') as f:
    long_description = f.read()
class BinaryDistribution(Distribution):
    """Distribution subclass that always claims to contain extension
    modules, so wheels built from it are tagged platform-specific even
    though the C extensions are produced by CMake rather than declared
    via ``ext_modules``."""

    def has_ext_modules(self):
        # Original signature named this parameter 'foo'; it is the
        # instance receiver, so it is named 'self' as per convention.
        return True
# Runtime dependencies; the environment markers limit the Python 2/3
# backport packages to old interpreters.
install_requires = (
    'numpy >= 1.14',
    'six >= 1.0.0',
    'futures; python_version < "3.2"',
    'enum34 >= 1.1.6; python_version < "3.4"',
)
# Only include pytest-runner in setup_requires if we're invoking tests
invoking_tests = bool({'pytest', 'test', 'ptr'}.intersection(sys.argv))
setup_requires = ['pytest-runner'] if invoking_tests else []
# Package metadata; the heavy lifting happens in the custom build_ext
# command defined above.
setup(
    name='pyarrow',
    packages=['pyarrow', 'pyarrow.tests'],
    zip_safe=False,
    package_data={'pyarrow': ['*.pxd', '*.pyx', 'includes/*.pxd']},
    include_package_data=True,
    distclass=BinaryDistribution,
    # Dummy extension to trigger build_ext
    ext_modules=[Extension('__dummy__', sources=[])],
    cmdclass={
        'clean': clean,
        'build_ext': build_ext
    },
    entry_points={
        'console_scripts': [
            'plasma_store = pyarrow:_plasma_store_entry_point'
        ]
    },
    # Version is derived from git tags via setuptools_scm (see parse_git).
    use_scm_version={
        'root': os.path.dirname(setup_dir),
        'parse': parse_git,
        'write_to': os.path.join(scm_version_write_to_prefix,
                                 'pyarrow/_generated_version.py')
    },
    setup_requires=['setuptools_scm', 'cython >= 0.29'] + setup_requires,
    install_requires=install_requires,
    tests_require=['pytest', 'pandas', 'hypothesis',
                   'pathlib2; python_version < "3.4"'],
    description='Python library for Apache Arrow',
    long_description=long_description,
    long_description_content_type='text/markdown',
    classifiers=[
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ],
    license='Apache License, Version 2.0',
    maintainer='Apache Arrow Developers',
    maintainer_email='dev@arrow.apache.org',
    test_suite='pyarrow.tests',
    url='https://arrow.apache.org/'
)
|
renesugar/arrow
|
python/setup.py
|
Python
|
apache-2.0
| 24,250
|
import codecs
import os
import sys
import time
import unicodedata
from contextlib import contextmanager
from queue import Queue, Empty
from typing import Any, Dict, Iterator, Optional
from xml.sax.saxutils import XMLGenerator

from colorama import Style
class Logger:
    """Writes test-driver output both to stderr and to an XML logfile.

    The logfile path comes from the ``LOGFILE`` environment variable and
    defaults to /dev/null.  Serial console output from machines is
    queued via enqueue()/log_serial() and flushed into the XML document
    by drain_log_queue() so it nests inside the current element.
    """

    def __init__(self) -> None:
        self.logfile = os.environ.get("LOGFILE", "/dev/null")
        self.logfile_handle = codecs.open(self.logfile, "wb")
        self.xml = XMLGenerator(self.logfile_handle, encoding="utf-8")
        self.queue: "Queue[Dict[str, str]]" = Queue()

        self.xml.startDocument()
        self.xml.startElement("logfile", attrs={})

        # Whether serial output is also echoed (dimmed) to stderr.
        self._print_serial_logs = True

    @staticmethod
    def _eprint(*args: object, **kwargs: Any) -> None:
        print(*args, file=sys.stderr, **kwargs)

    def close(self) -> None:
        """Finish the XML document and close the logfile."""
        self.xml.endElement("logfile")
        self.xml.endDocument()
        self.logfile_handle.close()

    def sanitise(self, message: str) -> str:
        # Drop control characters (Unicode category "C*"), which are not
        # representable inside the XML log.
        return "".join(ch for ch in message if unicodedata.category(ch)[0] != "C")

    def maybe_prefix(self, message: str, attributes: Dict[str, str]) -> str:
        # Prefix "<machine>: " when the record is attributed to a machine.
        if "machine" in attributes:
            return "{}: {}".format(attributes["machine"], message)
        return message

    def log_line(self, message: str, attributes: Dict[str, str]) -> None:
        self.xml.startElement("line", attributes)
        self.xml.characters(message)
        self.xml.endElement("line")

    def info(self, *args, **kwargs) -> None:  # type: ignore
        self.log(*args, **kwargs)

    def warning(self, *args, **kwargs) -> None:  # type: ignore
        self.log(*args, **kwargs)

    def error(self, *args, **kwargs) -> None:  # type: ignore
        # Errors are fatal for the test driver.
        self.log(*args, **kwargs)
        sys.exit(1)

    def log(self, message: str, attributes: Optional[Dict[str, str]] = None) -> None:
        # The default was previously a shared mutable dict ({}); use the
        # None-sentinel idiom instead.
        if attributes is None:
            attributes = {}
        self._eprint(self.maybe_prefix(message, attributes))
        self.drain_log_queue()
        self.log_line(message, attributes)

    def log_serial(self, message: str, machine: str) -> None:
        self.enqueue({"msg": message, "machine": machine, "type": "serial"})
        if self._print_serial_logs:
            self._eprint(
                Style.DIM + "{} # {}".format(machine, message) + Style.RESET_ALL
            )

    def enqueue(self, item: Dict[str, str]) -> None:
        self.queue.put(item)

    def drain_log_queue(self) -> None:
        # Flush queued serial messages into the XML document until the
        # queue is empty.
        try:
            while True:
                item = self.queue.get_nowait()
                msg = self.sanitise(item["msg"])
                del item["msg"]
                self.log_line(msg, item)
        except Empty:
            pass

    @contextmanager
    def nested(self, message: str, attributes: Optional[Dict[str, str]] = None) -> Iterator[None]:
        """Log *message*, run the body inside a <nest> element, and log
        the elapsed time afterwards."""
        if attributes is None:
            attributes = {}
        self._eprint(self.maybe_prefix(message, attributes))

        self.xml.startElement("nest", attrs={})
        self.xml.startElement("head", attributes)
        self.xml.characters(message)
        self.xml.endElement("head")

        tic = time.time()
        self.drain_log_queue()
        yield
        self.drain_log_queue()
        toc = time.time()
        self.log("(finished: {}, in {:.2f} seconds)".format(message, toc - tic))

        self.xml.endElement("nest")
# Module-level singleton logger used throughout the test driver.
rootlog = Logger()
|
NixOS/nixpkgs
|
nixos/lib/test-driver/test_driver/logger.py
|
Python
|
mit
| 3,250
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A library of basic cythonized CombineFn subclasses.
For internal use only; no backwards-compatibility guarantees.
"""
from __future__ import absolute_import
from apache_beam.transforms import core
try:
from apache_beam.transforms.cy_dataflow_distribution_counter import DataflowDistributionCounter
except ImportError:
from apache_beam.transforms.py_dataflow_distribution_counter import DataflowDistributionCounter
class AccumulatorCombineFn(core.CombineFn):
  """CombineFn implemented in terms of a plain accumulator class.

  Subclasses only set ``_accumulator_type`` to a class exposing
  ``add_input(element)``, ``merge(accumulators)`` and
  ``extract_output()``.
  """
  # singleton?
  def create_accumulator(self):
    return self._accumulator_type()
  @staticmethod
  def add_input(accumulator, element):
    # Static on purpose: the accumulator is passed explicitly, not via self.
    accumulator.add_input(element)
    return accumulator
  def merge_accumulators(self, accumulators):
    accumulator = self._accumulator_type()
    accumulator.merge(accumulators)
    return accumulator
  @staticmethod
  def extract_output(accumulator):
    return accumulator.extract_output()
  def __eq__(self, other):
    # Two instances are interchangeable iff they use the same accumulator.
    return (isinstance(other, AccumulatorCombineFn)
            and self._accumulator_type is other._accumulator_type)
  def __hash__(self):
    return hash(self._accumulator_type)
# Signed 64-bit bounds, assigned via globals() -- presumably so the large
# constants stay out of the cythonized C source (see the comment on _63);
# TODO confirm against the Cython build.
_63 = 63  # Avoid large literals in C source code.
globals()['INT64_MAX'] = 2**_63 - 1
globals()['INT64_MIN'] = -2**_63
class CountAccumulator(object):
  """Counts the number of elements seen."""

  def __init__(self):
    self.value = 0

  def add_input(self, unused_element):
    self.value = self.value + 1

  def merge(self, accumulators):
    self.value += sum(other.value for other in accumulators)

  def extract_output(self):
    return self.value
class SumInt64Accumulator(object):
  """Sums int64 inputs, range-checking each one and wrapping the total
  into the signed 64-bit range on output."""

  def __init__(self):
    self.value = 0

  def add_input(self, element):
    global INT64_MAX, INT64_MIN  # pylint: disable=global-variable-not-assigned
    element = int(element)
    if element > INT64_MAX or element < INT64_MIN:
      raise OverflowError(element)
    self.value += element

  def merge(self, accumulators):
    self.value += sum(other.value for other in accumulators)

  def extract_output(self):
    # Wrap the unbounded Python int total into signed 64-bit range.
    if not INT64_MIN <= self.value <= INT64_MAX:
      self.value %= 2**64
      if self.value >= INT64_MAX:
        self.value -= 2**64
    return self.value
class MinInt64Accumulator(object):
  """Tracks the minimum of range-checked int64 inputs."""

  def __init__(self):
    self.value = INT64_MAX

  def add_input(self, element):
    element = int(element)
    if element > INT64_MAX or element < INT64_MIN:
      raise OverflowError(element)
    self.value = min(self.value, element)

  def merge(self, accumulators):
    for other in accumulators:
      self.value = min(self.value, other.value)

  def extract_output(self):
    return self.value
class MaxInt64Accumulator(object):
  """Tracks the maximum of range-checked int64 inputs."""

  def __init__(self):
    self.value = INT64_MIN

  def add_input(self, element):
    element = int(element)
    if element > INT64_MAX or element < INT64_MIN:
      raise OverflowError(element)
    self.value = max(self.value, element)

  def merge(self, accumulators):
    for other in accumulators:
      self.value = max(self.value, other.value)

  def extract_output(self):
    return self.value
class MeanInt64Accumulator(object):
  """Arithmetic mean of range-checked int64 inputs; the running sum wraps
  into signed 64-bit range on output like SumInt64Accumulator."""

  def __init__(self):
    self.sum = 0
    self.count = 0

  def add_input(self, element):
    element = int(element)
    if element > INT64_MAX or element < INT64_MIN:
      raise OverflowError(element)
    self.sum += element
    self.count += 1

  def merge(self, accumulators):
    for other in accumulators:
      self.sum += other.sum
      self.count += other.count

  def extract_output(self):
    if not INT64_MIN <= self.sum <= INT64_MAX:
      self.sum %= 2**64
      if self.sum >= INT64_MAX:
        self.sum -= 2**64
    return self.sum / self.count if self.count else _NAN
# Integer CombineFns: each pairs the generic AccumulatorCombineFn with
# one of the accumulator implementations above.
class CountCombineFn(AccumulatorCombineFn):
  _accumulator_type = CountAccumulator
class SumInt64Fn(AccumulatorCombineFn):
  _accumulator_type = SumInt64Accumulator
class MinInt64Fn(AccumulatorCombineFn):
  _accumulator_type = MinInt64Accumulator
class MaxInt64Fn(AccumulatorCombineFn):
  _accumulator_type = MaxInt64Accumulator
class MeanInt64Fn(AccumulatorCombineFn):
  _accumulator_type = MeanInt64Accumulator
# Sentinels used by the floating-point accumulators below.
_POS_INF = float('inf')
_NEG_INF = float('-inf')
_NAN = float('nan')
class SumDoubleAccumulator(object):
  """Sums inputs as floats."""

  def __init__(self):
    self.value = 0

  def add_input(self, element):
    self.value += float(element)

  def merge(self, accumulators):
    self.value += sum(other.value for other in accumulators)

  def extract_output(self):
    return self.value
class MinDoubleAccumulator(object):
  """Tracks the minimum float input (starts at +inf)."""

  def __init__(self):
    self.value = _POS_INF

  def add_input(self, element):
    self.value = min(self.value, float(element))

  def merge(self, accumulators):
    for other in accumulators:
      self.value = min(self.value, other.value)

  def extract_output(self):
    return self.value
class MaxDoubleAccumulator(object):
  """Tracks the maximum float input (starts at -inf)."""

  def __init__(self):
    self.value = _NEG_INF

  def add_input(self, element):
    self.value = max(self.value, float(element))

  def merge(self, accumulators):
    for other in accumulators:
      self.value = max(self.value, other.value)

  def extract_output(self):
    return self.value
class MeanDoubleAccumulator(object):
  """Arithmetic mean of float inputs; NaN when no input was seen."""

  def __init__(self):
    self.sum = 0
    self.count = 0

  def add_input(self, element):
    self.sum += float(element)
    self.count += 1

  def merge(self, accumulators):
    for other in accumulators:
      self.sum += other.sum
      self.count += other.count

  def extract_output(self):
    return self.sum / self.count if self.count else _NAN
# Floating-point CombineFns built on the double accumulators above.
class SumFloatFn(AccumulatorCombineFn):
  _accumulator_type = SumDoubleAccumulator
class MinFloatFn(AccumulatorCombineFn):
  _accumulator_type = MinDoubleAccumulator
class MaxFloatFn(AccumulatorCombineFn):
  _accumulator_type = MaxDoubleAccumulator
class MeanFloatFn(AccumulatorCombineFn):
  _accumulator_type = MeanDoubleAccumulator
class AllAccumulator(object):
  """Logical AND over the truthiness of every input (True when empty)."""

  def __init__(self):
    self.value = True

  def add_input(self, element):
    self.value = self.value and bool(element)

  def merge(self, accumulators):
    for other in accumulators:
      self.value = self.value and other.value

  def extract_output(self):
    return self.value
class AnyAccumulator(object):
  """Logical OR over the truthiness of every input (False when empty)."""

  def __init__(self):
    self.value = False

  def add_input(self, element):
    self.value = self.value or bool(element)

  def merge(self, accumulators):
    for other in accumulators:
      self.value = self.value or other.value

  def extract_output(self):
    return self.value
# Boolean CombineFns.
class AnyCombineFn(AccumulatorCombineFn):
  _accumulator_type = AnyAccumulator
class AllCombineFn(AccumulatorCombineFn):
  _accumulator_type = AllAccumulator
class DataflowDistributionCounterFn(AccumulatorCombineFn):
  """A subclass of cy_combiners.AccumulatorCombineFn.
  Make DataflowDistributionCounter able to report to Dataflow service via
  CounterFactory.
  When the cythonized DataflowDistributionCounter is available, this
  CombineFn combines with the cythonized module; otherwise it falls back
  to the pure-Python version (see the try/except import at module top).
  """
  _accumulator_type = DataflowDistributionCounter
|
tgroh/incubator-beam
|
sdks/python/apache_beam/transforms/cy_combiners.py
|
Python
|
apache-2.0
| 8,141
|
#
# Copyright (C) 2013 Mathias Weber <mathew.weber@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
#
from .appconfig import AppConfig, AppConfigValueException, PY2
|
mweb/appconfig
|
appconfig/__init__.py
|
Python
|
bsd-2-clause
| 1,435
|
# -*- coding: utf-8 -*-
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
from typing import Dict, Iterator, List
import gdb
from crash.exceptions import InvalidArgumentError, CorruptedError
from crash.types.list import list_for_each_entry
from crash.util import AddressSpecifier, get_typed_pointer
from crash.util.symbols import Types, Symvals, TypeCallbacks, SymbolCallbacks
# Kernel symbols and struct types, resolved lazily by crash's symbol/type
# caching helpers.
symvals = Symvals(['cgroup_roots', 'cgroup_subsys'])
types = Types([
    'struct cgroup',
    'struct cgroup_root',
    'struct cgroup_subsys',
    'struct cgrp_cset_link',
    'struct task_struct',
])
class Subsys:
    """Registry mapping cgroup subsystem ids to controller names.

    Populated once by init_subsys_ids from the kernel's
    'enum cgroup_subsys_id' (wired up via type_cbs at module bottom).
    """
    # id -> controller name (suffix-stripped enumerator), shared class-wide
    _subsys_names: Dict[int, str] = dict()
    # bitmask with one bit set per known subsystem id
    _available_mask = 0
    @classmethod
    def init_subsys_ids(cls, subsys_enum: gdb.Type) -> None:
        # Enumerators are named '<controller>_cgrp_id'; strip the suffix.
        suffix = '_cgrp_id'
        for k in subsys_enum.keys():
            if k == 'CGROUP_SUBSYS_COUNT':
                continue
            if subsys_enum[k].enumval in cls._subsys_names:
                raise InvalidArgumentError("Enum {} is not unique".format(subsys_enum.name))
            if not k.endswith(suffix):
                raise InvalidArgumentError("Enum {} has unknown names".format(subsys_enum.name))
            cls._subsys_names[subsys_enum[k].enumval] = k[:-len(suffix)]
            cls._available_mask |= (1 << subsys_enum[k].enumval)
    def for_each_subsys(self) -> Iterator[gdb.Value]:
        # Yields each 'struct cgroup_subsys' from the kernel's table.
        for ssid in self._subsys_names:
            yield symvals.cgroup_subsys[ssid].dereference()
    def subsys_mask_to_names(self, mask: int) -> List[str]:
        """Return controller names for the bits set in *mask*; raise
        InvalidArgumentError for bits matching no known subsystem."""
        unknown = mask & ~self._available_mask
        if unknown:
            raise InvalidArgumentError(f"Mask contains unknown controllers {unknown:x}")
        ret = []
        for ssid in self._subsys_names:
            if mask & (1 << ssid):
                ret.append(self._subsys_names[ssid])
        return ret
# Module-private singleton backing the module-level convenience functions.
_Subsys = Subsys()
def for_each_hierarchy() -> Iterator[gdb.Value]:
    """Iterate over every mounted cgroup hierarchy (struct cgroup_root)."""
    # TODO should we factor in cgrp_dfl_visible?
    return list_for_each_entry(symvals.cgroup_roots,
                               types.cgroup_root_type, 'root_list')
def for_each_subsys() -> Iterator[gdb.Value]:
    """Iterate over every known cgroup subsystem (struct cgroup_subsys)."""
    return _Subsys.for_each_subsys()
def subsys_mask_to_names(mask: int) -> List[str]:
    """Translate a subsystem bitmask into controller names; raises
    InvalidArgumentError for bits matching no known controller."""
    return _Subsys.subsys_mask_to_names(mask)
def cgroup_from_root(task: gdb.Value, cgroup_root: gdb.Value) -> gdb.Value:
    """Return the cgroup of *task* within the hierarchy *cgroup_root*.

    Walks the task's css_set links and matches on the hierarchy root.

    Raises:
        CorruptedError: the task appears under no cgroup of this root.
    """
    cssset = task['cgroups'].dereference()
    for link in list_for_each_entry(cssset['cgrp_links'], types.cgrp_cset_link_type, 'cgrp_link'):
        if link['cgrp']['root'] == cgroup_root.address:
            return link['cgrp'].dereference()
    # TODO think about migrating tasks
    # BUG FIX: the original message was a plain string containing f-string
    # placeholders (missing 'f' prefix) and a stray trailing brace.
    raise CorruptedError(
        f"Task {int(task.address):016x} not under cgroup_root "
        f"{int(cgroup_root.address):016x}"
    )
def find_cgroup(addr: AddressSpecifier) -> gdb.Value:
    """Resolve *addr* to a dereferenced 'struct cgroup' value."""
    cgrp = get_typed_pointer(addr, types.cgroup_type).dereference()
    return cgrp
def for_each_cgroup_task(cgrp: gdb.Value) -> Iterator[gdb.Value]:
    """Yield each task_struct attached to *cgrp* via its css_set links."""
    # TODO migrating tasks?, zombies?
    for link in list_for_each_entry(cgrp['cset_links'], types.cgrp_cset_link_type, 'cset_link'):
        cssset = link['cset'].dereference()
        for task in list_for_each_entry(cssset['tasks'], types.task_struct_type, 'cg_list'):
            yield task
# Populate Subsys' id->name registry as soon as the kernel enum is available.
type_cbs = TypeCallbacks([('enum cgroup_subsys_id', Subsys.init_subsys_ids)])
|
jeffmahoney/crash-python
|
crash/subsystem/cgroup/__init__.py
|
Python
|
gpl-2.0
| 3,342
|
import logging
import io
import unittest
from samfp.io.logger import get_logger, SamFpLogFormatter
class TestLogFormat(unittest.TestCase):
logger_name = 'TestLogFormatApp'
def setUp(self):
self.formatter = SamFpLogFormatter()
self.stream = io.StringIO()
self.handler = logging.StreamHandler(self.stream)
self.logger = get_logger(self.logger_name)
for handler in self.logger.handlers:
self.logger.removeHandler(handler)
self.handler.setFormatter(self.formatter)
self.logger.addHandler(self.handler)
def tearDown(self):
pass
def test_number_of_handlers(self):
logger = get_logger(self.logger_name)
self.assertEqual(1, len(logger.handlers))
def test_debug(self):
message = 'test debug message'
with self.assertLogs(logger=self.logger_name, level='DEBUG') as cm:
self.logger.debug(message)
self.handler.flush()
log_message = self.handler.format(cm.records[-1]).strip()
self.assertRegex(log_message, r'(\[D\s\d{4}-\d{2}-\d{2}\s\d{2}:\d{2}:\d{2}\s\w*\]\s)')
def test_info(self):
message = 'test info message'
with self.assertLogs(logger=self.logger_name) as cm:
self.logger.info(message)
self.handler.flush()
log_message = self.handler.format(cm.records[-1]).strip()
self.assertRegex(log_message, r'(\[I\s\d{4}-\d{2}-\d{2}\s\d{2}:\d{2}:\d{2}\s\w*\]\s)')
def test_warning(self):
message = 'test warning message'
with self.assertLogs(logger=self.logger_name) as cm:
self.logger.warning(message)
self.handler.flush()
log_message = self.handler.format(cm.records[-1]).strip()
self.assertRegex(log_message, r'(\[W\s\d{4}-\d{2}-\d{2}\s\d{2}:\d{2}:\d{2}\s\w*\]\s)')
def test_warning(self):
message = 'test error message'
with self.assertLogs(logger=self.logger_name) as cm:
self.logger.error(message)
self.handler.flush()
log_message = self.handler.format(cm.records[-1]).strip()
self.assertRegex(log_message, r'(\[E\s\d{4}-\d{2}-\d{2}\s\d{2}:\d{2}:\d{2}\s\w*\]\s)')
def test_critical(self):
message = 'test critical message'
with self.assertLogs(logger=self.logger_name) as cm:
self.logger.critical(message)
self.handler.flush()
log_message = self.handler.format(cm.records[-1]).strip()
self.assertRegex(log_message, r'(\[C\s\d{4}-\d{2}-\d{2}\s\d{2}:\d{2}:\d{2}\s\w*\]\s)')
def test_color(self):
message = 'test color message'
with self.assertLogs(logger=self.logger_name) as cm:
self.logger.warning(message)
self.handler.flush()
log_message = self.handler.format(cm.records[-1]).strip()
self.assertRegex(log_message, r'(\x1b)')
def test_not_color(self):
    """A formatter built with use_colours=False emits no ANSI escape codes.

    Fixes two defects in the original:
    * the loop variable reused the name ``handler``, clobbering the
      StreamHandler created above so the formatter was attached to the
      wrong handler;
    * ``logger.handlers`` was mutated while being iterated, which skips
      entries.  Iterate over a copy instead.
    """
    formatter = SamFpLogFormatter(use_colours=False)
    stream = io.StringIO()
    handler = logging.StreamHandler(stream)
    logger = get_logger('TestNoColor')
    for old_handler in list(logger.handlers):
        logger.removeHandler(old_handler)
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    message = 'test color message'
    with self.assertLogs(logger='TestNoColor') as cm:
        logger.warning(message)
    handler.flush()
    log_message = handler.format(cm.records[-1]).strip()
    self.assertNotRegex(log_message, r'(\x1b)')
|
b1quint/samfp
|
samfp/io/tests/test_logger.py
|
Python
|
bsd-3-clause
| 3,620
|
import logging
import subprocess
from os import path, remove, rename
import tempfile
from textwrap import dedent
__all__ = ['call_astrometry', 'add_astrometry']
logger = logging.getLogger(__name__)
def call_astrometry(filename, sextractor=False,
                    custom_sextractor_config=False, feder_settings=True,
                    no_plots=True, minimal_output=True,
                    save_wcs=False, verify=None,
                    ra_dec=None, overwrite=False,
                    wcs_reference_image_center=True,
                    odds_ratio=None,
                    astrometry_config=None,
                    additional_args=None):
    """
    Wrapper around astrometry.net solve-field.

    Parameters
    ----------
    filename : str
        Name of the image file on which to run ``solve-field``.
    sextractor : bool or str, optional
        ``True`` to use `sextractor`, or a ``str`` with the
        path to sextractor.
    custom_sextractor_config : bool, optional
        If ``True``, use a sexractor configuration file customized for Feder
        images.
    feder_settings : bool, optional
        Set True if you want to use plate scale appropriate for Feder
        Observatory Apogee Alta U9 camera.
    no_plots : bool, optional
        ``True`` to suppress astrometry.net generation of
        plots (pngs showing object location and more)
    minimal_output : bool, optional
        If ``True``, suppress, as separate files, output of: WCS
        header, RA/Dec object list, matching objects list, but see
        also `save_wcs`
    save_wcs : bool, optional
        If ``True``, save WCS header even if other output is suppressed
        with `minimal_output`
    verify : str, optional
        Name of a WCS header to be used as a first guess
        for the astrometry fit; if this plate solution does not work
        the solution is found as though `verify` had not been specified.
    ra_dec : list or tuple of float
        (RA, Dec); also limits the search radius to 0.5 degree.
    overwrite : bool, optional
        If ``True``, perform astrometry even if astrometry.net files from a
        previous run are present.
    wcs_reference_image_center :
        If ``True``, force the WCS reference point in the image to be the
        image center.
    odds_ratio : float, optional
        The odds ratio to use for a successful solve. Default is to use the
        default in `solve-field`.
    astrometry_config : str, optional
        Name of configuration file to use for SExtractor.
    additional_args : str or list of str, optional
        Additional arguments to pass to `solve-field`

    Returns
    -------
    int
        Exit status of ``solve-field``; ``0`` on success.

    Raises
    ------
    subprocess.CalledProcessError
        If ``solve-field`` exits with a non-zero status.
    """
    solve_field = ["solve-field"]
    option_list = []
    option_list.append("--obj 100")
    if feder_settings:
        option_list.append(
            "--scale-low 0.5 --scale-high 0.6 --scale-units arcsecperpix")
    if additional_args is not None:
        if isinstance(additional_args, str):
            add_ons = [additional_args]
        else:
            add_ons = additional_args
        option_list.extend(add_ons)
    if isinstance(sextractor, str):
        option_list.append("--source-extractor-path " + sextractor)
    elif sextractor:
        option_list.append("--use-source-extractor")
    if no_plots:
        option_list.append("--no-plot")
    if minimal_output:
        option_list.append("--corr none --rdls none --match none")
        if not save_wcs:
            option_list.append("--wcs none")
    if ra_dec is not None:
        # tuple() so the documented "list or tuple" both work with %-format
        # (a bare list is treated as a single value and raises TypeError).
        option_list.append("--ra %s --dec %s --radius 0.5" % tuple(ra_dec))
    if overwrite:
        option_list.append("--overwrite")
    if wcs_reference_image_center:
        option_list.append("--crpix-center")
    options = " ".join(option_list)
    solve_field.extend(options.split())
    if custom_sextractor_config:
        # Write a customized SExtractor configuration and parameter file to
        # a temporary directory and point solve-field at them.
        tmp_location = tempfile.mkdtemp()
        param_location = path.join(tmp_location, 'default.param')
        config_location = path.join(tmp_location, 'feder.config')
        config_contents = SExtractor_config.format(param_file=param_location)
        with open(config_location, 'w') as f:
            f.write(config_contents)
        with open(param_location, 'w') as f:
            contents = """
            X_IMAGE
            Y_IMAGE
            MAG_AUTO
            FLUX_AUTO
            """
            f.write(dedent(contents))
        additional_solve_args = [
            '--source-extractor-config', config_location,
            '--x-column', 'X_IMAGE',
            '--y-column', 'Y_IMAGE',
            '--sort-column', 'MAG_AUTO',
            '--sort-ascending'
        ]
        solve_field.extend(additional_solve_args)
    if odds_ratio is not None:
        solve_field.append('--odds-to-solve')
        # subprocess requires string arguments; odds_ratio is documented as
        # a float, so convert explicitly.
        solve_field.append(str(odds_ratio))
    if astrometry_config is not None:
        solve_field.append('--config')
        solve_field.append(astrometry_config)
    # kludge to handle case when path of verify file contains a space--split
    # above does not work for that case.
    if verify is not None:
        if verify:
            solve_field.append("--verify")
            solve_field.append("%s" % verify)
        else:
            solve_field.append("--no-verify")
    solve_field.extend([filename])
    print(' '.join(solve_field))
    logger.info(' '.join(solve_field))
    try:
        solve_field_output = subprocess.check_output(solve_field,
                                                     stderr=subprocess.STDOUT)
        return_status = 0
        log_level = logging.DEBUG
    except subprocess.CalledProcessError as e:
        return_status = e.returncode
        solve_field_output = 'Output from astrometry.net:\n' + str(e.output)
        log_level = logging.WARN
        logger.warning('Adding astrometry failed for %s', filename)
        raise e
    logger.log(log_level, solve_field_output)
    return return_status
def add_astrometry(filename, overwrite=False, ra_dec=None,
                   note_failure=False, save_wcs=False,
                   verify=None, try_builtin_source_finder=False,
                   custom_sextractor=False,
                   odds_ratio=None,
                   astrometry_config=None,
                   camera='',
                   avoid_pyfits=False,
                   no_source_extractor=False,
                   solve_field_args=None):
    """Add WCS headers to FITS file using astrometry.net

    Parameters
    ----------
    filename : str
        Image file to solve.  astrometry.net writes its output next to this
        file using the same base name.
    overwrite : bool, optional
        Set ``True`` to overwrite the original file. If `False`,
        the file astrometry.net generates is kept.

    ra_dec : list or tuple of float or str
        (RA, Dec) of field center as either decimal or sexagesimal; also
        limits the search radius to 0.5 degree.

    note_failure : bool, optional
        If ``True``, create a file with extension "failed" if astrometry.net
        fails. The "failed" file contains the error messages generated by
        astrometry.net.

    try_builtin_source_finder : bool
        If true, try using astrometry.net's built-in source extractor if
        sextractor fails.

    save_wcs :
    verify :
        See :func:`call_astrometry`

    camera : str, one of ['celestron', 'u9', 'cp16'], optional
        Name of camera; determines the pixel scale used in the solve. Default
        is to use `'u9'`.

    avoid_pyfits : bool
        Add arguments to solve-field to avoid calls to pyfits.BinTableHDU.
        See https://groups.google.com/forum/#!topic/astrometry/AT21x6zVAJo

    Returns
    -------
    bool
        ``True`` on success.

    Notes
    -----
    Tries a couple strategies before giving up: first sextractor,
    then, if that fails, astrometry.net's built-in source extractor.

    It also cleans up after astrometry.net, keeping only the new FITS
    file it generates, the .solved file, and, if desired, a ".failed" file
    for fields which it fails to solve.

    For more flexible invocation of astrometry.net, see :func:`call_astrometry`
    """
    base, ext = path.splitext(filename)
    # All are in arcsec per pixel, values are approximate
    camera_pixel_scales = {
        'celestron': 0.3,
        'u9': 0.55,
        'cp16': 0.55
    }
    if camera:
        use_feder = False
        scale = camera_pixel_scales[camera]
        # Allow solve-field a +/-20% margin around the nominal plate scale.
        scale_options = ("--scale-low {low} --scale-high {high} "
                        "--scale-units arcsecperpix".format(low=0.8*scale, high=1.2 * scale))
    else:
        use_feder = True
        scale_options = ''
    if avoid_pyfits:
        pyfits_options = '--no-remove-lines --uniformize 0'
    else:
        pyfits_options = ''
    additional_opts = ' '.join([scale_options,
                                pyfits_options])
    if solve_field_args is not None:
        additional_opts = additional_opts.split()
        additional_opts.extend(solve_field_args)
    logger.info('BEGIN ADDING ASTROMETRY on {0}'.format(filename))
    # Defensive default so the note_failure branch below can never hit an
    # unbound name, whatever path set solved_field.
    failed_details = b''
    try:
        logger.debug('About to call call_astrometry')
        solved_field = (call_astrometry(filename,
                                        sextractor=not no_source_extractor,
                                        ra_dec=ra_dec,
                                        save_wcs=save_wcs, verify=verify,
                                        custom_sextractor_config=custom_sextractor,
                                        odds_ratio=odds_ratio,
                                        astrometry_config=astrometry_config,
                                        feder_settings=use_feder,
                                        additional_args=additional_opts)
                        == 0)
    except subprocess.CalledProcessError as e:
        logger.debug('Failed with error')
        failed_details = e.output
        solved_field = False
    if (not solved_field) and try_builtin_source_finder:
        log_msg = 'Astrometry failed using sextractor, trying built-in '
        log_msg += 'source finder'
        logger.info(log_msg)
        try:
            solved_field = (call_astrometry(filename, ra_dec=ra_dec,
                                            overwrite=True,
                                            save_wcs=save_wcs, verify=verify)
                            == 0)
        except subprocess.CalledProcessError as e:
            failed_details = e.output
            solved_field = False
    if solved_field:
        logger.info('Adding astrometry succeeded')
    else:
        logger.warning('Adding astrometry failed for file %s', filename)
    if overwrite and solved_field:
        logger.info('Overwriting original file with image with astrometry')
        try:
            rename(base + '.new', filename)
        except OSError as e:
            logger.error(e)
            return False
    # whether we succeeded or failed, clean up
    try:
        remove(base + '.axy')
    except OSError:
        pass
    if solved_field:
        try:
            remove(base + '-indx.xyls')
            remove(base + '.solved')
        except OSError:
            pass
    if note_failure and not solved_field:
        try:
            # CalledProcessError.output is bytes, hence the binary mode;
            # the context manager guarantees the file is closed even if
            # the write itself fails.
            with open(base + '.failed', 'wb') as f:
                f.write(failed_details)
        except IOError as e:
            logger.error('Unable to save output of astrometry.net %s', e)
    logger.info('END ADDING ASTROMETRY for %s', filename)
    return solved_field
SExtractor_config = """
# Configuration file for SExtractor 2.19.5 based on default by EB 2014-11-26
#
# modification was to change DETECT_MINAREA and turn of filter convolution
#-------------------------------- Catalog ------------------------------------
PARAMETERS_NAME {param_file} # name of the file containing catalog contents
#------------------------------- Extraction ----------------------------------
DETECT_TYPE CCD # CCD (linear) or PHOTO (with gamma correction)
DETECT_MINAREA 15 # min. # of pixels above threshold
DETECT_THRESH 1.5 # <sigmas> or <threshold>,<ZP> in mag.arcsec-2
ANALYSIS_THRESH 1.5 # <sigmas> or <threshold>,<ZP> in mag.arcsec-2
FILTER N # apply filter for detection (Y or N)?
FILTER_NAME default.conv # name of the file containing the filter
DEBLEND_NTHRESH 32 # Number of deblending sub-thresholds
DEBLEND_MINCONT 0.005 # Minimum contrast parameter for deblending
CLEAN Y # Clean spurious detections? (Y or N)?
CLEAN_PARAM 1.0 # Cleaning efficiency
MASK_TYPE CORRECT # type of detection MASKing: can be one of
# NONE, BLANK or CORRECT
#------------------------------ Photometry -----------------------------------
PHOT_APERTURES 10 # MAG_APER aperture diameter(s) in pixels
PHOT_AUTOPARAMS 2.5, 3.5 # MAG_AUTO parameters: <Kron_fact>,<min_radius>
PHOT_PETROPARAMS 2.0, 3.5 # MAG_PETRO parameters: <Petrosian_fact>,
# <min_radius>
SATUR_LEVEL 50000.0 # level (in ADUs) at which arises saturation
SATUR_KEY SATURATE # keyword for saturation level (in ADUs)
MAG_ZEROPOINT 0.0 # magnitude zero-point
MAG_GAMMA 4.0 # gamma of emulsion (for photographic scans)
GAIN 0.0 # detector gain in e-/ADU
GAIN_KEY GAIN # keyword for detector gain in e-/ADU
PIXEL_SCALE 1.0 # size of pixel in arcsec (0=use FITS WCS info)
#------------------------- Star/Galaxy Separation ----------------------------
SEEING_FWHM 1.2 # stellar FWHM in arcsec
STARNNW_NAME default.nnw # Neural-Network_Weight table filename
#------------------------------ Background -----------------------------------
BACK_SIZE 64 # Background mesh: <size> or <width>,<height>
BACK_FILTERSIZE 3 # Background filter: <size> or <width>,<height>
BACKPHOTO_TYPE GLOBAL # can be GLOBAL or LOCAL
#------------------------------ Check Image ----------------------------------
CHECKIMAGE_TYPE NONE # can be NONE, BACKGROUND, BACKGROUND_RMS,
# MINIBACKGROUND, MINIBACK_RMS, -BACKGROUND,
# FILTERED, OBJECTS, -OBJECTS, SEGMENTATION,
# or APERTURES
CHECKIMAGE_NAME check.fits # Filename for the check-image
#--------------------- Memory (change with caution!) -------------------------
MEMORY_OBJSTACK 3000 # number of objects in stack
MEMORY_PIXSTACK 300000 # number of pixels in stack
MEMORY_BUFSIZE 1024 # number of lines in buffer
#----------------------------- Miscellaneous ---------------------------------
VERBOSE_TYPE NORMAL # can be QUIET, NORMAL or FULL
HEADER_SUFFIX .head # Filename extension for additional headers
WRITE_XML N # Write XML file (Y/N)?
XML_NAME sex.xml # Filename for XML output
"""
|
mwcraig/msumastro
|
msumastro/header_processing/astrometry.py
|
Python
|
bsd-3-clause
| 14,999
|
'''
common XBMC Module
Copyright (C) 2011 t0mm0
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import random
import cookielib
import gzip
import re
import StringIO
import urllib
import urllib2
import socket
import time
import kodi
# Set Global timeout - Useful for slow connections and Putlocker.
socket.setdefaulttimeout(10)
# Browser version pools; index i pairs with the template RAND_UAS[i]
# (0: Firefox, 1: Chrome, 2: IE 11/Trident, 3: legacy MSIE).
BR_VERS = [
['%s.0' % i for i in xrange(18, 50)],
['37.0.2062.103', '37.0.2062.120', '37.0.2062.124', '38.0.2125.101', '38.0.2125.104', '38.0.2125.111', '39.0.2171.71', '39.0.2171.95', '39.0.2171.99', '40.0.2214.93', '40.0.2214.111',
'40.0.2214.115', '42.0.2311.90', '42.0.2311.135', '42.0.2311.152', '43.0.2357.81', '43.0.2357.124', '44.0.2403.155', '44.0.2403.157', '45.0.2454.101', '45.0.2454.85', '46.0.2490.71',
'46.0.2490.80', '46.0.2490.86', '47.0.2526.73', '47.0.2526.80', '48.0.2564.116', '49.0.2623.112', '50.0.2661.86'],
['11.0'],
['8.0', '9.0', '10.0', '10.6']]
# Windows OS tokens and optional architecture "feature" strings that are
# spliced into the generated User-Agent.
WIN_VERS = ['Windows NT 10.0', 'Windows NT 7.0', 'Windows NT 6.3', 'Windows NT 6.2', 'Windows NT 6.1', 'Windows NT 6.0', 'Windows NT 5.1', 'Windows NT 5.0']
FEATURES = ['; WOW64', '; Win64; IA64', '; Win64; x64', '']
# User-Agent templates; the {win_ver}/{feature}/{br_ver} placeholders are
# filled by get_ua() below.
RAND_UAS = ['Mozilla/5.0 ({win_ver}{feature}; rv:{br_ver}) Gecko/20100101 Firefox/{br_ver}',
'Mozilla/5.0 ({win_ver}{feature}) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/{br_ver} Safari/537.36',
'Mozilla/5.0 ({win_ver}{feature}; Trident/7.0; rv:{br_ver}) like Gecko',
'Mozilla/5.0 (compatible; MSIE {br_ver}; {win_ver}{feature}; Trident/6.0)']
def get_ua():
    """Return the cached randomized User-Agent, regenerating it weekly."""
    try:
        last_gen = int(kodi.get_setting('last_ua_create'))
    except:
        last_gen = 0

    week_seconds = 7 * 24 * 60 * 60
    if not kodi.get_setting('current_ua') or last_gen < (time.time() - week_seconds):
        # Pick one UA template and a matching browser-version pool, then
        # fill in OS and architecture tokens at random.
        template_index = random.randrange(len(RAND_UAS))
        user_agent = RAND_UAS[template_index].format(
            win_ver=random.choice(WIN_VERS),
            feature=random.choice(FEATURES),
            br_ver=random.choice(BR_VERS[template_index]))
        kodi.set_setting('current_ua', user_agent)
        kodi.set_setting('last_ua_create', str(int(time.time())))
    else:
        user_agent = kodi.get_setting('current_ua')
    return user_agent
class Net:
    '''
    This class wraps :mod:`urllib2` and provides an easy way to make http
    requests while taking care of cookies, proxies, gzip compression and
    character encoding.

    Example::

        from addon.common.net import Net
        net = Net()
        response = net.http_GET('http://xbmc.org')
        print response.content
    '''
    _proxy = None
    _user_agent = 'Mozilla/5.0 (Windows NT 6.3; rv:36.0) Gecko/20100101 Firefox/36.0'
    _http_debug = False

    def __init__(self, cookie_file='', proxy='', user_agent='', http_debug=False):
        '''
        Kwargs:
            cookie_file (str): Full path to a file to be used to load and save
                cookies to.

            proxy (str): Proxy setting (eg.
                ``'http://user:pass@example.com:1234'``)

            user_agent (str): String to use as the User Agent header. If not
                supplied the class will use a default user agent (chrome)

            http_debug (bool): Set ``True`` to have HTTP header info written to
                the XBMC log for all requests.
        '''
        # The cookie jar used to be a *class* attribute, so every Net
        # instance shared -- and clobbered -- the same cookies.  Give each
        # instance its own jar instead.
        self._cj = cookielib.LWPCookieJar()
        if cookie_file:
            self.set_cookies(cookie_file)
        if proxy:
            self.set_proxy(proxy)
        if user_agent:
            self.set_user_agent(user_agent)
        self._http_debug = http_debug
        self._update_opener()

    def set_cookies(self, cookie_file):
        '''
        Set the cookie file and try to load cookies from it if it exists.

        Args:
            cookie_file (str): Full path to a file to be used to load and save
                cookies to.

        Returns:
            bool: ``True`` if the cookies loaded, ``False`` otherwise.
        '''
        try:
            self._cj.load(cookie_file, ignore_discard=True)
            self._update_opener()
            return True
        except:  # best-effort: a missing/corrupt cookie file is not fatal
            return False

    def get_cookies(self):
        '''Returns A dictionary containing all cookie information by domain.'''
        # NOTE: reaches into cookielib's private state; there is no public
        # accessor for the per-domain mapping.
        return self._cj._cookies

    def save_cookies(self, cookie_file):
        '''
        Saves cookies to a file.

        Args:
            cookie_file (str): Full path to a file to save cookies to.
        '''
        self._cj.save(cookie_file, ignore_discard=True)

    def set_proxy(self, proxy):
        '''
        Args:
            proxy (str): Proxy setting (eg.
                ``'http://user:pass@example.com:1234'``)
        '''
        self._proxy = proxy
        self._update_opener()

    def get_proxy(self):
        '''Returns string containing proxy details.'''
        return self._proxy

    def set_user_agent(self, user_agent):
        '''
        Args:
            user_agent (str): String to use as the User Agent header.
        '''
        self._user_agent = user_agent

    def get_user_agent(self):
        '''Returns user agent string.'''
        return self._user_agent

    def _update_opener(self):
        '''
        Builds and installs a new opener to be used by all future calls to
        :func:`urllib2.urlopen`.
        '''
        if self._http_debug:
            http = urllib2.HTTPHandler(debuglevel=1)
        else:
            http = urllib2.HTTPHandler()
        if self._proxy:
            opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self._cj),
                                          urllib2.ProxyHandler({'http':
                                                                self._proxy}),
                                          urllib2.HTTPBasicAuthHandler(),
                                          http)
        else:
            opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self._cj),
                                          urllib2.HTTPBasicAuthHandler(),
                                          http)
        urllib2.install_opener(opener)

    def http_GET(self, url, headers=None, compression=True):
        '''
        Perform an HTTP GET request.

        Args:
            url (str): The URL to GET.

        Kwargs:
            headers (dict): A dictionary describing any headers you would like
                to add to the request. (eg. ``{'X-Test': 'testing'}``)

            compression (bool): If ``True`` (default), try to use gzip
                compression.

        Returns:
            An :class:`HttpResponse` object containing headers and other
            meta-information about the page and the page content.
        '''
        return self._fetch(url, headers=headers, compression=compression)

    def http_POST(self, url, form_data, headers=None, compression=True):
        '''
        Perform an HTTP POST request.

        Args:
            url (str): The URL to POST.

            form_data (dict): A dictionary of form data to POST.

        Kwargs:
            headers (dict): A dictionary describing any headers you would like
                to add to the request. (eg. ``{'X-Test': 'testing'}``)

            compression (bool): If ``True`` (default), try to use gzip
                compression.

        Returns:
            An :class:`HttpResponse` object containing headers and other
            meta-information about the page and the page content.
        '''
        return self._fetch(url, form_data, headers=headers, compression=compression)

    def http_HEAD(self, url, headers=None):
        '''
        Perform an HTTP HEAD request.

        Args:
            url (str): The URL to GET.

        Kwargs:
            headers (dict): A dictionary describing any headers you would like
                to add to the request. (eg. ``{'X-Test': 'testing'}``)

        Returns:
            An :class:`HttpResponse` object containing headers and other
            meta-information about the page.
        '''
        request = urllib2.Request(url)
        request.get_method = lambda: 'HEAD'
        request.add_header('User-Agent', self._user_agent)
        for key, value in (headers or {}).items():
            request.add_header(key, value)
        response = urllib2.urlopen(request)
        return HttpResponse(response)

    def _fetch(self, url, form_data=None, headers=None, compression=True):
        '''
        Perform an HTTP GET or POST request.

        Args:
            url (str): The URL to GET or POST.

            form_data (dict): A dictionary of form data to POST. If empty, the
                request will be a GET, if it contains form data it will be a POST.

        Kwargs:
            headers (dict): A dictionary describing any headers you would like
                to add to the request. (eg. ``{'X-Test': 'testing'}``)

            compression (bool): If ``True`` (default), try to use gzip
                compression.

        Returns:
            An :class:`HttpResponse` object containing headers and other
            meta-information about the page and the page content.
        '''
        req = urllib2.Request(url)
        if form_data:
            # Pre-encoded strings are sent as-is; dicts are urlencoded
            # (doseq=True so list values become repeated fields).
            if not isinstance(form_data, basestring):
                form_data = urllib.urlencode(form_data, True)
            req = urllib2.Request(url, form_data)
        req.add_header('User-Agent', self._user_agent)
        for key, value in (headers or {}).items():
            req.add_header(key, value)
        if compression:
            req.add_header('Accept-Encoding', 'gzip')
        req.add_unredirected_header('Host', req.get_host())
        response = urllib2.urlopen(req)
        return HttpResponse(response)
class HttpResponse:
    '''
    This class represents a response from an HTTP request.

    The content is examined and every attempt is made to properly encode it to
    Unicode.

    .. seealso::
        :meth:`Net.http_GET`, :meth:`Net.http_HEAD` and :meth:`Net.http_POST`
    '''

    def __init__(self, response):
        '''
        Args:
            response (:class:`mimetools.Message`): The object returned by a call
                to :func:`urllib2.urlopen`.
        '''
        self._response = response
        # Cache for the decoded body: the underlying response can only be
        # read once, so repeated .content accesses must not re-read the
        # socket (the second access used to return an empty string).
        self._content_cache = None

    @property
    def content(self):
        '''Unicode encoded string containing the body of the response.'''
        if self._content_cache is not None:
            return self._content_cache
        html = self._response.read()
        encoding = None
        try:
            # Transparently decompress gzip-encoded bodies.
            if self._response.headers['content-encoding'].lower() == 'gzip':
                html = gzip.GzipFile(fileobj=StringIO.StringIO(html)).read()
        except Exception:  # header absent or payload malformed: keep raw body
            pass
        try:
            # Prefer the charset advertised in the Content-Type header...
            content_type = self._response.headers['content-type']
            if 'charset=' in content_type:
                encoding = content_type.split('charset=')[-1]
        except Exception:
            pass
        # ...but let an explicit <meta http-equiv> declaration override it.
        r = re.search('<meta\s+http-equiv="Content-Type"\s+content="(?:.+?);\s+charset=(.+?)"', html, re.IGNORECASE)
        if r:
            encoding = r.group(1)
        if encoding is not None:
            try:
                html = html.decode(encoding)
            except Exception:  # unknown/bogus charset: return undecoded text
                pass
        self._content_cache = html
        return html

    def get_headers(self, as_dict=False):
        '''Returns headers returned by the server.
        If as_dict is True, headers are returned as a dictionary otherwise a list'''
        if as_dict:
            return dict([(item[0].title(), item[1]) for item in self._response.info().items()])
        else:
            return self._response.info().headers

    def get_url(self):
        '''
        Return the URL of the resource retrieved, commonly used to determine if
        a redirect was followed.
        '''
        return self._response.geturl()
|
mrknow/filmkodi
|
script.mrknow.urlresolver/lib/urlresolver9/lib/net.py
|
Python
|
apache-2.0
| 12,168
|
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Logical plan objects have the shape:
{
'spouts': {
spout_name: {
'outputs': [{'stream_name': stream_name}],
}
},
'bolts': {
bolt_name: {
'outputs': [{'stream_name': stream_name}],
'inputs': [{
'stream_name': stream_name,
'component_name': component_name,
'grouping': grouping_type,
}]
}
}
}
"""
import traceback
import tornado.gen
import tornado.web
from heron.common.src.python.utils.log import Log
from heron.tools.tracker.src.python.handlers import BaseHandler
import networkx
def topology_stages(logical_plan):
    """Return the number of stages in a logical plan."""
    edges = []
    for bolt_name, bolt_info in logical_plan.get("bolts", {}).items():
        for input_info in bolt_info["inputs"]:
            edges.append((input_info["component_name"], bolt_name))
    graph = networkx.DiGraph(edges)
    # this is the same as "diameter" if treating the topology as an undirected graph
    return networkx.dag_longest_path_length(graph)
class LogicalPlanHandler(BaseHandler):
    """
    URL - /topologies/logicalplan
    Parameters:
     - cluster (required)
     - role - (role) Role used to submit the topology.
     - environ (required)
     - topology (required) name of the requested topology

    The response JSON is a dictionary with all the
    information of logical plan of the topology.
    """

    # pylint: disable=missing-docstring, attribute-defined-outside-init
    def initialize(self, tracker):
        self.tracker = tracker

    @tornado.gen.coroutine
    def get(self):
        try:
            cluster = self.get_argument_cluster()
            role = self.get_argument_role()
            environ = self.get_argument_environ()
            topology_name = self.get_argument_topology()
            topology_info = self.tracker.get_topology_info(topology_name, cluster, role, environ)
            lplan = topology_info["logical_plan"]
            # Reshape the logical plan the way the web UI (Ambrose) expects:
            # spouts first, followed by bolts.
            spouts_map = {
                name: dict(
                    config=spout.get("config", dict()),
                    outputs=spout["outputs"],
                    spout_type=spout["type"],
                    spout_source=spout["source"],
                    extra_links=spout["extra_links"],
                )
                for name, spout in lplan["spouts"].items()
            }
            bolts_map = {
                name: dict(
                    config=bolt.get("config", dict()),
                    inputComponents=[inp["component_name"] for inp in bolt["inputs"]],
                    inputs=bolt["inputs"],
                    outputs=bolt["outputs"],
                )
                for name, bolt in lplan["bolts"].items()
            }
            result = dict(
                stages=topology_stages(lplan),
                spouts=spouts_map,
                bolts=bolts_map,
            )
            self.write_success_response(result)
        except Exception as e:
            Log.debug(traceback.format_exc())
            self.write_error_response(e)
|
twitter/heron
|
heron/tools/tracker/src/python/handlers/logicalplanhandler.py
|
Python
|
apache-2.0
| 3,748
|
# This file is part of Korman.
#
# Korman is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Korman is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Korman. If not, see <http://www.gnu.org/licenses/>.
import bpy
from pathlib import Path
from . import ui_list
class ImageListUI(bpy.types.UIList):
    """UIList row renderer for an image-library modifier's image entries."""

    def draw_item(self, context, layout, data, item, icon, active_data, active_property, index=0, flt_flag=0):
        # Each row shows the image's name with a .hsm suffix (presumably the
        # exported filename -- TODO confirm against the exporter) plus an
        # enable toggle; rows without an image get an error label instead.
        if item.image is None:
            layout.label("[No Image Specified]", icon="ERROR")
        else:
            layout.label(str(Path(item.image.name).with_suffix(".hsm")), icon_value=item.image.preview.icon_id)
        layout.prop(item, "enabled", text="")
def imagelibmod(modifier, layout, context):
    """Draw the panel for an image-library modifier: the image list plus a
    datablock selector for the active entry."""
    ui_list.draw_modifier_list(layout, "ImageListUI", modifier, "images", "active_image_index", rows=3, maxrows=6)
    # Only show the image selector when the list has at least one entry.
    if modifier.images:
        row = layout.row(align=True)
        row.template_ID(modifier.images[modifier.active_image_index], "image", open="image.open")
def journalbookmod(modifier, layout, context):
    """Draw the panel for a journal-book modifier: display settings on the
    left, content translations and clickable region on the right."""
    layout.prop_menu_enum(modifier, "versions")
    layout.separator()
    split = layout.split()
    main_col = split.column()
    main_col.label("Display Settings:")
    col = main_col.column()
    # start_state is only active when exporting for MOUL.
    col.active = "pvMoul" in modifier.versions
    col.prop(modifier, "start_state", text="")
    main_col.prop(modifier, "book_type", text="")
    main_col.separator()
    main_col.label("Book Scaling:")
    col = main_col.column(align=True)
    col.prop(modifier, "book_scale_w", text="Width", slider=True)
    col.prop(modifier, "book_scale_h", text="Height", slider=True)
    main_col = split.column()
    main_col.label("Content Translations:")
    main_col.prop(modifier, "active_translation", text="")
    # This should never fail...
    try:
        translation = modifier.journal_translations[modifier.active_translation_index]
    except Exception as e:
        # ...but if it does, surface the problem in the UI and log it rather
        # than letting the whole panel draw fail.
        main_col.label(text="Error (see console)", icon="ERROR")
        print(e)
    else:
        main_col.prop(translation, "text_id", text="")
    main_col.separator()
    main_col.label("Clickable Region:")
    main_col.prop(modifier, "clickable_region", text="")
def linkingbookmod(modifier, layout, context):
    """Draw the panel for a linking-book modifier.

    NOTE(review): source indentation was lost upstream; the nesting below is
    reconstructed from the version checks and should be verified.
    """
    def row_alert(prop_name, **kwargs):
        # Draw the property on its own row, highlighted (alert) while unset.
        row = layout.row()
        row.alert = not getattr(modifier, prop_name)
        row.prop(modifier, prop_name, **kwargs)
    layout.prop_menu_enum(modifier, "versions")
    layout.separator()
    row = layout.row()
    row.alert = modifier.clickable is None
    row.prop(modifier, "clickable")
    layout.prop(modifier, "clickable_region")
    # MOUL-only options.
    if "pvMoul" in modifier.versions:
        row_alert("seek_point")
        layout.prop(modifier, "anim_type")
    layout.separator()
    layout.prop(modifier, "link_type")
    row_alert("age_instance")
    # Extra fields required by particular link types.
    if modifier.link_type == "kChildAgeBook":
        row_alert("age_parent")
    if modifier.link_type == "kBasicLink":
        row_alert("age_uuid")
    row_alert("age_name")
    if "pvMoul" in modifier.versions:
        layout.separator()
        layout.prop(modifier, "link_destination")
        layout.prop(modifier, "spawn_title")
        layout.prop(modifier, "spawn_point")
    # Path of the Shell exposes the book artwork options.
    if "pvPots" in modifier.versions:
        layout.separator()
        layout.prop(modifier, "link_panel_image")
        layout.prop(modifier, "book_cover_image")
        layout.prop(modifier, "stamp_image")
        if modifier.stamp_image:
            row = layout.row(align=True)
            row.label("Stamp Position:")
            row.prop(modifier, "stamp_x", text="X")
            row.prop(modifier, "stamp_y", text="Y")
|
Deledrius/korman
|
korman/ui/modifiers/gui.py
|
Python
|
gpl-3.0
| 4,145
|
# Copyright (c) 2008, Aldo Cortesi. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import division
try:
import tracemalloc
except ImportError:
tracemalloc = None
from libqtile.dgroups import DGroups
from xcffib.xproto import EventMask, WindowError, AccessError, DrawableError
import io
import logging
import os
import pickle
import shlex
import signal
import sys
import traceback
import xcffib
import xcffib.xinerama
import xcffib.xproto
import six
import warnings
from . import asyncio
from .config import Drag, Click, Screen, Match, Rule
from .group import _Group
from .log_utils import logger
from .state import QtileState
from .utils import QtileError, get_cache_dir
from .widget.base import _Widget
from . import command
from . import hook
from . import utils
from . import window
from . import xcbq
if sys.version_info >= (3, 3):
def _import_module(module_name, dir_path):
import importlib
file_name = os.path.join(dir_path, module_name) + '.py'
f = importlib.machinery.SourceFileLoader(module_name, file_name)
module = f.load_module()
return module
else:
def _import_module(module_name, dir_path):
import imp
fp = None
try:
fp, pathname, description = imp.find_module(module_name, [dir_path])
module = imp.load_module(module_name, fp, pathname, description)
finally:
if fp:
fp.close()
return module
class Qtile(command.CommandObject):
"""This object is the `root` of the command graph"""
def __init__(self, config, displayName=None, fname=None, no_spawn=False, state=None):
self.no_spawn = no_spawn
self._eventloop = None
self._finalize = False
if not displayName:
displayName = os.environ.get("DISPLAY")
if not displayName:
raise QtileError("No DISPLAY set.")
if not fname:
# Dots might appear in the host part of the display name
# during remote X sessions. Let's strip the host part first.
displayNum = displayName.partition(":")[2]
if "." not in displayNum:
displayName += ".0"
fname = command.find_sockfile(displayName)
self.conn = xcbq.Connection(displayName)
self.config = config
self.fname = fname
hook.init(self)
self.windowMap = {}
self.widgetMap = {}
self.groupMap = {}
self.groups = []
self.keyMap = {}
# Find the modifier mask for the numlock key, if there is one:
nc = self.conn.keysym_to_keycode(xcbq.keysyms["Num_Lock"])
self.numlockMask = xcbq.ModMasks.get(self.conn.get_modifier(nc), 0)
self.validMask = ~(self.numlockMask | xcbq.ModMasks["lock"])
# Because we only do Xinerama multi-screening,
# we can assume that the first
# screen's root is _the_ root.
self.root = self.conn.default_screen.root
self.root.set_attribute(
eventmask=(
EventMask.StructureNotify |
EventMask.SubstructureNotify |
EventMask.SubstructureRedirect |
EventMask.EnterWindow |
EventMask.LeaveWindow
)
)
self.root.set_property(
'_NET_SUPPORTED',
[self.conn.atoms[x] for x in xcbq.SUPPORTED_ATOMS]
)
self.supporting_wm_check_window = self.conn.create_window(-1, -1, 1, 1)
self.root.set_property(
'_NET_SUPPORTING_WM_CHECK',
self.supporting_wm_check_window.wid
)
# setup the default cursor
self.root.set_cursor('left_ptr')
wmname = getattr(self.config, "wmname", "qtile")
self.supporting_wm_check_window.set_property('_NET_WM_NAME', wmname)
self.supporting_wm_check_window.set_property(
'_NET_SUPPORTING_WM_CHECK',
self.supporting_wm_check_window.wid
)
if config.main:
config.main(self)
self.dgroups = None
if self.config.groups:
key_binder = None
if hasattr(self.config, 'dgroups_key_binder'):
key_binder = self.config.dgroups_key_binder
self.dgroups = DGroups(self, self.config.groups, key_binder)
if hasattr(config, "widget_defaults") and config.widget_defaults:
_Widget.global_defaults = config.widget_defaults
else:
_Widget.global_defaults = {}
for i in self.groups:
self.groupMap[i.name] = i
self.setup_eventloop()
self.server = command._Server(self.fname, self, config, self._eventloop)
self.currentScreen = None
self.screens = []
self._process_screens()
self.currentScreen = self.screens[0]
self._drag = None
self.ignoreEvents = set([
xcffib.xproto.KeyReleaseEvent,
xcffib.xproto.ReparentNotifyEvent,
xcffib.xproto.CreateNotifyEvent,
# DWM handles this to help "broken focusing windows".
xcffib.xproto.MapNotifyEvent,
xcffib.xproto.LeaveNotifyEvent,
xcffib.xproto.FocusOutEvent,
xcffib.xproto.FocusInEvent,
xcffib.xproto.NoExposureEvent
])
self.conn.flush()
self.conn.xsync()
self._xpoll()
# Map and Grab keys
for key in self.config.keys:
self.mapKey(key)
# It fixes problems with focus when clicking windows of some specific clients like xterm
def noop(qtile):
pass
self.config.mouse += (Click([], "Button1", command.lazy.function(noop), focus="after"),)
self.mouseMap = {}
for i in self.config.mouse:
if self.mouseMap.get(i.button_code) is None:
self.mouseMap[i.button_code] = []
self.mouseMap[i.button_code].append(i)
self.grabMouse()
# no_spawn is set when we are restarting; we only want to run the
# startup hook once.
if not no_spawn:
hook.fire("startup_once")
hook.fire("startup")
if state:
st = pickle.load(io.BytesIO(state.encode()))
try:
st.apply(self)
except:
logger.exception("failed restoring state")
self.scan()
self.update_net_desktops()
hook.subscribe.setgroup(self.update_net_desktops)
self.selection = {
"PRIMARY": {"owner": None, "selection": ""},
"CLIPBOARD": {"owner": None, "selection": ""}
}
self.setup_selection()
hook.fire("startup_complete")
    def setup_selection(self):
        """Create a hidden window that tracks PRIMARY/CLIPBOARD selections."""
        PRIMARY = self.conn.atoms["PRIMARY"]
        CLIPBOARD = self.conn.atoms["CLIPBOARD"]
        # Tiny off-screen window that only receives selection/property events.
        self.selection_window = self.conn.create_window(-1, -1, 1, 1)
        self.selection_window.set_attribute(eventmask=EventMask.PropertyChange)
        self.conn.xfixes.select_selection_input(self.selection_window,
                                                "PRIMARY")
        self.conn.xfixes.select_selection_input(self.selection_window,
                                                "CLIPBOARD")
        # Record the current owners of both selections.
        r = self.conn.conn.core.GetSelectionOwner(PRIMARY).reply()
        self.selection["PRIMARY"]["owner"] = r.owner
        r = self.conn.conn.core.GetSelectionOwner(CLIPBOARD).reply()
        self.selection["CLIPBOARD"]["owner"] = r.owner
        # Ask for the current selection contents on startup.
        self.convert_selection(PRIMARY)
        self.convert_selection(CLIPBOARD)
    def setup_eventloop(self):
        """Create the asyncio loop, wire signal handlers, watch the X socket."""
        self._eventloop = asyncio.new_event_loop()
        self._eventloop.add_signal_handler(signal.SIGINT, self.stop)
        self._eventloop.add_signal_handler(signal.SIGTERM, self.stop)
        self._eventloop.set_exception_handler(
            lambda x, y: logger.exception("Got an exception in poll loop")
        )
        logger.info('Adding io watch')
        # Dispatch X events whenever the connection's fd becomes readable.
        fd = self.conn.conn.get_file_descriptor()
        self._eventloop.add_reader(fd, self._xpoll)
        self.setup_python_dbus()
    def setup_python_dbus(self):
        """Start a GLib main-loop thread so python-dbus can function."""
        # This is a little strange. python-dbus internally depends on gobject,
        # so gobject's threads need to be running, and a gobject "main loop
        # thread" needs to be spawned, but we try to let it only interact with
        # us via calls to asyncio's call_soon_threadsafe.
        try:
            # We import dbus here to throw an ImportError if it isn't
            # available. Since the only reason we're running this thread is
            # because of dbus, if dbus isn't around there's no need to run
            # this thread.
            import dbus  # noqa
            from gi.repository import GLib

            def gobject_thread():
                # Iterate the default GLib context until qtile finalizes.
                ctx = GLib.main_context_default()
                while not self._finalize:
                    try:
                        ctx.iteration(True)
                    except Exception:
                        logger.exception("got exception from gobject")
            self._glib_loop = self.run_in_executor(gobject_thread)
        except ImportError:
            logger.warning("importing dbus/gobject failed, dbus will not work.")
            self._glib_loop = None
def finalize(self):
self._finalize = True
self._eventloop.remove_signal_handler(signal.SIGINT)
self._eventloop.remove_signal_handler(signal.SIGTERM)
self._eventloop.set_exception_handler(None)
if self._glib_loop:
try:
from gi.repository import GLib
GLib.idle_add(lambda: None)
self._eventloop.run_until_complete(self._glib_loop)
except ImportError:
pass
try:
for w in self.widgetMap.values():
w.finalize()
for l in self.config.layouts:
l.finalize()
for screen in self.screens:
for bar in [screen.top, screen.bottom, screen.left, screen.right]:
if bar is not None:
bar.finalize()
logger.info('Removing io watch')
fd = self.conn.conn.get_file_descriptor()
self._eventloop.remove_reader(fd)
self.conn.finalize()
self.server.close()
except:
logger.exception('exception during finalize')
finally:
self._eventloop.close()
self._eventloop = None
    def _process_fake_screens(self):
        """
        Since Xephyr and Xnest don't really support offset screens, we'll fake
        it here for testing, (or if you want to partition a physical monitor
        into separate screens)
        """
        for i, s in enumerate(self.config.fake_screens):
            # should have x,y, width and height set
            s._configure(self, i, s.x, s.y, s.width, s.height, self.groups[i])
            # The first fake screen becomes the current screen.
            if not self.currentScreen:
                self.currentScreen = s
            self.screens.append(s)
    def _process_screens(self):
        """Populate self.screens from Xinerama info (or configured fakes)."""
        if hasattr(self.config, 'fake_screens'):
            self._process_fake_screens()
            return
        # What's going on here is a little funny. What we really want is only
        # screens that don't overlap here; overlapping screens should see the
        # same parts of the root window (i.e. for people doing xrandr
        # --same-as). However, the order that X gives us pseudo screens in is
        # important, because it indicates what people have chosen via xrandr
        # --primary or whatever. So we need to alias screens that should be
        # aliased, but preserve order as well. See #383.
        xywh = {}
        screenpos = []
        for s in self.conn.pseudoscreens:
            pos = (s.x, s.y)
            (w, h) = xywh.get(pos, (0, 0))
            if pos not in xywh:
                screenpos.append(pos)
            # Keep the largest geometry seen for each distinct origin.
            xywh[pos] = (max(w, s.width), max(h, s.height))
        for i, (x, y) in enumerate(screenpos):
            (w, h) = xywh[(x, y)]
            if i + 1 > len(self.config.screens):
                # More monitors than configured screens: use a default Screen.
                scr = Screen()
            else:
                scr = self.config.screens[i]
            if not self.currentScreen:
                self.currentScreen = scr
            scr._configure(
                self,
                i,
                x,
                y,
                w,
                h,
                self.groups[i],
            )
            self.screens.append(scr)
        if not self.screens:
            # No Xinerama data at all: one screen spanning the whole root.
            if self.config.screens:
                s = self.config.screens[0]
            else:
                s = Screen()
            self.currentScreen = s
            s._configure(
                self,
                0, 0, 0,
                self.conn.default_screen.width_in_pixels,
                self.conn.default_screen.height_in_pixels,
                self.groups[0],
            )
            self.screens.append(s)
def mapKey(self, key):
self.keyMap[(key.keysym, key.modmask & self.validMask)] = key
code = self.conn.keysym_to_keycode(key.keysym)
self.root.grab_key(
code,
key.modmask,
True,
xcffib.xproto.GrabMode.Async,
xcffib.xproto.GrabMode.Async,
)
if self.numlockMask:
self.root.grab_key(
code,
key.modmask | self.numlockMask,
True,
xcffib.xproto.GrabMode.Async,
xcffib.xproto.GrabMode.Async,
)
self.root.grab_key(
code,
key.modmask | self.numlockMask | xcbq.ModMasks["lock"],
True,
xcffib.xproto.GrabMode.Async,
xcffib.xproto.GrabMode.Async,
)
def unmapKey(self, key):
key_index = (key.keysym, key.modmask & self.validMask)
if key_index not in self.keyMap:
return
code = self.conn.keysym_to_keycode(key.keysym)
self.root.ungrab_key(code, key.modmask)
if self.numlockMask:
self.root.ungrab_key(code, key.modmask | self.numlockMask)
self.root.ungrab_key(
code,
key.modmask | self.numlockMask | xcbq.ModMasks["lock"]
)
del(self.keyMap[key_index])
    def update_net_desktops(self):
        """Publish EWMH desktop (group) properties on the root window."""
        try:
            index = self.groups.index(self.currentGroup)
            # TODO: we should really only except ValueError here, AttributeError is
            # an annoying chicken and egg because we're accessing currentScreen
            # (via currentGroup), and when we set up the initial groups, there
            # aren't any screens yet. This can probably be changed when #475 is
            # fixed.
        except (ValueError, AttributeError):
            index = 0
        self.root.set_property("_NET_NUMBER_OF_DESKTOPS", len(self.groups))
        self.root.set_property(
            "_NET_DESKTOP_NAMES", "\0".join([i.name for i in self.groups])
        )
        self.root.set_property("_NET_CURRENT_DESKTOP", index)
def addGroup(self, name, layout=None, layouts=None):
if name not in self.groupMap.keys():
g = _Group(name, layout)
self.groups.append(g)
if not layouts:
layouts = self.config.layouts
g._configure(layouts, self.config.floating_layout, self)
self.groupMap[name] = g
hook.fire("addgroup", self, name)
hook.fire("changegroup")
self.update_net_desktops()
return True
return False
    def delGroup(self, name):
        """Delete group *name*, migrating its windows to another group.

        Raises ValueError when deleting would leave fewer groups than
        screens (each screen always needs a group to show).
        """
        # one group per screen is needed
        if len(self.groups) == len(self.screens):
            raise ValueError("Can't delete all groups.")
        if name in self.groupMap.keys():
            group = self.groupMap[name]
            if group.screen and group.screen.previous_group:
                target = group.screen.previous_group
            else:
                target = group.prevGroup()
            # Find a group that's not currently on a screen to bring to the
            # front. This will terminate because of our check above.
            while target.screen:
                target = target.prevGroup()
            for i in list(group.windows):
                i.togroup(target.name)
            if self.currentGroup.name == name:
                self.currentScreen.setGroup(target, save_prev=False)
            self.groups.remove(group)
            del(self.groupMap[name])
            hook.fire("delgroup", self, name)
            hook.fire("changegroup")
            self.update_net_desktops()
def registerWidget(self, w):
"""Register a bar widget
If a widget with the same name already exists, this will silently
ignore that widget. However, this is not necessarily a bug. By default
a widget's name is just ``self.__class__.lower()``, so putting multiple
widgets of the same class will alias and one will be inaccessible.
Since more than one groupbox widget is useful when you have more than
one screen, this is a not uncommon occurrence. If you want to use the
debug info for widgets with the same name, set the name yourself.
"""
if w.name:
if w.name in self.widgetMap:
return
self.widgetMap[w.name] = w
    @utils.lru_cache()
    def colorPixel(self, name):
        # Cached: allocating a color round-trips to the X server.
        return self.conn.screens[0].default_colormap.alloc_color(name).pixel
    @property
    def currentLayout(self):
        """Layout of the group on the currently focused screen."""
        return self.currentGroup.layout
    @property
    def currentGroup(self):
        """Group shown on the currently focused screen."""
        return self.currentScreen.group
    @property
    def currentWindow(self):
        """Focused window of the current group (may be None)."""
        return self.currentScreen.group.currentWindow
def scan(self):
_, _, children = self.root.query_tree()
for item in children:
try:
attrs = item.get_attributes()
state = item.get_wm_state()
except (xcffib.xproto.WindowError, xcffib.xproto.AccessError):
continue
if attrs and attrs.map_state == xcffib.xproto.MapState.Unmapped:
continue
if state and state[0] == window.WithdrawnState:
continue
self.manage(item)
def unmanage(self, win):
c = self.windowMap.get(win)
if c:
hook.fire("client_killed", c)
self.reset_gaps(c)
if getattr(c, "group", None):
c.group.remove(c)
del self.windowMap[win]
self.update_client_list()
    def reset_gaps(self, c):
        """Remove the screen gap reserved for client *c*'s strut, if any."""
        if c.strut:
            self.update_gaps((0, 0, 0, 0), c.strut)
    def update_gaps(self, strut, old_strut=None):
        """Adjust the current screen's edge gaps for a strut change.

        *strut* / *old_strut* are (left, right, top, bottom) pixel
        reservations, as in _NET_WM_STRUT.
        """
        from libqtile.bar import Gap
        (left, right, top, bottom) = strut[:4]
        if old_strut:
            (old_left, old_right, old_top, old_bottom) = old_strut[:4]
            # Clear a previously reserved edge that is no longer requested.
            if not left and old_left:
                self.currentScreen.left = None
            elif not right and old_right:
                self.currentScreen.right = None
            elif not top and old_top:
                self.currentScreen.top = None
            elif not bottom and old_bottom:
                self.currentScreen.bottom = None
        # NOTE(review): these elif chains update at most ONE edge per call;
        # a strut reserving several edges would need multiple calls — confirm
        # that is the intended contract.
        if top:
            self.currentScreen.top = Gap(top)
        elif bottom:
            self.currentScreen.bottom = Gap(bottom)
        elif left:
            self.currentScreen.left = Gap(left)
        elif right:
            self.currentScreen.right = Gap(right)
        self.currentScreen.resize()
    def manage(self, w):
        """Begin managing xcbq window *w*; return its client or None.

        Windows with override_redirect (popups, menus) are ignored.  Windows
        carrying the QTILE_INTERNAL property get a lightweight Internal
        wrapper; dock-type or strut-reserving windows become static.
        """
        try:
            attrs = w.get_attributes()
            internal = w.get_property("QTILE_INTERNAL")
        except (xcffib.xproto.WindowError, xcffib.xproto.AccessError):
            # The window died before we could inspect it.
            return
        if attrs and attrs.override_redirect:
            return
        if w.wid not in self.windowMap:
            if internal:
                try:
                    c = window.Internal(w, self)
                except (xcffib.xproto.WindowError, xcffib.xproto.AccessError):
                    return
                self.windowMap[w.wid] = c
            else:
                try:
                    c = window.Window(w, self)
                except (xcffib.xproto.WindowError, xcffib.xproto.AccessError):
                    return
                if w.get_wm_type() == "dock" or c.strut:
                    c.static(self.currentScreen.index)
                else:
                    hook.fire("client_new", c)
                    # Window may be defunct because
                    # it's been declared static in hook.
                    if c.defunct:
                        return
                    self.windowMap[w.wid] = c
                    # Window may have been bound to a group in the hook.
                    if not c.group:
                        self.currentScreen.group.add(c, focus=c.can_steal_focus())
                self.update_client_list()
                hook.fire("client_managed", c)
            return c
        else:
            return self.windowMap[w.wid]
def update_client_list(self):
"""Updates the client stack list
This is needed for third party tasklists and drag and drop of tabs in
chrome
"""
windows = [wid for wid, c in self.windowMap.items() if c.group]
self.root.set_property("_NET_CLIENT_LIST", windows)
# TODO: check stack order
self.root.set_property("_NET_CLIENT_LIST_STACKING", windows)
def grabMouse(self):
self.root.ungrab_button(None, None)
for i in self.config.mouse:
if isinstance(i, Click) and i.focus:
# Make a freezing grab on mouse button to gain focus
# Event will propagate to target window
grabmode = xcffib.xproto.GrabMode.Sync
else:
grabmode = xcffib.xproto.GrabMode.Async
eventmask = EventMask.ButtonPress
if isinstance(i, Drag):
eventmask |= EventMask.ButtonRelease
self.root.grab_button(
i.button_code,
i.modmask,
True,
eventmask,
grabmode,
xcffib.xproto.GrabMode.Async,
)
if self.numlockMask:
self.root.grab_button(
i.button_code,
i.modmask | self.numlockMask,
True,
eventmask,
grabmode,
xcffib.xproto.GrabMode.Async,
)
self.root.grab_button(
i.button_code,
i.modmask | self.numlockMask | xcbq.ModMasks["lock"],
True,
eventmask,
grabmode,
xcffib.xproto.GrabMode.Async,
)
def grabKeys(self):
self.root.ungrab_key(None, None)
for key in self.keyMap.values():
self.mapKey(key)
    def get_target_chain(self, ename, e):
        """Returns a chain of targets that can handle this event

        Finds functions named `handle_X`, either on the window object itself or
        on the Qtile instance, where X is the event name (e.g. EnterNotify,
        ConfigureNotify, etc).

        The event will be passed to each target in turn for handling, until one
        of the handlers returns False or None, or the end of the chain is
        reached.
        """
        chain = []
        handler = "handle_%s" % ename
        # Certain events expose the affected window id as an "event" attribute.
        eventEvents = [
            "EnterNotify",
            "ButtonPress",
            "ButtonRelease",
            "KeyPress",
        ]
        # Resolve which managed client (if any) the event refers to.
        if hasattr(e, "window"):
            c = self.windowMap.get(e.window)
        elif hasattr(e, "drawable"):
            c = self.windowMap.get(e.drawable)
        elif ename in eventEvents:
            c = self.windowMap.get(e.event)
        else:
            c = None
        # Client handler runs first, then the Qtile-level handler.
        if c is not None and hasattr(c, handler):
            chain.append(getattr(c, handler))
        if hasattr(self, handler):
            chain.append(getattr(self, handler))
        if not chain:
            logger.info("Unhandled event: %r" % ename)
        return chain
    def _xpoll(self):
        """Drain all pending X events, dispatch each, then flush requests."""
        while True:
            try:
                e = self.conn.conn.poll_for_event()
                if not e:
                    break
                # Strip the trailing "Event" to get the handler suffix.
                ename = e.__class__.__name__
                if ename.endswith("Event"):
                    ename = ename[:-5]
                if e.__class__ not in self.ignoreEvents:
                    logger.debug(ename)
                    for h in self.get_target_chain(ename, e):
                        logger.info("Handling: %s" % ename)
                        r = h(e)
                        # A falsy return stops propagation along the chain.
                        if not r:
                            break
            # Catch some bad X exceptions. Since X is event based, race
            # conditions can occur almost anywhere in the code. For
            # example, if a window is created and then immediately
            # destroyed (before the event handler is evoked), when the
            # event handler tries to examine the window properties, it
            # will throw a WindowError exception. We can essentially
            # ignore it, since the window is already dead and we've got
            # another event in the queue notifying us to clean it up.
            except (WindowError, AccessError, DrawableError):
                pass
            except Exception as e:
                # A dead X connection is fatal: shut the event loop down.
                error_code = self.conn.conn.has_error()
                if error_code:
                    error_string = xcbq.XCB_CONN_ERRORS[error_code]
                    logger.exception("Shutting down due to X connection error %s (%s)" %
                                     (error_string, error_code))
                    self.stop()
                    return
                logger.exception("Got an exception in poll loop")
        self.conn.flush()
    def stop(self):
        """Ask the event loop to exit; loop() then performs finalize()."""
        logger.info('Stopping eventloop')
        self._eventloop.stop()
    def loop(self):
        """Run the command server and event loop until stop() is called."""
        self.server.start()
        try:
            self._eventloop.run_forever()
        finally:
            # Always tear down, even if the loop died with an exception.
            self.finalize()
def find_screen(self, x, y):
"""Find a screen based on the x and y offset"""
result = []
for i in self.screens:
if i.x <= x <= i.x + i.width and \
i.y <= y <= i.y + i.height:
result.append(i)
if len(result) == 1:
return result[0]
return None
    def find_closest_screen(self, x, y):
        """
        If find_screen returns None, then this basically extends a
        screen vertically and horizontally and see if x,y lies in the
        band.

        Only works if it can find a SINGLE closest screen, else we
        revert to _find_closest_closest.

        Useful when dragging a window out of a screen onto another but
        having leftmost corner above viewport.
        """
        normal = self.find_screen(x, y)
        if normal is not None:
            return normal
        # Collect screens whose horizontal or vertical band contains the point.
        x_match = []
        y_match = []
        for i in self.screens:
            if i.x <= x <= i.x + i.width:
                x_match.append(i)
            if i.y <= y <= i.y + i.height:
                y_match.append(i)
        if len(x_match) == 1:
            return x_match[0]
        if len(y_match) == 1:
            return y_match[0]
        return self._find_closest_closest(x, y, x_match + y_match)
def _find_closest_closest(self, x, y, candidate_screens):
"""
if find_closest_screen can't determine one, we've got multiple
screens, so figure out who is closer. We'll calculate using
the square of the distance from the center of a screen.
Note that this could return None if x, y is right/below all
screens (shouldn't happen but we don't do anything about it
here other than returning None)
"""
closest_distance = None
closest_screen = None
if not candidate_screens:
# try all screens
candidate_screens = self.screens
# if left corner is below and right of screen
# it can't really be a candidate
candidate_screens = [
s for s in candidate_screens
if x < s.x + s.width and y < s.y + s.height
]
for s in candidate_screens:
middle_x = s.x + s.width / 2
middle_y = s.y + s.height / 2
distance = (x - middle_x) ** 2 + (y - middle_y) ** 2
if closest_distance is None or distance < closest_distance:
closest_distance = distance
closest_screen = s
return closest_screen
    def handle_SelectionNotify(self, e):
        """Track a change of selection owner and re-request the contents."""
        if not getattr(e, "owner", None):
            return
        name = self.conn.atoms.get_name(e.selection)
        self.selection[name]["owner"] = e.owner
        self.selection[name]["selection"] = ""
        # Ask the new owner to write the value to our window; the data
        # arrives later via a PropertyNotify event.
        self.convert_selection(e.selection)
        hook.fire("selection_notify", name, self.selection[name])
    def convert_selection(self, selection, _type="UTF8_STRING"):
        """Request *selection* be converted to *_type* on our hidden window."""
        TYPE = self.conn.atoms[_type]
        self.conn.conn.core.ConvertSelection(self.selection_window.wid,
                                             selection,
                                             TYPE, selection,
                                             xcffib.CurrentTime)
def handle_PropertyNotify(self, e):
name = self.conn.atoms.get_name(e.atom)
# it's the selection property
if name in ("PRIMARY", "CLIPBOARD"):
assert e.window == self.selection_window.wid
prop = self.selection_window.get_property(e.atom, "UTF8_STRING")
# If the selection property is None, it is unset, which means the
# clipboard is empty.
value = prop and prop.value.to_utf8() or u""
self.selection[name]["selection"] = value
hook.fire("selection_change", name, self.selection[name])
    def handle_EnterNotify(self, e):
        """Follow the pointer: switch current screen when it crosses over."""
        if e.event in self.windowMap:
            # The managed window's own handler deals with this event.
            return True
        s = self.find_screen(e.root_x, e.root_y)
        if s:
            self.toScreen(s.index, warp=False)
    def handle_ClientMessage(self, event):
        """Handle EWMH client messages (currently only desktop switching)."""
        atoms = self.conn.atoms
        opcode = event.type
        data = event.data
        # handle change of desktop
        if atoms["_NET_CURRENT_DESKTOP"] == opcode:
            index = data.data32[0]
            try:
                self.currentScreen.setGroup(self.groups[index])
            except IndexError:
                logger.info("Invalid Desktop Index: %s" % index)
    def handle_KeyPress(self, e):
        """Look up the pressed key in the key map and run its commands."""
        keysym = self.conn.code_to_syms[e.detail][0]
        state = e.state
        if self.numlockMask:
            # Ignore Num Lock when matching the binding.
            state = e.state | self.numlockMask
        k = self.keyMap.get((keysym, state & self.validMask))
        if not k:
            logger.info("Ignoring unknown keysym: %s" % keysym)
            return
        for i in k.commands:
            if i.check(self):
                status, val = self.server.call(
                    (i.selectors, i.name, i.args, i.kwargs)
                )
                if status in (command.ERROR, command.EXCEPTION):
                    logger.error("KB command error %s: %s" % (i.name, val))
            else:
                # First failing check aborts the whole command list.
                return
def cmd_focus_by_click(self, e):
"""Bring a window to the front
Parameters
==========
e : xcb event
Click event used to determine window to focus
"""
wnd = e.child or e.root
# Additional option for config.py
# Brings clicked window to front
if self.config.bring_front_click:
self.conn.conn.core.ConfigureWindow(
wnd,
xcffib.xproto.ConfigWindow.StackMode,
[xcffib.xproto.StackMode.Above]
)
window = self.windowMap.get(wnd)
if window and not window.window.get_property('QTILE_INTERNAL'):
self.currentGroup.focus(self.windowMap.get(wnd), False)
self.windowMap.get(wnd).focus(False)
self.conn.conn.core.AllowEvents(xcffib.xproto.Allow.ReplayPointer, e.time)
self.conn.conn.flush()
def handle_ButtonPress(self, e):
button_code = e.detail
state = e.state
if self.numlockMask:
state = e.state | self.numlockMask
k = self.mouseMap.get(button_code)
for m in k:
if not m or m.modmask & self.validMask != state & self.validMask:
logger.info("Ignoring unknown button: %s" % button_code)
continue
if isinstance(m, Click):
for i in m.commands:
if i.check(self):
if m.focus == "before":
self.cmd_focus_by_click(e)
status, val = self.server.call(
(i.selectors, i.name, i.args, i.kwargs))
if m.focus == "after":
self.cmd_focus_by_click(e)
if status in (command.ERROR, command.EXCEPTION):
logger.error(
"Mouse command error %s: %s" % (i.name, val)
)
elif isinstance(m, Drag):
x = e.event_x
y = e.event_y
if m.start:
i = m.start
if m.focus == "before":
self.cmd_focus_by_click(e)
status, val = self.server.call(
(i.selectors, i.name, i.args, i.kwargs))
if status in (command.ERROR, command.EXCEPTION):
logger.error(
"Mouse command error %s: %s" % (i.name, val)
)
continue
else:
val = (0, 0)
if m.focus == "after":
self.cmd_focus_by_click(e)
self._drag = (x, y, val[0], val[1], m.commands)
self.root.grab_pointer(
True,
xcbq.ButtonMotionMask |
xcbq.AllButtonsMask |
xcbq.ButtonReleaseMask,
xcffib.xproto.GrabMode.Async,
xcffib.xproto.GrabMode.Async,
)
def handle_ButtonRelease(self, e):
button_code = e.detail
state = e.state & ~xcbq.AllButtonsMask
if self.numlockMask:
state = state | self.numlockMask
k = self.mouseMap.get(button_code)
for m in k:
if not m:
logger.info(
"Ignoring unknown button release: %s" % button_code
)
continue
if isinstance(m, Drag):
self._drag = None
self.root.ungrab_pointer()
    def handle_MotionNotify(self, e):
        """Advance an active drag: run drag commands with updated coords."""
        if self._drag is None:
            return
        # Origin of the drag plus the command's starting return value.
        ox, oy, rx, ry, cmd = self._drag
        dx = e.event_x - ox
        dy = e.event_y - oy
        if dx or dy:
            for i in cmd:
                if i.check(self):
                    status, val = self.server.call((
                        i.selectors,
                        i.name,
                        i.args + (rx + dx, ry + dy, e.event_x, e.event_y),
                        i.kwargs
                    ))
                    if status in (command.ERROR, command.EXCEPTION):
                        logger.error(
                            "Mouse command error %s: %s" % (i.name, val)
                        )
    def handle_ConfigureNotify(self, e):
        """Handle xrandr events"""
        screen = self.currentScreen
        # NOTE(review): `and` means BOTH dimensions must differ before the
        # screen is resized; a change in only one dimension is ignored —
        # confirm whether `or` was intended here.
        if e.window == self.root.wid and \
                e.width != screen.width and \
                e.height != screen.height:
            screen.resize(0, 0, e.width, e.height)
def handle_ConfigureRequest(self, e):
# It's not managed, or not mapped, so we just obey it.
cw = xcffib.xproto.ConfigWindow
args = {}
if e.value_mask & cw.X:
args["x"] = max(e.x, 0)
if e.value_mask & cw.Y:
args["y"] = max(e.y, 0)
if e.value_mask & cw.Height:
args["height"] = max(e.height, 0)
if e.value_mask & cw.Width:
args["width"] = max(e.width, 0)
if e.value_mask & cw.BorderWidth:
args["borderwidth"] = max(e.border_width, 0)
w = xcbq.Window(self.conn, e.window)
w.configure(**args)
    def handle_MappingNotify(self, e):
        """Reload the keymap and re-grab keys after a keyboard remap."""
        self.conn.refresh_keymap()
        if e.request == xcffib.xproto.Mapping.Keyboard:
            self.grabKeys()
    def handle_MapRequest(self, e):
        """Manage a window asking to be mapped; map it if it is visible."""
        w = xcbq.Window(self.conn, e.window)
        c = self.manage(w)
        if c and (not c.group or not c.group.screen):
            # Managed, but its group isn't shown on any screen: stay unmapped.
            return
        w.map()
    def handle_DestroyNotify(self, e):
        """Forget the client whose window has been destroyed."""
        self.unmanage(e.window)
    def handle_UnmapNotify(self, e):
        """Withdraw and unmanage a client whose window was unmapped."""
        if e.event != self.root.wid:
            c = self.windowMap.get(e.window)
            if c and getattr(c, "group", None):
                try:
                    c.window.unmap()
                    c.state = window.WithdrawnState
                except xcffib.xproto.WindowError:
                    # This means that the window has probably been destroyed,
                    # but we haven't yet seen the DestroyNotify (it is likely
                    # next in the queue). So, we just let these errors pass
                    # since the window is dead.
                    pass
            self.unmanage(e.window)
    def handle_ScreenChangeNotify(self, e):
        """Forward RandR screen-change events to subscribed hooks."""
        hook.fire("screen_change", self, e)
    def toScreen(self, n, warp=True):
        """Have Qtile move to screen and put focus there"""
        # Out-of-range indices are silently ignored.
        if n >= len(self.screens):
            return
        old = self.currentScreen
        self.currentScreen = self.screens[n]
        if old != self.currentScreen:
            hook.fire("current_screen_change")
            old.group.layoutAll()
            self.currentGroup.focus(self.currentWindow, warp)
def moveToGroup(self, group):
"""Create a group if it doesn't exist and move a windows there"""
if self.currentWindow and group:
self.addGroup(group)
self.currentWindow.togroup(group)
    def _items(self, name):
        """Command-graph navigation: list the selectable items for *name*.

        Returns (root_selectable, keys): the bool says whether the bare
        object (no selector) is itself addressable.
        """
        if name == "group":
            return True, list(self.groupMap.keys())
        elif name == "layout":
            return True, list(range(len(self.currentGroup.layouts)))
        elif name == "widget":
            return False, list(self.widgetMap.keys())
        elif name == "bar":
            return False, [x.position for x in self.currentScreen.gaps]
        elif name == "window":
            return True, self.listWID()
        elif name == "screen":
            return True, list(range(len(self.screens)))
    def _select(self, name, sel):
        """Command-graph navigation: resolve selector *sel* within *name*.

        sel=None picks the "current" object where one exists.
        """
        if name == "group":
            if sel is None:
                return self.currentGroup
            else:
                return self.groupMap.get(sel)
        elif name == "layout":
            if sel is None:
                return self.currentGroup.layout
            else:
                return utils.lget(self.currentGroup.layouts, sel)
        elif name == "widget":
            return self.widgetMap.get(sel)
        elif name == "bar":
            return getattr(self.currentScreen, sel)
        elif name == "window":
            if sel is None:
                return self.currentWindow
            else:
                return self.clientFromWID(sel)
        elif name == "screen":
            if sel is None:
                return self.currentScreen
            else:
                return utils.lget(self.screens, sel)
def listWID(self):
return [i.window.wid for i in self.windowMap.values()]
def clientFromWID(self, wid):
for i in self.windowMap.values():
if i.window.wid == wid:
return i
return None
    def call_soon(self, func, *args):
        """ A wrapper for the event loop's call_soon which also flushes the X
        event queue to the server after func is called. """
        def f():
            func(*args)
            # Flush so X requests made by func reach the server promptly.
            self.conn.flush()
        self._eventloop.call_soon(f)
    def call_soon_threadsafe(self, func, *args):
        """ Another event loop proxy, see `call_soon`. """
        def f():
            func(*args)
            self.conn.flush()
        self._eventloop.call_soon_threadsafe(f)
    def call_later(self, delay, func, *args):
        """ Another event loop proxy, see `call_soon`. """
        def f():
            func(*args)
            self.conn.flush()
        self._eventloop.call_later(delay, f)
    def run_in_executor(self, func, *args):
        """ A wrapper for running a function in the event loop's default
        executor. """
        return self._eventloop.run_in_executor(None, func, *args)
    # The cmd_* methods below adjust the logger threshold at runtime via the
    # command interface (qshell / qtile-cmd).
    def cmd_debug(self):
        """Set log level to DEBUG"""
        logger.setLevel(logging.DEBUG)
        logger.debug('Switching to DEBUG threshold')
    def cmd_info(self):
        """Set log level to INFO"""
        logger.setLevel(logging.INFO)
        logger.info('Switching to INFO threshold')
    def cmd_warning(self):
        """Set log level to WARNING"""
        logger.setLevel(logging.WARNING)
        logger.warning('Switching to WARNING threshold')
    def cmd_error(self):
        """Set log level to ERROR"""
        logger.setLevel(logging.ERROR)
        logger.error('Switching to ERROR threshold')
    def cmd_critical(self):
        """Set log level to CRITICAL"""
        logger.setLevel(logging.CRITICAL)
        logger.critical('Switching to CRITICAL threshold')
    def cmd_pause(self):
        """Drops into pdb"""
        # Blocks the whole event loop until the debugger session ends.
        import pdb
        pdb.set_trace()
    def cmd_groups(self):
        """Return a dictionary containing information for all groups

        Examples
        ========

            groups()
        """
        return {i.name: i.info() for i in self.groups}
    def cmd_get_info(self):
        """Prints info for all groups"""
        # Deprecated alias kept for backwards compatibility; use `groups`.
        warnings.warn("The `get_info` command is deprecated, use `groups`", DeprecationWarning)
        return self.cmd_groups()
def cmd_display_kb(self, *args):
"""Display table of key bindings"""
class FormatTable(object):
def __init__(self):
self.max_col_size = []
self.rows = []
def add(self, row):
n = len(row) - len(self.max_col_size)
if n > 0:
self.max_col_size += [0] * n
for i, f in enumerate(row):
if len(f) > self.max_col_size[i]:
self.max_col_size[i] = len(f)
self.rows.append(row)
def getformat(self):
return " ".join((["%-{0:d}s".format(max_col_size + 2) for max_col_size in self.max_col_size])) + "\n", len(self.max_col_size)
def expandlist(self, list, n):
if not list:
return ["-" * max_col_size for max_col_size in self.max_col_size]
n -= len(list)
if n > 0:
list += [""] * n
return list
def __str__(self):
format, n = self.getformat()
return "".join([format % tuple(self.expandlist(row, n)) for row in self.rows])
result = FormatTable()
result.add(["KeySym", "Mod", "Command", "Desc"])
result.add([])
rows = []
for (ks, kmm), k in self.keyMap.items():
if not k.commands:
continue
name = ", ".join(xcbq.rkeysyms.get(ks, ("<unknown>", )))
modifiers = ", ".join(utils.translate_modifiers(kmm))
allargs = ", ".join([repr(value) for value in k.commands[0].args] + ["%s = %s" % (keyword, repr(value)) for keyword, value in k.commands[0].kwargs.items()])
rows.append((name, str(modifiers), "{0:s}({1:s})".format(k.commands[0].name, allargs), k.desc))
rows.sort()
for row in rows:
result.add(row)
return str(result)
    def cmd_list_widgets(self):
        """List of all addressable widget names"""
        return list(self.widgetMap.keys())
def cmd_to_layout_index(self, index, group=None):
"""Switch to the layout with the given index in self.layouts.
Parameters
==========
index :
Index of the layout in the list of layouts.
group :
Group name. If not specified, the current group is assumed.
"""
if group:
group = self.groupMap.get(group)
else:
group = self.currentGroup
group.toLayoutIndex(index)
def cmd_next_layout(self, group=None):
"""Switch to the next layout.
Parameters
==========
group :
Group name. If not specified, the current group is assumed
"""
if group:
group = self.groupMap.get(group)
else:
group = self.currentGroup
group.nextLayout()
def cmd_prev_layout(self, group=None):
"""Switch to the previous layout.
Parameters
==========
group :
Group name. If not specified, the current group is assumed
"""
if group:
group = self.groupMap.get(group)
else:
group = self.currentGroup
group.prevLayout()
def cmd_screens(self):
    """Return a list of dictionaries describing every configured screen."""
    infos = []
    for scr in self.screens:
        infos.append(dict(
            index=scr.index,
            group=scr.group.name if scr.group is not None else None,
            x=scr.x,
            y=scr.y,
            width=scr.width,
            height=scr.height,
            gaps=dict(
                top=scr.top.geometry() if scr.top else None,
                bottom=scr.bottom.geometry() if scr.bottom else None,
                left=scr.left.geometry() if scr.left else None,
                right=scr.right.geometry() if scr.right else None,
            ),
        ))
    return infos
def cmd_simulate_keypress(self, modifiers, key):
    """Simulate a keypress on the currently focused window.

    Parameters
    ==========
    modifiers :
        A list of modifier specification strings. Modifiers can be one of
        "shift", "lock", "control" and "mod1" - "mod5".
    key :
        Key specification.

    Examples
    ========
        simulate_keypress(["control", "mod2"], "k")
    """
    # FIXME: This needs to be done with sendevent, once we have that fixed.
    keysym = xcbq.keysyms.get(key)
    if keysym is None:
        raise command.CommandError(u"Unknown key: {0:s}".format(key))

    class _FakeEvent(object):
        pass

    event = _FakeEvent()
    event.detail = self.conn.first_sym_to_code[keysym]
    try:
        event.state = utils.translate_masks(modifiers)
    except KeyError as err:
        return err.args[0]
    self.handle_KeyPress(event)
def cmd_execute(self, cmd, args):
    """Replace the current qtile process image with ``cmd``.

    The window manager is stopped first; ``os.execv`` does not return
    on success.
    """
    self.stop()
    os.execv(cmd, args)
def cmd_restart(self):
    """Restart qtile by exec'ing a fresh process image.

    Current state is pickled and handed to the new process via the
    ``--with-state`` argument so the session survives the restart.  If
    pickling fails, the restart proceeds without any state argument
    instead of passing an empty/stale payload.
    """
    argv = [sys.executable] + sys.argv
    if '--no-spawn' not in argv:
        argv.append('--no-spawn')
    # Drop any state argument from the previous invocation up-front so a
    # pickling failure below cannot leave stale state on the command line.
    argv = [s for s in argv if not s.startswith('--with-state')]
    buf = io.BytesIO()
    try:
        pickle.dump(QtileState(self), buf, protocol=0)
    except Exception:
        # Was a bare ``except:`` that then appended a possibly empty
        # '--with-state=' payload; now we log and restart stateless.
        logger.exception("Unable to pickle qtile state")
    else:
        argv.append('--with-state=' + buf.getvalue().decode())
    self.cmd_execute(sys.executable, argv)
def cmd_spawn(self, cmd):
    """Run cmd in a shell.

    cmd may be a string, which is parsed by shlex.split, or a list (similar
    to subprocess.Popen).

    Returns the PID of the spawned command (read back through a pipe from
    the intermediate child), or the negative fork() result on failure.

    Examples
    ========
        spawn("firefox")

        spawn(["xterm", "-T", "Temporary terminal"])
    """
    if isinstance(cmd, six.string_types):
        args = shlex.split(cmd)
    else:
        args = list(cmd)
    # Double fork: the intermediate child forks the real command, writes
    # the grandchild's PID back through this pipe and exits, so the
    # grandchild is reparented to init and qtile never has to reap it.
    r, w = os.pipe()
    pid = os.fork()
    if pid < 0:
        # fork failed -- clean up the pipe and report the error code.
        os.close(r)
        os.close(w)
        return pid
    if pid == 0:
        # Intermediate child.
        os.close(r)
        # close qtile's stdin, stdout, stderr so the called process doesn't
        # pollute our xsession-errors.
        os.close(0)
        os.close(1)
        os.close(2)
        pid2 = os.fork()
        if pid2 == 0:
            # Grandchild: becomes the spawned command.
            os.close(w)
            # Open /dev/null as stdin, stdout, stderr
            try:
                fd = os.open(os.devnull, os.O_RDWR)
            except OSError:
                # This shouldn't happen, catch it just in case
                pass
            else:
                # For Python >=3.4, need to set file descriptor to inheritable
                try:
                    os.set_inheritable(fd, True)
                except AttributeError:
                    pass
                # Again, this shouldn't happen, but we should just check
                if fd > 0:
                    os.dup2(fd, 0)
                os.dup2(fd, 1)
                os.dup2(fd, 2)
            try:
                os.execvp(args[0], args)
            except OSError as e:
                logger.error("failed spawn: \"{0}\"\n{1}".format(cmd, e))
            # Only reached when execvp failed.
            os._exit(1)
        else:
            # Here it doesn't matter if fork failed or not, we just write
            # its return code and exit.
            os.write(w, str(pid2).encode())
            os.close(w)
            # sys.exit raises SystemExit, which will then be caught by our
            # top level catchall and we'll end up with two qtiles; os._exit
            # actually calls exit.
            os._exit(0)
    else:
        # Parent: wait for the intermediate child and read the grandchild
        # PID it wrote into the pipe.
        os.close(w)
        os.waitpid(pid, 0)
        # 1024 bytes should be enough for any pid. :)
        pid = os.read(r, 1024)
        os.close(r)
        return int(pid)
def cmd_status(self):
    """Health probe: answer "OK" while Qtile is running."""
    return "OK"
def cmd_sync(self):
    """Flush pending requests to the X server (development aid only)."""
    self.conn.flush()
def cmd_to_screen(self, n):
    """Warp focus to screen ``n`` (0-based screen number).

    Examples
    ========
        to_screen(0)
    """
    return self.toScreen(n)
def cmd_next_screen(self):
    """Move focus to the next screen, wrapping past the last one."""
    current = self.screens.index(self.currentScreen)
    return self.toScreen((current + 1) % len(self.screens))
def cmd_prev_screen(self):
    """Move focus to the previous screen, wrapping before the first one."""
    current = self.screens.index(self.currentScreen)
    return self.toScreen((current - 1) % len(self.screens))
def cmd_windows(self):
    """Return info dicts for every managed client window (internal windows excluded)."""
    infos = []
    for client in self.windowMap.values():
        if isinstance(client, window.Internal):
            continue
        infos.append(client.info())
    return infos
def cmd_internal_windows(self):
    """Return info dicts for every internal window (bars, for example)."""
    infos = []
    for client in self.windowMap.values():
        if isinstance(client, window.Internal):
            infos.append(client.info())
    return infos
def cmd_qtile_info(self):
    """Return a dictionary of information about this Qtile instance."""
    return {"socketname": self.fname}
def cmd_shutdown(self):
    """Terminate the window manager."""
    self.stop()
def cmd_switch_groups(self, groupa, groupb):
    """Swap the positions of groups ``groupa`` and ``groupb``."""
    if groupa not in self.groupMap or groupb not in self.groupMap:
        return
    ia = self.groups.index(self.groupMap[groupa])
    ib = self.groups.index(self.groupMap[groupb])
    self.groups[ia], self.groups[ib] = self.groups[ib], self.groups[ia]
    hook.fire("setgroup")
    # Keep each window's _NET_WM_DESKTOP in sync with the group's new slot.
    for grp in (self.groups[ia], self.groups[ib]):
        for win in grp.windows:
            win.group = grp
def find_window(self, wid):
    """Focus the window with X id ``wid``, pulling its group on screen first."""
    target = self.windowMap.get(wid)
    if not target:
        return
    if not target.group.screen:
        self.currentScreen.setGroup(target.group)
    target.group.focus(target, False)
def cmd_findwindow(self, prompt="window", widget="prompt"):
    """Launch a prompt widget that focuses a window by name.

    Parameters
    ==========
    prompt :
        Text with which to prompt user (default: "window")
    widget :
        Name of the prompt widget (default: "prompt")
    """
    prompt_widget = self.widgetMap.get(widget)
    if not prompt_widget:
        logger.error("No widget named '{0:s}' present.".format(widget))
        return
    prompt_widget.startInput(prompt, self.find_window, "window",
                             strict_completer=True)
def cmd_next_urgent(self):
    """Jump to the first window carrying the urgent hint, if any."""
    for win in self.windowMap.values():
        if win.urgent:
            win.group.cmd_toscreen()
            win.group.focus(win)
            return
    # No window had urgent set: nothing to do.
def cmd_togroup(self, prompt="group", widget="prompt"):
    """Prompt for a group name and move the current window there.

    Parameters
    ==========
    prompt :
        Text with which to prompt user (default: "group")
    widget :
        Name of the prompt widget (default: "prompt")
    """
    if not self.currentWindow:
        logger.warning("No window to move")
        return
    prompt_widget = self.widgetMap.get(widget)
    if not prompt_widget:
        logger.error("No widget named '{0:s}' present.".format(widget))
        return
    prompt_widget.startInput(prompt, self.moveToGroup, "group",
                             strict_completer=True)
def cmd_switchgroup(self, prompt="group", widget="prompt"):
    """Prompt for a group name and pull that group to the current screen.

    Parameters
    ==========
    prompt :
        Text with which to prompt user (default: "group")
    widget :
        Name of the prompt widget (default: "prompt")
    """
    def pull_group(group):
        if not group:
            return
        try:
            self.groupMap[group].cmd_toscreen()
        except KeyError:
            logger.info(u"No group named '{0:s}' present.".format(group))

    prompt_widget = self.widgetMap.get(widget)
    if not prompt_widget:
        logger.warning("No widget named '{0:s}' present.".format(widget))
        return
    prompt_widget.startInput(prompt, pull_group, "group",
                             strict_completer=True)
def cmd_spawncmd(self, prompt="spawn", widget="prompt",
                 command="%s", complete="cmd"):
    """Spawn a command via a prompt widget, with tab-completion.

    Parameters
    ==========
    prompt :
        Text with which to prompt user (default: "spawn: ").
    widget :
        Name of the prompt widget (default: "prompt").
    command :
        command template (default: "%s").
    complete :
        Tab completion function (default: "cmd")
    """
    def run(input_text):
        if input_text:
            self.cmd_spawn(command % input_text)

    try:
        self.widgetMap[widget].startInput(prompt, run, complete)
    except KeyError:
        logger.error("No widget named '{0:s}' present.".format(widget))
def cmd_qtilecmd(self, prompt="command",
                 widget="prompt", messenger="xmessage"):
    """Execute a Qtile command using the client syntax.

    Tab completion aids navigation of the command tree.

    Parameters
    ==========
    prompt :
        Text to display at the prompt (default: "command: ")
    widget :
        Name of the prompt widget (default: "prompt")
    messenger :
        Command to display output, set this to None to disable (default:
        "xmessage")
    """
    def f(cmd):
        if cmd:
            # c here is used in eval() below
            c = command.CommandRoot(self)  # noqa
            try:
                cmd_arg = str(cmd).split(' ')
            except AttributeError:
                return
            cmd_len = len(cmd_arg)
            if cmd_len == 0:
                logger.info('No command entered.')
                return
            try:
                # NOTE: eval() of user-entered text.  The input comes from
                # the local prompt widget only, not from the network.
                result = eval(u'c.{0:s}'.format(cmd))
            except (
                command.CommandError,
                command.CommandException,
                AttributeError) as err:
                logger.error(err)
                result = None
            if result is not None:
                from pprint import pformat
                message = pformat(result)
                if messenger:
                    self.cmd_spawn('{0:s} "{1:s}"'.format(messenger, message))
                logger.info(result)

    # BUG FIX: the original used self.widgetMap[widget], which raised
    # KeyError before the "No widget named ..." branch could ever run.
    mb = self.widgetMap.get(widget)
    if not mb:
        logger.error("No widget named {0:s} present.".format(widget))
        return
    mb.startInput(prompt, f, "qshell")
def cmd_addgroup(self, group):
    """Create a new group with the given name."""
    return self.addGroup(group)
def cmd_delgroup(self, group):
    """Delete the group with the given name."""
    return self.delGroup(group)
def cmd_add_rule(self, match_args, rule_args, min_priorty=False):
    """Add a dgroup rule; returns the rule_id needed to remove it.

    Parameters
    ==========
    match_args :
        config.Match arguments
    rule_args :
        config.Rule arguments
    min_priorty :
        If the rule is added with minimum priority (last) (default: False)
    """
    if not self.dgroups:
        logger.warning('No dgroups created')
        return
    new_rule = Rule(Match(**match_args), **rule_args)
    return self.dgroups.add_rule(new_rule, min_priorty)
def cmd_remove_rule(self, rule_id):
    """Remove the dgroup rule registered under ``rule_id``."""
    self.dgroups.remove_rule(rule_id)
def cmd_run_external(self, full_path):
    """Import an external Python script and run its ``main(qtile)``.

    stdout produced by the script is captured and returned, with any
    error description appended.
    """
    def format_error(path, e):
        s = """Can't call "main" from "{path}"\n\t{err_name}: {err}"""
        return s.format(path=path, err_name=e.__class__.__name__, err=e)

    def clear_exc():
        # sys.exc_clear() only exists on Python 2; elsewhere it's a no-op.
        if hasattr(sys, 'exc_clear'):
            sys.exc_clear()

    module_name = os.path.splitext(os.path.basename(full_path))[0]
    dir_path = os.path.dirname(full_path)
    err_str = ""
    # NOTE(review): a BytesIO stdout (and bytes + str below) is Python-2
    # oriented; on Python 3 scripts that print text will fail -- confirm
    # intended support before changing the return type.
    local_stdout = io.BytesIO()
    old_stdout = sys.stdout
    sys.stdout = local_stdout
    clear_exc()
    try:
        module = _import_module(module_name, dir_path)
        module.main(self)
    except ImportError as e:
        err_str += format_error(full_path, e)
    except Exception:
        (exc_type, exc_value, exc_traceback) = sys.exc_info()
        err_str += traceback.format_exc()
        err_str += format_error(full_path, exc_type(exc_value))
    finally:
        clear_exc()
        sys.stdout = old_stdout
        # BUG FIX: read the captured output *before* closing the buffer --
        # BytesIO.getvalue() raises ValueError on a closed buffer, so the
        # original could never return successfully.
        captured = local_stdout.getvalue()
        local_stdout.close()
    return captured + err_str
def cmd_hide_show_bar(self, position="all"):
    """Toggle visibility of the bar(s) on the current screen.

    Parameters
    ==========
    position :
        one of: "top", "bottom", "left", "right", or "all" (default: "all")
    """
    screen = self.currentScreen
    if position == "all":
        # Toggle every bar to the opposite of the first bar's state so
        # they all end up in sync.
        new_state = None
        for bar in (screen.left, screen.right, screen.top, screen.bottom):
            if not bar:
                continue
            if new_state is None:
                new_state = not bar.is_show()
            bar.show(new_state)
        if new_state is None:
            logger.warning("Not found bar for hide/show.")
        else:
            self.currentGroup.layoutAll()
    elif position in ("top", "bottom", "left", "right"):
        bar = getattr(screen, position)
        if bar:
            bar.show(not bar.is_show())
            self.currentGroup.layoutAll()
        else:
            logger.warning(
                "Not found bar in position '%s' for hide/show." % position)
    else:
        logger.error("Invalid position value:{0:s}".format(position))
def cmd_get_state(self):
    """Return the pickled state blob used for restarting qtile."""
    buf = io.BytesIO()
    pickle.dump(QtileState(self), buf, protocol=0)
    state = buf.getvalue().decode()
    logger.info('State = ')
    logger.info(state.replace('\n', ''))
    return state
def cmd_tracemalloc_toggle(self):
    """Start tracemalloc if stopped, stop it if running.

    Running tracemalloc is required for qtile-top.
    """
    if tracemalloc.is_tracing():
        tracemalloc.stop()
    else:
        tracemalloc.start()
def cmd_tracemalloc_dump(self):
    """Write a tracemalloc snapshot into the cache dir and report its path."""
    if not tracemalloc:
        logger.warning('No tracemalloc module')
        raise command.CommandError("No tracemalloc module")
    if not tracemalloc.is_tracing():
        return [False, "Trace not started"]
    malloc_dump = os.path.join(get_cache_dir(), "qtile_tracemalloc.dump")
    tracemalloc.take_snapshot().dump(malloc_dump)
    return [True, malloc_dump]
|
de-vri-es/qtile
|
libqtile/manager.py
|
Python
|
mit
| 63,587
|
#!/usr/bin/env python
"""
DragonPy - Simple6809 memory info
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:created: 2013 by Jens Diemer - www.jensdiemer.de
:copyleft: 2013 by the DragonPy team, see AUTHORS for more details.
:license: GNU GPL v3 or above, see LICENSE for more details.
"""
import logging
from MC6809.core.memory_info import BaseMemoryInfo
log = logging.getLogger("DragonPy.Simple6809.mem_info")
class Simple6809MemInfo(BaseMemoryInfo):
MEM_INFO = (
# generated from "ExBasROM.LST" with "make_mem_info.py":
(0x0008, 0x0008, 'BACKSPACE'),
(0x000d, 0x000d, 'ENTER KEY'),
(0x001b, 0x001b, 'ESCAPE CODE'),
(0x0020, 0x0020, 'SPACE (BLANK)'),
(0x003a, 0x003a, 'STACK BUFFER ROOM'),
(0x00fa, 0x00fa, 'MAX NUMBER OF CHARS IN A BASIC LINE'),
(0x00fa, 0x00fa, 'MAXIMUM MS BYTE OF LINE NUMBER'),
(0x0021, 0x0021, 'OP CODE OF BRN - SKIP ONE BYTE'),
(0x008c, 0x008c, 'OP CODE OF CMPX # - SKIP TWO BYTES'),
(0x0086, 0x0086, 'OP CODE OF LDA # - SKIP THE NEXT BYTE'),
(0x0000, 0x0000, 'STOP/END FLAG: POSITIVE=STOP, NEG=END'),
(0x0001, 0x0001, 'TERMINATOR FLAG 1'),
(0x0002, 0x0002, 'TERMINATOR FLAG 2'),
(0x0003, 0x0003, 'SCRATCH VARIABLE'),
(0x0004, 0x0004, 'IF COUNTER - HOW MANY IF STATEMENTS IN A LINE'),
(0x0005, 0x0005, '*DV* ARRAY FLAG 0=EVALUATE, 1=DIMENSIONING'),
(0x0006, 0x0006, '*DV* *PV TYPE FLAG: 0=NUMERIC, $FF=STRING'),
(0x0007, 0x0007, '*TV STRING SPACE HOUSEKEEPING FLAG'),
(0x0008, 0x0008, 'DISABLE ARRAY SEARCH: 00=ALLOW SEARCH'),
(0x0009, 0x0009, '*TV INPUT FLAG: READ=0, INPUT<>0'),
(0x000a, 0x000a, '*TV RELATIONAL OPERATOR FLAG'),
(0x000b, 0x000b, '*PV TEMPORARY STRING STACK POINTER'),
(0x000d, 0x000d, '*PV ADDR OF LAST USED STRING STACK ADDRESS'),
(0x000f, 0x000f, 'TEMPORARY POINTER'),
(0x0011, 0x0011, 'TEMPORARY DESCRIPTOR STORAGE (STACK SEARCH)'),
(0x0013, 0x0013, 'FLOATING POINT ACCUMULATOR #2 MANTISSA'),
(0x0017, 0x0017, 'BOTTOM OF STACK AT LAST CHECK'),
(0x0019, 0x0019, '*PV BEGINNING OF BASIC PROGRAM'),
(0x001b, 0x001b, '*PV START OF VARIABLES'),
(0x001d, 0x001d, '*PV START OF ARRAYS'),
(0x001f, 0x001f, '*PV END OF ARRAYS (+1)'),
(0x0021, 0x0021, '*PV START OF STRING STORAGE (TOP OF FREE RAM)'),
(0x0023, 0x0023, '*PV START OF STRING VARIABLES'),
(0x0025, 0x0025, 'UTILITY STRING POINTER'),
(0x0027, 0x0027, '*PV TOP OF STRING SPACE'),
(0x0029, 0x0029, 'SAVED LINE NUMBER DURING A "STOP"'),
(0x002b, 0x002b, 'BINARY VALUE OF A CONVERTED LINE NUMBER'),
(0x002d, 0x002d, 'SAVED INPUT PTR DURING A "STOP"'),
(0x002f, 0x002f, 'TEMPORARY INPUT POINTER STORAGE'),
(0x0031, 0x0031, "*PV 'DATA' STATEMENT LINE NUMBER POINTER"),
(0x0033, 0x0033, "*PV 'DATA' STATEMENT ADDRESS POINTER"),
(0x0035, 0x0035, "DATA POINTER FOR 'INPUT' & 'READ'"),
(0x0037, 0x0037, '*TV TEMP STORAGE FOR A VARIABLE NAME'),
(0x0039, 0x0039, '*TV POINTER TO A VARIABLE DESCRIPTOR'),
(0x003b, 0x003b, 'TEMP POINTER TO A VARIABLE DESCRIPTOR'),
(0x003d, 0x003d, 'POINTER TO RELATIONAL OPERATOR PROCESSING ROUTINE'),
(0x003f, 0x003f, 'TEMPORARY RELATIONAL OPERATOR FLAG BYTE'),
(0x004f, 0x004f, '*PV FLOATING POINT ACCUMULATOR #0 FPA0 EXPONENT'),
(0x0050, 0x0050, '*PV FLOAT.ACCU #0 FPA0 MANTISSA MS Most Significant Byte'),
(0x0051, 0x0051, '*PV FLOAT.ACCU #0 FPA0 MANTISSA NMS Next Most Significant Byte'),
(0x0052, 0x0052, '*PV FLOAT.ACCU #0 FPA0 MANTISSA NLS Next Least Significant Byte'),
(0x0053, 0x0053, '*PV FLOAT.ACCU #0 FPA0 MANTISSA LS Least Significant Byte'),
(0x0054, 0x0054, '*PV FLOATING POINT ACCUMULATOR #0 FPA0 SIGN'),
(0x0055, 0x0055, 'POLYNOMIAL COEFFICIENT COUNTER'),
(0x0056, 0x0056, 'TEMPORARY STRING DESCRIPTOR'),
(0x005b, 0x005b, 'FLOATING POINT CARRY BYTE'),
(0x005c, 0x005c, '*PV FLOATING POINT ACCUMULATOR #1 FPA0 EXPONENT'),
(0x005d, 0x005d, '*PV FLOAT.ACCU #1 FPA0 MANTISSA MS Most Significant Byte'),
(0x005e, 0x005e, '*PV FLOAT.ACCU #1 FPA0 MANTISSA NMS Next Most Significant Byte'),
(0x005f, 0x005f, '*PV FLOAT.ACCU #1 FPA0 MANTISSA NLS Next Least Significant Byte'),
(0x0060, 0x0060, '*PV FLOAT.ACCU #1 FPA0 MANTISSA LS Least Significant Byte'),
(0x0061, 0x0061, '*PV FLOATING POINT ACCUMULATOR #1 FPA0 SIGN'),
(0x0062, 0x0062, 'SIGN OF RESULT OF FLOATING POINT OPERATION'),
(0x0063, 0x0063, 'FLOATING POINT SUB BYTE (FIFTH BYTE)'),
(0x0064, 0x0064, 'POLYNOMIAL COEFFICIENT POINTER'),
(0x0066, 0x0066, 'CURRENT LINE POINTER DURING LIST'),
(0x0068, 0x0068, '*PV CURRENT LINE # OF BASIC PROGRAM, $FFFF = DIRECT'),
(0x006a, 0x006a, '*TV TAB FIELD WIDTH'),
(0x006b, 0x006b, '*TV TAB ZONE'),
(0x006c, 0x006c, '*TV PRINT POSITION'),
(0x006d, 0x006d, '*TV PRINT WIDTH'),
(0x006e, 0x006e, '*PV WARM START FLAG: $55=WARM, OTHER=COLD'),
(0x006f, 0x006f, '*PV WARM START VECTOR - JUMP ADDRESS FOR WARM START'),
(0x0071, 0x0071, '*PV TOP OF RAM'),
(0x0073, 0x0073, '*TV INKEY$ RAM IMAGE'),
(0x0074, 0x0074, '*PV DUMMY - THESE TWO BYTES ARE ALWAYS ZERO'),
(0x0076, 0x0076, '16'),
(0x0077, 0x0077, '112'),
(0x0078, 0x0078, '132'),
(0x0079, 0x0079, '0'),
(0x007a, 0x007a, 'LB4AA'),
(0x007c, 0x007c, '*PV INCREMENT LS BYTE OF INPUT POINTER'),
(0x007e, 0x007e, '*PV BRANCH IF NOT ZERO (NO CARRY)'),
(0x0080, 0x0080, '*PV INCREMENT MS BYTE OF INPUT POINTER'),
(0x0082, 0x0082, '*PV OP CODE OF LDA EXTENDED'),
(0x0083, 0x0083, '*PV THESE 2 BYTES CONTAIN ADDRESS OF THE CURRENT'),
(0x0085, 0x0085, 'JUMP BACK INTO THE BASIC RUM'),
(0x0088, 0x0088, '= LOW ORDER FOUR BYTES OF THE PRODUCT'),
(0x0089, 0x0089, '= OF A FLOATING POINT MULTIPLICATION'),
(0x008a, 0x008a, '= THESE BYTES ARE USE AS RANDOM DATA'),
(0x008b, 0x008b, '= BY THE RND STATEMENT'),
(0x008c, 0x008c, '*PV TRACE FLAG 0=OFF ELSE=ON'),
(0x008d, 0x008d, '*PV ADDRESS OF THE START OF USR VECTORS'),
(0x00ad, 0x00ad, "JUMP ADDRESS FOR BASIC'S USR FUNCTION"),
(0x00b0, 0x00b0, '* FLOATING POINT RANDOM NUMBER SEED EXPONENT'),
(0x00b1, 0x00b1, '* MANTISSA: INITIALLY SET TO $804FC75259'),
(0x00b5, 0x00b5, 'USR 0 VECTOR'),
(0x00b7, 0x00b7, 'USR 1'),
(0x00b9, 0x00b9, 'USR 2'),
(0x00bb, 0x00bb, 'USR 3'),
(0x00bd, 0x00bd, 'USR 4'),
(0x00bf, 0x00bf, 'USR 5'),
(0x00c1, 0x00c1, 'USR 6'),
(0x00c3, 0x00c3, 'USR 7'),
(0x00c5, 0x00c5, 'USR 8'),
(0x00c7, 0x00c7, 'USR 9'),
(0x00c9, 0x00c9, 'STRING DESCRIPTOR STACK'),
(0x00f1, 0x00f1, 'LINE INPUT BUFFER HEADER'),
(0x00f3, 0x00f3, 'BASIC LINE INPUT BUFFER'),
(0x01ee, 0x01ee, 'STRING BUFFER'),
(0x0217, 0x0217, 'START OF PROGRAM SPACE'),
(0xdb00, 0xdb00, 'GET A CHARACTER FROM CONSOLE IN'),
(0xdb02, 0xdb02, 'LOOP IF NO KEY DOWN'),
(0xdb14, 0xdb14, 'IA'),
(0xdb18, 0xdb18, 'IS IT CARRIAGE RETURN?'),
(0xdb1a, 0xdb1a, 'YES'),
(0xdb1f, 0xdb1f, 'INCREMENT CHARACTER COUNTER'),
(0xdb21, 0xdb21, 'CHECK FOR END OF LINE PRINTER LINE'),
(0xdb23, 0xdb23, 'AT END OF LINE PRINTER LINE?'),
(0xdb25, 0xdb25, 'NO'),
(0xdb27, 0xdb27, 'RESET CHARACTER COUNTER'),
(0xdb29, 0xdb29, 'IA'),
(0xdb30, 0xdb30, 'IA'),
(0xdb32, 0xdb32, 'DO LINEFEED AFTER CR'),
(0xdb46, 0xdb46, 'SET STACK TO TOP OF LINE INPUT BUFFER'),
(0xdb4a, 0xdb4a, 'GET WARM START FLAG'),
(0xdb4c, 0xdb4c, 'IS IT A WARM START?'),
(0xdb4e, 0xdb4e, 'NO - D0 A COLD START'),
(0xdb50, 0xdb50, 'WARM START VECTOR'),
(0xdb52, 0xdb52, 'GET FIRST BYTE OF WARM START ADDR'),
(0xdb54, 0xdb54, 'IS IT NOP?'),
(0xdb56, 0xdb56, 'NO - DO A COLD START'),
(0xdb58, 0xdb58, 'YES, G0 THERE'),
(0xdb5a, 0xdb5a, 'POINT X TO CLEAR 1ST 1K OF RAM'),
(0xdb5d, 0xdb5d, 'MOVE POINTER DOWN TWO-CLEAR BYTE'),
(0xdb5f, 0xdb5f, 'ADVANCE POINTER ONE'),
(0xdb61, 0xdb61, 'KEEP GOING IF NOT AT BOTTOM OF PAGE 0'),
(0xdb63, 0xdb63, 'SET TO START OF PROGRAM SPACE'),
(0xdb66, 0xdb66, 'CLEAR 1ST BYTE OF BASIC PROGRAM'),
(0xdb68, 0xdb68, 'BEGINNING OF BASIC PROGRAM'),
(0xdb6a, 0xdb6a, 'LOOK FOR END OF MEMORY'),
(0xdb6c, 0xdb6c, '* COMPLEMENT IT AND PUT IT BACK'),
(0xdb6d, 0xdb6d, '* INTO SYSTEM MEMORY'),
(0xdb6f, 0xdb6f, 'IS IT RAM?'),
(0xdb71, 0xdb71, 'BRANCH IF NOT (ROM, BAD RAM OR NO RAM)'),
(0xdb73, 0xdb73, 'MOVE POINTER UP ONE'),
(0xdb75, 0xdb75, 'RE-COMPLEMENT TO RESTORE BYTE'),
(0xdb77, 0xdb77, 'KEEP LOOKING FOR END OF RAM'),
(0xdb79, 0xdb79, 'SAVE ABSOLUTE TOP OF RAM'),
(0xdb7b, 0xdb7b, 'SAVE TOP OF STRING SPACE'),
(0xdb7d, 0xdb7d, 'SAVE START OF STRING VARIABLES'),
(0xdb7f, 0xdb7f, 'CLEAR 200 - DEFAULT STRING SPACE TO 200 BYTES'),
(0xdb83, 0xdb83, 'SAVE START OF STRING SPACE'),
(0xdb85, 0xdb85, 'PUT STACK THERE'),
(0xdb87, 0xdb87, 'POINT X TO ROM SOURCE DATA'),
(0xdb8a, 0xdb8a, 'POINT U TO RAM DESTINATION'),
(0xdb8d, 0xdb8d, 'MOVE 18 BYTES'),
(0xdb8f, 0xdb8f, 'MOVE 18 BYTES FROM ROM TO RAM'),
(0xdb92, 0xdb92, 'POINT U TO NEXT RAM DESTINATION'),
(0xdb95, 0xdb95, 'MOVE 4 MORE BYTES'),
(0xdb97, 0xdb97, 'MOVE 4 BYTES FROM ROM TO RAM'),
(0xdb9c, 0xdb9c, 'PUT RTS IN LINHDR-1'),
(0xdb9e, 0xdb9e, "G0 DO A 'NEW'"),
(0xdba1, 0xdba1, 'INITIALIZE ADDRESS OF START OF'),
(0xdba4, 0xdba4, 'USR JUMP TABLE'),
(0xdba6, 0xdba6, "ADDRESS OF 'FC ERROR' ROUTINE"),
(0xdba9, 0xdba9, '10 USR CALLS IN EX BASIC'),
(0xdbab, 0xdbab, "STORE 'FC' ERROR AT USR ADDRESSES"),
(0xdbad, 0xdbad, 'FINISHED ALL 10?'),
(0xdbae, 0xdbae, 'NO'),
(0xdbb0, 0xdbb0, 'DIV16 CLOCK -> 7372800 / 4 / 16 = 115200'),
(0xdbb5, 0xdbb5, 'POINT X TO COLOR BASIC COPYRIGHT MESSAGE'),
(0xdbb8, 0xdbb8, "PRINT 'COLOR BASIC'"),
(0xdbbb, 0xdbbb, 'WARM START ADDRESS'),
(0xdbbe, 0xdbbe, 'SAVE IT'),
(0xdbc0, 0xdbc0, 'WARM START FLAG'),
(0xdbc2, 0xdbc2, 'SAVE IT'),
(0xdbc4, 0xdbc4, "GO TO BASIC'S MAIN LOOP"),
(0xdbc6, 0xdbc6, 'FOR WARM START'),
(0xdbc7, 0xdbc7, 'DO PART OF A NEW'),
(0xdbca, 0xdbca, 'GO TO MAIN LOOP OF BASIC'),
(0xdbce, 0xdbce, 'TAB FIELD WIDTH'),
(0xdbcf, 0xdbcf, 'LAST TAB ZONE'),
(0xdbd0, 0xdbd0, 'PRINTER WIDTH'),
(0xdbd1, 0xdbd1, 'LINE PRINTER POSITION'),
(0xdbd2, 0xdbd2, "ARGUMENT OF EXEC COMMAND - SET TO 'FC' ERROR"),
(0xdbd4, 0xdbd4, '+1'),
(0xdbe0, 0xdbe0, 'IRQ SERVICE'),
(0xdbe3, 0xdbe3, 'FIRQ SERVICE'),
(0xdbe6, 0xdbe6, "USR ADDRESS FOR 8K BASIC (INITIALIZED TO 'FC' ERROR)"),
(0xdbe9, 0xdbe9, '*RANDOM SEED'),
(0xdbea, 0xdbea, '*RANDON SEED OF MANTISSA'),
(0xdbec, 0xdbec, '*.811635157'),
(0xdbee, 0xdbee, '50 BASIC COMMANDS'),
(0xdbef, 0xdbef, 'POINTS TO RESERVED WORDS'),
(0xdbf1, 0xdbf1, 'POINTS TO JUMP TABLE FOR COMMANDS'),
(0xdbf3, 0xdbf3, '29 BASIC SECONDARY COMMANDS'),
(0xdbf4, 0xdbf4, 'POINTS TO SECONDARY FUNCTION RESERVED WORDS'),
(0xdbf6, 0xdbf6, 'POINTS TO SECONDARY FUNCTION JUMP TABLE'),
(0xdbf8, 0xdbf8, 'NO MORE TABLES (RES WORDS=0)'),
(0xdbfa, 0xdbfa, 'NO MORE TABLES'),
(0xdbfc, 0xdbfc, 'NO MORE TABLES'),
(0xdbfe, 0xdbfe, 'NO MORE TABLES'),
(0xdc00, 0xdc00, 'NO MORE TABLES'),
(0xdc02, 0xdc02, 'NO MORE TABLES (SECONDARY FNS =0)'),
(0xdc04, 0xdc04, 'BASIC"'),
(0xdc18, 0xdc18, 'BY MICROSOFT"'),
(0xdc30, 0xdc30, 'SAVE REGISTERS'),
(0xdc32, 0xdc32, 'TAB FIELD WIDTH AND TAB ZONE'),
(0xdc34, 0xdc34, 'PRINTER WIDTH AND POSITION'),
(0xdc36, 0xdc36, 'SAVE TAB FIELD WIDTH AND ZONE'),
(0xdc38, 0xdc38, 'SAVE PRINT POSITION'),
(0xdc3a, 0xdc3a, 'SAVE PRINT WIDTH'),
(0xdc3c, 0xdc3c, 'RESTORE REGISTERS'),
(0xdc3e, 0xdc3e, 'RESET BREAK CHECK KEY TEMP KEY STORAGE'),
(0xdc40, 0xdc40, 'INPUT LINE BUFFER'),
(0xdc43, 0xdc43, 'ACCB CHAR COUNTER: SET TO 1 TO ALLOW A'),
(0xdc45, 0xdc45, 'GO GET A CHARACTER FROM CONSOLE IN'),
(0xdc48, 0xdc48, 'BACKSPACE'),
(0xdc4a, 0xdc4a, 'NO'),
(0xdc4c, 0xdc4c, 'YES - DECREMENT CHAR COUNTER'),
(0xdc4d, 0xdc4d, 'BRANCH IF BACK AT START OF LINE AGAIN'),
(0xdc4f, 0xdc4f, 'DECREMENT BUFFER POINTER'),
(0xdc51, 0xdc51, 'ECHO CHAR TO SCREEN'),
(0xdc53, 0xdc53, 'SHIFT RIGHT ARROW?'),
(0xdc55, 0xdc55, 'NO'),
(0xdc57, 0xdc57, 'DEC CHAR CTR'),
(0xdc58, 0xdc58, 'GO BACK TO START IF CHAR CTR = 0'),
(0xdc5a, 0xdc5a, 'BACKSPACE?'),
(0xdc5c, 0xdc5c, 'SEND TO CONSOLE OUT (SCREEN)'),
(0xdc5f, 0xdc5f, 'KEEP GOING'),
(0xdc61, 0xdc61, 'BREAK KEY?'),
(0xdc63, 0xdc63, 'SET CARRY FLAG'),
(0xdc65, 0xdc65, 'BRANCH IF BREAK KEY DOWN'),
(0xdc67, 0xdc67, 'ENTER KEY?'),
(0xdc69, 0xdc69, 'NO'),
(0xdc6b, 0xdc6b, 'CLEAR CARRY FLAG IF ENTER KEY - END LINE ENTRY'),
(0xdc6c, 0xdc6c, 'SAVE CARRY FLAG'),
(0xdc6e, 0xdc6e, 'SEND CR TO SCREEN'),
(0xdc71, 0xdc71, 'MAKE LAST BYTE IN INPUT BUFFER = 0'),
(0xdc73, 0xdc73, 'RESET INPUT BUFFER POINTER'),
(0xdc76, 0xdc76, 'RESTORE CARRY FLAG'),
(0xdc78, 0xdc78, 'IS IT CONTROL CHAR?'),
(0xdc7a, 0xdc7a, 'BRANCH IF CONTROL CHARACTER'),
(0xdc7e, 0xdc7e, '* IGNORE IF > LOWER CASE Z'),
(0xdc80, 0xdc80, 'HAVE 250 OR MORE CHARACTERS BEEN ENTERED?'),
(0xdc82, 0xdc82, 'YES, IGNORE ANY MORE'),
(0xdc84, 0xdc84, 'PUT IT IN INPUT BUFFER'),
(0xdc86, 0xdc86, 'INCREMENT CHARACTER COUNTER'),
(0xdc87, 0xdc87, 'ECHO IT TO SCREEN'),
(0xdc8a, 0xdc8a, 'GO SET SOME MORE'),
(0xdc8c, 0xdc8c, 'BRANCH IF NO ARGUMENT'),
(0xdc8e, 0xdc8e, 'EVALUATE ARGUMENT - ARGUMENT RETURNED IN X'),
(0xdc91, 0xdc91, 'STORE X TO EXEC JUMP ADDRESS'),
(0xdc93, 0xdc93, 'GO DO IT'),
(0xdc97, 0xdc97, 'GO DO BREAK KEY CHECK'),
(0xdc9a, 0xdc9a, 'WAS A KEY DOWN IN THE BREAK CHECK?'),
(0xdc9c, 0xdc9c, 'YES'),
(0xdc9e, 0xdc9e, 'GO GET A KEY'),
(0xdca1, 0xdca1, 'CLEAR INKEY RAM IMAGE'),
(0xdca3, 0xdca3, 'STORE THE KEY IN FPA0'),
(0xdca5, 0xdca5, 'CONVERT FPA0+3 TO A STRING'),
(0xdca9, 0xdca9, 'SET LENGTH OF STRING = 0 IF NO KEY DOWN'),
(0xdcab, 0xdcab, 'PUT A NULL STRING ONTO THE STRING STACK'),
(0xdcae, 0xdcae, 'GET BYTE FROM X'),
(0xdcb0, 0xdcb0, 'STORE IT AT U'),
(0xdcb2, 0xdcb2, 'MOVED ALL BYTES?'),
(0xdcb3, 0xdcb3, 'NO'),
(0xdcb7, 0xdcb7, 'GET CURRENT INPUT CHAR FROM BASIC LINE'),
(0xdcb9, 0xdcb9, 'RETURN IF END OF LINE'),
(0xdcbb, 0xdcbb, 'SYNTAX ERROR IF ANY MORE CHARACTERS'),
(0xdcbe, 0xdcbe, 'FROM INTERRUPT'),
(0xdcbf, 0xdcbf, 'IS THIS CHARACTER >=(ASCII 9)+1?'),
(0xdcc1, 0xdcc1, 'BRANCH IF > 9; Z SET IF = COLON'),
(0xdcc3, 0xdcc3, 'SPACE?'),
(0xdcc5, 0xdcc5, 'NO - SET CARRY IF NUMERIC'),
(0xdcc7, 0xdcc7, 'IF SPACE, GET NECT CHAR (IGNORE SPACES)'),
(0xdcc9, 0xdcc9, '* SET CARRY IF'),
(0xdccb, 0xdccb, '* CHARACTER > ASCII 0'),
(0xdcce, 0xdcce, 'SGN'),
(0xdcd0, 0xdcd0, 'INT'),
(0xdcd2, 0xdcd2, 'ABS'),
(0xdcd4, 0xdcd4, 'USR'),
(0x0083, 0x0083, '_TAB/2+$7F'),
(0xff83, 0xff83, 'C_TAB/2+$FF7F'),
(0xdcd6, 0xdcd6, 'RND'),
(0xdcd8, 0xdcd8, 'SIN'),
(0xdcda, 0xdcda, 'PEEK'),
(0xdcdc, 0xdcdc, 'LEN'),
(0xdcde, 0xdcde, 'STR$'),
(0xdce0, 0xdce0, 'VAL'),
(0xdce2, 0xdce2, 'ASC'),
(0xdce4, 0xdce4, 'CHR$'),
(0xdce6, 0xdce6, 'ATN'),
(0xdce8, 0xdce8, 'COS'),
(0xdcea, 0xdcea, 'TAN'),
(0xdcec, 0xdcec, 'EXP'),
(0xdcee, 0xdcee, 'FIX'),
(0xdcf0, 0xdcf0, 'LOG'),
(0xdcf2, 0xdcf2, 'POS'),
(0xdcf4, 0xdcf4, 'SQR'),
(0xdcf6, 0xdcf6, 'HEX$'),
(0xdcf8, 0xdcf8, 'LEFT$'),
(0x0095, 0x0095, '_TAB/2+$7F'),
(0xdcfa, 0xdcfa, 'RIGHT$'),
(0xdcfc, 0xdcfc, 'MID$'),
(0x0097, 0x0097, '_TAB/2+$7F'),
(0xdcfe, 0xdcfe, 'INKEY$'),
(0x0098, 0x0098, '_TAB/2+$7F'),
(0xdd00, 0xdd00, 'MEM'),
(0xdd02, 0xdd02, 'VARPTR'),
(0xdd04, 0xdd04, 'INSTR'),
(0xdd06, 0xdd06, 'STRING$'),
(0x001d, 0x001d, 'NC_TAB/2'),
(0xdd09, 0xdd09, '+'),
(0xdd0c, 0xdd0c, '-'),
(0xdd12, 0xdd12, '/'),
(0xdd15, 0xdd15, 'EXPONENTIATION'),
(0xdd18, 0xdd18, 'AND'),
(0xdd1b, 0xdd1b, 'OR'),
(0xdd1d, 0xdd1d, '80'),
(0xdd1f, 0xdd1f, "'"),
(0xdd20, 0xdd20, '81'),
(0xdd21, 0xdd21, "'"),
(0xdd22, 0xdd22, '82'),
(0xdd24, 0xdd24, "'"),
(0xdd25, 0xdd25, '83'),
(0xdd26, 0xdd26, '84'),
(0xdd29, 0xdd29, "'"),
(0xdd2a, 0xdd2a, '85'),
(0xdd2b, 0xdd2b, "'"),
(0xdd2c, 0xdd2c, '86'),
(0xdd2f, 0xdd2f, "'"),
(0xdd30, 0xdd30, '87'),
(0xdd34, 0xdd34, "'"),
(0xdd35, 0xdd35, '88'),
(0xdd36, 0xdd36, "'"),
(0xdd37, 0xdd37, '89'),
(0xdd3b, 0xdd3b, "'"),
(0xdd3c, 0xdd3c, '8A'),
(0xdd3e, 0xdd3e, "'"),
(0xdd3f, 0xdd3f, '8B'),
(0xdd42, 0xdd42, "'"),
(0xdd43, 0xdd43, '8C'),
(0xdd45, 0xdd45, "'"),
(0xdd46, 0xdd46, '8D'),
(0xdd49, 0xdd49, "'"),
(0xdd4a, 0xdd4a, '8E'),
(0xdd4c, 0xdd4c, "'"),
(0xdd4d, 0xdd4d, '8F'),
(0xdd53, 0xdd53, "'"),
(0xdd54, 0xdd54, '90'),
(0xdd59, 0xdd59, "'"),
(0xdd5a, 0xdd5a, '91'),
(0xdd5d, 0xdd5d, "'"),
(0xdd5e, 0xdd5e, '92'),
(0xdd61, 0xdd61, "'"),
(0xdd62, 0xdd62, '93'),
(0xdd65, 0xdd65, "'"),
(0xdd66, 0xdd66, '94'),
(0xdd69, 0xdd69, "'"),
(0xdd6a, 0xdd6a, '95'),
(0xdd6e, 0xdd6e, "'"),
(0xdd6f, 0xdd6f, '96'),
(0xdd71, 0xdd71, "'"),
(0xdd72, 0xdd72, '97'),
(0xdd75, 0xdd75, "'"),
(0xdd76, 0xdd76, '98'),
(0xdd79, 0xdd79, "'"),
(0xdd7a, 0xdd7a, '99'),
(0xdd7e, 0xdd7e, "'"),
(0xdd7f, 0xdd7f, '9A'),
(0xdd81, 0xdd81, "'"),
(0xdd82, 0xdd82, '9B'),
(0xdd84, 0xdd84, "'"),
(0xdd85, 0xdd85, '9C'),
(0xdd88, 0xdd88, "'"),
(0xdd89, 0xdd89, '9D'),
(0xdd8d, 0xdd8d, "'"),
(0xdd8e, 0xdd8e, '9E'),
(0xdd91, 0xdd91, "'"),
(0xdd92, 0xdd92, '9F'),
(0xdd95, 0xdd95, "'"),
(0xdd96, 0xdd96, 'A0'),
(0xdd97, 0xdd97, "'"),
(0xdd98, 0xdd98, 'A1'),
(0xdd9a, 0xdd9a, "'"),
(0xdd9b, 0xdd9b, 'A2'),
(0xdd9e, 0xdd9e, "'"),
(0xdd9f, 0xdd9f, 'A3'),
(0xdda1, 0xdda1, "'"),
(0xdda2, 0xdda2, 'A4'),
(0xdda5, 0xdda5, "'"),
(0xdda6, 0xdda6, 'A5'),
(0xdda8, 0xdda8, "'"),
(0xdda9, 0xdda9, 'A6'),
(0xddaa, 0xddaa, 'A7'),
(0xddab, 0xddab, 'A8'),
(0xddac, 0xddac, 'A9'),
(0xddad, 0xddad, 'AA'),
(0xddae, 0xddae, 'AB'),
(0xddb0, 0xddb0, "'"),
(0xddb1, 0xddb1, 'AC'),
(0xddb2, 0xddb2, "'"),
(0xddb3, 0xddb3, 'AD'),
(0xddb4, 0xddb4, 'AE'),
(0xddb5, 0xddb5, 'AF'),
(0xddb6, 0xddb6, 'B0'),
(0xddb7, 0xddb7, "'"),
(0xddb8, 0xddb8, 'B1'),
(0xddbc, 0xddbc, "'"),
(0xddbd, 0xddbd, '80'),
(0xddbf, 0xddbf, "'"),
(0xddc0, 0xddc0, '81'),
(0xddc2, 0xddc2, "'"),
(0xddc3, 0xddc3, '82'),
(0xddc5, 0xddc5, "'"),
(0xddc6, 0xddc6, '83'),
(0xddc8, 0xddc8, "'"),
(0xddc9, 0xddc9, '84'),
(0xddcb, 0xddcb, "'"),
(0xddcc, 0xddcc, '85'),
(0xddce, 0xddce, "'"),
(0xddcf, 0xddcf, '86'),
(0xddd2, 0xddd2, "'"),
(0xddd3, 0xddd3, '87'),
(0xddd5, 0xddd5, "'"),
(0xddd6, 0xddd6, '88'),
(0xddd9, 0xddd9, "'"),
(0xddda, 0xddda, '89'),
(0xdddc, 0xdddc, "'"),
(0xdddd, 0xdddd, '8A'),
(0xdddf, 0xdddf, "'"),
(0xdde0, 0xdde0, '8B'),
(0xdde3, 0xdde3, "'"),
(0xdde4, 0xdde4, '8C'),
(0xdde6, 0xdde6, "'"),
(0xdde7, 0xdde7, '8D'),
(0xdde9, 0xdde9, "'"),
(0xddea, 0xddea, '8E'),
(0xddec, 0xddec, "'"),
(0xdded, 0xdded, '8F'),
(0xddef, 0xddef, "'"),
(0xddf0, 0xddf0, '90'),
(0xddf2, 0xddf2, "'"),
(0xddf3, 0xddf3, '91'),
(0xddf5, 0xddf5, "'"),
(0xddf6, 0xddf6, '92'),
(0xddf8, 0xddf8, "'"),
(0xddf9, 0xddf9, '93'),
(0xddfb, 0xddfb, "'"),
(0xddfc, 0xddfc, '94'),
(0xddff, 0xddff, "'"),
(0xde00, 0xde00, '95'),
(0xde04, 0xde04, "'"),
(0xde05, 0xde05, '96'),
(0xde0a, 0xde0a, "'"),
(0xde0b, 0xde0b, '97'),
(0xde0e, 0xde0e, "'"),
(0xde0f, 0xde0f, '98'),
(0xde14, 0xde14, "'"),
(0xde15, 0xde15, '99'),
(0xde17, 0xde17, "'"),
(0xde18, 0xde18, '9A'),
(0xde1d, 0xde1d, "'"),
(0xde1e, 0xde1e, '9B'),
(0xde22, 0xde22, "'"),
(0xde23, 0xde23, '9C'),
(0xde29, 0xde29, "'"),
(0xde2a, 0xde2a, '80'),
(0xde2c, 0xde2c, '81'),
(0xde2e, 0xde2e, '82'),
(0x0082, 0x0082, 'TAB/2+$7F'),
(0xde30, 0xde30, "83 (')"),
(0x0083, 0x0083, '_TAB/2+$7F'),
(0xde32, 0xde32, '84 (ELSE)'),
(0x0084, 0x0084, 'TAB/2+$7F'),
(0xde34, 0xde34, '85'),
(0x0085, 0x0085, 'TAB/2+$7F'),
(0xde36, 0xde36, '86'),
(0x0086, 0x0086, 'TAB/2+$7F'),
(0xde38, 0xde38, '87'),
(0x0087, 0x0087, 'TAB/2+$7F'),
(0xde3a, 0xde3a, '88'),
(0xde3c, 0xde3c, '89'),
(0x0089, 0x0089, 'TAB/2+$7F'),
(0xde3e, 0xde3e, '8A'),
(0xde40, 0xde40, '8B'),
(0xde42, 0xde42, '8C'),
(0xde44, 0xde44, '8D'),
(0xde46, 0xde46, '8E'),
(0xde48, 0xde48, '8F'),
(0xde4a, 0xde4a, '90'),
(0xde4c, 0xde4c, '91'),
(0xde4e, 0xde4e, '92'),
(0xde50, 0xde50, '93'),
(0xde52, 0xde52, '94'),
(0xde54, 0xde54, '95'),
(0xde56, 0xde56, '96'),
(0xde58, 0xde58, '97'),
(0xde5a, 0xde5a, '98'),
(0xde5c, 0xde5c, '99'),
(0xde5e, 0xde5e, '9A'),
(0xde60, 0xde60, '9B'),
(0xde62, 0xde62, '9C'),
(0xde64, 0xde64, '9D'),
(0xde66, 0xde66, '9E'),
(0x009e, 0x009e, 'CMD_TAB/2+$7F'),
(0xde68, 0xde68, '0 NEXT WITHOUT FOR'),
(0xde6a, 0xde6a, '1 SYNTAX ERROR'),
(0xde6c, 0xde6c, '2 RETURN WITHOUT GOSUB'),
(0xde6e, 0xde6e, '3 OUT OF DATA'),
(0xde70, 0xde70, '4 ILLEGAL FUNCTION CALL'),
(0xde72, 0xde72, '5 OVERFLOW'),
(0xde74, 0xde74, '6 OUT OF MEMORY'),
(0xde76, 0xde76, '7 UNDEFINED LINE NUMBER'),
(0xde78, 0xde78, '8 BAD SUBSCRIPT'),
(0xde7a, 0xde7a, '9 REDIMENSIONED ARRAY'),
(0xde7c, 0xde7c, '10 DIVISION BY ZERO'),
(0xde7e, 0xde7e, '11 ILLEGAL DIRECT STATEMENT'),
(0xde80, 0xde80, '12 TYPE MISMATCH'),
(0xde82, 0xde82, '13 OUT OF STRING SPACE'),
(0xde84, 0xde84, '14 STRING TOO LONG'),
(0xde86, 0xde86, '15 STRING FORMULA TOO COMPLEX'),
(0xde88, 0xde88, "16 CAN'T CONTINUE"),
(0xde8a, 0xde8a, '17 BAD FILE DATA'),
(0xde8c, 0xde8c, '18 FILE ALREADY OPEN'),
(0xde8e, 0xde8e, '19 DEVICE NUMBER ERROR'),
(0xde90, 0xde90, '20 I/O ERROR'),
(0xde92, 0xde92, '21 BAD FILE MODE'),
(0xde94, 0xde94, '22 FILE NOT OPEN'),
(0xde96, 0xde96, '23 INPUT PAST END OF FILE'),
(0xde98, 0xde98, '24 DIRECT STATEMENT IN FILE'),
(0xde9a, 0xde9a, '25 UNDEFINED FUNCTION (FN) CALL'),
(0xde9c, 0xde9c, '26 FILE NOT FOUND'),
(0xde9e, 0xde9e, 'R"'),
(0xdeb0, 0xdeb0, '"'),
(0xdeb6, 0xdeb6, 'POINT X TO 3RD ADDRESS ON STACK - IGNORE THE'),
(0xdeb8, 0xdeb8, "18 BYTES SAVED ON STACK FOR EACH 'FOR' LOOP"),
(0xdeba, 0xdeba, 'SAVE POINTER'),
(0xdebc, 0xdebc, 'GET 1ST BYTE'),
(0xdebe, 0xdebe, '* CHECK FOR TYPE OF STACK JUMP FOUND'),
(0xdec0, 0xdec0, "* BRANCH IF NOT 'FOR/NEXT'"),
(0xdec2, 0xdec2, '= GET INDEX VARIABLE DESCRIPTOR'),
(0xdec4, 0xdec4, '= POINTER AND SAVE IT IN TMPTR1'),
(0xdec6, 0xdec6, 'GET INDEX VARIABLE BEING SEARCHED FOR'),
(0xdec8, 0xdec8, 'BRANCH IF DEFAULT INDEX VARIABLE - USE THE'),
(0xdeca, 0xdeca, 'DOES THE STACK INDEX MATCH THE ONE'),
(0xdecc, 0xdecc, 'YES'),
(0xdece, 0xdece, '* RESTORE INITIAL POINTER, ADD'),
(0xded0, 0xded0, '* 18 TO IT AND LOOK FOR'),
(0xded1, 0xded1, '* NEXT BLOCK OF DATA'),
(0xded3, 0xded3, '= GET 1ST INDEX VARIABLE FOUND AND'),
(0xded5, 0xded5, "= SAVE AS 'NEXT' INDEX"),
(0xded7, 0xded7, "POINT X TO START OF 'FOR/NEXT' DATA"),
(0xded9, 0xded9, "SET ZERO FLAG IF 'FOR/NEXT' DATA"),
(0xdedb, 0xdedb, 'ACCD = NEW BOTTOM OF FREE RAM - IS THERE'),
(0xdedd, 0xdedd, 'POINT U TO DESTINATION ADDRESS (V41)'),
(0xdedf, 0xdedf, 'ADD ONE TO U - COMPENSATE FOR FIRST PSHU'),
(0xdee1, 0xdee1, 'POINT X TO SOURCE ADDRESS (V43)'),
(0xdee3, 0xdee3, 'ADD ONE - COMPENSATE FOR FIRST LDA ,X'),
(0xdee5, 0xdee5, 'GRAB A BYTE FROM SOURCE'),
(0xdee7, 0xdee7, 'MOVE IT TO DESTINATION'),
(0xdee9, 0xdee9, 'DONE?'),
(0xdeeb, 0xdeeb, 'NO - KEEP MOVING BYTES'),
(0xdeed, 0xdeed, 'SAVE FINAL DESTINATION ADDRESS'),
(0xdef0, 0xdef0, '* ACCD CONTAINS NUMBER OF EXTRA'),
(0xdef1, 0xdef1, '* BYTES TO PUT ON STACK'),
(0xdef2, 0xdef2, 'END OF PROGRAM AND VARIABLES'),
(0xdef4, 0xdef4, 'ADD STACK BUFFER - ROOM FOR STACK?'),
(0xdef7, 0xdef7, 'BRANCH IF GREATER THAN $FFFF'),
(0xdef9, 0xdef9, 'CURRENT NEW BOTTOM OF STACK STACK POINTER'),
(0xdefc, 0xdefc, 'ARE WE GOING TO BE BELOW STACK?'),
(0xdeff, 0xdeff, 'YES - NO ERROR'),
(0xdf01, 0xdf01, 'OUT OF MEMORY ERROR'),
(0xdf03, 0xdf03, 'RESET STACK, STRING STACK, CONTINUE POINTER'),
(0xdf06, 0xdf06, 'SEND A CR TO SCREEN'),
(0xdf09, 0xdf09, "SEND A '?' TO SCREEN"),
(0xdf0c, 0xdf0c, 'POINT TO ERROR TABLE'),
(0xdf0f, 0xdf0f, 'ADD MESSAGE NUMBER OFFSET'),
(0xdf10, 0xdf10, '* GET TWO CHARACTERS FROM X AND'),
(0xdf12, 0xdf12, '* SEND TO CONSOLE OUT (SCREEN)'),
(0xdf14, 0xdf14, 'POINT TO "ERROR" MESSAGE'),
(0xdf17, 0xdf17, 'PRINT MESSAGE POINTED TO BY X'),
(0xdf1a, 0xdf1a, 'GET CURRENT LINE NUMBER (CURL IN)'),
(0xdf1c, 0xdf1c, 'TEST FOR DIRECT MODE'),
(0xdf1d, 0xdf1d, 'BRANCH IF DIRECT MODE'),
(0xdf1f, 0xdf1f, "PRINT 'IN ****'"),
(0xdf22, 0xdf22, 'MOVE CURSOR TO START OF LINE'),
(0xdf25, 0xdf25, "POINT X TO 'OK', CR MESSAGE"),
(0xdf28, 0xdf28, "PRINT 'OK', CR"),
(0xdf2b, 0xdf2b, 'GO GET AN INPUT LINE'),
(0xdf2e, 0xdf2e, 'THE LINE NUMBER FOR DIRECT MODE IS $FFFF'),
(0xdf31, 0xdf31, 'SAVE IT IN CURLIN'),
(0xdf33, 0xdf33, 'BRANCH IF LINE INPUT TERMINATED BY BREAK'),
(0xdf35, 0xdf35, 'SAVE (X) AS CURRENT INPUT POINTER - THIS WILL'),
(0xdf37, 0xdf37, 'GET NEXT CHARACTER FROM BASIC'),
(0xdf39, 0xdf39, 'NO LINE INPUT - GET ANOTHER LINE'),
(0xdf3b, 0xdf3b, 'BRANCH IF NUMER1C - THERE WAS A LINE NUMBER BEFORE'),
(0xdf3d, 0xdf3d, 'GO CRUNCH LINE'),
(0xdf40, 0xdf40, 'GO EXECUTE THE STATEMENT (LIVE KEYBOARD)'),
(0xdf43, 0xdf43, 'GET A CHARACTER'),
(0xdf45, 0xdf45, 'SEND TO CONSOLE OUT'),
(0xdf48, 0xdf48, 'CONVERT LINE NUMBER TO BINARY'),
(0xdf4b, 0xdf4b, 'GET CONVERTED LINE NUMBER'),
(0xdf4d, 0xdf4d, 'STORE IT IN LINE INPUT HEADER'),
(0xdf4f, 0xdf4f, 'GO CRUNCH THE LINE'),
(0xdf52, 0xdf52, 'SAVE LINE LENGTH'),
(0xdf54, 0xdf54, 'FIND OUT WHERE TO INSERT LINE'),
(0xdf56, 0xdf56, 'BRANCH IF LINE NUMBER DOES NOT ALREADY EXIST'),
(0xdf58, 0xdf58, 'GET ABSOLUTE ADDRESS OF LINE NUMBER'),
(0xdf5a, 0xdf5a, 'SUBTRACT ADDRESS OF NEXT LINE NUMBER'),
(0xdf5c, 0xdf5c, '* ADD TO CURRENT END OF PROGRAM - THIS WILL REMOVE'),
(0xdf5e, 0xdf5e, '* THE LENGTH OF THIS LINE NUMBER FROM THE PROGRAM'),
(0xdf60, 0xdf60, 'POINT U TO ADDRESS OF NEXT LINE NUMBER'),
(0xdf62, 0xdf62, "GET A BYTE FROM WHAT'S LEFT OF PROGRAM"),
(0xdf64, 0xdf64, 'MOVE IT DOWN'),
(0xdf66, 0xdf66, 'COMPARE TO END OF BASIC PROGRAM'),
(0xdf68, 0xdf68, 'BRANCH IF NOT AT END'),
(0xdf6a, 0xdf6a, '* CHECK TO SEE IF THERE IS A LINE IN'),
(0xdf6c, 0xdf6c, '* THE BUFFER AND BRANCH IF NONE'),
(0xdf6e, 0xdf6e, '= SAVE CURRENT END OF'),
(0xdf70, 0xdf70, '= PROGRAM IN V43'),
(0xdf72, 0xdf72, '* ADD LENGTH OF CRUNCHED LINE,'),
(0xdf74, 0xdf74, '* PROPOGATE CARRY AND SAVE NEW END'),
(0xdf76, 0xdf76, '* OF PROGRAM IN V41'),
(0xdf78, 0xdf78, "= MAKE SURE THERE'S ENOUGH RAM FOR THIS"),
(0xdf7b, 0xdf7b, 'POINT U TO LINE TO BE INSERTED'),
(0xdf7e, 0xdf7e, 'GET A BYTE FROM NEW LINE'),
(0xdf80, 0xdf80, 'INSERT IT IN PROGRAM'),
(0xdf82, 0xdf82, '* COMPARE TO ADDRESS OF END OF INSERTED'),
(0xdf84, 0xdf84, '* LINE AND BRANCH IF NOT DONE'),
(0xdf86, 0xdf86, '= GET AND SAVE'),
(0xdf88, 0xdf88, '= END OF PROGRAM'),
(0xdf8a, 0xdf8a, 'RESET INPUT POINTER, CLEAR VARIABLES, INITIALIZE'),
(0xdf8c, 0xdf8c, 'ADJUST START OF NEXT LINE ADDRESSES'),
(0xdf8e, 0xdf8e, "REENTER BASIC'S INPUT LOOP"),
(0xdf90, 0xdf90, 'POINT X TO START OF PROGRAM'),
(0xdf92, 0xdf92, 'GET ADDRESS OF NEXT LINE'),
(0xdf94, 0xdf94, 'RETURN IF END OF PROGRAM'),
(0xdf96, 0xdf96, 'POINT U TO START OF BASIC TEXT IN LINE'),
(0xdf98, 0xdf98, '* SKIP THROUGH THE LINE UNTIL A'),
(0xdf9a, 0xdf9a, '* ZERO (END OF LINE) IS FOUND'),
(0xdf9c, 0xdf9c, 'SAVE THE NEW START OF NEXT LINE ADDRESS'),
(0xdf9e, 0xdf9e, 'POINT X TO START OF NEXT LINE'),
(0xdfa0, 0xdfa0, 'KEEP GOING'),
(0xdfa2, 0xdfa2, 'GET THE LINE NUMBER TO FIND'),
(0xdfa4, 0xdfa4, 'BEGINNING OF PROGRAM'),
(0xdfa6, 0xdfa6, 'GET ADDRESS OF NEXT LINE NUMBER'),
(0xdfa8, 0xdfa8, 'BRANCH IF END OF PROG'),
(0xdfaa, 0xdfaa, 'IS IT A MATCH?'),
(0xdfad, 0xdfad, 'CARRY SET IF LOWER; CARRY CLEAR IF MATCH'),
(0xdfaf, 0xdfaf, 'X = ADDRESS OF NEXT LINE'),
(0xdfb1, 0xdfb1, 'KEEP LOOPING FOR LINE NUMBER'),
(0xdfb3, 0xdfb3, 'SET CARRY FLAG'),
(0xdfb5, 0xdfb5, 'SAVE MATCH LINE NUMBER OR NUMBER OF LINE JUST AFTER'),
(0xdfb8, 0xdfb8, 'BRANCH IF ARGUMENT GIVEN'),
(0xdfba, 0xdfba, 'GET START OF BASIC'),
(0xdfbc, 0xdfbc, '* PUT 2 ZERO BYTES THERE - ERASE'),
(0xdfbe, 0xdfbe, '* THE BASIC PROGRAM'),
(0xdfc0, 0xdfc0, 'AND THE NEXT ADDRESS IS NOW THE END OF PROGRAM'),
(0xdfc2, 0xdfc2, 'GET START OF BASIC'),
(0xdfc4, 0xdfc4, 'PUT INPUT POINTER ONE BEFORE START OF BASIC'),
(0xdfc7, 0xdfc7, '* RESET START OF STRING VARIABLES'),
(0xdfc9, 0xdfc9, '* TO TOP OF STRING SPACE'),
(0xdfcb, 0xdfcb, "RESET 'DATA' POINTER TO START OF BASIC"),
(0xdfce, 0xdfce, '* GET START OF VARIABLES AND USE IT'),
(0xdfd0, 0xdfd0, '* TO RESET START OF ARRAYS'),
(0xdfd2, 0xdfd2, 'RESET END OF ARRAYS'),
(0xdfd4, 0xdfd4, '* RESET STRING STACK POINTER TO'),
(0xdfd7, 0xdfd7, '* BOTTOM OF STRING STACK'),
(0xdfd9, 0xdfd9, 'GET RETURN ADDRESS OFF STACK'),
(0xdfdb, 0xdfdb, 'RESTORE STACK POINTER'),
(0xdfde, 0xdfde, 'PUT A ZERO BYTE ON STACK - TO CLEAR ANY RETURN OF'),
(0xdfe0, 0xdfe0, "RESET 'CONT' ADDRESS SO YOU"),
(0xdfe2, 0xdfe2, "'CAN'T CONTINUE'"),
(0xdfe4, 0xdfe4, 'CLEAR THE ARRAY DISABLE FLAG'),
(0xdfe6, 0xdfe6, 'RETURN TO CALLING ROUTINE - THIS IS NECESSARY'),
(0xdfe8, 0xdfe8, '* SAVE THE DISABLE ARRAY FLAG IN VO8'),
(0xdfea, 0xdfea, '* DO NOT ALLOW THE INDEX VARIABLE TO BE AN ARRAY'),
(0xdfec, 0xdfec, 'SET INDEX VARIABLE TO INITIAL VALUE'),
(0xdfef, 0xdfef, "SEARCH THE STACK FOR 'FOR/NEXT' DATA"),
(0xdff2, 0xdff2, 'PURGE RETURN ADDRESS OFF OF THE STACK'),
(0xdff4, 0xdff4, 'BRANCH IF INDEX VARIABLE NOT ALREADY BEING USED'),
(0xdff6, 0xdff6, "GET (ADDRESS + 18) OF MATCHED 'FOR/NEXT' DATA"),
(0xdff8, 0xdff8, 'MOVE THE STACK POINTER TO THE BEGINNING OF THE'),
(0xdffa, 0xdffa, '* CHECK FOR ROOM FOR 18 BYTES'),
(0xdffc, 0xdffc, '* IN FREE RAM'),
(0xdfff, 0xdfff, 'GET ADDR OF END OF SUBLINE IN X'),
(0xe002, 0xe002, 'GET CURRENT LINE NUMBER'),
(0xe004, 0xe004, 'SAVE LINE ADDR AND LINE NUMBER ON STACK'),
(0xe006, 0xe006, "TOKEN FOR 'TO'"),
(0xe008, 0xe008, "SYNTAX CHECK FOR 'TO'"),
(0xe00b, 0xe00b, "'TM' ERROR IF INDEX VARIABLE SET TO STRING"),
(0xe00e, 0xe00e, 'EVALUATE EXPRESSION'),
(0xe011, 0xe011, 'GET FPA0 MANTISSA SIGN'),
(0xe013, 0xe013, 'FORM A MASK TO SAVE DATA BITS OF HIGH ORDER MANTISSA'),
(0xe015, 0xe015, 'PUT THE MANTISSA SIGN IN BIT 7 OF HIGH ORDER MANTISSA'),
(0xe017, 0xe017, 'SAVE THE PACKED HIGH ORDER MANTISSA'),
(0xe019, 0xe019, 'LOAD FOLLOWING ADDRESS INTO Y AS A RETURN'),
(0xe01d, 0xe01d, 'ADDRESS - PUSH FPA0 ONTO THE STACK'),
(0xe020, 0xe020, 'POINT X TO FLOATING POINT NUMBER 1.0 (DEFAULT STEP VALUE)'),
(0xe023, 0xe023, 'MOVE (X) TO FPA0'),
(0xe026, 0xe026, 'GET CURRENT INPUT CHARACTER'),
(0xe028, 0xe028, 'STEP TOKEN'),
(0xe02a, 0xe02a, "BRANCH IF NO 'STEP' VALUE"),
(0xe02c, 0xe02c, 'GET A CHARACTER FROM BASIC'),
(0xe02e, 0xe02e, 'EVALUATE NUMERIC EXPRESSION'),
(0xe031, 0xe031, 'CHECK STATUS OF FPA0'),
(0xe034, 0xe034, 'SAVE STATUS AND FPA0 ON THE STACK'),
(0xe037, 0xe037, "* GET DESCRIPTOR POINTER FOR THE 'STEP'"),
(0xe039, 0xe039, '* VARIABLE AND SAVE IT ON THE STACK'),
(0xe03b, 0xe03b, "= GET THE 'FOR' FLAG AND"),
(0xe03d, 0xe03d, '= SAVE IT ON THE STACK'),
(0xe03f, 0xe03f, 'ENABLE IRQ,FIRQ'),
(0xe041, 0xe041, 'CHECK FOR KEYBOARD BREAK'),
(0xe043, 0xe043, "GET BASIC'S INPUT POINTER"),
(0xe045, 0xe045, 'SAVE IT'),
(0xe047, 0xe047, 'GET CURRENT INPUT CHAR & MOVE POINTER'),
(0xe049, 0xe049, 'BRANCH IF END OF LINE'),
(0xe04b, 0xe04b, 'CHECK FOR LINE SEPARATOR'),
(0xe04d, 0xe04d, 'BRANCH IF COLON'),
(0xe04f, 0xe04f, "'SYNTAX ERROR'-IF NOT LINE SEPARATOR"),
(0xe052, 0xe052, 'GET MS BYTE OF ADDRESS OF NEXT BASIC LINE'),
(0xe054, 0xe054, 'SAVE IN STOP/END FLAG - CAUSE A STOP IF'),
(0xe056, 0xe056, "BRANCH TO 'STOP' - END OF PROGRAM"),
(0xe058, 0xe058, 'GET CURRENT LINE NUMBER'),
(0xe05a, 0xe05a, 'SAVE IN CURLIN'),
(0xe05c, 0xe05c, 'SAVE ADDRESS OF FIRST BYTE OF LINE'),
(0xe05e, 0xe05e, 'TEST THE TRACE FLAG'),
(0xe060, 0xe060, 'BRANCH IF TRACE OFF'),
(0xe062, 0xe062, '<LEFT HAND MARKER FOR TRON LINE NUMBER'),
(0xe064, 0xe064, 'OUTPUT A CHARACTER'),
(0xe067, 0xe067, 'GET MS BYTE OF LINE NUMBER'),
(0xe069, 0xe069, 'CONVERT ACCD TO DECIMAL AND PRINT ON SCREEN'),
(0xe06c, 0xe06c, '> RIGHT HAND MARKER FOR TRON LINE NUMBER'),
(0xe06e, 0xe06e, 'OUTPUT A CHARACTER'),
(0xe071, 0xe071, 'GET A CHARACTER FROM BASIC'),
(0xe073, 0xe073, 'GO PROCESS COMMAND'),
(0xe075, 0xe075, 'GO BACK TO MAIN LOOP'),
(0xe077, 0xe077, 'RETURN IF END OF LINE (RTS - was BEQ LAE40)'),
(0xe079, 0xe079, 'CHECK FOR TOKEN - BIT 7 SET (NEGATIVE)'),
(0xe07a, 0xe07a, "BRANCH IF NOT A TOKEN - GO DO A 'LET' WHICH"),
(0xe07e, 0xe07e, 'SECONDARY TOKEN'),
(0xe082, 0xe082, 'SKIPF TOKEN - HIGHEST EXECUTABLE COMMAND IN BASIC'),
(0xe084, 0xe084, "'SYNTAX ERROR' IF NON-EXECUTABLE TOKEN"),
(0xe086, 0xe086, "GET ADDRESS OF BASIC'S COMMAND TABLE"),
(0xe089, 0xe089, 'X2 (2 BYTE/JUMP ADDRESS) & DISCARD BIT 7'),
(0xe08a, 0xe08a, 'SAVE COMMAND OFFSET IN ACCB'),
(0xe08c, 0xe08c, 'NON X POINTS TO COMMAND JUMP ADDR'),
(0xe08d, 0xe08d, 'GET AN INPUT CHAR'),
(0xe08f, 0xe08f, 'GO DO A COMMAND'),
(0xe091, 0xe091, 'GET AN INPUT CHAR'),
(0xe093, 0xe093, 'TOKEN FOR "MID$"'),
(0xe095, 0xe095, 'PROCESS MID$ REPLACEMENT'),
(0xe099, 0xe099, 'SYNTAX ERROR'),
(0xe09c, 0xe09c, 'BEGINNING OF PROGRAM ADDRESS'),
(0xe09e, 0xe09e, 'MOVE TO ONE BYTE BEFORE PROGRAM'),
(0xe0a0, 0xe0a0, 'SAVE NEW DATA POINTER'),
(0xe0a3, 0xe0a3, 'GET A KEYSTROKE ENTRY'),
(0xe0a6, 0xe0a6, 'RETURN IF NO INPUT'),
(0xe0a8, 0xe0a8, 'CONTROL C? (BREAK)'),
(0xe0aa, 0xe0aa, 'YES'),
(0xe0ac, 0xe0ac, 'CONTROL S? (PAUSE)'),
(0xe0ae, 0xe0ae, 'YES'),
(0xe0b0, 0xe0b0, 'SAVE KEYSTROKE IN INKEY IMAGE'),
(0xe0b3, 0xe0b3, 'GET A KEY'),
(0xe0b6, 0xe0b6, 'BRANCH IF NO KEY DOWN'),
(0xe0b8, 0xe0b8, 'CONTINUE - DO A BREAK CHECK'),
(0xe0ba, 0xe0ba, 'GET CURRENT INPUT CHAR'),
(0xe0be, 0xe0be, 'SET CARRY FLAG'),
(0xe0c0, 0xe0c0, 'BRANCH IF ARGUMENT EXISTS'),
(0xe0c2, 0xe0c2, '* SAVE CURRENT POSITION OF'),
(0xe0c4, 0xe0c4, "* BASIC'S INPUT POINTER"),
(0xe0c6, 0xe0c6, 'ROTATE CARRY INTO BIT 7 OF STOP/END FLAG'),
(0xe0c8, 0xe0c8, 'PURGE RETURN ADDRESS OFF STACK'),
(0xe0ca, 0xe0ca, 'GET CURRENT LINE NUMBER'),
(0xe0cc, 0xe0cc, 'DIRECT MODE?'),
(0xe0cf, 0xe0cf, 'YES'),
(0xe0d1, 0xe0d1, 'SAVE CURRENT LINE NUMBER'),
(0xe0d3, 0xe0d3, '* GET AND SAVE CURRENT POSITION'),
(0xe0d5, 0xe0d5, "* OF BASIC'S INPUT POINTER"),
(0xe0d7, 0xe0d7, "POINT TO CR, 'BREAK' MESSAGE"),
(0xe0da, 0xe0da, 'CHECK STOP/END FLAG'),
(0xe0dc, 0xe0dc, 'BRANCH TO MAIN LOOP OF BASIC IF END'),
(0xe0e0, 0xe0e0, "PRINT 'BREAK AT ####' AND GO TO"),
(0xe0e3, 0xe0e3, 'RETURN IF ARGUMENT GIVEN'),
(0xe0e5, 0xe0e5, "'CAN'T CONTINUE' ERROR"),
(0xe0e7, 0xe0e7, 'GET CONTINUE ADDRESS (INPUT POINTER)'),
(0xe0e9, 0xe0e9, "'CN' ERROR IF CONTINUE ADDRESS = 0"),
(0xe0ed, 0xe0ed, "RESET BASIC'S INPUT POINTER"),
(0xe0ef, 0xe0ef, 'GET LINE NUMBER'),
(0xe0f1, 0xe0f1, 'RESET CURRENT LINE NUMBER'),
(0xe0f4, 0xe0f4, 'BRANCH IF NO ARGUMENT'),
(0xe0f6, 0xe0f6, 'EVALUATE ARGUMENT'),
(0xe0f9, 0xe0f9, 'SAVE AMOUNT OF STRING SPACE ON STACK'),
(0xe0fb, 0xe0fb, 'GET CURRENT TOP OF CLEARED SPACE'),
(0xe0fd, 0xe0fd, 'GET CURRENT INPUT CHARACTER'),
(0xe0ff, 0xe0ff, 'BRANCH IF NO NEW TOP OF CLEARED SPACE'),
(0xe101, 0xe101, 'SYNTAX CHECK FOR COMMA'),
(0xe104, 0xe104, 'EVALUATE EXPRESSlON; RETURN VALUE IN X'),
(0xe107, 0xe107, 'X = TOP OF CLEARED SPACE'),
(0xe109, 0xe109, 'COMPARE TO TOP OF RAM'),
(0xe10b, 0xe10b, "'OM' ERROR IF > TOP OF RAM"),
(0xe10d, 0xe10d, 'ACCD = TOP OF CLEARED SPACE'),
(0xe10f, 0xe10f, 'SUBTRACT OUT AMOUNT OF CLEARED SPACE'),
(0xe111, 0xe111, "'OM' ERROR IF FREE MEM < 0"),
(0xe113, 0xe113, 'U = BOTTOM OF CLEARED SPACE'),
(0xe115, 0xe115, 'SUBTRACT OUT STACK BUFFER'),
(0xe118, 0xe118, "'OM' ERROR IF FREE MEM < 0"),
(0xe11a, 0xe11a, 'SUBTRACT OUT START OF VARIABLES'),
(0xe11c, 0xe11c, "'OM' ERROR IF FREE MEM < 0"),
(0xe11e, 0xe11e, 'SAVE NEW BOTTOM OF CLEARED SPACE'),
(0xe120, 0xe120, 'SAVE NEW TOP OF CLEARED SPACE'),
(0xe122, 0xe122, 'ERASE ALL VARIABLES, INITIALIZE POINTERS, ETC'),
(0xe125, 0xe125, "'OM' ERROR"),
(0xe128, 0xe128, '* GET CURRENT INPUT CHARACTER'),
(0xe12a, 0xe12a, '* IF NO LINE NUMBER'),
(0xe12e, 0xe12e, 'ERASE ALL VARIABLES'),
(0xe131, 0xe131, "'GOTO' THE RUN ADDRESS"),
(0xe133, 0xe133, 'SAVE INPUT CHARACTER IN ACCB'),
(0xe135, 0xe135, 'GET A CHARACTER FROM BASIC'),
(0xe137, 0xe137, "'TO' TOKEN"),
(0xe139, 0xe139, 'BRANCH IF GOTO'),
(0xe13b, 0xe13b, "'SUB' TOKEN"),
(0xe13d, 0xe13d, "'SYNTAX ERROR' IF NEITHER"),
(0xe13f, 0xe13f, '=ROOM FOR 6'),
(0xe141, 0xe141, '=BYTES ON STACK?'),
(0xe144, 0xe144, '* SAVE CURRENT BASIC INPUT POINTER, LINE'),
(0xe146, 0xe146, '* NUMBER AND SUB TOKEN ON STACK'),
(0xe14c, 0xe14c, "GO DO A 'GOTO'"),
(0xe14e, 0xe14e, "JUMP BACK TO BASIC'S MAIN LOOP"),
(0xe151, 0xe151, 'GET CURRENT INPUT CHAR'),
(0xe153, 0xe153, 'GET LINE NUMBER TO BINARY IN BINVAL'),
(0xe156, 0xe156, "ADVANCE BASIC'S POINTER TO END OF LINE"),
(0xe158, 0xe158, 'POINT TO START OF NEXT LINE'),
(0xe15a, 0xe15a, 'GET THE LINE NUMBER TO RUN'),
(0xe15c, 0xe15c, 'COMPARE TO CURRENT LINE NUMBER'),
(0xe15f, 0xe15f, "IF REO'D LINE NUMBER IS > CURRENT LINE NUMBER,"),
(0xe161, 0xe161, 'BEGINNING OF PROGRAM'),
(0xe163, 0xe163, 'GO FIND A LINE NUMBER'),
(0xe166, 0xe166, "'UNDEFINED LINE NUMBER'"),
(0xe168, 0xe168, 'MOVE BACK TO JUST BEFORE START OF LINE'),
(0xe16a, 0xe16a, "RESET BASIC'S INPUT POINTER"),
(0xe16d, 0xe16d, 'EXIT ROUTINE IF ARGUMENT GIVEN'),
(0xe16f, 0xe16f, '* PUT AN ILLEGAL VARIABLE NAME IN FIRST BYTE OF'),
(0xe171, 0xe171, "* VARDES WHICH WILL CAUSE 'FOR/NEXT' DATA ON THE"),
(0xe173, 0xe173, 'CHECK FOR RETURN DATA ON THE STACK'),
(0xe176, 0xe176, 'RESET STACK POINTER - PURGE TWO RETURN ADDRESSES'),
(0xe178, 0xe178, 'SUB TOKEN - $80'),
(0xe17a, 0xe17a, "BRANCH IF 'RETURN' FROM SUBROUTINE"),
(0xe17c, 0xe17c, "ERROR #2 'RETURN WITHOUT GOSUB'"),
(0xe17e, 0xe17e, 'SKIP TWO BYTES'),
(0xe17f, 0xe17f, "ERROR #7 'UNDEFINED LINE NUMBER'"),
(0xe181, 0xe181, 'JUMP TO ERROR HANDLER'),
(0xe184, 0xe184, "'SYNTAX ERROR'"),
(0xe187, 0xe187, '* RESTORE VALUES OF CURRENT LINE NUMBER AND'),
(0xe189, 0xe189, "* BASIC'S INPUT POINTER FOR THIS SUBROUTINE"),
(0xe18b, 0xe18b, '* AND LOAD ACCA WITH SUB TOKEN ($A6)'),
(0xe18d, 0xe18d, 'MOVE INPUT POINTER TO END OF SUBLINE OR LINE'),
(0xe18f, 0xe18f, 'SKIP 2 BYTES'),
(0xe190, 0xe190, 'MOVE INPUT POINTER TO END OF LINE'),
(0xe192, 0xe192, "RESET BASIC'S INPUT POINTER"),
(0xe195, 0xe195, 'COLON = SUBLINE TERMINATOR CHARACTER'),
(0xe197, 0xe197, 'SKPILD SKIP ONE BYTE; LDA #$5F'),
(0xe198, 0xe198, '0 = LINE TERMINATOR CHARACTER'),
(0xe199, 0xe199, 'TEMP STORE PRIMARY TERMINATOR CHARACTER'),
(0xe19b, 0xe19b, '0 (END OF LINE) = ALTERNATE TERM. CHAR.'),
(0xe19c, 0xe19c, "LOAD X W/BASIC'S INPUT POINTER"),
(0xe19e, 0xe19e, '* CHANGE TERMINATOR CHARACTER'),
(0xe1a0, 0xe1a0, '* FROM ACCB TO CHARAC - SAVE OLD TERMINATOR'),
(0xe1a2, 0xe1a2, 'SWAP PRIMARY AND SECONDARY TERMINATORS'),
(0xe1a4, 0xe1a4, 'GET NEXT INPUT CHARACTER'),
(0xe1a6, 0xe1a6, 'RETURN IF 0 (END OF LINE)'),
(0xe1a8, 0xe1a8, 'SAVE TERMINATOR ON STACK'),
(0xe1aa, 0xe1aa, 'COMPARE TO INPUT CHARACTER'),
(0xe1ac, 0xe1ac, 'RETURN IF EQUAL'),
(0xe1ae, 0xe1ae, 'MOVE POINTER UP ONE'),
(0xe1b0, 0xe1b0, 'CHECK FOR DOUBLE QUOTES'),
(0xe1b2, 0xe1b2, 'BRANCH IF " - TOGGLE TERMINATOR CHARACTERS'),
(0xe1b4, 0xe1b4, '* CHECK FOR $FF AND BRANCH IF'),
(0xe1b5, 0xe1b5, '* NOT SECONDARY TOKEN'),
(0xe1b7, 0xe1b7, 'MOVE INPUT POINTER 1 MORE IF SECONDARY'),
(0xe1b9, 0xe1b9, 'TOKEN FOR IF?'),
(0xe1bb, 0xe1bb, 'NO - GET ANOTHER INPUT CHARACTER'),
(0xe1bd, 0xe1bd, 'INCREMENT IF COUNTER - KEEP TRACK OF HOW MANY'),
(0xe1bf, 0xe1bf, 'GET ANOTHER INPUT CHARACTER'),
(0xe1c1, 0xe1c1, 'EVALUATE NUMERIC EXPRESSION'),
(0xe1c4, 0xe1c4, 'GET CURRENT INPUT CHARACTER'),
(0xe1c6, 0xe1c6, 'TOKEN FOR GO'),
(0xe1c8, 0xe1c8, "TREAT 'GO' THE SAME AS 'THEN'"),
(0xe1ca, 0xe1ca, 'TOKEN FOR THEN'),
(0xe1cc, 0xe1cc, 'DO A SYNTAX CHECK ON ACCB'),
(0xe1cf, 0xe1cf, 'CHECK FOR TRUE/FALSE - FALSE IF FPA0 EXPONENT = ZERO'),
(0xe1d1, 0xe1d1, 'BRANCH IF CONDITION TRUE'),
(0xe1d3, 0xe1d3, 'CLEAR FLAG - KEEP TRACK OF WHICH NESTED ELSE STATEMENT'),
(0xe1d5, 0xe1d5, "MOVE BASIC'S POINTER TO END OF SUBLINE"),
(0xe1d7, 0xe1d7, '* CHECK TO SEE IF END OF LINE OR SUBLINE'),
(0xe1d8, 0xe1d8, '* AND RETURN IF END OF LINE'),
(0xe1da, 0xe1da, 'GET AN INPUT CHARACTER FROM BASIC'),
(0xe1dc, 0xe1dc, 'TOKEN FOR ELSE'),
(0xe1de, 0xe1de, "IGNORE ALL DATA EXCEPT 'ELSE' UNTIL"),
(0xe1e0, 0xe1e0, 'CHECK TO SEE IF YOU MUST SEARCH ANOTHER SUBLINE'),
(0xe1e2, 0xe1e2, "BRANCH TO SEARCH ANOTHER SUBLINE FOR 'ELSE'"),
(0xe1e4, 0xe1e4, 'GET AN INPUT CHARACTER FROM BASIC'),
(0xe1e6, 0xe1e6, 'GET CURRENT INPUT CHARACTER'),
(0xe1e8, 0xe1e8, "BRANCH TO 'GOTO' IF NUMERIC CHARACTER"),
(0xe1ec, 0xe1ec, 'RETURN TO MAIN INTERPRETATION LOOP'),
(0xe1ef, 0xe1ef, 'EVALUATE EXPRESSION'),
(0xe1f2, 0xe1f2, 'TOKEN FOR GO'),
(0xe1f4, 0xe1f4, 'SYNTAX CHECK FOR GO'),
(0xe1f7, 0xe1f7, 'SAVE NEW TOKEN (TO,SUB)'),
(0xe1f9, 0xe1f9, 'TOKEN FOR SUB?'),
(0xe1fb, 0xe1fb, 'YES'),
(0xe1fd, 0xe1fd, 'TOKEN FOR TO?'),
(0xe1ff, 0xe1ff, "'SYNTAX' ERROR IF NOT 'SUB' OR 'TO'"),
(0xe201, 0xe201, 'DECREMENT IS BYTE OF MANTISSA OF FPA0 - THIS'),
(0xe203, 0xe203, 'BRANCH IF NOT AT THE PROPER GOTO OR GOSUB LINE NUMBER'),
(0xe205, 0xe205, "GET BACK THE TOKEN FOLLOWING 'GO'"),
(0xe207, 0xe207, "GO DO A 'GOTO' OR 'GOSUB'"),
(0xe20a, 0xe20a, 'GET A CHARACTER FROM BASIC'),
(0xe20c, 0xe20c, 'CONVERT BASIC LINE NUMBER TO BINARY'),
(0xe20e, 0xe20e, 'IS CHARACTER FOLLOWING LINE NUMBER A COMMA?'),
(0xe210, 0xe210, 'YES'),
(0xe212, 0xe212, 'IF NOT, FALL THROUGH TO NEXT COMMAND'),
(0xe214, 0xe214, 'DEFAULT LINE NUMBER OF ZERO'),
(0xe216, 0xe216, 'SAVE IT IN BINVAL'),
(0xe218, 0xe218, 'RETURN IF NOT NUMERIC CHARACTER'),
(0xe21a, 0xe21a, 'MASK OFF ASCII'),
(0xe21c, 0xe21c, 'SAVE DIGIT IN VO1'),
(0xe21e, 0xe21e, 'GET ACCUMULATED LINE NUMBER VALUE'),
(0xe220, 0xe220, 'LARGEST LINE NUMBER IS $F9FF (63999) -'),
(0xe222, 0xe222, "'SYNTAX' ERROR IF TOO BIG"),
(0xe225, 0xe225, '* TIMES 2'),
(0xe226, 0xe226, '='),
(0xe227, 0xe227, '= TIMES 4'),
(0xe228, 0xe228, 'ADD 1 = TIMES 5'),
(0xe22b, 0xe22b, '* TIMES 10'),
(0xe22c, 0xe22c, 'ADD NEXT DIGIT'),
(0xe22e, 0xe22e, 'PROPAGATE CARRY'),
(0xe230, 0xe230, 'SAVE NEW ACCUMULATED LINE NUMBER'),
(0xe232, 0xe232, 'GET NEXT CHARACTER FROM BASIC'),
(0xe234, 0xe234, 'LOOP- PROCESS NEXT DIGIT'),
(0xe236, 0xe236, 'FIND TARGET VARIABLE DESCRIPTOR'),
(0xe239, 0xe239, 'SAVE DESCRIPTOR ADDRESS OF 1ST EXPRESSION'),
(0xe23b, 0xe23b, 'TOKEN FOR "="'),
(0xe23d, 0xe23d, "DO A SYNTAX CHECK FOR '='"),
(0xe240, 0xe240, '* GET VARIABLE TYPE AND'),
(0xe242, 0xe242, '* SAVE ON THE STACK'),
(0xe244, 0xe244, 'EVALUATE EXPRESSION'),
(0xe247, 0xe247, '* REGET VARIABLE TYPE OF 1ST EXPRESSION AND'),
(0xe249, 0xe249, '* SET CARRY IF STRING'),
(0xe24a, 0xe24a, 'TYPE CHECK-TM ERROR IF VARIABLE TYPES ON'),
(0xe24d, 0xe24d, 'GO PUT FPA0 INTO VARIABLE DESCRIPTOR IF NUMERIC'),
(0xe251, 0xe251, 'POINT X TO DESCRIPTOR OF REPLACEMENT STRING'),
(0xe253, 0xe253, 'LOAD ACCD WITH START OF STRING SPACE'),
(0xe255, 0xe255, 'IS THE STRING IN STRING SPACE?'),
(0xe258, 0xe258, "BRANCH IF IT'S NOT IN THE STRING SPACE"),
(0xe25a, 0xe25a, 'COMPARE DESCRIPTOR ADDRESS TO START OF VARIABLES'),
(0xe25c, 0xe25c, 'BRANCH IF DESCRIPTOR ADDRESS NOT IN VARIABLES'),
(0xe25e, 0xe25e, 'GET LENGTH OF REPLACEMENT STRING'),
(0xe260, 0xe260, 'RESERVE ACCB BYTES OF STRING SPACE'),
(0xe263, 0xe263, 'GET DESCRIPTOR ADDRESS BACK'),
(0xe265, 0xe265, 'MOVE STRING INTO STRING SPACE'),
(0xe268, 0xe268, 'POINT X TO TEMP STRING DESCRIPTOR ADDRESS'),
(0xe26b, 0xe26b, 'SAVE STRING DESCRIPTOR ADDRESS IN V4D'),
(0xe26d, 0xe26d, 'REMOVE STRING DESCRIPTOR IF LAST ONE'),
(0xe270, 0xe270, 'POINT U TO REPLACEMENT DESCRIPTOR ADDRESS'),
(0xe272, 0xe272, 'GET TARGET DESCRIPTOR ADDRESS'),
(0xe274, 0xe274, 'GET LENGTH AND START OF REPLACEMENT STRING'),
(0xe276, 0xe276, '* SAVE STRING LENGTH AND START IN'),
(0xe278, 0xe278, '* TARGET DESCRIPTOR LOCATION'),
(0xe27c, 0xe27c, '?REDO MESSAGE'),
(0xe283, 0xe283, 'JMP TO ERROR HANDLER'),
(0xe286, 0xe286, '= GET THE INPUT FLAG AND BRANCH'),
(0xe288, 0xe288, "= IF 'INPUT'"),
(0xe28a, 0xe28a, '* GET LINE NUMBER WHERE THE ERROR OCCURRED'),
(0xe28c, 0xe28c, '* AND USE IT AS THE CURRENT LINE NUMBER'),
(0xe28e, 0xe28e, "'SYNTAX ERROR'"),
(0xe291, 0xe291, "* POINT X TO '?REDO' AND PRINT"),
(0xe294, 0xe294, '* IT ON THE SCREEN'),
(0xe297, 0xe297, '= GET THE SAVED ABSOLUTE ADDRESS OF'),
(0xe299, 0xe299, '= INPUT POINTER AND RESTORE IT'),
(0xe29c, 0xe29c, "'ID' ERROR"),
(0xe29e, 0xe29e, 'GET CURRENT LINE NUMBER'),
(0xe2a0, 0xe2a0, 'ADD ONE'),
(0xe2a2, 0xe2a2, "'ID' ERROR BRANCH IF DIRECT MODE"),
(0xe2a4, 0xe2a4, 'GET SOME INPUT DATA - WAS LB002'),
(0xe2a7, 0xe2a7, 'CHECK FOR PROMPT STRING DELIMITER'),
(0xe2a9, 0xe2a9, 'BRANCH IF NO PROMPT STRING'),
(0xe2ab, 0xe2ab, 'PUT PROMPT STRING ON STRING STACK'),
(0xe2b0, 0xe2b0, '* DO A SYNTAX CHECK FOR SEMICOLON'),
(0xe2b3, 0xe2b3, 'PRINT MESSAGE TO CONSOLE OUT'),
(0xe2b6, 0xe2b6, "POINT TO BASIC'S LINE BUFFER"),
(0xe2b9, 0xe2b9, 'CLEAR 1ST BYTE - FLAG TO INDICATE NO DATA'),
(0xe2bb, 0xe2bb, 'INPUT A STRING TO LINE BUFFER'),
(0xe2bd, 0xe2bd, '* INSERT A COMMA AT THE END'),
(0xe2bf, 0xe2bf, '* OF THE LINE INPUT BUFFER'),
(0xe2c3, 0xe2c3, 'SEND A "?" TO CONSOLE OUT'),
(0xe2c6, 0xe2c6, "SEND A 'SPACE' TO CONSOLE OUT"),
(0xe2c9, 0xe2c9, 'GO READ IN A BASIC LINE'),
(0xe2cc, 0xe2cc, 'BRANCH IF ENTER KEY ENDED ENTRY'),
(0xe2ce, 0xe2ce, 'PURGE TWO RETURN ADDRESSES OFF THE STACK'),
(0xe2d0, 0xe2d0, "GO DO A 'STOP' IF BREAK KEY ENDED LINE ENTRY"),
(0xe2d3, 0xe2d3, "'INPUT PAST END OF FILE' ERROR"),
(0xe2d6, 0xe2d6, "GET 'READ' START ADDRESS"),
(0xe2d8, 0xe2d8, 'SKIP ONE BYTE - LDA #*$4F'),
(0xe2d9, 0xe2d9, "'INPUT' ENTRY POINT: INPUT FLAG = 0"),
(0xe2da, 0xe2da, 'SET INPUT FLAG; 0 = INPUT: <> 0 = READ'),
(0xe2dc, 0xe2dc, "SAVE 'READ' START ADDRESS/'INPUT' BUFFER START"),
(0xe2de, 0xe2de, 'EVALUATE A VARIABLE'),
(0xe2e1, 0xe2e1, 'SAVE DESCRIPTOR ADDRESS'),
(0xe2e3, 0xe2e3, "* GET BASIC'S INPUT POINTER"),
(0xe2e5, 0xe2e5, '* AND SAVE IT'),
(0xe2e7, 0xe2e7, "GET 'READ' ADDRESS START/'INPUT' BUFFER POINTER"),
(0xe2e9, 0xe2e9, 'GET A CHARACTER FROM THE BASIC PROGRAM'),
(0xe2eb, 0xe2eb, 'BRANCH IF NOT END OF LINE'),
(0xe2ed, 0xe2ed, '* CHECK INPUT FLAG AND BRANCH'),
(0xe2ef, 0xe2ef, '* IF LOOKING FOR DATA (READ)'),
(0xe2f1, 0xe2f1, "SEND A '?' TO CONSOLE OUT"),
(0xe2f4, 0xe2f4, 'FILL INPUT BUFFER FROM CONSOLE IN'),
(0xe2f6, 0xe2f6, "RESET BASIC'S INPUT POINTER"),
(0xe2f8, 0xe2f8, 'GET A CHARACTER FROM BASIC'),
(0xe2fa, 0xe2fa, '* CHECK VARIABLE TYPE AND'),
(0xe2fc, 0xe2fc, '* BRANCH IF NUMERIC'),
(0xe2fe, 0xe2fe, 'LOAD X WITH CURRENT BASIC INPUT POINTER'),
(0xe300, 0xe300, 'SAVE CURRENT INPUT CHARACTER'),
(0xe302, 0xe302, 'CHECK FOR STRING DELIMITER'),
(0xe304, 0xe304, 'BRANCH IF STRING DELIMITER'),
(0xe306, 0xe306, 'BACK UP POINTER'),
(0xe308, 0xe308, '* ZERO = END OF LINE CHARACTER'),
(0xe309, 0xe309, '* SAVE AS TERMINATOR'),
(0xe30b, 0xe30b, 'SET UP PRINT PARAMETERS'),
(0xe30e, 0xe30e, 'END OF SUBLINE CHARACTER'),
(0xe310, 0xe310, 'SAVE AS TERMINATOR I'),
(0xe312, 0xe312, 'COMMA'),
(0xe314, 0xe314, 'SAVE AS TERMINATOR 2'),
(0xe316, 0xe316, 'STRIP A STRING FROM THE INPUT BUFFER'),
(0xe319, 0xe319, 'MOVE INPUT POINTER TO END OF STRING'),
(0xe31c, 0xe31c, 'PUT A STRING INTO THE STRING SPACE IF NECESSARY'),
(0xe31f, 0xe31f, 'CHECK FOR ANOTHER DATA ITEM'),
(0xe321, 0xe321, 'CONVERT AN ASCII STRING TO FP NUMBER'),
(0xe324, 0xe324, 'PACK FPA0 AND STORE IT IN ADDRESS IN VARDES -'),
(0xe327, 0xe327, 'GET CURRENT INPUT CHARACTER'),
(0xe329, 0xe329, 'BRANCH IF END OF LINE'),
(0xe32b, 0xe32b, 'CHECK FOR A COMMA'),
(0xe32d, 0xe32d, "BAD FILE DATA' ERROR OR RETRY"),
(0xe331, 0xe331, '* GET CURRENT INPUT'),
(0xe333, 0xe333, '* POINTER (USED AS A DATA POINTER) AND SAVE IT'),
(0xe335, 0xe335, '* RESET INPUT POINTER TO INPUT OR'),
(0xe337, 0xe337, '* READ STATEMENT'),
(0xe339, 0xe339, 'GET CURRENT CHARACTER FROM BASIC'),
(0xe33b, 0xe33b, 'BRANCH IF END OF LINE - EXIT COMMAND'),
(0xe33d, 0xe33d, 'SYNTAX CHECK FOR COMMA'),
(0xe340, 0xe340, 'GET ANOTHER INPUT OR READ ITEM'),
(0xe342, 0xe342, "RESET BASIC'S INPUT POINTER"),
(0xe344, 0xe344, 'SEARCH FOR END OF CURRENT LINE OR SUBLINE'),
(0xe347, 0xe347, 'MOVE X ONE PAST END OF LINE'),
(0xe349, 0xe349, 'CHECK FOR END OF LINE'),
(0xe34a, 0xe34a, 'BRANCH IF END OF SUBLINE'),
(0xe34c, 0xe34c, "'OUT OF DATA' ERROR"),
(0xe34e, 0xe34e, 'GET NEXT 2 CHARACTERS'),
(0xe350, 0xe350, "'OD' ERROR IF END OF PROGRAM"),
(0xe352, 0xe352, 'GET BASIC LINE NUMBER AND'),
(0xe354, 0xe354, 'SAVE IT IN DATTXT'),
(0xe356, 0xe356, 'GET AN INPUT CHARACTER'),
(0xe358, 0xe358, 'DATA TOKEN?'),
(0xe35a, 0xe35a, 'NO - KEEP LOOKING'),
(0xe35c, 0xe35c, 'YES'),
(0xe35e, 0xe35e, 'GET DATA POINTER'),
(0xe360, 0xe360, '* CHECK INPUT FLAG'),
(0xe362, 0xe362, '* SAVE NEW DATA POINTER IF READ'),
(0xe366, 0xe366, "= CHECK NEXT CHARACTER IN 'INPUT' BUFFER"),
(0xe368, 0xe368, '='),
(0xe36a, 0xe36a, "POINT X TO '?EXTRA IGNORED'"),
(0xe36d, 0xe36d, 'PRINT THE MESSAGE'),
(0xe371, 0xe371, 'IGNORED" ?EXTRA IGNORED MESSAGE'),
(0xe381, 0xe381, 'BRANCH IF ARGUMENT GIVEN'),
(0xe383, 0xe383, 'X = 0: DEFAULT FOR NO ARGUMENT'),
(0xe387, 0xe387, 'EVALUATE AN ALPHA EXPRESSION'),
(0xe38a, 0xe38a, 'SAVE VARIABLE DESCRIPTOR POINTER'),
(0xe38c, 0xe38c, "GO SCAN FOR 'FOR/NEXT' DATA ON STACK"),
(0xe38f, 0xe38f, 'BRANCH IF DATA FOUND'),
(0xe391, 0xe391, "'NEXT WITHOUT FOR' ERROR (SHOULD BE CLRB)"),
(0xe393, 0xe393, 'PROCESS ERROR'),
(0xe395, 0xe395, "POINT S TO START OF 'FOR/NEXT' DATA"),
(0xe397, 0xe397, 'POINT X TO FP VALUE OF STEP'),
(0xe399, 0xe399, 'COPY A FP NUMBER FROM (X) TO FPA0'),
(0xe39c, 0xe39c, 'GET THE DIRECTION OF STEP'),
(0xe39e, 0xe39e, 'SAVE IT AS THE SIGN OF FPA0'),
(0xe3a0, 0xe3a0, 'POINT (X) TO INDEX VARIABLE DESCRIPTOR'),
(0xe3a2, 0xe3a2, 'ADD (X) TO FPA0 (STEP TO INDEX)'),
(0xe3a5, 0xe3a5, 'PACK FPA0 AND STORE IT IN ADDRESS'),
(0xe3a8, 0xe3a8, 'POINT (X) TO TERMINAL VALUE OF INDEX'),
(0xe3aa, 0xe3aa, 'COMPARE CURRENT INDEX VALUE TO TERMINAL VALUE OF INDEX'),
(0xe3ad, 0xe3ad, 'ACCB = 0 IF TERMINAL VALUE=CURRENT VALUE AND STEP=0 OR IF'),
(0xe3af, 0xe3af, "BRANCH IF 'FOR/NEXT' LOOP DONE"),
(0xe3b1, 0xe3b1, '* GET LINE NUMBER AND'),
(0xe3b3, 0xe3b3, '* BASIC POINTER OF'),
(0xe3b5, 0xe3b5, '* STATEMENT FOLLOWING THE'),
(0xe3b8, 0xe3b8, '* PROPER FOR STATEMENT'),
(0xe3ba, 0xe3ba, 'JUMP BACK TO COMMAND INTEPR. LOOP'),
(0xe3bd, 0xe3bd, "PULL THE 'FOR-NEXT' DATA OFF THE STACK"),
(0xe3c0, 0xe3c0, 'GET CURRENT INPUT CHARACTER'),
(0xe3c2, 0xe3c2, 'CHECK FOR ANOTHER ARGUMENT'),
(0xe3c4, 0xe3c4, 'RETURN IF NONE'),
(0xe3c6, 0xe3c6, 'GET NEXT CHARACTER FROM BASIC'),
(0xe3c8, 0xe3c8, "BSR SIMULATES A CALL TO 'NEXT' FROM COMMAND LOOP"),
(0xe3ca, 0xe3ca, 'EVALUATE EXPRESSION AND DO A TYPE CHECK FOR NUMERIC'),
(0xe3cc, 0xe3cc, 'CLEAR CARRY FLAG'),
(0xe3ce, 0xe3ce, 'OP CODE OF TST $1A01 - SKIP TWO BYTES (DO NOT CHANGE CARRY FLAG)'),
(0xe3cf, 0xe3cf, 'SET CARRY'),
(0xe3d1, 0xe3d1, 'TEST TYPE FLAG; DO NOT CHANGE CARRY'),
(0xe3d3, 0xe3d3, 'BRANCH IF STRING'),
(0xe3d5, 0xe3d5, 'RETURN ON PLUS'),
(0xe3d7, 0xe3d7, "SKIP 2 BYTES - 'TM' ERROR"),
(0xe3d8, 0xe3d8, 'RETURN ON MINUS'),
(0xe3da, 0xe3da, "'TYPE M1SMATCH' ERROR"),
(0xe3dc, 0xe3dc, 'PROCESS ERROR'),
(0xe3df, 0xe3df, 'BACK UP INPUT POINTER'),
(0xe3e1, 0xe3e1, 'END OF OPERATION PRECEDENCE FLAG'),
(0xe3e2, 0xe3e2, 'SKIP TWO BYTES'),
(0xe3e3, 0xe3e3, 'SAVE FLAG (RELATIONAL OPERATOR FLAG)'),
(0xe3e5, 0xe3e5, 'SAVE FLAG (PRECEDENCE FLAG)'),
(0xe3e9, 0xe3e9, '* SEE IF ROOM IN FREE RAM FOR (B) WORDS'),
(0xe3ec, 0xe3ec, 'GO EVALUATE AN EXPRESSION'),
(0xe3ef, 0xe3ef, 'RESET RELATIONAL OPERATOR FLAG'),
(0xe3f1, 0xe3f1, 'GET CURRENT INPUT CHARACTER'),
(0xe3f3, 0xe3f3, 'TOKEN FOR >'),
(0xe3f5, 0xe3f5, 'BRANCH IF LESS THAN RELATIONAL OPERATORS'),
(0xe3f9, 0xe3f9, '* BRANCH IF GREATER THAN RELATIONAL OPERATORS'),
(0xe3fb, 0xe3fb, "SET CARRY IF '>'"),
(0xe3fd, 0xe3fd, 'CARRY TO BIT 0'),
(0xe3fe, 0xe3fe, '* CARRY SET IF'),
(0xe400, 0xe400, '* TRELFL = ACCA'),
(0xe402, 0xe402, 'BRANCH IF SYNTAX ERROR : == << OR >>'),
(0xe404, 0xe404, 'BIT 0: >, BIT 1 =, BIT 2: <'),
(0xe406, 0xe406, 'GET AN INPUT CHARACTER'),
(0xe408, 0xe408, 'CHECK FOR ANOTHER RELATIONAL OPERATOR'),
(0xe40a, 0xe40a, 'GET RELATIONAL OPERATOR FLAG'),
(0xe40c, 0xe40c, 'BRANCH IF RELATIONAL COMPARISON'),
(0xe40e, 0xe40e, 'BRANCH IF > RELATIONAL OPERATOR'),
(0xe412, 0xe412, 'SEVEN ARITHMETIC/LOGICAL OPERATORS'),
(0xe414, 0xe414, 'BRANCH IF NOT ARITHMETIC/LOGICAL OPERATOR'),
(0xe416, 0xe416, 'ADD CARRY, NUMERIC FLAG AND MODIFIED TOKEN NUMBER'),
(0xe418, 0xe418, "BRANCH IF VALTYP = FF, AND ACCA = '+' TOKEN -"),
(0xe41c, 0xe41c, 'RESTORE ARITHMETIC/LOGICAL OPERATOR NUMBER'),
(0xe41e, 0xe41e, '* STORE OPERATOR NUMBER ON STACK; MULTIPLY IT BY 2'),
(0xe420, 0xe420, '* THEN ADD THE STORED STACK DATA = MULTIPLY'),
(0xe421, 0xe421, '* X 3; 3 BYTE/TABLE ENTRY'),
(0xe423, 0xe423, 'JUMP TABLE FOR ARITHMETIC & LOGICAL OPERATORS'),
(0xe426, 0xe426, 'POINT X TO PROPER TABLE'),
(0xe428, 0xe428, 'GET PRECEDENCE FLAG FROM STACK'),
(0xe42a, 0xe42a, 'COMPARE TO CURRENT OPERATOR'),
(0xe42c, 0xe42c, 'BRANCH IF STACK OPERATOR > CURRENT OPERATOR'),
(0xe42e, 0xe42e, "'TM' ERROR IF VARIABLE TYPE = STRING"),
(0xe430, 0xe430, 'SAVE PRECEDENCE FLAG'),
(0xe432, 0xe432, 'PUSH OPERATOR ROUTINE ADDRESS AND FPA0 ONTO STACK'),
(0xe434, 0xe434, 'GET POINTER TO ARITHMETIC/LOGICAL TABLE ENTRY FOR'),
(0xe436, 0xe436, 'GET PRECEDENCE FLAG OF PREVIOUS OPERATION'),
(0xe438, 0xe438, 'BRANCH IF NOT END OF OPERATION'),
(0xe43a, 0xe43a, 'CHECK TYPE OF PRECEDENCE FLAG'),
(0xe43b, 0xe43b, 'BRANCH IF END OF EXPRESSION OR SUB-EXPRESSION'),
(0xe43f, 0xe43f, 'EVALUATE AN OPERATION'),
(0xe441, 0xe441, 'BIT 7 OF TYPE FLAG TO CARRY'),
(0xe443, 0xe443, 'SHIFT RELATIONAL FLAG LEFT - VALTYP TO BIT 0'),
(0xe444, 0xe444, 'MOVE THE INPUT POINTER BACK ONE'),
(0xe446, 0xe446, 'POINT X TO RELATIONAL COMPARISON JUMP TABLE'),
(0xe449, 0xe449, 'SAVE RELATIONAL COMPARISON DATA'),
(0xe44b, 0xe44b, 'SET VARIABLE TYPE TO NUMERIC'),
(0xe44d, 0xe44d, 'PERFORM OPERATION OR SAVE ON STACK'),
(0xe44f, 0xe44f, "* GET BASIC'S INPUT POINTER AND"),
(0xe451, 0xe451, '* MOVE IT BACK ONE'),
(0xe454, 0xe454, 'RELATIONAL COMPARISON FLAG'),
(0xe455, 0xe455, 'JUMP ADDRESS'),
(0xe457, 0xe457, 'COMPARE PRECEDENCE OF LAST DONE OPERATION TO'),
(0xe459, 0xe459, 'EVALUATE OPERATION IF LOWER PRECEDENCE'),
(0xe45b, 0xe45b, 'PUSH OPERATION DATA ON STACK IF HIGHER PRECEDENCE'),
(0xe45d, 0xe45d, 'GET ADDRESS OF OPERATOR ROUTINE'),
(0xe45f, 0xe45f, 'SAVE IT ON THE STACK'),
(0xe461, 0xe461, 'PUSH FPA0 ONTO STACK'),
(0xe463, 0xe463, 'GET BACK RELATIONAL OPERATOR FLAG'),
(0xe465, 0xe465, 'EVALUATE ANOTHER EXPRESSION'),
(0xe468, 0xe468, "'SYNTAX ERROR'"),
(0xe46b, 0xe46b, 'GET SIGN OF FPA0 MANTISSA'),
(0xe46d, 0xe46d, 'GET PRECEDENCE CODE TO ACCA'),
(0xe46f, 0xe46f, 'GET RETURN ADDRESS FROM STACK & PUT IT IN Y'),
(0xe471, 0xe471, 'SAVE ACCB ON STACK'),
(0xe473, 0xe473, '* PUSH FPA0 ONTO THE STACK'),
(0xe47b, 0xe47b, 'JUMP TO ADDRESS IN Y'),
(0xe47d, 0xe47d, 'POINT X TO DUMMY VALUE (ZERO)'),
(0xe47f, 0xe47f, 'GET PRECEDENCE FLAG FROM STACK'),
(0xe481, 0xe481, 'BRANCH IF END OF EXPRESSION'),
(0xe483, 0xe483, '* CHECK FOR RELATIONAL COMPARISON FLAG'),
(0xe485, 0xe485, '* AND BRANCH IF RELATIONAL COMPARISON'),
(0xe487, 0xe487, "'TM' ERROR IF VARIABLE TYPE = STRING"),
(0xe48a, 0xe48a, 'SAVE POINTER TO OPERATOR ROUTINE'),
(0xe48c, 0xe48c, 'GET RELATIONAL OPERATOR FLAG FROM STACK'),
(0xe48e, 0xe48e, "CHECK FOR 'NOT' OPERATOR"),
(0xe490, 0xe490, "RETURN IF 'NOT' - NO RELATIONAL COMPARISON"),
(0xe492, 0xe492, 'CHECK FOR NEGATION (UNARY) FLAG'),
(0xe494, 0xe494, 'RETURN IF NEGATION - NO RELATIONAL COMPARISON'),
(0xe496, 0xe496, '= ROTATE VALTYP BIT INTO CARRY'),
(0xe497, 0xe497, '= FLAG AND SAVE NEW RELFLG'),
(0xe499, 0xe499, '* PULL A FP VALUE OFF OF THE STACK'),
(0xe49b, 0xe49b, '* AND SAVE IT IN FPA1'),
(0xe4a1, 0xe4a1, '= GET MANTISSA SIGN AND'),
(0xe4a3, 0xe4a3, '= SAVE IT IN FPA1'),
(0xe4a5, 0xe4a5, 'EOR IT WITH FPA1 MANTISSA SIGN'),
(0xe4a7, 0xe4a7, 'SAVE IT IN RESULT SIGN BYTE'),
(0xe4a9, 0xe4a9, 'GET EXPONENT OF FPA0'),
(0xe4ac, 0xe4ac, 'CALL EXTENDED BASIC ADD-IN'),
(0xe4af, 0xe4af, 'INITIALIZE TYPE FLAG TO NUMERIC'),
(0xe4b1, 0xe4b1, 'GET AN INPUT CHAR'),
(0xe4b3, 0xe4b3, 'BRANCH IF NOT NUMERIC'),
(0xe4b5, 0xe4b5, 'CONVERT ASCII STRING TO FLOATING POINT -'),
(0xe4b8, 0xe4b8, 'SET CARRY IF NOT ALPHA'),
(0xe4bb, 0xe4bb, 'BRANCH IF ALPHA CHARACTER'),
(0xe4bd, 0xe4bd, "IS IT '.' (DECIMAL POINT)?"),
(0xe4bf, 0xe4bf, 'CONVERT ASCII STRING TO FLOATING POINT'),
(0xe4c1, 0xe4c1, 'MINUS TOKEN'),
(0xe4c3, 0xe4c3, 'YES - GO PROCESS THE MINUS OPERATOR'),
(0xe4c5, 0xe4c5, 'PLUS TOKEN'),
(0xe4c7, 0xe4c7, 'YES - GET ANOTHER CHARACTER'),
(0xe4c9, 0xe4c9, 'STRING DELIMITER?'),
(0xe4cb, 0xe4cb, 'NO'),
(0xe4cd, 0xe4cd, 'CURRENT BASIC POINTER TO X'),
(0xe4cf, 0xe4cf, 'SAVE STRING ON STRING STACK'),
(0xe4d2, 0xe4d2, '* GET ADDRESS OF END OF STRING AND'),
(0xe4d4, 0xe4d4, "* PUT BASIC'S INPUT POINTER THERE"),
(0xe4d7, 0xe4d7, 'NOT TOKEN?'),
(0xe4d9, 0xe4d9, 'NO'),
(0xe4db, 0xe4db, "'NOT' PRECEDENCE FLAG"),
(0xe4dd, 0xe4dd, "PROCESS OPERATION FOLLOWING 'NOT'"),
(0xe4e0, 0xe4e0, 'CONVERT FPA0 TO INTEGER IN ACCD'),
(0xe4e3, 0xe4e3, "* 'NOT' THE INTEGER"),
(0xe4e5, 0xe4e5, 'CONVERT ACCD TO FLOATING POINT (FPA0)'),
(0xe4e8, 0xe4e8, 'CHECK FOR TOKENS PRECEEDED BY $FF'),
(0xe4e9, 0xe4e9, 'IT WAS PRECEEDED BY $FF'),
(0xe4eb, 0xe4eb, "SYNTAX CHECK FOR A '('"),
(0xe4ed, 0xe4ed, 'EVALUATE EXPRESSIONS WITHIN PARENTHESES AT'),
(0xe4f0, 0xe4f0, "SYNTAX CHECK FOR ')'"),
(0xe4f2, 0xe4f2, 'SKIP 2 BYTES'),
(0xe4f3, 0xe4f3, "SYNTAX CHECK FOR '('"),
(0xe4f5, 0xe4f5, 'SKIP 2 BYTES'),
(0xe4f6, 0xe4f6, 'SYNTAX CHECK FOR COMMA'),
(0xe4f8, 0xe4f8, '* COMPARE ACCB TO CURRENT INPUT'),
(0xe4fc, 0xe4fc, '* CHARACTER - SYNTAX ERROR IF NO MATCH'),
(0xe4fe, 0xe4fe, 'GET A CHARACTER FROM BASIC'),
(0xe500, 0xe500, 'SYNTAX ERROR'),
(0xe502, 0xe502, 'JUMP TO ERROR HANDLER'),
(0xe505, 0xe505, 'MINUS (UNARY) PRECEDENCE FLAG'),
(0xe507, 0xe507, "PROCESS OPERATION FOLLOWING 'UNARY' NEGATION"),
(0xe50a, 0xe50a, 'CHANGE SIGN OF FPA0 MANTISSA'),
(0xe50d, 0xe50d, 'FIND THE DESCRIPTOR ADDRESS OF A VARIABLE'),
(0xe510, 0xe510, 'SAVE DESCRIPTOR ADDRESS IN FPA0'),
(0xe512, 0xe512, 'TEST VARIABLE TYPE'),
(0xe514, 0xe514, 'RETURN IF STRING'),
(0xe516, 0xe516, 'COPY A FP NUMBER FROM (X) TO FPA0'),
(0xe519, 0xe519, 'GET AN INPUT CHARACTER (SECONDARY TOKEN)'),
(0xe51b, 0xe51b, 'SAVE IT IN ACCB'),
(0xe51d, 0xe51d, 'X2 & BET RID OF BIT 7'),
(0xe51e, 0xe51e, 'GET ANOTHER INPUT CHARACTER'),
(0xe520, 0xe520, '29 SECONDARY FUNCTIONS - 1'),
(0xe522, 0xe522, 'BRANCH IF COLOR BASIC TOKEN'),
(0xe524, 0xe524, 'SYNTAX ERROR'),
(0xe527, 0xe527, 'SAVE TOKEN OFFSET ON STACK'),
(0xe529, 0xe529, 'CHECK FOR TOKEN WITH AN ARGUMENT'),
(0xe52b, 0xe52b, 'DO SECONDARIES STRING$ OR LESS'),
(0xe52f, 0xe52f, '* DO SECONDARIES $92 (INKEY$) OR >'),
(0xe531, 0xe531, "SYNTAX CHECK FOR A '('"),
(0xe533, 0xe533, 'GET TOKEN NUMBER'),
(0xe535, 0xe535, 'EVALUATE FIRST STRING IN ARGUMENT'),
(0xe538, 0xe538, 'SYNTAX CHECK FOR A COMMA'),
(0xe53a, 0xe53a, "'TM' ERROR IF NUMERIC VARiABLE"),
(0xe53d, 0xe53d, 'GET TOKEN OFFSET FROM STACK'),
(0xe53f, 0xe53f, 'POINT U TO STRING DESCRIPTOR'),
(0xe541, 0xe541, 'SAVE TOKEN OFFSET AND DESCRIPTOR ADDRESS'),
(0xe543, 0xe543, 'EVALUATE FIRST NUMERIC ARGUMENT'),
(0xe546, 0xe546, 'GET TOKEN OFFSET FROM STACK'),
(0xe548, 0xe548, 'SAVE TOKEN OFFSET AND NUMERIC ARGUMENT'),
(0xe54a, 0xe54a, 'OP CODE OF LDX# - SKlP 2 BYTES'),
(0xe54b, 0xe54b, "SYNTAX CHECK FOR A '('"),
(0xe54d, 0xe54d, 'GET TOKEN OFFSET'),
(0xe54f, 0xe54f, 'GET SECONDARY FUNCTION JUMP TABLE ADDRESS'),
(0xe552, 0xe552, 'ADD IN COMMAND OFFSET'),
(0xe553, 0xe553, 'GO DO AN SECONDARY FUNCTION'),
(0xe555, 0xe555, "'TM' ERROR IF VARIABLE TYPE = STRING"),
(0xe558, 0xe558, "SKIP ONE BYTE - 'OR' FLAG = $4F"),
(0xe559, 0xe559, 'AND FLAG = 0'),
(0xe55a, 0xe55a, 'AND/OR FLAG'),
(0xe55c, 0xe55c, 'CONVERT FPA0 INTO AN INTEGER IN ACCD'),
(0xe55f, 0xe55f, 'TEMP SAVE ACCD'),
(0xe561, 0xe561, 'MOVE FPA1 TO FPA0'),
(0xe564, 0xe564, 'CONVERT FPA0 INTO AN INTEGER IN ACCD'),
(0xe567, 0xe567, 'CHECK AND/OR FLAG'),
(0xe569, 0xe569, 'BRANCH IF OR'),
(0xe56b, 0xe56b, "* 'AND' ACCD WITH FPA0 INTEGER"),
(0xe56d, 0xe56d, '* STORED IN ENDCHR'),
(0xe56f, 0xe56f, 'CONVERT TO FP'),
(0xe571, 0xe571, "* 'OR' ACCD WITH FPA0 INTEGER"),
(0xe573, 0xe573, '* STORED IN CHARAC'),
(0xe575, 0xe575, 'CONVERT THE VALUE IN ACCD INTO A FP NUMBER'),
(0xe578, 0xe578, "'TM' ERROR IF TYPE MISMATCH"),
(0xe57b, 0xe57b, 'BRANCH IF STRING VARIABLE'),
(0xe57d, 0xe57d, "* 'PACK' THE MANTISSA"),
(0xe57f, 0xe57f, '* SIGN OF FPA1 INTO'),
(0xe581, 0xe581, '* BIT 7 OF THE'),
(0xe583, 0xe583, '* MANTISSA MS BYTE'),
(0xe585, 0xe585, 'POINT X TO FPA1'),
(0xe588, 0xe588, 'COMPARE FPA0 TO FPA1'),
(0xe58b, 0xe58b, 'CHECK TRUTH OF RELATIONAL COMPARISON'),
(0xe58d, 0xe58d, 'SET VARIABLE TYPE TO NUMERIC'),
(0xe58f, 0xe58f, 'REMOVE STRING TYPE FLAG (BIT0=1 FOR STRINGS) FROM THE'),
(0xe591, 0xe591, 'GET LENGTH AND ADDRESS OF STRING WHOSE'),
(0xe594, 0xe594, '* SAVE LENGTH AND ADDRESS IN TEMPORARY'),
(0xe596, 0xe596, '* DESCRIPTOR (STRING B)'),
(0xe598, 0xe598, '= RETURN LENGTH AND ADDRESS OF STRING'),
(0xe59a, 0xe59a, '= WHOSE DESCRIPTOR ADDRESS IS STORED IN FPA1+2'),
(0xe59d, 0xe59d, 'LOAD ACCA WITH LENGTH OF STRING B'),
(0xe59f, 0xe59f, 'SAVE LENGTH A ON STACK'),
(0xe5a1, 0xe5a1, 'SUBTRACT LENGTH A FROM LENGTH B'),
(0xe5a3, 0xe5a3, 'BRANCH IF STRINGS OF EQUAL LENGTH'),
(0xe5a5, 0xe5a5, 'TRUE FLAG'),
(0xe5a7, 0xe5a7, 'TRUE IF LENGTH B > LENGTH A'),
(0xe5a9, 0xe5a9, 'LOAD ACCB WITH LENGTH B'),
(0xe5ab, 0xe5ab, 'SET FLAG = FALSE (1FF)'),
(0xe5ac, 0xe5ac, 'SAVE TRUE/FALSE FLAG'),
(0xe5ae, 0xe5ae, 'POINT U TO START OF STRING'),
(0xe5b0, 0xe5b0, 'COMPENSATE FOR THE DECB BELOW'),
(0xe5b1, 0xe5b1, 'DECREMENT SHORTER STRING LENGTH'),
(0xe5b2, 0xe5b2, 'BRANCH IF ALL OF STRING NOT COMPARED'),
(0xe5b4, 0xe5b4, 'GET TRUE/FALSE FLAB'),
(0xe5b6, 0xe5b6, 'CHECK TRUTH OF RELATIONAL COMPARISON'),
(0xe5b8, 0xe5b8, 'GET A BYTE FROM STRING A'),
(0xe5ba, 0xe5ba, 'COMPARE TO STRING B'),
(0xe5bc, 0xe5bc, 'CHECK ANOTHER CHARACTER IF ='),
(0xe5be, 0xe5be, 'FALSE FLAG IF STRING A > B'),
(0xe5c0, 0xe5c0, 'BRANCH IF STRING A > STRING B'),
(0xe5c2, 0xe5c2, 'SET FLAG = TRUE'),
(0xe5c3, 0xe5c3, 'CONVERT $FF,0,1 TO 0,1,2'),
(0xe5c5, 0xe5c5, "NOW IT'S 1,2,4 FOR > = <"),
(0xe5c6, 0xe5c6, "'AND' THE ACTUAL COMPARISON WITH THE DESIRED -"),
(0xe5c8, 0xe5c8, 'BRANCH IF FALSE (NO MATCHING BITS)'),
(0xe5ca, 0xe5ca, 'TRUE FLAG'),
(0xe5cc, 0xe5cc, 'CONVERT ACCB INTO FP NUMBER IN FPA0'),
(0xe5cf, 0xe5cf, 'SYNTAX CHECK FOR COMMA'),
(0xe5d2, 0xe5d2, 'DIMENSION FLAG'),
(0xe5d4, 0xe5d4, 'SAVE ARRAY SPACE FOR THIS VARIABLE'),
(0xe5d6, 0xe5d6, 'GET CURRENT INPUT CHARACTER'),
(0xe5d8, 0xe5d8, 'KEEP DIMENSIONING IF NOT END OF LINE'),
(0xe5db, 0xe5db, 'DIMENSION FLAG = 0; DO NOT SET UP AN ARRAY'),
(0xe5dc, 0xe5dc, 'GET CURRENT INPUT CHARACTER'),
(0xe5de, 0xe5de, 'SAVE ARRAY FLAG'),
(0xe5e0, 0xe5e0, 'SAVE INPUT CHARACTER'),
(0xe5e2, 0xe5e2, 'GET CURRENT INPUT CHARACTER'),
(0xe5e4, 0xe5e4, 'SET CARRY IF NOT ALPHA'),
(0xe5e6, 0xe5e6, 'SYNTAX ERROR IF NOT ALPHA'),
(0xe5ea, 0xe5ea, 'DEFAULT 2ND VARIABLE CHARACTER TO ZERO'),
(0xe5eb, 0xe5eb, 'SET VARIABLE TYPE TO NUMERIC'),
(0xe5ed, 0xe5ed, 'GET ANOTHER CHARACTER FROM BASIC'),
(0xe5ef, 0xe5ef, 'BRANCH IF NUMERIC (2ND CHARACTER IN'),
(0xe5f1, 0xe5f1, 'SET CARRY IF NOT ALPHA'),
(0xe5f3, 0xe5f3, 'BRANCH IF NOT ALPHA'),
(0xe5f5, 0xe5f5, 'SAVE 2ND CHARACTER IN ACCB'),
(0xe5f7, 0xe5f7, 'GET AN INPUT CHARACTER'),
(0xe5f9, 0xe5f9, 'BRANCH IF NUMERIC'),
(0xe5fb, 0xe5fb, 'SET CARRY IF NOT ALPHA'),
(0xe5fd, 0xe5fd, 'BRANCH IF ALPHA'),
(0xe5ff, 0xe5ff, 'CHECK FOR A STRING VARIABLE'),
(0xe601, 0xe601, 'BRANCH IF IT IS NOT A STRING'),
(0xe603, 0xe603, 'SET VARIABLE TYPE TO STRING'),
(0xe605, 0xe605, 'SET BIT 7 OF 2ND CHARACTER (STRING)'),
(0xe607, 0xe607, 'GET AN INPUT CHARACTER'),
(0xe609, 0xe609, 'SAVE 2ND CHARACTER IN VARNAM+1'),
(0xe60b, 0xe60b, 'OR IN THE ARRAY DISABLE FLAG - IF = $80,'),
(0xe60d, 0xe60d, 'IS THIS AN ARRAY VARIABLE?'),
(0xe60f, 0xe60f, 'BRANCH IF IT IS'),
(0xe613, 0xe613, 'RESET THE ARRAY DISABLE FLAG'),
(0xe615, 0xe615, 'POINT X TO THE START OF VARIABLES'),
(0xe617, 0xe617, 'GET VARIABLE IN QUESTION'),
(0xe619, 0xe619, 'COMPARE X TO THE END OF VARIABLES'),
(0xe61b, 0xe61b, 'BRANCH IF END OF VARIABLES'),
(0xe61d, 0xe61d, '* COMPARE VARIABLE IN QUESTION TO CURRENT'),
(0xe620, 0xe620, '* VARIABLE AND BRANCH IF MATCH'),
(0xe622, 0xe622, '= MOVE POINTER TO NEXT VARIABLE AND'),
(0xe624, 0xe624, '= KEEP LOOKING'),
(0xe626, 0xe626, "* CARRY SET IF < 'A'"),
(0xe62a, 0xe62a, '='),
(0xe62c, 0xe62c, '5'),
(0xe62f, 0xe62f, 'POINT X TO ZERO LOCATION'),
(0xe632, 0xe632, 'GET CURRENT RETURN ADDRESS'),
(0xe634, 0xe634, "DID WE COME FROM 'EVALUATE ALPHA EXPR'?"),
(0xe638, 0xe638, 'YES - RETURN A ZERO VALUE'),
(0xe63a, 0xe63a, '* GET END OF ARRAYS ADDRESS AND'),
(0xe63c, 0xe63c, '* SAVE IT AT V43'),
(0xe63e, 0xe63e, '= ADD 7 TO END OF ARRAYS (EACH'),
(0xe641, 0xe641, '= VARIABLE = 7 BYTES) AND SAVE AT V41'),
(0xe643, 0xe643, '* GET END OF VARIABLES AND SAVE AT V47'),
(0xe647, 0xe647, 'MAKE A SEVEN BYTE SLOT FOR NEW VARIABLE AT'),
(0xe64a, 0xe64a, '= GET NEW END OF ARRAYS AND SAVE IT'),
(0xe64c, 0xe64c, '='),
(0xe64e, 0xe64e, '* GET NEW END OF VARIABLES AND SAVE IT'),
(0xe652, 0xe652, 'GET OLD END OF VARIABLES'),
(0xe654, 0xe654, 'GET NEW VARIABLE NAME'),
(0xe656, 0xe656, 'SAVE VARIABLE NAME'),
(0xe658, 0xe658, '* ZERO OUT THE FP VALUE OF THE NUMERIC'),
(0xe659, 0xe659, '* VARIABLE OR THE LENGTH AND ADDRESS'),
(0xe65a, 0xe65a, '* OF A STRING VARIABLE'),
(0xe660, 0xe660, 'STORE ADDRESS OF VARIABLE VALUE'),
(0xe663, 0xe663, '* FLOATING POINT -32768'),
(0xe668, 0xe668, 'GET AN INPUT CHARACTER FROM BASIC'),
(0xe66a, 0xe66a, 'GO EVALUATE NUMERIC EXPRESSION'),
(0xe66d, 0xe66d, 'GET FPA0 MANTISSA SIGN'),
(0xe66f, 0xe66f, "'FC' ERROR IF NEGATIVE NUMBER"),
(0xe671, 0xe671, "'TM' ERROR IF STRING VARIABLE"),
(0xe674, 0xe674, 'GET FPA0 EXPONENT'),
(0xe676, 0xe676, '* COMPARE TO 32768 - LARGEST INTEGER EXPONENT AND'),
(0xe678, 0xe678, '* BRANCH IF FPA0 < 32768'),
(0xe67a, 0xe67a, 'POINT X TO FP VALUE OF -32768'),
(0xe67d, 0xe67d, 'COMPARE -32768 TO FPA0'),
(0xe680, 0xe680, "'FC' ERROR IF NOT ="),
(0xe682, 0xe682, 'CONVERT FPA0 TO A TWO BYTE INTEGER'),
(0xe685, 0xe685, 'GET THE INTEGER'),
(0xe688, 0xe688, 'GET ARRAY FLAG AND VARIABLE TYPE'),
(0xe68a, 0xe68a, 'SAVE THEM ON STACK'),
(0xe68c, 0xe68c, 'DEAD SPACE CAUSED BY 1.2 REVISION'),
(0xe68d, 0xe68d, 'RESET DIMENSION COUNTER'),
(0xe68e, 0xe68e, 'GET VARIABLE NAME'),
(0xe690, 0xe690, 'SAVE VARIABLE NAME AND DIMENSION COUNTER'),
(0xe692, 0xe692, 'EVALUATE EXPRESSION (DIMENSlON LENGTH)'),
(0xe694, 0xe694, 'PULL OFF VARIABLE NAME, DIMENSlON COUNTER,'),
(0xe696, 0xe696, 'SAVE VARIABLE NAME AND VARIABLE TYPE'),
(0xe698, 0xe698, 'GET DIMENSION LENGTH'),
(0xe69a, 0xe69a, 'SAVE DIMENSION LENGTH, ARRAY FLAG, VARIABLE TYPE'),
(0xe69c, 0xe69c, 'INCREASE DIMENSION COUNTER'),
(0xe69d, 0xe69d, 'GET CURRENT INPUT CHARACTER'),
(0xe69f, 0xe69f, 'CHECK FOR ANOTHER DIMENSION'),
(0xe6a1, 0xe6a1, 'BRANCH IF MORE'),
(0xe6a3, 0xe6a3, 'SAVE DIMENSION COUNTER'),
(0xe6a5, 0xe6a5, "SYNTAX CHECK FOR A ')'"),
(0xe6a8, 0xe6a8, '* RESTORE VARIABLE TYPE AND ARRAY'),
(0xe6aa, 0xe6aa, '* FLAG - LEAVE DIMENSION LENGTH ON STACK'),
(0xe6ac, 0xe6ac, 'GET START OF ARRAYS'),
(0xe6ae, 0xe6ae, 'COMPARE TO END OF ARRAYS'),
(0xe6b0, 0xe6b0, 'BRANCH IF NO MATCH FOUND'),
(0xe6b2, 0xe6b2, 'GET VARIABLE IN QUESTION'),
(0xe6b4, 0xe6b4, 'COMPARE TO CURRENT VARIABLE'),
(0xe6b7, 0xe6b7, 'BRANCH IF ='),
(0xe6b9, 0xe6b9, 'GET OFFSET TO NEXT ARRAY VARIABLE'),
(0xe6bb, 0xe6bb, 'ADD TO CURRENT POINTER'),
(0xe6bd, 0xe6bd, 'KEEP SEARCHING'),
(0xe6bf, 0xe6bf, "'REDIMENSIONED ARRAY' ERROR"),
(0xe6c1, 0xe6c1, '* TEST ARRAY FLAG - IF <>0 YOU ARE TRYING'),
(0xe6c3, 0xe6c3, '* TO REDIMENSION AN ARRAY'),
(0xe6c5, 0xe6c5, 'GET NUMBER OF DIMENSIONS IN ARRAY'),
(0xe6c7, 0xe6c7, 'COMPARE TO THIS ARRAYS DIMENSIONS'),
(0xe6c9, 0xe6c9, 'BRANCH IF ='),
(0xe6cb, 0xe6cb, "'BAD SUBSCRIPT'"),
(0xe6cd, 0xe6cd, 'SKIP TWO BYTES'),
(0xe6ce, 0xe6ce, "'ILLEGAL FUNCTION CALL'"),
(0xe6d0, 0xe6d0, 'JUMP TO ERROR SERVICING ROUTINE'),
(0xe6d3, 0xe6d3, '* 5 BYTES/ARRAY ENTRY SAVE AT COEFPT'),
(0xe6d8, 0xe6d8, '= GET NAME OF ARRAY AND SAVE IN'),
(0xe6da, 0xe6da, '= FIRST 2 BYTES OF DESCRIPTOR'),
(0xe6dc, 0xe6dc, 'GET NUMBER OF DIMENSIONS AND SAVE IN'),
(0xe6de, 0xe6de, '* 5TH BYTE OF DESCRIPTOR'),
(0xe6e0, 0xe6e0, 'CHECK FOR ROOM FOR DESCRIPTOR IN FREE RAM'),
(0xe6e3, 0xe6e3, 'TEMPORARILY SAVE DESCRIPTOR ADDRESS'),
(0xe6e5, 0xe6e5, '* DEFAULT DIMENSION VALUE:X(10)'),
(0xe6e8, 0xe6e8, '= CHECK ARRAY FLAG AND BRANCH IF'),
(0xe6ea, 0xe6ea, '= NOT DIMENSIONING AN ARRAY'),
(0xe6ec, 0xe6ec, 'GET DIMENSION LENGTH'),
(0xe6ee, 0xe6ee, 'ADD ONE (X(0) HAS A LENGTH OF ONE)'),
(0xe6f1, 0xe6f1, 'SAVE LENGTH OF ARRAY DIMENSION'),
(0xe6f3, 0xe6f3, 'MULTIPLY ACCUM ARRAY SIZE NUMBER LENGTH'),
(0xe6f5, 0xe6f5, 'TEMP STORE NEW CURRENT ACCUMULATED ARRAY SIZE'),
(0xe6f7, 0xe6f7, 'BUMP POINTER UP TWO'),
(0xe6f9, 0xe6f9, '* DECREMENT DIMENSION COUNTER AND BRANCH IF'),
(0xe6fb, 0xe6fb, '* NOT DONE WITH ALL DIMENSIONS'),
(0xe6fd, 0xe6fd, 'SAVE ADDRESS OF (END OF ARRAY DESCRIPTOR - 5)'),
(0xe6ff, 0xe6ff, 'ADD TOTAL SIZE OF NEW ARRAY'),
(0xe701, 0xe701, "'OM' ERROR IF > $FFFF"),
(0xe705, 0xe705, 'SAVE END OF ARRAY IN X'),
(0xe707, 0xe707, 'MAKE SURE THERE IS ENOUGH FREE RAM FOR ARRAY'),
(0xe70a, 0xe70a, 'SUBTRACT OUT THE (STACK BUFFER - 5)'),
(0xe70d, 0xe70d, 'SAVE NEW END OF ARRAYS'),
(0xe70f, 0xe70f, 'ZERO = TERMINATOR BYTE'),
(0xe710, 0xe710, '* STORE TWO TERMINATOR BYTES AT'),
(0xe712, 0xe712, '* THE END OF THE ARRAY DESCRIPTOR'),
(0xe718, 0xe718, 'GET ADDRESS OF START OF DESCRIPTOR'),
(0xe71a, 0xe71a, 'GET MSB OF END OF ARRAYS; LSB ALREADY THERE'),
(0xe71c, 0xe71c, 'SUBTRACT OUT ADDRESS OF START OF DESCRIPTOR'),
(0xe71e, 0xe71e, 'SAVE LENGTH OF (ARRAY AND DESCRIPTOR)'),
(0xe720, 0xe720, '* GET ARRAY FLAG AND BRANCH'),
(0xe722, 0xe722, '* BACK IF DIMENSIONING'),
(0xe724, 0xe724, 'GET THE NUMBER OF DIMENSIONS'),
(0xe726, 0xe726, 'TEMPORARILY SAVE'),
(0xe728, 0xe728, '* INITIALIZE POINTER'),
(0xe729, 0xe729, '* TO ZERO'),
(0xe72a, 0xe72a, 'SAVE ACCUMULATED POINTER'),
(0xe72c, 0xe72c, '* PULL DIMENSION ARGUMENT OFF THE'),
(0xe72e, 0xe72e, '* STACK AND SAVE IT'),
(0xe730, 0xe730, "COMPARE TO STORED 'DIM' ARGUMENT"),
(0xe733, 0xe733, '\'BS\' ERROR IF > = "DIM" ARGUMENT'),
(0xe735, 0xe735, '* GET ACCUMULATED POINTER AND'),
(0xe737, 0xe737, '* BRANCH IF 1ST DIMENSION'),
(0xe739, 0xe739, '= MULTIPLY ACCUMULATED POINTER AND DIMENSION'),
(0xe73b, 0xe73b, '= LENGTH AND ADD TO CURRENT ARGUMENT'),
(0xe73d, 0xe73d, 'MOVE POINTER TO NEXT DIMENSION'),
(0xe73f, 0xe73f, '* DECREMENT DIMENSION COUNTER AND'),
(0xe741, 0xe741, '* BRANCH IF ANY DIMENSIONS LEFT'),
(0xe746, 0xe746, 'TIMES 2'),
(0xe748, 0xe748, 'TIMES 4'),
(0xe749, 0xe749, 'TIMES 5'),
(0xe74b, 0xe74b, 'ADD OFFSET TO START OF ARRAY'),
(0xe74d, 0xe74d, 'ADJUST POINTER FOR SIZE OF DESCRIPTOR'),
(0xe74f, 0xe74f, 'SAVE POINTER TO ARRAY VALUE'),
(0xe752, 0xe752, '16 SHIFTS TO DO A MULTIPLY'),
(0xe754, 0xe754, 'SHIFT COUNTER'),
(0xe756, 0xe756, '* GET SIZE OF DIMENSION'),
(0xe758, 0xe758, '* AND SAVE IT'),
(0xe75a, 0xe75a, '* ZERO'),
(0xe75b, 0xe75b, '* ACCD'),
(0xe75c, 0xe75c, '= SHIFT ACCB LEFT'),
(0xe75d, 0xe75d, '= ONE BIT'),
(0xe75e, 0xe75e, "BS' ERROR IF CARRY"),
(0xe760, 0xe760, '* SHIFT MULTIPLICAND LEFT ONE'),
(0xe762, 0xe762, '* BIT - ADD MULTIPLIER TO ACCUMULATOR'),
(0xe764, 0xe764, '* IF CARRY <> 0'),
(0xe766, 0xe766, 'ADD MULTIPLIER TO ACCD'),
(0xe768, 0xe768, "BS' ERROR IF CARRY (>$FFFF)"),
(0xe76a, 0xe76a, '* DECREMENT SHIFT COUNTER'),
(0xe76c, 0xe76c, '* IF NOT DONE'),
(0xe76f, 0xe76f, "BS' ERROR"),
(0xe772, 0xe772, 'PUT STACK POINTER INTO ACCD'),
(0xe774, 0xe774, 'SUBTRACT END OF ARRAYS'),
(0xe776, 0xe776, 'SKIP ONE BYTE'),
(0xe777, 0xe777, 'CLEAR MS BYTE OF ACCD'),
(0xe778, 0xe778, 'SET VARIABLE TYPE TO NUMERIC'),
(0xe77a, 0xe77a, 'SAVE ACCD IN TOP OF FACA'),
(0xe77c, 0xe77c, 'EXPONENT REQUIRED IF THE TOP TWO BYTES'),
(0xe77e, 0xe77e, 'CONVERT THE REST OF FPA0 TO AN INTEGER'),
(0xe781, 0xe781, "TM' ERROR IF STRING VARIABLE"),
(0xe784, 0xe784, '*CONVERT FP NUMBER TO ASCII STRING IN'),
(0xe787, 0xe787, '*THE STRING BUFFER'),
(0xe78a, 0xe78a, 'PURGE THE RETURN ADDRESS FROM THE STACK'),
(0xe78c, 0xe78c, '*POINT X TO STRING BUFFER AND SAVE'),
(0xe78f, 0xe78f, '*THE STRING IN THE STRING SPACE'),
(0xe791, 0xe791, 'SAVE X IN V4D'),
(0xe793, 0xe793, 'RESERVE ACCB BYTES IN STRING SPACE'),
(0xe795, 0xe795, 'SAVE NEW STRING ADDRESS'),
(0xe797, 0xe797, 'SAVE LENGTH OF RESERVED BLOCK'),
(0xe79a, 0xe79a, 'MOVE POINTER BACK ONE'),
(0xe79c, 0xe79c, '* INITIALIZE'),
(0xe79e, 0xe79e, '* TERMINATORS'),
(0xe7a0, 0xe7a0, '* TO "'),
(0xe7a2, 0xe7a2, 'MOVE POINTER UP ONE'),
(0xe7a4, 0xe7a4, 'TEMPORARILY SAVE START OF STRING'),
(0xe7a6, 0xe7a6, 'SAVE START OF STRING IN TEMP DESCRIPTOR'),
(0xe7a8, 0xe7a8, 'INITIALIZE CHARACTER COUNTER TO - 1'),
(0xe7aa, 0xe7aa, 'INCREMENT CHARACTER COUNTER'),
(0xe7ab, 0xe7ab, 'GET CHARACTER'),
(0xe7ad, 0xe7ad, 'BRANCH IF END OF LINE'),
(0xe7af, 0xe7af, '* CHECK FOR TERMINATORS'),
(0xe7b1, 0xe7b1, '* IN CHARAC AND ENDCHR'),
(0xe7b3, 0xe7b3, "* DON'T MOVE POINTER BACK"),
(0xe7b5, 0xe7b5, '* ONE IF TERMINATOR IS "MATCHED"'),
(0xe7b7, 0xe7b7, '= COMPARE CHARACTER TO STRING DELIMITER'),
(0xe7b9, 0xe7b9, "= & DON'T MOVE POINTER BACK IF SO"),
(0xe7bb, 0xe7bb, 'MOVE POINTER BACK ONE'),
(0xe7bd, 0xe7bd, 'SAVE END OF STRING ADDRESS'),
(0xe7bf, 0xe7bf, 'SAVE STRING LENGTH IN TEMP DESCRIPTOR'),
(0xe7c1, 0xe7c1, 'GET INITlAL STRING START'),
(0xe7c3, 0xe7c3, 'COMPARE TO START OF STRING BUFFER'),
(0xe7c7, 0xe7c7, 'BRANCH IF > START OF STRING BUFFER'),
(0xe7c9, 0xe7c9, 'GO RESERVE SPACE FOR THE STRING'),
(0xe7cb, 0xe7cb, 'POINT X TO THE BEGINNING OF THE STRING'),
(0xe7cd, 0xe7cd, 'MOVE (B) BYTES FROM (X) TO'),
(0xe7d0, 0xe7d0, 'GET NEXT AVAILABLE STRING STACK DESCRIPTOR'),
(0xe7d2, 0xe7d2, 'COMPARE TO TOP OF STRING DESCRIPTOR STACK - WAS #CFNBUF'),
(0xe7d5, 0xe7d5, 'FORMULA O.K.'),
(0xe7d7, 0xe7d7, "STRING FORMULA TOO COMPLEX' ERROR"),
(0xe7d9, 0xe7d9, 'JUMP TO ERROR SERVICING ROUTINE'),
(0xe7dc, 0xe7dc, '* GET LENGTH OF STRING AND SAVE IT'),
(0xe7de, 0xe7de, '0'),
(0xe7e0, 0xe7e0, '= GET START ADDRESS OF ACTUAL STRING'),
(0xe7e2, 0xe7e2, '= AND SAVE IN BYTES 2,3 OF DESCRIPTOR'),
(0xe7e4, 0xe7e4, '* VARIABLE TYPE = STRING'),
(0xe7e6, 0xe7e6, '* SAVE IN VARIABLE TYPE FLAG'),
(0xe7e8, 0xe7e8, '= SAVE START OF DESCRIPTOR'),
(0xe7ea, 0xe7ea, '= ADDRESS IN LASTPT AND FPA0'),
(0xe7ec, 0xe7ec, '5 BYTES/STRING DESCRIPTOR'),
(0xe7ee, 0xe7ee, 'NEXT AVAILABLE STRING VARIABLE DESCRIPTOR'),
(0xe7f1, 0xe7f1, 'CLEAR STRING REORGANIZATION FLAG'),
(0xe7f3, 0xe7f3, '* PUSH THE LENGTH OF THE'),
(0xe7f4, 0xe7f4, '* STRING ONTO THE STACK'),
(0xe7f6, 0xe7f6, 'GET START OF STRING VARIABLES'),
(0xe7f8, 0xe7f8, 'SUBTRACT STRING LENGTH'),
(0xe7fa, 0xe7fa, 'COMPARE TO START OF STRING STORAGE'),
(0xe7fd, 0xe7fd, 'IF BELOW START, THEN REORGANIZE'),
(0xe7ff, 0xe7ff, 'SAVE NEW START OF STRING VARIABLES'),
(0xe801, 0xe801, 'GET START OF STRING VARIABLES'),
(0xe803, 0xe803, 'ADD ONE'),
(0xe805, 0xe805, 'SAVE START ADDRESS OF NEWLY RESERVED SPACE'),
(0xe807, 0xe807, 'RESTORE NUMBER OF BYTES RESERVED AND RETURN'),
(0xe809, 0xe809, "OUT OF STRING SPACE' ERROR"),
(0xe80b, 0xe80b, 'TOGGLE REORGANIZATiON FLAG'),
(0xe80d, 0xe80d, 'ERROR IF FRESHLY REORGANIZED'),
(0xe80f, 0xe80f, 'GO REORGANIZE STRING SPACE'),
(0xe811, 0xe811, 'GET BACK THE NUMBER OF BYTES TO RESERVE'),
(0xe813, 0xe813, 'TRY TO RESERVE ACCB BYTES AGAIN'),
(0xe815, 0xe815, 'GET THE TOP OF STRING SPACE'),
(0xe817, 0xe817, 'SAVE TOP OF UNORGANIZED STRING SPACE'),
(0xe819, 0xe819, '* ZERO OUT ACCD'),
(0xe81a, 0xe81a, '* AND RESET VARIABLE'),
(0xe81b, 0xe81b, '* POINTER TO 0'),
(0xe81d, 0xe81d, 'POINT X TO START OF STRING SPACE'),
(0xe81f, 0xe81f, 'SAVE POINTER IN V47'),
(0xe821, 0xe821, 'POINT X TO START OF STRING DESCRIPTOR STACK'),
(0xe824, 0xe824, 'COMPARE TO ADDRESS OF NEXT AVAILABLE DESCRIPTOR'),
(0xe826, 0xe826, 'BRANCH IF TOP OF STRING STACK'),
(0xe828, 0xe828, 'CHECK FOR STRING IN UNORGANIZED STRING SPACE'),
(0xe82a, 0xe82a, 'KEEP CHECKING'),
(0xe82c, 0xe82c, 'GET THE END OF BASIC PROGRAM'),
(0xe82e, 0xe82e, 'COMPARE TO END OF VARIABLES'),
(0xe830, 0xe830, 'BRANCH IF AT TOP OF VARIABLES'),
(0xe832, 0xe832, 'CHECK FOR STRING IN UNORGANIZED STRING SPACE'),
(0xe834, 0xe834, 'KEEP CHECKING VARIABLES'),
(0xe836, 0xe836, 'SAVE ADDRESS OF THE END OF VARIABLES'),
(0xe838, 0xe838, 'GET CURRENT ARRAY POINTER'),
(0xe83a, 0xe83a, 'COMPARE TO THE END OF ARRAYS'),
(0xe83c, 0xe83c, 'BRANCH IF AT END OF ARRAYS'),
(0xe83e, 0xe83e, 'GET LENGTH OF ARRAY AND DESCRIPTOR'),
(0xe840, 0xe840, '* ADD TO CURRENT ARRAY POINTER'),
(0xe842, 0xe842, '* AND SAVE IT'),
(0xe844, 0xe844, 'GET 1ST CHARACTER OF VARIABLE NAME'),
(0xe846, 0xe846, 'BRANCH IF NUMERIC ARRAY'),
(0xe848, 0xe848, 'GET THE NUMBER OF DIMENSIONS IN THIS ARRAY'),
(0xe84a, 0xe84a, 'MULTIPLY BY 2'),
(0xe84b, 0xe84b, 'ADD FIVE BYTES (VARIABLE NAME, ARRAY'),
(0xe84d, 0xe84d, 'X NOW POINTS TO START OF ARRAY ELEMENTS'),
(0xe84e, 0xe84e, 'AT END OF THIS ARRAY?'),
(0xe850, 0xe850, 'YES - CHECK FOR ANOTHER'),
(0xe852, 0xe852, 'CHECK FOR STRING LOCATED IN'),
(0xe854, 0xe854, 'KEEP CHECKING ELEMENTS IN THIS ARRAY'),
(0xe856, 0xe856, 'GET F1RST BYTE OF VARIABLE NAME'),
(0xe858, 0xe858, 'MOVE POINTER TO DESCRIPTOR'),
(0xe85a, 0xe85a, 'BRANCH IF VARIABLE IS NUMERIC'),
(0xe85c, 0xe85c, 'GET THE LENGTH OF THE STRING'),
(0xe85e, 0xe85e, 'BRANCH IF NULL - NO STRING'),
(0xe860, 0xe860, 'GET STARTING ADDRESS OF THE STRING'),
(0xe862, 0xe862, 'COMPARE TO THE START OF STRING VARIABLES'),
(0xe865, 0xe865, 'BRANCH IF THIS STRING IS STORED IN'),
(0xe867, 0xe867, 'COMPARE TO START OF STRING SPACE'),
(0xe86a, 0xe86a, 'BRANCH IF NOT STORED IN THE STRING SPACE'),
(0xe86c, 0xe86c, 'SAVE VARIABLE POINTER IF STORED IN STRING SPACE'),
(0xe86e, 0xe86e, 'SAVE STRING STARTING ADDRESS'),
(0xe870, 0xe870, 'MOVE TO NEXT VARIABLE DESCRIPTOR'),
(0xe873, 0xe873, 'GET ADDRESS OF THE DESCRIPTOR FOR THE'),
(0xe875, 0xe875, 'BRANCH IF NONE FOUND AND REORGANIZATION DONE'),
(0xe877, 0xe877, 'CLEAR MS BYTE OF LENGTH'),
(0xe878, 0xe878, 'GET LENGTH OF STRING'),
(0xe87a, 0xe87a, 'SUBTRACT ONE'),
(0xe87b, 0xe87b, 'ADD LENGTH OF STRING TO ITS STARTING ADDRESS'),
(0xe87d, 0xe87d, 'SAVE AS MOVE STARTING ADDRESS'),
(0xe87f, 0xe87f, 'POINT X TO THE START OF ORGANIZED STRING VARIABLES'),
(0xe881, 0xe881, 'SAVE AS MOVE ENDING ADDRESS'),
(0xe883, 0xe883, 'MOVE STRING FROM CURRENT POSITION TO THE'),
(0xe886, 0xe886, 'POINT X TO STRING DESCRIPTOR'),
(0xe888, 0xe888, '* GET NEW STARTING ADDRESS OF STRING AND'),
(0xe88a, 0xe88a, '* SAVE IT IN DESCRIPTOR'),
(0xe88c, 0xe88c, 'GET NEW TOP OF UNORGANIZED STRING SPACE'),
(0xe88e, 0xe88e, 'MOVE POINTER BACK ONE'),
(0xe890, 0xe890, 'JUMP BACK AND REORGANIZE SOME MORE'),
(0xe893, 0xe893, '* GET DESCRIPTOR ADDRESS OF STRING A'),
(0xe895, 0xe895, '* AND SAVE IT ON THE STACK'),
(0xe897, 0xe897, 'GET DESCRIPTOR ADDRESS OF STRING B'),
(0xe89a, 0xe89a, "TM' ERROR IF NUMERIC VARIABLE"),
(0xe89d, 0xe89d, '* POINT X TO STRING A DESCRIPTOR'),
(0xe89f, 0xe89f, '* ADDRESS AND SAVE IT IN RESSGN'),
(0xe8a1, 0xe8a1, 'GET LENGTH OF STRING A'),
(0xe8a3, 0xe8a3, 'POINT X TO DESCRIPTOR OF STRING B'),
(0xe8a5, 0xe8a5, 'ADD LENGTH OF STRING B TO STR1NG A'),
(0xe8a7, 0xe8a7, 'BRANCH IF LENGTH < 256'),
(0xe8a9, 0xe8a9, "STRING TOO LONG' ERROR IF LENGTH > 255"),
(0xe8ab, 0xe8ab, 'JUMP TO ERROR SERVICING ROUTINE'),
(0xe8ae, 0xe8ae, 'RESERVE ROOM IN STRING SPACE FOR NEW STRING'),
(0xe8b1, 0xe8b1, 'GET DESCRIPTOR ADDRESS OF STRING A'),
(0xe8b3, 0xe8b3, 'GET LENGTH OF STRING A'),
(0xe8b5, 0xe8b5, 'MOVE STRING A INTO RESERVED BUFFER IN STRING SPACE'),
(0xe8b7, 0xe8b7, 'GET DESCRIPTOR ADDRESS OF STRING B'),
(0xe8b9, 0xe8b9, 'GET LENGTH AND ADDRESS OF STRING B'),
(0xe8bb, 0xe8bb, 'MOVE STRING B INTO REST OF RESERVED BUFFER'),
(0xe8bd, 0xe8bd, 'POINT X TO DESCRIPTOR OF STRING A'),
(0xe8bf, 0xe8bf, 'DELETE STRING A IF LAST STRING ON STRING STACK'),
(0xe8c1, 0xe8c1, 'PUT STRING DESCRIPTOR ON THE STRING STACK'),
(0xe8c4, 0xe8c4, 'BRANCH BACK TO EXPRESSION EVALUATION'),
(0xe8c7, 0xe8c7, 'POINT X TO SOURCE ADDRESS'),
(0xe8c9, 0xe8c9, 'POINT U TO DESTINATION ADDRESS'),
(0xe8cb, 0xe8cb, 'COMPENSATION FOR THE DECB BELOW'),
(0xe8cc, 0xe8cc, 'GO MOVE THE BYTES'),
(0xe8ce, 0xe8ce, '* GET A SOURCE BYTE AND MOVE IT'),
(0xe8d0, 0xe8d0, '* TO THE DESTINATION'),
(0xe8d2, 0xe8d2, 'DECREMENT BYTE COUNTER'),
(0xe8d3, 0xe8d3, 'BRANCH IF ALL BYTES NOT MOVED'),
(0xe8d5, 0xe8d5, 'SAVE ENDING ADDRESS IN FRESPC'),
(0xe8d8, 0xe8d8, "TM' ERROR IF VARIABLE TYPE = NUMERIC"),
(0xe8db, 0xe8db, 'GET ADDRESS OF SELECTED STRING DESCRIPTOR'),
(0xe8dd, 0xe8dd, 'GET LENGTH OF STRING'),
(0xe8df, 0xe8df, '* CHECK TO SEE IF THIS STRING DESCRIPTOR WAS'),
(0xe8e1, 0xe8e1, '* THE LAST ONE PUT ON THE STRING STACK AND'),
(0xe8e3, 0xe8e3, 'GET START ADDRESS OF STRING JUST REMOVED'),
(0xe8e5, 0xe8e5, 'MOVE POINTER DOWN ONE'),
(0xe8e7, 0xe8e7, 'COMPARE TO START OF STRING VARIABLES'),
(0xe8e9, 0xe8e9, 'BRANCH IF THIS STRING IS NOT AT THE BOTTOM'),
(0xe8eb, 0xe8eb, 'SAVE LENGTH; ACCA WAS CLEARED'),
(0xe8ed, 0xe8ed, '* ADD THE LENGTH OF THE JUST REMOVED STRING'),
(0xe8ef, 0xe8ef, '* TO THE START OF STRING VARIABLES - THIS WILL'),
(0xe8f1, 0xe8f1, 'RESTORE LENGTH'),
(0xe8f3, 0xe8f3, 'ADD ONE TO POINTER'),
(0xe8f6, 0xe8f6, '*POINT X TO ADDRESS OF STRING NOT'),
(0xe8f8, 0xe8f8, '*ON THE STRING STACK'),
(0xe8f9, 0xe8f9, '*COMPARE TO LAST USED DESCRIPTOR ADDRESS'),
(0xe8fb, 0xe8fb, '*ON THE STRING STACK, RETURN IF DESCRIPTOR'),
(0xe8fd, 0xe8fd, 'SAVE LAST USED DESCRIPTOR AS NEXT AVAILABLE'),
(0xe8ff, 0xe8ff, '* MOVE LAST USED DESCRIPTOR BACK 5 BYTES'),
(0xe901, 0xe901, '* AND SAVE AS THE LAST USED DESCRIPTOR ADDR'),
(0xe903, 0xe903, 'SET ZERO FLAG'),
(0xe905, 0xe905, 'POINT X TO PROPER STRING AND GET LENGTH'),
(0xe907, 0xe907, 'CONVERT ACCB TO FP NUMBER IN FPA0'),
(0xe90a, 0xe90a, 'GET LENGTH AND ADDRESS OF STRING'),
(0xe90c, 0xe90c, 'SET VARIABLE TYPE TO NUMERIC'),
(0xe90e, 0xe90e, 'SET FLAGS ACCORDING TO LENGTH'),
(0xe910, 0xe910, 'CONVERT FPA0 TO AN INTEGER IN ACCD'),
(0xe913, 0xe913, '* RESERVE ONE BYTE IN'),
(0xe915, 0xe915, '* THE STRING SPACE'),
(0xe918, 0xe918, 'GET ASCII STRING VALUE'),
(0xe91a, 0xe91a, 'SAVE RESERVED STRING DESCRIPTOR IN TEMP DESCRIPTOR'),
(0xe91d, 0xe91d, "SAVE THE STRING (IT'S ONLY ONE BYTE)"),
(0xe91f, 0xe91f, 'PURGE THE RETURN ADDRESS OFF OF THE STACK'),
(0xe921, 0xe921, 'PUT TEMP DESCRIPTOR DATA ONTO STRING STACK'),
(0xe924, 0xe924, 'PUT 1ST CHARACTER OF STRING INTO ACCB'),
(0xe926, 0xe926, 'CONVERT ACCB INTO FP NUMBER IN FPA0'),
(0xe928, 0xe928, 'POINT X TO STRING DESCRIPTOR'),
(0xe92a, 0xe92a, "FC' ERROR IF NULL STRING"),
(0xe92c, 0xe92c, 'GET FIRST BYTE OF STRING'),
(0xe92f, 0xe92f, 'GET ARGUMENTS FROM STACK'),
(0xe931, 0xe931, 'CLEAR STRING POINTER OFFSET - OFFSET = 0 FOR LEFT$'),
(0xe932, 0xe932, '* COMPARE LENGTH PARAMETER TO LENGTH OF'),
(0xe934, 0xe934, '* STRING AND BRANCH IF LENGTH OF STRING'),
(0xe936, 0xe936, 'USE LENGTH OF STRING OTHERWISE'),
(0xe938, 0xe938, 'CLEAR STRING POINTER OFFSET (0 FOR LEFT$)'),
(0xe939, 0xe939, 'PUSH PARAMETERS ONTO STACK'),
(0xe93b, 0xe93b, 'RESERVE ACCB BYTES IN THE STRING SPACE'),
(0xe93e, 0xe93e, 'POINT X TO STRING DESCRIPTOR'),
(0xe940, 0xe940, 'GET ADDRESS OF OLD STRING (X=ADDRESS)'),
(0xe942, 0xe942, '* PULL STRING POINTER OFFSET OFF OF THE STACK'),
(0xe944, 0xe944, '* AND ADD IT TO STRING ADDRESS'),
(0xe945, 0xe945, 'PULL LENGTH PARAMETER OFF OF THE STACK'),
(0xe947, 0xe947, 'MOVE ACCB BYTES FROM (X) TO [FRESPC]'),
(0xe94a, 0xe94a, 'PUT TEMP STRING DESCRIPTOR ONTO THE STRING STACK'),
(0xe94c, 0xe94c, 'GET ARGUMENTS FROM STACK'),
(0xe94e, 0xe94e, 'ACCA=LENGTH PARAMETER - LENGTH OF OLD STRING'),
(0xe950, 0xe950, 'NOW ACCA = LENGTH OF OLD STRING'),
(0xe951, 0xe951, 'PUT NEW STRING IN THE STRING SPACE'),
(0xe953, 0xe953, '* GET DEFAULT VALUE OF LENGTH AND'),
(0xe955, 0xe955, '* SAVE IT IN FPA0'),
(0xe957, 0xe957, 'GET CURRENT CHARACTER FROM BASIC'),
(0xe959, 0xe959, 'ARGUMENT DELIMITER?'),
(0xe95b, 0xe95b, 'YES - NO LENGTH PARAMETER GIVEN'),
(0xe95d, 0xe95d, 'SYNTAX CHECK FOR COMMA'),
(0xe960, 0xe960, 'EVALUATE NUMERIC EXPRESSION (LENGTH)'),
(0xe962, 0xe962, 'GET ARGUMENTS FROM STACK'),
(0xe964, 0xe964, "FC' ERROR IF NULL STRING"),
(0xe966, 0xe966, 'CLEAR LENGTH COUNTER (DEFAULT VALUE)'),
(0xe967, 0xe967, '*SUOTRACT ONE FROM POSITION PARAMETER (THESE'),
(0xe968, 0xe968, '*ROUTINES EXPECT 1ST POSITION TO BE ZERO, NOT ONE)'),
(0xe96a, 0xe96a, 'IF POSITION > LENGTH OF OLD STRING, THEN NEW'),
(0xe96c, 0xe96c, 'SAVE ABSOLUTE POSITION PARAMETER IN ACCB'),
(0xe96e, 0xe96e, 'ACCB=POSITION-LENGTH OF OLD STRING'),
(0xe970, 0xe970, 'NOW ACCB=LENGTH OF OLDSTRING-POSITION'),
(0xe971, 0xe971, '*IF THE AMOUNT OF OLD STRING TO THE RIGHT OF'),
(0xe973, 0xe973, '*POSITION IS <= THE LENGTH PARAMETER, BRANCH AND'),
(0xe975, 0xe975, 'GET LENGTH OF NEW STRING'),
(0xe977, 0xe977, 'PUT NEW STRING IN STRING SPACE'),
(0xe979, 0xe979, 'SYNTAX CHECK FOR A ")"'),
(0xe97c, 0xe97c, 'LOAD THE RETURN ADDRESS INTO U REGISTER'),
(0xe97e, 0xe97e, '* GET ADDRESS OF STRING AND'),
(0xe980, 0xe980, '* SAVE IT IN V4D'),
(0xe982, 0xe982, '= PUT LENGTH OF STRING IN'),
(0xe984, 0xe984, '= BOTH ACCA AND ACCB'),
(0xe986, 0xe986, 'REMOVE DESCRIPTOR AND RETURN ADDRESS FROM STACK'),
(0xe988, 0xe988, 'JUMP TO ADDRESS IN U REGISTER'),
(0xe98a, 0xe98a, "ILLEGAL FUNCTION CALL'"),
(0xe98d, 0xe98d, 'GET NEXT BASIC INPUT CHARACTER'),
(0xe98f, 0xe98f, 'EVALUATE A NUMERIC EXPRESSION'),
(0xe992, 0xe992, 'CONVERT FPA0 TO INTEGER IN ACCD'),
(0xe995, 0xe995, 'TEST MS BYTE OF INTEGER'),
(0xe996, 0xe996, "FC' ERROR IF EXPRESSION > 255"),
(0xe998, 0xe998, 'GET CURRENT INPUT CHARACTER FROM BASIC'),
(0xe99a, 0xe99a, 'POINT X TO STRING ADDRESS'),
(0xe99d, 0xe99d, 'IF NULL STRING SET FPA0'),
(0xe9a1, 0xe9a1, 'SAVE INPUT POINTER IN REGISTER U'),
(0xe9a3, 0xe9a3, 'POINT INPUT POINTER TO ADDRESS OF STRING'),
(0xe9a5, 0xe9a5, 'TO END OF STRING TERMINATOR'),
(0xe9a6, 0xe9a6, 'GET LAST BYTE OF STRING'),
(0xe9a8, 0xe9a8, 'SAVE INPUT POINTER, STRING TERMINATOR'),
(0xe9aa, 0xe9aa, 'CLEAR STRING TERMINATOR : FOR ASCII - FP CONVERSION'),
(0xe9ac, 0xe9ac, 'GET CURRENT CHARACTER FROM BASIC'),
(0xe9ae, 0xe9ae, 'CONVERT AN ASCII STRING TO FLOATING POINT'),
(0xe9b1, 0xe9b1, 'RESTORE CHARACTERS AND POINTERS'),
(0xe9b3, 0xe9b3, 'REPLACE STRING TERMINATOR'),
(0xe9b5, 0xe9b5, 'RESTORE INPUT CHARACTER'),
(0xe9b8, 0xe9b8, '* EVALUATE AN EXPRESSION, RETURN'),
(0xe9ba, 0xe9ba, '* THE VALUE IN X; STORE IT IN BINVAL'),
(0xe9bc, 0xe9bc, 'SYNTAX CHECK FOR A COMMA'),
(0xe9bf, 0xe9bf, 'EVALUATE EXPRESSION IN RANGE 0 <= X < 256'),
(0xe9c1, 0xe9c1, 'EVALUATE NUMERIC EXPRESSION'),
(0xe9c4, 0xe9c4, 'GET SIGN OF FPA0 MANTISSA'),
(0xe9c6, 0xe9c6, "ILLEGAL FUNCTION CALL' IF NEGATIVE"),
(0xe9c8, 0xe9c8, 'GET EXPONENT OF FPA0'),
(0xe9ca, 0xe9ca, 'COMPARE TO LARGEST POSITIVE INTEGER'),
(0xe9cc, 0xe9cc, "ILLEGAL FUNCTION CALL' IF TOO LARGE"),
(0xe9ce, 0xe9ce, 'SHIFT BINARY POINT TO EXTREME RIGHT OF FPA0'),
(0xe9d1, 0xe9d1, 'LOAD X WITH LOWER TWO BYTES OF FPA0'),
(0xe9d4, 0xe9d4, 'CONVERT FPA0 TO INTEGER IN REGISTER X'),
(0xe9d6, 0xe9d6, "GET THE VALUE BEING 'PEEK'ED"),
(0xe9d8, 0xe9d8, 'CONVERT ACCB INTO A FP NUMBER'),
(0xe9db, 0xe9db, 'EVALUATE 2 EXPRESSIONS'),
(0xe9dd, 0xe9dd, "GET THE ADDRESS TO BE 'POKE'ED"),
(0xe9df, 0xe9df, 'STORE THE DATA IN THAT ADDRESS'),
(0xe9e2, 0xe9e2, 'SAVE ZERO FLAG ON STACK'),
(0xe9e4, 0xe9e4, 'CONVERT DECIMAL LINE NUMBER TO BINARY'),
(0xe9e7, 0xe9e7, '* FIND RAM ADDRESS OF THAT LINE NUMBER AND'),
(0xe9ea, 0xe9ea, '* SAVE IT IN LSTTXT'),
(0xe9ec, 0xe9ec, 'GET ZERO FLAG FROM STACK'),
(0xe9ee, 0xe9ee, 'BRANCH IF END OF LINE'),
(0xe9f0, 0xe9f0, 'GET CURRENT CHARACTER FROM BASIC'),
(0xe9f2, 0xe9f2, 'BRANCH IF END OF LINE'),
(0xe9f4, 0xe9f4, 'MINUS TOKEN (IS IT A RANGE OF LINE NUMBERS?)'),
(0xe9f6, 0xe9f6, 'NO - RETURN'),
(0xe9f8, 0xe9f8, 'GET NEXT CHARACTER FROM BASIC'),
(0xe9fa, 0xe9fa, 'BRANCH IF END OF LINE'),
(0xe9fc, 0xe9fc, 'GET ENDING LINE NUMBER'),
(0xe9ff, 0xe9ff, 'BRANCH IF LEGAL LINE NUMBER'),
(0xea02, 0xea02, '* SET THE DEFAULT ENDING LINE NUMBER'),
(0xea05, 0xea05, '* TO $FFFF'),
(0xea07, 0xea07, 'PURGE RETURN ADDRESS FROM THE STACK'),
(0xea09, 0xea09, 'POINT X TO STARTING LINE ADDRESS'),
(0xea0b, 0xea0b, 'MOVE CURSOR TO START OF A NEW LINE'),
(0xea0e, 0xea0e, 'CHECK FOR A BREAK OR PAUSE'),
(0xea11, 0xea11, 'GET ADDRESS OF NEXT BASIC LINE'),
(0xea13, 0xea13, 'BRANCH IF NOT END OF PROGRAM'),
(0xea15, 0xea15, "RETURN TO BASIC'S MAIN INPUT LOOP"),
(0xea18, 0xea18, 'SAVE NEW STARTING LINE ADDRESS'),
(0xea1a, 0xea1a, '* GET THE LINE NUMBER OF THIS LINE AND'),
(0xea1c, 0xea1c, '* COMPARE IT TO ENDING LINE NUMBER'),
(0xea1f, 0xea1f, 'EXIT IF LINE NUMBER > ENDING LINE NUMBER'),
(0xea21, 0xea21, 'PRINT THE NUMBER IN ACCD ON SCREEN IN DECIMAL'),
(0xea24, 0xea24, 'SEND A SPACE TO CONSOLE OUT'),
(0xea27, 0xea27, 'GET RAM ADDRESS OF THIS LINE'),
(0xea29, 0xea29, 'UNCRUNCH A LINE'),
(0xea2b, 0xea2b, 'POINT X TO START OF NEXT LINE'),
(0xea2f, 0xea2f, 'POINT U TO BUFFER FULL OF UNCRUNCHED LINE'),
(0xea32, 0xea32, 'GET A BYTE FROM THE BUFFER'),
(0xea34, 0xea34, 'BRANCH IF END OF BUFFER'),
(0xea36, 0xea36, 'SEND CHARACTER TO CONSOLE OUT'),
(0xea39, 0xea39, 'GET ANOTHER CHARACTER'),
(0xea3b, 0xea3b, 'MOVE POINTER PAST ADDRESS OF NEXT LINE AND LINE NUMBER'),
(0xea3d, 0xea3d, 'UNCRUNCH LINE INTO LINE INPUT BUFFER'),
(0xea41, 0xea41, 'GET A CHARACTER'),
(0xea43, 0xea43, 'BRANCH IF END OF LINE'),
(0xea45, 0xea45, "BRANCH IF IT'S A TOKEN"),
(0xea47, 0xea47, 'CHECK FOR END OF SUB LINE'),
(0xea49, 0xea49, 'BRNCH IF NOT END OF SUB LINE'),
(0xea4b, 0xea4b, 'GET CHARACTER FOLLOWING COLON'),
(0xea4d, 0xea4d, 'TOKEN FOR ELSE?'),
(0xea4f, 0xea4f, "YES - DON'T PUT IT IN BUFFER"),
(0xea51, 0xea51, 'TOKEN FOR REMARK?'),
(0xea53, 0xea53, "YES - DON'T PUT IT IN BUFFER"),
(0xea55, 0xea55, 'SKIP TWO BYTES'),
(0xea56, 0xea56, 'EXCLAMATION POINT'),
(0xea58, 0xea58, 'PUT CHARACTER IN BUFFER'),
(0xea5a, 0xea5a, 'GET ANOTHER CHARACTER'),
(0xea5c, 0xea5c, 'FIRST DO COMMANDS'),
(0xea5f, 0xea5f, 'CHECK FOR SECONDARY TOKEN'),
(0xea61, 0xea61, 'BRANCH IF NON SECONDARY TOKEN'),
(0xea63, 0xea63, 'GET SECONDARY TOKEN'),
(0xea65, 0xea65, 'BUMP IT UP TO SECONDARY FUNCTIONS'),
(0xea67, 0xea67, 'MASK OFF BIT 7 OF TOKEN'),
(0xea69, 0xea69, 'MOVE TO NEXT COMMAND TABLE'),
(0xea6b, 0xea6b, 'IS THIS TABLE ENABLED?'),
(0xea6d, 0xea6d, 'NO - ILLEGAL TOKEN'),
(0xea6f, 0xea6f, 'SUBTRACT THE NUMBER OF TOKENS FROM THE CURRENT TOKEN NUMBER'),
(0xea71, 0xea71, 'BRANCH IF TOKEN NOT IN THIS TABLE'),
(0xea73, 0xea73, 'RESTORE TOKEN NUMBER RELATIVE TO THIS TABLE'),
(0xea75, 0xea75, 'POINT U TO COMMAND DICTIONARY TABLE'),
(0xea77, 0xea77, 'DECREMENT TOKEN NUMBER'),
(0xea78, 0xea78, 'BRANCH IF THIS IS THE CORRECT TOKEN'),
(0xea7a, 0xea7a, 'GRAB A BYTE'),
(0xea7c, 0xea7c, 'BRANCH IF BIT 7 NOT SET'),
(0xea7e, 0xea7e, 'GO SEE IF THIS IS THE CORRECT TOKEN'),
(0xea80, 0xea80, 'GET A CHARACTER FROM DICTIONARY TABLE'),
(0xea82, 0xea82, 'PUT CHARACTER IN BUFFER'),
(0xea84, 0xea84, 'CHECK FOR START OF NEXT TOKEN'),
(0xea86, 0xea86, 'BRANCH IF NOT DONE WITH THIS TOKEN'),
(0xea88, 0xea88, 'GO GET ANOTHER CHARACTER'),
(0xea8a, 0xea8a, 'TEST FOR END OF LINE INPUT BUFFER'),
(0xea8e, 0xea8e, 'BRANCH IF AT END OF BUFFER'),
(0xea90, 0xea90, 'MASK OFF BIT 7'),
(0xea92, 0xea92, '* SAVE CHARACTER IN BUFFER AND'),
(0xea94, 0xea94, '* CLEAR NEXT CHARACTER SLOT IN BUFFER'),
(0xea97, 0xea97, "GET BASIC'S INPUT POINTER ADDRESS"),
(0xea99, 0xea99, 'POINT X TO LINE INPUT BUFFER'),
(0xea9c, 0xea9c, 'CLEAR ILLEGAL TOKEN FLAG'),
(0xea9e, 0xea9e, 'CLEAR DATA FLAG'),
(0xeaa0, 0xeaa0, 'GET INPUT CHAR'),
(0xeaa2, 0xeaa2, 'BRANCH IF END OF LINE'),
(0xeaa4, 0xeaa4, '* CHECK ILLEGAL TOKEN FLAG & BRANCH IF NOT'),
(0xeaa6, 0xeaa6, '* PROCESSING AN ILLEGAL TOKEN'),
(0xeaa8, 0xeaa8, 'SET CARRY IF NOT UPPER CASE ALPHA'),
(0xeaab, 0xeaab, 'BRANCH IF UPPER CASE ALPHA'),
(0xeaad, 0xeaad, "* DON'T CRUNCH ASCII NUMERIC CHARACTERS"),
(0xeaaf, 0xeaaf, '* BRANCH IF NOT NUMERIC'),
(0xeab3, 0xeab3, '* BRANCH IF NUMERIC'),
(0xeab5, 0xeab5, 'CLEAR ILLEGAL TOKEN FLAG'),
(0xeab7, 0xeab7, 'SPACE?'),
(0xeab9, 0xeab9, 'DO NOT REMOVE SPACES'),
(0xeabb, 0xeabb, 'SAVE INPUT CHARACTER AS SCAN DELIMITER'),
(0xeabd, 0xeabd, 'CHECK FOR STRING DELIMITER'),
(0xeabf, 0xeabf, 'BRANCH IF STRING'),
(0xeac1, 0xeac1, '* CHECK DATA FLAG AND BRANCH IF CLEAR'),
(0xeac3, 0xeac3, '* DO NOT CRUNCH DATA'),
(0xeac5, 0xeac5, 'SAVE CHARACTER IN BUFFER'),
(0xeac7, 0xeac7, 'BRANCH IF END OF LINE'),
(0xeac9, 0xeac9, '* CHECK FOR END OF SUBLINE'),
(0xeacb, 0xeacb, '* AND RESET FLAGS IF END OF SUBLINE'),
(0xeacd, 0xeacd, 'GO GET ANOTHER CHARACTER'),
(0xeacf, 0xeacf, '* DOUBLE ZERO AT END OF LINE'),
(0xead3, 0xead3, 'SAVE ADDRESS OF END OF LINE IN ACCD'),
(0xead5, 0xead5, 'LENGTH OF LINE IN ACCD'),
(0xead8, 0xead8, '* SET THE INPUT POINTER TO ONE BEFORE'),
(0xeadb, 0xeadb, '* THE START OF THE CRUNCHED LINE'),
(0xeadd, 0xeadd, "CRUNCH'"),
(0xeade, 0xeade, 'CHECK FOR "?" - PRINT ABBREVIATION'),
(0xeae0, 0xeae0, 'BRANCH IF NOT PRINT ABBREVIATION'),
(0xeae2, 0xeae2, '* GET THE PRINT TOKEN AND SAVE IT'),
(0xeae4, 0xeae4, '* IN BUFFER'),
(0xeae6, 0xeae6, 'APOSTROPHE IS SAME AS REM'),
(0xeae8, 0xeae8, 'BRANCH IF NOT REMARK'),
(0xeaea, 0xeaea, 'COLON, REM TOKEN'),
(0xeaed, 0xeaed, 'SAVE IN BUFFER'),
(0xeaef, 0xeaef, 'SET DELIMITER = 0 (END OF LINE)'),
(0xeaf1, 0xeaf1, 'SCAN TILL WE MATCH [V42]'),
(0xeaf3, 0xeaf3, 'BRANCH IF END OF LINE'),
(0xeaf5, 0xeaf5, 'DELIMITER?'),
(0xeaf7, 0xeaf7, 'BRANCH OUT IF SO'),
(0xeaf9, 0xeaf9, "DON'T CRUNCH REMARKS OR STRINGS"),
(0xeafb, 0xeafb, 'GO GET MORE STRING OR REMARK'),
(0xeafd, 0xeafd, '* LESS THAN ASCII ZERO?'),
(0xeaff, 0xeaff, '* BRANCH IF SO'),
(0xeb01, 0xeb01, '= CHECK FOR NUMERIC VALUE, COLON OR SEMICOLON'),
(0xeb03, 0xeb03, '= AND INSERT IN BUFFER IF SO'),
(0xeb05, 0xeb05, 'MOVE INPUT POINTER BACK ONE'),
(0xeb07, 0xeb07, 'SAVE POINTERS TO INPUT STRING, OUTPUT STRING'),
(0xeb09, 0xeb09, 'TOKEN FLAG 0 = COMMAND, FF = SECONDARY'),
(0xeb0b, 0xeb0b, 'POINT U TO COMMAND INTERPRETATION'),
(0xeb0e, 0xeb0e, 'INITIALIZE V42 AS TOKEN COUNTER'),
(0xeb10, 0xeb10, 'MOVE TO NEXT COMMAND INTERPRETATION TABLE'),
(0xeb12, 0xeb12, 'GET NUMBER OF COMMANDS'),
(0xeb14, 0xeb14, 'GO DO SECONDARY FUNCTIONS IF NO COMMAND TABLE'),
(0xeb16, 0xeb16, 'POINT Y TO COMMAND DICTIONARY TABLE'),
(0xeb19, 0xeb19, 'GET POINTER TO INPUT STRING'),
(0xeb1b, 0xeb1b, 'GET A BYTE FROM DICTIONARY TABLE'),
(0xeb1d, 0xeb1d, 'SUBTRACT INPUT CHARACTER'),
(0xeb1f, 0xeb1f, 'LOOP IF SAME'),
(0xeb21, 0xeb21, 'LAST CHAR IN RESERVED WORD TABLE HAD'),
(0xeb23, 0xeb23, 'BRANCH IF NO MATCH - CHECK ANOTHER COMMAND'),
(0xeb25, 0xeb25, 'DELETE OLD INPUT POINTER FROM STACK'),
(0xeb27, 0xeb27, 'GET POINTER TO OUTPUT STRING'),
(0xeb29, 0xeb29, 'OR IN THE TABLE POSITION TO MAKE THE TOKEN'),
(0xeb2b, 0xeb2b, '* CHECK TOKEN FLAG AND BRANCH'),
(0xeb2d, 0xeb2d, '* IF SECONDARY'),
(0xeb2f, 0xeb2f, 'IS IT ELSE TOKEN?'),
(0xeb31, 0xeb31, 'NO'),
(0xeb33, 0xeb33, 'PUT A COLON (SUBLINE) BEFORE ELSE TOKEN'),
(0xeb35, 0xeb35, 'SECONDARY TOKENS PRECEEDED BY $FF'),
(0xeb37, 0xeb37, 'GO PROCESS MORE INPUT CHARACTERS'),
(0xeb39, 0xeb39, 'SAVE THIS TOKEN'),
(0xeb3b, 0xeb3b, 'DATA TOKEN?'),
(0xeb3d, 0xeb3d, 'NO'),
(0xeb3f, 0xeb3f, 'SET DATA FLAG'),
(0xeb41, 0xeb41, 'REM TOKEN?'),
(0xeb43, 0xeb43, 'YES'),
(0xeb45, 0xeb45, 'GO PROCESS MORE INPUT CHARACTERS'),
(0xeb47, 0xeb47, 'NOW DO SECONDARY FUNCTIONS'),
(0xeb4a, 0xeb4a, 'TOGGLE THE TOKEN FLAG'),
(0xeb4c, 0xeb4c, 'BRANCH IF NOW CHECKING SECONDARY COMMANDS'),
(0xeb4e, 0xeb4e, 'RESTORE INPUT AND OUTPUT POINTERS'),
(0xeb50, 0xeb50, '* MOVE THE FIRST CHARACTER OF AN'),
(0xeb52, 0xeb52, '* ILLEGAL TOKEN'),
(0xeb54, 0xeb54, 'SET CARRY IF NOT ALPHA'),
(0xeb57, 0xeb57, 'BRANCH IF NOT ALPHA'),
(0xeb59, 0xeb59, 'SET ILLEGAL TOKEN FLAG IF UPPER CASE ALPHA'),
(0xeb5b, 0xeb5b, 'PROCESS MORE INPUT CHARACTERS'),
(0xeb5d, 0xeb5d, 'INCREMENT TOKEN COUNTER'),
(0xeb5f, 0xeb5f, 'DECR COMMAND COUNTER'),
(0xeb60, 0xeb60, 'GET ANOTHER COMMAND TABLE IF DONE W/THIS ONE'),
(0xeb62, 0xeb62, 'MOVE POINTER BACK ONE'),
(0xeb64, 0xeb64, '* GET TO NEXT'),
(0xeb66, 0xeb66, '* RESERVED WORD'),
(0xeb68, 0xeb68, 'GO SEE IF THIS WORD IS A MATCH'),
(0xeb6a, 0xeb6a, 'BRANCH IF NO ARGUMENT'),
(0xeb6c, 0xeb6c, 'CHECK FOR ALL PRINT OPTIONS'),
(0xeb6f, 0xeb6f, 'CALL EXTENDED BASIC ADD-IN'),
(0xeb72, 0xeb72, 'RETURN IF END OF LINE'),
(0xeb74, 0xeb74, 'TOKEN FOR TAB( ?'),
(0xeb76, 0xeb76, 'YES'),
(0xeb78, 0xeb78, 'COMMA?'),
(0xeb7a, 0xeb7a, 'YES - ADVANCE TO NEXT TAB FIELD'),
(0xeb7c, 0xeb7c, 'SEMICOLON?'),
(0xeb7e, 0xeb7e, 'YES - DO NOT ADVANCE CURSOR'),
(0xeb80, 0xeb80, 'EVALUATE EXPRESSION'),
(0xeb83, 0xeb83, '* GET VARIABLE TYPE AND'),
(0xeb85, 0xeb85, '* SAVE IT ON THE STACK'),
(0xeb87, 0xeb87, 'BRANCH IF STRING VARIABLE'),
(0xeb89, 0xeb89, 'CONVERT FP NUMBER TO AN ASCII STRING'),
(0xeb8c, 0xeb8c, 'PARSE A STRING FROM (X-1) AND PUT'),
(0xeb8f, 0xeb8f, 'PRINT STRING POINTED TO BY X'),
(0xeb91, 0xeb91, 'GET VARIABLE TYPE BACK'),
(0xeb93, 0xeb93, 'SET UP TAB WIDTH ZONE, ETC'),
(0xeb96, 0xeb96, 'CHECK CURRENT PRINT POSITION'),
(0xeb97, 0xeb97, 'BRANCH IF NOT AT START OF LINE'),
(0xeb99, 0xeb99, 'GET CURRENT INPUT CHARACTER'),
(0xeb9b, 0xeb9b, 'COMMA?'),
(0xeb9d, 0xeb9d, 'SKIP TO NEXT TAB FIELD'),
(0xeb9f, 0xeb9f, 'SEND A SPACE TO CONSOLE OUT'),
(0xeba1, 0xeba1, 'GET CURRENT INPUT CHARACTER'),
(0xeba3, 0xeba3, 'BRANCH IF NOT END OF LINE'),
(0xeba5, 0xeba5, '* SEND A CR TO'),
(0xeba7, 0xeba7, '* CONSOLE OUT'),
(0xeba9, 0xeba9, 'SET UP TAB WIDTH, ZONE ETC'),
(0xebac, 0xebac, 'BRANCH IF WIDTH = ZERO'),
(0xebae, 0xebae, 'GET PRINT POSITION'),
(0xebb0, 0xebb0, 'BRANCH IF NOT AT START OF LINE'),
(0xebb3, 0xebb3, 'SET UP TAB WIDTH, ZONE ETC'),
(0xebb6, 0xebb6, 'BRANCH IF LINE WIDTH = 0 (CASSETTE)'),
(0xebb8, 0xebb8, 'GET CURRENT POSITION'),
(0xebba, 0xebba, 'COMPARE TO LAST TAB ZONE'),
(0xebbc, 0xebbc, 'BRANCH IF < LAST TAB ZONE'),
(0xebbe, 0xebbe, 'SEND A CARRIAGE RETURN TO CONSOLE OUT'),
(0xebc0, 0xebc0, 'GET MORE DATA'),
(0xebc4, 0xebc4, '* SUBTRACT TAB FIELD WIDTH FROM CURRENT'),
(0xebc6, 0xebc6, '* POSITION UNTIL CARRY SET - NEGATING THE'),
(0xebc8, 0xebc8, '* REMAINDER LEAVES THE NUMBER OF SPACES TO NEXT'),
(0xebc9, 0xebc9, 'GO ADVANCE TO NEXT TAB ZONE'),
(0xebcb, 0xebcb, 'EVALUATE EXPRESSION - RETURN VALUE IN B'),
(0xebce, 0xebce, "* 'SYNTAX' ERROR IF NOT ')'"),
(0xebd4, 0xebd4, 'SET UP TAB WIDTH, ZONE ETC'),
(0xebd7, 0xebd7, 'GET DIFFERENCE OF PRINT POSITION & TAB POSITION'),
(0xebd9, 0xebd9, 'BRANCH IF TAB POSITION < CURRENT POSITION'),
(0xebdb, 0xebdb, 'SEND A SPACE TO CONSOLE OUT'),
(0xebdd, 0xebdd, 'DECREMENT DIFFERENCE COUNT'),
(0xebde, 0xebde, 'BRANCH UNTIL CURRENT POSITION = TAB POSITION'),
(0xebe0, 0xebe0, 'GET NEXT CHARACTER FROM BASIC'),
(0xebe2, 0xebe2, 'LOOK FOR MORE PRINT DATA'),
(0xebe5, 0xebe5, 'PARSE A STRING FROM X AND PUT'),
(0xebe8, 0xebe8, 'GET LENGTH OF STRING AND REMOVE'),
(0xebeb, 0xebeb, 'COMPENSATE FOR DECB BELOW'),
(0xebec, 0xebec, 'DECREMENT COUNTER'),
(0xebed, 0xebed, 'EXIT ROUTINE'),
(0xebef, 0xebef, 'GET A CHARACTER FROM X'),
(0xebf1, 0xebf1, 'SEND TO CONSOLE OUT'),
(0xebf3, 0xebf3, 'KEEP LOOPING'),
(0xebf5, 0xebf5, 'SPACE TO CONSOLE OUT'),
(0xebf7, 0xebf7, 'SKIP NEXT TWO BYTES'),
(0xebf8, 0xebf8, 'QUESTION MARK TO CONSOLE OUT'),
(0xebfa, 0xebfa, 'JUMP TO CONSOLE OUT'),
(0xebfd, 0xebfd, 'FLOATING POINT CONSTANT (.5)'),
(0xec00, 0xec00, 'ADD .5 TO FPA0'),
(0xec02, 0xec02, 'COPY PACKED FP DATA FROM (X) TO FPA1'),
(0xec05, 0xec05, 'CHANGE MANTISSA SIGN OF FPA0'),
(0xec07, 0xec07, 'REVERSE RESULT SIGN FLAG'),
(0xec09, 0xec09, 'GO ADD FPA1 AND FPA0'),
(0xec0b, 0xec0b, 'UNPACK PACKED FP DATA FROM (X) TO'),
(0xec0e, 0xec0e, 'CHECK EXPONENT OF FPA0'),
(0xec0f, 0xec0f, 'COPY FPA1 TO FPA0 IF FPA0 ='),
(0xec13, 0xec13, 'POINT X TO FPA1'),
(0xec16, 0xec16, 'PUT EXPONENT OF FPA1 INTO ACCB'),
(0xec18, 0xec18, 'CHECK EXPONENT'),
(0xec19, 0xec19, 'RETURN IF EXPONENT = 0 (ADDING 0 TO FPA0)'),
(0xec1b, 0xec1b, 'SUBTRACT EXPONENT OF FPA0 FROM EXPONENT OF FPA1'),
(0xec1d, 0xec1d, 'BRANCH IF EXPONENTS ARE EQUAL'),
(0xec1f, 0xec1f, 'BRANCH IF EXPONENT FPA0 > FPA1'),
(0xec21, 0xec21, 'REPLACE FPA0 EXPONENT WITH FPA1 EXPONENT'),
(0xec23, 0xec23, '* REPLACE FPA0 MANTISSA SIGN'),
(0xec25, 0xec25, '* WITH FPA1 MANTISSA SIGN'),
(0xec27, 0xec27, 'POINT X TO FPA0'),
(0xec2a, 0xec2a, 'NEGATE DIFFERENCE OF EXPONENTS'),
(0xec2b, 0xec2b, 'TEST DIFFERENCE OF EXPONENTS'),
(0xec2d, 0xec2d, 'BRANCH IF DIFFERENCE OF EXPONENTS <= 8'),
(0xec2f, 0xec2f, 'CLEAR OVERFLOW BYTE'),
(0xec30, 0xec30, 'SHIFT MS BYTE OF MANTISSA; BIT 7 = 0'),
(0xec32, 0xec32, 'GO SHIFT MANTISSA OF (X) TO THE RIGHT (B) TIMES'),
(0xec35, 0xec35, 'GET SIGN FLAG'),
(0xec37, 0xec37, 'BRANCH IF FPA0 AND FPA1 SIGNS ARE THE SAME'),
(0xec39, 0xec39, '* COMPLEMENT MANTISSA POINTED'),
(0xec3b, 0xec3b, '* TO BY (X) THE'),
(0xec3d, 0xec3d, '* ADCA BELOW WILL'),
(0xec3f, 0xec3f, '* CONVERT THIS OPERATION'),
(0xec41, 0xec41, '* INTO A NEG (MANTISSA)'),
(0xec42, 0xec42, 'ADD ONE TO ACCA - COMA ALWAYS SETS THE CARRY FLAG'),
(0xec44, 0xec44, 'SAVE FPA SUB BYTE'),
(0xec46, 0xec46, '* ADD LS BYTE'),
(0xec48, 0xec48, '* OF MANTISSA'),
(0xec4a, 0xec4a, 'SAVE IN FPA0 LSB'),
(0xec4c, 0xec4c, '* ADD NEXT BYTE'),
(0xec4e, 0xec4e, '* OF MANTISSA'),
(0xec50, 0xec50, 'SAVE IN FPA0'),
(0xec52, 0xec52, '* ADD NEXT BYTE'),
(0xec54, 0xec54, '* OF MANTISSA'),
(0xec56, 0xec56, 'SAVE IN FPA0'),
(0xec58, 0xec58, '* ADD MS BYTE'),
(0xec5a, 0xec5a, '* OF MANTISSA'),
(0xec5c, 0xec5c, 'SAVE IN FPA0'),
(0xec5e, 0xec5e, 'FLAG'),
(0xec5f, 0xec5f, 'BRANCH IF FPA0 & FPA1 SIGNS WERE ALIKE'),
(0xec61, 0xec61, 'BRANCH IF POSITIVE MANTISSA'),
(0xec63, 0xec63, 'NEGATE FPA0 MANTISSA'),
(0xec65, 0xec65, 'CLEAR TEMPORARY EXPONENT ACCUMULATOR'),
(0xec66, 0xec66, 'TEST MSB OF MANTISSA'),
(0xec68, 0xec68, 'BRANCH IF <> 0'),
(0xec6a, 0xec6a, '* IF THE MSB IS'),
(0xec6c, 0xec6c, '* 0, THEN SHIFT THE'),
(0xec6e, 0xec6e, '* MANTISSA A WHOLE BYTE'),
(0xec70, 0xec70, '* AT A TIME. THIS'),
(0xec72, 0xec72, '* IS FASTER THAN ONE'),
(0xec74, 0xec74, '* BIT AT A TIME'),
(0xec76, 0xec76, '* BUT USES MORE MEMORY.'),
(0xec78, 0xec78, '* FPSBYT, THE CARRY IN'),
(0xec7a, 0xec7a, '* BYTE, REPLACES THE MATISSA LSB.'),
(0xec7c, 0xec7c, 'SHIFTING ONE BYTE = 8 BIT SHIFTS; ADD 8 TO EXPONENT'),
(0xec7e, 0xec7e, 'CHECK FOR 5 SHIFTS'),
(0xec80, 0xec80, 'BRANCH IF < 5 SHIFTS, IF > 5, THEN MANTISSA = 0'),
(0xec82, 0xec82, 'A ZERO EXPONENT = 0 FLOATING POINT'),
(0xec83, 0xec83, 'ZERO OUT THE EXPONENT'),
(0xec85, 0xec85, 'ZERO OUT THE MANTISSA SIGN'),
(0xec88, 0xec88, 'SHIFT FPA0 MANTISSA TO RIGHT'),
(0xec8a, 0xec8a, 'CLEAR CARRY FLAG'),
(0xec8d, 0xec8d, 'ADD ONE TO EXPONENT ACCUMULATOR'),
(0xec8e, 0xec8e, 'SHIFT SUB BYTE ONE LEFT'),
(0xec90, 0xec90, 'SHIFT LS BYTE'),
(0xec92, 0xec92, 'SHIFT NS BYTE'),
(0xec94, 0xec94, 'SHIFT NS BYTE'),
(0xec96, 0xec96, 'SHIFT MS BYTE'),
(0xec98, 0xec98, 'BRANCH IF NOT YET NORMALIZED'),
(0xec9a, 0xec9a, 'GET CURRENT EXPONENT'),
(0xec9c, 0xec9c, 'SAVE EXPONENT MODIFIER CAUSED BY NORMALIZATION'),
(0xec9e, 0xec9e, 'SUBTRACT ACCUMULATED EXPONENT MODIFIER'),
(0xeca0, 0xeca0, 'SAVE AS NEW EXPONENT'),
(0xeca2, 0xeca2, 'SET FPA0 = 0 IF THE NORMALIZATION CAUSED'),
(0xeca4, 0xeca4, 'SKIP 2 BYTES'),
(0xeca5, 0xeca5, 'BRANCH IF MANTISSA OVERFLOW'),
(0xeca7, 0xeca7, 'SUB BYTE BIT 7 TO CARRY - USE AS ROUND-OFF'),
(0xeca9, 0xeca9, 'CLRA, BUT DO NOT CHANGE CARRY FLAG'),
(0xecab, 0xecab, 'CLEAR THE SUB BYTE'),
(0xecad, 0xecad, 'GO ROUND-OFF RESULT'),
(0xecaf, 0xecaf, 'INCREMENT EXPONENT - MULTIPLY BY 2'),
(0xecb1, 0xecb1, 'OVERFLOW ERROR IF CARRY PAST $FF'),
(0xecb3, 0xecb3, '* SHIFT MANTISSA'),
(0xecb5, 0xecb5, '* ONE TO'),
(0xecb7, 0xecb7, '* THE RIGHT -'),
(0xecb9, 0xecb9, '* DIVIDE BY TWO'),
(0xecbb, 0xecbb, 'BRANCH IF NO ROUND-OFF NEEDED'),
(0xecbd, 0xecbd, 'ADD ONE TO MANTISSA - ROUND OFF'),
(0xecbf, 0xecbf, 'BRANCH iF OVERFLOW - MANTISSA = 0'),
(0xecc2, 0xecc2, 'TOGGLE SIGN OF MANTISSA'),
(0xecc4, 0xecc4, '* COMPLEMENT ALL 4 MANTISSA BYTES'),
(0xeccc, 0xeccc, '* GET BOTTOM 2 MANTISSA'),
(0xecce, 0xecce, '* BYTES, ADD ONE TO'),
(0xecd0, 0xecd0, '* THEM AND SAVE THEM'),
(0xecd2, 0xecd2, 'BRANCH IF NO OVERFLOW'),
(0xecd4, 0xecd4, '* IF OVERFLOW ADD ONE'),
(0xecd6, 0xecd6, '* TO TOP 2 MANTISSA'),
(0xecd8, 0xecd8, '* BYTES AND SAVE THEM'),
(0xecdb, 0xecdb, "OV' OVERFLOW ERROR"),
(0xecdd, 0xecdd, 'PROCESS AN ERROR'),
(0xece0, 0xece0, 'POINT X TO FPA2'),
(0xece3, 0xece3, 'GET LS BYTE OF MANTISSA (X)'),
(0xece5, 0xece5, 'SAVE IN FPA SUB BYTE'),
(0xece7, 0xece7, '* SHIFT THE NEXT THREE BYTES OF THE'),
(0xece9, 0xece9, '* MANTISSA RIGHT ONE COMPLETE BYTE.'),
(0xecf3, 0xecf3, 'GET THE CARRY IN BYTE'),
(0xecf5, 0xecf5, 'STORE AS THE MS MANTISSA BYTE OF (X)'),
(0xecf7, 0xecf7, 'ADD 8 TO DIFFERENCE OF EXPONENTS'),
(0xecf9, 0xecf9, 'BRANCH IF EXPONENT DIFFERENCE < -8'),
(0xecfb, 0xecfb, 'GET FPA SUB BYTE'),
(0xecfd, 0xecfd, 'CAST OUT THE 8 ADDED IN ABOVE'),
(0xecff, 0xecff, 'BRANCH IF EXPONENT DIFFERENCE = 0'),
(0xed01, 0xed01, '* SHIFT MANTISSA AND SUB BYTE ONE BIT TO THE RIGHT'),
(0xed0a, 0xed0a, 'ADD ONE TO EXPONENT DIFFERENCE'),
(0xed0b, 0xed0b, 'BRANCH IF EXPONENTS NOT ='),
(0xed0e, 0xed0e, 'FLOATING POINT CONSTANT 1.0'),
(0xed13, 0xed13, 'MOVE PACKED FPA FROM (X) TO FPA1'),
(0xed15, 0xed15, 'BRANCH IF EXPONENT OF FPA0 = 0'),
(0xed17, 0xed17, 'CALCULATE EXPONENT OF PRODUCT'),
(0xed19, 0xed19, '* ZERO OUT MANTISSA OF FPA2'),
(0xed23, 0xed23, 'GET LS BYTE OF FPA0'),
(0xed25, 0xed25, 'MULTIPLY BY FPA1'),
(0xed27, 0xed27, '* TEMPORARILY SAVE SUB BYTE 4'),
(0xed2b, 0xed2b, 'GET NUMBER 3 MANTISSA BYTE OF FPA0'),
(0xed2d, 0xed2d, 'MULTIPLY BY FPA1'),
(0xed2f, 0xed2f, '* TEMPORARILY SAVE SUB BYTE 3'),
(0xed33, 0xed33, 'GET NUMBER 2 MANTISSA BYTE OF FPA0'),
(0xed35, 0xed35, 'MULTIPLY BY FPA1'),
(0xed37, 0xed37, '* TEMPORARILY SAVE SUB BYTE 2'),
(0xed3b, 0xed3b, 'GET MS BYTE OF FPA0 MANTISSA'),
(0xed3d, 0xed3d, 'MULTIPLY BY FPA1'),
(0xed3f, 0xed3f, '* TEMPORARILY SAVE SUB BYTE 1'),
(0xed43, 0xed43, 'COPY MANTISSA FROM FPA2 TO FPA0'),
(0xed46, 0xed46, 'NORMALIZE FPA0'),
(0xed49, 0xed49, 'SHIFT FPA2 ONE BYTE TO RIGHT'),
(0xed4b, 0xed4b, 'SET CARRY FLAG'),
(0xed4c, 0xed4c, 'GET FPA2 MS BYTE'),
(0xed4e, 0xed4e, 'CARRY FLAG INTO SHIFT COUNTER;'),
(0xed4f, 0xed4f, 'BRANCH WHEN 8 SHIFTS DONE'),
(0xed51, 0xed51, 'DO NOT ADD FPA1 IF DATA BIT = 0'),
(0xed53, 0xed53, '* ADD MANTISSA LS BYTE'),
(0xed59, 0xed59, '= ADD MANTISSA NUMBER 3 BYTE'),
(0xed5b, 0xed5b, '='),
(0xed5d, 0xed5d, '='),
(0xed5f, 0xed5f, '* ADD MANTISSA NUMBER 2 BYTE'),
(0xed65, 0xed65, '= ADD MANTISSA MS BYTE'),
(0xed67, 0xed67, '='),
(0xed69, 0xed69, 'CARRY INTO MS BYTE'),
(0xed6c, 0xed6c, '= ROTATE FPA2 ONE BIT TO THE RIGHT'),
(0xed6e, 0xed6e, '='),
(0xed70, 0xed70, '='),
(0xed72, 0xed72, '='),
(0xed74, 0xed74, 'CLEAR CARRY FLAG'),
(0xed75, 0xed75, 'KEEP LOOPING'),
(0xed78, 0xed78, 'GET TWO MSB BYTES OF MANTISSA FROM'),
(0xed7a, 0xed7a, 'SAVE PACKED MANTISSA SIGN BYTE'),
(0xed7c, 0xed7c, 'FORCE BIT 7 OF MSB MANTISSA = 1'),
(0xed7e, 0xed7e, 'SAVE 2 MSB BYTES IN FPA1'),
(0xed80, 0xed80, '* GET PACKED MANTISSA SIGN BYTE. EOR W/FPA0'),
(0xed82, 0xed82, '* SIGN - NEW SIGN POSITION IF BOTH OLD SIGNS ALIKE,'),
(0xed84, 0xed84, '* NEG IF BOTH OLD SIGNS DIFF. SAVE ADJUSTED'),
(0xed86, 0xed86, '= GET 2 LSB BYTES OF MANTISSA'),
(0xed88, 0xed88, '= AND PUT IN FPA1'),
(0xed8a, 0xed8a, '* GET EXPONENT FROM (X) AND'),
(0xed8c, 0xed8c, '* PUT IN EXPONENT OF FPA1'),
(0xed8e, 0xed8e, 'GET EXPONENT OF FPA0'),
(0xed91, 0xed91, 'TEST EXPONENT OF FPA1'),
(0xed92, 0xed92, 'PURGE RETURN ADDRESS & SET FPA0 = 0'),
(0xed94, 0xed94, 'ADD FPA1 EXPONENT TO FPA0 EXPONENT'),
(0xed96, 0xed96, 'ROTATE CARRY INTO BIT 7; BIT 0 INTO CARRY'),
(0xed97, 0xed97, 'SET OVERFLOW FLAG'),
(0xed98, 0xed98, 'BRANCH IF EXPONENT TOO LARGE OR SMALL'),
(0xed9a, 0xed9a, 'ADD $80 BIAS TO EXPONENT'),
(0xed9c, 0xed9c, 'SAVE NEW EXPONENT'),
(0xed9e, 0xed9e, 'SET FPA0'),
(0xeda0, 0xeda0, 'GET MANTISSA SIGN'),
(0xeda2, 0xeda2, 'SAVE AS MANTISSA SIGN OF FPA0'),
(0xeda5, 0xeda5, 'GET MANTISSA SIGN OF FPA0'),
(0xeda7, 0xeda7, 'CHANGE SIGN OF FPA0 MANTISSA'),
(0xedaa, 0xedaa, 'PURGE RETURN ADDRESS FROM STACK'),
(0xedac, 0xedac, 'ZERO FPA0 MANTISSA SIGN & EXPONENT'),
(0xedb0, 0xedb0, "OV' OVERFLOW ERROR"),
(0xedb3, 0xedb3, 'TRANSFER FPA0 TO FPA1'),
(0xedb6, 0xedb6, 'BRANCH IF EXPONENT = 0'),
(0xedb8, 0xedb8, 'ADD 2 TO EXPONENT (TIMES 4)'),
(0xedba, 0xedba, "OV' ERROR IF EXPONENT > $FF"),
(0xedbc, 0xedbc, 'CLEAR RESULT SIGN BYTE'),
(0xedbe, 0xedbe, 'ADD FPA1 TO FPA0 (TIMES 5)'),
(0xedc1, 0xedc1, 'ADD ONE TO EXPONENT (TIMES 10)'),
(0xedc3, 0xedc3, "OV' ERROR IF EXPONENT > $FF"),
(0xedc6, 0xedc6, 'FLOATING POINT CONSTANT 10'),
(0xedcb, 0xedcb, 'MOVE FPA0 TO FPA1'),
(0xedce, 0xedce, 'POINT TO FLOATING POINT CONSTANT 10'),
(0xedd1, 0xedd1, 'ZERO MANTISSA SIGN BYTE'),
(0xedd2, 0xedd2, 'STORE THE QUOTIENT MANTISSA SIGN BYTE'),
(0xedd4, 0xedd4, 'UNPACK AN FP NUMBER FROM (X) INTO FPA0'),
(0xedd7, 0xedd7, 'SKIP TWO BYTES'),
(0xedd8, 0xedd8, 'GET FP NUMBER FROM (X) TO FPA1'),
(0xedda, 0xedda, "/0' DIVIDE BY ZERO ERROR"),
(0xeddc, 0xeddc, 'GET EXPONENT OF RECIPROCAL OF DIVISOR'),
(0xedde, 0xedde, 'CALCULATE EXPONENT OF QUOTIENT'),
(0xede0, 0xede0, 'INCREMENT EXPONENT'),
(0xede2, 0xede2, "OV' OVERFLOW ERROR"),
(0xede4, 0xede4, 'POINT X TO MANTISSA OF FPA2 - HOLD'),
(0xede7, 0xede7, '5 BYTE DIVIDE'),
(0xede9, 0xede9, 'SAVE BYTE COUNTER'),
(0xedeb, 0xedeb, 'SHIFT COUNTER-AND TEMPORARY QUOTIENT BYTE'),
(0xeded, 0xeded, '* COMPARE THE TWO MS BYTES'),
(0xedef, 0xedef, '* OF FPA0 AND FPA1 AND'),
(0xedf1, 0xedf1, '* BRANCH IF <>'),
(0xedf3, 0xedf3, '= COMPARE THE NUMBER 2'),
(0xedf5, 0xedf5, '= BYTES AND'),
(0xedf7, 0xedf7, '= BRANCH IF <>'),
(0xedf9, 0xedf9, '* COMPARE THE NUMBER 3'),
(0xedfb, 0xedfb, '* BYTES AND'),
(0xedfd, 0xedfd, '* BRANCH IF <>'),
(0xedff, 0xedff, '= COMPARE THE LS BYTES'),
(0xee01, 0xee01, '= AND BRANCH'),
(0xee03, 0xee03, '= IF <>'),
(0xee05, 0xee05, 'SET CARRY FLAG IF FPA0 = FPA1'),
(0xee06, 0xee06, 'SAVE CARRY FLAG STATUS IN ACCA; CARRY'),
(0xee08, 0xee08, 'ROTATE CARRY INTO TEMPORARY QUOTIENT BYTE'),
(0xee09, 0xee09, 'CARRY WILL BE SET AFTER 8 SHIFTS'),
(0xee0b, 0xee0b, 'SAVE TEMPORARY QUOTIENT'),
(0xee0d, 0xee0d, 'DECREMENT BYTE COUNTER'),
(0xee0f, 0xee0f, 'BRANCH IF DONE'),
(0xee11, 0xee11, 'BRANCH IF LAST BYTE'),
(0xee13, 0xee13, 'RESET SHIFT COUNTER AND TEMPORARY QUOTIENT BYTE'),
(0xee15, 0xee15, 'RESTORE CARRY FLAG AND'),
(0xee17, 0xee17, 'BRANCH IF FPA0 =< FPA1'),
(0xee19, 0xee19, '* SHIFT FPA1 MANTISSA 1 BIT TO LEFT'),
(0xee21, 0xee21, 'BRANCH IF CARRY - ADD ONE TO PARTIAL QUOTIENT'),
(0xee23, 0xee23, 'IF MSB OF HIGH ORDER MANTISSA BYTE IS'),
(0xee25, 0xee25, 'CARRY IS CLEAR, CHECK ANOTHER BIT'),
(0xee27, 0xee27, '* SUBTRACT THE LS BYTES OF MANTISSA'),
(0xee2d, 0xee2d, '= THEN THE NEXT BYTE'),
(0xee2f, 0xee2f, '='),
(0xee31, 0xee31, '='),
(0xee33, 0xee33, '* AND THE NEXT'),
(0xee39, 0xee39, '= AND FINALLY, THE MS BYTE OF MANTISSA'),
(0xee3b, 0xee3b, '='),
(0xee3d, 0xee3d, '='),
(0xee3f, 0xee3f, 'GO SHIFT FPA1'),
(0xee41, 0xee41, 'USE ONLY TWO BITS OF THE LAST BYTE (FIFTH)'),
(0xee43, 0xee43, 'GO SHIFT THE LAST BYTE'),
(0xee45, 0xee45, 'CARRY (ALWAYS SET HERE) INTO'),
(0xee46, 0xee46, 'AND MOVE'),
(0xee47, 0xee47, '1,0 TO BITS 7,6'),
(0xee48, 0xee48, 'SAVE SUB BYTE'),
(0xee4a, 0xee4a, 'MOVE MANTISSA OF FPA2 TO FPA0'),
(0xee4c, 0xee4c, 'NORMALIZE FPA0'),
(0xee4f, 0xee4f, "/0' ERROR"),
(0xee51, 0xee51, 'PROCESS THE ERROR'),
(0xee54, 0xee54, '* MOVE TOP 2 BYTES'),
(0xee58, 0xee58, '= MOVE BOTTOM 2 BYTES'),
(0xee5a, 0xee5a, '='),
(0xee5d, 0xee5d, 'SAVE ACCA'),
(0xee5f, 0xee5f, 'GET TOP TWO MANTISSA BYTES'),
(0xee61, 0xee61, 'SAVE MS BYTE OF MANTISSA AS MANTISSA SIGN'),
(0xee63, 0xee63, 'UNPACK MS BYTE'),
(0xee65, 0xee65, 'SAVE UNPACKED TOP 2 MANTISSA BYTES'),
(0xee67, 0xee67, 'CLEAR MANTISSA SUB BYTE'),
(0xee69, 0xee69, 'GET EXPONENT TO ACCB'),
(0xee6b, 0xee6b, '* MOVE LAST 2'),
(0xee6d, 0xee6d, '* MANTISSA BYTES'),
(0xee6f, 0xee6f, 'SAVE EXPONENT'),
(0xee71, 0xee71, 'RESTORE ACCA AND RETURN'),
(0xee73, 0xee73, 'POINT X TO MANTISSA OF FPA4'),
(0xee76, 0xee76, 'MOVE FPA0 TO FPA4'),
(0xee78, 0xee78, 'POINT X TO MANTISSA OF FPA3'),
(0xee7b, 0xee7b, 'SKIP TWO BYTES'),
(0xee7c, 0xee7c, 'POINT X TO VARIABLE DESCRIPTOR IN VARDES'),
(0xee7e, 0xee7e, '* COPY EXPONENT'),
(0xee82, 0xee82, 'GET MANTISSA SIGN BIT'),
(0xee84, 0xee84, 'MASK THE BOTTOM 7 BITS'),
(0xee86, 0xee86, 'AND BIT 7 OF MANTISSA SIGN INTO BIT 7 OF MS BYTE'),
(0xee88, 0xee88, 'SAVE MS BYTE'),
(0xee8a, 0xee8a, '* MOVE 2ND MANTISSA BYTE'),
(0xee8e, 0xee8e, '= MOVE BOTTOM 2 MANTISSA BYTES'),
(0xee90, 0xee90, '='),
(0xee93, 0xee93, '* COPY MANTISSA SIGN FROM'),
(0xee95, 0xee95, '* FPA1 TO FPA0'),
(0xee97, 0xee97, '= COPY EXPONENT + MS BYTE FROM'),
(0xee99, 0xee99, '= FPA1 TO FPA0'),
(0xee9b, 0xee9b, 'CLEAR MANTISSA SUB BYTE'),
(0xee9d, 0xee9d, '* COPY 2ND MANTISSA BYTE'),
(0xee9f, 0xee9f, '* FROM FPA1 TO FPA0'),
(0xeea1, 0xeea1, 'GET MANTISSA SIGN'),
(0xeea3, 0xeea3, '* COPY 3RD AND 4TH MANTISSA BYTE'),
(0xeea5, 0xeea5, '* FROM FPA1 TO FPA0'),
(0xeea8, 0xeea8, '* TRANSFER EXPONENT & MS BYTE'),
(0xeeac, 0xeeac, '= TRANSFER MIDDLE TWO BYTES'),
(0xeeae, 0xeeae, '='),
(0xeeb0, 0xeeb0, '* TRANSFER BOTTOM TWO BYTES'),
(0xeeb4, 0xeeb4, 'SET FLAGS ACCORDING TO EXPONENT'),
(0xeeb6, 0xeeb6, 'GET EXPONENT'),
(0xeeb8, 0xeeb8, 'BRANCH IF FPA0 = 0'),
(0xeeba, 0xeeba, 'GET SIGN OF MANTISSA'),
(0xeebc, 0xeebc, 'BIT 7 TO CARRY'),
(0xeebd, 0xeebd, 'NEGATIVE FLAG'),
(0xeebf, 0xeebf, 'BRANCH IF NEGATIVE MANTISSA'),
(0xeec1, 0xeec1, 'ACCB = 1 IF POSITIVE MANTISSA'),
(0xeec3, 0xeec3, 'SET ACCB ACCORDING TO SIGN OF FPA0'),
(0xeec5, 0xeec5, 'SAVE ACCB IN FPA0'),
(0xeec7, 0xeec7, 'CLEAR NUMBER 2 MANTISSA BYTE OF FPA0'),
(0xeec9, 0xeec9, 'EXPONENT REQUIRED IF FPA0 IS TO BE AN INTEGER'),
(0xeecb, 0xeecb, 'GET MS BYTE OF MANTISSA'),
(0xeecd, 0xeecd, 'SET CARRY IF POSITIVE MANTISSA'),
(0xeecf, 0xeecf, 'SAVE EXPONENT'),
(0xeed1, 0xeed1, '* ZERO OUT ACCD AND'),
(0xeed3, 0xeed3, '* BOTTOM HALF OF FPA0'),
(0xeed5, 0xeed5, 'CLEAR SUB BYTE'),
(0xeed7, 0xeed7, 'CLEAR SIGN OF FPA0 MANTISSA'),
(0xeed9, 0xeed9, 'GO NORMALIZE FPA0'),
(0xeedc, 0xeedc, 'FORCE MANTISSA SIGN OF FPA0 POSITIVE'),
(0xeedf, 0xeedf, 'CHECK EXPONENT OF (X)'),
(0xeee1, 0xeee1, 'BRANCH IF FPA = 0'),
(0xeee3, 0xeee3, 'GET MS BYTE OF MANTISSA OF (X)'),
(0xeee5, 0xeee5, 'EOR WITH SIGN OF FPA0'),
(0xeee7, 0xeee7, 'BRANCH IF SIGNS NOT ='),
(0xeee9, 0xeee9, '* GET EXPONENT OF'),
(0xeeeb, 0xeeeb, '* FPA0, COMPARE TO EXPONENT OF'),
(0xeeed, 0xeeed, '* (X) AND BRANCH IF <>.'),
(0xeeef, 0xeeef, '* GET MS BYTE OF (X), KEEP ONLY'),
(0xeef1, 0xeef1, "* THE SIGN BIT - 'AND' THE BOTTOM 7"),
(0xeef3, 0xeef3, '* BITS OF FPA0 INTO ACCB'),
(0xeef5, 0xeef5, '= COMPARE THE BOTTOM 7 BITS OF THE MANTISSA'),
(0xeef7, 0xeef7, '= MS BYTE AND BRANCH IF <>'),
(0xeef9, 0xeef9, '* COMPARE 2ND BYTE'),
(0xeefb, 0xeefb, '* OF MANTISSA,'),
(0xeefd, 0xeefd, '* BRANCH IF <>'),
(0xeeff, 0xeeff, '= COMPARE 3RD BYTE'),
(0xef01, 0xef01, '= OF MANTISSA,'),
(0xef03, 0xef03, '= BRANCH IF <>'),
(0xef05, 0xef05, '* SUBTRACT LS BYTE'),
(0xef07, 0xef07, '* OF (X) FROM LS BYTE OF'),
(0xef09, 0xef09, '* FPA0, BRANCH IF <>'),
(0xef0b, 0xef0b, 'RETURN IF FP (X) = FPA0'),
(0xef0c, 0xef0c, 'SHIFT CARRY TO BIT 7; CARRY SET IF FPA0 < (X)'),
(0xef0d, 0xef0d, 'TOGGLE SIZE COMPARISON BIT IF FPA0 IS NEGATIVE'),
(0xef0f, 0xef0f, 'GO SET ACCB ACCORDING TO COMPARISON'),
(0xef11, 0xef11, 'GET EXPONENT OF FPA0'),
(0xef13, 0xef13, 'ZERO MANTISSA IF FPA0 = 0'),
(0xef15, 0xef15, 'SUBTRACT $A0 FROM FPA0 EXPONENT T THIS WILL YIELD'),
(0xef17, 0xef17, 'TEST SIGN OF FPA0 MANTISSA'),
(0xef19, 0xef19, 'BRANCH IF POSITIVE'),
(0xef1b, 0xef1b, 'COMPLEMENT CARRY IN BYTE'),
(0xef1d, 0xef1d, 'NEGATE MANTISSA OF FPA0'),
(0xef20, 0xef20, 'POINT X TO FPA0'),
(0xef23, 0xef23, 'EXPONENT DIFFERENCE < -8?'),
(0xef25, 0xef25, 'YES'),
(0xef27, 0xef27, 'SHIFT FPA0 RIGHT UNTIL FPA0 EXPONENT = $A0'),
(0xef2a, 0xef2a, 'CLEAR CARRY IN BYTE'),
(0xef2d, 0xef2d, 'CLEAR CARRY IN BYTE'),
(0xef2f, 0xef2f, '* GET SIGN OF FPA0 MANTISSA'),
(0xef31, 0xef31, '* ROTATE IT INTO THE CARRY FLAG'),
(0xef32, 0xef32, 'ROTATE CARRY (MANTISSA SIGN) INTO BIT 7'),
(0xef34, 0xef34, 'DE-NORMALIZE FPA0'),
(0xef37, 0xef37, 'GET EXPONENT OF FPA0'),
(0xef39, 0xef39, 'LARGEST POSSIBLE INTEGER EXPONENT'),
(0xef3b, 0xef3b, 'RETURN IF FPA0 >= 32768'),
(0xef3d, 0xef3d, 'SHIFT THE BINARY POINT ONE TO THE RIGHT OF THE'),
(0xef3f, 0xef3f, 'ACCB = 0: ZERO OUT THE SUB BYTE'),
(0xef41, 0xef41, 'GET MANTISSA SIGN'),
(0xef43, 0xef43, 'FORCE MANTISSA SIGN TO BE POSITIVE'),
(0xef45, 0xef45, 'SET CARRY IF MANTISSA'),
(0xef47, 0xef47, '* GET DENORMALIZED EXPONENT AND'),
(0xef49, 0xef49, '* SAVE IT IN FPA0 EXPONENT'),
(0xef4b, 0xef4b, '= GET LS BYTE OF FPA0 AND'),
(0xef4d, 0xef4d, '= SAVE IT IN CHARAC'),
(0xef4f, 0xef4f, 'NORMALIZE FPA0'),
(0xef52, 0xef52, '* LOAD MANTISSA OF FPA0 WITH CONTENTS OF ACCB'),
(0xef5b, 0xef5b, '(X) = 0'),
(0xef5d, 0xef5d, '* ZERO OUT FPA0 & THE SIGN FLAG (COEFCT)'),
(0xef65, 0xef65, 'INITIALIZE EXPONENT & EXPONENT SIGN FLAG TO ZERO'),
(0xef67, 0xef67, 'INITIALIZE RIGHT DECIMAL CTR & DECIMAL PT FLAG TO 0'),
(0xef69, 0xef69, 'IF CARRY SET (NUMERIC CHARACTER), ASSUME ACCA CONTAINS FIRST'),
(0xef6b, 0xef6b, 'CALL EXTENDED BASIC ADD-IN'),
(0xef6e, 0xef6e, '* CHECK FOR A LEADING MINUS SIGN AND BRANCH'),
(0xef70, 0xef70, '* IF NO MINUS SIGN'),
(0xef72, 0xef72, 'TOGGLE SIGN; 0 = +; FF = -'),
(0xef74, 0xef74, 'INTERPRET THE REST OF THE STRING'),
(0xef76, 0xef76, '* CHECK FOR LEADING PLUS SlGN AND BRANCH'),
(0xef78, 0xef78, '* IF NOT A PLUS SIGN'),
(0xef7a, 0xef7a, 'GET NEXT INPUT CHARACTER FROM BASIC'),
(0xef7c, 0xef7c, 'BRANCH IF NUMERIC CHARACTER'),
(0xef7e, 0xef7e, 'DECIMAL POlNT?'),
(0xef80, 0xef80, 'YES'),
(0xef82, 0xef82, '"E" SHORTHAND FORM (SCIENTIFIC NOTATION)?'),
(0xef84, 0xef84, 'NO'),
(0xef86, 0xef86, 'GET NEXT INPUT CHARACTER FROM BASIC'),
(0xef88, 0xef88, 'BRANCH IF NUMERIC'),
(0xef8a, 0xef8a, 'MINUS TOKEN?'),
(0xef8c, 0xef8c, 'YES'),
(0xef8e, 0xef8e, 'ASCII MINUS?'),
(0xef90, 0xef90, 'YES'),
(0xef92, 0xef92, 'PLUS TOKEN?'),
(0xef94, 0xef94, 'YES'),
(0xef96, 0xef96, 'ASCII PLUS?'),
(0xef98, 0xef98, 'YES'),
(0xef9a, 0xef9a, 'BRANCH IF NO SIGN FOUND'),
(0xef9c, 0xef9c, 'SET EXPONENT SIGN FLAG TO NEGATIVE'),
(0xef9e, 0xef9e, 'GET NEXT INPUT CHARACTER FROM BASIC'),
(0xefa0, 0xefa0, 'IF NUMERIC CHARACTER, CONVERT TO BINARY'),
(0xefa2, 0xefa2, '* CHECK EXPONENT SIGN FLAG'),
(0xefa4, 0xefa4, '* AND BRANCH IF POSITIVE'),
(0xefa6, 0xefa6, 'NEGATE VALUE OF EXPONENT'),
(0xefaa, 0xefaa, '*TOGGLE DECIMAL PT FLAG AND INTERPRET ANOTHER'),
(0xefac, 0xefac, '*CHARACTER IF <> 0 - TERMINATE INTERPRETATION'),
(0xefae, 0xefae, '* GET EXPONENT, SUBTRACT THE NUMBER OF'),
(0xefb0, 0xefb0, '* PLACES TO THE RIGHT OF DECIMAL POINT'),
(0xefb2, 0xefb2, '* AND RESAVE IT.'),
(0xefb4, 0xefb4, 'EXIT ROUTINE IF ADJUSTED EXPONENT = ZERO'),
(0xefb6, 0xefb6, 'BRANCH IF POSITIVE EXPONENT'),
(0xefb8, 0xefb8, 'DIVIDE FPA0 BY 10'),
(0xefbb, 0xefbb, 'INCREMENT EXPONENT COUNTER (MULTIPLY BY 10)'),
(0xefbd, 0xefbd, 'KEEP MULTIPLYING'),
(0xefbf, 0xefbf, 'EXIT ROUTINE'),
(0xefc1, 0xefc1, 'MULTIPLY FPA0 BY 10'),
(0xefc4, 0xefc4, 'DECREMENT EXPONENT COUNTER (DIVIDE BY 10)'),
(0xefc6, 0xefc6, 'KEEP MULTIPLYING'),
(0xefc8, 0xefc8, 'GET THE SIGN FLAG'),
(0xefca, 0xefca, 'RETURN IF POSITIVE'),
(0xefcc, 0xefcc, 'TOGGLE MANTISSA SIGN OF FPA0, IF NEGATIVE'),
(0xefcf, 0xefcf, '*GET THE RIGHT DECIMAL COUNTER AND SUBTRACT'),
(0xefd1, 0xefd1, '*THE DECIMAL POINT FLAG FROM IT. IF DECIMAL POINT'),
(0xefd3, 0xefd3, '*FLAG=0, NOTHING HAPPENS. IF DECIMAL POINT FLAG IS'),
(0xefd5, 0xefd5, 'SAVE NEW DIGIT ON STACK'),
(0xefd7, 0xefd7, 'MULTIPLY FPA0 BY 10'),
(0xefda, 0xefda, 'GET NEW DIGIT BACK'),
(0xefdc, 0xefdc, 'MASK OFF ASCII'),
(0xefde, 0xefde, 'ADD ACCB TO FPA0'),
(0xefe0, 0xefe0, 'GET ANOTHER CHARACTER FROM BASIC'),
(0xefe2, 0xefe2, 'PACK FPA0 AND SAVE IT IN FPA3'),
(0xefe5, 0xefe5, 'CONVERT ACCB TO FP NUMBER IN FPA0'),
(0xefe8, 0xefe8, '* ADD FPA0 TO'),
(0xefeb, 0xefeb, '* FPA3'),
(0xeff0, 0xeff0, 'TIMES 2'),
(0xeff1, 0xeff1, 'TIMES 4'),
(0xeff2, 0xeff2, 'ADD 1 = TIMES 5'),
(0xeff4, 0xeff4, 'TIMES 10'),
(0xeff5, 0xeff5, '*MASK OFF ASCII FROM ACCA, PUSH'),
(0xeff7, 0xeff7, '*RESULT ONTO THE STACK AND'),
(0xeff9, 0xeff9, 'ADD lT TO ACCB'),
(0xeffb, 0xeffb, 'SAVE IN V47'),
(0xeffd, 0xeffd, 'INTERPRET ANOTHER CHARACTER'),
(0xefff, 0xefff, '* 99999999.9'),
(0xf004, 0xf004, '* 999999999'),
(0xf009, 0xf009, '* 1E + 09'),
(0xf00e, 0xf00e, 'POINT X TO " IN " MESSAGE'),
(0xf011, 0xf011, 'COPY A STRING FROM (X) TO CONSOLE OUT'),
(0xf013, 0xf013, 'GET CURRENT BASIC LINE NUMBER TO ACCD'),
(0xf015, 0xf015, 'SAVE ACCD IN TOP HALF OF FPA0'),
(0xf017, 0xf017, "REQ'D EXPONENT IF TOP HALF OF ACCD = INTEGER"),
(0xf019, 0xf019, 'SET CARRY FLAG - FORCE POSITIVE MANTISSA'),
(0xf01a, 0xf01a, 'ZERO BOTTOM HALF AND SIGN OF FPA0, THEN'),
(0xf01d, 0xf01d, 'CONVERT FP NUMBER TO ASCII STRING'),
(0xf01f, 0xf01f, 'COPY A STRING FROM (X) TO CONSOLE OUT'),
(0xf022, 0xf022, 'POINT U TO BUFFER WHICH WILL NOT CAUSE'),
(0xf025, 0xf025, 'SPACE = DEFAULT SIGN FOR POSITIVE #'),
(0xf027, 0xf027, 'GET SIGN OF FPA0'),
(0xf029, 0xf029, 'BRANCH IF POSITIVE'),
(0xf02b, 0xf02b, 'ASCII MINUS SIGN'),
(0xf02d, 0xf02d, 'STORE SIGN OF NUMBER'),
(0xf02f, 0xf02f, 'SAVE BUFFER POINTER'),
(0xf031, 0xf031, 'SAVE SIGN (IN ASCII)'),
(0xf033, 0xf033, 'ASCII ZERO IF EXPONENT = 0'),
(0xf035, 0xf035, 'GET FPA0 EXPONENT'),
(0xf037, 0xf037, 'BRANCH IF FPA0 = 0'),
(0xf03b, 0xf03b, 'BASE 10 EXPONENT=0 FOR FP NUMBER > 1'),
(0xf03c, 0xf03c, 'CHECK EXPONENT'),
(0xf03e, 0xf03e, 'BRANCH IF FP NUMBER > 1'),
(0xf040, 0xf040, 'POINT X TO FP 1E+09'),
(0xf043, 0xf043, 'MULTIPLY FPA0 BY (X)'),
(0xf046, 0xf046, 'BASE 10 EXPONENT = -9'),
(0xf048, 0xf048, 'BASE 10 EXPONENT'),
(0xf04a, 0xf04a, 'POINT X TO FP 999,999,999'),
(0xf04d, 0xf04d, 'COMPARE FPA0 TO 999,999,999'),
(0xf050, 0xf050, 'BRANCH IF > 999,999,999'),
(0xf052, 0xf052, 'POINT X TO FP 99,999,999.9'),
(0xf055, 0xf055, 'COMPARE FPA0 TO 99,999,999.9'),
(0xf058, 0xf058, 'BRANCH IF > 99,999,999.9 (IN RANGE)'),
(0xf05a, 0xf05a, 'MULTIPLY FPA0 BY 10'),
(0xf05d, 0xf05d, 'SUBTRACT ONE FROM DECIMAL OFFSET'),
(0xf05f, 0xf05f, 'PSEUDO - NORMALIZE SOME MORE'),
(0xf061, 0xf061, 'DIVIDE FPA0 BY 10'),
(0xf064, 0xf064, 'ADD ONE TO BASE 10 EXPONENT'),
(0xf066, 0xf066, 'PSEUDO - NORMALIZE SOME MORE'),
(0xf068, 0xf068, 'ADD .5 TO FPA0 (ROUND OFF)'),
(0xf06b, 0xf06b, 'CONVERT FPA0 TO AN INTEGER'),
(0xf06e, 0xf06e, 'DEFAULT DECIMAL POINT FLAG (FORCE IMMED DECIMAL PT)'),
(0xf070, 0xf070, '* GET BASE 10 EXPONENT AND ADD TEN TO IT'),
(0xf072, 0xf072, "* (NUMBER 'NORMALIZED' TO 9 PLACES & DECIMAL PT)"),
(0xf074, 0xf074, 'BRANCH IF NUMBER < 1.0'),
(0xf076, 0xf076, 'NINE PLACES MAY BE DISPLAYED WITHOUT'),
(0xf078, 0xf078, 'BRANCH IF SCIENTIFIC NOTATION REQUIRED'),
(0xf07a, 0xf07a, '* SUBTRACT 1 FROM MODIFIED BASE 10 EXPONENT CTR'),
(0xf07b, 0xf07b, '* AND SAVE IT IN ACCB (DECiMAL POINT FLAG)'),
(0xf07d, 0xf07d, "FORCE EXPONENT = 0 - DON'T USE SCIENTIFIC NOTATION"),
(0xf07f, 0xf07f, '* SUBTRACT TWO (WITHOUT AFFECTING CARRY)'),
(0xf080, 0xf080, '* FROM BASE 10 EXPONENT'),
(0xf081, 0xf081, 'SAVE EXPONENT - ZERO EXPONENT = DO NOT DISPLAY'),
(0xf083, 0xf083, 'DECIMAL POINT FLAG - NUMBER OF PLACES TO'),
(0xf085, 0xf085, 'BRANCH IF >= 1'),
(0xf087, 0xf087, 'POINT U TO THE STRING BUFFER'),
(0xf089, 0xf089, '* STORE A PERIOD'),
(0xf08b, 0xf08b, '* IN THE BUFFER'),
(0xf08d, 0xf08d, 'POINT FLAG'),
(0xf08e, 0xf08e, 'BRANCH IF NOTHING TO LEFT OF DECIMAL POINT'),
(0xf090, 0xf090, '* STORE A ZERO'),
(0xf092, 0xf092, '* IN THE BUFFER'),
(0xf094, 0xf094, 'POINT X TO FP POWER OF 10 MANTISSA'),
(0xf097, 0xf097, 'INITIALIZE DIGIT COUNTER TO 0+$80'),
(0xf099, 0xf099, '* ADD MANTISSA LS'),
(0xf09b, 0xf09b, '* BYTE OF FPA0'),
(0xf09d, 0xf09d, '* AND (X)'),
(0xf09f, 0xf09f, '= ADD MANTISSA'),
(0xf0a1, 0xf0a1, '= NUMBER 3 BYTE OF'),
(0xf0a3, 0xf0a3, '= FPA0 AND (X)'),
(0xf0a5, 0xf0a5, '* ADD MANTISSA'),
(0xf0a7, 0xf0a7, '* NUMBER 2 BYTE OF'),
(0xf0a9, 0xf0a9, '* FPA0 AND (X)'),
(0xf0ab, 0xf0ab, '= ADD MANTISSA'),
(0xf0ad, 0xf0ad, '= MS BYTE OF'),
(0xf0af, 0xf0af, '= FPA0 AND (X)'),
(0xf0b1, 0xf0b1, 'ADD ONE TO DIGIT COUNTER'),
(0xf0b2, 0xf0b2, 'CARRY INTO BIT 7'),
(0xf0b3, 0xf0b3, '*SET OVERFLOW FLAG AND BRANCH IF CARRY = 1 AND'),
(0xf0b4, 0xf0b4, '*POSITIVE MANTISSA OR CARRY = 0 AND NEG MANTISSA'),
(0xf0b6, 0xf0b6, 'BRANCH IF NEGATIVE MANTISSA'),
(0xf0b8, 0xf0b8, "* TAKE THE 9'S COMPLEMENT IF"),
(0xf0ba, 0xf0ba, '* ADDING MANTISSA'),
(0xf0bb, 0xf0bb, 'ADD ASCII OFFSET TO DIGIT'),
(0xf0bd, 0xf0bd, 'MOVE TO NEXT POWER OF 10 MANTISSA'),
(0xf0bf, 0xf0bf, 'SAVE DIGIT IN ACCA'),
(0xf0c1, 0xf0c1, 'MASK OFF BIT 7 (ADD/SUBTRACT FLAG)'),
(0xf0c3, 0xf0c3, 'STORE DIGIT IN STRING BUFFER'),
(0xf0c5, 0xf0c5, 'DECREMENT DECIMAL POINT FLAG'),
(0xf0c7, 0xf0c7, 'BRANCH IF NOT TIME FOR DECIMAL POINT'),
(0xf0c9, 0xf0c9, '* STORE DECIMAL POINT IN'),
(0xf0cb, 0xf0cb, '* STRING BUFFER'),
(0xf0cd, 0xf0cd, 'TOGGLE BIT 7 (ADD/SUBTRACT FLAG)'),
(0xf0ce, 0xf0ce, 'MASK OFF ALL BUT ADD/SUBTRACT FLAG'),
(0xf0d0, 0xf0d0, 'COMPARE X TO END OF MANTISSA TABLE'),
(0xf0d3, 0xf0d3, 'BRANCH IF NOT AT END OF TABLE'),
(0xf0d5, 0xf0d5, 'GET THE LAST CHARACTER; MOVE POINTER BACK'),
(0xf0d7, 0xf0d7, 'WAS IT A ZERO?'),
(0xf0d9, 0xf0d9, 'IGNORE TRAILING ZEROS IF SO'),
(0xf0db, 0xf0db, 'CHECK FOR DECIMAL POINT'),
(0xf0dd, 0xf0dd, 'BRANCH IF NOT DECIMAL POINT'),
(0xf0df, 0xf0df, 'STEP OVER THE DECIMAL POINT'),
(0xf0e1, 0xf0e1, 'ASCII PLUS SIGN'),
(0xf0e3, 0xf0e3, 'GET SCIENTIFIC NOTATION EXPONENT'),
(0xf0e5, 0xf0e5, 'BRANCH IF NOT SCIENTIFIC NOTATION'),
(0xf0e7, 0xf0e7, 'BRANCH IF POSITIVE EXPONENT'),
(0xf0e9, 0xf0e9, 'ASCII MINUS SIGN'),
(0xf0eb, 0xf0eb, 'NEGATE EXPONENT IF NEGATIVE'),
(0xf0ec, 0xf0ec, 'STORE EXPONENT SIGN IN STRING'),
(0xf0ee, 0xf0ee, "* GET ASCII 'E' (SCIENTIFIC NOTATION"),
(0xf0f0, 0xf0f0, '* FLAG) AND SAVE IT IN THE STRING'),
(0xf0f2, 0xf0f2, 'INITIALIZE ACCA TO ASCII ZERO'),
(0xf0f4, 0xf0f4, "ADD ONE TO 10'S DIGIT OF EXPONENT"),
(0xf0f5, 0xf0f5, 'SUBTRACT 10 FROM ACCB'),
(0xf0f7, 0xf0f7, "ADD 1 TO 10'S DIGIT IF NO CARRY"),
(0xf0f9, 0xf0f9, 'CONVERT UNITS DIGIT TO ASCII'),
(0xf0fb, 0xf0fb, 'SAVE EXPONENT IN STRING'),
(0xf0fd, 0xf0fd, 'CLEAR LAST BYTE (TERMINATOR)'),
(0xf0ff, 0xf0ff, 'GO RESET POINTER'),
(0xf101, 0xf101, 'STORE LAST CHARACTER'),
(0xf103, 0xf103, 'CLEAR LAST BYTE (TERMINATOR - REQUIRED BY'),
(0xf105, 0xf105, 'RESET POINTER TO START OF BUFFER'),
(0xf109, 0xf109, 'FLOATING POINT .5'),
(0xf10e, 0xf10e, '-100000000'),
(0xf112, 0xf112, '10000000'),
(0xf116, 0xf116, '-1000000'),
(0xf11a, 0xf11a, '100000'),
(0xf11e, 0xf11e, '-10000'),
(0xf122, 0xf122, '1000'),
(0xf126, 0xf126, '-100'),
(0xf12a, 0xf12a, '10'),
(0xf12e, 0xf12e, '-1'),
(0xf132, 0xf132, 'GET EXPONENT OF FPA0'),
(0xf134, 0xf134, 'BRANCH IF FPA0 = 0'),
(0xf136, 0xf136, 'TOGGLE MANTISSA SIGN OF FPA0'),
(0xf139, 0xf139, 'SAVE COEFFICIENT TABLE POINTER'),
(0xf13b, 0xf13b, 'MOVE FPA0 TO FPA3'),
(0xf13e, 0xf13e, 'MULTIPLY FPA3 BY FPA0'),
(0xf140, 0xf140, 'EXPAND POLYNOMIAL'),
(0xf142, 0xf142, 'POINT X TO FPA3'),
(0xf145, 0xf145, 'MULTIPLY (X) BY FPA0'),
(0xf148, 0xf148, 'SAVE COEFFICIENT TABLE POINTER'),
(0xf14a, 0xf14a, 'MOVE FPA0 TO FPA4'),
(0xf14d, 0xf14d, 'GET THE COEFFICIENT POINTER'),
(0xf14f, 0xf14f, 'GET THE TOP OF COEFFICIENT TABLE TO'),
(0xf151, 0xf151, '* USE AND STORE IT IN TEMPORARY COUNTER'),
(0xf153, 0xf153, 'SAVE NEW COEFFICIENT POINTER'),
(0xf155, 0xf155, 'MULTIPLY (X) BY FPA0'),
(0xf157, 0xf157, '*GET COEFFICIENT POINTER'),
(0xf159, 0xf159, '*MOVE TO NEXT FP NUMBER'),
(0xf15b, 0xf15b, '*SAVE NEW COEFFICIENT POINTER'),
(0xf15d, 0xf15d, 'ADD (X) AND FPA0'),
(0xf160, 0xf160, 'POINT (X) TO FPA4'),
(0xf163, 0xf163, 'DECREMENT TEMP COUNTER'),
(0xf165, 0xf165, 'BRANCH IF MORE COEFFICIENTS LEFT'),
(0xf168, 0xf168, 'TEST FPA0'),
(0xf16b, 0xf16b, 'BRANCH IF FPA0 = NEGATIVE'),
(0xf16d, 0xf16d, 'BRANCH IF FPA0 = 0'),
(0xf16f, 0xf16f, 'CONVERT FPA0 TO AN INTEGER'),
(0xf171, 0xf171, 'PACK FPA0 TO FPA3'),
(0xf174, 0xf174, 'GET A RANDOM NUMBER: FPA0 < 1.0'),
(0xf176, 0xf176, 'POINT (X) TO FPA3'),
(0xf179, 0xf179, 'MULTIPLY (X) BY FPA0'),
(0xf17b, 0xf17b, 'POINT (X) TO FP VALUE OF 1.0'),
(0xf17e, 0xf17e, 'ADD 1.0 TO FPA0'),
(0xf181, 0xf181, 'CONVERT FPA0 TO AN INTEGER'),
(0xf184, 0xf184, '* MOVE VARIABLE'),
(0xf186, 0xf186, '* RANDOM NUMBER'),
(0xf188, 0xf188, '* SEED TO'),
(0xf18a, 0xf18a, '* FPA0'),
(0xf18c, 0xf18c, '= MOVE FIXED'),
(0xf18f, 0xf18f, '= RANDOM NUMBER'),
(0xf191, 0xf191, '= SEED TO'),
(0xf194, 0xf194, '= MANTISSA OF FPA0'),
(0xf196, 0xf196, 'MULTIPLY FPA0 X FPA1'),
(0xf199, 0xf199, 'GET THE TWO LOWEST ORDER PRODUCT BYTES'),
(0xf19b, 0xf19b, 'ADD A CONSTANT'),
(0xf19e, 0xf19e, 'SAVE NEW LOW ORDER VARIABLE RANDOM # SEED'),
(0xf1a0, 0xf1a0, 'SAVE NEW LOW ORDER BYTES OF FPA0 MANTISSA'),
(0xf1a2, 0xf1a2, 'GET 2 MORE LOW ORDER PRODUCT BYTES'),
(0xf1a4, 0xf1a4, 'ADD A CONSTANT'),
(0xf1a6, 0xf1a6, 'ADD A CONSTANT'),
(0xf1a8, 0xf1a8, 'SAVE NEW HIGH ORDER VARIABLE RANDOM # SEED'),
(0xf1aa, 0xf1aa, 'SAVE NEW HIGH ORDER FPA0 MANTISSA'),
(0xf1ac, 0xf1ac, 'FORCE FPA0 MANTISSA = POSITIVE'),
(0xf1ae, 0xf1ae, '* SET FPA0 BIASED EXPONENT'),
(0xf1b0, 0xf1b0, '* TO 0 1 < FPA0 < 0'),
(0xf1b2, 0xf1b2, 'GET A BYTE FROM FPA2 (MORE RANDOMNESS)'),
(0xf1b4, 0xf1b4, 'SAVE AS SUB BYTE'),
(0xf1b6, 0xf1b6, 'NORMALIZE FPA0'),
(0xf1b9, 0xf1b9, '*CONSTANT RANDOM NUMBER GENERATOR SEED'),
(0xf1bd, 0xf1bd, 'COPY FPA0 TO FPA1'),
(0xf1c0, 0xf1c0, 'POINT (X) TO 2*PI'),
(0xf1c3, 0xf1c3, '*GET MANTISSA SIGN OF FPA1'),
(0xf1c5, 0xf1c5, '*AND DIVIDE FPA0 BY 2*PI'),
(0xf1c8, 0xf1c8, 'COPY FPA0 TO FPA1'),
(0xf1cb, 0xf1cb, 'CONVERT FPA0 TO AN INTEGER'),
(0xf1cd, 0xf1cd, 'SET RESULT SIGN = POSITIVE'),
(0xf1cf, 0xf1cf, '*GET EXPONENT OF FPA1'),
(0xf1d1, 0xf1d1, '*GET EXPONENT OF FPA0'),
(0xf1d3, 0xf1d3, '*SUBTRACT FPA0 FROM FPA1'),
(0xf1d6, 0xf1d6, 'POINT X TO FP (.25)'),
(0xf1d9, 0xf1d9, 'SUBTRACT FPA0 FROM .25 (PI/2)'),
(0xf1dc, 0xf1dc, 'GET MANTISSA SIGN OF FPA0'),
(0xf1de, 0xf1de, 'SAVE IT ON STACK'),
(0xf1e0, 0xf1e0, 'BRANCH IF MANTISSA POSITIVE'),
(0xf1e2, 0xf1e2, 'ADD .5 (PI) TO FPA0'),
(0xf1e5, 0xf1e5, 'GET SIGN OF FPA0'),
(0xf1e7, 0xf1e7, 'BRANCH IF NEGATIVE'),
(0xf1e9, 0xf1e9, 'COM IF +(3*PI)/2 >= ARGUMENT >+ PI/2 (QUADRANT FLAG)'),
(0xf1eb, 0xf1eb, 'TOGGLE MANTISSA SIGN OF FPA0'),
(0xf1ee, 0xf1ee, 'POINT X TO FP (.25)'),
(0xf1f1, 0xf1f1, 'ADD .25 (PI/2) TO FPA0'),
(0xf1f4, 0xf1f4, 'GET OLD MANTISSA SIGN'),
(0xf1f6, 0xf1f6, '* BRANCH IF OLD'),
(0xf1f7, 0xf1f7, '* SIGN WAS POSITIVE'),
(0xf1f9, 0xf1f9, 'TOGGLE MANTISSA SIGN'),
(0xf1fc, 0xf1fc, 'POINT X TO TABLE OF COEFFICIENTS'),
(0xf1ff, 0xf1ff, 'GO CALCULATE POLYNOMIAL VALUE'),
(0xf202, 0xf202, '6.28318531 (2*PI)'),
(0xf207, 0xf207, '.25'),
(0xf20c, 0xf20c, 'SIX COEFFICIENTS'),
(0xf20d, 0xf20d, '* -((2*PI)**11)/11!'),
(0xf212, 0xf212, '* ((2*PI)**9)/9!'),
(0xf217, 0xf217, '* -((2*PI)**7)/7!'),
(0xf21c, 0xf21c, '* ((2*PI)**5)/5!'),
(0xf221, 0xf221, '* -((2*PI)**3)/3!'),
(0xf22b, 0xf22b, 'UNUSED GARBAGE BYTES'),
(0xf230, 0xf230, 'UNUSED GARBAGE BYTES'),
(0xf235, 0xf235, 'POINT X TO FP CONSTANT (P1/2)'),
(0xf238, 0xf238, 'ADD FPA0 TO (X)'),
(0xf23b, 0xf23b, 'JUMP TO SIN ROUTINE'),
(0xf23e, 0xf23e, 'PACK FPA0 AND MOVE IT TO FPA3'),
(0xf241, 0xf241, 'RESET QUADRANT FLAG'),
(0xf243, 0xf243, 'CALCULATE SIN OF ARGUMENT'),
(0xf245, 0xf245, 'POINT X TO FPA5'),
(0xf248, 0xf248, 'PACK FPA0 AND MOVE IT TO FPA5'),
(0xf24b, 0xf24b, 'POINT X TO FPA3'),
(0xf24e, 0xf24e, 'MOVE FPA3 TO FPA0'),
(0xf251, 0xf251, 'FORCE FPA0 MANTISSA TO BE POSITIVE'),
(0xf253, 0xf253, 'GET THE QUADRANT FLAG - COS NEGATIVE IN QUADS 2,3'),
(0xf255, 0xf255, 'CALCULATE VALUE OF COS(FPA0)'),
(0xf257, 0xf257, 'CHECK EXPONENT OF FPA0'),
(0xf259, 0xf259, "'OV' ERROR IF COS(X)=0"),
(0xf25d, 0xf25d, 'POINT X TO FPA5'),
(0xf260, 0xf260, 'DIVIDE (X) BY FPA0 - SIN(X)/COS(X)'),
(0xf263, 0xf263, 'SAVE SIGN FLAG ON STACK'),
(0xf265, 0xf265, 'EXPAND POLYNOMIAL'),
(0xf268, 0xf268, '1.57079633 (PI/2)'),
(0xf26d, 0xf26d, '* GET THE SIGN OF THE MANTISSA AND'),
(0xf26f, 0xf26f, '* SAVE IT ON THE STACK'),
(0xf271, 0xf271, 'BRANCH IF POSITIVE MANTISSA'),
(0xf273, 0xf273, 'CHANGE SIGN OF FPA0'),
(0xf275, 0xf275, '* GET EXPONENT OF FPA0 AND'),
(0xf277, 0xf277, '* SAVE IT ON THE STACK'),
(0xf279, 0xf279, 'IS FPAO < 1.0?'),
(0xf27b, 0xf27b, 'YES'),
(0xf27d, 0xf27d, 'POINT X TO FP CONSTANT 1.0'),
(0xf280, 0xf280, 'GET RECIPROCAL OF FPA0'),
(0xf282, 0xf282, 'POINT (X) TO TAYLOR SERIES COEFFICIENTS'),
(0xf285, 0xf285, 'EXPAND POLYNOMIAL'),
(0xf288, 0xf288, 'GET EXPONENT OF ARGUMENT'),
(0xf28a, 0xf28a, 'WAS ARGUMENT < 1.0?'),
(0xf28c, 0xf28c, 'YES'),
(0xf28e, 0xf28e, 'POINT (X) TO FP NUMBER (PI/2)'),
(0xf291, 0xf291, 'SUBTRACT FPA0 FROM (PI/2)'),
(0xf294, 0xf294, '* GET SIGN OF INITIAL ARGUMENT MANTISSA'),
(0xf296, 0xf296, '* AND SET FLAGS ACCORDING TO IT'),
(0xf297, 0xf297, 'RETURN IF ARGUMENT WAS POSITIVE'),
(0xf299, 0xf299, 'CHANGE MANTISSA SIGN OF FPA0'),
(0xf29d, 0xf29d, 'TWELVE COEFFICIENTS'),
(0xf29e, 0xf29e, '-6.84793912E-04 1/23'),
(0xf2a3, 0xf2a3, '+4.85094216E-03 1/21'),
(0xf2a8, 0xf2a8, '-0.0161117018'),
(0xf2ad, 0xf2ad, '0.0342096381'),
(0xf2b2, 0xf2b2, '-0.0542791328'),
(0xf2b7, 0xf2b7, '0.0724571965'),
(0xf2bc, 0xf2bc, '-0.0898023954'),
(0xf2c1, 0xf2c1, '0.110932413'),
(0xf2c6, 0xf2c6, '-0.142839808'),
(0xf2cb, 0xf2cb, '0.199999121'),
(0xf2d0, 0xf2d0, '-0.333333316'),
(0xf2d5, 0xf2d5, '1'),
(0xf2da, 0xf2da, 'FOUR COEFFICIENTS'),
(0xf2db, 0xf2db, '0.434255942'),
(0xf2e0, 0xf2e0, '0.576584541'),
(0xf2e5, 0xf2e5, '0.961800759'),
(0xf2ea, 0xf2ea, '2.88539007'),
(0xf2ef, 0xf2ef, '1/SQR(2)'),
(0xf2f4, 0xf2f4, 'SQR(2)'),
(0xf2f9, 0xf2f9, '-0.5'),
(0xf2fe, 0xf2fe, 'LN(2)'),
(0xf303, 0xf303, 'CHECK STATUS OF FPA0'),
(0xf306, 0xf306, "'FC' ERROR IF NEGATIVE OR ZERO"),
(0xf30a, 0xf30a, 'POINT (X) TO FP NUMBER (1/SQR(2))'),
(0xf30d, 0xf30d, '*GET EXPONENT OF ARGUMENT'),
(0xf30f, 0xf30f, '*SUBTRACT OFF THE BIAS AND'),
(0xf311, 0xf311, '*SAVE IT ON THE STACK'),
(0xf317, 0xf317, 'ADD FPA0 TO (X)'),
(0xf31a, 0xf31a, 'POINT X TO SQR(2)'),
(0xf31d, 0xf31d, 'DIVIDE SQR(2) BY FPA0'),
(0xf320, 0xf320, 'POINT X TO FP VALUE OF 1.00'),
(0xf323, 0xf323, 'SUBTRACT FPA0 FROM (X)'),
(0xf326, 0xf326, 'POINT X TO TABLE OF COEFFICIENTS'),
(0xf329, 0xf329, 'EXPAND POLYNOMIAL'),
(0xf32c, 0xf32c, 'POINT X TO FP VALUE OF (-.5)'),
(0xf32f, 0xf32f, 'ADD FPA0 TO X'),
(0xf332, 0xf332, 'GET EXPONENT OF ARGUMENT BACK (WITHOUT BIAS)'),
(0xf334, 0xf334, 'ADD ACCB TO FPA0'),
(0xf337, 0xf337, 'POINT X TO LN(2)'),
(0xf33a, 0xf33a, 'MULTIPLY FPA0 * LN(2)'),
(0xf33d, 0xf33d, 'MOVE FPA0 TO FPA1'),
(0xf340, 0xf340, 'POINT (X) TO FP NUMBER (.5)'),
(0xf343, 0xf343, 'COPY A PACKED NUMBER FROM (X) TO FPA0'),
(0xf346, 0xf346, 'DO A NATURAL EXPONENTIATION IF EXPONENT = 0'),
(0xf348, 0xf348, '*CHECK VALUE BEING EXPONENTIATED'),
(0xf349, 0xf349, '*AND BRANCH IF IT IS <> 0'),
(0xf34b, 0xf34b, 'FPA0=0 IF RAISING ZERO TO A POWER'),
(0xf34e, 0xf34e, '* PACK FPA0 AND SAVE'),
(0xf351, 0xf351, "* IT IN FPA5 (ARGUMENT'S EXPONENT)"),
(0xf354, 0xf354, 'ACCB=DEFAULT RESULT SIGN FLAG; 0=POSITIVE'),
(0xf355, 0xf355, '*CHECK THE SIGN OF ARGUMENT'),
(0xf357, 0xf357, '*BRANCH IF POSITIVE'),
(0xf359, 0xf359, 'CONVERT EXPONENT INTO AN INTEGER'),
(0xf35c, 0xf35c, 'POINT X TO FPA5 (ORIGINAL EXPONENT)'),
(0xf35f, 0xf35f, 'GET MANTISSA SIGN OF FPA1 (ARGUMENT)'),
(0xf361, 0xf361, '*COMPARE FPA0 TO (X) AND'),
(0xf364, 0xf364, '*BRANCH IF NOT EQUAL'),
(0xf366, 0xf366, 'TOGGLE FPA1 MANTISSA SIGN - FORCE POSITIVE'),
(0xf367, 0xf367, 'GET LS BYTE OF INTEGER VALUE OF EXPONENT (RESULT SIGN FLAG)'),
(0xf369, 0xf369, 'COPY FPA1 TO FPA0; ACCA = MANTISSA SIGN'),
(0xf36c, 0xf36c, 'PUT RESULT SIGN FLAG ON THE STACK'),
(0xf371, 0xf371, 'POINT (X) TO FPA5'),
(0xf374, 0xf374, 'MULTIPLY FPA0 BY FPA5'),
(0xf377, 0xf377, 'CALCULATE E**(FPA0)'),
(0xf379, 0xf379, '* GET RESULT SIGN FLAG FROM THE STACK'),
(0xf37b, 0xf37b, 'IF NEGATIVE'),
(0xf37c, 0xf37c, 'CHANGE SIGN OF FPA0 MANTISSA'),
(0xf381, 0xf381, '1.44269504 ( CF )'),
(0xf386, 0xf386, 'EIGHT COEFFICIENTS'),
(0xf387, 0xf387, '2.14987637E-05: 1/(7!*(CF**7))'),
(0xf38c, 0xf38c, '1.4352314E-04 : 1/(6!*(CF**6))'),
(0xf391, 0xf391, '1.34226348E-03: 1/(5!*(CF**5))'),
(0xf396, 0xf396, '9.61401701E-03: 1/(4!*(CF**4))'),
(0xf39b, 0xf39b, '0.0555051269'),
(0xf3a0, 0xf3a0, '0.240226385'),
(0xf3a5, 0xf3a5, '0.693147186'),
(0xf3aa, 0xf3aa, '1'),
(0xf3af, 0xf3af, 'POINT X TO THE CORRECTION FACTOR'),
(0xf3b2, 0xf3b2, 'MULTIPLY FPA0 BY (X)'),
(0xf3b5, 0xf3b5, 'PACK FPA0 AND STORE IT IN FPA3'),
(0xf3b8, 0xf3b8, '*GET EXPONENT OF FPA0 AND'),
(0xf3ba, 0xf3ba, '*COMPARE TO THE MAXIMUM VALUE'),
(0xf3bc, 0xf3bc, 'BRANCH IF FPA0 < 128'),
(0xf3be, 0xf3be, "SET FPA0 = 0 OR 'OV' ERROR"),
(0xf3c1, 0xf3c1, 'CONVERT FPA0 TO INTEGER'),
(0xf3c4, 0xf3c4, 'GET LS BYTE OF INTEGER'),
(0xf3c6, 0xf3c6, '* WAS THE ARGUMENT =127, IF SO'),
(0xf3c8, 0xf3c8, "* THEN 'OV' ERROR; THIS WILL ALSO ADD THE $80 BIAS"),
(0xf3ca, 0xf3ca, 'DECREMENT ONE FROM THE EXPONENT, BECAUSE $81, NOT $80 WAS USED ABOVE'),
(0xf3cb, 0xf3cb, 'SAVE EXPONENT OF INTEGER PORTION ON STACK'),
(0xf3cd, 0xf3cd, 'POINT (X) TO FPA3'),
(0xf3d0, 0xf3d0, 'SUBTRACT FPA0 FROM (X) - GET FRACTIONAL PART OF ARGUMENT'),
(0xf3d3, 0xf3d3, 'POINT X TO COEFFICIENTS'),
(0xf3d6, 0xf3d6, 'EVALUATE POLYNOMIAL FOR FRACTIONAL PART'),
(0xf3d9, 0xf3d9, 'FORCE THE MANTISSA TO BE POSITIVE'),
(0xf3db, 0xf3db, 'GET INTEGER EXPONENT FROM STACK'),
(0xf3dd, 0xf3dd, '* CALCULATE EXPONENT OF NEW FPA0 BY ADDING THE EXPONENTS OF THE'),
(0xf3e1, 0xf3e1, 'CHECK STATUS OF FPA0'),
(0xf3e4, 0xf3e4, 'BRANCH IF FPA0 = NEGATIVE'),
(0xf3e6, 0xf3e6, 'CONVERT FPA0 TO INTEGER'),
(0xf3e9, 0xf3e9, 'TOGGLE SIGN OF FPA0 MANTISSA'),
(0xf3eb, 0xf3eb, 'CONVERT FPA0 TO INTEGER'),
(0xf3ed, 0xf3ed, 'TOGGLE SIGN OF FPA0'),
(0xf3f0, 0xf3f0, 'GET LINE NUMBER FROM BASIC'),
(0xf3f3, 0xf3f3, 'RETURN ADDRESS OFF OF THE STACK'),
(0xf3f5, 0xf3f5, "'LIST' FLAG"),
(0xf3f7, 0xf3f7, 'SET FLAG TO LIST LINE'),
(0xf3f9, 0xf3f9, 'GO FIND THE LINE NUMBER IN PROGRAM'),
(0xf3fc, 0xf3fc, "#7 'UNDEFINED LINE #'"),
(0xf400, 0xf400, 'GO UNCRUNCH LINE INTO BUFFER AT LINBUF+1'),
(0xf403, 0xf403, 'PUT ABSOLUTE ADDRESS OF END OF LINE TO ACCD'),
(0xf405, 0xf405, 'SUBTRACT OUT THE START OF LINE'),
(0xf408, 0xf408, 'SAVE LENGTH OF LINE'),
(0xf40a, 0xf40a, 'GET THE HEX VALUE OF LINE NUMBER'),
(0xf40c, 0xf40c, 'LIST THE LINE NUMBER ON THE SCREEN'),
(0xf40f, 0xf40f, 'PRINT A SPACE'),
(0xf412, 0xf412, 'POINT X TO BUFFER'),
(0xf415, 0xf415, '* CHECK TO SEE IF LINE IS TO BE'),
(0xf417, 0xf417, '* LISTED TO SCREEN - BRANCH IF IT IS'),
(0xf419, 0xf419, 'RESET DIGIT ACCUMULATOR - DEFAULT VALUE'),
(0xf41a, 0xf41a, 'GET KEY STROKE'),
(0xf41d, 0xf41d, 'SET CARRY IF NOT NUMERIC'),
(0xf420, 0xf420, 'BRANCH IF NOT NUMERIC'),
(0xf422, 0xf422, 'OFF ASCII'),
(0xf424, 0xf424, 'IT ON STACK'),
(0xf426, 0xf426, 'NUMBER BEING CONVERTED IS BASE 10'),
(0xf428, 0xf428, 'ACCUMULATED VALUE BY BASE (10)'),
(0xf429, 0xf429, 'DIGIT TO ACCUMULATED VALUE'),
(0xf42b, 0xf42b, 'CHECK FOR ANOTHER DIGIT'),
(0xf42d, 0xf42d, 'REPEAT PARAMETER IN ACCB; IF IT'),
(0xf42f, 0xf42f, "0, THEN MAKE IT '1'"),
(0xf431, 0xf431, 'BORT?'),
(0xf433, 0xf433, 'NO'),
(0xf435, 0xf435, 'PRINT CARRIAGE RETURN TO SCREEN'),
(0xf438, 0xf438, 'RESTART EDIT PROCESS - CANCEL ALL CHANGES'),
(0xf43a, 0xf43a, 'IST?'),
(0xf43c, 0xf43c, 'NO'),
(0xf43e, 0xf43e, 'LIST THE LINE'),
(0xf440, 0xf440, "RESET THE LIST FLAG TO 'NO LIST'"),
(0xf442, 0xf442, 'PRINT CARRIAGE RETURN'),
(0xf445, 0xf445, 'GO INTERPRET ANOTHER EDIT COMMAND'),
(0xf447, 0xf447, 'RETURN ADDRESS OFF OF THE STACK'),
(0xf449, 0xf449, 'KEY?'),
(0xf44b, 0xf44b, 'NO'),
(0xf44d, 0xf44d, 'ECHO THE LINE TO THE SCREEN'),
(0xf44f, 0xf44f, 'PRINT CARRIAGE RETURN'),
(0xf452, 0xf452, "* RESET BASIC'S INPUT POINTER"),
(0xf455, 0xf455, '* TO THE LINE INPUT BUFFER'),
(0xf457, 0xf457, 'GO PUT LINE BACK IN PROGRAM'),
(0xf45a, 0xf45a, 'XIT?'),
(0xf45c, 0xf45c, 'YES - SAME AS ENTER EXCEPT NO ECHO'),
(0xf45e, 0xf45e, 'UIT?'),
(0xf460, 0xf460, 'NO'),
(0xf462, 0xf462, 'PRINT CARRIAGE RETURN TO SCREEN'),
(0xf465, 0xf465, 'GO TO COMMAND LEVEL - MAKE NO CHANGES'),
(0xf468, 0xf468, 'INTERPRET THE REMAINING COMMANDS AS SUBROUTINES'),
(0xf46a, 0xf46a, 'GO INTERPRET ANOTHER EDIT COMMAND'),
(0xf46c, 0xf46c, 'SPACE BAR?'),
(0xf46e, 0xf46e, 'NO'),
(0xf470, 0xf470, 'SKIP TWO BYTES'),
(0xf471, 0xf471, '250 BYTES MAX IN BUFFER'),
(0xf473, 0xf473, 'GET A CHARACTER FROM BUFFER'),
(0xf475, 0xf475, "EXIT IF IT'S A 0"),
(0xf477, 0xf477, 'SEND CHAR TO CONSOLE OUT'),
(0xf47a, 0xf47a, 'POINTER UP ONE'),
(0xf47c, 0xf47c, 'CHARACTER COUNTER'),
(0xf47d, 0xf47d, 'LOOP IF NOT DONE'),
(0xf480, 0xf480, 'ELETE?'),
(0xf482, 0xf482, 'NO'),
(0xf484, 0xf484, '* CHECK FOR END OF LINE'),
(0xf486, 0xf486, '* AND BRANCH IF SO'),
(0xf488, 0xf488, 'REMOVE A CHARACTER'),
(0xf48a, 0xf48a, 'REPEAT PARAMETER'),
(0xf48b, 0xf48b, 'BRANCH IF NOT DONE'),
(0xf48e, 0xf48e, 'DECREMENT LENGTH OF BUFFER'),
(0xf490, 0xf490, 'POINT Y TO ONE BEFORE CURRENT BUFFER POINTER'),
(0xf492, 0xf492, 'TEMPORARY BUFFER POINTER'),
(0xf494, 0xf494, 'GET NEXT CHARACTER'),
(0xf496, 0xf496, 'PUT IT IN CURRENT POSITION'),
(0xf498, 0xf498, 'BRANCH IF NOT END OF LINE'),
(0xf49b, 0xf49b, 'NSERT?'),
(0xf49d, 0xf49d, 'YES'),
(0xf49f, 0xf49f, 'XTEND?'),
(0xf4a1, 0xf4a1, 'YES'),
(0xf4a3, 0xf4a3, 'ACK?'),
(0xf4a5, 0xf4a5, 'NO'),
(0xf4a7, 0xf4a7, 'TURN CURRENT BUFFER POINTER INTO END OF LINE FLAG'),
(0xf4a9, 0xf4a9, 'PUT CURRENT BUFFER POINTER IN ACCD'),
(0xf4ab, 0xf4ab, 'SUBTRACT INITIAL POINTER POSITION'),
(0xf4ae, 0xf4ae, 'SAVE NEW BUFFER LENGTH'),
(0xf4b0, 0xf4b0, 'DISPLAY THE LINE ON THE SCREEN'),
(0xf4b2, 0xf4b2, 'GET A KEYSTROKE'),
(0xf4b5, 0xf4b5, 'KEY?'),
(0xf4b7, 0xf4b7, 'YES - INTERPRET ANOTHER COMMAND - PRINT LINE'),
(0xf4b9, 0xf4b9, 'SCAPE?'),
(0xf4bb, 0xf4bb, "YES - RETURN TO COMMAND LEVEL - DON'T PRINT LINE"),
(0xf4bd, 0xf4bd, 'SPACE?'),
(0xf4bf, 0xf4bf, 'NO'),
(0xf4c1, 0xf4c1, 'COMPARE POINTER TO START OF BUFFER'),
(0xf4c4, 0xf4c4, 'DO NOT ALLOW BS IF AT START'),
(0xf4c6, 0xf4c6, 'MOVE POINTER BACK ONE, BS TO SCREEN'),
(0xf4c8, 0xf4c8, 'REMOVE ONE CHARACTER FROM BUFFER'),
(0xf4ca, 0xf4ca, 'GET INSERT SUB COMMAND'),
(0xf4cc, 0xf4cc, 'HANGE?'),
(0xf4ce, 0xf4ce, 'NO'),
(0xf4d0, 0xf4d0, 'CHECK CURRENT BUFFER CHARACTER'),
(0xf4d2, 0xf4d2, 'BRANCH IF END OF LINE'),
(0xf4d4, 0xf4d4, 'GET A KEYSTROKE'),
(0xf4d7, 0xf4d7, 'BRANCH IF LEGITIMATE KEY'),
(0xf4d9, 0xf4d9, 'TRY AGAIN IF ILLEGAL KEY'),
(0xf4db, 0xf4db, 'INSERT NEW CHARACTER INTO BUFFER'),
(0xf4dd, 0xf4dd, 'SEND NEW CHARACTER TO SCREEN'),
(0xf4df, 0xf4df, 'REPEAT PARAMETER'),
(0xf4e0, 0xf4e0, 'BRANCH IF NOT DONE'),
(0xf4e3, 0xf4e3, 'GET LENGTH OF LINE'),
(0xf4e5, 0xf4e5, 'COMPARE TO MAXIMUM LENGTH'),
(0xf4e7, 0xf4e7, 'BRANCH IF NOT AT MAXIMUM'),
(0xf4e9, 0xf4e9, 'IGNORE INPUT IF LINE AT MAXIMUM LENGTH'),
(0xf4eb, 0xf4eb, 'CURRENT BUFFER POINTER'),
(0xf4ed, 0xf4ed, '* SCAN THE LINE UNTIL END OF'),
(0xf4ef, 0xf4ef, '* LINE (0) IS FOUND'),
(0xf4f1, 0xf4f1, 'DECR TEMP LINE POINTER AND GET A CHARACTER'),
(0xf4f3, 0xf4f3, 'PUT CHARACTER BACK DOWN ONE SPOT'),
(0xf4f5, 0xf4f5, 'WE REACHED STARTING POINT?'),
(0xf4f7, 0xf4f7, 'NO - KEEP GOING'),
(0xf4f9, 0xf4f9, 'BUFFER POINTER FROM STACK'),
(0xf4fb, 0xf4fb, 'INSERT NEW CHARACTER INTO THE LINE'),
(0xf4fd, 0xf4fd, 'SEND A CHARACTER TO CONSOLE OUT'),
(0xf4ff, 0xf4ff, 'ADD ONE TO BUFFER LENGTH'),
(0xf501, 0xf501, 'GET INSERT SUB COMMAND'),
(0xf503, 0xf503, 'CKSPACE?'),
(0xf505, 0xf505, 'NO'),
(0xf507, 0xf507, 'MOVE POINTER BACK 1, SEND BS TO SCREEN'),
(0xf509, 0xf509, 'REPEAT PARAMETER'),
(0xf50a, 0xf50a, 'LOOP UNTIL DONE'),
(0xf50d, 0xf50d, 'COMPARE POINTER TO START OF BUFFER'),
(0xf510, 0xf510, 'DO NOT ALLOW BS IF AT START'),
(0xf512, 0xf512, 'MOVE POINTER BACK ONE'),
(0xf514, 0xf514, 'BACK SPACE'),
(0xf516, 0xf516, 'SEND TO CONSOLE OUT'),
(0xf519, 0xf519, 'ILL?'),
(0xf51b, 0xf51b, 'YES'),
(0xf51d, 0xf51d, 'EARCH?'),
(0xf51f, 0xf51f, 'YES'),
(0xf522, 0xf522, 'KILL/SEARCH FLAG ON STACK'),
(0xf524, 0xf524, '* GET A KEYSTROKE (TARGET CHARACTER)'),
(0xf526, 0xf526, 'SAVE IT ON STACK'),
(0xf528, 0xf528, 'GET CURRENT BUFFER CHARACTER'),
(0xf52a, 0xf52a, 'AND RETURN IF END OF LINE'),
(0xf52c, 0xf52c, 'CHECK KILL/SEARCH FLAG'),
(0xf52e, 0xf52e, 'BRANCH IF KILL'),
(0xf530, 0xf530, 'SEND A CHARACTER TO CONSOLE OUT'),
(0xf532, 0xf532, 'BUFFER POINTER'),
(0xf534, 0xf534, 'CHECK NEXT INPUT CHARACTER'),
(0xf536, 0xf536, 'REMOVE ONE CHARACTER FROM BUFFER'),
(0xf539, 0xf539, 'GET CURRENT INPUT CHARACTER'),
(0xf53b, 0xf53b, 'TO TARGET CHARACTER'),
(0xf53d, 0xf53d, 'BRANCH IF NO MATCH'),
(0xf53f, 0xf53f, 'REPEAT PARAMETER'),
(0xf540, 0xf540, 'BRANCH IF NOT DONE'),
(0xf542, 0xf542, 'Y PULL WILL CLEAN UP THE STACK FOR THE 2 PSHS A'),
(0xf544, 0xf544, 'CALL CONSOLE IN : DEV NBR=SCREEN'),
(0xf547, 0xf547, 'CHARACTER?'),
(0xf549, 0xf549, 'YES - GET ANOTHER CHAR'),
(0xf54b, 0xf54b, 'UP ARROW (QUIT INSERT)'),
(0xf54d, 0xf54d, 'NO'),
(0xf54f, 0xf54f, 'REPLACE W/ESCAPE CODE'),
(0xf551, 0xf551, 'KEY'),
(0xf553, 0xf553, 'YES'),
(0xf555, 0xf555, 'SCAPE?'),
(0xf557, 0xf557, 'YES'),
(0xf559, 0xf559, 'CKSPACE?'),
(0xf55b, 0xf55b, 'YES'),
(0xf55d, 0xf55d, 'SPACE'),
(0xf55f, 0xf55f, 'GET ANOTHER CHAR IF CONTROL CHAR'),
(0xf561, 0xf561, 'CARRY'),
(0xf564, 0xf564, 'SKIP ONE BYTE AND LDA #$4F'),
(0xf565, 0xf565, 'TROFF FLAG'),
(0xf566, 0xf566, 'TRON/TROFF FLAG:0=TROFF, <> 0=TRON'),
(0xf569, 0xf569, 'GET DEVICE NUMBER'),
(0xf56b, 0xf56b, 'GET PRINT POSITION'),
(0xf56d, 0xf56d, 'CONVERT ACCB TO 2 DIGIT SIGNED INTEGER'),
(0xf56e, 0xf56e, 'CONVERT ACCD TO FLOATING POINT'),
(0xf571, 0xf571, "SYNTAX CHECK FOR '('"),
(0xf574, 0xf574, 'GET ADDR OF END OF ARRAYS'),
(0xf576, 0xf576, 'SAVE IT ON STACK'),
(0xf578, 0xf578, 'GET VARIABLE DESCRIPTOR'),
(0xf57b, 0xf57b, "SYNTAX CHECK FOR ')'"),
(0xf57e, 0xf57e, 'GET END OF ARRAYS ADDR BACK'),
(0xf580, 0xf580, 'SWAP END OF ARRAYS AND VARIABLE DESCRIPTOR'),
(0xf582, 0xf582, 'COMPARE TO NEW END OF ARRAYS'),
(0xf584, 0xf584, "'FC' ERROR IF VARIABLE WAS NOT DEFINED PRIOR TO CALLING VARPTR"),
(0xf586, 0xf586, 'CONVERT VARIABLE DESCRIPTOR INTO A FP NUMBER'),
(0xf589, 0xf589, 'GET INPUT CHAR FROM BASIC'),
(0xf58b, 0xf58b, "SYNTAX CHECK FOR '('"),
(0xf58e, 0xf58e, '* GET VARIABLE DESCRIPTOR ADDRESS AND'),
(0xf591, 0xf591, '* SAVE IT ON THE STACK'),
(0xf593, 0xf593, 'POINT ACCD TO START OF OLDSTRING'),
(0xf595, 0xf595, 'COMPARE TO START OF CLEARED SPACE'),
(0xf598, 0xf598, 'BRANCH IF <='),
(0xf59a, 0xf59a, 'SUBTRACT OUT TOP OF CLEARED SPACE'),
(0xf59c, 0xf59c, 'BRANCH IF STRING IN STRING SPACE'),
(0xf59e, 0xf59e, 'GET LENGTH OF OLDSTRING'),
(0xf5a0, 0xf5a0, 'RESERVE ACCB BYTES IN STRING SPACE'),
(0xf5a3, 0xf5a3, 'SAVE RESERVED SPACE STRING ADDRESS ON STACK'),
(0xf5a5, 0xf5a5, 'POINT X TO OLDSTRING DESCRIPTOR'),
(0xf5a7, 0xf5a7, 'MOVE OLDSTRING INTO STRING SPACE'),
(0xf5aa, 0xf5aa, '* GET OLDSTRING DESCRIPTOR ADDRESS AND RESERVED STRING'),
(0xf5ac, 0xf5ac, '* ADDRESS AND SAVE RESERVED ADDRESS AS OLDSTRING ADDRESS'),
(0xf5ae, 0xf5ae, 'SAVE OLDSTRING DESCRIPTOR ADDRESS'),
(0xf5b0, 0xf5b0, 'SYNTAX CHECK FOR COMMA AND EVALUATE LENGTH EXPRESSION'),
(0xf5b3, 0xf5b3, 'SAVE POSITION PARAMETER ON STACK'),
(0xf5b5, 0xf5b5, 'POSITION PARAMETER AND BRANCH'),
(0xf5b6, 0xf5b6, '* IF START OF STRING'),
(0xf5b8, 0xf5b8, 'DEFAULT REPLACEMENT LENGTH = $FF'),
(0xf5ba, 0xf5ba, '* CHECK FOR END OF MID$ STATEMENT AND'),
(0xf5bc, 0xf5bc, '* BRANCH IF AT END OF STATEMENT'),
(0xf5be, 0xf5be, 'SYNTAX CHECK FOR COMMA AND EVALUATE LENGTH EXPRESSION'),
(0xf5c1, 0xf5c1, 'SAVE LENGTH PARAMETER ON STACK'),
(0xf5c3, 0xf5c3, "SYNTAX CHECK FOR ')'"),
(0xf5c6, 0xf5c6, 'TOKEN FOR ='),
(0xf5c8, 0xf5c8, 'SYNTAX CHECK FOR "=\''),
(0xf5cb, 0xf5cb, 'EVALUATE REPLACEMENT STRING'),
(0xf5cd, 0xf5cd, 'SAVE REPLACEMENT STRING ADDRESS IN U'),
(0xf5cf, 0xf5cf, 'POINT X TO OLOSTRING DESCRIPTOR ADDRESS'),
(0xf5d1, 0xf5d1, 'GET LENGTH OF OLDSTRING'),
(0xf5d3, 0xf5d3, 'SUBTRACT POSITION PARAMETER'),
(0xf5d5, 0xf5d5, 'INSERT REPLACEMENT STRING INTO OLDSTRING'),
(0xf5d7, 0xf5d7, "'FC' ERROR IF POSITION > LENGTH OF OLDSTRING"),
(0xf5da, 0xf5da, '* NOW ACCA = NUMBER OF CHARACTERS TO THE RIGHT'),
(0xf5dd, 0xf5dd, 'BRANCH IF NEW STRING WILL FIT IN OLDSTRING'),
(0xf5df, 0xf5df, 'IF NOT, USE AS MUCH OF LENGTH PARAMETER AS WILL FIT'),
(0xf5e1, 0xf5e1, 'GET POSITION PARAMETER'),
(0xf5e3, 0xf5e3, 'ACCA=LENGTH OF REPL STRING, ACCB=POSITION PARAMETER'),
(0xf5e5, 0xf5e5, 'POINT X TO OLDSTRING ADDRESS'),
(0xf5e7, 0xf5e7, "* BASIC'S POSITION PARAMETER STARTS AT 1; THIS ROUTINE"),
(0xf5e8, 0xf5e8, 'POINT X TO POSITION IN OLDSTRING WHERE THE REPLACEMENT WILL GO'),
(0xf5e9, 0xf5e9, '* IF THE LENGTH OF THE REPLACEMENT STRING IS ZERO'),
(0xf5ea, 0xf5ea, '* THEN RETURN'),
(0xf5ee, 0xf5ee, 'ADJUSTED LENGTH PARAMETER, THEN BRANCH'),
(0xf5f0, 0xf5f0, 'OTHERWISE USE AS MUCH ROOM AS IS AVAILABLE'),
(0xf5f2, 0xf5f2, 'SAVE NUMBER OF BYTES TO MOVE IN ACCB'),
(0xf5f4, 0xf5f4, 'SWAP SOURCE AND DESTINATION POINTERS'),
(0xf5f6, 0xf5f6, 'MOVE (B) BYTES FROM (X) TO (U)'),
(0xf5f9, 0xf5f9, 'PC'),
(0xf5fb, 0xf5fb, 'EVALUATE EXPRESSION'),
(0xf5fe, 0xf5fe, "*'TM' ERROR IF NUMERIC; RETURN WITH X POINTING"),
(0xf601, 0xf601, "SYNTAX CHECK FOR '('"),
(0xf604, 0xf604, 'EVALUATE EXPRESSION; ERROR IF > 255'),
(0xf607, 0xf607, 'SAVE LENGTH OF STRING'),
(0xf609, 0xf609, 'SYNTAX CHECK FOR COMMA'),
(0xf60c, 0xf60c, 'EVALUATE EXPRESSION'),
(0xf60f, 0xf60f, "SYNTAX CHECK FOR ')'"),
(0xf612, 0xf612, 'GET VARIABLE TYPE'),
(0xf614, 0xf614, 'BRANCH IF STRING'),
(0xf616, 0xf616, 'CONVERT FPA0 INTO AN INTEGER IN ACCB'),
(0xf619, 0xf619, 'SAVE THE STRING IN STRING SPACE'),
(0xf61b, 0xf61b, 'GET FIRST BYTE OF STRING'),
(0xf61e, 0xf61e, 'SAVE FIRST BYTE OF EXPRESSION'),
(0xf620, 0xf620, 'GET LENGTH OF STRING'),
(0xf622, 0xf622, 'RESERVE ACCB BYTES IN STRING SPACE'),
(0xf625, 0xf625, 'GET LENGTH OF STRING AND CHARACTER'),
(0xf627, 0xf627, 'BRANCH IF NULL STRING'),
(0xf629, 0xf629, 'SAVE A CHARACTER IN STRING SPACE'),
(0xf62b, 0xf62b, 'DECREMENT LENGTH'),
(0xf62c, 0xf62c, 'BRANCH IF NOT DONE'),
(0xf62e, 0xf62e, 'PUT STRING DESCRIPTOR ONTO STRING STACK'),
(0xf631, 0xf631, "SYNTAX CHECK FOR '('"),
(0xf634, 0xf634, 'EVALUATE EXPRESSION'),
(0xf637, 0xf637, 'DEFAULT POSITION = 1 (SEARCH START)'),
(0xf639, 0xf639, 'SAVE START'),
(0xf63b, 0xf63b, 'GET VARIABLE TYPE'),
(0xf63d, 0xf63d, 'BRANCH IF STRING'),
(0xf63f, 0xf63f, 'CONVERT FPA0 TO INTEGER IN ACCB'),
(0xf642, 0xf642, 'SAVE START SEARCH VALUE'),
(0xf644, 0xf644, 'BRANCH IF START SEARCH AT ZERO'),
(0xf646, 0xf646, 'SYNTAX CHECK FOR COMMA'),
(0xf649, 0xf649, 'EVALUATE EXPRESSION - SEARCH STRING'),
(0xf64c, 0xf64c, "'TM' ERROR IF NUMERIC"),
(0xf64f, 0xf64f, 'SEARCH STRING DESCRIPTOR ADDRESS'),
(0xf651, 0xf651, 'SAVE ON THE STACK'),
(0xf653, 0xf653, 'SYNTAX CHECK FOR COMMA'),
(0xf656, 0xf656, 'EVALUATE TARGET STRING EXPRESSION'),
(0xf659, 0xf659, 'SAVE ADDRESS AND LENGTH ON STACK'),
(0xf65b, 0xf65b, "SYNTAX CHECK FOR ')'"),
(0xf65e, 0xf65e, '* LOAD X WITH SEARCH STRING DESCRIPTOR ADDRESS'),
(0xf660, 0xf660, '* AND GET THE LENGTH ANDADDRESS OF SEARCH STRING'),
(0xf663, 0xf663, 'SAVE LENGTH ON STACK'),
(0xf665, 0xf665, 'COMPARE LENGTH OF SEARCH STRING TO START'),
(0xf667, 0xf667, 'POSITION; RETURN 0 IF LENGTH < START'),
(0xf669, 0xf669, 'GET LENGTH OF TARGET STRING'),
(0xf66b, 0xf66b, 'BRANCH IF TARGET STRING = NULL'),
(0xf66d, 0xf66d, 'GET START POSITION'),
(0xf66f, 0xf66f, 'MOVE BACK ONE'),
(0xf670, 0xf670, 'TO POSITION IN SEARCH STRING WHERE SEARCHING WILL START'),
(0xf671, 0xf671, 'POINT Y TO SEARCH POSITION'),
(0xf673, 0xf673, 'POINT U TO START OF TARGET'),
(0xf675, 0xf675, 'LOAD ACCB WITH LENGTH OF TARGET'),
(0xf677, 0xf677, 'LOAD ACCA WITH LENGTH OF SEARCH'),
(0xf679, 0xf679, 'SUBTRACT SEARCH POSITION FROM SEARCH LENGTH'),
(0xf67b, 0xf67b, 'ADD ONE'),
(0xf67c, 0xf67c, 'COMPARE TO TARGET LENGTH'),
(0xf67e, 0xf67e, "RETURN 0 IF TARGET LENGTH > WHAT'S LEFT OF SEARCH STRING"),
(0xf680, 0xf680, 'GET A CHARACTER FROM SEARCH STRING'),
(0xf682, 0xf682, 'COMPARE IT TO TARGET STRING'),
(0xf684, 0xf684, 'BRANCH IF NO MATCH'),
(0xf686, 0xf686, 'DECREMENT TARGET LENGTH'),
(0xf687, 0xf687, 'CHECK ANOTHER CHARACTER'),
(0xf689, 0xf689, 'GET MATCH POSITION'),
(0xf68b, 0xf68b, 'SKIP NEXT BYTE'),
(0xf68c, 0xf68c, 'MATCH ADDRESS = 0'),
(0xf68d, 0xf68d, 'CLEAN UP THE STACK'),
(0xf68f, 0xf68f, 'CONVERT ACCB TO FP NUMBER'),
(0xf692, 0xf692, 'INCREMENT SEARCH POSITION'),
(0xf694, 0xf694, 'MOVE X TO NEXT SEARCH POSITION'),
(0xf696, 0xf696, 'KEEP LOOKING FOR A MATCH'),
(0xf69a, 0xf69a, '* RETURN IF NOT HEX OR OCTAL VARIABLE'),
(0xf69c, 0xf69c, 'PURGE RETURN ADDRESS FROM STACK'),
(0xf69e, 0xf69e, '* CLEAR BOTTOM TWO'),
(0xf6a0, 0xf6a0, '* BYTES OF FPA0'),
(0xf6a2, 0xf6a2, 'BYTES 2,3 OF FPA0 = (TEMPORARY ACCUMULATOR)'),
(0xf6a5, 0xf6a5, 'GET A CHARACTER FROM BASIC'),
(0xf6a9, 0xf6a9, 'YES'),
(0xf6ad, 0xf6ad, 'YES'),
(0xf6af, 0xf6af, 'GET CURRENT INPUT CHARACTER'),
(0xf6b1, 0xf6b1, 'DEFAULT TO OCTAL (&O)'),
(0xf6b9, 0xf6b9, 'BASE 8 MULTIPLIER'),
(0xf6bb, 0xf6bb, 'ADD DIGIT TO TEMPORARY ACCUMULATOR'),
(0xf6bd, 0xf6bd, 'GET A CHARACTER FROM BASIC'),
(0xf6bf, 0xf6bf, 'BRANCH IF NUMERIC'),
(0xf6c1, 0xf6c1, '* CLEAR 2 HIGH ORDER'),
(0xf6c3, 0xf6c3, '* BYTES OF FPA0'),
(0xf6c5, 0xf6c5, 'SET VARXABLE TYPE TO NUMERIC'),
(0xf6c7, 0xf6c7, 'ZERO OUT SUB BYTE OF FPA0'),
(0xf6c9, 0xf6c9, 'ZERO OUT MANTISSA SIGN OF FPA0'),
(0xf6cb, 0xf6cb, '* SET EXPONENT OF FPA0'),
(0xf6cf, 0xf6cf, 'GO NORMALIZE FPA0'),
(0xf6d2, 0xf6d2, 'GET A CHARACTER FROM BASIC'),
(0xf6d4, 0xf6d4, 'BRANCH IF NUMERIC'),
(0xf6d6, 0xf6d6, 'SET CARRY IF NOT ALPHA'),
(0xf6d9, 0xf6d9, 'BRANCH IF NOT ALPHA OR NUMERIC'),
(0xf6db, 0xf6db, 'CHECK FOR LETTERS A-F'),
(0xf6dd, 0xf6dd, 'BRANCH IF >= G (ILLEGAL HEX LETTER)'),
(0xf6df, 0xf6df, 'SUBTRACT ASCII DIFFERENCE BETWEEN A AND 9'),
(0xf6e1, 0xf6e1, 'BASE 16 DIGIT MULTIPLIER = 2**4'),
(0xf6e3, 0xf6e3, 'ADD DIGIT TO TEMPORARY ACCUMULATOR'),
(0xf6e5, 0xf6e5, 'KEEP EVALUATING VARIABLE'),
(0xf6e7, 0xf6e7, '* MULTIPLY TEMPORARY'),
(0xf6e9, 0xf6e9, '* ACCUMULATOR BY TWO'),
(0xf6eb, 0xf6eb, "'OV' OVERFLOW ERROR"),
(0xf6ef, 0xf6ef, 'DECREMENT SHIFT COUNTER'),
(0xf6f0, 0xf6f0, 'MULTIPLY TEMPORARY ACCUMULATOR AGAIN'),
(0xf6f2, 0xf6f2, 'MASK OFF ASCII'),
(0xf6f4, 0xf6f4, '* ADD DIGIT TO TEMPORARY'),
(0xf6f6, 0xf6f6, '* ACCUMULATOR AND SAVE IT'),
(0xf6f9, 0xf6f9, 'PULL RETURN ADDRESS AND SAVE IN U REGISTER'),
(0xf6fb, 0xf6fb, 'SET VARIABLE TYPE TO NUMERIC'),
(0xf6fd, 0xf6fd, 'CURRENT INPUT POINTER TO X'),
(0xf6ff, 0xf6ff, 'GET CHARACTER FROM BASIC'),
(0xf701, 0xf701, 'HEX AND OCTAL VARIABLES ARE PRECEEDED BY &'),
(0xf703, 0xf703, "PROCESS A '&' VARIABLE"),
(0xf705, 0xf705, 'TOKEN FOR FN'),
(0xf707, 0xf707, 'PROCESS FN CALL'),
(0xf709, 0xf709, 'CHECK FOR SECONDARY TOKEN'),
(0xf70b, 0xf70b, 'NOT SECONDARY'),
(0xf70d, 0xf70d, 'GET CHARACTER FROM BASIC'),
(0xf70f, 0xf70f, 'TOKEN FOR USR'),
(0xf711, 0xf711, 'PROCESS USR CALL'),
(0xf715, 0xf715, "RESTORE BASIC'S INPUT POINTER"),
(0xf717, 0xf717, 'RETURN TO CALLING ROUTINE'),
(0xf719, 0xf719, 'GET CURRENT LINE NUMBER'),
(0xf71b, 0xf71b, 'IN DIRECT MODE?'),
(0xf71d, 0xf71d, 'RETURN IF NOT IN DIRECT MODE'),
(0xf71f, 0xf71f, "'ILLEGAL DIRECT STATEMENT' ERROR"),
(0xf721, 0xf721, 'PROCESS ERROR'),
(0xf724, 0xf724, 'GET TWO INPUT CHARS'),
(0xf728, 0xf728, 'TOKEN FOR USR'),
(0xf72b, 0xf72b, 'BRANCH IF DEF USR'),
(0xf72f, 0xf72f, 'GET DESCRIPTOR ADDRESS FOR FN VARIABLE NAME'),
(0xf731, 0xf731, "DON'T ALLOW DEF FN IF IN DIRECT MODE"),
(0xf733, 0xf733, "SYNTAX CHECK FOR '('"),
(0xf736, 0xf736, '* GET THE FLAG TO INDICATE ARRAY VARIABLE SEARCH DISABLE'),
(0xf738, 0xf738, '* AND SAVE IT IN THE ARRAY DISABLE FLAG'),
(0xf73a, 0xf73a, 'GET VARIABLE DESCRIPTOR'),
(0xf73d, 0xf73d, "'TM' ERROR IF STRING"),
(0xf73f, 0xf73f, "SYNTAX CHECK FOR ')'"),
(0xf742, 0xf742, "TOKEN FOR '='"),
(0xf744, 0xf744, 'DO A SYNTAX CHECK FOR ='),
(0xf747, 0xf747, 'GET THE ADDRESS OF THE FN NAME DESCRIPTOR'),
(0xf749, 0xf749, '* GET THE CURRENT INPUT POINTER ADDRESS AND'),
(0xf74b, 0xf74b, '* SAVE IT IN FIRST 2 BYTES OF THE DESCRIPTOR'),
(0xf74d, 0xf74d, '= GET THE DESCRIPTOR ADDRESS OF THE ARGUMENT'),
(0xf74f, 0xf74f, '= VARIABLE AND SAVE IT IN THE DESCRIPTOR OF THE FN NAME'),
(0xf751, 0xf751, 'MOVE INPUT POINTER TO END OF LINE OR SUBLINE'),
(0xf754, 0xf754, 'TOKEN FOR FN'),
(0xf756, 0xf756, 'DO A SYNTAX CHECK FOR FN'),
(0xf759, 0xf759, '* GET THE FLAG TO INDICATE ARRAY VARIABLE SEARCH DISABLE FLAG'),
(0xf75b, 0xf75b, '* AND SAVE IT IN ARRAY VARIABLE FLAG'),
(0xf75d, 0xf75d, 'SET BIT 7 OF CURRENT INPUT CHARACTER TO INDICATE AN FN VARIABLE'),
(0xf75f, 0xf75f, '* GET THE DESCRIPTOR ADDRESS OF THIS'),
(0xf762, 0xf762, '* VARIABLE AND SAVE IT IN V4B'),
(0xf764, 0xf764, "'TM' ERROR IF STRING VARIABLE"),
(0xf767, 0xf767, '* GET THE DESCRIPTOR OF THE FN NAME'),
(0xf769, 0xf769, '* VARIABLE AND SAVE IT ON THE STACK'),
(0xf76b, 0xf76b, "SYNTAX CHECK FOR '(' & EVALUATE EXPR"),
(0xf76e, 0xf76e, "'TM' ERROR IF STRING VARIABLE"),
(0xf770, 0xf770, 'POINT U TO FN NAME DESCRIPTOR'),
(0xf772, 0xf772, "'UNDEFINED FUNCTION CALL' ERROR"),
(0xf774, 0xf774, 'POINT X TO ARGUMENT VARIABLE DESCRIPTOR'),
(0xf776, 0xf776, 'BRANCH TO ERROR HANDLER'),
(0xf778, 0xf778, 'SAVE CURRENT INPUT POINTER IN Y'),
(0xf77b, 0xf77b, '* POINT U TO START OF FN FORMULA AND'),
(0xf77d, 0xf77d, '* SAVE IT IN INPUT POINTER'),
(0xf77f, 0xf77f, '= GET FP VALUE OF'),
(0xf781, 0xf781, '= ARGUMENT VARIABLE, CURRENT INPUT'),
(0xf783, 0xf783, '= POINTER, AND ADDRESS OF START'),
(0xf785, 0xf785, '= OF FN FORMULA AND SAVE'),
(0xf787, 0xf787, '= THEM ON THE STACK'),
(0xf789, 0xf789, 'PACK FPA0 AND SAVE IT IN (X)'),
(0xf78c, 0xf78c, 'EVALUATE FN EXPRESSION'),
(0xf78f, 0xf78f, 'RESTORE REGISTERS'),
(0xf791, 0xf791, '* GET THE FP'),
(0xf793, 0xf793, '* VALUE OF THE ARGUMENT'),
(0xf795, 0xf795, '* VARIABLE OFF OF THE'),
(0xf797, 0xf797, '* STACK AND RE-SAVE IT'),
(0xf799, 0xf799, 'GET FINAL CHARACTER OF THE FN FORMULA'),
(0xf79b, 0xf79b, "'SYNTAX' ERROR IF NOT END OF LINE"),
(0xf79f, 0xf79f, 'RESTORE INPUT POINTER'),
(0xf7a3, 0xf7a3, 'SKIP PAST SECOND BYTE OF DEF USR TOKEN'),
(0xf7a5, 0xf7a5, 'GET FN NUMBER'),
(0xf7a7, 0xf7a7, 'SAVE FN EXEC ADDRESS STORAGE LOC'),
(0xf7a9, 0xf7a9, 'CALCULATE EXEC ADDRESS'),
(0xf7ab, 0xf7ab, 'GET FN EXEC ADDRESS STORAGE LOC'),
(0xf7ad, 0xf7ad, 'SAVE EXEC ADDRESS'),
(0xf7b0, 0xf7b0, 'DEFAULT TO USR0 IF NO ARGUMENT'),
(0xf7b1, 0xf7b1, 'GET A CHARACTER FROM BASIC'),
(0xf7b3, 0xf7b3, 'BRANCH IF NOT NUMERIC'),
(0xf7b5, 0xf7b5, 'MASK OFF ASCII'),
(0xf7b7, 0xf7b7, 'SAVE USR NUMBER IN ACCB'),
(0xf7b9, 0xf7b9, 'GET A CHARACTER FROM BASIC'),
(0xf7bb, 0xf7bb, 'GET ADDRESS OF STORAGE LOCs FOR USR ADDRESS'),
(0xf7bd, 0xf7bd, 'X2 - 2 BYTES/USR ADDRESS'),
(0xf7be, 0xf7be, 'ADD OFFSET TO START ADDRESS OF STORAGE LOCs'),
(0xf7c0, 0xf7c0, 'GET STORAGE LOC OF EXEC ADDRESS FOR USR N'),
(0xf7c2, 0xf7c2, '* GET EXEC ADDRESS AND'),
(0xf7c4, 0xf7c4, '* PUSH IT ONTO STACK'),
(0xf7c6, 0xf7c6, "SYNTAX CHECK FOR '(' & EVALUATE EXPR"),
(0xf7c9, 0xf7c9, 'POINT X TO FPA0'),
(0xf7cc, 0xf7cc, 'GET VARIABLE TYPE'),
(0xf7ce, 0xf7ce, 'BRANCH IF NUMERIC, STRING IF <> 0'),
(0xf7d0, 0xf7d0, 'GET LENGTH & ADDRESS OF STRING VARIABLE'),
(0xf7d3, 0xf7d3, 'GET POINTER TO STRING DESCRIPTOR'),
(0xf7d5, 0xf7d5, 'GET VARIABLE TYPE'),
(0xf7d7, 0xf7d7, 'JUMP TO USR ROUTINE (PSHS X ABOVE)'),
(0xf7d8, 0xf7d8, "TOKEN FOR '='"),
(0xf7da, 0xf7da, 'DO A SYNTAX CHECK FOR ='),
(0xf7dd, 0xf7dd, 'EVALUATE EXPRESSION, RETURN VALUE IN X'),
(0xf7e0, 0xf7e0, "FC' ERROR IF NO ARGUMENT"),
(0xf7e4, 0xf7e4, 'CONVERT A DECIMAL BASiC NUMBER TO BINARY'),
(0xf7e7, 0xf7e7, 'FIND RAM ADDRESS OF START OF A BASIC LINE'),
(0xf7ea, 0xf7ea, 'SAVE RAM ADDRESS OF STARTING LINE NUMBER'),
(0xf7ec, 0xf7ec, 'GET CURRENT INPUT CHARACTER'),
(0xf7ee, 0xf7ee, 'BRANCH IF END OF LINE'),
(0xf7f0, 0xf7f0, "TOKEN FOR '-'"),
(0xf7f2, 0xf7f2, "TERMINATE COMMAND IF LINE NUMBER NOT FOLLOWED BY '-'"),
(0xf7f4, 0xf7f4, 'GET A CHARACTER FROM BASIC'),
(0xf7f6, 0xf7f6, 'IF END OF LINE, USE DEFAULT ENDING LINE NUMBER'),
(0xf7f8, 0xf7f8, '* CONVERT ENDING LINE NUMBER TO BINARY'),
(0xf7fa, 0xf7fa, '* AND SAVE IT IN BINVAL'),
(0xf7fc, 0xf7fc, '= USE $FFXX AS DEFAULT ENDING'),
(0xf7fe, 0xf7fe, '= LINE NUMBER - SAVE IT IN BINVAL'),
(0xf800, 0xf800, 'POINT U TO STARTING LINE NUMBER ADDRESS'),
(0xf802, 0xf802, 'SKIP TWO BYTES'),
(0xf803, 0xf803, 'POINT U TO START OF NEXT LINE'),
(0xf805, 0xf805, 'CHECK FOR END OF PROGRAM'),
(0xf807, 0xf807, 'BRANCH IF END OF PROGRAM'),
(0xf809, 0xf809, "LOAD ACCD WITH THIS LINE'S NUMBER"),
(0xf80b, 0xf80b, 'SUBTRACT ENDING LINE NUMBER ADDRESS'),
(0xf80d, 0xf80d, 'BRANCH IF = < ENDING LINE NUMBER'),
(0xf80f, 0xf80f, 'GET STARTING LINE NUMBER'),
(0xf811, 0xf811, 'MOVE (U) TO (X) UNTIL END OF PROGRAM'),
(0xf813, 0xf813, "RESET BASIC'S INPUT POINTER AND ERASE VARIABLES"),
(0xf816, 0xf816, 'GET STARTING LINE NUMBER ADDRESS'),
(0xf818, 0xf818, 'RECOMPUTE START OF NEXT LINE ADDRESSES'),
(0xf81b, 0xf81b, "JUMP TO BASIC'S MAIN COMMAND LOOP"),
(0xf81e, 0xf81e, 'GO GET LINE NUMBER CONVERTED TO BINARY'),
(0xf821, 0xf821, "MAKE SURE THERE'S NO MORE ON THIS LINE"),
(0xf824, 0xf824, 'GET A BYTE FROM (U)'),
(0xf826, 0xf826, 'MOVE THE BYTE TO (X)'),
(0xf828, 0xf828, 'COMPARE TO END OF BASIC'),
(0xf82b, 0xf82b, 'BRANCH IF NOT AT END'),
(0xf82d, 0xf82d, 'SAVE (X) AS NEW END OF BASIC'),
(0xf830, 0xf830, "'BS' ERROR IF IN DIRECT MODE"),
(0xf833, 0xf833, 'GET A CHAR FROM BASIC'),
(0xf835, 0xf835, 'CHECK FOR PROMPT STRING'),
(0xf837, 0xf837, 'BRANCH IF NO PROMPT STRING'),
(0xf839, 0xf839, 'STRIP OFF PROMPT STRING & PUT IT ON STRING STACK'),
(0xf83e, 0xf83e, '* DO A SYNTAX CHECK FOR;'),
(0xf841, 0xf841, 'REMOVE PROMPT STRING FROM STRING STACK & SEND TO CONSOLE OUT'),
(0xf844, 0xf844, 'RESERVE TWO STORAGE SLOTS ON STACK'),
(0xf846, 0xf846, 'INPUT A LINE FROM CURRENT INPUT DEVICE'),
(0xf849, 0xf849, 'CLEAN UP THE STACK'),
(0xf84b, 0xf84b, 'SEARCH FOR A VARIABLE'),
(0xf84e, 0xf84e, 'SAVE POINTER TO VARIABLE DESCRIPTOR'),
(0xf850, 0xf850, "'TM' ERROR IF VARIABLE TYPE = NUMERIC"),
(0xf853, 0xf853, 'POINT X TO THE STRING BUFFER WHERE THE INPUT STRING WAS STORED'),
(0xf856, 0xf856, 'TERMINATOR CHARACTER 0 (END OF LINE)'),
(0xf857, 0xf857, 'PARSE THE INPUT STRING AND STORE IT IN THE STRING SPACE'),
(0xf85a, 0xf85a, 'REMOVE DESCRIPTOR FROM STRING STACK'),
(0xf85d, 0xf85d, 'STRIP A DECIMAL NUMBER FROM BASIC INPUT LINE'),
(0xf860, 0xf860, 'GET BINARY VALUE'),
(0xf863, 0xf863, 'GET CURRENT OLD NUMBER BEING RENUMBERED'),
(0xf865, 0xf865, 'SAVE THE LINE NUMBER BEING SEARCHED FOR'),
(0xf867, 0xf867, 'GO FIND THE LINE NUMBER IN BASIC PROGRAM'),
(0xf86a, 0xf86a, 'ERASE VARIABLES'),
(0xf86d, 0xf86d, 'DEFAULT LINE NUMBER INTERVAL'),
(0xf870, 0xf870, 'SAVE DEFAULT RENUMBER START LINE NUMBER'),
(0xf872, 0xf872, 'SAVE DEFAULT INTERVAL'),
(0xf874, 0xf874, 'NOW ACCD = 0'),
(0xf875, 0xf875, 'DEFAULT LINE NUMBER OF WHERE TO START RENUMBERING'),
(0xf877, 0xf877, 'GET CURRENT INPUT CHARACTER'),
(0xf879, 0xf879, 'BRANCH IF NOT NUMERIC'),
(0xf87b, 0xf87b, 'CONVERT DECIMAL NUMBER IN BASIC PROGRAM TO BINARY'),
(0xf87d, 0xf87d, 'SAVE LINE NUMBER WHERE RENUMBERING STARTS'),
(0xf87f, 0xf87f, 'GET CURRENT INPUT CHARACTER'),
(0xf881, 0xf881, 'BRANCH IF END OF LINE'),
(0xf883, 0xf883, 'SYNTAX CHECK FOR COMMA'),
(0xf886, 0xf886, 'BRANCH IF NEXT CHARACTER NOT NUMERIC'),
(0xf888, 0xf888, 'CONVERT DECIMAL NUMBER IN BASIC PROGRAM TO BINARY'),
(0xf88a, 0xf88a, 'SAVE NEW RENUMBER LINE'),
(0xf88c, 0xf88c, 'GET CURRENT INPUT CHARACTER'),
(0xf88e, 0xf88e, 'BRANCH IF END OF LINE'),
(0xf890, 0xf890, 'SYNTAX CHECK FOR COMMA'),
(0xf893, 0xf893, 'BRANCH IF NEXT CHARACTER NOT NUMERIC'),
(0xf895, 0xf895, 'CONVERT DECIMAL NUMBER IN BASIC PROGRAM TO BINARY'),
(0xf897, 0xf897, 'SAVE NEW INTERVAL'),
(0xf899, 0xf899, "'FC' ERROR"),
(0xf89b, 0xf89b, "CHECK FOR MORE CHARACTERS ON LINE - 'SYNTAX' ERROR IF ANY"),
(0xf89e, 0xf89e, 'GO GET ADDRESS OF OLD NUMBER BEING RENUMBERED'),
(0xf8a0, 0xf8a0, 'SAVE ADDRESS'),
(0xf8a2, 0xf8a2, 'GET NEXT RENUMBERED LINE NUMBER TO USE'),
(0xf8a4, 0xf8a4, 'FIND THE LINE NUMBER IN THE BASIC PROGRAM'),
(0xf8a6, 0xf8a6, 'COMPARE TO ADDRESS OF OLD LINE NUMBER'),
(0xf8a8, 0xf8a8, "'FC' ERROR IF NEW ADDRESS < OLD ADDRESS"),
(0xf8aa, 0xf8aa, 'MAKE SURE RENUMBERED LINE NUMBERS WILL BE IN RANGE'),
(0xf8ac, 0xf8ac, "CONVERT ASCII LINE NUMBERS TO 'EXPANDED' BINARY"),
(0xf8af, 0xf8af, 'RECALCULATE NEXT LINE RAM ADDRESSES'),
(0xf8b2, 0xf8b2, 'GET RAM ADDRESS OF FIRST LINE TO BE RENUMBERED'),
(0xf8b4, 0xf8b4, 'SAVE IT'),
(0xf8b6, 0xf8b6, 'MAKE SURE LINE NUMBERS EXIST'),
(0xf8b8, 0xf8b8, 'INSERT NEW LINE NUMBERS IN LINE HEADERS'),
(0xf8ba, 0xf8ba, 'INSERT NEW LINE NUMBERS IN PROGRAM STATEMENTS'),
(0xf8bc, 0xf8bc, 'CONVERT PACKED BINARY LINE NUMBERS TO ASCII'),
(0xf8bf, 0xf8bf, 'ERASE VARIABLES'),
(0xf8c2, 0xf8c2, 'RECALCULATE NEXT LINE RAM ADDRESS'),
(0xf8c5, 0xf8c5, "GO BACK TO BASIC'S MAIN LOOP"),
(0xf8c8, 0xf8c8, 'SKIP ONE BYTE - LDA #$4F'),
(0xf8c9, 0xf8c9, 'NEW LINE NUMBER FLAG - 0; INSERT NEW LINE NUMBERS'),
(0xf8ca, 0xf8ca, 'SAVE NEW LINE NUMBER FLAG; 0 = INSERT NEW NUMBERS'),
(0xf8cc, 0xf8cc, 'GET ADDRESS OF OLD LINE NUMBER BEING RENUMBERED'),
(0xf8ce, 0xf8ce, 'GET THE CURRENT RENUMBERED LINE NUMBER'),
(0xf8d0, 0xf8d0, 'RETURN IF END OF PROGRAM'),
(0xf8d2, 0xf8d2, 'CHECK NEW LINE NUMBER FLAG'),
(0xf8d4, 0xf8d4, 'BRANCH IF NOT INSERTING NEW LINE NUMBERS'),
(0xf8d6, 0xf8d6, 'STORE THE NEW LINE NUMBER IN THE BASIC PROGRAM'),
(0xf8d8, 0xf8d8, 'POINT X TO THE NEXT LINE IN BASIC'),
(0xf8da, 0xf8da, 'RETURN IF END OF PROGRAM'),
(0xf8dc, 0xf8dc, 'ADD INTERVAL TO CURRENT RENUMBERED LINE NUMBER'),
(0xf8de, 0xf8de, "'FC' ERROR IF LINE NUMBER > $FFFF"),
(0xf8e0, 0xf8e0, 'LARGEST LINE NUMBER = $F9FF'),
(0xf8e2, 0xf8e2, 'BRANCH IF LEGAL LINE NUMBER'),
(0xf8e4, 0xf8e4, "'FC' ERROR IF LINE NUMBER MS BYTE > $F9"),
(0xf8e7, 0xf8e7, 'SAVE ACCD'),
(0xf8e9, 0xf8e9, 'TEST THE 2 BYTES POINTED TO BY X'),
(0xf8eb, 0xf8eb, 'RESTORE ACCD'),
(0xf8ed, 0xf8ed, 'BRANCH IF NOT END OF PROGRAM'),
(0xf8ef, 0xf8ef, 'PURGE RETURN ADDRESS FROM STACK'),
(0xf8f2, 0xf8f2, 'GET START OF BASIC PROGRAM'),
(0xf8f4, 0xf8f4, 'MOVE POINTER BACK ONE'),
(0xf8f6, 0xf8f6, 'MOVE POINTER UP ONE'),
(0xf8f8, 0xf8f8, 'RETURN IF END OF PROGRAM'),
(0xf8fa, 0xf8fa, 'SKIP OVER NEXT LINE ADDRESS AND LINE NUMBER'),
(0xf8fc, 0xf8fc, 'MOVE POINTER TO NEXT CHARACTER'),
(0xf8fe, 0xf8fe, 'CHECK CURRENT CHARACTER'),
(0xf900, 0xf900, 'BRANCH IF END OF LINE'),
(0xf902, 0xf902, 'SAVE CURRENT POINTER'),
(0xf904, 0xf904, '='),
(0xf905, 0xf905, '=BRANCH IF START OF PACKED NUMERIC LINE'),
(0xf908, 0xf908, '*BRANCH IF LINE NUMBER EXISTS'),
(0xf90a, 0xf90a, '='),
(0xf90b, 0xf90b, '=MOVE TO NEXT CHARACTER IF > 3'),
(0xf90d, 0xf90d, '* SET 1ST BYTE = 3 TO INDICATE LINE'),
(0xf90f, 0xf90f, "* NUMBER DOESN'T CURRENTLY EXIST"),
(0xf911, 0xf911, 'GO GET ANOTHER CHARACTER'),
(0xf913, 0xf913, 'GET MS BYTE OF LINE NUMBER'),
(0xf915, 0xf915, 'DECREMENT ZERO CHECK BYTE'),
(0xf917, 0xf917, 'BRANCH IF MS BYTE <> 0'),
(0xf919, 0xf919, 'CLEAR MS BYTE'),
(0xf91a, 0xf91a, 'GET LS BYTE OF LINE NUMBER'),
(0xf91c, 0xf91c, 'DECREMENT ZERO CHECK FLAG'),
(0xf91e, 0xf91e, 'BRANCH IF IS BYTE <> 0'),
(0xf920, 0xf920, 'CLEAR LS BYTE'),
(0xf921, 0xf921, 'SAVE BINARY LINE NUMBER'),
(0xf923, 0xf923, 'SAVE TRIAL LINE NUMBER'),
(0xf925, 0xf925, 'FIND RAM ADDRESS OF A BASIC LINE NUMBER'),
(0xf928, 0xf928, 'GET BACK POINTER TO START OF PACKED LINE NUMBER'),
(0xf92a, 0xf92a, 'BRANCH IF NO LINE NUMBER MATCH FOUND'),
(0xf92c, 0xf92c, 'GET START ADDRESS OF LINE NUMBER'),
(0xf92e, 0xf92e, '* SET 1ST BYTE = 2, TO INDICATE LINE NUMBER EXISTS IF CHECKING FOR'),
(0xf930, 0xf930, 'SAVE RAM ADDRESS OF CORRECT LINE NUMBER'),
(0xf932, 0xf932, 'GO GET ANOTHER CHARACTER'),
(0xf934, 0xf934, 'CLEAR CARRY FLAG AND 1ST BYTE'),
(0xf936, 0xf936, 'POINT X TO RAM ADDRESS OF CORRECT LINE NUMBER'),
(0xf938, 0xf938, 'PUT CORRECT LINE NUMBER INTO (X)'),
(0xf93a, 0xf93a, 'SAVE IT TEMPORARILY'),
(0xf93c, 0xf93c, 'GO INSERT IT INTO BASIC LINE'),
(0xf93e, 0xf93e, 'GET BEGINNING OF BASIC PROGRAM'),
(0xf942, 0xf942, '*GET CURRENT INPUT POINTER'),
(0xf944, 0xf944, '*AND BUMP IT ONE'),
(0xf946, 0xf946, 'RETURN IF END OF PROGRAM'),
(0xf948, 0xf948, 'SKIP PAST NEXT LINE ADDRESS'),
(0xf94a, 0xf94a, 'ADVANCE POINTER BY ONE'),
(0xf94c, 0xf94c, 'SAVE NEW BASIC INPUT POINTER'),
(0xf94e, 0xf94e, 'GET NEXT CHARACTER FROM BASIC'),
(0xf950, 0xf950, 'CHECK THE CHARACTER'),
(0xf951, 0xf951, 'BRANCH IF END OF LINE'),
(0xf953, 0xf953, 'BRANCH IF NOT A TOKEN'),
(0xf955, 0xf955, 'GET CURRENT INPUT POINTER'),
(0xf957, 0xf957, 'IS THIS A SECONDARY TOKEN?'),
(0xf959, 0xf959, 'YES - IGNORE IT'),
(0xf95b, 0xf95b, 'TOKEN FOR THEN?'),
(0xf95d, 0xf95d, 'YES'),
(0xf95f, 0xf95f, 'TOKEN FOR ELSE?'),
(0xf961, 0xf961, 'YES'),
(0xf963, 0xf963, 'TOKEN FOR GO?'),
(0xf965, 0xf965, 'NO'),
(0xf967, 0xf967, 'GET A CHARACTER FROM BASIC'),
(0xf969, 0xf969, 'TOKEN FOR TO?'),
(0xf96b, 0xf96b, 'YES'),
(0xf96d, 0xf96d, 'TOKEN FOR SUB?'),
(0xf96f, 0xf96f, 'NO'),
(0xf971, 0xf971, 'GET A CHARACTER FROM BASIC'),
(0xf973, 0xf973, 'BRANCH IF NUMERIC'),
(0xf975, 0xf975, 'GET CURRENT BASIC INPUT CHARRACTER'),
(0xf977, 0xf977, 'KEEP CHECKING THE LINE'),
(0xf979, 0xf979, 'GET CURRENT INPUT ADDRESS'),
(0xf97b, 0xf97b, 'SAVE IT ON THE STACK'),
(0xf97d, 0xf97d, 'CONVERT DECIMAL BASIC NUMBER TO BINARY'),
(0xf980, 0xf980, 'GET CURRENT INPUT POINTER'),
(0xf982, 0xf982, 'GET PREVIOUS INPUT CHARACTER'),
(0xf984, 0xf984, 'CLEAR CARRY IF NUMERIC INPUT VALUE'),
(0xf987, 0xf987, 'BRANCH IF NON-NUMERIC'),
(0xf989, 0xf989, 'MOVE POINTER UP ONE'),
(0xf98b, 0xf98b, 'NOW ACCD POINTS TO ONE PAST END OF LINE NUMBER'),
(0xf98d, 0xf98d, 'SUBTRACT PRE-NUMERIC POINTER LS BYTE'),
(0xf98f, 0xf98f, 'MAKE SURE THERE ARE AT LEAST 5 CHARACTERS IN THE NUMERIC LINE'),
(0xf991, 0xf991, 'BRANCH IF EXACTLY 5'),
(0xf993, 0xf993, 'BRANCH IF < 5'),
(0xf995, 0xf995, 'TRANSFER X TO U'),
(0xf997, 0xf997, 'NEGATE B'),
(0xf998, 0xf998, 'MOVE X BACK B BYTES'),
(0xf99a, 0xf99a, '*MOVE BYTES FROM (U) TO (X) UNTIL'),
(0xf99f, 0xf99f, 'SAVE END OF NUMERIC VALUE'),
(0xf9a1, 0xf9a1, 'GET END OF BASIC PROGRAM'),
(0xf9a3, 0xf9a3, 'SAVE IT'),
(0xf9a5, 0xf9a5, 'NEGATE B'),
(0xf9a6, 0xf9a6, 'ADD IT TO END OF NUMERIC POiNTER'),
(0xf9a8, 0xf9a8, 'SAVE POINTER'),
(0xf9aa, 0xf9aa, 'STORE END OF BASIC PROGRAM'),
(0xf9ac, 0xf9ac, 'ACCD = TOP OF ARRAYS - CHECK FOR ENOUGH ROOM'),
(0xf9af, 0xf9af, '* GET AND SAVE THE'),
(0xf9b1, 0xf9b1, '* NEW CURRENT INPUT POINTER'),
(0xf9b3, 0xf9b3, 'RESTORE POINTER TO START OF NUMERIC VALUE'),
(0xf9b5, 0xf9b5, 'NEW LINE NUMBER FLAG'),
(0xf9b7, 0xf9b7, '* SAVE NEW LINE FLAG'),
(0xf9bd, 0xf9bd, 'GET MS BYTE OF BINARY LINE NUMBER'),
(0xf9bf, 0xf9bf, 'BRANCH IF IT IS NOT ZERO'),
(0xf9c1, 0xf9c1, 'SAVE A 1 IF BYTE IS 0; OTHERWISE, BASIC WILL'),
(0xf9c3, 0xf9c3, 'IF 2,X = 2, THEN PREVIOUS BYTE WAS A ZERO'),
(0xf9c5, 0xf9c5, 'SAVE MS BYTE OF BINARY LINE NUMBER'),
(0xf9c7, 0xf9c7, 'GET IS BYTE OF BINARY LINE NUMBER'),
(0xf9c9, 0xf9c9, 'BRANCH IF NOT A ZERO BYTE'),
(0xf9cb, 0xf9cb, 'SAVE A 1 IF BYTE IS A 0'),
(0xf9cd, 0xf9cd, 'IF 4,X = 2, THEN PREVIOUS BYTE WAS A 0'),
(0xf9cf, 0xf9cf, 'SAVE LS BYTE OF BINARY LINE NUMBER'),
(0xf9d1, 0xf9d1, 'GET CURRENT INPUT CHARACTER'),
(0xf9d3, 0xf9d3, 'IS IT A COMMA?'),
(0xf9d5, 0xf9d5, 'YES - PROCESS ANOTHER NUMERIC VALUE'),
(0xf9d7, 0xf9d7, 'NO - GO GET AND PROCESS AN INPUT CHARACTER'),
(0xf9d9, 0xf9d9, 'POINT X TO START OF BASIC PROGRAM'),
(0xf9db, 0xf9db, 'MOVE POINTER BACK ONE'),
(0xf9dd, 0xf9dd, 'MOVE POINTER UP ONE'),
(0xf9df, 0xf9df, 'GET ADDRESS OF NEXT LINE'),
(0xf9e1, 0xf9e1, 'SAVE IT IN CURLIN'),
(0xf9e3, 0xf9e3, 'RETURN IF END OF PROGRAM'),
(0xf9e6, 0xf9e6, 'SKIP OVER ADDRESS OF NEXT LINE AND 1ST BYTE OF LINE NUMBER'),
(0xf9e8, 0xf9e8, 'MOVE POINTER UP ONE'),
(0xf9ea, 0xf9ea, 'GET CURRENT CHARACTER'),
(0xf9ec, 0xf9ec, 'BRANCH IF END OF LINE'),
(0xf9ee, 0xf9ee, 'INPUT CHARACTER = 1? - VALID LINE NUMBER'),
(0xf9ef, 0xf9ef, 'YES'),
(0xf9f1, 0xf9f1, 'INPUT CHARACTER 3? - UL LINE NUMBER'),
(0xf9f3, 0xf9f3, 'NO'),
(0xf9f5, 0xf9f5, 'SAVE CURRENT POSITION OF INPUT POINTER'),
(0xf9f7, 0xf9f7, "POINT X TO 'UL' MESSAGE"),
(0xf9fa, 0xf9fa, 'PRINT STRING TO THE SCREEN'),
(0xf9fd, 0xf9fd, 'GET INPUT POINTER'),
(0xf9ff, 0xf9ff, 'GET THE UNDEFINED LINE NUMBER'),
(0xfa01, 0xfa01, 'CONVERT NUMBER IN ACCD TO DECIMAL AND DISPLAY IT'),
(0xfa04, 0xfa04, "PRINT 'IN XXXX' XXXX = CURRENT LINE NUMBER"),
(0xfa07, 0xfa07, 'SEND A CR TO CONSOLE OUT'),
(0xfa0a, 0xfa0a, 'GET INPUT POINTER BACK'),
(0xfa0c, 0xfa0c, 'SAVE CURRENT POSITION OF INPUT POINTER'),
(0xfa0e, 0xfa0e, 'LOAD ACCD WITH BINARY VALUE OF LINE NUMBER'),
(0xfa10, 0xfa10, 'SAVE IN BOTTOM 2 BYTES OF FPA0'),
(0xfa12, 0xfa12, 'ADJUST REST OF FPA0 AS AN INTEGER'),
(0xfa15, 0xfa15, 'CONVERT FPA0 TO ASCII, STORE IN LINE NUMBER'),
(0xfa18, 0xfa18, 'LOAD U WITH PREVIOUS ADDRESS OF INPUT POINTER'),
(0xfa1a, 0xfa1a, 'EACH EXPANDED LINE NUMBER USES 5 BYTES'),
(0xfa1c, 0xfa1c, 'MOVE POINTER FORWARD ONE'),
(0xfa1e, 0xfa1e, 'GET AN ASCII BYTE'),
(0xfa20, 0xfa20, 'BRANCH IF END OF NUMBER'),
(0xfa22, 0xfa22, 'DECREMENT BYTE COUNTER'),
(0xfa23, 0xfa23, 'STORE ASCII NUMBER IN BASIC LINE'),
(0xfa25, 0xfa25, 'CHECK FOR ANOTHER DIGIT'),
(0xfa27, 0xfa27, 'TRANSFER NEW LINE POINTER TO (X)'),
(0xfa29, 0xfa29, 'NEW LINE NUMBER REQUIRE 5 BYTES?'),
(0xfa2a, 0xfa2a, 'YES - GO GET ANOTHER INPUT CHARACTER'),
(0xfa2c, 0xfa2c, 'SAVE NEW LINE POINTER IN Y'),
(0xfa2e, 0xfa2e, 'POINT U TO END OF 5 BYTE PACKED LINE NUMBER BLOCK'),
(0xfa30, 0xfa30, 'MOVE BYTES FROM (U) TO (X) UNTIL END OF PROGRAM'),
(0xfa33, 0xfa33, 'LOAD (X) WITH NEW LINE POINTER'),
(0xfa35, 0xfa35, 'GO GET ANOTHER INPUT CHARACTER'),
(0xfa37, 0xfa37, 'UNKNOWN LINE NUMBER MESSAGE'),
(0xfa3b, 0xfa3b, 'CONVERT FPA0 INTO A POSITIVE 2 BYTE INTEGER'),
(0xfa3e, 0xfa3e, 'POINT TO TEMPORARY BUFFER'),
(0xfa41, 0xfa41, 'CONVERT 4 NIBBLES'),
(0xfa43, 0xfa43, 'SAVE NIBBLE COUNTER'),
(0xfa45, 0xfa45, 'CLEAR CARRY FLAG'),
(0xfa46, 0xfa46, '4 SHIFTS'),
(0xfa48, 0xfa48, '* SHIFT BOTTOM TWO BYTES OF'),
(0xfa4a, 0xfa4a, '* FPA0 LEFT ONE BIT (X2)'),
(0xfa4c, 0xfa4c, 'IF OVERFLOW, ACCB <> 0'),
(0xfa4d, 0xfa4d, '* DECREMENT SHIFT COUNTER AND'),
(0xfa4e, 0xfa4e, '* BRANCH IF NOT DONE'),
(0xfa50, 0xfa50, 'OVERFLOW'),
(0xfa51, 0xfa51, 'BRANCH IF OVERFLOW'),
(0xfa53, 0xfa53, '* GET NIBBLE COUNTER,'),
(0xfa55, 0xfa55, '* DECREMENT IT AND'),
(0xfa56, 0xfa56, '* BRANCH IF DONE'),
(0xfa58, 0xfa58, 'DO NOT DO A CONVERSION UNTIL A NON-ZERO'),
(0xfa5b, 0xfa5b, 'BYTE IS FOUND - LEADING ZERO SUPPRESSION'),
(0xfa5d, 0xfa5d, 'ADD IN ASCII ZERO'),
(0xfa5f, 0xfa5f, 'COMPARE TO ASCII 9'),
(0xfa61, 0xfa61, 'BRANCH IF < 9'),
(0xfa63, 0xfa63, 'ADD ASCII OFFSET IF HEX LETTER'),
(0xfa65, 0xfa65, 'STORE HEX VALUE AND ADVANCE POINTER'),
(0xfa67, 0xfa67, 'CLEAR NEXT BYTE - END OF STRING FLAG'),
(0xfa69, 0xfa69, '* GET NIBBLE COUNTER,'),
(0xfa6b, 0xfa6b, '* DECREMENT IT AND'),
(0xfa6c, 0xfa6c, '* BRANCH IF NOT DONE'),
(0xfa6e, 0xfa6e, 'PURGE RETURN ADDRESS OFF OF STACK'),
(0xfa70, 0xfa70, 'RESET POINTER'),
(0xfa73, 0xfa73, 'SAVE STRING ON STRING STACK'),
(0xfa76, 0xfa76, '* SET SPACES'),
(0xfa78, 0xfa78, '* COUNTER = 1'),
(0xfa7a, 0xfa7a, 'DECREMENT FORMAT STRING LENGTH COUNTER'),
(0xfa7b, 0xfa7b, "SEND A '+' TO CONSOLE OUT IF VDA <>0"),
(0xfa7e, 0xfa7e, 'GET CURRENT INPUT CHARACTER'),
(0xfa80, 0xfa80, 'EXIT PRINT USING IF END OF LINE'),
(0xfa84, 0xfa84, 'SAVE REMAINDER FORMAT STRING LENGTH'),
(0xfa86, 0xfa86, 'EVALUATE EXPRESSION'),
(0xfa89, 0xfa89, "'TM' ERROR IF NUMERIC VARIABLE"),
(0xfa8c, 0xfa8c, '* GET ITEM - LIST DESCRIPTOR ADDRESS'),
(0xfa8e, 0xfa8e, '* AND SAVE IT IN V4D'),
(0xfa90, 0xfa90, 'GET SPACES COUNTER'),
(0xfa92, 0xfa92, 'PUT ACCB BYTES INTO STRING SPACE & PUT DESCRIPTOR ON STRING STACK'),
(0xfa95, 0xfa95, 'PRINT THE FORMATTED STRING TO CONSOLE OUT'),
(0xfa98, 0xfa98, 'POINT X TO FORMATTED STRING DESCRIPTOR ADDRESS'),
(0xfa9a, 0xfa9a, 'GET SPACES COUNTER'),
(0xfa9c, 0xfa9c, 'SUBTRACT LENGTH OF FORMATTED STRING'),
(0xfa9e, 0xfa9e, 'DECREMENT DIFFERENCE'),
(0xfa9f, 0xfa9f, 'GO INTERPRET ANOTHER ITEM - LIST'),
(0xfaa3, 0xfaa3, 'PAD FORMAT STRING WITH A SPACE'),
(0xfaa6, 0xfaa6, 'KEEP PADDING'),
(0xfaa8, 0xfaa8, '* SAVE THE CURRENT FORMAT STRING'),
(0xfaaa, 0xfaaa, '* COUNTER AND POINTER'),
(0xfaac, 0xfaac, 'INITIAL SPACES COUNTER = 2'),
(0xfaae, 0xfaae, 'SAVE IN SPACES COUNTER'),
(0xfab0, 0xfab0, 'GET A CHARACTER FROM FORMAT STRING'),
(0xfab2, 0xfab2, 'COMPARE TO TERMINATOR CHARACTER'),
(0xfab4, 0xfab4, 'BRANCH IF END OF SPACES COMMAND'),
(0xfab6, 0xfab6, 'BLANK'),
(0xfab8, 0xfab8, 'BRANCH IF ILLEGAL CHARACTER'),
(0xfaba, 0xfaba, 'ADD ONE TO SPACES COUNTER'),
(0xfabc, 0xfabc, 'MOVE FORMAT POINTER UP ONE'),
(0xfabe, 0xfabe, 'DECREMENT LENGTH COUNTER'),
(0xfabf, 0xfabf, 'BRANCH IF NOT END OF FORMAT STRING'),
(0xfac1, 0xfac1, '* RESTORE CURRENT FORMAT STRING COUNTER'),
(0xfac3, 0xfac3, '* AND POINTER TO POSITION BEFORE SPACES COMMAND'),
(0xfac5, 0xfac5, "SEND A '%' TO CONSOLE OUT AS A DEBUGGING AID"),
(0xfac7, 0xfac7, "SEND A '+' TO CONSOLE OUT IF VDA <> 0"),
(0xfaca, 0xfaca, 'SEND CHARACTER TO CONSOLE OUT'),
(0xfacd, 0xfacd, 'GET NEXT CHARACTER IN FORMAT STRING'),
(0xfacf, 0xfacf, 'USING TOKEN'),
(0xfad1, 0xfad1, 'BRANCH IF PRINT USING'),
(0xfad4, 0xfad4, 'PURGE RETURN ADDRESS OFF THE STACK'),
(0xfad6, 0xfad6, 'EVALUATE FORMAT STRING'),
(0xfad9, 0xfad9, "'TM' ERROR IF VARIABLE TYPE = NUMERIC"),
(0xfadc, 0xfadc, 'CHECK FOR ITEM LIST SEPARATOR'),
(0xfade, 0xfade, 'SYNTAX CHECK FOR ;'),
(0xfae1, 0xfae1, '* GET FORMAT STRING DESCRIPTOR ADDRESS'),
(0xfae3, 0xfae3, '* AND SAVE IT IN VD5'),
(0xfae5, 0xfae5, 'GO PROCESS FORMAT STRING'),
(0xfae7, 0xfae7, '*CHECK NEXT PRINT ITEM FLAG AND'),
(0xfae9, 0xfae9, "*'FC' ERROR IF NO FURTHER PRINT ITEMS"),
(0xfaeb, 0xfaeb, 'RESET FORMAT STRING POINTER TO START OF STRING'),
(0xfaed, 0xfaed, 'RESET NEXT PRINT ITEM FLAG'),
(0xfaef, 0xfaef, 'GET LENGTH OF FORMAT STRING'),
(0xfaf1, 0xfaf1, 'INTERPRET FORMAT STRING IF LENGTH > 0'),
(0xfaf3, 0xfaf3, "'FC' ERROR IF FORMAT STRING = NULL"),
(0xfaf6, 0xfaf6, 'POINT X TO START OF FORMAT STRING'),
(0xfaf8, 0xfaf8, 'CLEAR THE STATUS BYTE'),
(0xfafa, 0xfafa, 'CLEAR LEFT DIGIT COUNTER'),
(0xfafc, 0xfafc, 'GET A CHARACTER FROM FORMAT STRING'),
(0xfafe, 0xfafe, 'EXCLAMATION POINT?'),
(0xfb00, 0xfb00, 'YES - STRING TYPE FORMAT'),
(0xfb04, 0xfb04, 'NUMBER SIGN? (DIGIT LOCATOR)'),
(0xfb06, 0xfb06, 'YES - NUMERIC TYPE FORMAT'),
(0xfb08, 0xfb08, 'DECREMENT FORMAT STRING LENGTH'),
(0xfb09, 0xfb09, 'BRANCH IF NOT DONE'),
(0xfb0b, 0xfb0b, "SEND A '+' TO CONSOLE OUT IF VDA <> 0"),
(0xfb0e, 0xfb0e, 'SEND CHARACTER TO CONSOLE OUT'),
(0xfb11, 0xfb11, 'GET CURRENT CHARACTER FROM BASIC'),
(0xfb13, 0xfb13, 'BRANCH IF NOT END OF LINE'),
(0xfb15, 0xfb15, 'GET NEXT PRINT ITEM FLAG'),
(0xfb17, 0xfb17, 'BRANCH IF MORE PRINT ITEMS'),
(0xfb19, 0xfb19, 'SEND A CARRIAGE RETURN TO CONSOLE OUT'),
(0xfb1c, 0xfb1c, 'POINT X TO FORMAT STRING DESCRIPTOR'),
(0xfb1e, 0xfb1e, 'RETURN ADDRESS AND LENGTH OF FORMAT STRING - EXIT PRINT USING'),
(0xfb21, 0xfb21, "CHECK FOR '+' (PRE-SIGN FORCE)"),
(0xfb23, 0xfb23, 'NO PLUS'),
(0xfb25, 0xfb25, "SEND A '+' TO CONSOLE OUT IF VDA <> 0"),
(0xfb28, 0xfb28, '* LOAD THE STATUS BYTE WITH 8;'),
(0xfb2a, 0xfb2a, '* PRE-SIGN FORCE FLAG'),
(0xfb2c, 0xfb2c, 'INTERPRET THE REST OF THE FORMAT STRING'),
(0xfb2e, 0xfb2e, 'DECIMAL POINT?'),
(0xfb30, 0xfb30, 'YES'),
(0xfb32, 0xfb32, 'PERCENT SIGN?'),
(0xfb34, 0xfb34, 'YES'),
(0xfb38, 0xfb38, 'COMPARE THE PRESENT FORMAT STRING INPUT'),
(0xfb3a, 0xfb3a, 'NO MATCH - ILLEGAL CHARACTER'),
(0xfb3c, 0xfb3c, 'DOLLAR SIGN?'),
(0xfb3e, 0xfb3e, 'YES - MAKE THE DOLLAR SIGN FLOAT'),
(0xfb40, 0xfb40, 'ASTERISK?'),
(0xfb42, 0xfb42, 'NO - ILLEGAL CHARACTER'),
(0xfb44, 0xfb44, '* GRAB THE STATUS BYTE AND BET BIT 5'),
(0xfb46, 0xfb46, '* TO INDICATE THAT THE OUTPUT WILL'),
(0xfb48, 0xfb48, '* BE LEFT PADDED WITH ASTERISKS'),
(0xfb4a, 0xfb4a, '* CHECK TO SEE IF THE $$ ARE THE LAST TWO'),
(0xfb4c, 0xfb4c, '* CHARACTERS IN THE FORMAT STRING AND BRANCH IF SO'),
(0xfb4e, 0xfb4e, 'GET THE NEXT CHARACTER AFTER **'),
(0xfb50, 0xfb50, 'CHECK FOR **$'),
(0xfb52, 0xfb52, 'CHECK FOR MORE CHARACTERS'),
(0xfb54, 0xfb54, 'DECREMENT STRING LENGTH COUNTER'),
(0xfb55, 0xfb55, 'MOVE FORMAT STRING POINTER UP ONE'),
(0xfb57, 0xfb57, 'ADD ONE TO LEFT DIGIT COUNTER - FOR ASTERISK PAD AND'),
(0xfb59, 0xfb59, '* GET THE STATUS BYTE AND SET'),
(0xfb5b, 0xfb5b, '* BIT 4 TO INDICATE A'),
(0xfb5d, 0xfb5d, '* FLOATING DOLLAR SIGN'),
(0xfb5f, 0xfb5f, 'MOVE FORMAT STRING POINTER UP ONE'),
(0xfb61, 0xfb61, 'ADD ONE TO LEFT DIGIT (FLOATING $ OR ASTERISK PAD)'),
(0xfb63, 0xfb63, 'CLEAR THE RIGHT DIGIT COUNTER'),
(0xfb65, 0xfb65, 'ADD ONE TO LEFT DIGIT COUNTER'),
(0xfb67, 0xfb67, 'DECREMENT FORMAT STRING LENGTH COUNTER'),
(0xfb68, 0xfb68, 'BRANCH IF END OF FORMAT STRING'),
(0xfb6a, 0xfb6a, 'GET THE NEXT FORMAT CHARACTER'),
(0xfb6c, 0xfb6c, 'DECIMAL POINT?'),
(0xfb6e, 0xfb6e, 'YES'),
(0xfb70, 0xfb70, 'NUMBER SIGN?'),
(0xfb72, 0xfb72, 'YES'),
(0xfb74, 0xfb74, 'COMMA?'),
(0xfb76, 0xfb76, 'NO'),
(0xfb78, 0xfb78, '* GET THE STATUS BYTE'),
(0xfb7a, 0xfb7a, '* AND SET BIT 6 WHICH IS THE'),
(0xfb7c, 0xfb7c, '* COMMA SEPARATOR FLAG'),
(0xfb7e, 0xfb7e, 'PROCESS MORE CHARACTERS TO LEFT OF DECIMAL POINT'),
(0xfb80, 0xfb80, 'GET NEXT FORMAT CHARACTER'),
(0xfb82, 0xfb82, 'IS IT A NUMBER SIGN?'),
(0xfb84, 0xfb84, 'NO'),
(0xfb88, 0xfb88, '* SET THE RIGHT DIGIT COUNTER TO 1 -'),
(0xfb8a, 0xfb8a, '* ALLOW ONE SPOT FOR DECIMAL POINT'),
(0xfb8c, 0xfb8c, 'MOVE FORMAT POINTER UP ONE'),
(0xfb8e, 0xfb8e, 'ADD ONE TO RIGHT DIGIT COUNTER'),
(0xfb90, 0xfb90, 'DECREMENT FORMAT LENGTH COUNTER'),
(0xfb91, 0xfb91, 'BRANCH IF END OF FORMAT STRING'),
(0xfb93, 0xfb93, 'GET A CHARACTER FROM FORMAT STRING'),
(0xfb95, 0xfb95, 'IS IT NUMBER SIGN?'),
(0xfb97, 0xfb97, 'YES - KEEP CHECKING'),
(0xfb99, 0xfb99, 'CHECK FOR UP ARROW'),
(0xfb9b, 0xfb9b, 'NO UP ARROW'),
(0xfb9d, 0xfb9d, 'IS THE NEXT CHARACTER AN UP ARROW?'),
(0xfb9f, 0xfb9f, 'NO'),
(0xfba1, 0xfba1, 'AND THE NEXT CHARACTER?'),
(0xfba3, 0xfba3, 'NO'),
(0xfba5, 0xfba5, 'HOW ABOUT THE 4TH CHARACTER?'),
(0xfba7, 0xfba7, 'NO, ALSO'),
(0xfba9, 0xfba9, '* CHECK TO SEE IF THE 4 UP ARROWS ARE IN THE'),
(0xfbab, 0xfbab, '* FORMAT STRING AND BRANCH IF NOT'),
(0xfbad, 0xfbad, '* MOVE POINTER UP 4 AND SUBTRACT'),
(0xfbaf, 0xfbaf, '* FOUR FROM LENGTH'),
(0xfbb1, 0xfbb1, 'STATUS BYTE - EXPONENTIAL FORM'),
(0xfbb3, 0xfbb3, 'MOVE POINTER BACK ONE'),
(0xfbb5, 0xfbb5, 'ADD ONE TO LEFT DIGIT COUNTER FOR PRE-SIGN FORCE'),
(0xfbb7, 0xfbb7, '* PRE-SIGN'),
(0xfbb9, 0xfbb9, '* FORCE AND'),
(0xfbbb, 0xfbbb, '* BRANCH IF SET'),
(0xfbbd, 0xfbbd, 'DECREMENT LEFT DIGIT - NO PRE-SIGN FORCE'),
(0xfbbf, 0xfbbf, 'LENGTH COUNTER AND BRANCH'),
(0xfbc0, 0xfbc0, '* IF END OF FORMAT STRING'),
(0xfbc2, 0xfbc2, 'GET NEXT FORMAT STRING CHARACTER'),
(0xfbc4, 0xfbc4, 'CHECK FOR MINUS SIGN'),
(0xfbc6, 0xfbc6, 'BRANCH IF MINUS SIGN'),
(0xfbc8, 0xfbc8, "* WAS CMPA #('+')-('-')"),
(0xfbca, 0xfbca, 'BRANCH IF NO PLUS SIGN'),
(0xfbcc, 0xfbcc, 'GET THE PRE-SIGN FORCE FLAG'),
(0xfbce, 0xfbce, "'OR' IN POST-SIGN FORCE FLAG"),
(0xfbd0, 0xfbd0, "'OR' IN THE STATUS BYTE"),
(0xfbd2, 0xfbd2, 'SAVE THE STATUS BYTE'),
(0xfbd4, 0xfbd4, 'DECREMENT FORMAT STRING LENGTH'),
(0xfbd5, 0xfbd5, 'GET CURRENT CHARACTER'),
(0xfbd7, 0xfbd7, 'BRANCH IF END OF LINE'),
(0xfbdb, 0xfbdb, 'SAVE FORMAT STRING LENGTH WHEN FORMAT EVALUATION ENDED'),
(0xfbdd, 0xfbdd, 'EVALUATE EXPRESSION'),
(0xfbe0, 0xfbe0, 'GET THE LEFT DIGIT COUNTER'),
(0xfbe2, 0xfbe2, 'ADD IT TO THE RIGHT DIGIT COUNTER'),
(0xfbe6, 0xfbe6, "*'FC' ERROR IF MORE THAN 16 DIGITS AND DECIMAL POiNT"),
(0xfbea, 0xfbea, 'CONVERT ITEM-LIST TO FORMATTED ASCII STRING'),
(0xfbed, 0xfbed, 'MOVE BUFFER POINTER BACK ONE'),
(0xfbef, 0xfbef, 'DISPLAY THE FORMATTED STRING TO CONSOLE OUT'),
(0xfbf2, 0xfbf2, 'RESET NEXT PRINT ITEM FLAG'),
(0xfbf4, 0xfbf4, 'GET CURRENT INPUT CHARACTER'),
(0xfbf6, 0xfbf6, 'BRANCH IF END OF LINE'),
(0xfbf8, 0xfbf8, 'SAVE CURRENT CHARACTER (<>0) IN NEXT PRINT ITEM FLAG'),
(0xfbfa, 0xfbfa, '* CHECK FOR ; - ITEM-LIST SEPARATOR AND'),
(0xfbfc, 0xfbfc, '* BRANCH IF SEMICOLON'),
(0xfbfe, 0xfbfe, 'SYNTAX CHECK FOR COMMA'),
(0xfc01, 0xfc01, 'PROCESS NEXT PRINT ITEM'),
(0xfc03, 0xfc03, 'GET NEXT INPUT CHARACTER'),
(0xfc05, 0xfc05, 'GET FORMAT STRING DESCRIPTOR ADDRESS'),
(0xfc07, 0xfc07, 'GET LENGTH OF FORMAT STRING'),
(0xfc09, 0xfc09, 'SUBTRACT AMOUNT OF FORMAT STRING LEFT AFTER LAST PRINT ITEM'),
(0xfc0b, 0xfc0b, '*GET FORMAT STRING START ADDRESS AND ADVANCE'),
(0xfc0d, 0xfc0d, 'TO START OF UNUSED FORMAT STRING'),
(0xfc0e, 0xfc0e, '* GET AMOUNT OF UNUSED FORMAT STRING'),
(0xfc10, 0xfc10, '* REINTERPRET FORMAT STRING FROM THAT POINT'),
(0xfc14, 0xfc14, 'REINTERPRET FORMAT STRING FROM THE START IF ENTIRELY'),
(0xfc17, 0xfc17, 'RESTORE ACCA AND RETURN'),
(0xfc19, 0xfc19, 'GET ASCII PLUS SIGN'),
(0xfc1b, 0xfc1b, '* CHECK THE STATUS BYTE AND'),
(0xfc1d, 0xfc1d, '* RETURN IF = 0'),
(0xfc1f, 0xfc1f, 'SEND A CHARACTER TO CONSOLE OUT'),
(0xfc22, 0xfc22, 'RETURN ACCA AND RETURN'),
(0xfc24, 0xfc24, 'POINT U TO STRING BUFFER'),
(0xfc27, 0xfc27, 'BLANK'),
(0xfc29, 0xfc29, '* GET THE STATUS FLAG AND'),
(0xfc2b, 0xfc2b, '* CHECK FOR A PRE-SIGN FORCE'),
(0xfc2d, 0xfc2d, '* BRANCH IF NO PRE-SIGN FORCE'),
(0xfc2f, 0xfc2f, 'PLUS SIGN'),
(0xfc31, 0xfc31, 'CHECK THE SIGN OF FPA0'),
(0xfc33, 0xfc33, 'BRANCH IF POSITIVE'),
(0xfc35, 0xfc35, 'FORCE FPA0 SIGN TO BE POSITIVE'),
(0xfc37, 0xfc37, 'MINUS SIGN'),
(0xfc39, 0xfc39, 'SAVE THE SIGN IN BUFFER'),
(0xfc3b, 0xfc3b, '* PUT A ZERO INTO THE BUFFER'),
(0xfc3f, 0xfc3f, '* CHECK THE EXPONENTIAL FORCE FLAG IN'),
(0xfc41, 0xfc41, '* THE STATUS BYTE - BRANCH IF ACTIVE'),
(0xfc45, 0xfc45, 'POINT X TO FLOATING POINT 1E + 09'),
(0xfc48, 0xfc48, 'COMPARE FPA0 TO (X)'),
(0xfc4b, 0xfc4b, 'BRANCH IF FPA0 < 1E+09'),
(0xfc4d, 0xfc4d, 'CONVERT FP NUMBER TO ASCII STRING'),
(0xfc50, 0xfc50, '* ADVANCE POINTER TO END OF'),
(0xfc52, 0xfc52, '* ASCII STRING (ZERO BYTE)'),
(0xfc54, 0xfc54, 'MOVE THE'),
(0xfc56, 0xfc56, 'ENTIRE STRING'),
(0xfc58, 0xfc58, 'UP ONE'),
(0xfc5b, 0xfc5b, 'BYTE'),
(0xfc5d, 0xfc5d, '* INSERT A % SIGN AT START OF'),
(0xfc5f, 0xfc5f, '* STRING - OVERFLOW ERROR'),
(0xfc62, 0xfc62, 'GET EXPONENT OF FPA0'),
(0xfc64, 0xfc64, 'AND SAVE IT IN V74'),
(0xfc66, 0xfc66, 'BRANCH IF FPA0 = 0'),
(0xfc68, 0xfc68, 'CONVERT FPA0 TO NUMBER WITH 9 SIGNIFICANT'),
(0xfc6b, 0xfc6b, 'GET BASE 10 EXPONENT OFFSET'),
(0xfc6d, 0xfc6d, 'BRANCH IF FPA0 < 100,000,000'),
(0xfc71, 0xfc71, '* CALCULATE THE NUMBER OF LEADING ZEROES TO INSERT -'),
(0xfc72, 0xfc72, '* SUBTRACT BASE 10 EXPONENT OFFSET AND 9 (FPA0 HAS'),
(0xfc74, 0xfc74, '* 9 PLACES TO LEFT OF EXPONENT) FROM LEFT DIGIT COUNTER'),
(0xfc76, 0xfc76, 'PUT ACCA ZEROES IN STRING BUFFER'),
(0xfc79, 0xfc79, 'INITIALIZE DECIMAL POINT AND COMMA COUNTERS'),
(0xfc7c, 0xfc7c, 'CONVERT FPA0 TO DECIMAL ASCII IN THE STRING BUFFER'),
(0xfc7f, 0xfc7f, '* GET BASE 10 EXPONENT AND PUT THAT MANY'),
(0xfc81, 0xfc81, '* ZEROES IN STRING BUFFER - STOP AT DECIMAL POINT'),
(0xfc84, 0xfc84, 'WASTED INSTRUCTION - SERVES NO PURPOSE'),
(0xfc86, 0xfc86, 'CHECK FOR DECIMAL POINT'),
(0xfc89, 0xfc89, 'GET THE RIGHT DIGIT COUNTER'),
(0xfc8b, 0xfc8b, 'BRANCH IF RIGHT DIGlT COUNTER <> 0'),
(0xfc8d, 0xfc8d, '* MOVE BUFFER POINTER BACK ONE - DELETE'),
(0xfc8f, 0xfc8f, 'SUBTRACT ONE (DECIMAL POINT)'),
(0xfc90, 0xfc90, 'PUT ACCA ZEROES INTO BUFFER (TRAILING ZEROES)'),
(0xfc93, 0xfc93, 'INSERT ASTERISK PADDING, FLOATING $, AND POST-SIGN'),
(0xfc96, 0xfc96, 'WAS THERE A POST-SIGN?'),
(0xfc97, 0xfc97, 'NO'),
(0xfc99, 0xfc99, 'IS THE FIRST CHARACTER AN $?'),
(0xfc9b, 0xfc9b, 'YES'),
(0xfc9d, 0xfc9d, 'STORE THE POST-SIGN'),
(0xfc9f, 0xfc9f, 'CLEAR THE LAST CHARACTER IN THE BUFFER'),
(0xfca1, 0xfca1, 'POINT X TO THE START OF THE BUFFER'),
(0xfca4, 0xfca4, 'MOVE BUFFER POINTER UP ONE'),
(0xfca6, 0xfca6, 'SAVE BUFFER POINTER IN TEMPTR'),
(0xfca8, 0xfca8, '* GET ADDRESS OF DECIMAL POINT IN BUFFER, SUBTRACT'),
(0xfcaa, 0xfcaa, '* CURRENT POSITION AND SUBTRACT LEFT DIGIT COUNTER -'),
(0xfcac, 0xfcac, '* THE RESULT WILL BE ZERO WHEN TEMPTR+1 IS POINTING'),
(0xfcae, 0xfcae, 'RETURN IF NO DIGITS TO LEFT OF THE DECiMAL POINT'),
(0xfcb0, 0xfcb0, 'GET THE CURRENT BUFFER CHARACTER'),
(0xfcb2, 0xfcb2, 'SPACE?'),
(0xfcb4, 0xfcb4, 'YES - ADVANCE POINTER'),
(0xfcb6, 0xfcb6, 'ASTERISK?'),
(0xfcb8, 0xfcb8, 'YES - ADVANCE POINTER'),
(0xfcba, 0xfcba, 'A ZERO ON THE STACK IS END OF DATA POINTER'),
(0xfcbb, 0xfcbb, 'PUSH A CHARACTER ONTO THE STACK'),
(0xfcbd, 0xfcbd, 'GET NEXT CHARACTER FROM BUFFER'),
(0xfcbf, 0xfcbf, 'MINUS SIGN?'),
(0xfcc1, 0xfcc1, 'YES'),
(0xfcc3, 0xfcc3, 'PLUS SIGN?'),
(0xfcc5, 0xfcc5, 'YES'),
(0xfcc7, 0xfcc7, 'DOLLAR SIGN?'),
(0xfcc9, 0xfcc9, 'YES'),
(0xfccb, 0xfccb, 'ZERO?'),
(0xfccd, 0xfccd, 'NO - ERROR'),
(0xfccf, 0xfccf, 'GET CHARACTER FOLLOWING ZERO'),
(0xfcd1, 0xfcd1, 'CLEAR CARRY IF NUMERIC'),
(0xfcd3, 0xfcd3, 'BRANCH IF NOT A NUMERIC CHARACTER - ERROR'),
(0xfcd5, 0xfcd5, '* PULL A CHARACTER OFF OF THE STACK'),
(0xfcd7, 0xfcd7, '* AND PUT IT BACK IN THE STRING BUFFER'),
(0xfcd9, 0xfcd9, '* KEEP GOING UNTIL ZERO FLAG'),
(0xfcdb, 0xfcdb, 'KEEP CLEANING UP THE INPUT BUFFER'),
(0xfcdf, 0xfcdf, '* THE STACK AND EXIT WHEN'),
(0xfce0, 0xfce0, '* ZERO FLAG FOUND'),
(0xfce2, 0xfce2, 'GET THE STRING BUFFER START POINTER'),
(0xfce4, 0xfce4, '* PUT A % SIGN BEFORE THE ERROR POSITION TO'),
(0xfce6, 0xfce6, '* INDICATE AN ERROR'),
(0xfce9, 0xfce9, 'ASCII ZERO'),
(0xfceb, 0xfceb, 'RETURN IF ACCA < ASCII 0'),
(0xfced, 0xfced, "* #'9'+1"),
(0xfcef, 0xfcef, "* #-('9'+1) CARRY CLEAR IF NUMERIC"),
(0xfcf2, 0xfcf2, 'GET RIGHT DIGIT COUNTER'),
(0xfcf4, 0xfcf4, 'BRANCH IF NO FORMATTED DIGITS TO THE RIGHT OF DECIMAL PT'),
(0xfcf6, 0xfcf6, 'SUBTRACT ONE FOR DECIMAL POINT'),
(0xfcf7, 0xfcf7, '*ADD THE BASE 10 EXPONENT OFFSET - ACCA CONTAINS THE'),
(0xfcf9, 0xfcf9, 'IF ACCA >= 0 THEN NO SHIFTS ARE REQUIRED'),
(0xfcfb, 0xfcfb, 'FORCE SHIFT COUNTER = 0'),
(0xfcfc, 0xfcfc, 'SAVE INITIAL SHIFT COUNTER ON THE STACK'),
(0xfcfe, 0xfcfe, 'EXIT ROUTINE IF POSITIVE'),
(0xfd00, 0xfd00, 'SAVE SHIFT COUNTER ON STACK'),
(0xfd02, 0xfd02, 'DIVIDE FPA0 BY 10 - SHIFT ONE DIGIT TO RIGHT'),
(0xfd05, 0xfd05, 'GET SHIFT COUNTER FROM THE STACK'),
(0xfd07, 0xfd07, 'BUMP SHIFT COUNTER UP BY ONE'),
(0xfd08, 0xfd08, 'CHECK FOR FURTHER DIVISION'),
(0xfd0a, 0xfd0a, '* GET BASE 10 EXPONENT OFFSET, ADD INITIAL SHIFT COUNTER'),
(0xfd0c, 0xfd0c, '* AND SAVE NEW BASE 10 EXPONENT OFFSET - BECAUSE'),
(0xfd0e, 0xfd0e, '* FPA0 WAS SHIFTED ABOVE'),
(0xfd10, 0xfd10, '* ADD NINE (SIGNIFICANT PLACES) AND BRANCH IF THERE ARE NO'),
(0xfd12, 0xfd12, '* ZEROES TO THE LEFT OF THE DECIMAL POINT IN THIS PRINT ITEM'),
(0xfd14, 0xfd14, '*DETERMINE HOW MANY FILLER ZEROES TO THE LEFT OF THE DECIMAL'),
(0xfd16, 0xfd16, '*POINT. GET THE NUMBER OF FORMAT PLACES TO LEFT OF DECIMAL'),
(0xfd18, 0xfd18, '*POINT, SUBTRACT THE BASE 10 EXPONENT OFFSET AND THE CONSTANT 9'),
(0xfd1a, 0xfd1a, '*(UNNORMALIZATION)-THEN OUTPUT THAT MANY ZEROES TO THE BUFFER'),
(0xfd1c, 0xfd1c, 'INITIALIZE DECIMAL POINT AND COMMA COUNTERS'),
(0xfd1f, 0xfd1f, 'PROCESS THE REMAINDER OF THE PRINT ITEM'),
(0xfd21, 0xfd21, 'SAVE ZERO COUNTER'),
(0xfd23, 0xfd23, '* INSERT A ZERO INTO'),
(0xfd25, 0xfd25, '* THE BUFFER'),
(0xfd27, 0xfd27, 'RESTORE ZERO COUNTER'),
(0xfd29, 0xfd29, 'DECREMENT ZERO COUNTER'),
(0xfd2a, 0xfd2a, 'BRANCH IF NOT DONE'),
(0xfd2d, 0xfd2d, '* GET THE LEFT DIGIT COUNTER AND PUT'),
(0xfd2f, 0xfd2f, '* THAT MANY ZEROES IN THE STRiNG BUFFER'),
(0xfd31, 0xfd31, 'PUT THE DECIMAL POINT IN THE STRING BUFFER'),
(0xfd34, 0xfd34, '*DETERMINE HOW MANY FILLER ZEROES BETWEEN THE DECIMAL POINT'),
(0xfd36, 0xfd36, '*AND SIGNIFICANT DATA. SUBTRACT BASE 10 EXPONENT FROM -9'),
(0xfd38, 0xfd38, '*(UNNORMALIZATION) AND OUTPUT THAT MANY ZEROES TO BUFFER'),
(0xfd3a, 0xfd3a, 'CLEAR THE DECIMAL POINT COUNTER - SUPPRESS THE DECIMAL POINT'),
(0xfd3c, 0xfd3c, 'CLEAR THE COMMA COUNTER - SUPPRESS COMMAS'),
(0xfd3e, 0xfd3e, 'DECODE FPA0 INTO A DECIMAL ASCII STRING'),
(0xfd41, 0xfd41, 'GET THE RIGHT DIGIT COUNTER'),
(0xfd43, 0xfd43, 'BRANCH IF RIGHT DIGIT COUNTER <> 0'),
(0xfd45, 0xfd45, 'RESET BUFFER PTR TO THE DECIMAL POINT IF NO DIGITS TO RIGHT'),
(0xfd47, 0xfd47, '*ADD BASE 10 EXPONENT - A POSITIVE ACCA WILL CAUSE THAT MANY'),
(0xfd49, 0xfd49, 'INSERT LEADING ASTERISKS, FLOATING DOLLAR SIGN, ETC'),
(0xfd4c, 0xfd4c, '* GET EXPONENT OF FPA0 AND'),
(0xfd4e, 0xfd4e, '* SAVE IT ON THE STACK'),
(0xfd50, 0xfd50, 'BRANCH IF FPA0 = 0'),
(0xfd52, 0xfd52, '*CONVERT FPA0 INTO A NUMBER WITH 9 SIGNIFICANT'),
(0xfd55, 0xfd55, 'GET THE RIGHT DIGIT COUNTER'),
(0xfd57, 0xfd57, 'BRANCH IF NO FORMATTED DIGITS TO THE RIGHT'),
(0xfd59, 0xfd59, 'SUBTRACT ONE FOR THE DECIMAL POINT'),
(0xfd5a, 0xfd5a, 'ADD TO THE LEFT DIGIT COUNTER'),
(0xfd5c, 0xfd5c, 'CLEAR BUFFER BYTE AS TEMPORARY STORAGE LOCATION'),
(0xfd5f, 0xfd5f, '* GET THE STATUS BYTE FOR A'),
(0xfd61, 0xfd61, '* POST-BYTE FORCE; BRANCH IF'),
(0xfd63, 0xfd63, '* A POST-BYTE FORCE'),
(0xfd65, 0xfd65, 'TOGGLE BUFFER BYTE TO -1 IF NO POST-BYTE FORCE'),
(0xfd68, 0xfd68, 'SUBTRACT 1 IF NO POST BYTE FORCE'),
(0xfd6b, 0xfd6b, '*SUBTRACT 9 (DUE TO THE CONVERSION TO 9'),
(0xfd6d, 0xfd6d, '* SAVE SHIFT COUNTER ON THE STACK - ACCA CONTAINS THE NUMBER'),
(0xfd6f, 0xfd6f, 'NO MORE SHIFTS WHEN ACCA >= 0'),
(0xfd71, 0xfd71, 'SAVE SHIFT COUNTER'),
(0xfd73, 0xfd73, 'DIVIDE FPA0 BY 10 - SHIFT TO RIGHT ONE'),
(0xfd76, 0xfd76, 'RESTORE THE SHIFT COUNTER'),
(0xfd78, 0xfd78, 'ADD 1 TO SHIFT COUNTER'),
(0xfd79, 0xfd79, 'CHECK FOR FURTHER SHIFTING (DIVISION)'),
(0xfd7b, 0xfd7b, '*GET THE INITIAL VALUE OF THE SHIFT COUNTER'),
(0xfd7d, 0xfd7d, '*AND BRANCH IF SHIFTING HAS TAKEN PLACE'),
(0xfd7f, 0xfd7f, 'RESET ACCA IF NO SHIFTING HAS TAKEN PLACE'),
(0xfd80, 0xfd80, '*CALCULATE THE POSITION OF THE DECIMAL POINT BY'),
(0xfd81, 0xfd81, '*NEGATING SHIFT COUNTER, ADDING THE LEFT DIGIT COUNTER'),
(0xfd83, 0xfd83, '*PLUS ONE AND THE POST-BYTE POSlTION, IF USED'),
(0xfd87, 0xfd87, 'SAVE DECIMAL POINT COUNTER'),
(0xfd89, 0xfd89, 'CLEAR COMMA COUNTER - NO COMMAS INSERTED'),
(0xfd8b, 0xfd8b, 'CONVERT FPA0 INTO ASCII DECIMAL STRING'),
(0xfd8e, 0xfd8e, '* GET THE INITIAL VALUE OF SHIFT COUNTER AND'),
(0xfd90, 0xfd90, '* INSERT THAT MANY ZEROES INTO THE BUFFER'),
(0xfd93, 0xfd93, '*GET THE RIGHT DIGIT COUNTER AND BRANCH'),
(0xfd95, 0xfd95, '*IF NOT ZERO'),
(0xfd97, 0xfd97, 'MOVE BUFFER POINTER BACK ONE'),
(0xfd99, 0xfd99, 'GET ORIGINAL EXPONENT OF FPA0'),
(0xfd9b, 0xfd9b, 'BRANCH IF EXPONENT = 0'),
(0xfd9d, 0xfd9d, 'GET BASE 10 EXPONENT'),
(0xfd9f, 0xfd9f, 'ADD 9 FOR 9 SIGNIFICANT DIGIT CONVERSION'),
(0xfda1, 0xfda1, 'SUBTRACT LEFT DIGIT COUNTER'),
(0xfda3, 0xfda3, 'ADD ONE TO EXPONENT IF POST-SIGN FORCE'),
(0xfda6, 0xfda6, 'PLUS SIGN'),
(0xfda8, 0xfda8, 'XPONENT'),
(0xfda9, 0xfda9, 'BRANCH IF POSITIVE EXPONENT'),
(0xfdab, 0xfdab, 'MINUS SIGN'),
(0xfdad, 0xfdad, 'CONVERT EXPONENT TO POSITIVE NUMBER'),
(0xfdae, 0xfdae, 'PUT SIGN OF EXPONENT IN STRING BUFFER'),
(0xfdb0, 0xfdb0, "* PUT AN 'E' (EXPONENTIATION FLAG) IN"),
(0xfdb2, 0xfdb2, '* BUFFER AND SKIP OVER THE SIGN'),
(0xfdb4, 0xfdb4, "* WAS LDA #'0'-1"),
(0xfdb6, 0xfdb6, 'ADD ONE TO TENS DIGIT COUNTER'),
(0xfdb7, 0xfdb7, '*SUBTRACT 10 FROM EXPONENT AND ADD ONE TO TENS'),
(0xfdb9, 0xfdb9, '* DIGIT IF NO CARRY. TENS DIGIT DONE IF THERE IS A CARRY'),
(0xfdbb, 0xfdbb, "WAS ADDB #'9'+1"),
(0xfdbd, 0xfdbd, 'SAVE EXPONENT IN BUFFER'),
(0xfdbf, 0xfdbf, 'CLEAR FINAL BYTE IN BUFFER - PRINT TERMINATOR'),
(0xfdc1, 0xfdc1, 'INSERT ASTERISK PADDING, FLOATING DOLLAR SIGN, ETC.'),
(0xfdc4, 0xfdc4, 'POINT X TO START OF PRINT ITEM BUFFER'),
(0xfdc7, 0xfdc7, '* GET SIGN BYTE OF ITEM-LIST BUFFER'),
(0xfdc9, 0xfdc9, '* AND SAVE IT ON THE STACK'),
(0xfdcb, 0xfdcb, 'DEFAULT PAD WITH BLANKS'),
(0xfdcd, 0xfdcd, '* GET STATUS BYTE AND CHECK FOR'),
(0xfdcf, 0xfdcf, '* ASTERISK LEFT PADDING'),
(0xfdd1, 0xfdd1, 'GET SIGN BYTE AGAIN'),
(0xfdd3, 0xfdd3, 'BRANCH IF NO PADDING'),
(0xfdd5, 0xfdd5, 'PAD WITH ASTERISK'),
(0xfdd7, 0xfdd7, 'WAS THE FIRST BYTE A BLANK (POSITIVE)?'),
(0xfdd9, 0xfdd9, 'NO'),
(0xfddb, 0xfddb, 'TRANSFER PAD CHARACTER TO ACCB'),
(0xfddd, 0xfddd, 'SAVE FIRST CHARACTER ON STACK'),
(0xfddf, 0xfddf, 'STORE PAD CHARACTER IN BUFFER'),
(0xfde1, 0xfde1, 'GET NEXT CHARACTER IN BUFFER'),
(0xfde3, 0xfde3, 'INSERT A ZERO IF END OF BUFFER'),
(0xfde5, 0xfde5, "* CHECK FOR AN 'E' AND"),
(0xfde7, 0xfde7, '* PUT A ZERO BEFORE IT'),
(0xfde9, 0xfde9, '* REPLACE LEADING ZEROES WITH'),
(0xfdeb, 0xfdeb, '* PAD CHARACTERS'),
(0xfded, 0xfded, '* REPLACE LEADING COMMAS'),
(0xfdef, 0xfdef, '* WITH PAD CHARACTERS'),
(0xfdf1, 0xfdf1, '* CHECK FOR DECIMAL POINT'),
(0xfdf3, 0xfdf3, "* AND DON'T PUT A ZERO BEFORE IT"),
(0xfdf5, 0xfdf5, '* REPLACE PREVIOUS CHARACTER'),
(0xfdf7, 0xfdf7, '* WITH A ZERO'),
(0xfdf9, 0xfdf9, '* GET STATUS BYTE, CHECK'),
(0xfdfb, 0xfdfb, '* FOR FLOATING $'),
(0xfdfd, 0xfdfd, '* BRANCH IF NO FLOATING $'),
(0xfdff, 0xfdff, '* STORE A $ IN'),
(0xfe01, 0xfe01, '* BUFFER'),
(0xfe03, 0xfe03, 'CHECK PRE-SIGN FLAG'),
(0xfe05, 0xfe05, 'GET SIGN CHARACTER'),
(0xfe07, 0xfe07, 'RETURN IF POST-SIGN REQUIRED'),
(0xfe09, 0xfe09, 'STORE FIRST CHARACTER'),
(0xfe0c, 0xfe0c, 'SAVE BUFFER POINTER'),
(0xfe0e, 0xfe0e, 'INITIAL EXPONENT OFFSET = 0'),
(0xfe0f, 0xfe0f, 'SAVE EXPONENT OFFSET'),
(0xfe11, 0xfe11, 'GET EXPONENT OF FPA0'),
(0xfe13, 0xfe13, '* COMPARE TO EXPONENT OF .5'),
(0xfe15, 0xfe15, '* AND BRANCH IF FPA0 > = 1.0'),
(0xfe17, 0xfe17, 'POINT X TO FP NUMBER (1E+09)'),
(0xfe1a, 0xfe1a, 'MULTIPLY FPA0 BY 1E+09'),
(0xfe1d, 0xfe1d, 'GET EXPONENT OFFSET'),
(0xfe1f, 0xfe1f, 'SUBTRACT 9 (BECAUSE WE MULTIPLIED BY 1E+09 ABOVE)'),
(0xfe21, 0xfe21, 'CHECK TO SEE IF > 1.0'),
(0xfe23, 0xfe23, 'DIVIDE FPA0 BY 10'),
(0xfe26, 0xfe26, 'INCREMENT EXPONENT OFFSET'),
(0xfe28, 0xfe28, 'POINT X TO FP NUMBER (999,999,999)'),
(0xfe2b, 0xfe2b, 'COMPARE FPA0 TO X'),
(0xfe2e, 0xfe2e, 'BRANCH IF FPA0 > 999,999,999'),
(0xfe30, 0xfe30, 'POINT X TO FP NUMBER (99,999,999.9)'),
(0xfe33, 0xfe33, 'COMPARE FPA0 TO X'),
(0xfe36, 0xfe36, 'RETURN IF 999,999,999 > FPA0 > 99,999,999.9'),
(0xfe38, 0xfe38, 'MULTIPLY FPA0 BY 10'),
(0xfe3b, 0xfe3b, 'DECREMENT EXPONENT OFFSET'),
(0xfe3d, 0xfe3d, 'KEEP UNNORMALIZING'),
(0xfe3f, 0xfe3f, 'RESTORE BUFFER POINTER AND RETURN'),
(0xfe41, 0xfe41, 'SAVE BUFFER POINTER'),
(0xfe43, 0xfe43, 'ADD .5 TO FPA0 (ROUND OFF)'),
(0xfe46, 0xfe46, 'CONVERT FPA0 TO INTEGER FORMAT'),
(0xfe49, 0xfe49, 'RESTORE BUFFER POINTER'),
(0xfe4b, 0xfe4b, 'POINT X TO UNNORMALIZED POWERS OF 10'),
(0xfe4e, 0xfe4e, 'INITIALIZE DIGIT COUNTER TO 0 + $80.'),
(0xfe50, 0xfe50, 'CHECK FOR COMMA INSERTION'),
(0xfe52, 0xfe52, "* 'ADD' A POWER OF 10 MANTISSA TO FPA0."),
(0xfe54, 0xfe54, '* IF THE MANTISSA IS NEGATIVE, A SUBTRACTION'),
(0xfe56, 0xfe56, '* WILL BE WHAT REALLY TAKES PLACE.'),
(0xfe6a, 0xfe6a, 'ADD ONE TO DIGIT COUNTER'),
(0xfe6b, 0xfe6b, 'CARRY INTO BIT 7'),
(0xfe6c, 0xfe6c, '* SET OVERFLOW FLAG - BRANCH IF CARRY SET AND'),
(0xfe6d, 0xfe6d, '* ADDING MANTISSA OR CARRY CLEAR AND SUBTRACTING MANTISSA'),
(0xfe6f, 0xfe6f, 'BRANCH IF SUBTRACTING MANTISSA'),
(0xfe71, 0xfe71, 'WAS SUBB #10+1'),
(0xfe73, 0xfe73, '* IF ADDING MANTISSA'),
(0xfe74, 0xfe74, "WAS ADDB #'0'-1"),
(0xfe76, 0xfe76, 'MOVE TO NEXT POWER OF 10 MANTISSA'),
(0xfe78, 0xfe78, 'SAVE DIGIT IN ACCA'),
(0xfe7a, 0xfe7a, 'MASK OFF ADD/SUBTRACT FLAG (BIT 7)'),
(0xfe7c, 0xfe7c, 'STORE DIGIT IN BUFFER'),
(0xfe7e, 0xfe7e, 'TOGGLE ADD/SUBTRACT FLAG'),
(0xfe7f, 0xfe7f, 'MASK OFF EVERYTHING BUT ADD/SUB FLAG'),
(0xfe81, 0xfe81, 'COMPARE TO END OF UNNORMALIZED POWERS OF 10'),
(0xfe84, 0xfe84, 'BRANCH IF NOT DONE'),
(0xfe86, 0xfe86, 'PUT A ZERO AT END OF INTEGER'),
(0xfe88, 0xfe88, 'DECREMENT DECIMAL POINT COUNTER'),
(0xfe8a, 0xfe8a, 'NOT TIME FOR DECIMAL POINT'),
(0xfe8c, 0xfe8c, 'SAVE BUFFER POINTER-POSITION OF THE DECIMAL POINT'),
(0xfe8e, 0xfe8e, '* STORE A DECIMAL'),
(0xfe90, 0xfe90, '* POINT IN THE OUTPUT BUFFER'),
(0xfe92, 0xfe92, '* CLEAR COMMA COUNTER - NOW IT WILL TAKE 255'),
(0xfe95, 0xfe95, 'DECREMENT COMMA COUNTER'),
(0xfe97, 0xfe97, 'RETURN IF NOT TIME FOR COMMA'),
(0xfe99, 0xfe99, '* RESET COMMA COUNTER TO 3; THREE'),
(0xfe9b, 0xfe9b, '* DIGITS BETWEEN COMMAS'),
(0xfe9d, 0xfe9d, '* PUT A COMMA INTO'),
(0xfe9f, 0xfe9f, '* THE BUFFER'),
(0xfea2, 0xfea2, 'GET THE BASE 10 EXPONENT OFFSET'),
(0xfea4, 0xfea4, "* ADD 10 (FPA0 WAS 'NORMALIZED' TO 9 PLACES LEFT"),
(0xfea6, 0xfea6, '* OF DECIMAL POINT) - SAVE IN DECIMAL POINT COUNTER'),
(0xfea8, 0xfea8, 'ADD ONE FOR THE DECIMAL POINT'),
(0xfea9, 0xfea9, '* DIVIDE DECIMAL POINT COUNTER BY 3; LEAVE'),
(0xfeab, 0xfeab, '* THE REMAINDER IN ACCA'),
(0xfead, 0xfead, 'CONVERT REMAINDER INTO A NUMBER FROM 1-3'),
(0xfeaf, 0xfeaf, 'SAVE COMMA COUNTER'),
(0xfeb1, 0xfeb1, 'GET STATUS BYTE'),
(0xfeb3, 0xfeb3, 'CHECK FOR COMMA FLAG'),
(0xfeb5, 0xfeb5, 'BRANCH IF COMMA FLAG ACTIVE'),
(0xfeb7, 0xfeb7, 'CLEAR COMMA COUNTER - 255 DIGITS OUTPUT BEFORE A COMMA'),
(0xfeba, 0xfeba, 'SAVE ZEROES COUNTER'),
(0xfebc, 0xfebc, 'CHECK FOR DECIMAL POINT'),
(0xfebe, 0xfebe, 'RESTORE ZEROES COUNTER'),
(0xfec0, 0xfec0, '* DECREMENT ZEROES COUNTER AND'),
(0xfec1, 0xfec1, '* RETURN IF < 0'),
(0xfec3, 0xfec3, 'SAVE ZEROES COUNTER'),
(0xfec5, 0xfec5, '* PUT A ZERO INTO'),
(0xfec7, 0xfec7, '* THE BUFFER'),
(0xfec9, 0xfec9, 'RESTORE THE ZEROES COUNTER'),
(0xfecb, 0xfecb, 'BRANCH IF NOT DONE'),
(0xfece, 0xfece, "'INPUT' TOKEN"),
(0xfed0, 0xfed0, "GO DO 'LINE INPUT' COMMAND"),
(0xfed4, 0xfed4, '\'SYNTAX ERROR\' IF NOT "LINE INPUT"'),
(0xfff0, 0xfff0, 'RESERVED'),
(0xfff2, 0xfff2, 'SWI3'),
(0xfff4, 0xfff4, 'SWI2'),
(0xfff6, 0xfff6, 'FIRQ'),
(0xfff8, 0xfff8, 'IRQ'),
(0xfffa, 0xfffa, 'SWI'),
(0xfffc, 0xfffc, 'NMI'),
(0xfffe, 0xfffe, 'RESET'),
# manually inserted:
(0x0000, 0x7FFF, "32K RAM"),
(0x8000, 0x9FFF, "FREE SPACE (8K)"),
(0xA000, 0xBFFF, "SERIAL INTERFACE"),
(0xC000, 0xFFFF, "16K ROM"),
(0xDB00, 0xFFFF, "BASIC Interpreter"),
(0xdb00, 0xdb13, "Console in"),
(0xdb14, 0xdb39, "Console out"),
(0xdb3a, 0xdb45, "Wait for serial data"),
(0xdc3e, 0xdc8a, "ROUTINE THAT GETS AN INPUT LINE FOR BASIC"),
(0xe0a3, 0xe0b8, "Break check"),
(0xeb6a, 0xeb6e, "PRINT"),
)
def print_out(txt):
    """Write *txt* followed by a newline to stdout (plain-print log callback)."""
    print(str(txt))
def get_simple6809_meminfo():
    """Factory: return a Simple6809MemInfo that reports through the module
    logger's debug output (``log`` is defined earlier in this file).
    """
    return Simple6809MemInfo(log.debug)
if __name__ == "__main__":
    # Ad-hoc smoke test: resolve an address inside the "SERIAL INTERFACE"
    # range ($A000-$BFFF, see the memory map above) and print the result.
    mem_info = Simple6809MemInfo(print_out)
    mem_info(0xa500) # SERIAL INTERFACE
|
jedie/DragonPy
|
dragonpy/Simple6809/mem_info.py
|
Python
|
gpl-3.0
| 233,609
|
#!/usr/bin/env python
# This script searches a given folder for .h5 files, loads the first ([0] index)
# model found and evaluates it on the test images, displaying the results.
from keras import backend as K
import os
import cv2
import numpy as np
import sys
import glob
from utils import load_data_rgb, display_results_RGB, enumerate2
from keras.models import load_model

os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"  # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

# --- configuration -----------------------------------------------------------
starting_dir = os.getcwd()
img_size = 360           # network input size (images are resized to this)
tests_number = 200       # how many samples we will evaluate/display
small_size = 23          # side of the (smaller-than-input) network output
step_size = 50           # stride through the sorted file list
channel_number = 3       # RGB input
stage = 'test'           # which dataset split to evaluate ('test')

# Previously used dataset locations, kept for reference:
# master_folder = '/home/gcx/lstm_sequences/autoencoder-27-simple/'
# master_folder = '/home/gcx/lstm_sequences/autoencoder-100-improved/'
# master_folder = '/home/gcx/lstm_sequences/autoencoder-200/'
# master_folder = '/home/gcx/lstm_sequences/anomaly_dataset/'
# master_folder = '/home/gcx/lstm_sequences/anomaly_subset/'
# master_folder = '/home/gcx/lstm_sequences/sampled_dataset/'
master_folder = '/home/gcx/lstm_sequences/forth_dataset/'

# --- locate and load the model ----------------------------------------------
os.chdir(starting_dir)
model_to_load = glob.glob('models/vgg_multiscale/*.h5')
if len(model_to_load) > 1:
    sys.exit('Multiple files in folder. Do not know what to choose. Exiting!')
elif len(model_to_load) == 0:
    sys.exit('No model file found. Exiting!')

print('loading model: ', model_to_load)
best_model = load_model(model_to_load[0])
print(best_model.summary())
print(' \n')

# --- load images and ground truth one by one --------------------------------
os.chdir(master_folder + stage + '/' + 'x_' + stage)
# FIX: was a Python-2 `print` statement (`print os.getcwd()`), which is a
# SyntaxError under Python 3 and inconsistent with the rest of the file.
print(os.getcwd())
x = np.zeros((tests_number, img_size, img_size, channel_number))
y = np.ones((tests_number, img_size, img_size))

ptr = 0
for i, file_ptr in enumerate2(glob.glob('*.tiff'), start=0, step=step_size, stop=step_size * tests_number):
    _img_tmp = cv2.imread(file_ptr)
    _img_tmp = cv2.resize(_img_tmp, (img_size, img_size), interpolation=cv2.INTER_NEAREST) / 255.
    x[ptr, :, :, :] = _img_tmp

    # Matching ground-truth mask lives under y_<stage>/ with the same filename.
    _gt_tmp = cv2.imread(master_folder + stage + '/' + 'y_' + stage + '/' + os.path.split(file_ptr)[1])
    _gt_tmp = cv2.resize(_gt_tmp, (img_size, img_size), interpolation=cv2.INTER_NEAREST)
    # FIX: `np.float` alias was deprecated and removed in NumPy 1.24; the
    # builtin `float` is the exact equivalent.
    _gt_tmp = np.average(np.asarray(_gt_tmp, float), axis=2) / 255.
    y[ptr, :, :] = _gt_tmp
    ptr += 1
    print('i, ptr:', i, ptr)

os.chdir(starting_dir)

# --- run the model and display results ---------------------------------------
reconstructed_imgs = best_model.predict(x, batch_size=2, verbose=1)
# The network output is smaller than the input (small_size x small_size).
reconstructed_imgs = reconstructed_imgs.reshape(-1, small_size, small_size)
print(' \n')
display_results_RGB(x, y, reconstructed_imgs, img_size, small_size, tests_number, stage=stage, figure_name=os.path.split(model_to_load[0])[-1][:-3], scale_y=True)
cv2.namedWindow('test')
cv2.waitKey(1000)
print('finished script!')
cv2.destroyAllWindows()
|
charterscruz/auto-encoder-tests
|
evaluate_best_model_rgb_forth_scale_.py
|
Python
|
mit
| 3,718
|
#!/usr/bin/env python3
'''Fix backlinks after moving a page.
Usage: {0} <page> <src> <dest> <edit-summary> [minor]
Example: {0} P A B "Fix links: A was moved to B" m
The example above will modify links in page P following these rules:
* [A] -> [B|A]
* [A|B] -> [B]
* [A|C] -> [B|C]
'''
import re
from bot import Bot, main
class BacklinkBot(Bot):
    """Rewrite wiki links on a page after the target page was moved.

    Per the module docstring, with src page A moved to B the rules mean:
      * ``[A]``   -> ``[B|A]``  (retarget, keep old title as display text)
      * ``[A|B]`` -> ``[B]``    (collapse a now-redundant pipe)
      * ``[A|C]`` -> ``[B|C]``  (retarget, keep display text C)

    NOTE: the ``{rsrc}``/``{rdest}``/``{dest}`` placeholders in ``rules`` are
    filled via ``str.format(**locals())`` inside ``_replace`` -- the local
    variable names there are load-bearing; do not rename them.
    """

    rules = [
        (r'({rsrc})', r'{dest}|\1'),
        (r'(?:{rsrc})\|({rdest})', r'\1'),
        (r'(?:{rsrc})\|([^\]|]+)', r'{dest}|\1'),
    ]

    def __call__(self, title, src, dest, edit_summary, minor=False):
        """Fix backlinks on page *title* after *src* was moved to *dest*."""
        super().__call__(edit_summary, minor)
        # Build a regex alternation over every accepted spelling of the
        # source title (``_dialects`` comes from the Bot base class --
        # presumably case/whitespace variants; not visible here).
        rsrc = '|'.join(map(re.escape, self._dialects(src)))
        self._replace(self.site.pages[title], rsrc, dest, raw=True)

    def _replace(self, page, src, dest, raw=False):
        """Apply every rewrite rule to *page*'s text, then save it."""
        # raw=True means *src* is already a regex fragment; otherwise quote it.
        rsrc = src if raw else re.escape(src)
        rdest = re.escape(dest) if dest else '(?!)' # (?!) never matches: disables the second rule
        contents = page.text()
        for pattern, replace in self.rules:
            # format(**locals()) substitutes rsrc/rdest/dest into the rule.
            pattern = r'\[\[%s\]\]' % pattern.format(**locals())
            replace = r'[[%s]]' % replace.format(**locals()) if dest else r'\1'
            contents = re.sub(pattern, replace, contents)
        self._save(page, contents, verbose=True)
if __name__ == '__main__':
    # argc=5: page title, src, dest, edit summary (+ optional 'minor' flag).
    # NOTE(review): argc is presumably the required argument count enforced
    # by bot.main -- confirm against the bot module.
    main(BacklinkBot, argc=5)
|
Arnie97/wiki-bot
|
backlink.py
|
Python
|
gpl-3.0
| 1,394
|
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides a seam for user-related services."""
__author__ = 'Sean Lip'
import utils
from google.appengine.api import users
from google.appengine.ext import ndb
def create_login_url(slug):
    """Return a GAE sign-in URL that redirects back to *slug* afterwards."""
    login_url = users.create_login_url(slug)
    return login_url
def create_logout_url(slug):
    """Return a GAE sign-out URL that redirects back to *slug* afterwards."""
    logout_url = users.create_logout_url(slug)
    return logout_url
def get_current_user(request):
    """Return the signed-in user, or None if nobody is signed in.

    NOTE(review): *request* is unused on GAE; presumably it exists so this
    seam matches other platform backends -- confirm against the callers.
    """
    current = users.get_current_user()
    return current
def is_super_admin(user_id, request):
    """Checks whether the user with the given user_id owns this app.

    For GAE, the user in question is also required to be the current user,
    so this is False when nobody is signed in, when the signed-in user is
    not *user_id*, or when that user is not an app administrator.
    """
    current = users.get_current_user()
    return (current is not None
            and current.user_id() == user_id
            and users.is_current_user_admin())
def get_user_id_from_email(email):
    """Given an email address, returns a user id.

    Returns None if the email address does not correspond to a valid user id.

    Raises:
        utils.InvalidInputException: if no GAE account exists for *email*.
    """
    # Throwaway ndb model used purely for a datastore round-trip; both
    # cache layers are disabled so nothing persists beyond this call.
    class _FakeUser(ndb.Model):
        _use_memcache = False
        _use_cache = False
        user = ndb.UserProperty(required=True)

    try:
        u = users.User(email)
    except users.UserNotFoundError:
        raise utils.InvalidInputException(
            'User with email address %s not found' % email)

    # NOTE(review): writing the User to the datastore and reading it back
    # appears to be the GAE trick that forces user_id() to be populated --
    # confirm against the App Engine Users API documentation.
    key = _FakeUser(user=u).put()
    obj = _FakeUser.get_by_id(key.id())
    user_id = obj.user.user_id()
    if user_id:
        # Python 2 runtime (GAE standard env): normalize the id to unicode.
        return unicode(user_id)
    else:
        return None
def get_user_id(user):
    """Return the opaque user id string for the given user object."""
    uid = user.user_id()
    return uid
def get_user_email(user):
    """Return the email address for the given user object."""
    address = user.email()
    return address
|
mindpin/mindpin_oppia
|
core/platform/users/gae_current_user_services.py
|
Python
|
apache-2.0
| 2,327
|
"""
Publishes the Primary Package
===============================================================================
usage: orc [common-opts] publish [options] <semver> <comments>
orc [common-opts] publish [options] help
orc [common-opts] publish [options]
Arguments:
<semver> The Semantic version information for the Published version
<comments> Publish comments
help Display the recommended steps/process for publishing a
package
Options:
--edit Updates the 'current' publish information in place, i.e.
does not create a new version. The date/time fields are
NOT updated.
--edithist N Updates the 'N' entry in the History array. N is zero
based index. The date/time fields are NOT updated.
-w Suppress warning about missing files
--nodirs Skip updating the pkg-dirs.lst when publishing
-h, --help Display help for this command
Common Options:
See 'orc --help'
Notes:
o Publishing simply associates a comment/date/semantic-version with
package at a point in time (i.e. only the package.json file is updated).
o The user is responsible for archiving/labeling/tagging the package
in its native SCM environment.
o Publishing is not require for package management. However, life is
better with respect to managing dependencies if packages are published.
"""
import os, json, sys
import utils
from docopt.docopt import docopt
from my_globals import PACKAGE_INFO_DIR
from my_globals import PACKAGE_ROOT
from my_globals import PKG_DIRS_FILE
from my_globals import IGNORE_DIRS_FILE
help_text = \
""" The following are the recommend steps for publishing an Outcast2 package:
1 The source-code/changes are completed and ready to submit a pull request.
[2] Use the 'orc info' command to set the package's basic information.
NOTE: this step only needs to be performed ONCE during the life of the
package (or on change of the package's information).
[3] If the package is intended to be adopted as an 'overlay' package, use
the 'orc dirs set' command to set the package's primary directories.
NOTE: this step only needs to be performed ONCE during the life of the
package (or on change of the package's primary directories ).
[4] If the package is intended to be adopted as an 'overlay' package, use
the 'orc dirs xset' command to set the package's 'adopted-extra' dirs.
NOTE: this step only needs to be performed ONCE during the life of the
package (or on change of the package's adopted-extra directories ).
[5] If the package is intended to be adopted as an 'overlay' package, edit
(or create) the package's 'ignore-dirs.lst' file. Populate the file as
needed.
NOTE: this step only needs to be performed ONCE during the life of the
package (or on change of the package's ignore directories ).
6 Run the 'orc publish' command to specify the semantic version information
and brief description of what is being published.
7 Commit and push changes to the package's Outcast files.
8 Create the pull request for publishing the source-code/changes.
9 Complete the pull request process and merge the PR into its parent
branch.
[10] If the parent branch is not a 'release' branch, then propagate the merged
changes to the 'release' branch
11 Create a Tag/label - with the same semantic version information from
step 6 - and apply the tag/label to the 'release' branch from step
9 (or 10).
Note: If using 'git' make sure that the tag gets pushed back to the
package's origin (e.g. git push origin MY-TAG-Maj.Min.Patch)
"""
#---------------------------------------------------------------------------------------------------------
def display_summary():
    """Print the one-line summary shown in the top-level command listing."""
    print(f"{'publish':<13}Publishes the Primary package")
#------------------------------------------------------------------------------
def run( common_args, cmd_argv ):
    """Entry point for the 'orc publish' sub-command.

    Parses *cmd_argv* with docopt, updates the package's publish/version
    information (in place, in a history entry, or as a brand-new entry),
    optionally refreshes the package dirs list, then prints the resulting
    publish info and warnings about missing overlay-package support files.
    """
    args = docopt(__doc__, argv=cmd_argv)

    # Help on the publish sequence
    if ( args['help'] ):
        print( help_text )
        sys.exit(0)

    # Get the package data
    json_dict = utils.load_package_file()

    # Edit the 'current' publish entry in place (original date preserved)
    if ( args['--edit'] ):
        prev = utils.json_get_current_version( json_dict )
        v = utils.json_create_version_entry( args['<comments>'], args['<semver>'], prev['date'] )
        utils.json_update_current_version( json_dict, v )

    # Edit the N'th history entry
    elif ( args['--edithist'] ):
        utils.json_update_history_version( json_dict, args['--edithist'], args['<comments>'], args['<semver>'] )

    # Create a brand-new published version entry
    elif ( args['<semver>'] ):
        v = utils.json_create_version_entry( args['<comments>'], args['<semver>'] )
        utils.json_add_new_version( json_dict, v )

    # Support files that an 'overlay' package is expected to have
    dirs_file   = os.path.join(PACKAGE_INFO_DIR(), PKG_DIRS_FILE() )
    ignore_file = os.path.join(PACKAGE_INFO_DIR(), IGNORE_DIRS_FILE() )

    # Ensure the 'dirs list' is up to date (only when the ignore file exists)
    if ( not args['--nodirs'] and args['<semver>'] ):
        if ( os.path.isfile( ignore_file ) ):
            owndirs = utils.get_owned_dirs( PACKAGE_ROOT() )
            utils.save_dirs_list_file( owndirs )

    # Display the publish info
    p = utils.json_get_published( json_dict )
    print( json.dumps(p, indent=2) )

    # Display warnings (suppressed with -w)
    if ( not args['-w'] ):
        warning = False
        if ( not os.path.isfile( dirs_file )):
            print( f"Warning: NO {dirs_file} file has been created for the package. See the 'orc dirs' command")
            warning = True
        if ( not os.path.isfile( ignore_file )):
            print( f"Warning: NO {ignore_file} file has been created for the package. Create using a text editor. The file has same semantics as a .gitignore file.")
            warning = True
        if ( warning ):
            print()
            # FIX: message previously read "intended to be to be adopted"
            print( "The above warning(s) can be ignored if the package is NOT intended to be adopted as an 'overlay' package." )
|
johnttaylor/Outcast
|
bin/commands/publish.py
|
Python
|
bsd-3-clause
| 6,194
|
from shelf.logger_creator import LoggerCreator
from shelf.hook.background.container import Container
import json
def execute_command(command, log_level, event, uri, meta_uri, cwd="."):
    """Execute a command hook, exporting the event context as SHELF_* vars.

    Args:
        command(string)  The command to execute; may include arguments.
        log_level(int)   A logging-module level for the background logger.
        event(string)    The triggering event (a shelf.hook.event.Event).
        uri(string)      Full URI of the artifact affected by the event.
        meta_uri(string) Full URI of the artifact's metadata resource.
        cwd(string)      Working directory for the command.

    Returns:
        bool: True when the command exited successfully.
    """
    deps = create_container(log_level)
    log = deps.logger
    runner = deps.create_command_runner(cwd)
    env_vars = {
        "SHELF_EVENT": event,
        "SHELF_URI": uri,
        "SHELF_META_URI": meta_uri,
    }
    log.info("Executing command \"{0}\" with environment data '{1}'".format(
        command,
        json.dumps(env_vars, indent=4)
    ))
    outcome = runner.run(command, env_vars)
    if outcome.success:
        log.info("Command \"{0}\" executed successfully.".format(command))
    else:
        log.error("Command \"{0}\" failed. stdout=\"{1}\" stderr=\"{2}\"".format(
            command,
            outcome.stdout,
            outcome.stderr
        ))
    return outcome.success
def create_container(log_level):
    """Build a Container wired with a background logger at *log_level*."""
    return Container(create_background_logger(log_level))
def create_background_logger(level):
    """Create the logger used by background hook actions."""
    # Preserve the fluent builder semantics: each call may return the
    # (possibly new) builder object.
    builder = LoggerCreator("BackgroundAction").background_format()
    return builder.level(level).get()
|
not-nexus/shelf
|
shelf/hook/background/action.py
|
Python
|
mit
| 1,898
|
from flask import Flask
from flask import jsonify, Response, render_template, abort, make_response
from flask import request
from cookbook_manager import CookbookManager
from barista import Barista
import json
import httplib
from threading import Thread
from utils import channel
from utils import json_config
# ===============================================================================
#
# Global Variables
#
# ===============================================================================
app = Flask(__name__)                         # the WSGI application
cmgr = CookbookManager()                      # cookbook persistence layer
config = json_config.parse_json('config.json')  # machine configuration
barista = Barista()                           # shared brewing state machine
@app.route('/')
def index():
    """Serve the single-page UI."""
    page = render_template('index.jinja2')
    return page
# ===============================================================================
#
# Cookbook Manager API
#
# ===============================================================================
@app.route('/cookbooks', methods=['GET'])
def list_cookbooks():
    """Return all cookbooks as JSON keyed by name.

    Response shape:
        {"cookbook1": {"name": "cookbook1", "description": "..."}}
    """
    summary = {
        book.name: {
            'name': book.name,
            'description': book.description
        }
        for book in cmgr.list()
    }
    return jsonify(summary)
@app.route('/cookbooks/<string:name>', methods=['GET'])
def read_cookbook(name):
    """Return a single cookbook's name and description as JSON."""
    book = cmgr.get(name)
    payload = {
        'name': name,
        'description': book.description
    }
    return jsonify(payload)
@app.route('/cookbooks/<string:name>', methods=['PUT'])
def update_cookbook(name):
    """Rename a cookbook (body: {"name": "new_cookbook_name"}) or, when no
    name is supplied, create/update the cookbook. Replies 201 CREATED."""
    params = json.loads(request.data) if request.data else {}
    if 'name' in params:
        cmgr.rename(name, params['name'])
    else:
        # No name in the params: create a new cookbook instead.
        cmgr.update(name)
    reply = make_response()
    reply.status_code = httplib.CREATED
    return reply
@app.route('/cookbooks/<string:name>/content', methods=['GET'])
def read_cookbook_content(name):
    """Return the raw content of the named cookbook."""
    return cmgr.get(name).content
@app.route('/cookbooks/<string:name>/content', methods=['PUT'])
def update_cookbook_content(name):
    """Replace the named cookbook's content with the request body."""
    cmgr.update(name, request.data)
    reply = make_response()
    reply.status_code = httplib.OK
    return reply
@app.route('/cookbooks/<string:name>', methods=['DELETE'])
def delete_cookbook(name):
    """Delete the named cookbook; replies 204 NO CONTENT."""
    cmgr.delete(name)
    reply = make_response()
    reply.status_code = httplib.NO_CONTENT
    return reply
# ===============================================================================
#
# Barista API
#
# ===============================================================================
@app.route('/barista', methods=['GET'])
def get_barista_status():
    """
    {
        "State": "Brewing",
        "Now steps": "Step title",
        "Now steps index": 3,
        "Now process": "Process title",
        "Now process index": 1,
        "Now cookbook name": "Test",
        "Temperature": 90,
        "Is water full": true,
        "Total commands": 1000,
        "Progress": 834
    }
    """
    # Snapshot of the shared Barista instance; all values are read straight
    # from its attributes at request time.
    status = {
        'State': barista.state,
        'Now steps': barista.now_step,
        'Now steps index': barista.now_step_index,
        'Now process': barista.now_process,
        'Now process index': barista.now_process_index,
        'Now cookbook name': barista.now_cookbook_name,
        'Temperature': barista.heater_temperature,
        'Is water full': barista.is_water_full,
        'Total commands': barista.total_cmd,
        'Progress': barista.printer_progress
    }
    return jsonify(status)
@app.route('/barista', methods=['PUT'])
def brew():
    """
    {
        "Command": "Start|Pause|Resume|Stop",
        "Name": "Cookbook"
    }
    """
    # NOTE(review): the docstring advertises Pause/Resume, but only Start
    # and Stop are handled below -- any other command falls through
    # silently and still gets a 200 reply. Confirm whether Pause/Resume
    # should be wired up.
    params = json.loads(request.data)
    cmd = params['Command']
    name = params['Name']
    app.logger.debug('{} {} ...'.format(cmd, name))
    if cmd == 'Start':
        barista.brew(name)
    elif cmd == 'Stop':
        barista.stop_brew()
    resp = make_response()
    resp.status_code = httplib.OK
    return resp
# ===============================================================================
#
# Printer API
#
# ===============================================================================
@app.route('/printer', methods=['GET'])
def get_printer_status():
    """
    {
        "state": "Printing",
        "progress": 198,
        "total": 3000
    }
    """
    return jsonify({
        'state': barista.printer_state_string,
        'progress': barista.printer_progress,
        'total': barista.total_cmd
    })
@app.route('/printer/home', methods=['PUT'])
def go_home():
    """Send the printer to its home position; replies 201 CREATED."""
    barista.go_home()
    reply = make_response()
    reply.status_code = httplib.CREATED
    return reply
@app.route('/printer/jog', methods=['PUT'])
def control_printer():
    """Jog the printer.

    Body may contain any of:
    {
        "X": 0, "Y": 0, "Z": 0,
        "E1": 100, "E2": 100, "F": 100
    }
    Missing axes are forwarded as None.
    """
    params = json.loads(request.data)
    # Forward the axes in the positional order printer_jog expects.
    axes = [params.get(key) for key in ('X', 'Y', 'Z', 'E1', 'E2', 'F')]
    barista.printer_jog(*axes)
    reply = make_response()
    reply.status_code = httplib.CREATED
    return reply
# ===============================================================================
#
# Heater API
#
# ===============================================================================
@app.route('/heater', methods=['GET'])
def get_heater_status():
    """
    {
        "duty_cycle": 100,
        "set_point": 80,
        "temperature": 24.32,
        "update_time": 147998232.38,
        "is_water_full": true
    }
    """
    return jsonify({
        'duty_cycle': barista.heater_duty_cycle,
        'set_point': barista.heater_set_point,
        'temperature': barista.heater_temperature,
        'update_time': barista.heater_update_time,
        'is_water_full': barista.is_water_full
    })
@app.route('/heater', methods=['PUT'])
def control_heater():
    """Set the heater target temperature: body {"Set Point": 80}."""
    params = json.loads(request.data) if request.data else {}
    target = params.get('Set Point', None)
    if target is not None:
        barista.set_temperature(float(target))
    reply = make_response()
    reply.status_code = httplib.OK
    return reply
# ===============================================================================
#
# Refill API
#
# ===============================================================================
@app.route('/refill', methods=['GET'])
def get_refill_status():
    """Report whether the water tank is full."""
    status = {'full': barista.is_water_full}
    return jsonify(status)
@app.route('/refill', methods=['PUT'])
def control_refill():
    """
    {
        "Command": "Start|Stop"
    }
    """
    # Refill control is not implemented yet. The original body was a bare
    # 'pass', which returns None -- Flask raises an error ("view function
    # did not return a response") on every PUT. Reply 200 OK instead,
    # matching the other control endpoints, until this is implemented.
    resp = make_response()
    resp.status_code = httplib.OK
    return resp
if __name__ == '__main__':
    # Close the werkzeug logger: suppress per-request access logging so
    # only errors reach the console.
    import logging
    logger = logging.getLogger('werkzeug')
    logger.setLevel(logging.ERROR)
    # Listen on all interfaces so the machine is reachable on the LAN.
    app.run(host='0.0.0.0')
|
Swind/TuringCoffee
|
src/api_server.py
|
Python
|
mit
| 7,419
|
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
A simple client to create a CLA anomaly detection model for hotgym.
The script prints out all records that have an abnoramlly high anomaly
score.
"""
import csv
import datetime
import logging
from nupic.data.datasethelpers import findDataset
from nupic.frameworks.opf.modelfactory import ModelFactory
from nupic.frameworks.opf.predictionmetricsmanager import MetricsManager
import model_params
_LOGGER = logging.getLogger(__name__)
_DATA_PATH = "extra/hotgym/rec-center-hourly.csv"
_ANOMALY_THRESHOLD = 0.8
def createModel():
    """Build the CLA model from the parameters defined in model_params.py."""
    return ModelFactory.create(model_params.MODEL_PARAMS)
def runHotgymAnomaly():
    """Stream the hotgym CSV through the model and log every record whose
    anomaly score exceeds _ANOMALY_THRESHOLD.
    """
    model = createModel()
    model.enableInference({'predictedField': 'consumption'})
    with open (findDataset(_DATA_PATH)) as fin:
        reader = csv.reader(fin)
        headers = reader.next()  # Python 2 iterator API
        # Skip the next two rows after the header (presumably the NuPIC
        # CSV type/flag metadata rows -- confirm against the data file).
        reader.next()
        reader.next()
        for i, record in enumerate(reader, start=1):
            modelInput = dict(zip(headers, record))
            # Coerce raw CSV strings into the types the model expects.
            modelInput["consumption"] = float(modelInput["consumption"])
            modelInput["timestamp"] = datetime.datetime.strptime(
                modelInput["timestamp"], "%m/%d/%y %H:%M")
            result = model.run(modelInput)
            anomalyScore = result.inferences['anomalyScore']
            if anomalyScore > _ANOMALY_THRESHOLD:
                _LOGGER.info("Anomaly detected at [%s]. Anomaly score: %f.",
                             result.rawInput["timestamp"], anomalyScore)
if __name__ == "__main__":
    # INFO level so the anomaly log lines above are visible.
    logging.basicConfig(level=logging.INFO)
    runHotgymAnomaly()
|
jkthompson/nupic
|
examples/opf/clients/hotgym_anomaly/hotgym_anomaly.py
|
Python
|
gpl-3.0
| 2,483
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2018-01-04 19:12
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the DeviceDefinition.subdevice_role field."""

    dependencies = [
        ('infrastructure', '0004_auto_20180104_1901'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='devicedefinition',
            name='subdevice_role',
        ),
    ]
|
zapcoop/vertex
|
vertex_api/infrastructure/migrations/0005_remove_devicedefinition_subdevice_role.py
|
Python
|
agpl-3.0
| 418
|
from pypwrctrl.pypwrctrl import Plug, PlugDevice, PlugMaster
|
Innovailable/pypwrctrl
|
pypwrctrl/__init__.py
|
Python
|
gpl-3.0
| 61
|
# -*- coding: utf-8 -*-
from __future__ import division, absolute_import, print_function
import sys
import numpy as np
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_equal
)
class TestArrayRepr(object):
    """repr() output for special values, ndarray subclasses and object arrays.

    NOTE(review): the expected strings here are tied to the numpy version
    this file targets; whitespace inside them is significant.
    """

    def test_nan_inf(self):
        x = np.array([np.nan, np.inf])
        assert_equal(repr(x), 'array([ nan, inf])')

    def test_subclass(self):
        class sub(np.ndarray): pass

        # one dimensional
        x1d = np.array([1, 2]).view(sub)
        assert_equal(repr(x1d), 'sub([1, 2])')

        # two dimensional
        x2d = np.array([[1, 2], [3, 4]]).view(sub)
        assert_equal(repr(x2d),
            'sub([[1, 2],\n'
            ' [3, 4]])')

        # two dimensional with flexible dtype
        xstruct = np.ones((2,2), dtype=[('a', 'i4')]).view(sub)
        assert_equal(repr(xstruct),
            "sub([[(1,), (1,)],\n"
            " [(1,), (1,)]],\n"
            " dtype=[('a', '<i4')])"
        )

    def test_self_containing(self):
        # An array that contains itself must print '...' instead of recursing.
        arr0d = np.array(None)
        arr0d[()] = arr0d
        assert_equal(repr(arr0d),
            'array(array(..., dtype=object), dtype=object)')

        arr1d = np.array([None, None])
        arr1d[1] = arr1d
        assert_equal(repr(arr1d),
            'array([None, array(..., dtype=object)], dtype=object)')

        # Mutual (two-object) reference cycle.
        first = np.array(None)
        second = np.array(None)
        first[()] = second
        second[()] = first
        assert_equal(repr(first),
            'array(array(array(..., dtype=object), dtype=object), dtype=object)')

    def test_containing_list(self):
        # printing square brackets directly would be ambiguous
        arr1d = np.array([None, None])
        arr1d[0] = [1, 2]
        arr1d[1] = [3]
        assert_equal(repr(arr1d),
            'array([list([1, 2]), list([3])], dtype=object)')
class TestComplexArray(TestCase):
    """str() formatting for every combination of special complex components."""

    def test_str(self):
        # Cross product of real/imag parts drawn from {0, 1, -1, inf, -inf, nan}
        # across the three complex dtypes.
        rvals = [0, 1, -1, np.inf, -np.inf, np.nan]
        cvals = [complex(rp, ip) for rp in rvals for ip in rvals]
        dtypes = [np.complex64, np.cdouble, np.clongdouble]
        actual = [str(np.array([c], dt)) for c in cvals for dt in dtypes]
        # Rows are (complex64, cdouble, clongdouble) for each value.
        wanted = [
            '[ 0.+0.j]', '[ 0.+0.j]', '[ 0.0+0.0j]',
            '[ 0.+1.j]', '[ 0.+1.j]', '[ 0.0+1.0j]',
            '[ 0.-1.j]', '[ 0.-1.j]', '[ 0.0-1.0j]',
            '[ 0.+infj]', '[ 0.+infj]', '[ 0.0+infj]',
            '[ 0.-infj]', '[ 0.-infj]', '[ 0.0-infj]',
            '[ 0.+nanj]', '[ 0.+nanj]', '[ 0.0+nanj]',
            '[ 1.+0.j]', '[ 1.+0.j]', '[ 1.0+0.0j]',
            '[ 1.+1.j]', '[ 1.+1.j]', '[ 1.0+1.0j]',
            '[ 1.-1.j]', '[ 1.-1.j]', '[ 1.0-1.0j]',
            '[ 1.+infj]', '[ 1.+infj]', '[ 1.0+infj]',
            '[ 1.-infj]', '[ 1.-infj]', '[ 1.0-infj]',
            '[ 1.+nanj]', '[ 1.+nanj]', '[ 1.0+nanj]',
            '[-1.+0.j]', '[-1.+0.j]', '[-1.0+0.0j]',
            '[-1.+1.j]', '[-1.+1.j]', '[-1.0+1.0j]',
            '[-1.-1.j]', '[-1.-1.j]', '[-1.0-1.0j]',
            '[-1.+infj]', '[-1.+infj]', '[-1.0+infj]',
            '[-1.-infj]', '[-1.-infj]', '[-1.0-infj]',
            '[-1.+nanj]', '[-1.+nanj]', '[-1.0+nanj]',
            '[ inf+0.j]', '[ inf+0.j]', '[ inf+0.0j]',
            '[ inf+1.j]', '[ inf+1.j]', '[ inf+1.0j]',
            '[ inf-1.j]', '[ inf-1.j]', '[ inf-1.0j]',
            '[ inf+infj]', '[ inf+infj]', '[ inf+infj]',
            '[ inf-infj]', '[ inf-infj]', '[ inf-infj]',
            '[ inf+nanj]', '[ inf+nanj]', '[ inf+nanj]',
            '[-inf+0.j]', '[-inf+0.j]', '[-inf+0.0j]',
            '[-inf+1.j]', '[-inf+1.j]', '[-inf+1.0j]',
            '[-inf-1.j]', '[-inf-1.j]', '[-inf-1.0j]',
            '[-inf+infj]', '[-inf+infj]', '[-inf+infj]',
            '[-inf-infj]', '[-inf-infj]', '[-inf-infj]',
            '[-inf+nanj]', '[-inf+nanj]', '[-inf+nanj]',
            '[ nan+0.j]', '[ nan+0.j]', '[ nan+0.0j]',
            '[ nan+1.j]', '[ nan+1.j]', '[ nan+1.0j]',
            '[ nan-1.j]', '[ nan-1.j]', '[ nan-1.0j]',
            '[ nan+infj]', '[ nan+infj]', '[ nan+infj]',
            '[ nan-infj]', '[ nan-infj]', '[ nan-infj]',
            '[ nan+nanj]', '[ nan+nanj]', '[ nan+nanj]']
        for res, val in zip(actual, wanted):
            assert_(res == val)
class TestArray2String(TestCase):
    """np.array2string: line wrapping, custom formatters, structured dtypes."""

    def test_basic(self):
        """Basic test of array2string."""
        a = np.arange(3)
        assert_(np.array2string(a) == '[0 1 2]')
        assert_(np.array2string(a, max_line_width=4) == '[0 1\n 2]')

    def test_format_function(self):
        """Test custom format function for each element in array."""
        def _format_function(x):
            if np.abs(x) < 1:
                return '.'
            elif np.abs(x) < 2:
                return 'o'
            else:
                return 'O'

        x = np.arange(3)
        # hex/oct literal suffixes differ between Python 2 and 3
        if sys.version_info[0] >= 3:
            x_hex = "[0x0 0x1 0x2]"
            x_oct = "[0o0 0o1 0o2]"
        else:
            x_hex = "[0x0L 0x1L 0x2L]"
            x_oct = "[0L 01L 02L]"
        assert_(np.array2string(x, formatter={'all':_format_function}) ==
                "[. o O]")
        assert_(np.array2string(x, formatter={'int_kind':_format_function}) ==
                "[. o O]")
        assert_(np.array2string(x, formatter={'all':lambda x: "%.4f" % x}) ==
                "[0.0000 1.0000 2.0000]")
        assert_equal(np.array2string(x, formatter={'int':lambda x: hex(x)}),
                x_hex)
        assert_equal(np.array2string(x, formatter={'int':lambda x: oct(x)}),
                x_oct)

        x = np.arange(3.)
        assert_(np.array2string(x, formatter={'float_kind':lambda x: "%.2f" % x}) ==
                "[0.00 1.00 2.00]")
        assert_(np.array2string(x, formatter={'float':lambda x: "%.2f" % x}) ==
                "[0.00 1.00 2.00]")

        s = np.array(['abc', 'def'])
        assert_(np.array2string(s, formatter={'numpystr':lambda s: s*2}) ==
                '[abcabc defdef]')

    def test_structure_format(self):
        dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
        x = np.array([('Sarah', (8.0, 7.0)), ('John', (6.0, 7.0))], dtype=dt)
        assert_equal(np.array2string(x),
                "[('Sarah', [ 8., 7.]) ('John', [ 6., 7.])]")

        # for issue #5692
        A = np.zeros(shape=10, dtype=[("A", "M8[s]")])
        A[5:].fill(np.datetime64('NaT'))
        assert_equal(np.array2string(A),
                "[('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) " +
                "('1970-01-01T00:00:00',)\n ('1970-01-01T00:00:00',) " +
                "('1970-01-01T00:00:00',) ('NaT',) ('NaT',)\n " +
                "('NaT',) ('NaT',) ('NaT',)]")

        # See #8160
        struct_int = np.array([([1, -1],), ([123, 1],)], dtype=[('B', 'i4', 2)])
        assert_equal(np.array2string(struct_int),
                "[([ 1, -1],) ([123, 1],)]")
        struct_2dint = np.array([([[0, 1], [2, 3]],), ([[12, 0], [0, 0]],)],
                dtype=[('B', 'i4', (2, 2))])
        assert_equal(np.array2string(struct_2dint),
                "[([[ 0, 1], [ 2, 3]],) ([[12, 0], [ 0, 0]],)]")

        # See #8172
        array_scalar = np.array(
                (1., 2.1234567890123456789, 3.), dtype=('f8,f8,f8'))
        assert_equal(np.array2string(array_scalar), "( 1., 2.12345679, 3.)")
class TestPrintOptions:
    """Test getting and setting global print options."""

    def setUp(self):
        # Snapshot the global options so each test starts clean.
        self.oldopts = np.get_printoptions()

    def tearDown(self):
        # Restore whatever a test may have changed globally.
        np.set_printoptions(**self.oldopts)

    def test_basic(self):
        x = np.array([1.5, 0, 1.234567890])
        assert_equal(repr(x), "array([ 1.5 , 0. , 1.23456789])")
        np.set_printoptions(precision=4)
        assert_equal(repr(x), "array([ 1.5 , 0. , 1.2346])")

    def test_precision_zero(self):
        np.set_printoptions(precision=0)
        # (input values, expected rendering) pairs at zero precision
        for values, string in (
                ([0.], " 0."), ([.3], " 0."), ([-.3], "-0."), ([.7], " 1."),
                ([1.5], " 2."), ([-1.5], "-2."), ([-15.34], "-15."),
                ([100.], " 100."), ([.2, -1, 122.51], " 0., -1., 123."),
                ([0], "0"), ([-12], "-12"), ([complex(.3, -.7)], " 0.-1.j")):
            x = np.array(values)
            assert_equal(repr(x), "array([%s])" % string)

    def test_formatter(self):
        x = np.arange(3)
        np.set_printoptions(formatter={'all':lambda x: str(x-1)})
        assert_equal(repr(x), "array([-1, 0, 1])")

    def test_formatter_reset(self):
        # Setting a formatter key to None must clear it, at each level of
        # specificity ('all' / 'int_kind' / 'int' / 'float_kind' / 'float').
        x = np.arange(3)
        np.set_printoptions(formatter={'all':lambda x: str(x-1)})
        assert_equal(repr(x), "array([-1, 0, 1])")
        np.set_printoptions(formatter={'int':None})
        assert_equal(repr(x), "array([0, 1, 2])")

        np.set_printoptions(formatter={'all':lambda x: str(x-1)})
        assert_equal(repr(x), "array([-1, 0, 1])")
        np.set_printoptions(formatter={'all':None})
        assert_equal(repr(x), "array([0, 1, 2])")

        np.set_printoptions(formatter={'int':lambda x: str(x-1)})
        assert_equal(repr(x), "array([-1, 0, 1])")
        np.set_printoptions(formatter={'int_kind':None})
        assert_equal(repr(x), "array([0, 1, 2])")

        x = np.arange(3.)
        np.set_printoptions(formatter={'float':lambda x: str(x-1)})
        assert_equal(repr(x), "array([-1.0, 0.0, 1.0])")
        np.set_printoptions(formatter={'float_kind':None})
        assert_equal(repr(x), "array([ 0., 1., 2.])")

    def test_0d_arrays(self):
        assert_equal(repr(np.datetime64('2005-02-25')[...]),
                     "array('2005-02-25', dtype='datetime64[D]')")
        x = np.array(1)
        np.set_printoptions(formatter={'all':lambda x: "test"})
        assert_equal(repr(x), "array(test)")
def test_unicode_object_array():
    """repr() of an object array holding a non-ASCII unicode string."""
    import sys
    # Python 3 reprs unicode directly; Python 2 shows the escaped u-literal.
    if sys.version_info[0] >= 3:
        expected = "array(['é'], dtype=object)"
    else:
        expected = "array([u'\\xe9'], dtype=object)"
    arr = np.array([u'\xe9'], dtype=object)
    assert_equal(repr(arr), expected)
if __name__ == "__main__":
    # Run all tests in this module via numpy's nose-based runner.
    run_module_suite()
|
bringingheavendown/numpy
|
numpy/core/tests/test_arrayprint.py
|
Python
|
bsd-3-clause
| 10,221
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# generated by wxGlade 0.6.3 on Sun Jan 31 10:48:54 2010
"""
==================================
simGUI.py - Experiment Monitor GUI
==================================
A basic user interface for watching the state of the robot during simulation/experiment,
and pausing/resuming execution.
"""
import math, time, sys, os, re
import wxversion
import wx, wx.richtext, wx.grid
import threading
# Climb the tree to find out where we are: walk up from this file until a
# directory named "src" is found, then put src/lib on the import path.
p = os.path.abspath(__file__)
t = ""
while t != "src":
    (p, t) = os.path.split(p)
    if p == "":
        # Reached the filesystem root without finding "src" -- bail out.
        print "I have no idea where I am; this is ridiculous"
        sys.exit(1)
sys.path.append(os.path.join(p,"src","lib"))
import project, mapRenderer, regions
import handlerSubsystem
import socket
import copy
import xmlrpclib
from SimpleXMLRPCServer import SimpleXMLRPCServer
import random
# begin wxGlade: extracode
# end wxGlade
class SimGUI_Frame(wx.Frame):
def __init__(self, *args, **kwds):
# begin wxGlade: SimGUI_Frame.__init__
kwds["style"] = wx.DEFAULT_FRAME_STYLE
wx.Frame.__init__(self, *args, **kwds)
self.window_1 = wx.SplitterWindow(self, -1, style=wx.SP_3D|wx.SP_BORDER|wx.SP_LIVE_UPDATE)
self.window_1_pane_2 = wx.Panel(self.window_1, -1)
self.notebook_1 = wx.Notebook(self.window_1_pane_2, -1, style=0)
self.notebook_1_pane_2 = wx.Panel(self.notebook_1, -1)
self.notebook_1_pane_1 = wx.Panel(self.notebook_1, -1)
self.window_1_pane_1 = wx.Panel(self.window_1, -1)
self.text_ctrl_sim_log = wx.richtext.RichTextCtrl(self.notebook_1_pane_1, -1, "", style=wx.TE_MULTILINE|wx.TE_READONLY)
self.text_ctrl_slurpout = wx.richtext.RichTextCtrl(self.notebook_1_pane_2, -1, "", style=wx.TE_MULTILINE|wx.TE_READONLY)
self.text_ctrl_slurpin = wx.TextCtrl(self.notebook_1_pane_2, -1, "", style=wx.TE_PROCESS_ENTER)
self.button_SLURPsubmit = wx.Button(self.notebook_1_pane_2, -1, "Submit")
self.button_sim_startPause = wx.Button(self.window_1_pane_2, -1, "Start")
self.button_sim_log_clear = wx.Button(self.window_1_pane_2, -1, "Clear Log")
self.button_sim_log_export = wx.Button(self.window_1_pane_2, -1, "Export Log...")
self.label_1 = wx.StaticText(self.window_1_pane_2, -1, "Show log messages for:")
self.checkbox_statusLog_targetRegion = wx.CheckBox(self.window_1_pane_2, -1, "Target region announcements")
self.checkbox_statusLog_propChange = wx.CheckBox(self.window_1_pane_2, -1, "System proposition changes")
self.checkbox_statusLog_border = wx.CheckBox(self.window_1_pane_2, -1, "Region border crossings")
self.checkbox_statusLog_other = wx.CheckBox(self.window_1_pane_2, -1, "Other debugging messages")
self.__set_properties()
self.__do_layout()
self.Bind(wx.EVT_TEXT_ENTER, self.onSLURPSubmit, self.text_ctrl_slurpin)
self.Bind(wx.EVT_BUTTON, self.onSLURPSubmit, self.button_SLURPsubmit)
self.Bind(wx.EVT_BUTTON, self.onSimStartPause, self.button_sim_startPause)
self.Bind(wx.EVT_BUTTON, self.onSimClear, self.button_sim_log_clear)
self.Bind(wx.EVT_BUTTON, self.onSimExport, self.button_sim_log_export)
self.Bind(wx.EVT_SPLITTER_SASH_POS_CHANGED, self.onResize, self.window_1)
# end wxGlade
self.window_1_pane_1.SetBackgroundStyle(wx.BG_STYLE_CUSTOM)
self.mapBitmap = None
self.window_1_pane_1.Bind(wx.EVT_PAINT, self.onPaint)
self.Bind(wx.EVT_ERASE_BACKGROUND, self.onEraseBG)
self.proj = project.Project()
self.proj.setSilent(True)
# Make status bar at bottom.
self.sb = wx.StatusBar(self)
self.SetStatusBar(self.sb)
self.sb.SetFieldsCount(1)
self.sb.SetStatusText("PAUSED")
self.button_sim_log_export.Enable(False)
# Connect to executor
try:
executor_port = int(sys.argv[1])
except ValueError:
print "ERROR: Invalid port '{}'".format(arg)
sys.exit(2)
self.executorProxy = xmlrpclib.ServerProxy("http://127.0.0.1:{}".format(executor_port), allow_none=True)
# Create the XML-RPC server
# Search for a port we can successfully bind to
while True:
listen_port = random.randint(10000, 65535)
try:
self.xmlrpc_server = SimpleXMLRPCServer(("127.0.0.1", listen_port), logRequests=False, allow_none=True)
except socket.error as e:
pass
else:
break
# Register functions with the XML-RPC server
self.xmlrpc_server.register_function(self.handleEvent)
# Kick off the XML-RPC server thread
self.XMLRPCServerThread = threading.Thread(target=self.xmlrpc_server.serve_forever)
self.XMLRPCServerThread.daemon = True
self.XMLRPCServerThread.start()
print "SimGUI listening for XML-RPC calls on http://127.0.0.1:{} ...".format(listen_port)
# Register with executor for event callbacks
self.executorProxy.registerExternalEventTarget("http://127.0.0.1:{}".format(listen_port))
self.robotPos = None
self.robotVel = (0,0)
self.markerPos = None
self.dialogueManager = None
self.currentGoal = None
self.Bind(wx.EVT_CLOSE, self.onClose)
    def loadRegionFile(self, filename):
        """Load a region (map) file into the project and redraw the map."""
        self.proj.rfi = regions.RegionFileInterface()
        self.proj.rfi.readFile(filename)
        self.Bind(wx.EVT_SIZE, self.onResize, self)
        self.onResize()
    def loadSpecFile(self, filename):
        """Load a specification project file and set up the handler
        subsystem with the project's current configuration."""
        self.proj.loadProject(filename)
        self.hsub = handlerSubsystem.HandlerSubsystem(None, self.proj.project_root)
        config, success = self.hsub.loadConfigFile(self.proj.current_config)
        if success: self.hsub.configs.append(config)
        self.hsub.setExecutingConfig(self.proj.current_config)
        self.Bind(wx.EVT_SIZE, self.onResize, self)
        # The SLURP dialogue tab only applies to SLURP-parsed specs;
        # remove it (page index 1) otherwise.
        if self.proj.compile_options["parser"] == "slurp":
            self.initDialogue()
        else:
            self.notebook_1.DeletePage(1)
        self.onResize()
    def handleEvent(self, eventType, eventData):
        """
        Processes messages from the controller, and updates the GUI accordingly

        Called from the XML-RPC server thread, so all GUI work is deferred
        to the main thread via wx.CallAfter.
        """
        # Update stuff (should put these in rough order of frequency for optimal speed
        if eventType == "FREQ":
            wx.CallAfter(self.sb.SetStatusText, "Running at approximately {}Hz...".format(eventData), 0)
        elif eventType == "POSE":
            self.robotPos = eventData
            wx.CallAfter(self.onPaint)
        elif eventType == "MARKER":
            self.markerPos = eventData
            wx.CallAfter(self.onPaint)
        elif eventType == "VEL":
            # We can't plot anything before we have a map
            if self.mapBitmap is None:
                print "Received drawing command before map. You probably have an old execute.py process running; please kill it and try again."
                return
            # Scale world coordinates to map pixels.
            [x,y] = eventData
            [x,y] = map(int, (self.mapScale*x, self.mapScale*y))
            self.robotVel = (x, y)
        elif eventType == "PAUSE":
            wx.CallAfter(self.sb.SetStatusText, "PAUSED.", 0)
        elif eventType == "SPEC":
            wx.CallAfter(self.loadSpecFile, eventData)
        elif eventType == "REGIONS":
            wx.CallAfter(self.loadRegionFile, eventData)
        else:
            # Everything else is a log message; route by message prefix and
            # honor the per-category log checkboxes.
            if isinstance(eventData, basestring):
                if eventData.startswith("Output proposition"):
                    if self.checkbox_statusLog_propChange.GetValue():
                        wx.CallAfter(self.appendLog, eventData + "\n", color="GREEN")
                elif eventData.startswith("Heading to"):
                    if self.checkbox_statusLog_targetRegion.GetValue():
                        wx.CallAfter(self.appendLog, eventData + "\n", color="BLUE")
                elif eventData.startswith("Crossed border"):
                    if self.checkbox_statusLog_border.GetValue():
                        wx.CallAfter(self.appendLog, eventData + "\n", color="CYAN")
                # Detect our current goal index
                elif eventData.startswith("Currently pursuing goal"):
                    m = re.search(r"#(\d+)", eventData)
                    if m is not None:
                        self.currentGoal = int(m.group(1))
                elif self.checkbox_statusLog_other.GetValue():
                    if eventData != "":
                        wx.CallAfter(self.appendLog, eventData + "\n", color="BLACK")
            elif self.checkbox_statusLog_other.GetValue():
                # Non-string payloads are stringified into the debug log.
                if eventData != "":
                    wx.CallAfter(self.appendLog, str(eventData) + "\n", color="BLACK")
    def __set_properties(self):
        # wxGlade-generated widget defaults -- avoid hand-editing between
        # the begin/end markers so the designer can regenerate them.
        # begin wxGlade: SimGUI_Frame.__set_properties
        self.SetTitle("Simulation Status")
        self.SetSize((836, 713))
        self.button_SLURPsubmit.SetDefault()
        self.checkbox_statusLog_targetRegion.SetValue(1)
        self.checkbox_statusLog_propChange.SetValue(1)
        self.checkbox_statusLog_border.SetValue(1)
        self.checkbox_statusLog_other.SetValue(1)
        # end wxGlade
    def __do_layout(self):
        # wxGlade-generated sizer layout -- avoid hand-editing between the
        # begin/end markers so the designer can regenerate them.
        # begin wxGlade: SimGUI_Frame.__do_layout
        sizer_1 = wx.BoxSizer(wx.VERTICAL)
        sizer_2 = wx.BoxSizer(wx.HORIZONTAL)
        sizer_5 = wx.BoxSizer(wx.HORIZONTAL)
        sizer_43_copy_1 = wx.BoxSizer(wx.VERTICAL)
        sizer_3 = wx.BoxSizer(wx.VERTICAL)
        sizer_43_copy_copy = wx.BoxSizer(wx.VERTICAL)
        sizer_6 = wx.BoxSizer(wx.VERTICAL)
        sizer_7 = wx.BoxSizer(wx.HORIZONTAL)
        sizer_4 = wx.BoxSizer(wx.HORIZONTAL)
        sizer_5.Add((5, 30), 0, 0, 0)
        sizer_43_copy_copy.Add((20, 5), 0, 0, 0)
        sizer_4.Add(self.text_ctrl_sim_log, 1, wx.ALL|wx.EXPAND, 5)
        self.notebook_1_pane_1.SetSizer(sizer_4)
        sizer_6.Add(self.text_ctrl_slurpout, 1, wx.ALL|wx.EXPAND, 5)
        sizer_7.Add(self.text_ctrl_slurpin, 1, wx.ALL|wx.EXPAND, 5)
        sizer_7.Add(self.button_SLURPsubmit, 0, wx.ALL, 5)
        sizer_6.Add(sizer_7, 0, wx.EXPAND, 0)
        self.notebook_1_pane_2.SetSizer(sizer_6)
        self.notebook_1.AddPage(self.notebook_1_pane_1, "Status Log")
        self.notebook_1.AddPage(self.notebook_1_pane_2, "SLURP Dialogue")
        sizer_43_copy_copy.Add(self.notebook_1, 1, wx.EXPAND, 0)
        sizer_43_copy_copy.Add((20, 5), 0, 0, 0)
        sizer_5.Add(sizer_43_copy_copy, 6, wx.EXPAND, 0)
        sizer_5.Add((20, 30), 0, 0, 0)
        sizer_43_copy_1.Add((20, 20), 0, 0, 0)
        sizer_43_copy_1.Add(self.button_sim_startPause, 0, wx.LEFT|wx.RIGHT|wx.EXPAND|wx.ALIGN_CENTER_HORIZONTAL, 20)
        sizer_43_copy_1.Add((20, 10), 0, 0, 0)
        sizer_43_copy_1.Add(self.button_sim_log_clear, 0, wx.LEFT|wx.RIGHT|wx.EXPAND|wx.ALIGN_CENTER_HORIZONTAL, 20)
        sizer_43_copy_1.Add((20, 10), 0, 0, 0)
        sizer_43_copy_1.Add(self.button_sim_log_export, 0, wx.LEFT|wx.RIGHT|wx.EXPAND|wx.ALIGN_CENTER_HORIZONTAL, 20)
        sizer_43_copy_1.Add((20, 10), 0, 0, 0)
        sizer_3.Add((20, 20), 0, 0, 0)
        sizer_3.Add(self.label_1, 0, 0, 0)
        sizer_3.Add(self.checkbox_statusLog_targetRegion, 0, wx.TOP|wx.BOTTOM, 5)
        sizer_3.Add(self.checkbox_statusLog_propChange, 0, wx.TOP|wx.BOTTOM, 5)
        sizer_3.Add(self.checkbox_statusLog_border, 0, wx.TOP|wx.BOTTOM, 5)
        sizer_3.Add(self.checkbox_statusLog_other, 0, wx.TOP|wx.BOTTOM, 5)
        sizer_43_copy_1.Add(sizer_3, 1, wx.EXPAND, 0)
        sizer_5.Add(sizer_43_copy_1, 3, wx.EXPAND, 0)
        sizer_5.Add((20, 30), 0, 0, 0)
        self.window_1_pane_2.SetSizer(sizer_5)
        self.window_1.SplitHorizontally(self.window_1_pane_1, self.window_1_pane_2)
        sizer_2.Add(self.window_1, 1, wx.EXPAND, 0)
        sizer_2.Add((20, 20), 0, 0, 0)
        sizer_1.Add(sizer_2, 1, wx.EXPAND, 0)
        self.SetSizer(sizer_1)
        self.Layout()
        # end wxGlade
        # Post-generation tweaks: split evenly and use a white map canvas.
        self.window_1.SetSashPosition(self.GetSize().y/2)
        self.window_1_pane_1.SetBackgroundColour(wx.WHITE)
    def onResize(self, event=None): # wxGlade: SimGUI_Frame.<event_handler>
        """Re-render the map bitmap to fit the current pane size."""
        size = self.window_1_pane_1.GetSize()
        self.mapBitmap = wx.EmptyBitmap(size.x, size.y)
        # drawMap returns the world->pixel scale used for later drawing.
        self.mapScale = mapRenderer.drawMap(self.mapBitmap, self.proj.rfi, scaleToFit=True, drawLabels=mapRenderer.LABELS_ALL_EXCEPT_OBSTACLES, memory=True)
        self.Refresh()
        self.Update()
        if event is not None:
            event.Skip()
    def onEraseBG(self, event):
        # Avoid unnecessary flicker by intercepting this event
        pass
    def onPaint(self, event=None):
        """Draw the map bitmap plus robot/marker overlays.

        Called both as a paint handler (event set) and directly for
        immediate redraws (event None, via wx.CallAfter in handleEvent).
        """
        if self.mapBitmap is None:
            return
        if event is None:
            dc = wx.ClientDC(self.window_1_pane_1)
        else:
            pdc = wx.AutoBufferedPaintDC(self.window_1_pane_1)
            try:
                # Prefer a graphics-context DC for anti-aliased drawing.
                dc = wx.GCDC(pdc)
            except:
                dc = pdc

        dc.BeginDrawing()

        # Draw background
        dc.DrawBitmap(self.mapBitmap, 0, 0)

        # Draw robot
        if self.robotPos is not None:
            [x,y] = map(lambda x: int(self.mapScale*x), self.robotPos)
            dc.DrawCircle(x, y, 5)

        if self.markerPos is not None:
            [m,n] = map(lambda m: int(self.mapScale*m), self.markerPos)
            dc.SetBrush(wx.Brush(wx.RED))
            dc.DrawCircle(m, n, 5)

        # Draw velocity vector of robot (for debugging)
        #dc.DrawLine(self.robotPos[0], self.robotPos[1],
        #            self.robotPos[0] + self.robotVel[0], self.robotPos[1] + self.robotVel[1])

        dc.EndDrawing()

        if event is not None:
            event.Skip()
    def appendLog(self, text, color="BLACK"):
        """Append timestamped, colored text to the status log control."""
        # for printing everything on the log
        # annotate any pXXX region names with their human-friendly name
        # convert to set to avoid infinite explosion
        for p_reg in set(re.findall(r'\b(p\d+)\b',text)):
            # Find which named region this decomposed sub-region belongs to.
            for rname, subregs in self.proj.regionMapping.iteritems():
                if p_reg in subregs:
                    break
            # NOTE(review): if p_reg is in no mapping, rname is whatever the
            # loop last iterated over -- confirm inputs always match.
            text = re.sub(r'\b'+p_reg+r'\b', '%s (%s)' % (p_reg, rname), text)
        self.text_ctrl_sim_log.SetInsertionPointEnd()
        self.text_ctrl_sim_log.BeginTextColour(color)
        self.text_ctrl_sim_log.WriteText("["+time.strftime("%H:%M:%S")+"] "+text)
        self.text_ctrl_sim_log.EndTextColour()
        # Keep the newest entry visible.
        self.text_ctrl_sim_log.ShowPosition(self.text_ctrl_sim_log.GetLastPosition())
        self.text_ctrl_sim_log.Refresh()
    def onSimStartPause(self, event): # wxGlade: SimGUI_Frame.<event_handler>
        """Toggle execution: Start/Resume tells the executor to resume,
        Pause tells it to pause. The button label tracks the state."""
        btn_label = self.button_sim_startPause.GetLabel()
        if btn_label == "Start" or btn_label == "Resume":
            self.button_sim_log_export.Enable(False)
            self.executorProxy.resume()
            self.appendLog("%s!\n" % btn_label,'GREEN')
            self.button_sim_startPause.SetLabel("Pause")
        else:
            self.executorProxy.pause()
            self.appendLog('Pause...\n','RED')
            # Exporting the log is only allowed while paused.
            self.button_sim_log_export.Enable(True)
            self.button_sim_startPause.SetLabel("Resume")
        self.Refresh()
        event.Skip()
    def onSimExport(self, event): # wxGlade: SimGUI_Frame.<event_handler>
        """
        Ask the user for a filename to save the Log as, and then save it.

        Cancelling the dialog aborts without saving; a .txt extension is
        forced onto the chosen name before writing.
        """
        default = 'StatusLog'
        # Get a filename
        fileName = wx.FileSelector("Save File As",
                                   os.path.join(os.getcwd(),'examples'),
                                   default_filename=default,
                                   default_extension="txt",
                                   wildcard="Status Log files (*.txt)|*.txt",
                                   flags = wx.SAVE | wx.OVERWRITE_PROMPT)
        if fileName == "": return # User cancelled.
        # Force a .txt extension. How mean!!!
        if os.path.splitext(fileName)[1] != ".txt":
            fileName = fileName + ".txt"
        # Save data to the file
        self.saveFile(fileName)
        event.Skip()
def saveFile(self, fileName):
"""
Write all data out to a file.
"""
if fileName is None:
return
f = open(fileName,'w')
print >>f, "Experiment Status Log"
print >>f
# write the log
print >>f, str(self.text_ctrl_sim_log.GetValue())
f.close()
    def onClose(self, event):
        """Shut down the executor and the local XML-RPC server on window close."""
        msg = wx.BusyInfo("Please wait, shutting down...")
        try:
            self.executorProxy.shutdown()
        except socket.error:
            # Executor probably crashed; nothing left to shut down remotely.
            pass
        # Stop serving and wait for the server thread to terminate.
        self.xmlrpc_server.shutdown()
        self.XMLRPCServerThread.join()
        #time.sleep(2)
        event.Skip()
    def onSimClear(self, event): # wxGlade: SimGUI_Frame.<event_handler>
        """Erase all text from the simulation log pane."""
        self.text_ctrl_sim_log.Clear()
        event.Skip()
    def initDialogue(self):
        """Initialize the SLURP DialogManager from the project spec.

        Regenerates the LTL specification with SpecGenerator so the
        traceback mapping required by the DialogManager is available.
        """
        # Add SLURP to path for import
        p = os.path.dirname(os.path.abspath(__file__))
        sys.path.append(os.path.join(p, "..", "etc", "SLURP"))
        from ltlbroom.specgeneration import SpecGenerator
        _SLURP_SPEC_GENERATOR = SpecGenerator()
        # Filter out regions it shouldn't know about (obstacles, boundary)
        filtered_regions = [region.name for region in self.proj.rfi.regions
                            if not (region.isObstacle or region.name.lower() == "boundary")]
        sensorList = copy.deepcopy(self.proj.enabled_sensors)
        robotPropList = self.proj.enabled_actuators + self.proj.all_customs
        text = self.proj.specText
        # Only the traceback result is consumed here; the generated LTL
        # and other outputs are discarded.
        LTLspec_env, LTLspec_sys, self.proj.internal_props, internal_sensors, results, responses, traceback = \
            _SLURP_SPEC_GENERATOR.generate(text, sensorList, filtered_regions, robotPropList,
                                           self.hsub.executing_config.region_tags)
        from ltlbroom.dialog import DialogManager
        self.dialogueManager = DialogManager(traceback)
    def onSLURPSubmit(self, event): # wxGlade: SimGUI_Frame.<event_handler>
        """Send the typed SLURP utterance to the dialogue manager and echo
        both sides of the exchange into the output pane."""
        if self.text_ctrl_slurpin.GetValue() == "":
            # Nothing typed; ignore the submit.
            event.Skip()
            return
        user_text = self.text_ctrl_slurpin.GetValue()
        # Echo the user's utterance in bold.
        self.text_ctrl_slurpout.BeginBold()
        self.text_ctrl_slurpout.AppendText("User: ")
        self.text_ctrl_slurpout.EndBold()
        self.text_ctrl_slurpout.AppendText(user_text + "\n")
        self.text_ctrl_slurpout.ShowPosition(self.text_ctrl_slurpout.GetLastPosition())
        self.text_ctrl_slurpout.Refresh()
        self.text_ctrl_slurpin.Clear()
        # Append the system response (or an error if initDialogue never ran).
        if self.dialogueManager is None:
            self.text_ctrl_slurpout.BeginBold()
            self.text_ctrl_slurpout.AppendText("Error: Dialogue Manager not initialized")
            self.text_ctrl_slurpout.EndBold()
        else:
            sys_text = self.dialogueManager.tell(user_text, self.currentGoal)
            self.text_ctrl_slurpout.BeginBold()
            self.text_ctrl_slurpout.AppendText("System: ")
            self.text_ctrl_slurpout.EndBold()
            self.text_ctrl_slurpout.AppendText(sys_text + "\n")
        self.text_ctrl_slurpout.ShowPosition(self.text_ctrl_slurpout.GetLastPosition())
        self.text_ctrl_slurpout.Refresh()
        event.Skip()
# end of class SimGUI_Frame
if __name__ == "__main__":
app = wx.PySimpleApp(0)
wx.InitAllImageHandlers()
simGUI_Frame = SimGUI_Frame(None, -1, "")
app.SetTopWindow(simGUI_Frame)
simGUI_Frame.Show()
app.MainLoop()
|
wongkaiweng/LTLMoP
|
src/lib/simGUI.py
|
Python
|
gpl-3.0
| 19,795
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Standard Django entry point: point at the project's settings module
    # (unless the environment already overrides it) and dispatch to the
    # management command given on the command line.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "clone.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
|
sigma-geosistemas/clone
|
src/manage.py
|
Python
|
lgpl-3.0
| 248
|
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
import os
import sys
import time
def findfile(start, name):
    """Recursively search *start* for files named *name* and print each
    match as a normalized absolute path.

    Bug fix: os.walk() already yields directory paths rooted at *start*,
    so the old os.path.join(start, relpath, name) duplicated the leading
    component for any start other than '.' (e.g. 'dir/dir/sub/name').
    """
    for dirpath, dirs, files in os.walk(start):
        if name in files:
            full_path = os.path.join(dirpath, name)
            print(os.path.normpath(os.path.abspath(full_path)))
def modified_within(top, seconds):
    """Print every file under *top* whose modification time falls within
    the last *seconds* seconds (relative to the time of the call)."""
    cutoff = time.time() - seconds
    for dirpath, _dirnames, filenames in os.walk(top):
        for filename in filenames:
            candidate = os.path.join(dirpath, filename)
            # Guard against files removed between walk() and stat().
            if not os.path.exists(candidate):
                continue
            if os.path.getmtime(candidate) > cutoff:
                print(candidate)
if __name__ == '__main__':
    # Usage: find_file.py <top_dir> <seconds>
    if len(sys.argv) != 3:
        print('lack arguments')
        raise SystemExit(1)
    # findfile(sys.argv[1], sys.argv[2])
    modified_within(sys.argv[1], float(sys.argv[2]))
|
xu6148152/Binea_Python_Project
|
PythonCookbook/shell_system/find_file.py
|
Python
|
mit
| 867
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# RoundTM - A Round based Tournament Manager
# Copyright (c) 2013 Rémi Alvergnat <toilal.dev@gmail.com>
#
# RoundTM is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# RoundTM is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
from .events import Event
class Console(object):
    """Controls an Event with console."""
    def __init__(self, event):
        # The Event instance being driven from the interactive console.
        self._event = event
    @property
    def next_round(self):
        """Go to next round, using the provided strategy.

        Exposed as a property so typing "c.next_round" in IPython
        advances the tournament without call parentheses.
        """
        return self._event.next_round()
    @property
    def ranking(self):
        """Display current ranking, one numbered line per ranked stat."""
        # Python 2 print statement (file imports unicode_literals from __future__).
        i = 1
        for stat in self._event.ranking:
            print "[%s] %s" % (i, stat)
            i += 1
def load(path):
    """Load a project in console mode.
    :param path: path of the project to load into an Event.
    :return: class `Console`, a control object for console usage (IPython)
    """
    return Console(Event(path))
|
Toilal/roundtm
|
roundtm/console.py
|
Python
|
lgpl-3.0
| 1,499
|
import os
import pymatbridge as pymat
from pymatbridge.compat import text_type
import numpy as np
import numpy.testing as npt
import test_utils as tu
class TestRunCode:
# Start a Matlab session before running any tests
@classmethod
def setup_class(cls):
cls.mlab = tu.connect_to_matlab()
# Tear down the Matlab session after running all the tests
@classmethod
def teardown_class(cls):
tu.stop_matlab(cls.mlab)
# Running 'disp()' in Matlab command window
def test_disp(self):
result1 = self.mlab.run_code("disp('Hello world')")['content']['stdout']
result2 = self.mlab.run_code("disp(' ')")['content']['stdout']
result3 = self.mlab.run_code("disp('')")['content']['stdout']
npt.assert_equal(result1, "Hello world\n")
npt.assert_equal(result2, " \n")
if tu.on_octave():
npt.assert_equal(result3, '\n')
else:
npt.assert_equal(result3, "")
# Make some assignments and run basic operations
def test_basic_operation(self):
result_assignment_a = self.mlab.run_code("a = 21.23452261")['content']['stdout']
result_assignment_b = self.mlab.run_code("b = 347.745")['content']['stdout']
result_sum = self.mlab.run_code("a + b")['content']['stdout']
result_diff = self.mlab.run_code("a - b")['content']['stdout']
result_product = self.mlab.run_code("a * b")['content']['stdout']
result_division = self.mlab.run_code("c = a / b")['content']['stdout']
if tu.on_octave():
npt.assert_equal(result_assignment_a, text_type("a = 21.235\n"))
npt.assert_equal(result_assignment_b, text_type("b = 347.75\n"))
npt.assert_equal(result_sum, text_type("ans = 368.98\n"))
npt.assert_equal(result_diff, text_type("ans = -326.51\n"))
npt.assert_equal(result_product, text_type("ans = 7384.2\n"))
npt.assert_equal(result_division, text_type("c = 0.061063\n"))
else:
npt.assert_equal(result_assignment_a, text_type("\na =\n\n 21.2345\n\n"))
npt.assert_equal(result_assignment_b, text_type("\nb =\n\n 347.7450\n\n"))
npt.assert_equal(result_sum, text_type("\nans =\n\n 368.9795\n\n"))
npt.assert_equal(result_diff, text_type("\nans =\n\n -326.5105\n\n"))
npt.assert_equal(result_product, text_type("\nans =\n\n 7.3842e+03\n\n"))
npt.assert_equal(result_division, text_type("\nc =\n\n 0.0611\n\n"))
# Put in some undefined code
def test_undefined_code(self):
success = self.mlab.run_code("this_is_nonsense")['success']
message = self.mlab.run_code("this_is_nonsense")['content']['stdout']
assert not success
if tu.on_octave():
npt.assert_equal(message, "'this_is_nonsense' undefined near line 1 column 1")
else:
npt.assert_equal(message, "Undefined function or variable 'this_is_nonsense'.")
def test_stack_traces(self):
this_dir = os.path.abspath(os.path.dirname(__file__))
test_file = os.path.join(this_dir, 'test_stack_trace.m')
self.mlab.run_code("addpath('%s')" % this_dir)
response = self.mlab.run_code('test_stack_trace(10)')
npt.assert_equal(response['stack'], [
{'name': 'baz', 'line': 14, 'file': test_file},
{'name': 'bar', 'line': 10, 'file': test_file},
{'name': 'foo', 'line': 6, 'file': test_file},
{'name': 'test_stack_trace', 'line': 2, 'file': test_file}
])
response = self.mlab.run_code('x = 2')
npt.assert_equal(response['stack'], [])
|
arokem/python-matlab-bridge
|
pymatbridge/tests/test_run_code.py
|
Python
|
bsd-2-clause
| 3,685
|
# Given a collection of numbers that might contain duplicates, return all possible unique permutations.
# For example,
# [1,1,2] have the following unique permutations:
# [1,1,2], [1,2,1], and [2,1,1].
class Solution:
    """LeetCode 47: Permutations II."""
    # @param {integer[]} nums
    # @return {integer[][]}
    def permuteUnique(self, nums):
        """Return all unique permutations of *nums* (which may contain
        duplicates) as a list of lists.

        Builds permutations incrementally: for each new element, insert
        it at every gap of every existing permutation; a set of tuples
        deduplicates results produced via equal duplicate elements.
        """
        if not nums:
            return nums
        perms = {(nums[0],)}
        for value in nums[1:]:
            extended = set()
            # Plain iteration instead of the old destructive pop() loop;
            # slices of a tuple are already tuples, so no tuple() calls.
            for base in perms:
                for pos in range(len(base) + 1):
                    extended.add(base[:pos] + (value,) + base[pos:])
            perms = extended
        return [list(p) for p in perms]
|
abawchen/leetcode
|
solutions/047_permutations_ii.py
|
Python
|
mit
| 1,205
|
import capstone as _capstone
try:
import unicorn as _unicorn
except ImportError:
_unicorn = None
from .arch import Arch
# TODO: determine proper base register (if it exists)
# TODO: handle multiple return registers?
# TODO: which endianness should be default?
class ArchARM(Arch):
def __init__(self, endness="Iend_LE"):
super(ArchARM, self).__init__(endness)
if endness == 'Iend_BE':
self.function_prologs = {
r"\xe9\x2d[\x00-\xff][\x00-\xff]", # stmfd sp!, {xxxxx}
r"\xe5\x2d\xe0\x04", # push {lr}
}
self.function_epilogs = {
r"\xe8\xbd[\x00-\xff]{2}\xe1\x2f\xff\x1e" # pop {xxx}; bx lr
r"\xe4\x9d\xe0\x04\xe1\x2f\xff\x1e" # pop {xxx}; bx lr
}
# ArchARM will match with any ARM, but ArchARMEL/ArchARMHF is a mismatch
def __eq__(self, other):
# pylint: disable=unidiomatic-typecheck
if not isinstance(other, ArchARM):
return False
if self.memory_endness != other.memory_endness or self.bits != other.bits:
return False
if type(self) is type(other):
return True
if type(self) is ArchARM or type(other) is ArchARM:
return True
return False
def __getstate__(self):
self._cs = None
self._cs_thumb = None
return self.__dict__
def __setstate__(self, data):
self.__dict__.update(data)
@property
def capstone(self):
if self._cs is None:
self._cs = _capstone.Cs(self.cs_arch, self.cs_mode + _capstone.CS_MODE_ARM)
self._cs.detail = True
return self._cs
@property
def capstone_thumb(self):
if self._cs_thumb is None:
self._cs_thumb = _capstone.Cs(self.cs_arch, self.cs_mode + _capstone.CS_MODE_THUMB)
self._cs_thumb.detail = True
return self._cs_thumb
@property
def unicorn(self):
return _unicorn.Uc(self.uc_arch, self.uc_mode + _unicorn.UC_MODE_ARM) if _unicorn is not None else None
@property
def unicorn_thumb(self):
return _unicorn.Uc(self.uc_arch, self.uc_mode + _unicorn.UC_MODE_THUMB) if _unicorn is not None else None
bits = 32
vex_arch = "VexArchARM"
name = "ARMEL"
qemu_name = 'arm'
ida_processor = 'armb'
linux_name = 'arm'
triplet = 'arm-linux-gnueabihf'
max_inst_bytes = 4
ip_offset = 68
sp_offset = 60
bp_offset = 60
ret_offset = 8
vex_conditional_helpers = True
syscall_num_offset = 36
call_pushes_ret = False
stack_change = -4
memory_endness = 'Iend_LE'
register_endness = 'Iend_LE'
sizeof = {'short': 16, 'int': 32, 'long': 32, 'long long': 64}
cs_arch = _capstone.CS_ARCH_ARM
cs_mode = _capstone.CS_MODE_LITTLE_ENDIAN
_cs_thumb = None
uc_arch = _unicorn.UC_ARCH_ARM if _unicorn else None
uc_mode = _unicorn.UC_MODE_LITTLE_ENDIAN if _unicorn else None
uc_const = _unicorn.arm_const if _unicorn else None
uc_prefix = "UC_ARM_" if _unicorn else None
#self.ret_instruction = "\x0E\xF0\xA0\xE1" # this is mov pc, lr
ret_instruction = "\x1E\xFF\x2F\xE1" # this is bx lr
nop_instruction = "\x00\x00\x00\x00"
function_prologs = {
r"[\x00-\xff][\x00-\xff]\x2d\xe9", # stmfd sp!, {xxxxx}
r"\x04\xe0\x2d\xe5", # push {lr}
}
function_epilogs = {
r"[\x00-\xff]{2}\xbd\xe8\x1e\xff\x2f\xe1" # pop {xxx}; bx lr
r"\x04\xe0\x9d\xe4\x1e\xff\x2f\xe1" # pop {xxx}; bx lr
}
instruction_alignment = 4
concretize_unique_registers = {64}
default_register_values = [
( 'sp', Arch.initial_sp, True, 'global' ), # the stack
( 'itstate', 0x00000000, False, None ) # part of the thumb conditional flags
]
entry_register_values = {
'r0': 'ld_destructor'
}
default_symbolic_registers = [ 'r0', 'r1', 'r2', 'r3', 'r4', 'r5', 'r6', 'r7', 'r8', 'r9', 'r10', 'r11', 'r12',
'sp', 'lr', 'pc' ]
register_names = {
8: 'r0',
12: 'r1',
16: 'r2',
20: 'r3',
24: 'r4',
28: 'r5',
32: 'r6',
36: 'r7',
40: 'r8',
44: 'r9',
48: 'r10',
52: 'r11',
56: 'r12',
# stack pointer
60: 'sp',
# link register
64: 'lr',
# program counter
68: 'pc',
# condition stuff
72: 'cc_op',
76: 'cc_dep1',
80: 'cc_dep2',
84: 'cc_ndep',
88: 'qflag32',
92: 'geflag0',
96: 'geflag1',
100: 'geflag2',
104: 'geflag3',
108: 'emnote',
112: 'cmstart',
116: 'cmlen',
120: 'nraddr',
124: 'ip_at_syscall',
128: 'd0',
136: 'd1',
144: 'd2',
152: 'd3',
160: 'd4',
168: 'd5',
176: 'd6',
184: 'd7',
192: 'd8',
200: 'd9',
208: 'd10',
216: 'd11',
224: 'd12',
232: 'd13',
240: 'd14',
248: 'd15',
256: 'd16',
264: 'd17',
272: 'd18',
280: 'd19',
288: 'd20',
296: 'd21',
304: 'd22',
312: 'd23',
320: 'd24',
328: 'd25',
336: 'd26',
344: 'd27',
352: 'd28',
360: 'd29',
368: 'd30',
376: 'd31',
384: 'fpscr',
388: 'tpidruro',
392: 'itstate'
}
registers = {
# GPRs
'r0': (8, 4),
'r1': (12, 4),
'r2': (16, 4),
'r3': (20, 4),
'r4': (24, 4),
'r5': (28, 4),
'r6': (32, 4),
'r7': (36, 4),
'r8': (40, 4),
'r9': (44, 4),
'r10': (48, 4),
'r11': (52, 4),
'r12': (56, 4),
# stack pointer
'sp': (60, 4), 'bp': (60, 4),
'r13': (60, 4),
# link register
'r14': (64, 4),
'lr': (64, 4),
# program counter
'r15': (68, 4),
'pc': (68, 4),
'ip': (68, 4),
# condition stuff
'cc_op': (72, 4),
'cc_dep1': (76, 4),
'cc_dep2': (80, 4),
'cc_ndep': (84, 4),
'qflag32': (88, 4),
'geflag0': (92, 4),
'geflag1': (96, 4),
'geflag2': (100, 4),
'geflag3': (104, 4),
'emnote': (108, 4),
'cmstart': (112, 4),
'cmlen': (116, 4),
'nraddr': (120, 4),
'ip_at_syscall': (124, 4),
'd0': (128, 8),
'd1': (136, 8),
'd2': (144, 8),
'd3': (152, 8),
'd4': (160, 8),
'd5': (168, 8),
'd6': (176, 8),
'd7': (184, 8),
'd8': (192, 8),
'd9': (200, 8),
'd10': (208, 8),
'd11': (216, 8),
'd12': (224, 8),
'd13': (232, 8),
'd14': (240, 8),
'd15': (248, 8),
'd16': (256, 8),
'd17': (264, 8),
'd18': (272, 8),
'd19': (280, 8),
'd20': (288, 8),
'd21': (296, 8),
'd22': (304, 8),
'd23': (312, 8),
'd24': (320, 8),
'd25': (328, 8),
'd26': (336, 8),
'd27': (344, 8),
'd28': (352, 8),
'd29': (360, 8),
'd30': (368, 8),
'd31': (376, 8),
'fpscr': (384, 4),
'tpidruro': (388, 4),
'itstate': (392, 4)
}
argument_registers = {
registers['r0'][0],
registers['r1'][0],
registers['r2'][0],
registers['r3'][0],
registers['r4'][0],
registers['r5'][0],
registers['r6'][0],
registers['r7'][0],
registers['r8'][0],
registers['r9'][0],
registers['r10'][0],
registers['r11'][0],
registers['r12'][0]
}
got_section_name = '.got'
ld_linux_name = 'ld-linux.so.3'
class ArchARMHF(ArchARM):
    # ARM hard-float ABI variant: same register layout as ArchARM,
    # distinguished only by name, toolchain triplet, and loader filename.
    name = 'ARMHF'
    triplet = 'arm-linux-gnueabihf'
    ld_linux_name = 'ld-linux-armhf.so.3'
class ArchARMEL(ArchARM):
    # ARM soft-float (EABI) variant of ArchARM; differs only in
    # triplet and dynamic loader filename.
    name = 'ARMEL'
    triplet = 'arm-linux-gnueabi'
    ld_linux_name = 'ld-linux.so.3'
|
chubbymaggie/archinfo
|
archinfo/arch_arm.py
|
Python
|
bsd-2-clause
| 8,208
|
#!/usr/bin/env python
'''
Copyright (C) 2010 Aurelio A. Heckert, aurium (a) gmail dot com
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
'''
import inkex
def is_empty(val):
    """Return True when *val* is None or its string form has zero length."""
    return val is None or len(str(val)) == 0
class WebSlicer_Effect(inkex.Effect):
    """Base class for the web-slicer Inkscape extensions.

    Provides access to the dedicated "webslicer-layer" SVG layer and a
    helper for serializing extension options into a config text block.
    """

    def __init__(self):
        inkex.Effect.__init__(self)

    def get_slicer_layer(self, force_creation=False):
        """Return the web-slicer layer element, or None.

        If the layer does not exist and force_creation is True, a new
        layer <g> element is created under the document root.
        """
        # Test if webslicer-layer layer exists
        layer = self.document.xpath(
            '//*[@id="webslicer-layer" and @inkscape:groupmode="layer"]',
            namespaces=inkex.NSS)
        # BUG FIX: was "len(layer) is 0" -- identity comparison against an
        # int literal is incorrect in general; use equality instead.
        if len(layer) == 0:
            if force_creation:
                # Create a new layer
                layer = inkex.etree.SubElement(self.document.getroot(), 'g')
                layer.set('id', 'webslicer-layer')
                layer.set(inkex.addNS('label', 'inkscape'), 'Web Slicer')
                layer.set(inkex.addNS('groupmode', 'inkscape'), 'layer')
            else:
                layer = None
        else:
            layer = layer[0]
        return layer

    def get_conf_text_from_list(self, conf_atts):
        """Build "key: value" lines for every option in *conf_atts* with a
        non-empty value; underscores in names become hyphens."""
        conf_list = []
        for att in conf_atts:
            if not is_empty(getattr(self.options, att)):
                conf_list.append(
                    att.replace('_','-') +': '+ str(getattr(self.options, att))
                )
        return "\n".join( conf_list )
|
mattdangerw/inkscape
|
share/extensions/webslicer_effect.py
|
Python
|
gpl-2.0
| 2,071
|
import sys
import os.path
import pprint
sys.path.append(os.path.abspath(__file__ + "\..\.."))
import windows
import windows.test
import windows.debug
import windows.native_exec.simple_x86 as x86
from windows.generated_def.winstructs import *
class MyDebugger(windows.debug.Debugger):
def __init__(self, *args, **kwargs):
super(MyDebugger, self).__init__(*args, **kwargs)
self.single_step_counter = 0
def on_exception(self, exception):
code = exception.ExceptionRecord.ExceptionCode
addr = exception.ExceptionRecord.ExceptionAddress
print("Got exception {0} at 0x{1:x}".format(code, addr))
def on_single_step(self, exception):
code = exception.ExceptionRecord.ExceptionCode
addr = exception.ExceptionRecord.ExceptionAddress
print("Got single_step {0} at 0x{1:x}".format(code, addr))
self.single_step_counter -= 1
if self.single_step_counter > 0:
return self.single_step()
else:
print("No more single step: exiting")
self.current_process.exit()
class SingleStepOnWrite(windows.debug.MemoryBreakpoint):
"""Check that BP/dbg can trigger single step and that instruction follows"""
def trigger(self, dbg, exc):
fault_addr = exc.ExceptionRecord.ExceptionInformation[1]
eip = dbg.current_thread.context.pc
print("Instruction at <{0:#x}> wrote at <{1:#x}>".format(eip, fault_addr))
dbg.single_step_counter = 4
return dbg.single_step()
calc = windows.test.pop_proc_32(dwCreationFlags=DEBUG_PROCESS)
d = MyDebugger(calc)
code = calc.virtual_alloc(0x1000)
data = calc.virtual_alloc(0x1000)
injected = x86.MultipleInstr()
injected += x86.Mov("EAX", 0)
injected += x86.Mov(x86.deref(data), "EAX")
injected += x86.Add("EAX", 4)
injected += x86.Mov(x86.deref(data + 4), "EAX")
injected += x86.Add("EAX", 8)
injected += x86.Mov(x86.deref(data + 8), "EAX")
injected += x86.Nop()
injected += x86.Nop()
injected += x86.Ret()
calc.write_memory(code, injected.get_code())
d.add_bp(SingleStepOnWrite(data, size=8, events="W"))
calc.create_thread(code, 0)
d.loop()
|
hakril/PythonForWindows
|
samples/debug/debugger_membp_singlestep.py
|
Python
|
bsd-3-clause
| 2,140
|
# Variational Bayes for binary logistic regression
# Written by Amazasp Shaumyan
#https://github.com/AmazaspShumik/sklearn-bayes/blob/master/ipython_notebooks_tutorials/linear_models/bayesian_logistic_regression_demo.ipynb
import superimport
#from skbayes.linear_models import EBLogisticRegression,VBLogisticRegression
from bayes_logistic import EBLogisticRegression, VBLogisticRegression
import numpy as np
import matplotlib.pyplot as plt
from pyprobml_utils import save_fig
from scipy import stats
from matplotlib import cm
# create data set
np.random.seed(0)
n_samples = 500
x = np.random.randn(n_samples,2)
x[0:250,0] = x[0:250,0] + 3
x[0:250,1] = x[0:250,1] - 3
y = -1*np.ones(500)
y[0:250] = 1
eblr = EBLogisticRegression(tol_solver = 1e-3)
vblr = VBLogisticRegression()
eblr.fit(x,y)
vblr.fit(x,y)
# create grid for heatmap
n_grid = 500
max_x = np.max(x,axis = 0)
min_x = np.min(x,axis = 0)
X1 = np.linspace(min_x[0],max_x[0],n_grid)
X2 = np.linspace(min_x[1],max_x[1],n_grid)
x1,x2 = np.meshgrid(X1,X2)
Xgrid = np.zeros([n_grid**2,2])
Xgrid[:,0] = np.reshape(x1,(n_grid**2,))
Xgrid[:,1] = np.reshape(x2,(n_grid**2,))
eblr_grid = eblr.predict_proba(Xgrid)[:,1]
vblr_grid = vblr.predict_proba(Xgrid)[:,1]
grids = [eblr_grid, vblr_grid]
lev = np.linspace(0,1,11)
titles = ['Type II Bayesian Logistic Regression', 'Variational Logistic Regression']
for title, grid in zip(titles, grids):
plt.figure(figsize=(8,6))
plt.contourf(X1,X2,np.reshape(grid,(n_grid,n_grid)),
levels = lev,cmap=cm.coolwarm)
plt.plot(x[y==-1,0],x[y==-1,1],"bo", markersize = 3)
plt.plot(x[y==1,0],x[y==1,1],"ro", markersize = 3)
plt.colorbar()
plt.title(title)
plt.xlabel("x")
plt.ylabel("y")
plt.show()
|
probml/pyprobml
|
scripts/vb_logreg_2d_demo.py
|
Python
|
mit
| 1,811
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import requests
import random
import time
from pyquery import PyQuery as pq
from mongodb import db, conn
from requests.exceptions import ConnectionError
from chem_log import log
# urls = [
# 'http://www.sigmaaldrich.com/china-mainland/zh/analytical-chromatography/analytical-chromatography-catalog.html',
# 'http://www.sigmaaldrich.com/china-mainland/chemistry-product.html',
# 'http://www.sigmaaldrich.com/china-mainland/zh/materials-science/material-science-products.html?TablePage=9540636'
# ]
base_url = 'http://www.sigmaaldrich.com'
chromatography_db_collection = {
0: db.sigma_chromatography_urls_0,
1: db.sigma_chromatography_urls_1,
2: db.sigma_chromatography_urls_2,
3: db.sigma_chromatography_urls_3,
4: db.sigma_chromatography_urls_4,
5: db.sigma_chromatography_urls_5,
6: db.sigma_chromatography_urls_6,
7: db.sigma_chromatography_urls_7,
8: db.sigma_chromatography_urls_8,
9: db.sigma_chromatography_urls_9
}
def get_chromatography_base_urls():
"""
分析/色谱, 基本url
:return:
"""
url = 'http://www.sigmaaldrich.com/china-mainland/zh/analytical-chromatography/analytical-chromatography-catalog.html'
res = get_res(url)
if res:
p = pq(res.content)
section = p('div.text.parbase.section').eq(0)
tables = pq(section).find('table.normal')
for t in tables:
trs = pq(t)('tbody').find('tr')
for tr in trs:
href = pq(tr)('td a').attr('href')
d = {'url': base_url+href}
db.sigma_chromatography_urls.update(d, d, upsert=True)
def get_chromatography_urls():
"""
根据基本的url,一步步进入,若不是最终的产品页面,则保存进对应级别的url,否则保存具体产品的url
:return:
"""
# for i in range(10):
for i in range(4, 10):
if i == 0:
base_urls = db.sigma_chromatography_urls.find(timeout=False)
else:
base_urls = chromatography_db_collection[i-1].find(timeout=False)
print base_urls.count(), '\n'
if base_urls:
for url in base_urls:
res = get_res(url['url'])
if res:
if 'Product #' not in res.content:
p = pq(res.text)
url_list = extract_li(p)
for item in url_list:
# 保存进mongodb
chromatography_db_collection[i].update({'url': item}, {'url': item}, upsert=True)
else:
# 是具体产品页面,保存url进具体产品url表
chromatography_extract_product_url(i, pq(res.content))
conn.close()
else:
conn.close()
break
def extract_li(p):
"""
提取出非具体产品列表页的产品分类url
:param p: pyquery 对象
:return:
"""
url_list = []
uls = p('div.opcContainer table#opcmaintable table ul.opcsectionlist')
for ul in uls:
lis = pq(ul).find('li')
for li in lis:
url_list.append(base_url + pq(li)('a').attr('href'))
return url_list
def chromatography_extract_product_url(i, p):
"""
获取产品列表页的产品url
:param p: pyquery对象
:return:
"""
tables = p('table.opcTable')
for t in tables:
trs = pq(t)('tbody').find('tr')
for tr in trs:
href = pq(tr)('td:first a').attr('href')
if href:
d = {'url': base_url+href}
db.sigma_chromatography_product_urls.update(d, d, upsert=True)
def get_product_detail():
"""
抓取产品详情
:return: None
"""
# urls = db.sigma_chromatography_product_urls.find(timeout=False)
urls = [{'url': 'http://www.sigmaaldrich.com/catalog/product/aldrich/452238?lang=zh®ion=CN'}]
print 2323232
for url in urls:
res = get_res(url['url'])
print 124
if res:
p = pq(res.text)
print p
pro_list = get_pro_list(p) # 获取产品具体规格
def get_pro_list(p):
url = 'http://www.sigmaaldrich.com/catalog/PricingAvailability.do?productNumber=452238&brandKey=ALDRICH&divId=pricingContainerMessage'
container_message = p('div#pricingContainer div#pricingContainerMessage div.product-discontinued li.longMessageContainer').text()
pro_trs = list(p('div#pricingContainer div#pricingContainerMessage table').find('tr'))[1:]
print container_message
print pro_trs
chemistry_db_collection = {
0: db.sigma_chemistry_urls_0,
1: db.sigma_chemistry_urls_1,
2: db.sigma_chemistry_urls_2,
3: db.sigma_chemistry_urls_3,
4: db.sigma_chemistry_urls_4,
5: db.sigma_chemistry_urls_5,
6: db.sigma_chemistry_urls_6,
7: db.sigma_chemistry_urls_7,
8: db.sigma_chemistry_urls_8,
9: db.sigma_chemistry_urls_9
}
def get_chemistry_base_urls():
"""
化学
一级一级获取url,直到最终的产品页面,获取到每个产品详情页的url
:return:
"""
url = 'http://www.sigmaaldrich.com/china-mainland/chemistry-product.html'
res = get_res(url)
if res:
p = pq(res.content)
# print res.content
section = p('#duoamidcol div.sides div.parsys.mainpar div.parbase.section').eq(1)
trs = pq(section)('table').find('tr')
for t in trs:
td_0 = pq(t)('td').eq(0)
td_1 = pq(t)('td').eq(2)
td_2 = pq(t)('td').eq(4)
for td in [td_0, td_1, td_2]:
lis = pq(td)('ul').find('li')
for li in lis:
href = pq(li)('a').attr('href')
if href:
d = {'url': base_url + href}
db.sigma_chemistry_urls.update(d, d, upsert=True)
more = pq(td)('div.one a').attr('href')
if more:
d_1 = {'url': base_url + more}
db.sigma_chemistry_urls.update(d_1, d_1, upsert=True)
def get_chemistry_urls():
"""
根据基本的url,一步步进入,若不是最终的产品页面,则保存进对应级别的url,否则保存具体产品的url
:return:
"""
# for i in range(10):
for i in range(5, 10):
if i == 0:
base_urls = db.sigma_chemistry_urls.find(timeout=False)
else:
base_urls = chemistry_db_collection[i-1].find(timeout=False)
print base_urls.count(), '\n'
if base_urls:
for url in base_urls:
res = get_res(url['url'])
if res:
if 'Product #' not in res.content:
p = pq(res.text)
url_list = extract_li(p)
for item in url_list:
# 保存进mongodb
d = {'url': item}
chemistry_db_collection[i].update(d, d, upsert=True)
else:
# 是具体产品页面,保存url进具体产品url表
chemistry_extract_product_url(i, pq(res.content))
conn.close()
else:
conn.close()
break
def chemistry_extract_product_url(i, p):
"""
获取产品列表页的产品url
:param p: pyquery对象
:return:
"""
tables = p('table.opcTable')
for t in tables:
trs = pq(t)('tbody').find('tr')
for tr in trs:
href = pq(tr)('td:first a').attr('href')
if href:
db.sigma_chemistry_product_urls.update({'url': base_url+href}, {'url': base_url+href}, upsert=True)
materials_db_collection = {
0: db.sigma_materials_urls_0,
1: db.sigma_materials_urls_1,
2: db.sigma_materials_urls_2,
3: db.sigma_materials_urls_3,
4: db.sigma_materials_urls_4,
5: db.sigma_materials_urls_5,
6: db.sigma_materials_urls_6,
7: db.sigma_materials_urls_7,
8: db.sigma_materials_urls_8,
9: db.sigma_materials_urls_9
}
def get_materials_urls():
"""
根据基本的url,一步步进入,若不是最终的产品页面,则保存进对应级别的url,否则保存具体产品的url
:return:
"""
# for i in range(10):
for i in range(6, 10):
if i == 0:
base_urls = [{
'url': 'http://www.sigmaaldrich.com/china-mainland/zh/materials-science/material-science-products.html?TablePage=9540636'
}]
else:
base_urls = materials_db_collection[i-1].find(timeout=False)
print base_urls.count(), '\n'
if base_urls:
for url in base_urls:
res = get_res(url['url'])
if res:
if 'Product #' not in res.content:
p = pq(res.text)
url_list = extract_li(p)
for item in url_list:
# 保存进mongodb
materials_db_collection[i].update({'url': item}, {'url': item}, upsert=True)
else:
# 是具体产品页面,保存url进具体产品url表
materials_extract_product_url(i, pq(res.content))
conn.close()
else:
conn.close()
break
def materials_extract_product_url(i, p):
"""
获取产品列表页的产品url
:param p: pyquery对象
:return:
"""
tables = p('table.opcTable')
for t in tables:
trs = pq(t)('tbody').find('tr')
for tr in trs:
href = pq(tr)('td:first a').attr('href')
if href:
d = {'url': base_url+href}
db.sigma_materials_product_urls.update(d, d, upsert=True)
def get_res(url):
    """
    Fetch *url* with requests and return the Response on HTTP 200.

    :param url: URL to fetch.
    :return: requests.Response on status 200, otherwise None. Any
        exception is logged and answered with None after a 20 s back-off.
    """
    try:
        # NOTE(review): setting the module-wide retry count on every call
        # is redundant after the first call; left as-is (behavior unchanged).
        requests.adapters.DEFAULT_RETRIES = 5
        res = requests.get(url)
        # Random 0-3 s delay to throttle the crawl.
        time.sleep(random.randint(0, 3))
        if res.status_code == 200:
            return res
        return None
    except Exception, e:
        # Back off before the caller moves on to the next URL (Python 2
        # except syntax, consistent with the rest of this file).
        time.sleep(20)
        log.debug(str(e) + ' error')
        return None
|
mutoulbj/chem_spider
|
chem_spider/sigma_urls.py
|
Python
|
mit
| 10,348
|
# Copyright 2013 Nebula Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Security order client"""
from eclcli.api import eclsdk
DEFAULT_API_VERSION = '2'
API_VERSION_OPTION = ''
API_NAME = 'security_order'
API_VERSIONS = {
'2': 'eclcli.security_order.v2',
}
def make_client(instance):
    """Returns a security order client.

    NOTE(review): *instance* (the CLI client manager) is currently unused;
    the ConnectionManager appears to carry its own configuration.
    """
    client = eclsdk.ConnectionManager()
    return client
def build_option_parser(parser):
    """Hook to add global options.

    The security-order plugin contributes no options of its own, so the
    parser is handed back unchanged.
    """
    return parser
|
nttcom/eclcli
|
eclcli/security_order/client.py
|
Python
|
apache-2.0
| 1,013
|
#!/usr/bin/python
# Copyright 2015 Huawei Devices USA Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Authors:
# Chuk Orakwue <chuk.orakwue@huawei.com>
import re
from ftrace.common import ParserError
from .register import register_parser
from collections import namedtuple
#from ftrace.third_party.cnamedtuple import namedtuple
TRACEPOINT = 'workqueue_activate_work'
__all__ = [TRACEPOINT]
WorkqueueQueueActivateWorkBase = namedtuple(TRACEPOINT,
[
'work_struct',
]
)
class WorkqueueQueueActivateWork(WorkqueueQueueActivateWorkBase):
    """Immutable record holding a parsed `workqueue_activate_work` event."""
    # Empty __slots__ keeps namedtuple subclass instances __dict__-free.
    __slots__ = ()
    def __new__(cls, work_struct):
        # work_struct: identifier of the activated work item, as captured
        # from the tracepoint payload.
        return super(cls, WorkqueueQueueActivateWork).__new__(
            cls,
            work_struct=work_struct
        )
workqueue_activate_work_pattern = re.compile(
r"""
work struct (?P<work_struct>.+)
""",
re.X|re.M
)
@register_parser
def workqueue_activate_work(payload):
    """Parser for `workqueue_activate_work` tracepoint.

    :param payload: raw payload text following the tracepoint name.
    :return: WorkqueueQueueActivateWork on a successful match, or None
        when the payload does not match the expected pattern.
    :raises ParserError: wrapping any unexpected failure during parsing.
    """
    try:
        match = re.match(workqueue_activate_work_pattern, payload)
        if match:
            match_group_dict = match.groupdict()
            return WorkqueueQueueActivateWork(**match_group_dict)
    except Exception, e:
        # Python 2 except syntax; re-raise as a domain-specific error.
        raise ParserError(e.message)
|
corakwue/ftrace
|
ftrace/parsers/workqueue_activate_work.py
|
Python
|
apache-2.0
| 1,803
|
import _plotly_utils.basevalidators
class ValuesuffixValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for the `sankey.valuesuffix` attribute."""

    def __init__(self, plotly_name="valuesuffix", parent_name="sankey", **kwargs):
        # Defaults apply unless the caller overrides them via kwargs.
        edit_type = kwargs.pop("edit_type", "calc")
        role = kwargs.pop("role", "style")
        super(ValuesuffixValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
|
plotly/python-api
|
packages/python/plotly/plotly/validators/sankey/_valuesuffix.py
|
Python
|
mit
| 453
|
# solution for http://rosalind.info/problems/grph/
# Reads FASTA records from fasta.txt and prints the O3 overlap graph:
# an edge (s, t) for every pair where the last 3 bases of s equal the
# first 3 bases of t. (Python 2 script: uses the print statement.)
import re
Strings = {}    # sequence ID -> full sequence
Prefixes = {}   # sequence ID -> first 3 bases
Suffixes = {}   # sequence ID -> last 3 bases
index = ""
f = open('fasta.txt', 'r')
for line in f:
    # A line starting with ">" opens a new FASTA record.
    match = re.match(r">", line)
    #match = str(line).find(">")
    if match:
        index = line.strip()
        # Drop the leading ">" to get the bare record ID.
        index = index[1:len(index)]
        Strings[index] = ""
        Prefixes[index] = ""
        Suffixes[index] = ""
    else:
        # Sequences may span multiple lines; rebuild incrementally and
        # refresh the 3-base overlap stubs after each fragment.
        Strings[index] = Strings[index] + line.strip()
        Prefixes[index] = Strings[index][:3]
        Suffixes[index] = Strings[index][-3:]
# Emit each directed edge "i2 i" (suffix of i2 matches prefix of i),
# excluding trivial self-overlaps. Iterating a dict yields its keys, so
# v/v2 are just unused enumeration counters.
for v, i in enumerate(Strings):
    for v2, i2 in enumerate(Strings):
        if Prefixes[i] == Suffixes[i2]:
            if i != i2:
                print i2, i
|
guylaor/rosalind
|
strings/OverlapGraphs.py
|
Python
|
mpl-2.0
| 643
|
from setuptools import setup, find_packages
# Package version; bump this for releases.
version = "0.2.0"
# Runtime dependencies (pinned GitLab API client).
requires = [
    "pyapi-gitlab==6.2.1",
]
# The long description is taken verbatim from the README.
with open("README.rst") as f:
    readme = f.read()
setup(
    name="git-lab",
    version=version,
    description="sub-command of git for access to gitlab",
    long_description=readme,
    author="kamekoopa",
    author_email="hogehugo@gmail.com",
    url="https://github.com/kamekoopa/git-lab",
    packages=find_packages(),
    # Installed as `git-lab`, which git then exposes as `git lab`.
    scripts=["git-lab"],
    license="Apache 2.0",
    install_requires=requires,
)
|
kamekoopa/git-lab
|
setup.py
|
Python
|
apache-2.0
| 521
|
import sys
import socket
import string
import os
import traceback
try:
import irc.bot
except ImportError:
raise ImportError("Requires irclib; pip install irc")
from giotto.controllers import GiottoController
from giotto.exceptions import ProgramNotFound
from giotto.utils import parse_kwargs
irc_execution_snippet = """
parser = argparse.ArgumentParser(description='Giotto IRC Controller')
parser.add_argument('--model-mock', action='store_true', help='Mock out the model')
args = parser.parse_args()
config = {
'host': '',
'port': 6667,
'nick': '',
'ident': 'giotto',
'realname': 'Giotto IRC Bot',
'owner': '',
'channels': '', # comma seperated
'magic_token': '!giotto ',
}
from giotto.controllers.irc_ import listen
listen(manifest, config, model_mock=args.model_mock)"""
class IRCController(GiottoController):
    """Giotto controller that adapts program invocations to IRC."""

    name = 'irc'
    default_mimetype = 'text/x-irc'

    def get_invocation(self):
        return self.request.program

    def get_controller_name(self):
        return 'irc'

    def get_raw_data(self):
        return parse_kwargs(self.request.args)

    def get_concrete_response(self):
        try:
            data = self.get_data_response()
        except ProgramNotFound:
            data = {'body': "Program not found"}
        # Shape the result for the IRC response API.
        return dict(
            response=data['body'],
            say_to=self.request.sent_to,
        )

    def get_primitive(self, primitive):
        if primitive == 'RAW_PAYLOAD':
            return self.get_data()
class IRCRequest(object):
    """A single Giotto program invocation received over IRC.

    Parses the raw IRC event into the program name and its argument list,
    stripping the magic token for channel messages.
    """
    # the program name requested
    program = ''
    # the usr/channel the message was sent to
    sent_to = ''
    # PRIVMSG or whatever else...
    msg_type = ''
    # The message after the magic token has been removed
    # eg: !giotto multiply --x=1 --y=2 --> multiply --x=1 --y=2
    # note, invocations given through private message have no magic token
    # so this value will be the same as `message`
    message_token_removed = ''
    # the raw message with the magic token still attached
    raw_message = ''
    # boolean, was this request sent to a channel (True) or through private msg?
    channel_msg = None
    # opposite of `channel_msg`
    private_msg = None
    # the username of the person who made the request
    username = ''
    # the ident of the user who made the request
    ident = ''

    def __init__(self, event, magic_token, nick):
        """Build a request from an irclib *event*.

        *magic_token* is the channel-invocation prefix (e.g. "!giotto ");
        *nick* is the bot's own nickname (unused, kept for interface
        compatibility).
        """
        self.ident = event.source
        self.username = self.ident.split("!")[0]
        self.msg_type = event.type
        self.sent_to = event.target
        # BUG FIX: the declared `private_msg`/`channel_msg` attributes were
        # never assigned; populate them alongside `private_message`, which
        # other code (IrcBot.process_message) already reads.
        self.private_message = self.private_msg = (self.msg_type == "privmsg")
        self.channel_msg = not self.private_msg
        self.raw_message = event.arguments[0]
        self.program, self.args = self.get_program_and_args(
            self.raw_message, magic_token)

    def get_program_and_args(self, message, magic_token):
        """Return (program, args) parsed from *message*.

        Private messages carry no magic token; channel invocations have it
        stripped before splitting.
        """
        if self.private_message:
            parts = message.split()
        else:
            # channel invocation: drop the leading magic token first
            parts = message[len(magic_token):].split()
        return parts[0], parts[1:]

    def __repr__(self):
        return "program: %s, args: %s" % (self.program, self.args)
class IrcBot(irc.bot.SingleServerIRCBot):
    """IRC bot that dispatches magic-token messages to Giotto programs."""

    def __init__(self, config):
        if not config['host']:
            raise SystemExit('Error: IRC controller needs to be configured with a hostname')
        if not config['nick']:
            raise SystemExit('Error: IRC controller needs to be configured with a nick')
        print("Connecting to %s:%s as %s" % (config['host'],config['port'], config['nick']))
        irc.bot.SingleServerIRCBot.__init__(
            self,
            [(config['host'],config['port'])],
            config['nick'],
            config['realname']
        )
        channels = config['channels']
        if channels:
            self.channel = channels
            print("Joining Channels: %s" % channels)
        self.config = config

    def on_nicknameinuse(self, connection, event):
        # BUG FIX: get_nickname is a method; the original concatenated the
        # bound method object itself with "_", raising TypeError.
        connection.nick(connection.get_nickname() + "_")

    def on_welcome(self, connection, event):
        # Guard: `self.channel` is only set when channels were configured.
        if getattr(self, 'channel', None):
            connection.join(self.channel)

    def on_privmsg(self, connection, event):
        self.process_message(connection, event)

    def on_pubmsg(self, connection, event):
        # Ignore channel chatter that does not start with the magic token.
        if not event.arguments[0].startswith(self.config['magic_token']):
            return
        self.process_message(connection, event)

    def process_message(self, connection, event):
        """Run the requested program and reply (errors go back in red)."""
        request = IRCRequest(
            event,
            self.config['magic_token'],
            connection.get_nickname()
        )
        try:
            controller = IRCController(request, self.config['manifest'], self.config['model_mock'])
            result = controller.get_response()
        except Exception as exc:
            cls = exc.__class__.__name__
            # \x0304 is the mIRC color code for red.
            connection.privmsg(request.sent_to, "\x0304%s - %s: %s" % (request.program, cls, exc))
            traceback.print_exc(file=sys.stdout)
        else:
            msg = "%s: %s" % (request.username, result['response'])
            # Reply privately to private invocations, publicly otherwise.
            target = request.username if request.private_message else request.sent_to
            for m in msg.split('\n'):
                connection.privmsg(target, m)
def listen(manifest, config, model_mock=False):
    """
    IRC listening process.
    """
    config.update(manifest=manifest, model_mock=model_mock)
    bot = IrcBot(config)
    try:
        bot.start()
    except KeyboardInterrupt:
        # Ctrl-C is the normal way to stop the bot; exit quietly.
        pass
|
priestc/giotto
|
giotto/controllers/irc_.py
|
Python
|
bsd-2-clause
| 5,822
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
import webnotes
from webnotes import msgprint, _
import json
from webnotes.utils import flt, cstr, nowdate, add_days, cint
from webnotes.defaults import get_global_default
from webnotes.utils.email_lib import sendmail
# Raised when the session user lacks Warehouse User rights for a warehouse.
class UserNotAllowedForWarehouse(webnotes.ValidationError): pass
# Raised when a warehouse belongs to a different company than the transaction.
class InvalidWarehouseCompany(webnotes.ValidationError): pass
def get_stock_balance_on(warehouse, posting_date=None):
    """Return the total stock value of *warehouse* as on *posting_date*.

    Defaults to today. The newest Stock Ledger Entry per item carries the
    running stock value, so the warehouse total is the sum of each item's
    most recent stock_value on or before the date.
    """
    if not posting_date: posting_date = nowdate()
    stock_ledger_entries = webnotes.conn.sql("""
        SELECT
            item_code, stock_value
        FROM
            `tabStock Ledger Entry`
        WHERE
            warehouse=%s AND posting_date <= %s
        ORDER BY timestamp(posting_date, posting_time) DESC, name DESC
    """, (warehouse, posting_date), as_dict=1)
    sle_map = {}
    for sle in stock_ledger_entries:
        # Entries arrive newest-first; setdefault keeps only the first
        # (latest) stock_value seen per item.
        sle_map.setdefault(sle.item_code, flt(sle.stock_value))
    return sum(sle_map.values())
def get_latest_stock_balance():
    """Return {warehouse: {item_code: stock_value}} from the Bin table.

    Bin rows hold the current stock snapshot per item/warehouse pair.
    """
    bin_map = {}
    for d in webnotes.conn.sql("""SELECT item_code, warehouse, stock_value as stock_value
        FROM tabBin""", as_dict=1):
        bin_map.setdefault(d.warehouse, {}).setdefault(d.item_code, flt(d.stock_value))
    return bin_map
def get_bin(item_code, warehouse):
    """Fetch the Bin controller for (item_code, warehouse), creating the
    Bin record first if it does not exist yet."""
    bin_name = webnotes.conn.get_value("Bin", {"item_code": item_code, "warehouse": warehouse})
    if bin_name:
        from webnotes.model.code import get_obj
        return get_obj('Bin', bin_name)
    # No Bin yet: create one. Permissions are bypassed because Bin is a
    # system-level bookkeeping record, not user data.
    bin_wrapper = webnotes.bean([{
        "doctype": "Bin",
        "item_code": item_code,
        "warehouse": warehouse,
    }])
    bin_wrapper.ignore_permissions = 1
    bin_wrapper.insert()
    return bin_wrapper.make_controller()
def update_bin(args):
    """Update the Bin snapshot for args["item_code"] / args["warehouse"].

    Only stock items have Bins; non-stock items are reported via msgprint
    and skipped. Returns the updated Bin object, or None when skipped.
    """
    is_stock_item = webnotes.conn.get_value('Item', args.get("item_code"), 'is_stock_item')
    if is_stock_item == 'Yes':
        bin = get_bin(args.get("item_code"), args.get("warehouse"))
        # Commented-out errprint debug lines removed (dead code).
        bin.update_stock(args)
        return bin
    else:
        msgprint("[Stock Update] Ignored %s since it is not a stock item"
            % args.get("item_code"))
def validate_end_of_life(item_code, end_of_life=None, verbose=1):
    """Complain when *item_code* is past its end-of-life date.

    *end_of_life* may be passed in to avoid a DB lookup; *verbose*
    controls whether _msgprint raises or only displays the message.
    """
    if not end_of_life:
        end_of_life = webnotes.conn.get_value("Item", item_code, "end_of_life")
    from webnotes.utils import getdate, now_datetime, formatdate
    if end_of_life and getdate(end_of_life) <= now_datetime().date():
        msg = (_("Item") + " %(item_code)s: " + _("reached its end of life on") + \
            " %(date)s. " + _("Please check") + ": %(end_of_life_label)s " + \
            "in Item master") % {
                "item_code": item_code,
                "date": formatdate(end_of_life),
                "end_of_life_label": webnotes.get_doctype("Item").get_label("end_of_life")
            }
        _msgprint(msg, verbose)
def validate_is_stock_item(item_code, is_stock_item=None, verbose=1):
    """Complain when *item_code* is not flagged as a stock item.

    *is_stock_item* may be supplied to skip the lookup; *verbose* decides
    whether _msgprint raises or only displays.
    """
    if not is_stock_item:
        is_stock_item = webnotes.conn.get_value("Item", item_code, "is_stock_item")
    if is_stock_item != "Yes":
        message = (_("Item") + " %(item_code)s: " + _("is not a Stock Item")) % {
            "item_code": item_code,
        }
        _msgprint(message, verbose)
def validate_cancelled_item(item_code, docstatus=None, verbose=1):
    """Complain when *item_code* refers to a cancelled Item (docstatus 2)."""
    if docstatus is None:
        docstatus = webnotes.conn.get_value("Item", item_code, "docstatus")
    if docstatus == 2:
        message = (_("Item") + " %(item_code)s: " + _("is a cancelled Item")) % {
            "item_code": item_code,
        }
        _msgprint(message, verbose)
def _msgprint(msg, verbose):
    """Show *msg* to the user (verbose) or raise it as a ValidationError.

    Shared helper for the validate_* functions above.
    """
    if verbose:
        msgprint(msg, raise_exception=True)
    else:
        # `raise Exc, msg` is Python-2-only syntax; the call form is
        # equivalent and also valid on Python 3.
        raise webnotes.ValidationError(msg)
def get_incoming_rate(args):
    """Get Incoming Rate based on valuation method

    Priority: serial numbers (avg purchase rate) > BOM (cost per unit) >
    the item's valuation method (FIFO or Moving Average) evaluated
    against the previous Stock Ledger Entry.
    """
    from stock.stock_ledger import get_previous_sle
    in_rate = 0
    if args.get("serial_no"):
        in_rate = get_avg_purchase_rate(args.get("serial_no"))
    elif args.get("bom_no"):
        # Rate implied by the active, submitted BOM: total cost / quantity.
        result = webnotes.conn.sql("""select ifnull(total_cost, 0) / ifnull(quantity, 1)
            from `tabBOM` where name = %s and docstatus=1 and is_active=1""", args.get("bom_no"))
        in_rate = result and flt(result[0][0]) or 0
    else:
        valuation_method = get_valuation_method(args.get("item_code"))
        previous_sle = get_previous_sle(args)
        if valuation_method == 'FIFO':
            if not previous_sle:
                return 0.0
            # stock_queue is stored as a JSON list of [qty, rate] batches.
            previous_stock_queue = json.loads(previous_sle.get('stock_queue', '[]') or '[]')
            in_rate = previous_stock_queue and \
                get_fifo_rate(previous_stock_queue, args.get("qty") or 0) or 0
        elif valuation_method == 'Moving Average':
            in_rate = previous_sle.get('valuation_rate') or 0
    return in_rate
def get_avg_purchase_rate(serial_nos):
    """get average value of serial numbers"""
    # Split/validate first; the IN clause is built from one bound
    # placeholder per serial number, so values are escaped by the driver.
    serial_nos = get_valid_serial_nos(serial_nos)
    return flt(webnotes.conn.sql("""select avg(ifnull(purchase_rate, 0)) from `tabSerial No`
        where name in (%s)""" % ", ".join(["%s"] * len(serial_nos)),
        tuple(serial_nos))[0][0])
def get_valuation_method(item_code):
    """Return the item's valuation method, falling back to the global
    default and finally to FIFO."""
    return (webnotes.conn.get_value('Item', item_code, 'valuation_method')
        or get_global_default('valuation_method') or "FIFO")
def get_fifo_rate(previous_stock_queue, qty):
    """Return the FIFO valuation rate for *qty* against the stock queue.

    *previous_stock_queue* is a list of [qty, rate] batches, oldest first.
    For incoming stock (qty >= 0) the queue's weighted-average rate is
    returned. For outgoing stock (qty < 0) batches are consumed from the
    head of the queue (mutating it in place) and the average rate of the
    quantity actually consumed is returned.
    """
    if qty >= 0:
        total = sum(f[0] for f in previous_stock_queue)
        return total and sum(f[0] * f[1] for f in previous_stock_queue) / flt(total) or 0.0
    else:
        outgoing_cost = 0
        qty_to_pop = abs(qty)
        while qty_to_pop and previous_stock_queue:
            batch = previous_stock_queue[0]
            if 0 < batch[0] <= qty_to_pop:
                # Not enough (or exactly enough) in this batch: consume it
                # fully and drop it from the queue.
                outgoing_cost += flt(batch[0]) * flt(batch[1])
                qty_to_pop -= batch[0]
                previous_stock_queue.pop(0)
            else:
                # Current batch covers the remainder: consume partially.
                outgoing_cost += flt(qty_to_pop) * flt(batch[1])
                batch[0] -= qty_to_pop
                qty_to_pop = 0
        # BUG FIX: the original returned `outgoing_cost / abs(qty) - qty_to_pop`,
        # which (by operator precedence) subtracts the unfilled remainder
        # from the rate instead of dividing by the quantity actually
        # consumed. Guard against an empty queue (nothing consumed).
        consumed = abs(qty) - qty_to_pop
        return outgoing_cost / flt(consumed) if consumed else 0.0
def get_valid_serial_nos(sr_nos, qty=0, item_code=''):
    """split serial nos, validate and return list of valid serial nos

    Serial numbers may be separated by commas or newlines. Duplicates
    raise; when *qty* is given, the count must equal abs(qty).
    """
    # TODO: remove duplicates in client side
    valid_serial_nos = []
    for val in cstr(sr_nos).strip().replace(',', '\n').split('\n'):
        if not val:
            continue
        val = val.strip()
        if val in valid_serial_nos:
            msgprint("You have entered duplicate serial no: '%s'" % val, raise_exception=1)
        else:
            valid_serial_nos.append(val)
    if qty and len(valid_serial_nos) != abs(qty):
        msgprint("Please enter serial nos for "
            + cstr(abs(qty)) + " quantity against item code: " + item_code,
            raise_exception=1)
    return valid_serial_nos
def get_warehouse_list(doctype, txt, searchfield, start, page_len, filters):
    """used in search queries

    Returns warehouses matching *txt* that the session user may access,
    as a list of single-element lists (the search-widget row format).

    SECURITY FIX: the original interpolated *txt* straight into the SQL
    string ("like '%%%s%%'" % txt), allowing SQL injection from the search
    box; it is now passed as a bound parameter.
    """
    wlist = []
    warehouses = webnotes.conn.sql_list("""select name from tabWarehouse
        where name like %s""", "%%%s%%" % txt)
    for w in warehouses:
        if webnotes.session.user=="Administrator":
            # Administrator sees every warehouse.
            wlist.append([w])
        else:
            warehouse_users = webnotes.conn.sql_list("""select user from `tabWarehouse User`
                where parent=%s""", w)
            # Unrestricted warehouses (no user list) are visible to all;
            # otherwise the session user must appear on the list.
            if not warehouse_users or webnotes.session.user in warehouse_users:
                wlist.append([w])
    return wlist
def validate_warehouse_user(warehouse):
    """Raise UserNotAllowedForWarehouse unless the session user may post
    to *warehouse*. Administrator, empty warehouse, and warehouses with
    no Warehouse User restrictions always pass."""
    if webnotes.session.user=="Administrator" or not warehouse:
        return
    warehouse_users = [p[0] for p in webnotes.conn.sql("""select user from `tabWarehouse User`
        where parent=%s""", warehouse)]
    if warehouse_users and not (webnotes.session.user in warehouse_users):
        webnotes.throw(_("Not allowed entry in Warehouse") \
            + ": " + warehouse, UserNotAllowedForWarehouse)
def validate_warehouse_company(warehouse, company):
    """Raise InvalidWarehouseCompany when *warehouse* belongs to a
    different company than *company*."""
    warehouse_company = webnotes.conn.get_value("Warehouse", warehouse, "company")
    if warehouse_company and warehouse_company != company:
        message = _("Warehouse does not belong to company.") + " (" + warehouse + ", " + company +")"
        webnotes.msgprint(message, raise_exception=InvalidWarehouseCompany)
def get_sales_bom_buying_amount(item_code, warehouse, voucher_type, voucher_no, voucher_detail_no,
    stock_ledger_entries, item_sales_bom):
    """Sum the buying amount of every BOM child item of a sales-BOM row."""
    total = 0.0
    for bom_item in item_sales_bom[item_code]:
        if bom_item.get("parent_detail_docname") != voucher_detail_no:
            continue
        child_entries = stock_ledger_entries.get((bom_item.item_code, warehouse), [])
        total += get_buying_amount(voucher_type, voucher_no, voucher_detail_no, child_entries)
    return total
def get_buying_amount(voucher_type, voucher_no, item_row, stock_ledger_entries):
    """Derive the buying (valuation) amount of one voucher row.

    IMP NOTE: *stock_ledger_entries* must already be filtered by
    item_code/warehouse and sorted by posting_date desc, posting_time
    desc. The amount is the drop in running stock value caused by the
    matching ledger entry.
    """
    for idx, sle in enumerate(stock_ledger_entries):
        matches = (sle.voucher_type == voucher_type and sle.voucher_no == voucher_no
            and sle.voucher_detail_no == item_row)
        if not matches:
            continue
        if len(stock_ledger_entries) > idx + 1:
            previous_stock_value = flt(stock_ledger_entries[idx + 1].stock_value)
        else:
            previous_stock_value = 0.0
        return previous_stock_value - flt(sle.stock_value)
    return 0.0
def reorder_item():
    """ Reorder item if stock reaches reorder level

    Scans every Bin of purchasable / sub-contractable stock items and,
    when projected qty drops below the reorder level, queues an entry
    grouped by request type and company, then hands the whole batch to
    create_material_request(). Controlled by the 'auto_indent' setting.
    """
    if getattr(webnotes.local, "auto_indent", None) is None:
        webnotes.local.auto_indent = cint(webnotes.conn.get_value('Stock Settings', None, 'auto_indent'))
    if webnotes.local.auto_indent:
        material_requests = {}
        bin_list = webnotes.conn.sql("""select item_code, warehouse, projected_qty
            from tabBin where ifnull(item_code, '') != '' and ifnull(warehouse, '') != ''
            and exists (select name from `tabItem`
                where `tabItem`.name = `tabBin`.item_code and
                is_stock_item='Yes' and (is_purchase_item='Yes' or is_sub_contracted_item='Yes') and
                (ifnull(end_of_life, '')='' or end_of_life > now()))""", as_dict=True)
        for bin in bin_list:
            #check if re-order is required
            # Warehouse-specific reorder settings (Item Reorder) override
            # the item-level defaults.
            item_reorder = webnotes.conn.get("Item Reorder",
                {"parent": bin.item_code, "warehouse": bin.warehouse})
            if item_reorder:
                reorder_level = item_reorder.warehouse_reorder_level
                reorder_qty = item_reorder.warehouse_reorder_qty
                material_request_type = item_reorder.material_request_type or "Purchase"
            else:
                reorder_level, reorder_qty = webnotes.conn.get_value("Item", bin.item_code,
                    ["re_order_level", "re_order_qty"])
                material_request_type = "Purchase"
            if flt(reorder_level) and flt(bin.projected_qty) < flt(reorder_level):
                # Order at least enough to get back to the reorder level.
                if flt(reorder_level) - flt(bin.projected_qty) > flt(reorder_qty):
                    reorder_qty = flt(reorder_level) - flt(bin.projected_qty)
                # Company: warehouse's company, else the default, else any.
                company = webnotes.conn.get_value("Warehouse", bin.warehouse, "company") or \
                    webnotes.defaults.get_defaults()["company"] or \
                    webnotes.conn.sql("""select name from tabCompany limit 1""")[0][0]
                material_requests.setdefault(material_request_type, webnotes._dict()).setdefault(
                    company, []).append(webnotes._dict({
                        "item_code": bin.item_code,
                        "warehouse": bin.warehouse,
                        "reorder_qty": reorder_qty
                    })
                )
        create_material_request(material_requests)
def create_material_request(material_requests):
    """ Create indent on reaching reorder level

    *material_requests* is {request_type: {company: [items]}} as built by
    reorder_item(). One Material Request is created and submitted per
    (request_type, company); failures are collected per document and
    reported at the end rather than aborting the whole run.
    """
    mr_list = []
    defaults = webnotes.defaults.get_defaults()
    exceptions_list = []
    from accounts.utils import get_fiscal_year
    current_fiscal_year = get_fiscal_year(nowdate())[0] or defaults.fiscal_year
    for request_type in material_requests:
        for company in material_requests[request_type]:
            try:
                items = material_requests[request_type][company]
                if not items:
                    continue
                mr = [{
                    "doctype": "Material Request",
                    "company": company,
                    "fiscal_year": current_fiscal_year,
                    "transaction_date": nowdate(),
                    "material_request_type": request_type
                }]
                for d in items:
                    item = webnotes.doc("Item", d.item_code)
                    mr.append({
                        "doctype": "Material Request Item",
                        "parenttype": "Material Request",
                        "parentfield": "indent_details",
                        "item_code": d.item_code,
                        # schedule by the item's configured lead time
                        "schedule_date": add_days(nowdate(),cint(item.lead_time_days)),
                        "uom": item.stock_uom,
                        "warehouse": d.warehouse,
                        "item_name": item.item_name,
                        "description": item.description,
                        "item_group": item.item_group,
                        "qty": d.reorder_qty,
                        "brand": item.brand,
                    })
                mr_bean = webnotes.bean(mr)
                mr_bean.insert()
                mr_bean.submit()
                mr_list.append(mr_bean)
            except:
                # Collect the framework's pending messages (or a traceback)
                # for this request and continue with the next one.
                if webnotes.local.message_log:
                    exceptions_list.append([] + webnotes.local.message_log)
                    webnotes.local.message_log = []
                else:
                    exceptions_list.append(webnotes.getTraceback())
    if mr_list:
        if getattr(webnotes.local, "reorder_email_notify", None) is None:
            webnotes.local.reorder_email_notify = cint(webnotes.conn.get_value('Stock Settings', None,
                'reorder_email_notify'))
        if(webnotes.local.reorder_email_notify):
            send_email_notification(mr_list)
    if exceptions_list:
        notify_errors(exceptions_list)
def send_email_notification(mr_list):
    """ Notify user about auto creation of indent

    Emails an HTML summary of the auto-created Material Requests to every
    enabled Purchase Manager / Material Manager profile.
    """
    email_list = webnotes.conn.sql_list("""select distinct r.parent
        from tabUserRole r, tabProfile p
        where p.name = r.parent and p.enabled = 1 and p.docstatus < 2
        and r.role in ('Purchase Manager','Material Manager')
        and p.name not in ('Administrator', 'All', 'Guest')""")
    msg="""<h3>Following Material Requests has been raised automatically \
based on item reorder level:</h3>"""
    for mr in mr_list:
        # One table per Material Request, one row per requested item.
        msg += "<p><b><u>" + mr.doc.name + """</u></b></p><table class='table table-bordered'><tr>
        <th>Item Code</th><th>Warehouse</th><th>Qty</th><th>UOM</th></tr>"""
        for item in mr.doclist.get({"parentfield": "indent_details"}):
            msg += "<tr><td>" + item.item_code + "</td><td>" + item.warehouse + "</td><td>" + \
                cstr(item.qty) + "</td><td>" + cstr(item.uom) + "</td></tr>"
        msg += "</table>"
    sendmail(email_list, subject='Auto Material Request Generation Notification', msg = msg)
def notify_errors(exceptions_list):
    """Email the collected reorder failures to all System Managers.

    *exceptions_list* entries are either message-log lists or traceback
    strings gathered by create_material_request().
    """
    subject = "[Important] [ERPNext] Error(s) while creating Material Requests based on Re-order Levels"
    msg = """Dear System Manager,
An error occured for certain Items while creating Material Requests based on Re-order level.
Please rectify these issues:
---
%s
---
Regards,
Administrator""" % ("\n\n".join(["\n".join(msg) for msg in exceptions_list]),)
    from webnotes.profile import get_system_managers
    sendmail(get_system_managers(), subject=subject, msg=msg)
|
Tejal011089/med2-app
|
stock/utils.py
|
Python
|
agpl-3.0
| 14,450
|
from __future__ import division
'''
consensus.py
This is the collation and aggregation code for the Radio Galaxy Zoo project. The main
intent of the code is to take in the raw classifications generated by Ouroboros
(as a MongoDB file), combine classifications by independent users, and generate
a single consensus answer. The results are stored in both MongoDB and as a
set of static CSV output files.
Originally developed by Kyle Willett (University of Minnesota), 2014-2016.
'''
# Local RGZ modules
import collinearity
from load_contours import get_contours,make_pathdict
# Packges (installed by default with Python)
import datetime
import operator
from collections import Counter
import cStringIO
import urllib
import json
import os.path
import time
import shutil
import logging
# Other packages (may need to install separately)
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.pyplot import cm
from matplotlib.path import Path
import matplotlib.patches as patches
from scipy.ndimage.filters import maximum_filter
from scipy import stats
from scipy.ndimage.morphology import generate_binary_structure, binary_erosion
from scipy.linalg.basic import LinAlgError
from astropy.io import fits
from pymongo import ASCENDING
from pymongo import MongoClient
from PIL import Image
# Load data from MongoDB; collections are used in almost every module, so make them global variables.
client = MongoClient('localhost', 27017)
db = client['radio']
# Select which version of the catalog to use
version = '_2019-05-06'
subjects = db['radio_subjects'] # subjects = images
classifications = db['radio_classifications'] # classifications = classifications of each subject per user
consensus = db['consensus{}'.format(version)] # consensus = output of this program
user_weights = db['user_weights{}'.format(version)]
# Log file name for this catalog version's aggregation run.
logfile = 'consensus{}.log'.format(version)
# Parameters for the RGZ project
main_release_date = datetime.datetime(2013, 12, 17, 0, 0, 0, 0)
# Different image scales and sizes, depending on the survey being analyzed
img_params = {
'first':{
'IMG_HEIGHT_OLD':424.0 , # number of pixels in the IR coordinate system (as recorded in Mongo) along the y axis
'IMG_WIDTH_OLD':424.0 , # number of pixels in the IR coordinate system (as recorded in Mongo) along the x axis
'IMG_HEIGHT_NEW':500.0 , # number of pixels in the downloaded JPG image along the y axis
'IMG_WIDTH_NEW':500.0 , # number of pixels in the downloaded JPG image along the x axis
'FITS_HEIGHT':132.0 , # number of pixels in the FITS image along the y axis (radio only)
'FITS_WIDTH':132.0 , # number of pixels in the FITS image along the y axis (radio only)
'PIXEL_SIZE':1.3748 # the number of arcseconds per pixel in the radio FITS image (from CDELT1)
},
'atlas':{
'IMG_HEIGHT_OLD':424.0 , # number of pixels in the IR coordinate system (as recorded in Mongo) along the y axis
'IMG_WIDTH_OLD':424.0 , # number of pixels in the IR coordinate system (as recorded in Mongo) along the x axis
'IMG_HEIGHT_NEW':500.0 , # number of pixels in the downloaded PNG image along the y axis
'IMG_WIDTH_NEW':500.0 , # number of pixels in the downloaded PNG image along the x axis
'FITS_HEIGHT':201.0 , # number of pixels in the FITS image along the y axis (both IR and radio)
'FITS_WIDTH':201.0 , # number of pixels in the FITS image along the x axis (both IR and radio)
'PIXEL_SIZE':0.6000 # the number of arcseconds per pixel in the radio FITS image (from CDELT1)
}
}
# These fields are annoyingly stored in the same raw data structure as the actual
# annotations of the image. They're removed from all analysis.
bad_keys = ('finished_at','started_at','user_agent','lang','pending')
# Local directory paths. Add paths below depending on your local source for:
# - raw FITS images of radio data (data_path)
# - PNG images of radio and IR subjects (data_path)
# - contour data for radio flux (data_path)
# - your current working directory (rgz_path)
def determine_paths(paths):
    """Return the first path in *paths* that exists on this machine.

    Falls back to None (with a console warning) when none of the
    candidate directories are present.
    """
    for path in paths:
        if os.path.exists(path):
            # First existing candidate wins, as in the original.
            return path
    # print(single_argument) behaves identically on Python 2 and 3,
    # unlike the original Python-2-only print statements.
    print("Unable to find the hardcoded local path:")
    print(paths)
    return None
# Resolve machine-specific locations: the first existing candidate wins.
rgz_path = determine_paths(('/data/tabernacle/larry/RGZdata/rgz-analysis','/Users/willettk/Astronomy/Research/GalaxyZoo/rgz-analysis'))
#rgz_path = '/home/garon/Documents/RGZdata/rgz-analysis'
data_path = determine_paths(('/data/tabernacle/larry/RGZdata/rawdata','/data/extragal/willett','/Volumes/REISEPASS','/Volumes/3TB'))
# Output directory for generated plots, under the raw-data volume.
plot_path = "{0}/rgz/plots".format(data_path)
# Lookup of contour/FITS file locations per subject (see load_contours).
pathdict = make_pathdict()
########################################
# Begin the actual code
########################################
def checksum(zid,experts_only=False,excluded=[],no_anonymous=False,include_peak_data=True,weights=0,scheme='scaling'):
# Find the consensus for all users who have classified a subject
sub = subjects.find_one({'zooniverse_id':zid})
imgid = sub['_id']
survey = sub['metadata']['survey']
# Classifications for this subject after launch date
class_params = {"subject_ids": imgid, "updated_at": {"$gt": main_release_date}}
# Only get the consensus classification for the science team members
if experts_only:
class_params['expert'] = True
# If comparing a particular volunteer (such as an expert), don't include self-comparison
if len(excluded) > 0:
class_params['user_name'] = {"$nin":excluded}
# To exclude anonymous classifications (registered users only):
if no_anonymous:
if class_params.has_key('user_name'):
class_params['user_name']["$exists"] = True
else:
class_params['user_name'] = {"$exists":True}
_c = classifications.find(class_params)
# Empty dicts and lists
cdict = {}
unique_users = set()
clen_start = 0
clist_all = []
listcount = []
# Compute the most popular combination for each NUMBER of galaxies identified in image
for c in _c:
clist_all.append(c)
clen_start += 1
# Skip classification if they already did one. This assumes the latest classification
# is always the best (or at least the one that will be recorded here).
try:
user_name = c['user_name']
except KeyError:
user_name = 'Anonymous'
# Check the answer, as long as they haven't already done one.
if user_name not in unique_users or user_name is 'Anonymous':
unique_users.add(user_name)
listcount.append(True)
sumlist = [] # List of the checksums over all possible combinations
# Only find data that was an actual marking, not metadata
goodann = [x for x in c['annotations'] if (x.keys()[0] not in bad_keys)]
n_galaxies = len(goodann)
# There must be at least one galaxy!
if n_galaxies > 0:
for idx,ann in enumerate(goodann):
xmaxlist = []
try:
radio_comps = ann['radio']
# loop over all the radio components within an galaxy
if radio_comps != 'No Contours':
for rc in radio_comps:
xmaxlist.append(float(radio_comps[rc]['xmax']))
# or make the value -99 if there are no contours
else:
xmaxlist.append(-99)
except KeyError:
# No radio data for this classification
xmaxlist.append(-99)
# To create a unique ID for the combination of radio components,
# take the product of all the xmax coordinates and sum them together
# as a crude hash. This is not an ideal method and is potentially
# subject to rounding errors - could be significantly improved.
product = reduce(operator.mul, xmaxlist, 1)
sumlist.append(round(product,3))
checksum = sum(sumlist)
else:
# No galaxies in this classification
checksum = -99
c['checksum'] = checksum
c['n_galaxies'] = n_galaxies
# Insert checksum into dictionary with number of galaxies as the index
if cdict.has_key(n_galaxies):
cdict[n_galaxies].append(checksum)
else:
cdict[n_galaxies] = [checksum]
else:
listcount.append(False)
# Remove duplicates and classifications for "No Object"
clist = [c for lc,c in zip(listcount,clist_all) if lc and c['checksum'] != -99]
clist_debugged = []
for ix, c in enumerate(clist):
if ix and 'user_name' not in c and 'user_name' not in clist[ix-1]:
c0 = clist[ix-1]
if ([anno for anno in c['annotations'] if 'ir' in anno] != [anno for anno in c0['annotations'] if 'ir' in anno]) or \
(abs(c['created_at']-c0['created_at']).seconds > 30):
clist_debugged.append(c)
else:
cdict[c['n_galaxies']].remove(c['checksum'])
else:
clist_debugged.append(c)
clist = clist_debugged
# Implement the weighting scheme, if desired. Simply add duplicate classifications
# for users who have been upweighted based on their agreement with the science team
# on gold-standard subjects.
if weights > 0:
weighted_c = []
for c in clist:
if c.has_key('user_name'):
try:
weight = user_weights.find_one({'user_name':c['user_name']})['weight']
except TypeError:
weight = 0
if scheme == 'threshold' and weight == 1:
for i in range(weights):
weighted_c.append(c)
cdict[c['n_galaxies']].append(c['checksum'])
elif scheme == 'scaling' and weight > 0:
for i in range(weight):
weighted_c.append(c)
cdict[c['n_galaxies']].append(c['checksum'])
if len(weighted_c) > 0:
clist.extend(weighted_c)
maxval=0
mc_checksum = 0.
# Find the number of sources in the image that has the highest number of consensus classifications
for k,v in cdict.iteritems():
mc = Counter(v).most_common()
# Check if the most common selection coordinate was for no radio contours
if mc[0][0] == -99.0:
if len(mc) > 1:
# If so, take the selection with the next-highest number of counts
mc_best = mc[1]
else:
continue
# Selection with the highest number of counts
else:
mc_best = mc[0]
# If the new selection has more counts than the previous one, choose it as the best match;
# if tied or less than this, remain with the current consensus number of galaxies
if mc_best[1] > maxval:
maxval = mc_best[1]
mc_checksum = mc_best[0]
# Get a galaxy that matches the checksum so we can record the annotation data
try:
cmatch = next(i for i in clist if i['checksum'] == mc_checksum)
except StopIteration:
# Necessary for objects like ARG0003par;
# one classifier recorded 22 "No IR","No Contours" in a short space.
# Shouldn't happen (some sort of system glitch), but catch it if it does.
print 'No non-zero classifications recorded for {0}'.format(zid)
logging.info('No non-zero classifications recorded for {0}'.format(zid))
return None
# Get the annotation data for galaxies that match the consensus
goodann = [x for x in cmatch['annotations'] if x.keys()[0] not in bad_keys]
# Find the sum of the xmax coordinates for each galaxy. This gives the index to search on.
cons = {}
cons['zid'] = zid
cons['source'] = sub['metadata']['source']
cons['survey'] = survey
ir_x,ir_y = {},{}
cons['answer'] = {}
cons['n_votes'] = maxval
cons['n_total'] = len(clist)
# This will be where we store the consensus parameters
answer = cons['answer']
# Loop over the annotations and record the parameters of the bounding boxes
for k,gal in enumerate(goodann):
xmax_temp = []
bbox_temp = []
try:
for v in gal['radio'].itervalues():
xmax_temp.append(float(v['xmax']))
bbox_temp.append((v['xmax'],v['ymax'],v['xmin'],v['ymin']))
checksum2 = round(sum(xmax_temp),3)
answer[checksum2] = {}
answer[checksum2]['ind'] = k
answer[checksum2]['xmax'] = xmax_temp
answer[checksum2]['bbox'] = bbox_temp
except KeyError:
print gal, zid
logging.warning((gal, zid))
except AttributeError:
print 'No Sources, No IR recorded for {0}'.format(zid)
logging.warning('No Sources, No IR recorded for {0}'.format(zid))
# Make empty copy of next dict in same loop
ir_x[k] = []
ir_y[k] = []
# Now loop over all sets of classifications to get their IR counterparts
for c in clist:
if c['checksum'] == mc_checksum:
annlist = [ann for ann in c['annotations'] if ann.keys()[0] not in bad_keys]
for ann in annlist:
if 'ir' in ann.keys():
# Find the index k that this corresponds to
try:
xmax_checksum = round(sum([float(ann['radio'][a]['xmax']) for a in ann['radio']]),3)
except TypeError:
xmax_checksum = -99
try:
k = answer[xmax_checksum]['ind']
if ann['ir'] == 'No Sources':
ir_x[k].append(-99)
ir_y[k].append(-99)
else:
# Only takes the first IR source if there is more than one.
ir_x[k].append(float(ann['ir']['0']['x']))
ir_y[k].append(float(ann['ir']['0']['y']))
except KeyError:
print '"No radio" still appearing as valid consensus option.'
logging.warning('"No radio" still appearing as valid consensus option.')
# Perform a kernel density estimate on the data for each galaxy to find the IR peak (in pixel coordinates)
scale_ir = img_params[survey]['IMG_HEIGHT_NEW'] * 1./img_params[survey]['IMG_HEIGHT_OLD']
peak_data = []
# Remove any empty IR peaks recorded
for (xk,xv),(yk,yv) in zip(ir_x.iteritems(),ir_y.iteritems()):
if len(xv) == 0:
ir_x.pop(xk)
if len(yv) == 0:
ir_y.pop(yk)
# Make sure that we have the same number of points for the x- and y-coordinates of the IR peaks
assert len(ir_x) == len(ir_y),'Lengths of ir_x ({0:d}) and ir_y ({1:d}) are not the same'.format(len(ir_x),len(ir_y))
for (xk,xv),(yk,yv) in zip(ir_x.iteritems(),ir_y.iteritems()):
if len(xv) == 0:
irx
pd = {}
# Convert into the same scale as the radio coordinates
x_exists = [xt * scale_ir for xt in xv if xt != -99.0]
y_exists = [yt * scale_ir for yt in yv if yt != -99.0]
x_all = [xt * scale_ir for xt in xv]
y_all = [yt * scale_ir for yt in yv]
# Find the most common IR coordinate. We want to skip the next steps
# if they said there was no IR counterpart (-99,-99)
ir_Counter = Counter([(xx,yy) for xx,yy in zip(xv,yv)])
most_common_ir = ir_Counter.most_common(1)[0][0]
xmin = 1.
xmax = img_params[survey]['IMG_HEIGHT_NEW']
ymin = 1.
ymax = img_params[survey]['IMG_WIDTH_NEW']
# Check if there are enough IR points to attempt a kernel density estimate
if len(Counter(x_exists)) > 2 and len(Counter(y_exists)) > 2 and most_common_ir != (-99,-99):
# X,Y = grid of uniform coordinates over the IR pixel plane
X, Y = np.mgrid[xmin:xmax, ymin:ymax]
positions = np.vstack([X.ravel(), Y.ravel()])
try:
values = np.vstack([x_exists, y_exists])
except ValueError:
# Breaks on the tutorial subject. Find out why len(x) != len(y)
print zid
print 'Length of IR x array: {0:d}; Length of IR y array: {1:d}'.format(len(x_exists),len(y_exists))
logging.warning((zid, 'Length of IR x array: {0:d}; Length of IR y array: {1:d}'.format(len(x_exists),len(y_exists))))
try:
# Compute the kernel density estimate
kernel = stats.gaussian_kde(values)
except LinAlgError:
print 'LinAlgError in KD estimation for {0}'.format(zid,x_exists,y_exists)
logging.warning('LinAlgError in KD estimation for {0}'.format(zid,x_exists,y_exists))
for k,v in answer.iteritems():
if v['ind'] == xk:
xpeak, ypeak = np.mean(x_exists), np.mean(y_exists)
answer[k]['ir'] = (xpeak, ypeak)
# Count the number of clicks within 3"
agreed = 0
for x0, y0 in zip(x_exists, y_exists):
if np.sqrt(np.square(xpeak-x0)+np.square(ypeak-y0)) <= (xmax-xmin)/60.:
agreed += 1
answer[k]['n_ir'] = agreed
answer[k]['ir_level'] = 1.0*agreed/len(xv)
answer[k]['ir_flag'] = 0
continue
# Even if there are more than 2 sets of points, if they are mutually co-linear,
# matrix can't invert and kernel returns NaNs.
kp = kernel(positions)
# Check to see if there are NaNs in the kernel (usually a sign of co-linear points).
if np.isnan(kp).sum() > 0:
acp = collinearity.collinear(x_exists,y_exists)
if len(acp) > 0:
output = 'There are {0:d} unique points for {1} (source no. {2:d} in the field), but all are co-linear; KDE estimate does not work.'.format( \
len(Counter(x_exists)),zid,xk)
else:
output = 'There are NaNs in the KDE for {0} (source no. {1:d} in the field), but points are not co-linear.'.format(zid,xk)
logging.info(output)
for k,v in answer.iteritems():
if v['ind'] == xk:
xpeak, ypeak = np.mean(x_exists), np.mean(y_exists)
answer[k]['ir'] = (xpeak, ypeak)
# Count the number of clicks within 3"
agreed = 0
for x0, y0 in zip(x_exists, y_exists):
if np.sqrt(np.square(xpeak-x0)+np.square(ypeak-y0)) <= (xmax-xmin)/60.:
agreed += 1
answer[k]['n_ir'] = agreed
answer[k]['ir_level'] = 1.0*agreed/len(xv)
answer[k]['ir_flag'] = 0
# Kernel is finite; should be able to get a position
else:
Z = np.reshape(kp.T, X.shape)
# Find the number of peaks in the kernel
# http://stackoverflow.com/questions/3684484/peak-detection-in-a-2d-array
neighborhood = np.ones((10,10))
local_max = maximum_filter(Z, footprint=neighborhood)==Z
background = (Z==0)
eroded_background = binary_erosion(background, structure=neighborhood, border_value=1)
detected_peaks = local_max ^ eroded_background
npeaks = detected_peaks.sum()
pd['X'] = X
pd['Y'] = Y
pd['Z'] = Z
pd['npeaks'] = npeaks
try:
# Peak values in the kernel are what we take as the final IR location
xpeak = float(pd['X'][pd['Z']==pd['Z'].max()][0])
ypeak = float(pd['Y'][pd['Z']==pd['Z'].max()][0])
peak_height = Z[int(xpeak)-1, int(ypeak)-1]
except IndexError:
# Print results to screen if it doesn't match
print pd
print zid, clist
logging.warning((pd, zid, clist))
# For each answer in this image, record the final IR peak
for k,v in answer.iteritems():
if v['ind'] == xk:
answer[k]['ir_peak'] = (xpeak,ypeak)
# Count the number of clicks within 3"
agreed = 0
for x0, y0 in zip(x_exists, y_exists):
if np.sqrt(np.square(xpeak-x0)+np.square(ypeak-y0)) <= (xmax-xmin)/60.:
agreed += 1
answer[k]['n_ir'] = agreed
answer[k]['ir_level'] = 1.0*agreed/len(xv)
answer[k]['ir_flag'] = 1
# Don't write to consensus for serializable JSON object
if include_peak_data:
answer[k]['peak_data'] = pd
answer[k]['ir_x'] = x_exists
answer[k]['ir_y'] = y_exists
# Couldn't attempt a KDE; too few IR points in consensus
else:
# Note: need to actually put a limit in if less than half of users selected IR counterpart.
# Right now it still IDs a source even if only, say, 1/10 users said it was there.
for k,v in answer.iteritems():
if v['ind'] == xk:
# Case 1: multiple users selected IR source, but not enough unique points to pinpoint peak
if most_common_ir != (-99,-99) and len(x_exists) > 0 and len(y_exists) > 0:
xpeak, ypeak = np.mean(x_exists), np.mean(y_exists)
answer[k]['ir'] = (xpeak, ypeak)
# Count the number of clicks within 3"
agreed = 0
for x0, y0 in zip(x_exists, y_exists):
if np.sqrt(np.square(xpeak-x0)+np.square(ypeak-y0)) <= (xmax-xmin)/60.:
agreed += 1
answer[k]['n_ir'] = agreed
answer[k]['ir_level'] = 1.0*agreed/len(xv)
answer[k]['ir_flag'] = 0
# Case 2: most users have selected No Sources
else:
answer[k]['ir'] = (-99,-99)
answer[k]['n_ir'] = xv.count(-99)
answer[k]['ir_level'] = 1.0*xv.count(-99)/len(xv)
answer[k]['ir_flag'] = 0
# Final answer
return cons
def one_answer(zid,user_name):
    """Compute the consensus answer from a SINGLE user's classification of one subject.

    Parameters
    ----------
    zid : str
        Zooniverse ID of the subject (e.g. 'ARG0000001').
    user_name : str
        Zooniverse user name whose classification should be analyzed.

    Returns
    -------
    dict
        Consensus-style dict with keys 'zid', 'answer', 'n_votes', 'n_total';
        'answer' maps a per-galaxy xmax checksum to {'ind', 'xmax'} data.
        If no valid (non-zero checksum) classification exists, returns
        {'zid': zid, 'answer': {}}.

    Notes
    -----
    Mirrors the logic of checksum(), but restricted to one user, so
    n_votes = n_total = 1. Relies on module-level Mongo collections
    (`subjects`, `classifications`) and globals (`main_release_date`,
    `bad_keys`, `operator`, `Counter`).
    """
    # Find the result for just one user and one image (a single classification)
    sub = subjects.find_one({'zooniverse_id':zid})
    imgid = sub['_id']
    # Classifications for this subject after launch date
    class_params = {"subject_ids": imgid, "updated_at": {"$gt": main_release_date},'user_name':user_name}
    clist = list(classifications.find(class_params))
    # Empty dicts and lists
    cdict = {}
    for c in clist:
        # Want most popular combination for each NUMBER of galaxies identified in image
        sumlist = []    # List of the checksums over all possible combinations
        # Only find data that was an actual marking, not metadata
        goodann = [x for x in c['annotations'] if x.keys()[0] not in bad_keys]
        n_galaxies = len(goodann)
        for idx,ann in enumerate(goodann):
            xmaxlist = []
            radio_comps = ann['radio']
            # loop over all the radio components within an galaxy
            if radio_comps != 'No Contours':
                for rc in radio_comps:
                    xmaxlist.append(float(radio_comps[rc]['xmax']))
            # or make the value -99 if there are no contours
            else:
                xmaxlist.append(-99)
            # To create a unique ID for the combination of radio components,
            # take the product of all the xmax coordinates and sum them together.
            product = reduce(operator.mul, xmaxlist, 1)
            sumlist.append(round(product,3))
        checksum = round(sum(sumlist),3)
        c['checksum'] = checksum
        # Insert checksum into dictionary with number of galaxies as the index
        if cdict.has_key(n_galaxies):
            cdict[n_galaxies].append(checksum)
        else:
            cdict[n_galaxies] = [checksum]
    maxval=0
    mc_checksum = 0.
    # Find the number of galaxies that has the highest number of consensus classifications
    for k,v in cdict.iteritems():
        mc = Counter(v).most_common()
        # Check if the most common selection coordinate was for no radio contours
        if mc[0][0] == -99.0:
            if len(mc) > 1:
                # If so, take the selection with the next-highest number of counts
                mc_best = mc[1]
            else:
                continue
        # Selection with the highest number of counts
        else:
            mc_best = mc[0]
        # If the new selection has more counts than the previous one, choose it as the best match;
        # if tied or less than this, remain with the current consensus number of galaxies
        if mc_best[1] > maxval:
            maxval = mc_best[1]
            mc_checksum = mc_best[0]
    # Find a galaxy that matches the checksum (easier to keep track as a list)
    try:
        cmatch = next(i for i in clist if i['checksum'] == mc_checksum)
    except StopIteration:
        # Crude way to check for No Sources and No Contours (mc_checksum = 0.)
        cons = {'zid':zid,'answer':{}}
        return cons
    # Find IR peak for the checksummed galaxies
    goodann = [x for x in cmatch['annotations'] if x.keys()[0] not in bad_keys]
    # Find the sum of the xmax coordinates for each galaxy. This gives the index to search on.
    cons = {}
    cons['zid'] = zid
    cons['answer'] = {}
    cons['n_votes'] = 1
    cons['n_total'] = 1
    answer = cons['answer']
    ir_x,ir_y = {},{}
    for k,gal in enumerate(goodann):
        xmax_temp = []
        try:
            for v in gal['radio'].itervalues():
                xmax_temp.append(float(v['xmax']))
        except AttributeError:
            # gal['radio'] was the string 'No Contours' rather than a dict
            xmax_temp.append(-99)
        checksum2 = round(sum(xmax_temp),3)
        answer[checksum2] = {}
        answer[checksum2]['ind'] = k
        answer[checksum2]['xmax'] = xmax_temp
        # Make empty copy of next dict in same loop
        ir_x[k] = []
        ir_y[k] = []
    # Now loop over the galaxies themselves
    for c in clist:
        if c['checksum'] == mc_checksum:
            annlist = [ann for ann in c['annotations'] if ann.keys()[0] not in bad_keys]
            for ann in annlist:
                if 'ir' in ann.keys():
                    # Find the index k that this corresponds to
                    try:
                        xmax_checksum = round(sum([float(ann['radio'][a]['xmax']) for a in ann['radio']]),3)
                    except TypeError:
                        xmax_checksum = -99
                    k = answer[xmax_checksum]['ind']
                    if ann['ir'] == 'No Sources':
                        ir_x[k].append(-99)
                        ir_y[k].append(-99)
                    else:
                        # Only takes the first IR source right now; NEEDS TO BE MODIFIED.
                        ir_x[k].append(float(ann['ir']['0']['x']))
                        ir_y[k].append(float(ann['ir']['0']['y']))
    # NOTE(review): this final loop looks broken — the loop variable k shadows the
    # checksum key, `v['ind'] == k` compares a galaxy index to a checksum, and
    # `xpeak`/`ypeak` are never assigned anywhere in this function, so the body
    # would raise NameError if the condition were ever true. Presumably the intent
    # was to record an IR peak from ir_x/ir_y; confirm against checksum().
    for k,v in answer.iteritems():
        if v['ind'] == k:
            answer[k]['ir_peak'] = (xpeak,ypeak)
    return cons
def check_indices(index_names):
    """Ensure an ascending Mongo index named '<field>_idx' exists for each field.

    Creating these indexes drastically improves query run time; fields that
    already have one are left untouched. Always returns None.
    """
    existing = classifications.index_information()
    for field in index_names:
        idx_name = '{0}_idx'.format(field)
        if idx_name not in existing:
            classifications.create_index([(field, ASCENDING)], name=idx_name)
    return None
def grab_image(subject, imgtype='standard'):
    """Return a PIL image for an RGZ subject, preferring a local copy on disk.

    Looks for the file under {data_path}/rgz/{imgtype}/; if it is not there,
    the JPG is downloaded from the subject's recorded URL instead.
    """
    url = subject['location'][imgtype]
    local_path = "{0}/rgz/{1}/{2}".format(data_path, imgtype, url.split('/')[-1])
    if not os.path.exists(local_path):
        # No cached copy; fetch the image over the web
        return Image.open(cStringIO.StringIO(urllib.urlopen(url).read()))
    with open(local_path) as f:
        img = Image.open(f)
        img.load()  # force a full read before the file handle is closed
    return img
def plot_consensus(consensus,figno=1,savefig=False):
    """Plot a 4-panel summary of a consensus result for one RGZ subject.

    Panels: (1) WISE IR image, (2) radio image, (3) IR click KDE map with the
    individual user clicks, (4) final consensus (IR peaks + radio contours).

    Parameters
    ----------
    consensus : dict
        Output of checksum(); must contain 'zid', 'answer', 'n_votes',
        'n_total' (and 'survey' when savefig=True).
    figno : int
        Matplotlib figure number to draw into.
    savefig : bool
        If True, save a PDF under plot_path; otherwise show interactively.

    Relies on module-level globals: subjects, get_contours, pathdict,
    img_params, plot_path, plus matplotlib/numpy imports.
    """
    # Plot a 4-panel image of IR, radio, KDE estimate, and consensus
    zid = consensus['zid']
    answer = consensus['answer']
    sub = subjects.find_one({'zooniverse_id':zid})
    survey = sub['metadata']['survey']
    # Get contour data
    contours = get_contours(sub,pathdict)
    # Key bit that sets the difference between surveys.
    # contours['width'] = img_params[survey]['FITS_WIDTH']
    # contours['height'] = img_params[survey]['FITS_HEIGHT']
    # Scale factors from contour (FITS) pixels to displayed image pixels
    sf_x = img_params[survey]['IMG_WIDTH_NEW'] * 1./contours['width']
    sf_y = img_params[survey]['IMG_HEIGHT_NEW'] * 1./contours['height']
    verts_all = []
    codes_all = []
    components = contours['contours']
    for comp in components:
        # Order of bounding box components is (xmax,ymax,xmin,ymin)
        comp_xmax,comp_ymax,comp_xmin,comp_ymin = comp[0]['bbox']
        # Only plot radio components identified by the users as the consensus;
        # check on the xmax value to make sure
        for v in answer.itervalues():
            if comp_xmax in v['xmax']:
                for idx,level in enumerate(comp):
                    verts = [((p['x'])*sf_x,(p['y']-1)*sf_y) for p in level['arr']]
                    codes = np.ones(len(verts),int) * Path.LINETO
                    codes[0] = Path.MOVETO
                    verts_all.extend(verts)
                    codes_all.extend(codes)
    try:
        path = Path(verts_all, codes_all)
        patch_black = patches.PathPatch(path, facecolor = 'none', edgecolor='black', lw=1)
    except AssertionError:
        # Path() asserts on empty vertex lists
        # NOTE(review): if this branch runs, patch_black stays undefined and the
        # later ax4.add_patch(patch_black) (guarded only by len(answer) > 0)
        # would raise NameError — confirm whether that combination can occur.
        print 'Users found no components for consensus match of {0}'.format(zid)
    # Plot the infrared results
    fig = plt.figure(figno,(15,4))
    fig.clf()
    ax3 = fig.add_subplot(143)
    ax4 = fig.add_subplot(144)
    # Reversed so .pop() hands out colors in the intended order
    colormaparr = [cm.hot_r,cm.Blues,cm.RdPu,cm.Greens,cm.PuBu,cm.YlGn,cm.Greys][::-1]
    colorarr = ['r','b','m','g','c','y','k'][::-1]
    # If, in the rare case, that the consensus has more unique sources than the number of colors:
    if len(answer) > len(colorarr):
        colorarr *= int(len(answer)/len(colorarr))+1
        colormaparr *= int(len(answer)/len(colorarr))+1
    if len(answer) > 0: # At least one galaxy was identified
        for idx,ans in enumerate(answer.itervalues()):
            if ans.has_key('peak_data'):
                # KDE was computed: show the density map, raw clicks, and the peak
                xmin = 1.
                xmax = img_params[survey]['IMG_HEIGHT_NEW']
                ymin = 1.
                ymax = img_params[survey]['IMG_WIDTH_NEW']
                # Plot the KDE map
                colormap = colormaparr.pop()
                ax3.imshow(np.rot90(ans['peak_data']['Z']), cmap=colormap,extent=[xmin, xmax, ymin, ymax])
                # Plot individual sources
                color = colorarr.pop()
                x_plot,y_plot = ans['ir_x'],ans['ir_y']
                ax3.scatter(x_plot, y_plot, c=color, marker='o', s=10, alpha=1./len(x_plot))
                ax4.plot([ans['ir_peak'][0]],[ans['ir_peak'][1]],color=color,marker='*',markersize=12)
            elif ans.has_key('ir'):
                # No KDE, but a single consensus IR position exists
                color = colorarr.pop()
                x_plot,y_plot = ans['ir']
                ax3.plot([x_plot],[y_plot],color=color,marker='o',markersize=2)
                ax4.plot([x_plot],[y_plot],color=color,marker='*',markersize=12)
            else:
                # No IR counterpart at all; annotate in the margin
                ax4.text(img_params[survey]['IMG_WIDTH_NEW']+50,idx*25,'#{0:d} - no IR host'.format(idx),fontsize=11)
    ax3.set_xlim([0, img_params[survey]['IMG_WIDTH_NEW']])
    ax3.set_ylim([img_params[survey]['IMG_HEIGHT_NEW'], 0])
    ax3.set_title(zid)
    ax3.set_aspect('equal')
    ax4.set_xlim([0, img_params[survey]['IMG_WIDTH_NEW']])
    ax4.set_ylim([img_params[survey]['IMG_HEIGHT_NEW'], 0])
    ax4.set_title('Consensus ({0:d}/{1:d} users)'.format(consensus['n_votes'],consensus['n_total']))
    ax4.set_aspect('equal')
    # Display IR and radio images
    im_standard = grab_image(sub,imgtype='standard')
    ax1 = fig.add_subplot(141)
    ax1.imshow(im_standard,origin='upper')
    ax1.set_title('WISE')
    im_radio = grab_image(sub,imgtype='radio')
    ax2 = fig.add_subplot(142)
    ax2.imshow(im_radio,origin='upper')
    ax2.set_title(sub['metadata']['source'])
    ax2.get_yaxis().set_ticklabels([])
    ax3.get_yaxis().set_ticklabels([])
    # Plot contours identified as the consensus
    if len(answer) > 0:
        ax4.add_patch(patch_black)
    ax4.yaxis.tick_right()
    nticks = 5
    ax1.get_xaxis().set_ticks(np.arange(nticks)*img_params[survey]['IMG_WIDTH_NEW'] * 1./nticks)
    ax2.get_xaxis().set_ticks(np.arange(nticks)*img_params[survey]['IMG_WIDTH_NEW'] * 1./nticks)
    ax3.get_xaxis().set_ticks(np.arange(nticks)*img_params[survey]['IMG_WIDTH_NEW'] * 1./nticks)
    ax4.get_xaxis().set_ticks(np.arange(nticks+1)*img_params[survey]['IMG_WIDTH_NEW'] * 1./nticks)
    plt.subplots_adjust(wspace=0.02)
    # Save hard copy of the figure
    if savefig == True:
        fig.savefig('{0}/{1}/{2}.pdf'.format(plot_path,consensus['survey'],zid))
    else:
        plt.show()
    # Close figure after it's done; otherwise mpl complains about having thousands of stuff open
    plt.close()
    return None
def classifiers_per_image(zid):
# Print list of the users who classified a particular subject
sid = subjects.find_one({'zooniverse_id':zid})['_id']
c_all = classifications.find({'subject_ids':sid,'user_name':{'$exists':True,'$nin':expert_names()}}).sort([("updated_at", -1)])
clist = list(c_all)
for c in clist:
try:
name = c['user_name']
except KeyError:
name = '<<< Anonymous >>>'
print '{0:25} {1}'.format(name,c['updated_at'])
return None
def rc(zid):
# Visually compare the expert and volunteer consensus for a subject
plt.ion()
classifiers_per_image(zid)
cons = checksum(zid,excluded=expert_names(),no_anonymous=True)
plot_consensus(cons,figno=1,savefig=False)
print '\nVolunteers: {0:d} sources'.format(len(cons['answer']))
cons_ex = checksum(zid,experts_only=True)
plot_consensus(cons_ex,figno=2,savefig=False)
print ' Experts: {0:d} sources'.format(len(cons_ex['answer']))
return None
def run_sample(survey,update=True,subset=None,do_plot=False,weights=0,scheme='scaling'):
# Run the consensus algorithm on the RGZ classifications
check_indices(('subject_ids','updated_at','zooniverse_id'))
filestem = "consensus_rgz_{0}".format(survey)
if subset is not None:
'''
Only run consensus for classifications of
expert100: the sample of 100 galaxies classified by science team
goldstandard: the gold standard sample of 20 galaxies classified by all users
This only applies to FIRST subjects; no (explicit) gold standard yet for ATLAS,
although there are the manual classifications in Norris et al. (2006).
'''
assert survey == 'first', \
"Subsets only exist for the FIRST data set, not {0}.".format(survey)
assert subset in ('expert100','goldstandard'), \
"Subset is {0}; must be either 'expert100' or 'goldstandard'".format(subset)
pathd = {'expert100':'expert/expert_all_zooniverse_ids.txt',
'goldstandard':'goldstandard/gs_zids.txt'}
with open('{0}/{1}'.format(rgz_path,pathd[subset]),'rb') as f:
zooniverse_ids = [line.rstrip() for line in f]
suffix = '_{0}'.format(subset)
else:
all_completed_zids = [cz['zooniverse_id'] for cz in subjects.find({'state':'complete','metadata.survey':survey})]
if update:
'''
Check to see which subjects have already been completed --
only run on subjects without an existing consensus.
'''
master_json = '{0}/json/{1}.json'.format(rgz_path,filestem)
with open(master_json,'r') as fm:
jmaster = json.load(fm)
already_finished_zids = []
for gal in jmaster:
already_finished_zids.append(gal['zid'])
zooniverse_ids = list(set(all_completed_zids) - set(already_finished_zids))
print "\n{0:d} RGZ subjects already in master catalog".format(len(already_finished_zids))
logging.info("\n{0:d} RGZ subjects already in master catalog".format(len(already_finished_zids)))
print "{0:d} RGZ subjects completed since last consensus catalog generation on {1}".format(len(zooniverse_ids),time.ctime(os.path.getmtime(master_json)))
logging.info("{0:d} RGZ subjects completed since last consensus catalog generation on {1}".format(len(zooniverse_ids), \
time.ctime(os.path.getmtime(master_json))))
else:
# Rerun consensus for every completed subject in RGZ.
zooniverse_ids = all_completed_zids
suffix = ''
# Remove the tutorial subject
tutorial_zid = "ARG0003r15"
try:
zooniverse_ids.remove(tutorial_zid)
except ValueError:
print '\nTutorial subject {0} not in list.'.format(tutorial_zid)
logging.info('\nTutorial subject {0} not in list.'.format(tutorial_zid))
print '\nLoaded data; running consensus algorithm on {0:d} completed RGZ subjects'.format(len(zooniverse_ids))
logging.info('\nLoaded data; running consensus algorithm on {0:d} completed RGZ subjects'.format(len(zooniverse_ids)))
# Empty files and objects for CSV, JSON output
json_output = []
# CSV header
if update:
fc = open('{0}/csv/{1}{2}.csv'.format(rgz_path,filestem,suffix),'a')
else:
fc = open('{0}/csv/{1}{2}.csv'.format(rgz_path,filestem,suffix),'w')
fc.write('zooniverse_id,{0}_id,n_votes,n_total,consensus_level,n_radio,label,bbox,ir_peak,ir_level,ir_flag,n_ir\n'.format(survey))
for idx,zid in enumerate(zooniverse_ids):
# Check progress to screen
if not idx % 100:
print idx, datetime.datetime.now().strftime('%H:%M:%S.%f')
cons = checksum(zid,include_peak_data=do_plot,weights=weights,scheme=scheme)
if do_plot:
plot_consensus(cons,savefig=True)
# Save results to files
if cons is not None:
cons['consensus_level'] = (cons['n_votes']/cons['n_total'])
# JSON
# Remove peak data from saved catalog; numpy arrays are not JSON serializable (may want to adjust later).
# http://stackoverflow.com/questions/3488934/simplejson-and-numpy-array/24375113#24375113
for ans in cons['answer']:
if cons['answer'][ans].has_key('peak_data'):
popvar = cons['answer'][ans].pop('peak_data',None)
json_output.append(cons)
# CSV
for ans in cons['answer'].itervalues():
try:
ir_peak = ans['ir_peak']
except KeyError:
ir_peak = ans['ir'] if ans.has_key('ir') else (-99,-99)
try:
fc.write('{0},{1},{2:4d},{3:4d},{4:.3f},{5:2d},{6},"{7}","{8}",{9:.3f}\n'.format( \
cons['zid'],cons['source'],cons['n_votes'],cons['n_total'],cons['consensus_level'], \
len(ans['xmax']),alphabet(ans['ind']),bbox_unravel(ans['bbox']),ir_peak,ans['ir_level'],ans['ir_flag'],ans['n_ir']))
except KeyError:
print zid
print cons
logging.warning((zid, cons))
# Mongo collection
for ans in cons['answer'].itervalues():
try:
ir_peak = ans['ir_peak']
except KeyError:
ir_peak = ans['ir'] if ans.has_key('ir') else (-99,-99)
try:
new_con = {'zooniverse_id':cons['zid'], '{0}_id'.format(survey):cons['source'], 'n_votes':cons['n_votes'], \
'n_total':cons['n_total'], 'consensus_level':cons['consensus_level'], 'n_radio':len(ans['xmax']), \
'label':alphabet(ans['ind']), 'bbox':bbox_unravel(ans['bbox']), 'ir_peak':ir_peak, 'ir_level':ans['ir_level'], \
'ir_flag':ans['ir_flag'], 'n_ir':ans['n_ir']}
consensus.insert(new_con)
except KeyError:
print zid
print cons
logging.warning((zid, cons))
# Close the new CSV file
fc.close()
# Write and close the new JSON file
if update:
jmaster.extend(json_output)
jfinal = jmaster
else:
jfinal = json_output
with open('{0}/json/{1}{2}.json'.format(rgz_path,filestem,suffix),'w') as fj:
json.dump(jfinal,fj)
# Make 75% version for full catalog
if subset is None:
# JSON
json75 = filter(lambda a: (a['n_votes']/a['n_total']) >= 0.75, jfinal)
with open('{0}/json/{1}_75.json'.format(rgz_path,filestem),'w') as fj:
json.dump(json75,fj)
# CSV
import pandas as pd
cmaster = pd.read_csv('{0}/csv/{1}.csv'.format(rgz_path,filestem))
cmaster75 = cmaster[cmaster['consensus_level'] >= 0.75]
cmaster75.to_csv('{0}/csv/{1}_75.csv'.format(rgz_path,filestem),index=False)
print '\nCompleted consensus for {0}.'.format(survey)
logging.info('\nCompleted consensus for {0}.'.format(survey))
return None
def force_csv_update(survey='first',suffix=''):
    """Regenerate the consensus CSV from the master JSON catalog.

    Used when the CSV was written with errors; the JSON file is treated as the
    ground truth. The column layout must match the one written by run_sample().

    Parameters
    ----------
    survey : str
        Survey name used in the file stem ('first' or 'atlas').
    suffix : str
        Optional suffix appended to the output CSV file name.
    """
    filestem = 'consensus_rgz_{0}'.format(survey)
    master_json = '{0}/json/{1}.json'.format(rgz_path,filestem)
    with open(master_json,'r') as fm:
        jmaster = json.load(fm)
    fc = open('{0}/csv/{1}{2}.csv'.format(rgz_path,filestem,suffix),'w')
    fc.write('zooniverse_id,{0}_id,n_votes,n_total,consensus_level,n_radio,label,bbox,ir_peak,ir_level,ir_flag,n_ir\n'.format(survey))
    for gal in jmaster:
        for ans in gal['answer'].itervalues():
            try:
                ir_peak = ans['ir_peak']
            except KeyError:
                # Fall back to the mean IR position, or the no-source sentinel
                ir_peak = ans['ir'] if ans.has_key('ir') else (-99,-99)
            # BUG FIX: the format string previously stopped at field {8}, so the
            # ir_level, ir_flag and n_ir arguments were silently dropped even
            # though the header declares those columns. Write all 12 fields.
            fc.write('{0},{1},{2:4d},{3:4d},{4:.3f},{5:2d},{6},"{7}","{8}",{9:.3f},{10:d},{11:d}\n'.format(
                gal['zid'],
                gal['source'],
                gal['n_votes'],
                gal['n_total'],
                gal['n_votes'] * 1./gal['n_total'],
                len(ans['xmax']),
                alphabet(ans['ind']),
                bbox_unravel(ans['bbox']),
                ir_peak,
                ans['ir_level'],
                ans['ir_flag'],
                ans['n_ir']
                )
            )
    fc.close()
    return None
def bbox_unravel(bbox):
    """Convert an iterable of bounding boxes with string/number coordinates
    into a list of tuples of floats.

    Each element of *bbox* is a 4-item sequence of coordinates (typically
    strings like '123.4'); the result preserves order and length.
    """
    return [tuple(float(coord) for coord in lobe) for lobe in bbox]
def alphabet(i):
    """Return a letter label for a non-negative integer index.

    For i > 25, letters begin multiplying: alphabet(0) = 'a'
                                           alphabet(1) = 'b'
                                           ...
                                           alphabet(25) = 'z'
                                           alphabet(26) = 'aa'
                                           alphabet(27) = 'bb'
                                           ...

    Raises AssertionError if i is not an integer.
    """
    # BUG FIX: previously used string.letters[26:]. In Python 2 string.letters
    # is lowercase + uppercase, so [26:] selects the UPPERCASE half — giving
    # 'A' where the documented behavior (and the variable name 'lowercase')
    # promise 'a'. string.letters also no longer exists in Python 3;
    # ascii_lowercase is correct and portable.
    from string import ascii_lowercase
    try:
        letter = ascii_lowercase[i % 26] * int(i / 26 + 1)
        return letter
    except TypeError:
        raise AssertionError("Index must be an integer")
def update_experts():
    """Flag classifications made by members of the expert science team.

    Reads the (started_at, ended_at) windows recorded in expert_params.json
    and sets expert=True on every classification by that user inside the
    window. Takes ~1 minute to run. Returns None.
    """
    import dateutil.parser
    # FIX: use a context manager so the JSON file handle is closed
    # (previously opened via open(...).read() and never closed).
    with open('{0}/expert/expert_params.json'.format(rgz_path)) as jf:
        experts = json.load(jf)
    for ex in experts:
        expert_dates = (dateutil.parser.parse(ex['started_at']),dateutil.parser.parse(ex['ended_at']))
        classifications.update({"updated_at": {"$gt": expert_dates[0],"$lt":expert_dates[1]},"user_name":ex['expert_user']},{'$set':{'expert':True}},multi=True)
    return None
def expert_names():
    """Return the Zooniverse user names of the RGZ science team members."""
    return [u'42jkb', u'ivywong', u'stasmanian', u'klmasters', u'Kevin',
            u'akapinska', u'enno.middelberg', u'xDocR', u'vrooje',
            u'KWillett', u'DocR']
def update_gs_subjects():
    """Mark the gold-standard subjects in Mongo by setting goldstandard=True.

    Reads the Zooniverse IDs, one per line, from goldstandard/gs_zids.txt
    under rgz_path. Returns None.
    """
    gs_file = '{0}/goldstandard/gs_zids.txt'.format(rgz_path)
    with open(gs_file, 'r') as f:
        for line in f:
            subjects.update({'zooniverse_id': line.strip()}, {'$set': {'goldstandard': True}})
    return None
def get_unique_users():
# Find the usernames for all logged-in classifiers with at least one classification
check_indices(('user_name',))
print "Finding non-anonymous classifications"
logging.info("Finding non-anonymous classifications")
non_anonymous = classifications.find({"user_name":{"$exists":True}})
print "Finding user list"
logging.info("Finding user list")
users = [n['user_name'] for n in non_anonymous]
unique_users = set(users)
return unique_users
def weight_users(unique_users, scheme, min_gs=5, min_agree=0.5, scaling=5):
    """Assign each user a consensus weight based on agreement with the science team.

    For every user, counts how many gold-standard subjects they classified and
    how often their radio-component answer matched the expert consensus, then
    writes (agreed, gs_seen, weight) to the `user_weights` Mongo collection.

    Parameters
    ----------
    unique_users : iterable of unicode
        User names to weight (e.g. from get_unique_users()).
    scheme : str
        'threshold' (weight is 0 or 1) or 'scaling' (weight is an integer
        proportional to the agreement fraction).
    min_gs : int
        Strict lower bound on gold-standard subjects seen (must see MORE than
        this many to get a non-zero weight).
    min_agree : float
        Threshold-scheme minimum agreement fraction (N_agree / N_seen).
    scaling : int
        Multiplicative factor for the sliding-scale scheme.

    Returns None.
    """
    # min_gs is the minimum number of gold standard subjects user must have seen to determine agreement.
    # Set to prevent upweighting on low information (eg, agreeing with the science team if the user has
    # only seen 1 gold standard object doesn't tell us as much than if they agreed 19/20 times).
    # min_agree is the minimum level of agreement with the science team (N_agree / N_seen).
    # scaling is the multiplicative factor for a sliding scale weighting scheme.
    print 'Calculating weights for {} users using {} method, using parameters:'.format(len(unique_users), scheme)
    logging.info('Calculating weights for {} users using {} method, using parameters:'.format(len(unique_users), scheme))
    if scheme == 'threshold':
        output = '\tminimum gold standard classified = {}\n\tminimum agreement level = {}'.format(min_gs, min_agree)
    else:
        output = '\tminimum gold standard classified = {}\n\tscaling factor = {}'.format(min_gs, scaling)
    print output
    logging.info(output)
    # Assigns a weight to users based on their agreement with the gold standard sample as classified by RGZ science team
    # Lazily populate the goldstandard / expert flags in Mongo if missing
    gs_count = subjects.find({'goldstandard':True}).count()
    if gs_count < 1:
        update_gs_subjects()
    ex_count = classifications.find({'expert':True}).count()
    if ex_count < 1:
        update_experts()
    # Find the science team answers:
    gs_zids = [s['zooniverse_id'] for s in subjects.find({"goldstandard":True})]
    science_answers = {}
    for zid in gs_zids:
        s = checksum(zid,experts_only=True)
        science_answers[zid] = s['answer'].keys()
    gs_ids = [s['_id'] for s in subjects.find({"goldstandard":True})]
    count = 0
    # For each user, find the gold standard subjects they saw and whether it agreed with the experts
    for u in list(unique_users):
        count += 1
        print count, u
        agreed = 0
        u_str = u.encode('utf8')
        zid_seen = set()
        # For each match, see if they agreed with the science team. If this happened more than once, only keep first classification.
        for g in classifications.find({'user_name':u, 'subject_ids':{'$in':gs_ids}}):
            zid = g['subjects'][0]['zooniverse_id']
            if zid not in zid_seen:
                zid_seen = zid_seen.union([zid])
                their_answer = one_answer(zid,u)
                their_checksums = their_answer['answer'].keys()
                science_checksums = science_answers[zid]
                # Agreement = identical set of radio-component checksums
                if set(their_checksums) == set(science_checksums):
                    agreed += 1
        # NOTE: re-binds gs_count (previously the global gold-standard subject
        # count) to this user's number of distinct gold-standard subjects seen
        gs_count = len(zid_seen)
        # Save output Mongo
        if scheme == 'threshold' and gs_count > min_gs and (1.*agreed/gs_count) > min_agree:
            weight = 1
        elif scheme == 'scaling' and gs_count > min_gs:
            weight = int(round(1.*scaling*agreed/gs_count))
        else:
            weight = 0
        user_weights.update({'user_name':u_str}, {'$set':{'agreed':agreed, 'gs_seen':gs_count, 'weight':weight}}, upsert=True)
    return None
def print_user_weights():
    """Dump the per-user weighting table from Mongo to a CSV file.

    User names may themselves contain commas, so they are written quoted.
    """
    out_path = '{0}/csv/user_weights{1}.csv'.format(rgz_path, version)
    with open(out_path, 'w') as f:
        f.write('user_name,gs_seen,agreed,weight\n')
        for user in user_weights.find():
            f.write('"{0}",{1},{2},{3}\n'.format(user['user_name'].encode('utf8'), user['gs_seen'], user['agreed'], user['weight']))
if __name__ == "__main__":
    # Run the consensus pipeline from the command line.
    # Configures logging, optionally recomputes user weights, then runs the
    # consensus algorithm for both surveys. Any exception is logged and re-raised.
    logging.basicConfig(filename='{}/{}'.format(rgz_path,logfile), level=logging.DEBUG, format='%(asctime)s: %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
    logging.captureWarnings(True)
    logging.info('Consensus run from command line')
    try:
        # pathdict is set at module level (not visible here); None means the
        # raw FITS/contour image data could not be located.
        if pathdict != None:
            # NOTE: this builds a tuple ('Starting at', <timestamp>), so the
            # tuple itself is logged/printed
            output = 'Starting at',datetime.datetime.now().strftime('%H:%M:%S.%f')
            logging.info(output)
            print output
            # update: default = True
            #
            # Set as True if you want to run the consensus only on the subjects completed
            # since the last time the pipeline was run. If False, it will run it on the
            # entire set of completed subjects (which takes about 6 hours for 10k images).
            update = False
            # subset: default = None
            #
            # Run the sample only on some specific subjects. Pre-defined subsets include:
            #   'expert100':    a sample of 100 galaxies classified by science team
            #   'goldstandard': the gold standard sample of 20 galaxies classified by all users
            #                   and the science team. All galaxies in 'goldstandard' are also in
            #                   'expert100'.
            subset = None
            # do_plot: default = False
            #
            # Set as True if you want to make the four-panel plots of the consensus for each subject.
            # Useful, but adds to the total runtime.
            do_plot = False
            # weights: default = 0
            #
            # Execute weighting of the users based on their agreement with the science team
            # on the gold standard subjects. If weights = 0 or weights = 1, each user's vote
            # is counted equally in the consensus. If weights > 1, then their impact is
            # increased by replicating the classifications. Must be a nonnegative integer.
            weights = 5
            assert (type(weights) == int) and weights >= 0, 'Weight must be a nonnegative integer'
            scheme = 'scaling'
            assert scheme in ['threshold', 'scaling'], 'Weighting scheme must be threshold or sliding, not {}'.format(scheme)
            # If you're using weights, make sure they're up to date
            if weights > 1:
                unique_users = get_unique_users()
                weight_users(unique_users, scheme, min_gs=5, min_agree=0.5, scaling=weights)
            # Run the consensus separately for different surveys, since the image parameters are different
            for survey in ('atlas','first'):
                run_sample(survey,update,subset,do_plot,weights,scheme)
            output = 'Finished at',datetime.datetime.now().strftime('%H:%M:%S.%f')
            logging.info(output)
            print output
        else:
            # Needs to be able to find the raw image data to run the pipeline
            print "\nAborting consensus.py - could not locate raw RGZ image data.\n"
            logging.info("\nAborting consensus.py - could not locate raw RGZ image data.\n")
    except BaseException as e:
        # Log the full traceback before propagating the failure
        logging.exception(e)
        raise
|
willettk/rgz-analysis
|
python/consensus.py
|
Python
|
mit
| 55,278
|
# Script for creating different kind of indexes in a small space as possible.
# This is intended for testing purposes.
import tables as tb


class Descr(tb.IsDescription):
    """Row description with one column of each basic flavour."""
    var1 = tb.StringCol(itemsize=4, shape=(), dflt='', pos=0)
    var2 = tb.BoolCol(shape=(), dflt=False, pos=1)
    var3 = tb.Int32Col(shape=(), dflt=0, pos=2)
    var4 = tb.Float64Col(shape=(), dflt=0.0, pos=3)


# Parameters for the table and index creation
small_chunkshape = (2,)
small_blocksizes = (64, 32, 16, 8)
nrows = 43

# Create the new file
h5fname = 'indexes_2_1.h5'
h5file = tb.open_file(h5fname, 'w')
t1 = h5file.create_table(h5file.root, 'table1', Descr)

# Populate every column of each row with the row number
row = t1.row
for i in range(nrows):
    for colname in ('var1', 'var2', 'var3', 'var4'):
        row[colname] = i
    row.append()
t1.flush()

# Do a copy of table1
t1.copy(h5file.root, 'table2')

# Create indexes of all kinds, one optimization level / kind per column
index_specs = (
    (t1.cols.var1, 0, 'ultralight'),
    (t1.cols.var2, 3, 'light'),
    (t1.cols.var3, 6, 'medium'),
    (t1.cols.var4, 9, 'full'),
)
for column, optlevel, kind in index_specs:
    column.create_index(optlevel, kind, _blocksizes=small_blocksizes)

h5file.close()
|
avalentino/PyTables
|
tables/tests/create_backcompat_indexes.py
|
Python
|
bsd-3-clause
| 1,167
|
"""
WSGI config for djello project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "djello.settings")
application = DjangoWhiteNoise(get_wsgi_application())
|
commoncode/djello
|
djello/wsgi.py
|
Python
|
mit
| 455
|
#!/usr/bin/env python
# Pretty-print JSON read from stdin to stdout.
# Self documented - trivial, see the source code.
# Possible alternatives: jq (c, fastest).
#                      ; jshon
from sys import stdin, stdout

try:  # Prefer simplejson when available. (Or use anyjson.)
    from simplejson import load, dump
except ImportError:
    try:
        from json import load, dump
    except ImportError:
        # Bug fix: raising a plain string ("string exceptions") is invalid in
        # Python 2.6+ and a TypeError in Python 3; raise a real exception.
        raise ImportError("Install simplejson compatible python module.")
# ASCII convertion is important for some Cisco ACI configuration dumps.
dump(load(stdin), stdout, ensure_ascii=True, sort_keys=True, indent=4, separators=(',', ': '))
|
alexkross/cisco_aci
|
json-pretty.py
|
Python
|
gpl-3.0
| 611
|
# Natural Language Toolkit: Product Reviews Corpus Reader
#
# Copyright (C) 2001-2015 NLTK Project
# Author: Pierpaolo Pantone <24alsecondo@gmail.com>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
CorpusReader for reviews corpora (syntax based on Customer Review Corpus).
- Customer Review Corpus information -
Annotated by: Minqing Hu and Bing Liu, 2004.
Department of Computer Sicence
University of Illinois at Chicago
Contact: Bing Liu, liub@cs.uic.edu
http://www.cs.uic.edu/~liub
Distributed with permission.
The "product_reviews_1" and "product_reviews_2" datasets respectively contain
annotated customer reviews of 5 and 9 products from amazon.com.
Related papers:
- Minqing Hu and Bing Liu. "Mining and summarizing customer reviews".
Proceedings of the ACM SIGKDD International Conference on Knowledge
Discovery & Data Mining (KDD-04), 2004.
- Minqing Hu and Bing Liu. "Mining Opinion Features in Customer Reviews".
Proceedings of Nineteeth National Conference on Artificial Intelligence
(AAAI-2004), 2004.
- Xiaowen Ding, Bing Liu and Philip S. Yu. "A Holistic Lexicon-Based Appraoch to
Opinion Mining." Proceedings of First ACM International Conference on Web
Search and Data Mining (WSDM-2008), Feb 11-12, 2008, Stanford University,
Stanford, California, USA.
Symbols used in the annotated reviews:
[t] : the title of the review: Each [t] tag starts a review.
xxxx[+|-n]: xxxx is a product feature.
[+n]: Positive opinion, n is the opinion strength: 3 strongest, and 1 weakest.
Note that the strength is quite subjective.
You may want ignore it, but only considering + and -
[-n]: Negative opinion
## : start of each sentence. Each line is a sentence.
[u] : feature not appeared in the sentence.
[p] : feature not appeared in the sentence. Pronoun resolution is needed.
[s] : suggestion or recommendation.
[cc]: comparison with a competing product from a different brand.
[cs]: comparison with a competing product from the same brand.
Note: Some of the files (e.g. "ipod.txt", "Canon PowerShot SD500.txt") do not
provide separation between different reviews. This is due to the fact that
the dataset was specifically designed for aspect/feature-based sentiment
analysis, for which sentence-level annotation is sufficient. For document-
level classification and analysis, this peculiarity should be taken into
consideration.
"""
from __future__ import division
import re
from nltk.corpus.reader.api import *
from nltk.tokenize import *
# Patterns for the Customer Review Corpus annotation syntax (see module
# docstring above for the meaning of [t], [+n]/[-n], [u]/[p]/[s]/[cc]/[cs], ##).
TITLE = re.compile(r'^\[t\](.*)$') # [t] Title: a [t] tag starts a new review
FEATURES = re.compile(r'((?:(?:\w+\s)+)?\w+)\[((?:\+|\-)\d)\]') # find 'feature' in feature[+3]
NOTES = re.compile(r'\[(?!t)(p|u|s|cc|cs)\]') # find 'p' in camera[+2][p]; (?!t) excludes the title tag
SENT = re.compile(r'##(.*)$') # find tokenized sentence (text after '##')
@compat.python_2_unicode_compatible
class Review(object):
    """
    A single product review: a title plus the annotated lines following it.
    """

    def __init__(self, title=None, review_lines=None):
        """
        :param title: the title of the review.
        :param review_lines: the list of the ReviewLines that belong to the Review.
        """
        self.title = title
        self.review_lines = [] if review_lines is None else review_lines

    def add_line(self, review_line):
        """
        Add a line (ReviewLine) to the review.
        :param review_line: a ReviewLine instance that belongs to the Review.
        """
        assert isinstance(review_line, ReviewLine)
        self.review_lines.append(review_line)

    def features(self):
        """
        Return a list of features in the review. Each feature is a tuple made of
        the specific item feature and the opinion strength about that feature.
        :return: all features of the review as a list of tuples (feat, score).
        :rtype: list(tuple)
        """
        collected = []
        for line in self.review_lines:
            collected.extend(line.features)
        return collected

    def sents(self):
        """
        Return all tokenized sentences in the review.
        :return: all sentences of the review as lists of tokens.
        :rtype: list(list(str))
        """
        sentences = []
        for line in self.review_lines:
            sentences.append(line.sent)
        return sentences

    def __repr__(self):
        return 'Review(title="{}", review_lines={})'.format(self.title, self.review_lines)
@compat.python_2_unicode_compatible
class ReviewLine(object):
    """
    One sentence of a review, together with its (optional) feature
    annotations and notes about the reviewed item.
    """

    def __init__(self, sent, features=None, notes=None):
        self.sent = sent
        self.features = features if features is not None else []
        self.notes = notes if notes is not None else []

    def __repr__(self):
        return ('ReviewLine(features={}, notes={}, sent={})'.format(
            self.features, self.notes, self.sent))
class ReviewsCorpusReader(CorpusReader):
    """
    Reader for the Customer Review Data dataset by Hu, Liu (2004).
    Note: we are not applying any sentence tokenization at the moment, just word
    tokenization.
    >>> from nltk.corpus import product_reviews_1
    >>> camera_reviews = product_reviews_1.reviews('Canon_G3.txt')
    >>> review = camera_reviews[0]
    >>> review.sents()[0]
    ['i', 'recently', 'purchased', 'the', 'canon', 'powershot', 'g3', 'and', 'am',
    'extremely', 'satisfied', 'with', 'the', 'purchase', '.']
    >>> review.features()
    [('canon powershot g3', '+3'), ('use', '+2'), ('picture', '+2'),
    ('picture quality', '+1'), ('picture quality', '+1'), ('camera', '+2'),
    ('use', '+2'), ('feature', '+1'), ('picture quality', '+3'), ('use', '+1'),
    ('option', '+1')]
    We can also reach the same information directly from the stream:
    >>> product_reviews_1.features('Canon_G3.txt')
    [('canon powershot g3', '+3'), ('use', '+2'), ...]
    We can compute stats for specific product features:
    >>> from __future__ import division
    >>> n_reviews = len([(feat,score) for (feat,score) in product_reviews_1.features('Canon_G3.txt') if feat=='picture'])
    >>> tot = sum([int(score) for (feat,score) in product_reviews_1.features('Canon_G3.txt') if feat=='picture'])
    >>> # We use float for backward compatibility with division in Python2.7
    >>> mean = tot / n_reviews
    >>> print(n_reviews, tot, mean)
    15 24 1.6
    """
    # All reading methods are built on StreamBackedCorpusView, which calls the
    # _read_* block readers below repeatedly until the stream is exhausted.
    CorpusView = StreamBackedCorpusView

    def __init__(self, root, fileids, word_tokenizer=WordPunctTokenizer(),
                 encoding='utf8'):
        """
        :param root: The root directory for the corpus.
        :param fileids: a list or regexp specifying the fileids in the corpus.
        :param word_tokenizer: a tokenizer for breaking sentences or paragraphs
            into words. Default: `WordPunctTokenizer`
        :param encoding: the encoding that should be used to read the corpus.
        """
        CorpusReader.__init__(self, root, fileids, encoding)
        self._word_tokenizer = word_tokenizer

    def features(self, fileids=None):
        """
        Return a list of features. Each feature is a tuple made of the specific
        item feature and the opinion strength about that feature.
        :param fileids: a list or regexp specifying the ids of the files whose
            features have to be returned.
        :return: all features for the item(s) in the given file(s).
        :rtype: list(tuple)
        """
        if fileids is None:
            fileids = self._fileids
        elif isinstance(fileids, string_types):
            fileids = [fileids]
        return concat([self.CorpusView(fileid, self._read_features, encoding=enc)
                       for (fileid, enc) in self.abspaths(fileids, True)])

    def raw(self, fileids=None):
        """
        :param fileids: a list or regexp specifying the fileids of the files that
            have to be returned as a raw string.
        :return: the given file(s) as a single string.
        :rtype: str
        """
        if fileids is None:
            fileids = self._fileids
        elif isinstance(fileids, string_types):
            fileids = [fileids]
        return concat([self.open(f).read() for f in fileids])

    def readme(self):
        """
        Return the contents of the corpus README.txt file.
        """
        return self.open("README.txt").read()

    def reviews(self, fileids=None):
        """
        Return all the reviews as a list of Review objects. If `fileids` is
        specified, return all the reviews from each of the specified files.
        :param fileids: a list or regexp specifying the ids of the files whose
            reviews have to be returned.
        :return: the given file(s) as a list of reviews.
        """
        # NOTE(review): unlike features(), a bare string fileid is not wrapped
        # in a list here — confirm abspaths() accepts a plain string.
        if fileids is None:
            fileids = self._fileids
        return concat([self.CorpusView(fileid, self._read_review_block, encoding=enc)
                       for (fileid, enc) in self.abspaths(fileids, True)])

    def sents(self, fileids=None):
        """
        Return all sentences in the corpus or in the specified files.
        :param fileids: a list or regexp specifying the ids of the files whose
            sentences have to be returned.
        :return: the given file(s) as a list of sentences, each encoded as a
            list of word strings.
        :rtype: list(list(str))
        """
        return concat([self.CorpusView(path, self._read_sent_block, encoding=enc)
                       for (path, enc, fileid)
                       in self.abspaths(fileids, True, True)])

    def words(self, fileids=None):
        """
        Return all words and punctuation symbols in the corpus or in the specified
        files.
        :param fileids: a list or regexp specifying the ids of the files whose
            words have to be returned.
        :return: the given file(s) as a list of words and punctuation symbols.
        :rtype: list(str)
        """
        return concat([self.CorpusView(path, self._read_word_block, encoding=enc)
                       for (path, enc, fileid)
                       in self.abspaths(fileids, True, True)])

    def _read_features(self, stream):
        # Block reader: consume up to 20 lines per call and collect every
        # (feature, score) annotation found on them.
        features = []
        for i in range(20):
            line = stream.readline()
            if not line:
                return features
            features.extend(re.findall(FEATURES, line))
        return features

    def _read_review_block(self, stream):
        # Block reader: return a one-element list holding the next complete
        # review, or [] at end of file.
        # First, skip forward until a [t] title line opens a review.
        while True:
            line = stream.readline()
            if not line:
                return [] # end of file.
            title_match = re.match(TITLE, line)
            if title_match:
                review = Review(title=title_match.group(1).strip()) # We create a new review
                break
        # Scan until we find another line matching the regexp, or EOF.
        while True:
            oldpos = stream.tell()
            line = stream.readline()
            # End of file:
            if not line:
                return [review]
            # Start of a new review: backup to just before it starts, and
            # return the review we've already collected.
            if re.match(TITLE, line):
                stream.seek(oldpos)
                return [review]
            # Anything else is part of the review line.
            feats = re.findall(FEATURES, line)
            notes = re.findall(NOTES, line)
            sent = re.findall(SENT, line)
            if sent:
                sent = self._word_tokenizer.tokenize(sent[0])
            review_line = ReviewLine(sent=sent, features=feats, notes=notes)
            review.add_line(review_line)

    def _read_sent_block(self, stream):
        # Block reader: the tokenized sentences of one review.
        sents = []
        for review in self._read_review_block(stream):
            sents.extend([sent for sent in review.sents()])
        return sents

    def _read_word_block(self, stream):
        # Block reader: tokens from the '##' sentences of up to 20 lines.
        words = []
        for i in range(20): # Read 20 lines at a time.
            line = stream.readline()
            sent = re.findall(SENT, line)
            if sent:
                words.extend(self._word_tokenizer.tokenize(sent[0]))
        return words
|
enriquesanchezb/practica_utad_2016
|
venv/lib/python2.7/site-packages/nltk/corpus/reader/reviews.py
|
Python
|
apache-2.0
| 12,832
|
from django.db import models
from django.db.models import Sum, Count, QuerySet
__author__ = 'lberrocal'
class MaximoTimeRegisterMixin(object):
    """Query helpers shared by the time-register queryset and manager."""

    def get_employee_total_regular_hours(self, employee, date):
        """Aggregate total regular hours and register count for *employee* on *date*."""
        registers = self.filter(employee=employee, date=date)
        return registers.aggregate(
            total_regular_hours=Sum('regular_hours'),
            register_count=Count('regular_hours'))

    def assign_projects_from_ticket(self):
        """Copy each ticket's project onto registers missing one; return the count updated."""
        updated = 0
        for register in self.filter():
            if register.project is not None:
                continue
            register.project = register.ticket.project
            register.save()
            updated += 1
        return updated

    def sum_regular_hours(self):
        """Annotate every row with the sum of its regular hours."""
        return self.filter().annotate(total_regular_hours=Sum('regular_hours'))
class MaximoTimeRegisterQuerySet(QuerySet, MaximoTimeRegisterMixin):
    # QuerySet flavour of the mixin helpers, so they remain chainable after
    # filter()/exclude() calls.
    pass
class MaximoTimeRegisterManager(models.Manager):
    """Default manager exposing the mixin helpers at ``Model.objects`` level."""

    def get_queryset(self):
        # Use the custom queryset so chained calls keep the mixin methods.
        return MaximoTimeRegisterQuerySet(self.model, using=self._db)

    def assign_projects_from_ticket(self):
        """Backfill projects from tickets; returns the number of updated rows."""
        return self.get_queryset().assign_projects_from_ticket()

    def get_employee_total_regular_hours(self, employee, date):
        """Aggregate regular hours and register count for one employee on one date."""
        return self.get_queryset().get_employee_total_regular_hours(employee, date)

    def sum_regular_hours(self, project):
        """Annotate the given project's registers with their summed regular hours.

        Bug fix: the queryset was built but never returned, so the method
        always returned None.
        """
        return self.get_queryset().filter(project=project).sum_regular_hours()
|
luiscberrocal/homeworkpal
|
homeworkpal_project/maximo/managers.py
|
Python
|
mit
| 1,547
|
# -*- coding: utf-8 -*-
# continued from pattern.py
# defining the basic object we will be working with
# Note: makeVideo - it doesn't work yet - i haven't yet solved the issues about opencv
# so i am outputing the slides only for the moment
# 2013-09-23
##############################################################################################
#
#==== imports ================================================================================
# some of the stuff were moved to defaultParameters.py
import copy
import time
import os
import re
import numpy
import numpy as np
import numpy.ma as ma
#import matplotlib
import matplotlib.pyplot as plt
#import scipy.misc.pilutil as smp
#import numpy.fft as fft
#import shutil
#import sys
import pickle
from copy import deepcopy
try:
from scipy import signal
from scipy import interpolate
except ImportError:
#print "Scipy not installed"
pass
#==== setting up the global parameters========================================================
import defaultParameters as dp
from defaultParameters import * #bad habits but all these variables are prefixed with "default"
# or at least i try to make them to
import colourbarQPESUMS # the colourbars for the Central Weather Bureau
import colourbarQPESUMSwhiteBackground # the same as above, with white backgrounds
#==== importing pattern.py====================================================================
from . import pattern
try:
from dataStreamTools import makeVideo as mv
except ImportError:
print "import error! opencv not installed(?!)"
from dataStreamTools import kongrey as kr
# Short module-level aliases for the two central classes from armor.pattern.
dbz = pattern.DBZ
ds = pattern.DBZstream
#==== defining the classes ===================================================================
#class DataStreamSets:
class DataStreamSet: # correcting a long-standing typo 2014-03-09
"""
class dataStreamSet: DSS = dataStreamSet(ds0, ds1, ds2,...dsN)
where ds0 = observations, ds1, ds2,.. = models
with the bare basic methods of analysis and output to panel of 20+ images
"""
############################################################################
# initialisation and basic function calls
def __init__(self, ds0, *args):
self.name = ds0.name + '_' + '_'.join([v.name for v in args])
self.obs = ds0
self.wrfs = list(args)
############################################################################
# simple building block functions
def getAllDataTimes(self):
"""
get the union of the sets of dataTimes for all streams
"""
dataTimes = set([v.dataTime for v in self.obs])
for wrf in self.wrfs:
dataTimes = dataTimes.union([v.dataTime for v in wrf])
dataTimes = sorted(list(dataTimes))
return dataTimes
def getCommonDataTimes(self):
"""
get the intersection of the sets of dataTimes for all streams
"""
dataTimes = set([v.dataTime for v in self.obs])
for wrf in self.wrfs:
dataTimes = dataTimes.intersection([v.dataTime for v in wrf])
dataTimes = sorted(list(dataTimes))
return dataTimes
def backupMatrices(self):
self.obs.backupMatrices()
for wrf in self.wrfs:
wrf.backupMatrices()
def restoreMatrices(self):
self.obs.restoreMatrices()
for wrf in self.wrfs:
wrf.restoreMatrices()
############################################################################
# I/O's
def load(self, stream_key="all", verbose=False, **kwargs):
if stream_key == "all" or stream_key =="obs":
print "loading obs"
obs.load(**kwargs)
if stream_key == "all" or stream_key =="wrf" or stream_key=="wrfs":
print "loading wrfs"
for wrf in wrfs:
wrf.load(**kwargs)
def unload(self, stream_key="all", verbose=False, **kwargs):
if stream_key == "all" or stream_key =="obs":
print "unloading obs"
obs.unload(**kwargs)
if stream_key == "all" or stream_key =="wrf" or stream_key=="wrfs":
print "unloading wrfs"
for wrf in wrfs:
wrf.unload(**kwargs)
def makeVideo2(self, ordering, outputFolder=''):
"""
make video, with an ordering at each dataTime
ordering = [[1,2,3,5], [3,4,6,1], ...] - first for the first dataTime, second for the second dataTime, etc
"""
return mv.makeVideo( [self.obs] + self.wrfs, # [ds0, ds1, ds2, ds3, ds4, ...], a list of armor.pattern.DBZstream objects
panel_cols = 5, # number of colums in the panel
panel_rows = 5, # no need to be filled
fourcc = cv.CV_FOURCC('F', 'L', 'V', '1'),
fps = defaultFps,
extension= '.avi',
#fourcc = cv.CV_FOURCC('P', 'I', 'M', '1'),
outputFileName ="",
outputFolder=outputFolder,
saveFrames = True, # saving the frames as images
useCV2 = True,
ordering = ordering, # ordering of the models
)
def makeVideo1(self, ordering, outputFolder=''):
"""
make video, with a single ordering for each dataStream in its entirety
ordering = list, e.g. [2,3,4,5,1] <-- WRF2 goes first, then WRF3, WRF4, etc
"""
ordering = [ordering] * len(self.getAllDataTimes())
return self.makeVideo2(ordering, outputPath)
############################################################################
# analyses
def analyse(self, algorithm):
"""
input: algorithm
output: ordering at each dataTime
ordering = [[1,2,3,5], [3,4,6,1], ...] means WRF1, WRF2,WRF3, WRF5 for dataTime1; WRFs3,4,6,1, for the second dataTime, etc
"""
pass
def matching(self, algorithm, obsTime="", maxHourDiff=7, **kwargs):
"""
input:
algorithm - the function defining the algorithm of matching
algorithm(parameters): (obs, wrf) -> score (real number)
format of algorithm function: def alg1(a=pattern.a, ...., **kwargs):
obsTime - time at which obs is compared with the wrfs, e.g. "20140612.0200'
maxHourDiff - the maximal time difference (in hours) between obs and wrfs, e.g. 7 (hours)
kwargs - parameters for the algorithm
output:
ranking with scores and optimal timeshifts
2014-03-07
"""
if obsTime == "": # if the point for matching is not given, pick the first one
obsTime = self.obs[0].dataTime
ranking = []
obs = self.obs
wrfs = self.wrfs
for wrf in wrfs:
x = algorithm(obs, wrf, obsTime=obsTime, maxHourDiff=maxHourDiff, **kwargs)
score = x['score']
timeShift = x['timeShift']
ranking.append( {'wrf': wrf.name, 'timeShift': timeShift, #timeShift: in hours
'score': score,
'dataFolder': wrf.dataFolder,
'obsTime': obsTime,
'maxHourDiff': maxHourDiff # tag them along just in case
} ) #dataFolder = for potential disambiguation
ranking.sort(key=lambda v:v['score'], reverse=True)
return ranking
def filtering(self, algorithm, stream_key="all", name_key="", verbose=False, **kwargs):
"""
input:
algorithm - the function defining the algorithm of filtering
algorithm(parameters): changes a.matrix, a.name, no output given
format of algorithm function: def alg1(a=pattern.a, **kwargs):
stream_key - keyword for choosing the DBZstreams to be filtered
if it's "obs" we filter just all of the self.obs
if it's "wrf" or "wrfs" we filter just all of the self.wrfs
name_key - keyword for choosing the DBZ patterns to be filtered
kwargs - parameters for the algorithm
output:
ranking with scores and optimal timeshifts
2014-03-07
"""
obs = self.obs
wrfs = self.wrfs
# first filter the obs
if stream_key == "all" or stream_key == "obs" or stream_key == "OBS":
for a in obs:
if name_key in a.name:
algorithm(a, **kwargs) # key line
if verbose:
print a.name
if stream_key == "all" or stream_key == "wrf" or stream_key == "wrfs" \
or stream_key == "WRF" or stream_key == "WRFS" :
for wrf in wrfs:
for a in wrf:
if name_key in a.name:
algorithm(a, **kwargs) # key line
if verbose:
print a.name
############################################
# constants
# Backwards-compatible aliases: the old (typo'd) class name and a short form.
DataStreamSets = DataStreamSet #alias; # correcting a long-standing typo 2014-03-09
DSS = DataStreamSet # alias
"""
key example: kongrey
"""
from dataStreamTools import kongrey as kr
#compref = pattern.DBZstream(dataFolder= kr.obs_folder,
# #name="COMPREF.DBZ",
# name="",
# lowerLeftCornerLatitudeLongitude = kr.obs_lowerLeft ,
# upperRightCornerLatitudeLongitude = kr.obs_upperRight ,
# outputFolder= kr.summary_folder,
# imageFolder=kr.summary_folder,
# key1="", # keywords to pick out specific files
# key2="", # used only once in the __init__
# key3="",
# preload=False,
# imageExtension = '.png',
# dataExtension = '.txt',
# )
"""
print 'loading observations'
obs = kr.constructOBSstream(dumping=False)
print 'loading models',
wrfsFolder = kr.defaultWRFdumpsFolder # '/home/k/ARMOR/data/KONG-REY/summary/WRF[regridded]'
wrfs = []
for i in range(1,21):
print i,
wrf = pickle.load(open(wrfsFolder+'dbzstream' + ('0'+str(i))[-2:] + '.pydump'))
#wrf.setDataFolder(asdfasdf) # haven't defined this function in pattern.DBZstream yet
wrfs.append(wrf)
kongreyDSS = DSS(obs, *wrfs)
"""
print 'constructing kongreyDSS'
obs = ds(name="COMPREF.DBZ", dataFolder=defaultRootFolder + 'data/KONG-REY/OBS/')
wrfs = []
for i in range(1,21):
print i,
wrfName = name='WRF'+ ('0'+str(i))[-2:]
wrf = ds(name=wrfName, key1=wrfName,
dataFolder=defaultRootFolder + 'data/KONG-REY/summary/WRF[regridded]/')
wrfs.append(wrf)
kongreyDSS = DSS(obs, *wrfs)
def constructDSS(obsFolder, wrfsFolder):
obsName = obsFolder.split("/")[-1]
wrfsName = wrfsFolder.split("/")[-1]
print 'Constructing DSS from:', obsName, ",", wrfsName
print obsFolder
print wrfsFolder
obs = ds(name=obsName, dataFolder=obsFolder)
wrfs = []
for i in range(1,21):
print i,
wrfName = name='WRF'+ ('0'+str(i))[-2:]
wrf = ds(name=wrfName, key1=wrfName,
dataFolder=wrfsFolder)
wrfs.append(wrf)
dss = DSS(obs, *wrfs)
return dss
print "constructing march11 - march13 DSS objects"
march11 = constructDSS(dp.defaultRootFolder+"data/march2014/QPESUMS/",
dp.defaultRootFolder+"data/march2014/WRFEPS[regridded]/20140311/")
march11.name = "Rainband_11_March_2014"
march11.obs.list= [v for v in march11.obs.list if '20140311' in v.dataTime]
march12 = constructDSS(dp.defaultRootFolder+"data/march2014/QPESUMS/",
dp.defaultRootFolder+"data/march2014/WRFEPS[regridded]/20140312/")
march12.name = "Rainband_12_March_2014"
march12.obs.list= [v for v in march12.obs.list if '20140312' in v.dataTime]
march13 = constructDSS(dp.defaultRootFolder+"data/march2014/QPESUMS/",
dp.defaultRootFolder+"data/march2014/WRFEPS[regridded]/20140313/")
march13.name = "Rainband_13_March_2014"
march13.obs.list= [v for v in march13.obs.list if '20140313' in v.dataTime]
print "constructing may2014 DSS objects"
may19 = constructDSS(dp.defaultRootFolder+"data/may14/QPESUMS/",
dp.defaultRootFolder+"data/may14/WRFEPS19[regridded]/")
may19.name = "Rainband_19_May_2014"
may19.obs.list= [v for v in may19.obs.list if '20140519' in v.dataTime]
may20 = constructDSS(dp.defaultRootFolder+"data/may14/QPESUMS/",
dp.defaultRootFolder+"data/may14/WRFEPS20[regridded]/")
may20.name = "Rainband_20_May_2014"
may20.obs.list= [v for v in may20.obs.list if '20140520' in v.dataTime]
may21 = constructDSS(dp.defaultRootFolder+"data/may14/QPESUMS/",
dp.defaultRootFolder+"data/may14/WRFEPS21[regridded]/")
may21.name = "Rainband_21_May_2014"
may21.obs.list= [v for v in may21.obs.list if '20140521' in v.dataTime]
may22 = constructDSS(dp.defaultRootFolder+"data/may14/QPESUMS/",
dp.defaultRootFolder+"data/may14/WRFEPS22[regridded]/")
may22.name = "Rainband_22_May_2014"
may22.obs.list= [v for v in may22.obs.list if '20140522' in v.dataTime]
may23 = constructDSS(dp.defaultRootFolder+"data/may14/QPESUMS/",
dp.defaultRootFolder+"data/may14/WRFEPS23[regridded]/")
may23.name = "Rainband_23_May_2014"
may23.obs.list= [v for v in may23.obs.list if '20140523' in v.dataTime]
|
yaukwankiu/armor
|
pattern2.py
|
Python
|
cc0-1.0
| 14,030
|
# Day 14: Random Quote Machine
# Random Quote Machine - Program should print out quotes that have been stored randomly.
# You don't need to use any API. Store some quotes in your program,
# and print one at random each time the program executes.
import random

quotes = {
    0: {
        'quote': "Strive not to be a success, but rather to be of value.",
        'author': "Albert Einstein",
    },
    1: {
        'quote': "Two roads diverged in a wood, and I—I took the one less traveled by, \
And that has made all the difference.",
        'author': "Robert Frost",
    },
    2: {
        'quote': "I attribute my success to this: I never gave or took any excuse.",
        'author': "Florence Nightingale",
    },
    3: {
        'quote': "You miss 100% of the shots you don’t take.",
        'author': "Wayne Gretzky",
    },
    4: {
        'quote': "The most difficult thing is the decision to act, the rest is merely tenacity.",
        'author': "Amelia Earhart",
    },
    5: {
        'quote': "Every strike brings me closer to the next home run.",
        'author': "Babe Ruth",
    },
}
# Bug fix: random.randint's upper bound is INCLUSIVE, so randint(0, 6) raised
# a KeyError one time in seven (the dict only has keys 0..5).  Derive the
# bound from the dict so adding quotes keeps working.
random_number = random.randint(0, len(quotes) - 1)
quote = quotes[random_number]['quote']
author = quotes[random_number]['author']
print(quote)
print(author)
|
ConsonanceNg/100_Days_of_code_submissions
|
submissions/faeludire/Day_14/random_quote_machine.py
|
Python
|
mit
| 1,266
|
# Predict time series with a trained Keras GMM-RNN model (Python 2 script).
# Usage: python predict.py <data_dir>
import sys
import os
import numpy as np

# Folder of whitespace-separated time-series files, one series per file.
data_dir = sys.argv[1]
filenames = os.listdir(data_dir)
timeseries = []
# Normalisation statistics saved during training.
preprocessing = np.load('model/preprocessing.npz')
timeseries_means = preprocessing['means']
timeseries_stds = preprocessing['stds']
change = preprocessing['change']
print "Reading files from", data_dir
for filename in filenames:
    ts = np.genfromtxt(os.path.join(data_dir, filename))
    if ts.ndim == 1:  # csv file has only one column, ie one variable
        ts = ts[:, np.newaxis]
    timeseries.append(ts)
timeseries = np.array(timeseries)
if change:
    # Train/predict on first differences rather than raw values.
    timeseries = np.diff(timeseries, axis=1)
# TODO: Check that all time series have the same number of timesteps and dimensions
num_timeseries, num_timesteps, num_dims = timeseries.shape
print "Read {} time series with {} time steps and {} dimensions".format(num_timeseries, num_timesteps, num_dims)
# TODO: Make num stds as parameter for script.
def normalize(x):
    # z-score with the training statistics; nan_to_num guards zero stddev.
    return np.nan_to_num((x - timeseries_means) / timeseries_stds)
def denormalize(x):
    # Inverse of normalize().
    return x * timeseries_stds + timeseries_means
# TODO: This gives 0 values if stddev = 0.
timeseries = normalize(timeseries)
print "Loading model... (this can take a while)"
from keras.models import model_from_json
from gmm import GMMActivation, gmm_loss
with open('model/model.json') as model_file:
    model = model_from_json(model_file.read(), {'GMMActivation': GMMActivation, 'gmm_loss': gmm_loss})
model.load_weights('model/weights.h5')
# Number of Gaussian mixture components of the output layer.
num_mixture_components = model.get_config()['layers'][-1]['M']
# Predict next timesteps using the training data as input (i. e. not the predictions themselves!).
model.reset_states()
predicted = []
predicted.append(timeseries[:, 0])  # Take first timestep from training data so predicted and expected have the same dimensions.
print "Predicting..."
for i in range(num_timesteps - 1):
    # Network outputs concatenated [means | sds | weights] per mixture.
    pred_parameters = model.predict_on_batch(timeseries[:, i:i+1])[0]
    means = pred_parameters[:, :num_mixture_components * num_dims]
    sds = pred_parameters[:, num_mixture_components * num_dims:num_mixture_components * (num_dims + 1)]
    weights = pred_parameters[:, num_mixture_components * (num_dims + 1):]
    # Reshape arrays to allow broadcasting of means (3-dimensional vectors) and sds/weights (scalars).
    means = means.reshape(-1, num_mixture_components, num_dims)
    sds = sds[:, :, np.newaxis]
    weights = weights[:, :, np.newaxis]
    # Sample from each component, weight, and collapse the mixture axis.
    pred = weights * np.random.normal(means, sds)
    pred = np.sum(pred, axis=1)
    predicted.append(pred)
predicted_dir = 'predicted'
print "Saving predicted time series to", predicted_dir
predicted = np.array(predicted)
# (timestep, series, dim) -> (series, timestep, dim)
predicted = predicted.transpose((1, 0, 2))
predicted = denormalize(predicted)
for i, pred in enumerate(predicted):
    if change:
        # Undo the differencing: prepend a zero row and cumulatively sum.
        pred = np.append(np.zeros((1, num_dims)), np.cumsum(pred, axis=0), axis=0)
    np.savetxt('{}/{}.dat'.format(predicted_dir, i), pred)
|
jrieke/timeseries-rnn
|
predict.py
|
Python
|
mit
| 2,936
|
# -*- coding: utf-8 -*-
from django.contrib.auth import login
from django.shortcuts import redirect, get_object_or_404
from django.contrib.auth.decorators import login_required
from annoying.decorators import render_to
from django.contrib.auth.models import User
from Aluno.models import Aluno
from Professor.models import Professor, Monitor
@render_to('index.html')
def index(request):
    """Landing page: send authenticated users straight to the dashboard.

    For anonymous visitors, returns the context dict rendered into
    ``index.html`` by the ``render_to`` decorator.
    """
    # Idiom fix: compare with ``is not None`` and collapse the nested ifs.
    if request.user is not None and request.user.is_authenticated():
        return redirect('dashboard')
    return locals()
def aluno_dashboad(request,dic):
    """Populate *dic* with the student (aluno) dashboard context variables.

    Silently returns if the logged-in user has no related Aluno.
    """
    try:
        aluno = request.user.aluno_set.get()
    except Aluno.DoesNotExist:
        return
    avaliacoes_comecar = aluno.avaliacoes_iniciar()
    avaliacoes_andamento = aluno.avaliacoes_andamento()
    avaliacoes_passadas = aluno.avaliacoes_passadas()
    avaliacoes_futuras = aluno.avaliacoes_futuras()
    # locals() captures aluno, the avaliacoes_* variables and request;
    # 'dic' is popped so the dict does not contain itself.
    dic.update(locals())
    dic.pop('dic')
def monitor_dashboad(request,dic):
    """Populate *dic* with the monitor dashboard context variables.

    Silently returns if the logged-in user has no related Monitor.
    """
    try:
        monitor = request.user.monitor_set.get()
    except Monitor.DoesNotExist:
        return
    # locals() captures monitor and request; 'dic' is popped so the dict
    # does not contain itself.
    dic.update(locals())
    dic.pop('dic')
def professor_dashboad(request,dic):
    """Populate *dic* with the professor dashboard context variables.

    Silently returns if the logged-in user has no related Professor.
    (The original docstring said "monitor"; this helper handles professors.)
    """
    try:
        professor = request.user.professor_set.get()
    except Professor.DoesNotExist:
        return
    # locals() captures professor and request; 'dic' is popped so the dict
    # does not contain itself.
    dic.update(locals())
    dic.pop('dic')
@login_required
@render_to('usuarios/dashboard.html')
def dashboard(request):
    """Redirect the logged-in user to their profile page.

    Dead-code fix: the original built the full dashboard context (three DB
    lookups via the *_dashboad helpers) and then discarded it by
    unconditionally redirecting; the unused computation has been removed.
    The context-building behaviour lives on in ``perfil`` below.
    """
    return redirect('perfil')
@login_required
@render_to('usuarios/perfil/perfil.html')
def perfil(request):
    """Render the profile page with the combined dashboard context."""
    # Build a dict that each helper populates with the variables for its
    # user type (student / monitor / professor).
    dic = {}
    aluno_dashboad(request,dic)
    monitor_dashboad(request,dic)
    professor_dashboad(request,dic)
    # Merge the local names (request, dic) with the collected context and
    # hand the result to the render_to decorator.
    retorno = locals()
    retorno.update(dic)
    return retorno
|
arruda/amao
|
AMAO/apps/Core/views.py
|
Python
|
mit
| 2,266
|
# -*- coding: utf-8 -*-
# import numpy as _np
# import pandas as _pd
# import os as _os
# import atmPy.general.timeseries as _timeseries
# import atmPy.aerosols.physics.column_optical_properties as _column_optical_properties
import atmPy.general.measurement_site as _measurement_site
# import pathlib
# import warnings as _warnings
# from atmPy.general import measurement_site as _measurement_site
# from atmPy.radiation import solar as _solar
# Station metadata for the NOAA GML baseline observatories.
# Fields: 'lon'/'lat' in decimal degrees (west negative), 'alt' in
# metres, 'timezone' as the UTC offset used by the archive.
_locations = [{'name': 'Barrow',
               'state': 'AK',
               'abbreviation': ['BRW', ],
               'lon': -156.6114,
               'lat': 71.3230,
               'alt': 11,
               'timezone': 9},
              {'name': 'Mauna Loa',
               'state': 'HI',
               'abbreviation': ['MLO', ],
               'lon': -155.5763,
               'lat': 19.5362,
               'alt': 3397.,
               'timezone': 10},
              {'name': 'American Samoa',
               'state': 'Samoa',
               'abbreviation': ['SMO', ],
               'lon': -170.5644,
               'lat': 14.2474,
               'alt': 42.,
               'timezone': 11},
              # NOTE(review): placeholder entry -- empty names and
              # lon=59/lat=90 look like dummy values (South Pole?);
              # confirm before relying on it.
              {'name': '',
               'state': '',
               'abbreviation': ['', ''],
               'lon': 59,
               'lat': 90.00,
               'alt': 2840,
               'timezone': -12}]

# Network object exposed by this module.
network = _measurement_site.Network(_locations)
network.name = 'GML Observatory Operations (OBOP)'
|
hagne/atm-py
|
atmPy/data_archives/NOAA_ESRL_GMD_GRAD/baseline/baseline.py
|
Python
|
mit
| 1,433
|
input = """
a | b.
a :- b.
b :- a.
"""
output = """
{a, b}
"""
|
Yarrick13/hwasp
|
tests/asp/gringo/modelchecker.015.test.py
|
Python
|
apache-2.0
| 63
|
import collections
import math
from django.db import models
from django.urls import reverse
import data.models
class AndalusianStyle(object):
    """Mixin tagging models as belonging to the "andalusian" style and
    providing a lookup from a generic entity key to its model class."""

    def get_style(self):
        """Return the style identifier used in URL names and templates."""
        return "andalusian"

    def get_object_map(self, key):
        """Map an entity key (e.g. "recording") to the andalusian model
        class; raises KeyError for unknown keys."""
        object_map = {
            "performance": InstrumentPerformance,
            "release": Album,
            "artist": Artist,
            "instrument": Instrument,
            "sectionperformance": InstrumentSectionPerformance,
            "orchestraperformer": OrchestraPerformer,
            "recording": Recording,
            "orchestra": Orchestra,
            "section": Section,
        }
        return object_map[key]
class Orchestra(AndalusianStyle, data.models.BaseModel):
    """An orchestra (group), identified by MusicBrainz id, with members
    linked through OrchestraPerformer."""

    class Meta:
        ordering = ['id']

    mbid = models.UUIDField(blank=True, null=True)
    name = models.CharField(max_length=255)
    transliterated_name = models.CharField(max_length=255, blank=True)
    # Membership is annotated (instruments, director flag, dates)
    # through the OrchestraPerformer join model.
    group_members = models.ManyToManyField('Artist', blank=True, related_name='groups', through="OrchestraPerformer")

    def __str__(self):
        return self.name

    def get_absolute_url(self):
        """URL of this orchestra's page, keyed by MusicBrainz id."""
        viewname = "%s-orchestra" % (self.get_style(), )
        return reverse(viewname, args=[self.mbid])

    def get_musicbrainz_url(self):
        return "http://musicbrainz.org/artist/%s" % self.mbid

    def performers(self):
        """Artists who have performed in this orchestra."""
        IPClass = self.get_object_map("orchestraperformer")
        performances = IPClass.objects.filter(orchestra=self)
        perfs = [p.performer for p in performances]
        return perfs

    def recordings(self):
        """All recordings credited to this orchestra."""
        return self.recording_set.all()


class OrchestraAlias(models.Model):
    """Alternative name of an orchestra."""

    name = models.CharField(max_length=255)
    orchestra = models.ForeignKey("Orchestra", related_name="aliases", on_delete=models.CASCADE)

    def __str__(self):
        return self.name
class Artist(AndalusianStyle, data.models.BaseModel):
    """A performer in the andalusian collection; may belong to one or
    more orchestras via OrchestraPerformer."""

    class Meta:
        ordering = ['id']

    # Fallback image served when the artist has no photo.
    missing_image = "artist.jpg"

    GENDER_CHOICES = (
        ('M', 'Male'),
        ('F', 'Female')
    )
    name = models.CharField(max_length=200)
    transliterated_name = models.CharField(max_length=200, blank=True)
    mbid = models.UUIDField(blank=True, null=True)
    gender = models.CharField(max_length=1, choices=GENDER_CHOICES, blank=True, null=True)
    # Free-form begin/end dates (as found in MusicBrainz).
    begin = models.CharField(max_length=10, blank=True, null=True)
    end = models.CharField(max_length=10, blank=True, null=True)

    def __str__(self):
        return self.name

    def get_absolute_url(self):
        """URL of this artist's page, keyed by MusicBrainz id."""
        viewname = "%s-artist" % (self.get_style(), )
        return reverse(viewname, args=[self.mbid])

    def get_musicbrainz_url(self):
        return "http://musicbrainz.org/artist/%s" % self.mbid

    def recordings(self):
        """All recordings this artist performed on."""
        IPClass = self.get_object_map("performance")
        performances = IPClass.objects.filter(performer=self)
        recs = [p.recording for p in performances]
        return recs

    def performances(self, tab=None, nawba=None, mizan=None):
        # Not implemented yet.  Defaults changed from mutable ``[]`` to
        # ``None`` (shared-mutable-default pitfall); behaviour is
        # unchanged since the body never used them.
        pass

    def instruments(self):
        """Return the first distinct instrument this artist has played,
        or None.

        NOTE(review): the name is plural and a full list is built, but
        only ``insts[0]`` is returned -- confirm whether the whole list
        was intended.
        """
        insts = []
        for perf in self.instrumentperformance_set.all():
            # BUG FIX: the original tested ``perf.instrument.name not in
            # insts`` against a list of Instrument objects, which was
            # always true, so duplicates were appended.  The returned
            # first element is unaffected.
            if perf.instrument not in insts:
                insts.append(perf.instrument)
        if len(insts) > 0:
            return insts[0]
        return None

    def similar_artists(self):
        # Not implemented yet.
        pass

    def collaborating_artists(self):
        """Artists appearing on the same recordings as this one.

        Returns a list of ``(Artist, [shared recordings])`` pairs
        ordered by number of shared recordings, descending.
        """
        # Get all recordings
        # For each artist on the recordings (both types), add a counter
        # top 10 artist ids + the recordings they collaborate on
        c = collections.Counter()
        recordings = collections.defaultdict(set)
        for recording in self.recordings():
            for p in recording.performers():
                if p.id != self.id:
                    recordings[p.id].add(recording)
                    c[p.id] += 1
        return [(Artist.objects.get(pk=pk), list(recordings[pk])) for pk, count in c.most_common()]
class ArtistAlias(data.models.ArtistAlias):
    """Alternative name of an artist; fields inherited from the shared
    data-app model."""
    pass
class AlbumRecording(models.Model):
    """ Links a album to a recording with an explicit ordering """

    album = models.ForeignKey('Album', on_delete=models.CASCADE)
    recording = models.ForeignKey('Recording', on_delete=models.CASCADE)
    # The number that the track comes in the album. Numerical 1-n
    track = models.IntegerField()

    class Meta:
        ordering = ("track", )

    def __str__(self):
        return u"%s: %s from %s" % (self.track, self.recording, self.album)


class Album(AndalusianStyle, data.models.BaseModel):
    """A released album, credited to orchestras, with ordered tracks."""

    class Meta:
        ordering = ['id']

    missing_image = "album.jpg"  # fallback cover image
    mbid = models.UUIDField(blank=True, null=True)
    title = models.CharField(max_length=255)
    transliterated_title = models.CharField(max_length=255, blank=True)
    artists = models.ManyToManyField('Orchestra')
    # Track order comes from the AlbumRecording through model.
    recordings = models.ManyToManyField('Recording', through="AlbumRecording")
    director = models.ForeignKey('Artist', null=True, on_delete=models.CASCADE)

    def __str__(self):
        return self.title

    def get_absolute_url(self):
        """URL of this album's page, keyed by MusicBrainz release id."""
        viewname = "%s-album" % (self.get_style(), )
        return reverse(viewname, args=[self.mbid])

    def get_musicbrainz_url(self):
        return "http://musicbrainz.org/release/%s" % self.mbid
class Work(AndalusianStyle, data.models.BaseModel):
    """A composed work that recordings link to via RecordingWork."""

    class Meta:
        ordering = ['id']

    mbid = models.UUIDField(blank=True, null=True)
    title = models.CharField(max_length=255)
    transliterated_title = models.CharField(max_length=255, blank=True)

    def __str__(self):
        return self.title


class Genre(AndalusianStyle, data.models.BaseModel):
    """Musical genre of a recording."""

    class Meta:
        ordering = ['id']

    name = models.CharField(max_length=100, blank=True)
    transliterated_name = models.CharField(max_length=100, blank=True)

    def __str__(self):
        return self.name


class RecordingWork(models.Model):
    """Ordered link between a recording and a work."""

    work = models.ForeignKey('Work', on_delete=models.CASCADE)
    recording = models.ForeignKey('Recording', on_delete=models.CASCADE)
    sequence = models.IntegerField()  # position of the work in the recording

    class Meta:
        ordering = ("sequence", )

    def __str__(self):
        return u"%s: %s" % (self.sequence, self.work.title)


class RecordingManager(models.Manager):
    """Manager adding natural-key lookup by MusicBrainz id (used by
    Django serialization)."""

    def get_by_natural_key(self, mbid):
        return self.get(mbid=mbid)
class Recording(AndalusianStyle, data.models.BaseModel):
    """A single recorded track, linked to works, artists and poems."""

    class Meta:
        ordering = ['id']

    mbid = models.UUIDField(blank=True, null=True)
    works = models.ManyToManyField("Work", through="RecordingWork")
    artists = models.ManyToManyField("Artist", through="InstrumentPerformance")
    title = models.CharField(max_length=255)
    transliterated_title = models.CharField(max_length=255, blank=True)
    # Duration in milliseconds (divided by 1000 in length_format).
    length = models.IntegerField(blank=True, null=True)
    year = models.IntegerField(blank=True, null=True)
    genre = models.ForeignKey('Genre', null=True, on_delete=models.CASCADE)
    archive_url = models.CharField(max_length=255)
    musescore_url = models.CharField(max_length=255)
    poems = models.ManyToManyField("Poem", through="RecordingPoem")

    objects = RecordingManager()

    def natural_key(self):
        return (self.mbid,)

    def __str__(self):
        return u"%s" % self.title

    def performers(self):
        """All artists credited on this recording."""
        return self.artists.all()

    def length_format(self):
        """Return the duration formatted as H:MM:SS, or MM:SS when
        under an hour.

        NOTE(review): ``length`` is nullable; this raises TypeError
        when it is None -- confirm callers guard against that.
        """
        numsecs = self.length / 1000
        minutes = math.floor(numsecs / 60.0)
        hours = math.floor(minutes / 60.0)
        minutes = math.floor(minutes - hours * 60)
        seconds = math.floor(numsecs - hours * 3600 - minutes * 60)
        if hours:
            val = "%02d:%02d:%02d" % (hours, minutes, seconds)
        else:
            val = "%02d:%02d" % (minutes, seconds)
        return val

    def get_dict(self):
        """Summary dict consumed by the search/browse front end."""
        release = self.album_set.first()
        title = None
        if release:
            title = release.title
        image = "/media/images/noconcert.jpg"
        # values_list returns 1-tuples; the flattening below unwraps them.
        artists = self.artists.values_list('name').all()[:3]
        return {
            "concert": title,
            "mainArtists": [item for sublist in artists for item in sublist],
            "name": self.title,
            "image": image,
            "linkToRecording": reverse("andalusian-recording", args=[self.mbid]),
            "collaborators": [],
            "selectedArtists": ""
        }
class Instrument(AndalusianStyle, data.models.BaseModel):
    """A musical instrument."""

    class Meta:
        ordering = ['id']

    percussion = models.BooleanField(default=False)
    name = models.CharField(max_length=50)
    mbid = models.UUIDField(blank=True, null=True)

    def __str__(self):
        return self.name


class InstrumentPerformance(models.Model):
    """Through model: an artist playing an instrument on a recording."""

    recording = models.ForeignKey('Recording', on_delete=models.CASCADE)
    performer = models.ForeignKey('Artist', on_delete=models.CASCADE)
    instrument = models.ForeignKey('Instrument', on_delete=models.CASCADE)
    lead = models.BooleanField(default=False)

    def __str__(self):
        return u"%s playing %s on %s" % (self.performer, self.instrument, self.recording)


class OrchestraPerformer(models.Model):
    """Through model: an artist's membership in an orchestra, with
    instruments, director flag and free-form membership dates."""

    orchestra = models.ForeignKey('Orchestra', on_delete=models.CASCADE)
    performer = models.ForeignKey('Artist', on_delete=models.CASCADE)
    instruments = models.ManyToManyField('Instrument')
    director = models.BooleanField(default=False)
    begin = models.CharField(max_length=10, blank=True, null=True)
    end = models.CharField(max_length=10, blank=True, null=True)

    def __str__(self):
        ret = u"%s played %s on %s" % (self.performer, u", ".join([str(i) for i in self.instruments.all()]), self.orchestra)
        if self.director:
            ret += u". Moreover, %s acted as the director of this orchestra" % self.performer
        if self.begin:
            ret += u" from %s" % self.begin
        if self.end:
            ret += u" until %s" % self.end
        return ret
class Tab(data.models.BaseModel):
    """A tab' (melodic mode) in the andalusian tradition."""

    class Meta:
        ordering = ['display_order', 'id']

    uuid = models.UUIDField(db_index=True, null=True)
    name = models.TextField()
    transliterated_name = models.TextField()
    # Set to 1 to force some Tab to show at the bottom when listing them
    display_order = models.IntegerField(null=False, default=0)

    def __str__(self):
        return self.name


class Nawba(data.models.BaseModel):
    """A nawba (suite) in the andalusian tradition."""

    class Meta:
        ordering = ['display_order', 'id']

    uuid = models.UUIDField(db_index=True, null=True)
    name = models.TextField()
    transliterated_name = models.TextField()
    # Set to 1 to force some Nawba to show at the bottom when listing them
    display_order = models.IntegerField(null=False, default=0)

    def __str__(self):
        return self.name


class Mizan(data.models.BaseModel):
    """A mizan (rhythmic mode) in the andalusian tradition."""

    class Meta:
        ordering = ['display_order', 'id']

    uuid = models.UUIDField(db_index=True, null=True)
    name = models.TextField()
    transliterated_name = models.TextField()
    # Set to 1 to force some Mizan to show at the bottom when listing them
    display_order = models.IntegerField(null=False, default=0)

    def __str__(self):
        return self.name


class Form(data.models.BaseModel):
    """A form (structural type) in the andalusian tradition."""

    class Meta:
        ordering = ['display_order', 'id']

    uuid = models.UUIDField(db_index=True, null=True)
    name = models.TextField()
    transliterated_name = models.TextField()
    # Set to 1 to force some Form to show at the bottom when listing them
    display_order = models.IntegerField(null=False, default=0)

    def __str__(self):
        return self.name
class Section(AndalusianStyle, data.models.BaseModel):
    """A time-delimited segment of a recording, annotated with
    tab'/nawba/mizan/form."""

    recording = models.ForeignKey('Recording', on_delete=models.CASCADE)
    start_time = models.TimeField(blank=True, null=True)
    end_time = models.TimeField(blank=True, null=True)
    tab = models.ForeignKey('Tab', blank=True, null=True, on_delete=models.CASCADE)
    nawba = models.ForeignKey('Nawba', blank=True, null=True, on_delete=models.CASCADE)
    mizan = models.ForeignKey('Mizan', blank=True, null=True, on_delete=models.CASCADE)
    form = models.ForeignKey('Form', blank=True, null=True, on_delete=models.CASCADE)

    def __str__(self):
        return u"Section of %s (from %s to %s), a %s from mizan %s of tab' %s, nawba %s" % \
            (self.recording, self.start_time, self.end_time,
             self.form, self.mizan, self.tab, self.nawba)


class InstrumentSectionPerformance(models.Model):
    """Through model: an artist playing an instrument on one section of
    a recording."""

    section = models.ForeignKey('Section', on_delete=models.CASCADE)
    performer = models.ForeignKey('Artist', on_delete=models.CASCADE)
    instrument = models.ForeignKey('Instrument', on_delete=models.CASCADE)
    lead = models.BooleanField(default=False)

    def __str__(self):
        return u"%s playing %s on section (%s, %s) of recording %s" % \
            (self.performer, self.instrument, self.section.start_time, self.section.end_time, self.section.recording)
class Sanaa(data.models.BaseModel):
    """A san'a (sung piece) in the andalusian tradition."""

    class Meta:
        ordering = ['id']

    title = models.CharField(max_length=255)
    transliterated_title = models.CharField(max_length=255, blank=True)

    def __str__(self):
        return self.title


class PoemType(models.Model):
    """Classification of a poem (lookup table)."""

    type = models.CharField(max_length=50)

    def __str__(self):
        return self.type
class Poem(data.models.BaseModel):
    """A poem (text) that recordings link to via RecordingPoem."""

    class Meta:
        ordering = ['id']

    identifier = models.CharField(max_length=100, blank=True, null=True)
    first_words = models.CharField(max_length=255, blank=True, null=True)
    transliterated_first_words = models.CharField(max_length=255, blank=True, null=True)
    type = models.ForeignKey(PoemType, blank=True, null=True, on_delete=models.CASCADE)
    text = models.TextField()
    transliterated_text = models.TextField(blank=True)
    title = models.CharField(max_length=255, blank=True, null=True)
    transliterated_title = models.CharField(max_length=255, blank=True, null=True)

    def __str__(self):
        # BUG FIX: ``identifier`` is nullable and __str__ must return a
        # str -- returning None raised TypeError; fall back to "".
        return self.identifier or ""
class RecordingPoem(models.Model):
    """Ordered link between a recording and a poem."""

    recording = models.ForeignKey('Recording', on_delete=models.CASCADE)
    poem = models.ForeignKey('Poem', on_delete=models.CASCADE)
    order_number = models.IntegerField(blank=True, null=True)  # position in the recording
|
MTG/dunya
|
andalusian/models.py
|
Python
|
agpl-3.0
| 14,269
|
import setuptools
import glob
import os
with open("README.rst", "r") as fh:
long_description = fh.read()
datafiles = [('fixgw/config', ['config/default.yaml', 'config/default.db', 'config/default.ini','config/fg_172.ini']),
('fixgw/config/canfix', ['config/canfix/map.yaml']),
# ('share/fixgw/doc', ['doc/_build/FIXGateway-html.tar.gz', 'doc/_build/latex/FIXGateway.pdf']),
('fixgw/share/fixgw', ['fixgw/plugins/fgfs/fix_fgfs.xml']),
]
setuptools.setup(
name="fixgw",
version="0.1.0",
author="Phil Birkelbach",
author_email="phil@petrasoft.net",
description="FIX-Gateway: Gateway software for the Flight Information eXchange protocols",
long_description=long_description,
#long_description_content_type="text/x-rst",
url="https://github.com/makerplane/FIX-Gateway",
packages=setuptools.find_packages(exclude=["tests.*", "tests"]),
#package_data = {'fixgw':['config/*']},
install_requires = ['pyyaml','python-daemon'],
#data_files = datafiles,
#scripts = ['bin/fixgw', 'bin/fixgwc'],
#package_data= {'fixgw': ["config/default.yaml"]},
include_package_data= True,
entry_points = {
'console_scripts': ['fixgw=fixgw.server:main', 'fixgwc=fixgw.client:main'],
},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: GNU General Public License v2 (GPLv2)"
"Operating System :: POSIX :: Linux",
],
test_suite = 'tests',
)
|
birkelbach/FIX-Gateway
|
setup.py
|
Python
|
gpl-2.0
| 1,501
|
# Copyright 2015 Michael Rice <michael@michaelrice.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import unittest
from steel_pigs.tests import PigTests
# Make the project root importable when the tests are run directly
# from this directory.
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
os.sys.path.insert(0, parentdir)

import steel_pigs.webapp
class TestFlaskApp(PigTests):
    """End-to-end tests for the steel_pigs Flask endpoints.

    NOTE(review): the ``assertIn`` checks compare ``str`` against
    ``rv.data``, which is ``bytes`` on Python 3 -- these tests look
    Python 2 era; confirm before porting.
    """

    @classmethod
    def setUpClass(cls):
        # Seed the app with one known server entry shared by all tests.
        steel_pigs.webapp._add_server_data(cls._create_entry_data())

    def setUp(self):
        steel_pigs.webapp.app.config['TESTING'] = True
        self.app = steel_pigs.webapp.app.test_client()

    def test_hardware_fails_with_412_when_missing_prop(self):
        # 412 Precondition Failed when required query params are absent.
        rv = self.app.get("/hardware")
        self.assertEqual(rv.status_code, 412)

    def test_hardware_serves_proper_ipxe_script(self):
        rv = self.app.get("/hardware?manufacturer=Dell%20&product=r%20810")
        self.assertIn("r810", rv.data)

    def test_versions_json(self):
        rv = self.app.get("/versions")
        self.assertEqual(rv.status_code, 200)
        self.assertEqual(rv.content_type, "application/json")

    def test_set_operational_status_fails_when_missing_required_args(self):
        rv = self.app.get("/update/opstatus")
        self.assertEqual(rv.status_code, 412)

    def test_set_operational_status_works_as_expected(self):
        rv = self.app.get("/update/opstatus?server_number=555121&opstatus=kicking")
        self.assertEqual(rv.content_type, "application/json")
        data = json.loads(rv.data)
        self.assertEqual(data["operation"], "success")

    def test_set_boot_os_fails_with_missing_props(self):
        rv = self.app.get("/update/os")
        self.assertEqual(rv.status_code, 412)

    def test_set_boot_os_success_with_valid_params(self):
        rv = self.app.get("/update/os?boot_os=Fedora&server_number=555121")
        self.assertEqual(rv.content_type, "application/json")
        data = json.loads(rv.data)
        self.assertEqual(data["operation"], "success")

    def test_update_boot_status_fails_with_missing_params(self):
        rv = self.app.get("/update/status")
        self.assertEqual(rv.status_code, 412)

    def test_update_boot_status_success_with_valid_params(self):
        rv = self.app.get("/update/status?server_number=555121&boot_status=provision")
        self.assertEqual(rv.content_type, "application/json")
        data = json.loads(rv.data)
        self.assertEqual(data["operation"], "success")

    def test_get_versions_ipxe(self):
        rv = self.app.get("/versions/ipxe")
        self.assertEqual(rv.status_code, 200)
        self.assertIn("set latest_version 4", rv.data)


if __name__ == '__main__':
    unittest.main()
|
virtdevninja/steel_pigs
|
steel_pigs/tests/test_flask_app.py
|
Python
|
apache-2.0
| 3,200
|
"""
WSGI config for cloudlynt project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os

# Must be set before any Django machinery is imported.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cloudlynt.settings")

# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()

# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
drunken-pypers/cloudlynt
|
cloudlynt/wsgi.py
|
Python
|
mit
| 1,140
|
__version__ = "0.0.1a"
|
kevin-brown/drf-nested-viewsets
|
rest_nested_viewsets/__init__.py
|
Python
|
mit
| 23
|
"""
NetCDF reader/writer module.
This module is used to read and create NetCDF files. NetCDF files are
accessed through the `netcdf_file` object. Data written to and from NetCDF
files are contained in `netcdf_variable` objects. Attributes are given
as member variables of the `netcdf_file` and `netcdf_variable` objects.
This module implements the Scientific.IO.NetCDF API to read and create
NetCDF files. The same API is also used in the PyNIO and pynetcdf
modules, allowing these modules to be used interchangeably when working
with NetCDF files.
"""
from __future__ import division, print_function, absolute_import
#TODO:
# * properly implement ``_FillValue``.
# * implement Jeff Whitaker's patch for masked variables.
# * fix character variables.
# * implement PAGESIZE for Python 2.6?
#The Scientific.IO.NetCDF API allows attributes to be added directly to
#instances of ``netcdf_file`` and ``netcdf_variable``. To differentiate
#between user-set attributes and instance attributes, user-set attributes
#are automatically stored in the ``_attributes`` attribute by overloading
#``__setattr__``. This is the reason why the code sometimes uses
#``obj.__dict__['key'] = value``, instead of simply ``obj.key = value``;
#otherwise the key would be inserted into userspace attributes.
__all__ = ['netcdf_file']
from operator import mul
from mmap import mmap, ACCESS_READ
import numpy as np
from numpy.compat import asbytes, asstr
from numpy import fromstring, ndarray, dtype, empty, array, asarray
from numpy import little_endian as LITTLE_ENDIAN
from functools import reduce
from scipy.lib.six import integer_types
# 4-byte tag constants from the NetCDF classic file format header.
# ABSENT marks an empty dim/att/var list (zero tag plus zero count).
ABSENT = b'\x00\x00\x00\x00\x00\x00\x00\x00'
ZERO = b'\x00\x00\x00\x00'
NC_BYTE = b'\x00\x00\x00\x01'
NC_CHAR = b'\x00\x00\x00\x02'
NC_SHORT = b'\x00\x00\x00\x03'
NC_INT = b'\x00\x00\x00\x04'
NC_FLOAT = b'\x00\x00\x00\x05'
NC_DOUBLE = b'\x00\x00\x00\x06'
NC_DIMENSION = b'\x00\x00\x00\n'
NC_VARIABLE = b'\x00\x00\x00\x0b'
NC_ATTRIBUTE = b'\x00\x00\x00\x0c'

# NetCDF external type tag -> (numpy typecode, size in bytes).
TYPEMAP = {NC_BYTE: ('b', 1),
           NC_CHAR: ('c', 1),
           NC_SHORT: ('h', 2),
           NC_INT: ('i', 4),
           NC_FLOAT: ('f', 4),
           NC_DOUBLE: ('d', 8)}

# (typecode, size) -> NetCDF type tag, the inverse of TYPEMAP plus
# aliases for dtypes produced when guessing from attribute values.
REVERSE = {('b', 1): NC_BYTE,
           ('B', 1): NC_CHAR,
           ('c', 1): NC_CHAR,
           ('h', 2): NC_SHORT,
           ('i', 4): NC_INT,
           ('f', 4): NC_FLOAT,
           ('d', 8): NC_DOUBLE,
           # these come from asarray(1).dtype.char and asarray('foo').dtype.char,
           # used when getting the types from generic attributes.
           ('l', 4): NC_INT,
           ('S', 1): NC_CHAR}
class netcdf_file(object):
"""
A file object for NetCDF data.
A `netcdf_file` object has two standard attributes: `dimensions` and
`variables`. The values of both are dictionaries, mapping dimension
names to their associated lengths and variable names to variables,
respectively. Application programs should never modify these
dictionaries.
All other attributes correspond to global attributes defined in the
NetCDF file. Global file attributes are created by assigning to an
attribute of the `netcdf_file` object.
Parameters
----------
filename : string or file-like
string -> filename
mode : {'r', 'w'}, optional
read-write mode, default is 'r'
mmap : None or bool, optional
Whether to mmap `filename` when reading. Default is True
when `filename` is a file name, False when `filename` is a
file-like object
version : {1, 2}, optional
version of netcdf to read / write, where 1 means *Classic
format* and 2 means *64-bit offset format*. Default is 1. See
`here <http://www.unidata.ucar.edu/software/netcdf/docs/netcdf/Which-Format.html>`_
for more info.
Notes
-----
The major advantage of this module over other modules is that it doesn't
require the code to be linked to the NetCDF libraries. This module is
derived from `pupynere <https://bitbucket.org/robertodealmeida/pupynere/>`_.
NetCDF files are a self-describing binary data format. The file contains
metadata that describes the dimensions and variables in the file. More
details about NetCDF files can be found `here
<http://www.unidata.ucar.edu/software/netcdf/docs/netcdf.html>`_. There
are three main sections to a NetCDF data structure:
1. Dimensions
2. Variables
3. Attributes
The dimensions section records the name and length of each dimension used
by the variables. The variables would then indicate which dimensions it
uses and any attributes such as data units, along with containing the data
values for the variable. It is good practice to include a
variable that is the same name as a dimension to provide the values for
that axes. Lastly, the attributes section would contain additional
information such as the name of the file creator or the instrument used to
collect the data.
When writing data to a NetCDF file, there is often the need to indicate the
'record dimension'. A record dimension is the unbounded dimension for a
variable. For example, a temperature variable may have dimensions of
latitude, longitude and time. If one wants to add more temperature data to
the NetCDF file as time progresses, then the temperature variable should
have the time dimension flagged as the record dimension.
In addition, the NetCDF file header contains the position of the data in
the file, so access can be done in an efficient manner without loading
unnecessary data into memory. It uses the ``mmap`` module to create
Numpy arrays mapped to the data on disk, for the same purpose.
Examples
--------
To create a NetCDF file:
>>> from scipy.io import netcdf
>>> f = netcdf.netcdf_file('simple.nc', 'w')
>>> f.history = 'Created for a test'
>>> f.createDimension('time', 10)
>>> time = f.createVariable('time', 'i', ('time',))
>>> time[:] = np.arange(10)
>>> time.units = 'days since 2008-01-01'
>>> f.close()
Note the assignment of ``range(10)`` to ``time[:]``. Exposing the slice
of the time variable allows for the data to be set in the object, rather
than letting ``range(10)`` overwrite the ``time`` variable.
To read the NetCDF file we just created:
>>> from scipy.io import netcdf
>>> f = netcdf.netcdf_file('simple.nc', 'r')
>>> print(f.history)
Created for a test
>>> time = f.variables['time']
>>> print(time.units)
days since 2008-01-01
>>> print(time.shape)
(10,)
>>> print(time[-1])
9
>>> f.close()
A NetCDF file can also be used as context manager:
>>> from scipy.io import netcdf
>>> with netcdf.netcdf_file('simple.nc', 'r') as f:
>>> print(f.history)
Created for a test
"""
    def __init__(self, filename, mode='r', mmap=None, version=1):
        """Initialize netcdf_file from fileobj (str or file-like)."""
        if hasattr(filename, 'seek'):  # file-like
            self.fp = filename
            self.filename = 'None'
            if mmap is None:
                mmap = False
            elif mmap and not hasattr(filename, 'fileno'):
                raise ValueError('Cannot use file object for mmap')
        else:  # maybe it's a string
            self.filename = filename
            # Always binary mode; NetCDF is a binary format.
            self.fp = open(self.filename, '%sb' % mode)
            if mmap is None:
                mmap = True
        self.use_mmap = mmap
        self.version_byte = version  # 1 = classic, 2 = 64-bit offset

        if not mode in 'rw':
            raise ValueError("Mode must be either 'r' or 'w'.")
        self.mode = mode

        self.dimensions = {}   # dim name -> length (None = record dim)
        self.variables = {}    # var name -> netcdf_variable
        self._dims = []        # dimension names in file order
        self._recs = 0         # number of records written/read
        self._recsize = 0      # bytes per record across record variables
        self._attributes = {}  # user-set global attributes

        if mode == 'r':
            self._read()
    def __setattr__(self, attr, value):
        # Store user defined attributes in a separate dict,
        # so we can save them to file later.
        try:
            self._attributes[attr] = value
        except AttributeError:
            # During __init__, _attributes does not exist yet.
            pass
        self.__dict__[attr] = value
    def close(self):
        """Closes the NetCDF file."""
        if not self.fp.closed:
            try:
                # Write pending data (no-op in read mode).
                self.flush()
            finally:
                self.fp.close()
    # Flush-and-close on garbage collection as well.
    __del__ = close
    def __enter__(self):
        """Support use as a context manager."""
        return self

    def __exit__(self, type, value, traceback):
        # Close (and flush, in write mode) on exit regardless of errors.
        self.close()
    def createDimension(self, name, length):
        """
        Adds a dimension to the Dimension section of the NetCDF data structure.

        Note that this function merely adds a new dimension that the variables can
        reference.  The values for the dimension, if desired, should be added as
        a variable using `createVariable`, referring to this dimension.

        Parameters
        ----------
        name : str
            Name of the dimension (Eg, 'lat' or 'time').
        length : int
            Length of the dimension.  Use ``None`` for the (single)
            unlimited/record dimension.

        See Also
        --------
        createVariable

        """
        self.dimensions[name] = length
        self._dims.append(name)  # keep insertion order for the file header
    def createVariable(self, name, type, dimensions):
        """
        Create an empty variable for the `netcdf_file` object, specifying its data
        type and the dimensions it uses.

        Parameters
        ----------
        name : str
            Name of the new variable.
        type : dtype or str
            Data type of the variable.
        dimensions : sequence of str
            List of the dimension names used by the variable, in the desired order.
            All names must have been registered with `createDimension` first.

        Returns
        -------
        variable : netcdf_variable
            The newly created ``netcdf_variable`` object.
            This object has also been added to the `netcdf_file` object as well.

        See Also
        --------
        createDimension

        Notes
        -----
        Any dimensions to be used by the variable should already exist in the
        NetCDF data structure or should be created by `createDimension` prior to
        creating the NetCDF variable.

        """
        shape = tuple([self.dimensions[dim] for dim in dimensions])
        shape_ = tuple([dim or 0 for dim in shape])  # replace None with 0 for numpy

        type = dtype(type)
        typecode, size = type.char, type.itemsize
        if (typecode, size) not in REVERSE:
            raise ValueError("NetCDF 3 does not support type %s" % type)
        # Convert to big endian always for NetCDF 3 (the on-disk byte order).
        data = empty(shape_, dtype=type.newbyteorder("B"))

        self.variables[name] = netcdf_variable(data, typecode, size, shape, dimensions)
        return self.variables[name]
def flush(self):
"""
Perform a sync-to-disk flush if the `netcdf_file` object is in write mode.
See Also
--------
sync : Identical function
"""
if hasattr(self, 'mode') and self.mode is 'w':
self._write()
sync = flush
    def _write(self):
        # Emit magic + version byte, then the header sections and data.
        self.fp.seek(0)
        self.fp.write(b'CDF')
        self.fp.write(array(self.version_byte, '>b').tostring())

        # Write headers and data.
        self._write_numrecs()
        self._write_dim_array()
        self._write_gatt_array()
        self._write_var_array()

    def _write_numrecs(self):
        # Get highest record count from all record variables.
        for var in self.variables.values():
            if var.isrec and len(var.data) > self._recs:
                # Bypass the overridden __setattr__ so _recs is not
                # recorded as a user attribute.
                self.__dict__['_recs'] = len(var.data)
        self._pack_int(self._recs)

    def _write_dim_array(self):
        # Dimension list: tag, count, then (name, length) pairs.
        if self.dimensions:
            self.fp.write(NC_DIMENSION)
            self._pack_int(len(self.dimensions))
            for name in self._dims:
                self._pack_string(name)
                length = self.dimensions[name]
                self._pack_int(length or 0)  # replace None with 0 for record dimension
        else:
            self.fp.write(ABSENT)

    def _write_gatt_array(self):
        # Global attributes share the generic attribute-array layout.
        self._write_att_array(self._attributes)

    def _write_att_array(self, attributes):
        # Attribute list: tag, count, then (name, typed values) pairs.
        if attributes:
            self.fp.write(NC_ATTRIBUTE)
            self._pack_int(len(attributes))
            for name, values in attributes.items():
                self._pack_string(name)
                self._write_values(values)
        else:
            self.fp.write(ABSENT)

    def _write_var_array(self):
        if self.variables:
            self.fp.write(NC_VARIABLE)
            self._pack_int(len(self.variables))

            # Sort variables non-recs first, then recs.  We use a DSU
            # since some people use pupynere with Python 2.3.x.
            deco = [(v._shape and not v.isrec, k) for (k, v) in self.variables.items()]
            deco.sort()
            variables = [k for (unused, k) in deco][::-1]

            # Set the metadata for all variables.
            for name in variables:
                self._write_var_metadata(name)
            # Now that we have the metadata, we know the vsize of
            # each record variable, so we can calculate recsize.
            self.__dict__['_recsize'] = sum([
                var._vsize for var in self.variables.values()
                if var.isrec])
            # Set the data for all variables.
            for name in variables:
                self._write_var_data(name)
        else:
            self.fp.write(ABSENT)

    def _write_var_metadata(self, name):
        # One variable header: name, dim ids, attributes, type, vsize, begin.
        var = self.variables[name]

        self._pack_string(name)
        self._pack_int(len(var.dimensions))
        for dimname in var.dimensions:
            dimid = self._dims.index(dimname)
            self._pack_int(dimid)

        self._write_att_array(var._attributes)

        nc_type = REVERSE[var.typecode(), var.itemsize()]
        self.fp.write(asbytes(nc_type))

        if not var.isrec:
            # Data size rounded up to a 4-byte boundary.
            vsize = var.data.size * var.data.itemsize
            vsize += -vsize % 4
        else:  # record variable
            try:
                vsize = var.data[0].size * var.data.itemsize
            except IndexError:
                vsize = 0
            rec_vars = len([var for var in self.variables.values()
                            if var.isrec])
            if rec_vars > 1:
                # Pad each record slice only when records interleave.
                vsize += -vsize % 4
        self.variables[name].__dict__['_vsize'] = vsize
        self._pack_int(vsize)

        # Pack a bogus begin, and set the real value later.
        self.variables[name].__dict__['_begin'] = self.fp.tell()
        self._pack_begin(0)
def _write_var_data(self, name):
var = self.variables[name]
# Set begin in file header.
the_beguine = self.fp.tell()
self.fp.seek(var._begin)
self._pack_begin(the_beguine)
self.fp.seek(the_beguine)
# Write data.
if not var.isrec:
self.fp.write(var.data.tostring())
count = var.data.size * var.data.itemsize
self.fp.write(b'0' * (var._vsize - count))
else: # record variable
# Handle rec vars with shape[0] < nrecs.
if self._recs > len(var.data):
shape = (self._recs,) + var.data.shape[1:]
var.data.resize(shape)
pos0 = pos = self.fp.tell()
for rec in var.data:
# Apparently scalars cannot be converted to big endian. If we
# try to convert a ``=i4`` scalar to, say, '>i4' the dtype
# will remain as ``=i4``.
if not rec.shape and (rec.dtype.byteorder == '<' or
(rec.dtype.byteorder == '=' and LITTLE_ENDIAN)):
rec = rec.byteswap()
self.fp.write(rec.tostring())
# Padding
count = rec.size * rec.itemsize
self.fp.write(b'0' * (var._vsize - count))
pos += self._recsize
self.fp.seek(pos)
self.fp.seek(pos0 + var._vsize)
def _write_values(self, values):
    """Write an attribute value array: NetCDF type tag, element count,
    the big-endian data, then NUL padding to a 4-byte boundary.

    Fix: padding is now NUL bytes (``b'\\x00'``) per the NetCDF classic
    format; the previous code padded with ASCII ``'0'``.
    """
    if hasattr(values, 'dtype'):
        # Already a numpy array: map its dtype straight to a type tag.
        nc_type = REVERSE[values.dtype.char, values.dtype.itemsize]
    else:
        # Infer the NetCDF type from a sample Python value.
        types = [(t, NC_INT) for t in integer_types]
        types += [
            (float, NC_FLOAT),
            (str, NC_CHAR),
        ]
        try:
            sample = values[0]
        except TypeError:
            sample = values  # scalar, not a sequence
        for class_, nc_type in types:
            if isinstance(sample, class_):
                break

    typecode, size = TYPEMAP[nc_type]
    dtype_ = '>%s' % typecode  # always big-endian on disk

    values = asarray(values, dtype=dtype_)

    self.fp.write(asbytes(nc_type))

    if values.dtype.char == 'S':
        # Character data: the element count is the string length.
        nelems = values.itemsize
    else:
        nelems = values.size
    self._pack_int(nelems)

    # Scalars may silently keep native byte order after astype/asarray;
    # byteswap explicitly when the host is little-endian.
    if not values.shape and (values.dtype.byteorder == '<' or
            (values.dtype.byteorder == '=' and LITTLE_ENDIAN)):
        values = values.byteswap()
    self.fp.write(values.tostring())
    count = values.size * values.itemsize
    self.fp.write(b'\x00' * (-count % 4))  # pad to 4-byte boundary
def _read(self):
    """Parse the file: check the magic/version bytes, then read the
    header sections and populate dimensions, attributes and variables."""
    # A NetCDF classic file starts with the three bytes 'CDF'.
    magic = self.fp.read(3)
    if magic != b'CDF':
        raise TypeError("Error: %s is not a valid NetCDF 3 file" %
                        self.filename)
    # Store via __dict__ to bypass __setattr__ bookkeeping.
    self.__dict__['version_byte'] = fromstring(self.fp.read(1), '>b')[0]

    # Header sections, in on-disk order.
    for read_section in (self._read_numrecs, self._read_dim_array,
                         self._read_gatt_array, self._read_var_array):
        read_section()
def _read_numrecs(self):
    # Number of records along the unlimited dimension.  Stored via
    # __dict__ to bypass __setattr__ -- presumably so it is not
    # recorded as a file attribute (the enclosing class is defined
    # outside this view; confirm there).
    self.__dict__['_recs'] = self._unpack_int()
def _read_dim_array(self):
    """Read the dimension list from the header into ``self.dimensions``
    (name -> length) and ``self._dims`` (names, in file order)."""
    header = self.fp.read(4)
    if header not in (ZERO, NC_DIMENSION):
        raise ValueError("Unexpected header.")
    n_dims = self._unpack_int()
    for _ in range(n_dims):
        dim_name = asstr(self._unpack_string())
        # A stored length of zero marks the (unlimited) record dimension.
        dim_len = self._unpack_int() or None
        self.dimensions[dim_name] = dim_len
        self._dims.append(dim_name)  # preserve on-disk order
def _read_gatt_array(self):
    """Read the global attributes and attach each one to the file object."""
    attrs = self._read_att_array()
    for attr_name in attrs:
        # setattr routes through __setattr__, which records the value
        # for writing back to file.
        setattr(self, attr_name, attrs[attr_name])
def _read_att_array(self):
    """Read one attribute array from the header and return it as a dict
    mapping attribute name to value."""
    header = self.fp.read(4)
    if header not in (ZERO, NC_ATTRIBUTE):
        raise ValueError("Unexpected header.")
    attributes = {}
    for _ in range(self._unpack_int()):
        # Name first, then the typed value -- the read order matters.
        attr_name = asstr(self._unpack_string())
        attributes[attr_name] = self._read_values()
    return attributes
def _read_var_array(self):
    """Read all variable header entries and attach their data.

    Non-record variables are read (or memory-mapped) individually;
    all record variables share a single structured "record array"
    spanning the unlimited dimension, built after the loop.
    """
    header = self.fp.read(4)
    if not header in [ZERO, NC_VARIABLE]:
        raise ValueError("Unexpected header.")

    begin = 0
    # Structured-dtype description accumulated for the record variables.
    dtypes = {'names': [], 'formats': []}
    rec_vars = []
    count = self._unpack_int()
    for var in range(count):
        (name, dimensions, shape, attributes,
         typecode, size, dtype_, begin_, vsize) = self._read_var()
        # http://www.unidata.ucar.edu/software/netcdf/docs/netcdf.html
        # Note that vsize is the product of the dimension lengths
        # (omitting the record dimension) and the number of bytes
        # per value (determined from the type), increased to the
        # next multiple of 4, for each variable. If a record
        # variable, this is the amount of space per record. The
        # netCDF "record size" is calculated as the sum of the
        # vsize's of all the record variables.
        #
        # The vsize field is actually redundant, because its value
        # may be computed from other information in the header. The
        # 32-bit vsize field is not large enough to contain the size
        # of variables that require more than 2^32 - 4 bytes, so
        # 2^32 - 1 is used in the vsize field for such variables.
        if shape and shape[0] is None:  # record variable
            rec_vars.append(name)
            # The netCDF "record size" is calculated as the sum of
            # the vsize's of all the record variables.
            self.__dict__['_recsize'] += vsize
            # All record variables interleave from one base offset;
            # remember the first variable's begin.
            if begin == 0: begin = begin_
            dtypes['names'].append(name)
            dtypes['formats'].append(str(shape[1:]) + dtype_)

            # Handle padding with a virtual variable: small element
            # types (byte/char/short) are padded to 4-byte alignment.
            if typecode in 'bch':
                actual_size = reduce(mul, (1,) + shape[1:]) * size
                padding = -actual_size % 4
                if padding:
                    dtypes['names'].append('_padding_%d' % var)
                    dtypes['formats'].append('(%d,)>b' % padding)

            # Data will be set later (from the shared record array).
            data = None
        else:  # not a record variable
            # Calculate size to avoid problems with vsize (above)
            a_size = reduce(mul, shape, 1) * size
            if self.use_mmap:
                mm = mmap(self.fp.fileno(), begin_+a_size, access=ACCESS_READ)
                data = ndarray.__new__(ndarray, shape, dtype=dtype_,
                        buffer=mm, offset=begin_, order=0)
            else:
                # Read the block eagerly, restoring the file position.
                pos = self.fp.tell()
                self.fp.seek(begin_)
                data = fromstring(self.fp.read(a_size), dtype=dtype_)
                data.shape = shape
                self.fp.seek(pos)

        # Add variable.
        self.variables[name] = netcdf_variable(
                data, typecode, size, shape, dimensions, attributes)

    if rec_vars:
        # Remove padding when only one record variable (a single record
        # variable is not padded on disk).
        if len(rec_vars) == 1:
            dtypes['names'] = dtypes['names'][:1]
            dtypes['formats'] = dtypes['formats'][:1]

        # Build rec array.
        if self.use_mmap:
            mm = mmap(self.fp.fileno(), begin+self._recs*self._recsize, access=ACCESS_READ)
            rec_array = ndarray.__new__(ndarray, (self._recs,), dtype=dtypes,
                    buffer=mm, offset=begin, order=0)
        else:
            pos = self.fp.tell()
            self.fp.seek(begin)
            rec_array = fromstring(self.fp.read(self._recs*self._recsize), dtype=dtypes)
            rec_array.shape = (self._recs,)
            self.fp.seek(pos)

        for var in rec_vars:
            # Bypass netcdf_variable.__setattr__ so ``data`` is not
            # recorded as a NetCDF attribute.
            self.variables[var].__dict__['data'] = rec_array[var]
def _read_var(self):
    """Read one variable's header entry.

    Returns the tuple ``(name, dimensions, shape, attributes, typecode,
    size, dtype_, begin, vsize)``, where ``begin`` is the file offset of
    the variable's data.  The fields are read in strict on-disk order.
    """
    name = asstr(self._unpack_string())
    dimensions = []
    shape = []
    dims = self._unpack_int()

    for i in range(dims):
        dimid = self._unpack_int()
        dimname = self._dims[dimid]
        dimensions.append(dimname)
        dim = self.dimensions[dimname]
        shape.append(dim)  # None for the record dimension
    dimensions = tuple(dimensions)
    shape = tuple(shape)

    attributes = self._read_att_array()
    nc_type = self.fp.read(4)
    vsize = self._unpack_int()
    # The begin offset is 32-bit in version-1 files and 64-bit in
    # version-2 (64-bit offset) files.
    begin = [self._unpack_int, self._unpack_int64][self.version_byte-1]()

    typecode, size = TYPEMAP[nc_type]
    dtype_ = '>%s' % typecode  # on-disk data is big-endian

    return name, dimensions, shape, attributes, typecode, size, dtype_, begin, vsize
def _read_values(self):
    """Read one attribute value: type tag, element count, data and
    padding.  Returns a numpy array (or scalar for length one), or a
    NUL-stripped byte string for character data.

    Fix: the typecode check used ``is not 'c'`` -- identity comparison
    against a string literal is implementation-dependent; it is now a
    value comparison (``!=``).
    """
    nc_type = self.fp.read(4)
    n = self._unpack_int()

    typecode, size = TYPEMAP[nc_type]

    count = n * size
    values = self.fp.read(int(count))
    self.fp.read(-count % 4)  # read padding

    if typecode != 'c':
        values = fromstring(values, dtype='>%s' % typecode)
        # Unwrap single-element arrays to a scalar.
        if values.shape == (1,):
            values = values[0]
    else:
        # Character data: drop trailing NUL padding.
        values = values.rstrip(b'\x00')
    return values
def _pack_begin(self, begin):
    """Write a data-begin offset: 32-bit for version-1 files,
    64-bit for version-2 (64-bit offset) files."""
    if self.version_byte == 1:
        self._pack_int(begin)
    elif self.version_byte == 2:
        self._pack_int64(begin)
def _pack_int(self, value):
    # Write a 32-bit big-endian signed integer.
    self.fp.write(array(value, '>i').tostring())
_pack_int32 = _pack_int  # alias matching the on-disk type name
def _unpack_int(self):
    # Read a 32-bit big-endian signed integer as a Python int.
    return int(fromstring(self.fp.read(4), '>i')[0])
_unpack_int32 = _unpack_int  # alias matching the on-disk type name
def _pack_int64(self, value):
    # Write a 64-bit big-endian signed integer.
    self.fp.write(array(value, '>q').tostring())
def _unpack_int64(self):
    # Read a 64-bit big-endian signed integer (numpy scalar).
    return fromstring(self.fp.read(8), '>q')[0]
def _pack_string(self, s):
    """Write a string: 32-bit length, the bytes, then NUL padding to a
    4-byte boundary.

    Fix: the NetCDF classic format pads with NUL bytes (``b'\\x00'``);
    the previous code padded with ASCII ``'0'`` (byte 0x30), which the
    reading side ``_unpack_string`` (stripping ``b'\\x00'``) would not
    remove.
    """
    count = len(s)
    self._pack_int(count)
    self.fp.write(asbytes(s))
    self.fp.write(b'\x00' * (-count % 4))  # pad
def _unpack_string(self):
    """Read a length-prefixed string, consuming the 4-byte-boundary
    padding and dropping trailing NUL bytes."""
    count = self._unpack_int()
    raw = self.fp.read(count)
    self.fp.read(-count % 4)  # consume padding
    return raw.rstrip(b'\x00')
class netcdf_variable(object):
    """
    A data object for the `netcdf` module.

    `netcdf_variable` objects are constructed by calling the method
    `netcdf_file.createVariable` on the `netcdf_file` object. `netcdf_variable`
    objects behave much like array objects defined in numpy, except that their
    data resides in a file. Data is read by indexing and written by assigning
    to an indexed subset; the entire array can be accessed by the index ``[:]``
    or (for scalars) by using the methods `getValue` and `assignValue`.
    `netcdf_variable` objects also have attribute `shape` with the same meaning
    as for arrays, but the shape cannot be modified. There is another read-only
    attribute `dimensions`, whose value is the tuple of dimension names.

    All other attributes correspond to variable attributes defined in
    the NetCDF file. Variable attributes are created by assigning to an
    attribute of the `netcdf_variable` object.

    Parameters
    ----------
    data : array_like
        The data array that holds the values for the variable.
        Typically, this is initialized as empty, but with the proper shape.
    typecode : dtype character code
        Desired data-type for the data array.
    size : int
        Desired element size for the data array.
    shape : sequence of ints
        The shape of the array. This should match the lengths of the
        variable's dimensions.
    dimensions : sequence of strings
        The names of the dimensions used by the variable. Must be in the
        same order of the dimension lengths given by `shape`.
    attributes : dict, optional
        Attribute values (any type) keyed by string names. These attributes
        become attributes for the netcdf_variable object.

    Attributes
    ----------
    dimensions : list of str
        List of names of dimensions used by the variable object.
    isrec, shape
        Properties

    See also
    --------
    isrec, shape

    """
    def __init__(self, data, typecode, size, shape, dimensions, attributes=None):
        # NOTE: assignment order matters.  ``__setattr__`` mirrors
        # assignments into ``self._attributes`` once that dict exists;
        # every internal field below is assigned *before* ``_attributes``
        # appears in ``__dict__``, so none of them are recorded as
        # NetCDF attributes.
        self.data = data
        self._typecode = typecode
        self._size = size
        self._shape = shape
        self.dimensions = dimensions

        self._attributes = attributes or {}
        for k, v in self._attributes.items():
            # Bypass __setattr__: these names are already in _attributes.
            self.__dict__[k] = v

    def __setattr__(self, attr, value):
        # Store user defined attributes in a separate dict,
        # so we can save them to file later.
        try:
            self._attributes[attr] = value
        except AttributeError:
            # ``_attributes`` does not exist yet (we are still inside
            # ``__init__``); only set the instance dict below.
            pass
        self.__dict__[attr] = value

    def isrec(self):
        """Returns whether the variable has a record dimension or not.

        A record dimension is a dimension along which additional data could be
        easily appended in the netcdf data structure without much rewriting of
        the data file. This attribute is a read-only property of the
        `netcdf_variable`.

        """
        # A record variable has a falsy (None) first entry in _shape;
        # scalars (empty data.shape) are never record variables.
        return self.data.shape and not self._shape[0]
    isrec = property(isrec)

    def shape(self):
        """Returns the shape tuple of the data variable.

        This is a read-only attribute and can not be modified in the
        same manner of other numpy arrays.
        """
        return self.data.shape
    shape = property(shape)

    def getValue(self):
        """
        Retrieve a scalar value from a `netcdf_variable` of length one.

        Raises
        ------
        ValueError
            If the netcdf variable is an array of length greater than one,
            this exception will be raised.

        """
        return self.data.item()

    def assignValue(self, value):
        """
        Assign a scalar value to a `netcdf_variable` of length one.

        Parameters
        ----------
        value : scalar
            Scalar value (of compatible type) to assign to a length-one netcdf
            variable. This value will be written to file.

        Raises
        ------
        ValueError
            If the input is not a scalar, or if the destination is not a length-one
            netcdf variable.

        """
        if not self.data.flags.writeable:
            # Work-around for a bug in NumPy. Calling itemset() on a read-only
            # memory-mapped array causes a seg. fault.
            # See NumPy ticket #1622, and SciPy ticket #1202.
            # This check for `writeable` can be removed when the oldest version
            # of numpy still supported by scipy contains the fix for #1622.
            raise RuntimeError("variable is not writeable")

        self.data.itemset(value)

    def typecode(self):
        """
        Return the typecode of the variable.

        Returns
        -------
        typecode : char
            The character typecode of the variable (eg, 'i' for int).

        """
        return self._typecode

    def itemsize(self):
        """
        Return the itemsize of the variable.

        Returns
        -------
        itemsize : int
            The element size of the variable (eg, 8 for float64).

        """
        return self._size

    def __getitem__(self, index):
        # Plain delegation to the backing numpy array.
        return self.data[index]

    def __setitem__(self, index, data):
        # Expand data for record vars?
        if self.isrec:
            # Work out how many records the assignment reaches so the
            # backing array can be grown first.
            if isinstance(index, tuple):
                rec_index = index[0]
            else:
                rec_index = index
            if isinstance(rec_index, slice):
                recs = (rec_index.start or 0) + len(data)
            else:
                recs = rec_index + 1
            if recs > len(self.data):
                shape = (recs,) + self._shape[1:]
                self.data.resize(shape)
        self.data[index] = data
# Aliases kept for backwards compatibility with older client code.
NetCDFFile = netcdf_file
NetCDFVariable = netcdf_variable
|
sargas/scipy
|
scipy/io/netcdf.py
|
Python
|
bsd-3-clause
| 30,912
|
"""helicopter URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
# NOTE(review): `views` appears unused in this module -- confirm before removing.
from django.contrib.staticfiles import views
import django.contrib.auth.views

import helicopter.home as home

# URL routing table; order matters for pattern matching.
urlpatterns = [
    # Site landing page.
    url(r'^$', home.index, name='home'),
    # Authentication via Django's built-in function-based views;
    # logout redirects back to the login page.
    url(r'^login/$', django.contrib.auth.views.login, name='login'),
    url(r'^logout/$', django.contrib.auth.views.logout, {'next_page': '/login'}),
    # Application URLs delegated to the `plans` app.
    url(r'^plans/', include('plans.urls')),
    url(r'^admin/', admin.site.urls),
    # Language-switching endpoints (django.conf.urls.i18n).
    url(r'^i18n/', include('django.conf.urls.i18n')),
]
|
georgesk/helicopter
|
helicopter/urls.py
|
Python
|
gpl-3.0
| 1,175
|
import scrapy
from scrapy_splash import SplashRequest
from w3lib.http import basic_auth_header
class QuotesJs2Spider(scrapy.Spider):
    """Example spider using Splash to render JavaScript-based pages.
    Make sure you configure settings.py with your Splash
    credentials (available on Scrapy Cloud).
    """
    name = 'quotes-js-2'

    def _splash_auth(self):
        # Splash authenticates with HTTP basic auth: API key as the
        # user name, empty password.
        return {
            'Authorization': basic_auth_header(self.settings['APIKEY'], ''),
        }

    def start_requests(self):
        # Entry point: render the JS-backed listing page through Splash.
        yield SplashRequest(
            'http://quotes.toscrape.com/js',
            splash_headers=self._splash_auth(),
        )

    def parse(self, response):
        # One item per quote box on the rendered page.
        for box in response.css('div.quote'):
            yield {
                'text': box.css('span.text::text').extract_first(),
                'author': box.css('span small::text').extract_first(),
                'tags': box.css('div.tags a.tag::text').extract(),
            }

        # Follow pagination until no "next" link remains.
        href = response.css('li.next > a::attr(href)').extract_first()
        if href:
            yield SplashRequest(
                response.urljoin(href),
                splash_headers=self._splash_auth(),
            )
|
fpldataspiders/SPIDERS
|
splash_based_project/splash_based_project/spiders/quotes-js-2.py
|
Python
|
mit
| 1,239
|
# -*- coding: utf-8 -*-
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl
from . import mod111
|
RamonGuiuGou/l10n-spain
|
l10n_es_aeat_mod111/models/__init__.py
|
Python
|
agpl-3.0
| 108
|
#!/usr/bin/python3
"""
run_weatherlight.py Version 1.0
www.henryleach.com

Script to collect weather data from wunderground.com and set an
LED colour and pattern that hopefully gives an intuitive impression
of the weather forecast. Frequency and intensity of (white) pulses
represent precipitation, base colour is temperature.
"""
import lampi_lib as ll
import RPi.GPIO as GPIO
import time
import argparse

## command line arguments for user, plus defaults and help details.
parser = argparse.ArgumentParser(description='Script to collect weather data from'
                                 'wunderground.com and set an LED colour and'
                                 'pattern that hopefully gives and intuative impression'
                                 'of the weather forecast. Frequency and intensity of'
                                 '(white) pulses represent precipitation, base colour is temperature'
                                 'on a scale from -30C (green) to +50C (red).'
                                 )
## APIKEY LOCATION -f foretime -r refresh_min
parser.add_argument("apikey", help="wunderground.com 16-char apikey.")
parser.add_argument("location", help="<country(or US state)>/<city>.")
parser.add_argument("-f", "--foretime", default=3, choices=range(12),
                    help="How far in advance to forecast, hours. Default 3", type=int)
# BUG FIX: the short option was registered as "-r " (trailing space), which
# a user could never type on the command line; use "-r".
parser.add_argument("-r", "--refresh", default=30, choices=range(5, 59),
                    help="Refresh time, minutes. Default 30", type=int)

args = parser.parse_args()

# BUG FIX: a comma was missing after "hours.", so the next string literal
# was implicitly concatenated onto it ("hours.Refresh every ...").
print("Options chosen:\n",
      "API key:", args.apikey,
      "Location:", args.location, ".",
      "Forcast distance:", args.foretime, "hours.",
      "Refresh every", args.refresh, "min.")

## pin numbers to match LED legs
## These match GPIO board pins used by LED board.
red_pin = 19
green_pin = 21
blue_pin = 23

run = True  # If True, main light and refresh data loop will run.

## Scale for RGB intensity combinations to give the correct
## temperature/colour scale.
temp_scale = [
    [-30, -17,  -0,  12,  20,  26,  35,  50],  # degrees C
    [  0,   0,   0, 100, 255, 255, 255, 255],  # Red
    [183,  65,  94, 100, 224, 140,  55,   0],  # Green
    [  0, 178, 255, 100,   0,   0,   0, 197],  # Blue
]

## Initialise light object with correct board pins
globe = ll.light(red_pin, green_pin, blue_pin)

## Check forecast time
if args.foretime < 0 or args.foretime > 12:
    args.foretime = 2  # set to sensible default
    print("Forecast period out of range (1-12 hours). Set to 2 hours.")
    globe.colour(255, 0, 255, 1)  # flash magenta to tell user.

## check refresh interval. We don't need to check too often, the forecast
## isn't refreshed that quickly.
if args.refresh < 5 or args.refresh > 60:
    args.refresh = 15  # set to sensible default
    print("Refresh time out of range (10-60 minutes). Set to 15 minutes.")
    globe.colour(0, 255, 255, 1)  # flash cyan to tell user.

## Can we connect to our website?
if ll.check_connection('http://www.wunderground.com'):
    print("Internet connection available.")
    globe.colour(0, 255, 0, 1)  # green if true
else:
    globe.colour(255, 0, 0, 5)  # red if can't connect
    print("No internet connection.")
    run = False  # stop the run.

## Main loop. Program can be stopped by a keyboard interrupt (ctrl+c) at the console.
try:
    while run:
        ## Get weather data
        ## Download the raw 10 hour forecast JSON and convert to a dict.
        raw_weather_data = ll.getUWeather(args.apikey, args.location)

        if raw_weather_data == -1:
            ## There was a terminal error getting the data, so we have to stop.
            print("Stopped. Error getting weather data.")
            run = False
            break
        elif raw_weather_data == -2:
            ## Connection timeout error. This might be temporary...let's wait and try again
            ## until we get a positive response or a terminal error.
            print("Timeout, waiting 5 minutes.")
            # pulse dimly for five minutes.
            ll.pulse_light(globe, 0, 0, 0, 5, 100, time.time() + 300)
        else:
            ## Extract the weather data for the forecast time we're interested in
            forecast = ll.extractHourlyUWeather(raw_weather_data, args.foretime)
            print("In ", args.foretime, " hours the temperature will be: ", forecast['tempC'], "C", sep="")

            ## Set the intensities to each colour.
            r = ll.lin_interp(temp_scale, 1, forecast['tempC'])
            g = ll.lin_interp(temp_scale, 2, forecast['tempC'])
            b = ll.lin_interp(temp_scale, 3, forecast['tempC'])

            ## Work out the next time to stop and refresh.
            refresh_epoch = ll.next_refresh(args.refresh)

            print("Condition: ", forecast['condition'], " with ", forecast['pop'], "% probability.", sep="")

            ## Set the number of pulses based on rain forecast
            pulses, intensity = ll.pulsefreq_fromrain(forecast)
            print("Pulses: ", pulses, ", Intensity: ", intensity, "\n", sep="")

            ## Turn LED on and off smoothly. Run colour and pulsing until
            ## next refresh time.
            ll.ramp(globe, r, g, b, 50)
            ll.pulse_light(globe, r, g, b, pulses, intensity, refresh_epoch)
            ll.ramp(globe, r, g, b, -50)
            globe.colour(0, 0, 0, 0)  # make sure the LED is really off during the refresh.

except KeyboardInterrupt:
    pass

## Close the connections
globe.shutdown()
## Tidy up anything that might be left
GPIO.cleanup()
|
henryleach/lampi
|
run_weatherlight.py
|
Python
|
mit
| 5,564
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.