code
stringlengths
1
199k
import cv2
import numpy as np
import datetime as dt

# Haar cascade for frontal-face detection (XML must be in the working dir).
faceCascade = cv2.CascadeClassifier('haarcascade_frontalface_alt.xml')

# cv2.compareHist method ids (only "Correlation" is used below).
OPENCV_METHODS = {
    "Correlation": 0,
    "Chi-Squared": 1,
    "Intersection": 2,
    "Hellinger": 3}

hist_limit = 0.6   # correlation >= this => treat as a face seen before
ttl = 1 * 60       # seconds a face histogram stays in short-term memory
q_limit = 3        # consecutive frames with equal face count required
total_count = 0    # running total of distinct faces counted
prev_count = 0     # face count of the previous stable frame
total_delta = 0    # new faces found in the current frame
stm = {}           # short-term memory: {timestamp: face hue histogram}
q = []             # recent per-frame face counts (fluctuation filter)

video_capture = cv2.VideoCapture(0)

while True:
    # Expire short-term-memory entries older than ttl seconds.
    for t in list(stm):
        if (dt.datetime.now() - t).seconds > ttl:
            stm.pop(t, None)

    # Capture frame-by-frame.
    ret, frame = video_capture.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.2,
        minNeighbors=5,
        minSize=(30, 30),
        flags=cv2.CASCADE_SCALE_IMAGE
    )
    count = len(faces)

    # Keep a short window of recent counts and only proceed when the
    # detector has reported the same count for q_limit frames in a row.
    if len(q) >= q_limit:
        del q[0]
    q.append(count)

    isSame = True
    for c in q:  # Protect from fluctuation
        if c != count:
            isSame = False
    if isSame is False:
        continue

    max_hist = 0
    total_delta = 0
    for (x, y, w, h) in faces:
        # Draw a rectangle around the faces.
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

        if count == prev_count:
            continue

        # Set up the ROI and build a hue histogram of the face in HSV space.
        face = frame[y: y + h, x: x + w]
        hsv_roi = cv2.cvtColor(face, cv2.COLOR_BGR2HSV)
        # BUGFIX: the mask and histogram were previously computed on the BGR
        # image even though hsv_roi had been prepared; the (0-180, 60-255,
        # 32-255) bounds and the [0, 180] bin range are HSV-hue semantics,
        # so both calls must operate on hsv_roi.
        mask = cv2.inRange(hsv_roi, np.array((0., 60., 32.)),
                           np.array((180., 255., 255.)))
        face_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
        cv2.normalize(face_hist, face_hist, 0, 255, cv2.NORM_MINMAX)

        # Compare against every remembered face histogram.
        isFound = False
        for t in stm:
            hist_compare = cv2.compareHist(
                stm[t], face_hist, OPENCV_METHODS["Correlation"])
            if hist_compare > max_hist:
                max_hist = hist_compare
            if hist_compare >= hist_limit:
                isFound = True

        # A genuinely new face: count it; always remember the histogram.
        if (len(stm) == 0) or (isFound is False and max_hist > 0):
            total_delta += 1
        stm[dt.datetime.now()] = face_hist

    if prev_count != count:
        total_count += total_delta
        print("", count, " > ", total_count)
    prev_count = count

    # Display the resulting frame; 'q' quits.
    cv2.imshow('Video', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

video_capture.release()
cv2.destroyAllWindows()
from django.conf.urls import url
from . import views

# URL routes for the agency CRM app.  Each section follows the same shape:
# list view, detail by id, create, edit by id, delete by id, plus a few
# report-style views.  All reversing elsewhere should go through `name=`.
urlpatterns = [
    url(r'^$', views.home, name='home'),

    # --- Clients ---
    url(r'^clients/$', views.clients, name='clients'),
    url(r'^clients/(?P<id>\d+)/$', views.client_detail, name='client_detail'),
    url(r'^clients/new/$', views.client_new, name='client_new'),
    url(r'^clients/(?P<id>\d+)/edit/$', views.client_edit, name='client_edit'),
    # NOTE(review): "sevices" looks like a typo for "services" in the URL
    # path; changing it would alter a public URL, so confirm before fixing.
    url(r'^clients/sevices/$', views.clients_services_count,
        name='clients_services_count'),
    url(r'^clients/bills/(?P<id>\d+)/$', views.all_clients_bills,
        name='all_clients_bills'),
    url(r'^clients/bills/$', views.fresh_clients, name='fresh_clients'),
    url(r'^clients/del/(?P<id>\d+)/$', views.delete_client,
        name='delete_client'),

    # --- Contracts ---
    url(r'^contracts/$', views.contracts, name='contracts'),
    url(r'^contracts/(?P<id>\d+)/$', views.contract_detail,
        name='contract_detail'),
    url(r'^contracts/new/$', views.contract_new, name='contract_new'),
    url(r'^contracts/(?P<id>\d+)/edit/$', views.contract_edit,
        name='contract_edit'),
    url(r'^contracts/list/(?P<id>\d+)/$', views.all_clients_contracts,
        name='all_clients_contracts'),
    url(r'^contracts/list/$', views.contracts_services,
        name='contracts_services'),
    url(r'^contracts/del/(?P<id>\d+)/$', views.delete_contract,
        name='delete_contract'),

    # --- Managers ---
    # NOTE(review): the prefix is singular "manager/" for most routes but
    # plural "managers/del/" for delete — presumably historical; confirm.
    url(r'^manager/$', views.managers, name='managers'),
    url(r'^manager/(?P<id>\d+)/$', views.manager_detail,
        name='manager_detail'),
    url(r'^manager/new/$', views.manager_new, name='manager_new'),
    url(r'^manager/(?P<id>\d+)/edit/$', views.manager_edit,
        name='manager_edit'),
    url(r'^manager/clients/$', views.managers_clients_count,
        name='managers_clients_count'),
    url(r'^managers/del/(?P<id>\d+)/$', views.delete_manager,
        name='delete_manager'),

    # --- Briefs ---
    url(r'^briefs/$', views.brief, name='briefs'),
    url(r'^briefs/(?P<id>\d+)/$', views.brief_detail, name='brief_detail'),
    url(r'^briefs/new/$', views.brief_new, name='brief_new'),
    url(r'^briefs/(?P<id>\d+)/edit/$', views.brief_edit, name='brief_edit'),
    url(r'^briefs/del/(?P<id>\d+)/$', views.delete_brief,
        name='delete_brief'),
    url(r'^briefs/list/(?P<id>\d+)/$', views.all_clients_briefs,
        name='all_clients_briefs'),

    # --- Services ---
    url(r'^services/$', views.services, name='services'),
    url(r'^services/(?P<id>\d+)/$', views.service_detail,
        name='service_detail'),
    url(r'^services/new/$', views.services_new, name='services_new'),
    url(r'^services/(?P<id>\d+)/edit/$', views.service_edit,
        name='service_edit'),
    url(r'^services/table/(?P<id>\d+)/$', views.service_all_clients,
        name='service_all_clients'),
    url(r'^services/del/(?P<id>\d+)/$', views.delete_service,
        name='delete_service'),

    # --- Contractors ---
    url(r'^contractors/$', views.contractors, name='contractors'),
    url(r'^contractors/(?P<id>\d+)/$', views.contractor_detail,
        name='contractor_detail'),
    url(r'^contractors/new/$', views.contractors_new,
        name='contractors_new'),
    url(r'^contractors/(?P<id>\d+)/edit/$', views.contractor_edit,
        name='contractor_edit'),
    url(r'^contractors/newest/$', views.newest_contractors,
        name='newest_contractors'),
    url(r'^contractors/del/(?P<id>\d+)/$', views.delete_contractor,
        name='delete_contractor'),

    # --- Acts ---
    url(r'^acts/$', views.acts, name='acts'),
    url(r'^acts/(?P<id>\d+)/$', views.act_detail, name='act_detail'),
    url(r'^acts/new/$', views.act_new, name='act_new'),
    url(r'^acts/(?P<id>\d+)/edit/$', views.act_edit, name='act_edit'),
    url(r'^acts/del/(?P<id>\d+)/$', views.delete_act, name='delete_act'),

    # --- Bills ---
    url(r'^bills/$', views.bills, name='bills'),
    url(r'^bills/(?P<id>\d+)/$', views.bills_detail, name='bills_detail'),
    url(r'^bills/new/$', views.bills_new, name='bills_new'),
    url(r'^bills/(?P<id>\d+)/edit/$', views.bills_edit, name='bills_edit'),
    # NOTE(review): prefix is singular "bill/del/" unlike the rest — confirm.
    url(r'^bill/del/(?P<id>\d+)/$', views.delete_bill, name='delete_bill'),
]
from __future__ import division, print_function, absolute_import import unittest from .. import common import tempfile import os import platform import numpy as num from pyrocko import util, model from pyrocko.pile import make_pile from pyrocko import config, trace if common.have_gui(): # noqa from pyrocko.gui.qt_compat import qc, qw, use_pyqt5 if use_pyqt5: from PyQt5.QtTest import QTest Qt = qc.Qt else: from PyQt4.QtTest import QTest Qt = qc.Qt from pyrocko.gui.snuffler_app import SnufflerWindow from pyrocko.gui import pile_viewer as pyrocko_pile_viewer from pyrocko.gui import util as gui_util from pyrocko.gui import snuffling class DummySnuffling(snuffling.Snuffling): def setup(self): self.set_name('DummySnuffling') def call(self): figframe = self.figure_frame() ax = figframe.gca() ax.plot([0, 1], [0, 1]) figframe.draw() self.enable_pile_changed_notifications() self.pixmap_frame() try: self.web_frame() except ImportError as e: raise unittest.SkipTest(str(e)) self.get_pile() no_gui = False else: no_gui = True @common.require_gui class GUITest(unittest.TestCase): @classmethod def setUpClass(cls): ''' Create a reusable snuffler instance for all tests cases. ''' super(GUITest, cls).setUpClass() if no_gui: # nosetests runs this even when class is has @skip return from pyrocko.gui import snuffler as sm cls.snuffler = sm.get_snuffler_instance() fpath = common.test_data_file('test2.mseed') p = make_pile(fpath, show_progress=False) cls.win = SnufflerWindow(pile=p, instant_close=True) cls.pile_viewer = cls.win.pile_viewer cls.viewer = cls.win.pile_viewer.viewer pv = cls.pile_viewer cls.main_control_defaults = dict( highpass_control=pv.highpass_control.get_value(), lowpass_control=pv.lowpass_control.get_value(), gain_control=pv.gain_control.get_value(), rot_control=pv.rot_control.get_value()) @classmethod def tearDownClass(cls): ''' Quit snuffler. 
''' if no_gui: # nosetests runs this even when class is has @skip return QTest.keyPress(cls.pile_viewer, 'q') def setUp(self): ''' reset GUI ''' for k, v in self.main_control_defaults.items(): getattr(self.pile_viewer, k).set_value(v) self.initial_trange = self.viewer.get_time_range() self.viewer.set_tracks_range( [0, self.viewer.ntracks_shown_max]) self.tempfiles = [] def tearDown(self): self.clear_all_markers() for tempfn in self.tempfiles: os.remove(tempfn) self.viewer.set_time_range(*self.initial_trange) def get_tempfile(self): fh, tempfn = tempfile.mkstemp() os.close(fh) self.tempfiles.append(tempfn) return tempfn def write_to_input_line(self, text): '''emulate writing to inputline and press return''' pv = self.pile_viewer il = pv.inputline QTest.keyPress(pv, ':') QTest.keyClicks(il, text) QTest.keyPress(il, Qt.Key_Return) def clear_all_markers(self): pv = self.pile_viewer QTest.keyPress(pv, 'A', Qt.ShiftModifier, 10) QTest.keyPress(pv, Qt.Key_Backspace) self.assertEqual(len(pv.viewer.get_markers()), 0) def trigger_menu_item(self, qmenu, action_text, dialog=False): ''' trigger a QMenu QAction with action_text. 
''' for iaction, action in enumerate(qmenu.actions()): if action.text() == action_text: if dialog: def closeDialog(): dlg = self.snuffler.activeModalWidget() QTest.keyClick(dlg, Qt.Key_Escape) qc.QTimer.singleShot(150, closeDialog) action.trigger() break def get_slider_position(self, slider): style = slider.style() opt = qw.QStyleOptionSlider() return style.subControlRect( qw.QStyle.CC_Slider, opt, qw.QStyle.SC_SliderHandle) def drag_slider(self, slider): ''' Click *slider*, drag from one side to the other, release mouse button repeat to restore inital state''' position = self.get_slider_position(slider) QTest.mouseMove(slider, pos=position.topLeft()) QTest.mousePress(slider, Qt.LeftButton) QTest.mouseMove(slider, pos=position.bottomRight()) QTest.mouseRelease(slider, Qt.LeftButton) QTest.mousePress(slider, Qt.LeftButton) QTest.mouseMove(slider, pos=position.topLeft()) QTest.mouseRelease(slider, Qt.LeftButton) def add_one_pick(self): '''Add a single pick to pile_viewer''' pv = self.pile_viewer QTest.mouseDClick(pv.viewer, Qt.LeftButton) position_tl = pv.pos() geom = pv.frameGeometry() QTest.mouseMove(pv.viewer, pos=position_tl) QTest.mouseMove(pv.viewer, pos=(qc.QPoint( position_tl.x()+geom.x() // 2, position_tl.y()+geom.y() // 2))) # This should be done also by mouseDClick(). 
QTest.mouseRelease(pv.viewer, Qt.LeftButton) QTest.mouseClick(pv.viewer, Qt.LeftButton) def test_main_control_sliders(self): self.drag_slider(self.pile_viewer.highpass_control.slider) self.drag_slider(self.pile_viewer.lowpass_control.slider) self.drag_slider(self.pile_viewer.gain_control.slider) self.drag_slider(self.pile_viewer.rot_control.slider) def test_inputline(self): initrange = self.viewer.shown_tracks_range self.write_to_input_line('hide W.X.Y.Z') self.write_to_input_line('unhide W.X.Y.Z') self.pile_viewer.update() self.write_to_input_line('hide *') self.pile_viewer.update() assert(self.viewer.shown_tracks_range == (0, 1)) self.write_to_input_line('unhide') assert(self.viewer.shown_tracks_range == initrange) self.write_to_input_line('markers') self.write_to_input_line('markers 4') self.write_to_input_line('markers all') # should error self.write_to_input_line('scaling 1000.') self.write_to_input_line('scaling -1000. 1000.') gotos = ['2015-01-01 00:00:00', '2015-01-01 00:00', '2015-01-01 00', '2015-01-01', '2015-01', '2015'] for gt in gotos: self.write_to_input_line('goto %s' % gt) # test some false input self.write_to_input_line('asdf') QTest.keyPress(self.pile_viewer.inputline, Qt.Key_Escape) def test_drawing_optimization(self): n = 505 lats = num.random.uniform(-90., 90., n) lons = num.random.uniform(-180., 180., n) events = [] for i, (lat, lon) in enumerate(zip(lats, lons)): events.append( model.Event(time=i, lat=lat, lon=lon, name='XXXX%s' % i)) self.viewer.add_event(events[-1]) assert len(self.viewer.markers) == 1 self.viewer.add_events(events) assert len(self.viewer.markers) == n + 1 self.viewer.set_time_range(-500., 5000) self.viewer.set_time_range(0., None) self.viewer.set_time_range(None, 0.) def test_follow(self): self.viewer.follow(10.) 
self.viewer.unfollow() def test_save_image(self): tempfn_svg = self.get_tempfile() + '.svg' self.viewer.savesvg(fn=tempfn_svg) tempfn_png = self.get_tempfile() + '.png' self.viewer.savesvg(fn=tempfn_png) def test_read_events(self): event = model.Event() tempfn = self.get_tempfile() model.event.dump_events([event], tempfn) self.viewer.read_events(tempfn) def test_add_remove_stations(self): n = 10 lats = num.random.uniform(-90., 90., n) lons = num.random.uniform(-180., 180., n) stations = [ model.station.Station(network=str(i), station=str(i), lat=lat, lon=lon) for i, (lat, lon) in enumerate(zip(lats, lons)) ] tempfn = self.get_tempfile() model.station.dump_stations(stations, tempfn) self.viewer.open_stations(fns=[tempfn]) last = stations[-1] self.assertTrue(self.viewer.has_station(last)) self.viewer.get_station((last.network, last.station)) def test_markers(self): self.add_one_pick() pv = self.pile_viewer self.assertEqual(pv.viewer.get_active_event(), None) conf = config.config('snuffler') # test kinds and phases kinds = range(5) fkey_map = pyrocko_pile_viewer.fkey_map for k in kinds: for fkey, fkey_int in fkey_map.items(): fkey_int += 1 QTest.keyPress(pv, fkey) QTest.keyPress(pv, str(k)) if fkey_int != 10: want = conf.phase_key_mapping.get( "F%s" % fkey_int, 'Undefined') else: want = None m = pv.viewer.get_markers()[0] self.assertEqual(m.kind, k) if want: self.assertEqual(m.get_phasename(), want) def test_load_waveforms(self): self.viewer.load('data', regex=r'\w+.mseed') self.assertFalse(self.viewer.get_pile().is_empty()) def test_add_traces(self): trs = [] for i in range(3): trs.append( trace.Trace(network=str(i), tmin=num.random.uniform(1), ydata=num.random.random(100), deltat=num.random.random()) ) self.viewer.add_traces(trs) def test_event_marker(self): pv = self.pile_viewer self.add_one_pick() # select all markers QTest.keyPress(pv, 'a', Qt.ShiftModifier, 100) # convert to EventMarker QTest.keyPress(pv, 'e') QTest.keyPress(pv, 'd') for m in 
pv.viewer.get_markers(): self.assertTrue(isinstance(m, gui_util.EventMarker)) def test_load_save_markers(self): nmarkers = 505 times = list(map(util.to_time_float, num.arange(nmarkers))) markers = [gui_util.Marker(tmin=t, tmax=t, nslc_ids=[('*', '*', '*', '*'), ]) for t in times] tempfn = self.get_tempfile() tempfn_selected = self.get_tempfile() self.viewer.add_markers(markers) self.viewer.write_selected_markers( fn=tempfn_selected) self.viewer.write_markers(fn=tempfn) self.viewer.read_markers(fn=tempfn_selected) self.viewer.read_markers(fn=tempfn) for k in 'pnPN': QTest.keyPress(self.pile_viewer, k) self.viewer.go_to_time(-20., 20) self.pile_viewer.update() self.viewer.update() assert(len(self.viewer.markers) != 0) assert(len(self.viewer.markers) == nmarkers * 2) len_before = len(self.viewer.markers) self.viewer.remove_marker( self.viewer.markers[0]) assert(len(self.viewer.markers) == len_before-1) self.viewer.remove_markers(self.viewer.markers) assert(len(self.viewer.markers) == 0) def test_actions(self): # Click through many menu option combinations that do not require # further interaction. Activate options in pairs of two. pv = self.pile_viewer tinit = pv.viewer.tmin tinitlen = pv.viewer.tmax - pv.viewer.tmin non_dialog_actions = [ 'Indivdual Scale', 'Common Scale', 'Common Scale per Station', 'Common Scale per Component', 'Scaling based on Minimum and Maximum', 'Scaling based on Mean +- 2 x Std. Deviation', 'Scaling based on Mean +- 4 x Std. 
Deviation', 'Sort by Names', 'Sort by Distance', 'Sort by Azimuth', 'Sort by Distance in 12 Azimuthal Blocks', 'Sort by Backazimuth', '3D distances', 'Subsort by Network, Station, Location, Channel', 'Subsort by Network, Station, Channel, Location', 'Subsort by Station, Network, Channel, Location', 'Subsort by Location, Network, Station, Channel', 'Subsort by Channel, Network, Station, Location', 'Subsort by Network, Station, Channel (Grouped by Location)', 'Subsort by Station, Network, Channel (Grouped by Location)', ] dialog_actions = [ 'Open waveform files...', 'Open waveform directory...', 'Open station files...', 'Save markers...', 'Save selected markers...', 'Open marker file...', 'Open event file...', 'Save as SVG|PNG', ] options = [ 'Antialiasing', 'Liberal Fetch Optimization', 'Clip Traces', 'Show Boxes', 'Color Traces', 'Show Scale Ranges', 'Show Scale Axes', 'Show Zero Lines', 'Fix Scale Ranges', 'Allow Downsampling', 'Allow Degapping', 'FFT Filtering', 'Bandpass is Lowpass + Highpass', 'Watch Files', ] # create an event marker and activate it self.add_one_pick() keys = list('mAhefrRh+-fgc?') keys.extend([Qt.Key_PageUp, Qt.Key_PageDown]) def fire_key(x): QTest.keyPress(self.pile_viewer, key) for key in keys: QTest.qWait(100) fire_key(key) event = model.Event() markers = pv.viewer.get_markers() self.assertEqual(len(markers), 1) markers[0]._event = event pv.viewer.set_active_event(event) pv.viewer.set_event_marker_as_origin() right_click_menu = self.viewer.menu for action_text in dialog_actions: self.trigger_menu_item(right_click_menu, action_text, dialog=True) for action_text in non_dialog_actions: for oa in options: for ob in options: self.trigger_menu_item(right_click_menu, action_text) self.trigger_menu_item(right_click_menu, oa) self.trigger_menu_item(right_click_menu, ob) options.remove(oa) self.viewer.go_to_event_by_name(event.name) self.viewer.go_to_time(tinit, tinitlen) @unittest.skipIf( platform.system() != 'Windows' and os.getuid() == 0, 'does 
not like to run as root') def test_frames(self): frame_snuffling = DummySnuffling() self.viewer.add_snuffling(frame_snuffling) frame_snuffling.call() # close three opened frames QTest.keyPress(self.pile_viewer, 'd') QTest.keyPress(self.pile_viewer, 'd') QTest.keyPress(self.pile_viewer, 'd') if __name__ == '__main__': util.setup_logging('test_gui', 'warning') unittest.main()
class InvalidAge(Exception):
    """Raised when the supplied age is below the admission threshold."""

    def __init__(self, age):
        # Pass the age to Exception so str(e)/repr(e) are informative.
        super().__init__(age)
        self.age = age


def validate_age(age):
    """Return a welcome message if *age* is at least 18.

    Raises:
        InvalidAge: if ``age`` is below 18 (carries the offending age).
    """
    if age < 18:
        raise InvalidAge(age)
    return "Welcome to the movies!!"


# BUGFIX: original was Python 2 (`raw_input`, print statements) and ran its
# prompt at import time.  Ported to Python 3 and guarded so importing the
# module has no side effects.
if __name__ == '__main__':
    age = int(input("please enter your age:"))
    try:
        validate_age(age)
    except InvalidAge as e:
        print("Buddy!! you are very young at {}!! Grow up a bit.".format(e.age))
    else:
        print(validate_age(age))
"""A binary to train CIFAR-10 using a single GPU. Accuracy: cifar10_train.py achieves ~86% accuracy after 100K steps (256 epochs of data) as judged by cifar10_eval.py. Speed: With batch_size 128. System | Step Time (sec/batch) | Accuracy ------------------------------------------------------------------ 1 Tesla K20m | 0.35-0.60 | ~86% at 60K steps (5 hours) 1 Tesla K40m | 0.25-0.35 | ~86% at 100K steps (4 hours) Usage: Please see the tutorial and website for how to download the CIFAR-10 data set, compile the program and train the model. http://tensorflow.org/tutorials/deep_cnn/ """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from datetime import datetime import time import tensorflow as tf import cifar10 FLAGS = tf.app.flags.FLAGS tf.app.flags.DEFINE_string('train_dir', '/tmp/cifar10_train', """Directory where to write event logs """ """and checkpoint.""") tf.app.flags.DEFINE_integer('max_steps', 100000, #reduced significantly -daniel """Number of batches to run.""") tf.app.flags.DEFINE_boolean('log_device_placement', False, """Whether to log device placement.""") def train(): """Train CIFAR-10 for a number of steps.""" with tf.Graph().as_default(): global_step = tf.contrib.framework.get_or_create_global_step() # Get images and labels for CIFAR-10. images, labels = cifar10.distorted_inputs() # Build a Graph that computes the logits predictions from the # inference model. logits = cifar10.inference(images) # Calculate loss. loss = cifar10.loss(logits, labels) # Build a Graph that trains the model with one batch of examples and # updates the model parameters. train_op = cifar10.train(loss, global_step) class _LoggerHook(tf.train.SessionRunHook): """Logs loss and runtime.""" def begin(self): self._step = -1 def before_run(self, run_context): self._step += 1 self._start_time = time.time() return tf.train.SessionRunArgs(loss) # Asks for loss value. 
def after_run(self, run_context, run_values): duration = time.time() - self._start_time loss_value = run_values.results if self._step % 10 == 0: num_examples_per_step = FLAGS.batch_size examples_per_sec = num_examples_per_step / duration sec_per_batch = float(duration) format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f ' 'sec/batch)') print (format_str % (datetime.now(), self._step, loss_value, examples_per_sec, sec_per_batch)) with tf.train.MonitoredTrainingSession( checkpoint_dir=FLAGS.train_dir, hooks=[tf.train.StopAtStepHook(last_step=FLAGS.max_steps), tf.train.NanTensorHook(loss), _LoggerHook()], config=tf.ConfigProto( log_device_placement=FLAGS.log_device_placement)) as mon_sess: while not mon_sess.should_stop(): mon_sess.run(train_op) def main(argv=None): # pylint: disable=unused-argument cifar10.maybe_download_and_extract() if tf.gfile.Exists(FLAGS.train_dir): tf.gfile.DeleteRecursively(FLAGS.train_dir) tf.gfile.MakeDirs(FLAGS.train_dir) train() if __name__ == '__main__': tf.app.run()
from django.apps import AppConfig


class CirculoConfig(AppConfig):
    """Django application configuration for the ``circulo`` app."""

    name = 'circulo'
from django.db import migrations, models


class Migration(migrations.Migration):
    """Add the nullable text field ``tslug`` to the ``Doc`` model."""

    dependencies = [
        ('scoping', '0294_titlevecmodel'),
    ]

    operations = [
        migrations.AddField(
            model_name='doc',
            name='tslug',
            # Nullable so existing rows need no default value on migration.
            field=models.TextField(null=True),
        ),
    ]
"""autogenerated by genpy from tf2_msgs/FrameGraphRequest.msg. Do not edit.""" import sys python3 = True if sys.hexversion > 0x03000000 else False import genpy import struct class FrameGraphRequest(genpy.Message): _md5sum = "d41d8cd98f00b204e9800998ecf8427e" _type = "tf2_msgs/FrameGraphRequest" _has_header = False #flag to mark the presence of a Header object _full_text = """""" __slots__ = [] _slot_types = [] def __init__(self, *args, **kwds): """ Constructor. Any message fields that are implicitly/explicitly set to None will be assigned a default value. The recommend use is keyword arguments as this is more robust to future message changes. You cannot mix in-order arguments and keyword arguments. The available fields are: :param args: complete set of field values, in .msg order :param kwds: use keyword arguments corresponding to message field names to set specific fields. """ if args or kwds: super(FrameGraphRequest, self).__init__(*args, **kwds) def _get_types(self): """ internal API method """ return self._slot_types def serialize(self, buff): """ serialize message into buffer :param buff: buffer, ``StringIO`` """ try: pass except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self))))) except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self))))) def deserialize(self, str): """ unpack serialized message in str into this message instance :param str: byte array of serialized message, ``str`` """ try: end = 0 return self except struct.error as e: raise genpy.DeserializationError(e) #most likely buffer underfill def serialize_numpy(self, buff, numpy): """ serialize message with numpy array types into buffer :param buff: buffer, ``StringIO`` :param numpy: numpy python module """ try: pass except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), 
str(locals().get('_x', self))))) except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self))))) def deserialize_numpy(self, str, numpy): """ unpack serialized message in str into this message instance using numpy for array types :param str: byte array of serialized message, ``str`` :param numpy: numpy python module """ try: end = 0 return self except struct.error as e: raise genpy.DeserializationError(e) #most likely buffer underfill _struct_I = genpy.struct_I """autogenerated by genpy from tf2_msgs/FrameGraphResponse.msg. Do not edit.""" import sys python3 = True if sys.hexversion > 0x03000000 else False import genpy import struct class FrameGraphResponse(genpy.Message): _md5sum = "437ea58e9463815a0d511c7326b686b0" _type = "tf2_msgs/FrameGraphResponse" _has_header = False #flag to mark the presence of a Header object _full_text = """string frame_yaml """ __slots__ = ['frame_yaml'] _slot_types = ['string'] def __init__(self, *args, **kwds): """ Constructor. Any message fields that are implicitly/explicitly set to None will be assigned a default value. The recommend use is keyword arguments as this is more robust to future message changes. You cannot mix in-order arguments and keyword arguments. The available fields are: frame_yaml :param args: complete set of field values, in .msg order :param kwds: use keyword arguments corresponding to message field names to set specific fields. 
""" if args or kwds: super(FrameGraphResponse, self).__init__(*args, **kwds) #message fields cannot be None, assign default values for those that are if self.frame_yaml is None: self.frame_yaml = '' else: self.frame_yaml = '' def _get_types(self): """ internal API method """ return self._slot_types def serialize(self, buff): """ serialize message into buffer :param buff: buffer, ``StringIO`` """ try: _x = self.frame_yaml length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) if python3: buff.write(struct.pack('<I%sB'%length, length, *_x)) else: buff.write(struct.pack('<I%ss'%length, length, _x)) except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self))))) except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self))))) def deserialize(self, str): """ unpack serialized message in str into this message instance :param str: byte array of serialized message, ``str`` """ try: end = 0 start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: self.frame_yaml = str[start:end].decode('utf-8') else: self.frame_yaml = str[start:end] return self except struct.error as e: raise genpy.DeserializationError(e) #most likely buffer underfill def serialize_numpy(self, buff, numpy): """ serialize message with numpy array types into buffer :param buff: buffer, ``StringIO`` :param numpy: numpy python module """ try: _x = self.frame_yaml length = len(_x) if python3 or type(_x) == unicode: _x = _x.encode('utf-8') length = len(_x) if python3: buff.write(struct.pack('<I%sB'%length, length, *_x)) else: buff.write(struct.pack('<I%ss'%length, length, _x)) except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self))))) except TypeError as te: self._check_types(ValueError("%s: '%s' when 
writing '%s'" % (type(te), str(te), str(locals().get('_x', self))))) def deserialize_numpy(self, str, numpy): """ unpack serialized message in str into this message instance using numpy for array types :param str: byte array of serialized message, ``str`` :param numpy: numpy python module """ try: end = 0 start = end end += 4 (length,) = _struct_I.unpack(str[start:end]) start = end end += length if python3: self.frame_yaml = str[start:end].decode('utf-8') else: self.frame_yaml = str[start:end] return self except struct.error as e: raise genpy.DeserializationError(e) #most likely buffer underfill _struct_I = genpy.struct_I class FrameGraph(object): _type = 'tf2_msgs/FrameGraph' _md5sum = '437ea58e9463815a0d511c7326b686b0' _request_class = FrameGraphRequest _response_class = FrameGraphResponse
import re

# cjdns addresses live in fc00::/8 and are written fully expanded:
# 'fc' + two hex digits, then seven ':'-separated groups of four hex digits.
CJDNS_IP_REGEX = re.compile(r'^fc[0-9a-f]{2}(:[0-9a-f]{4}){7}$', re.IGNORECASE)


class Node(object):
    """A cjdns network node, identified by its validated cjdns IPv6 address.

    Raises:
        ValueError: for a malformed address or an invalid/missing version.
    """

    def __init__(self, ip, version=None, label=None):
        if not valid_cjdns_ip(ip):
            raise ValueError('Invalid IP address')
        if not valid_version(version):
            raise ValueError('Invalid version')

        self.ip = ip
        self.version = int(version)
        # BUGFIX: was `ip[-4:] or label` — the last four characters of a
        # valid address are never empty, so an explicit label was silently
        # ignored.  Prefer the caller-supplied label, falling back to the
        # address suffix.
        self.label = label or ip[-4:]

    def __lt__(self, b):
        # Order nodes by address so Edge endpoints sort deterministically.
        return self.ip < b.ip

    def __repr__(self):
        return 'Node(ip="%s", version=%s, label="%s")' % (
            self.ip, self.version, self.label)


class Edge(object):
    """An undirected link between two nodes, endpoints stored sorted."""

    def __init__(self, a, b):
        self.a, self.b = sorted([a, b])

    def __eq__(self, that):
        # Sorted endpoints make equality orientation-independent.
        return self.a.ip == that.a.ip and self.b.ip == that.b.ip

    def __repr__(self):
        return 'Edge(a.ip="{}", b.ip="{}")'.format(self.a.ip, self.b.ip)


def valid_cjdns_ip(ip):
    """Return a truthy match object if *ip* is a well-formed cjdns address."""
    return CJDNS_IP_REGEX.match(ip)


def valid_version(version):
    """Return True if *version* parses as an int below 30.

    BUGFIX: ``int(None)`` raises TypeError, not ValueError, so the default
    ``version=None`` previously escaped as an uncaught TypeError instead of
    being reported as an invalid version; catch both.
    """
    try:
        return int(version) < 30
    except (ValueError, TypeError):
        return False
import gettext
_ = gettext.gettext

from gi.repository import Gtk


class Console(Gtk.Window):
    """A hideable GTK window showing a read-only text log and a pulse
    progress bar (Migasfree client console)."""

    def __init__(self):
        super(Console, self).__init__()

        # Scrollable, read-only, word-wrapped text area for log output.
        sw = Gtk.ScrolledWindow()
        sw.set_policy(
            Gtk.PolicyType.AUTOMATIC,
            Gtk.PolicyType.AUTOMATIC
        )
        self.textview = Gtk.TextView()
        self.textbuffer = self.textview.get_buffer()
        self.textview.set_editable(False)
        self.textview.set_wrap_mode(Gtk.WrapMode.WORD)
        sw.add(self.textview)

        self.set_title(_('Migasfree Console'))
        self.set_icon_name('migasfree')
        self.resize(640, 420)
        self.set_decorated(True)
        self.set_border_width(10)
        # Closing the window only hides it (see on_click_hide).
        self.connect('delete-event', self.on_click_hide)

        box = Gtk.Box(spacing=6, orientation='vertical')
        box.pack_start(sw, expand=True, fill=True, padding=0)

        # Indeterminate progress bar, pulsed from on_timeout.
        self.progress = Gtk.ProgressBar()
        self.progress.set_pulse_step(0.02)
        progress_box = Gtk.Box(False, 0, orientation='vertical')
        progress_box.pack_start(self.progress, False, True, 0)
        box.pack_start(progress_box, expand=False, fill=True, padding=0)

        self.add(box)

    def on_timeout(self, user_data):
        # Timer callback: pulse the bar; True keeps the timeout running.
        self.progress.pulse()
        return True

    def on_click_hide(self, widget, data=None):
        # Hide instead of destroy; True stops the default delete handling.
        self.hide()
        return True
import numpy as np
import matplotlib.pyplot as plt
import spm1d

# Canonical correlation analysis (CCA) demo: compare parametric and
# non-parametric (permutation) SPM inference on the same dataset.
dataset = spm1d.data.mv1d.cca.Dorn2012()
y,x = dataset.get_data()  #A:slow, B:fast

# Seed so the permutation inference below is reproducible.
np.random.seed(0)
alpha = 0.05
# NOTE(review): two_tailed is set but never used in this script — confirm
# whether it was meant to be passed to inference().
two_tailed = False

# Non-parametric inference (100 permutation iterations).
snpm = spm1d.stats.nonparam.cca(y, x)
snpmi = snpm.inference(alpha, iterations=100)
print( snpmi )

# Parametric inference.
spm = spm1d.stats.cca(y, x)
spmi = spm.inference(alpha)
print( spmi )

# Plot both results side by side for comparison.
plt.close('all')
plt.figure(figsize=(10,4))
ax0 = plt.subplot(121)
ax1 = plt.subplot(122)
labels = 'Parametric', 'Non-parametric'
for ax,zi,label in zip([ax0,ax1], [spmi,snpmi], labels):
    zi.plot(ax=ax)
    zi.plot_threshold_label(ax=ax, fontsize=8)
    zi.plot_p_values(ax=ax, size=10)
    ax.set_title( label )
plt.tight_layout()
plt.show()
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):
    """South schema migration: add the integer 'order' column to Question."""

    def forwards(self, orm):
        # Adding field 'Question.order'
        db.add_column(u'survey_question', 'order',
                      self.gf('django.db.models.fields.IntegerField')(default=0),
                      keep_default=False)

    def backwards(self, orm):
        # Deleting field 'Question.order'
        db.delete_column(u'survey_question', 'order')

    # Frozen ORM snapshot generated by South -- do not edit by hand.
    models = {
        u'survey.option': {
            'Meta': {'object_name': 'Option'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'label': ('django.db.models.fields.SlugField', [], {'max_length': '64'}),
            'text': ('django.db.models.fields.CharField', [], {'max_length': '254'})
        },
        u'survey.page': {
            'Meta': {'ordering': "['order']", 'unique_together': "(('survey', 'order'),)", 'object_name': 'Page'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'order': ('django.db.models.fields.IntegerField', [], {}),
            'question': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Question']"}),
            'survey': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Survey']"})
        },
        u'survey.question': {
            'Meta': {'object_name': 'Question'},
            'allow_other': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'info': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}),
            'label': ('django.db.models.fields.CharField', [], {'max_length': '254'}),
            'modalQuestion': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Question']", 'null': 'True', 'blank': 'True'}),
            'options': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['survey.Option']", 'null': 'True', 'blank': 'True'}),
            'options_from_previous_answer': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}),
            'options_json': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}),
            'order': ('django.db.models.fields.IntegerField', [], {}),
            'randomize_groups': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '64'}),
            'title': ('django.db.models.fields.TextField', [], {}),
            'type': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '20'})
        },
        u'survey.respondant': {
            'Meta': {'object_name': 'Respondant'},
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '254'}),
            'responses': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'responses'", 'symmetrical': 'False', 'to': u"orm['survey.Response']"}),
            'survey': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Survey']"}),
            'ts': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 4, 30, 0, 0)'}),
            'uuid': ('django.db.models.fields.CharField', [], {'default': "'bc967489-023c-46ce-b396-d209c8323fac'", 'max_length': '36', 'primary_key': 'True'})
        },
        u'survey.response': {
            'Meta': {'object_name': 'Response'},
            'answer': ('django.db.models.fields.TextField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'question': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Question']"}),
            'respondant': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Respondant']"}),
            'ts': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 4, 30, 0, 0)'})
        },
        u'survey.survey': {
            'Meta': {'object_name': 'Survey'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '254'}),
            'questions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['survey.Question']", 'null': 'True', 'through': u"orm['survey.Page']", 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '254'})
        }
    }

    complete_apps = ['survey']
from gnuradio import blocks
from gnuradio import filter
from gnuradio import gr
from gnuradio.filter import firdes
import grgsm
import math


class clock_offset_corrector(gr.hier_block2):
    """GRC-generated hierarchical block that corrects sample-clock and
    carrier-frequency offset from a ppm estimate.

    The ppm value arrives either as the ``ppm`` parameter or asynchronously
    on the ``ppm_in`` message port; it drives both a fractional resampler
    (rate correction) and a controlled rotator (frequency correction).
    """

    def __init__(self, fc=936.6e6, ppm=0, samp_rate_in=1625000.0/6.0*4.0):
        gr.hier_block2.__init__(
            self, "Clock offset corrector",
            gr.io_signature(1, 1, gr.sizeof_gr_complex*1),
            gr.io_signature(1, 1, gr.sizeof_gr_complex*1),
        )

        ##################################################
        # Parameters
        ##################################################
        self.fc = fc                      # carrier frequency [Hz]
        self.ppm = ppm                    # initial clock offset [parts per million]
        self.samp_rate_in = samp_rate_in  # input sample rate [Hz]

        ##################################################
        # Variables
        ##################################################
        # Output rate starts equal to the input rate; the resampler ratio is
        # then nudged by the ppm estimate at runtime.
        self.samp_rate_out = samp_rate_out = samp_rate_in

        ##################################################
        # Blocks
        ##################################################
        # Hierarchical message port forwarding ppm updates into the block.
        self.ppm_in = None;self.message_port_register_hier_out("ppm_in")
        self.gsm_controlled_rotator_cc_0 = grgsm.controlled_rotator_cc(0,samp_rate_out)
        # Emits the current ppm value as a stream for the two scalers below.
        self.gsm_controlled_const_source_f_0 = grgsm.controlled_const_source_f(ppm)
        self.fractional_resampler_xx_0 = filter.fractional_resampler_cc(0, samp_rate_in/samp_rate_out)
        # ppm -> fractional resampling-ratio correction (1e-6 per ppm).
        self.blocks_multiply_const_vxx_0_0 = blocks.multiply_const_vff((1.0e-6*samp_rate_in/samp_rate_out, ))
        # ppm -> rotator phase increment [rad/sample] at carrier fc.
        self.blocks_multiply_const_vxx_0 = blocks.multiply_const_vff((fc/samp_rate_out*(2*math.pi)/1e6, ))
        self.blocks_add_const_vxx_0 = blocks.add_const_vff((samp_rate_in/samp_rate_out, ))

        ##################################################
        # Connections
        ##################################################
        # Signal path: input -> resampler -> rotator -> output.
        self.connect((self, 0), (self.fractional_resampler_xx_0, 0))
        self.connect((self.fractional_resampler_xx_0, 0), (self.gsm_controlled_rotator_cc_0, 0))
        # Control path: ppm constant, scaled, feeds resampler ratio (port 1)
        # and rotator phase increment (port 1).
        self.connect((self.blocks_add_const_vxx_0, 0), (self.fractional_resampler_xx_0, 1))
        self.connect((self.blocks_multiply_const_vxx_0_0, 0), (self.blocks_add_const_vxx_0, 0))
        self.connect((self.blocks_multiply_const_vxx_0, 0), (self.gsm_controlled_rotator_cc_0, 1))
        self.connect((self.gsm_controlled_rotator_cc_0, 0), (self, 0))
        self.connect((self.gsm_controlled_const_source_f_0, 0), (self.blocks_multiply_const_vxx_0_0, 0))
        self.connect((self.gsm_controlled_const_source_f_0, 0), (self.blocks_multiply_const_vxx_0, 0))

        ##################################################
        # Asynch Message Connections
        ##################################################
        self.msg_connect(self, "ppm_in", self.gsm_controlled_const_source_f_0, "constant_msg")

    def get_fc(self):
        """Return the carrier frequency [Hz]."""
        return self.fc

    def set_fc(self, fc):
        """Set the carrier frequency and rescale the rotator control gain."""
        self.fc = fc
        self.blocks_multiply_const_vxx_0.set_k((self.fc/self.samp_rate_out*(2*math.pi)/1e6, ))

    def get_ppm(self):
        """Return the current ppm offset estimate."""
        return self.ppm

    def set_ppm(self, ppm):
        """Set the ppm offset and push it into the constant source."""
        self.ppm = ppm
        self.gsm_controlled_const_source_f_0.set_constant(self.ppm)

    def get_samp_rate_in(self):
        """Return the input sample rate [Hz]."""
        return self.samp_rate_in

    def set_samp_rate_in(self, samp_rate_in):
        """Set the input sample rate; output rate tracks it (see below)."""
        self.samp_rate_in = samp_rate_in
        # Keeps out == in, which also re-derives every dependent constant.
        self.set_samp_rate_out(self.samp_rate_in)
        self.fractional_resampler_xx_0.set_resamp_ratio(self.samp_rate_in/self.samp_rate_out)
        self.blocks_multiply_const_vxx_0_0.set_k((1.0e-6*self.samp_rate_in/self.samp_rate_out, ))
        self.blocks_add_const_vxx_0.set_k((self.samp_rate_in/self.samp_rate_out, ))

    def get_samp_rate_out(self):
        """Return the output sample rate [Hz]."""
        return self.samp_rate_out

    def set_samp_rate_out(self, samp_rate_out):
        """Set the output sample rate and update all dependent blocks."""
        self.samp_rate_out = samp_rate_out
        self.blocks_multiply_const_vxx_0.set_k((self.fc/self.samp_rate_out*(2*math.pi)/1e6, ))
        self.fractional_resampler_xx_0.set_resamp_ratio(self.samp_rate_in/self.samp_rate_out)
        self.blocks_multiply_const_vxx_0_0.set_k((1.0e-6*self.samp_rate_in/self.samp_rate_out, ))
        self.gsm_controlled_rotator_cc_0.set_samp_rate(self.samp_rate_out)
        self.blocks_add_const_vxx_0.set_k((self.samp_rate_in/self.samp_rate_out, ))
# Python 2 script: read a Medit-style .mesh file (Vertices / Triangles /
# Tetrahedra sections) and emit one legacy-ASCII VTK unstructured-grid file
# per boundary-face label and per volume label.
import numpy as np

mdir = "mesh3d/"
fname = "out_p6-p4-p8"

print "input mesh data file"
f1 = open(mdir+fname+".mesh", 'r')
# Skip forward to the "Vertices" section; the next line holds the count.
for line in f1:
    if line.startswith("Vertices"):
        break
pcount = int(f1.next())
xyz = np.empty((pcount, 3), dtype=np.float)
for t in range(pcount):
    # Keep only x, y, z; any trailing reference field is dropped.
    xyz[t] = map(float,f1.next().split()[0:3])
# Triangles: vertex indices (1-based) plus a label column.
for line in f1:
    if line.startswith("Triangles"):
        break
trisc = int(f1.next())
tris = np.empty((trisc,4), dtype=int)
for t in range(trisc):
    tris[t] = map(int,f1.next().split())
# Tetrahedra: four vertex indices plus a label column.
for line in f1:
    if line.startswith("Tetrahedra"):
        break
tetsc = int(f1.next())
tets = np.empty((tetsc,5), dtype=int)
for t in range(tetsc):
    tets[t] = map(int,f1.next().split())
f1.close()

print "identify geometry"
# Faces are stored in duplicated pairs; pair each triangle with its twin and
# build a two-character label from the two single-digit region labels.
# NOTE(review): assumes triangles come in adjacent pairs (2*i, 2*i+1) with
# identical geometry -- confirm against the mesh generator.
ftype = [('v0', np.int),('v1', np.int),('v2', np.int),('label', 'S2')]
faces = np.empty(trisc/2, dtype=ftype)
for i in range(len(faces)):
    faces[i] = (tris[2*i][0],tris[2*i][1],tris[2*i][2],str(tris[2*i][3])+str(tris[2*i+1][3]))
face_list,face_count = np.unique(faces['label'], return_counts=True)
vtype = [('v0', np.int),('v1', np.int),('v2', np.int),('v3', np.int),('label', 'S1')]
vols = np.empty(tetsc, dtype=vtype)
for i in range(tetsc):
    vols[i] = (tets[i][0],tets[i][1],tets[i][2],tets[i][3],str(tets[i][4]))
vol_list,vol_count = np.unique(vols['label'], return_counts=True)

print "output vtk data files for faces"
for i, f in enumerate(face_list):
    f2 = open(mdir+fname+"_"+face_list[i]+".vtk", 'w')
    f2.write("# vtk DataFile Version 2.0\n")
    f2.write("mesh data\n")
    f2.write("ASCII\n")
    f2.write("DATASET UNSTRUCTURED_GRID\n")
    f2.write("POINTS "+str(pcount)+" float\n")  # overkill, all points!
    # Recenter/flip coordinates; 35.33 / 12.36 are presumably the domain
    # extents -- TODO confirm these magic offsets.
    for v in xyz:
        f2.write(str(v[0]-35.33)+' '+str(35.33-v[1])+' '+str(12.36-v[2])+'\n')
    f2.write("CELLS "+str(face_count[i])+" "+str(face_count[i]*4)+"\n")
    for v in faces:
        if v[3] == f:
            # VTK indices are 0-based; the .mesh file is 1-based.
            f2.write("3 "+str(v[0]-1)+' '+str(v[1]-1)+' '+str(v[2]-1)+'\n')
    f2.write("CELL_TYPES "+str(face_count[i])+"\n")
    for t in range(face_count[i]):
        f2.write("5 ")  # VTK cell type 5 = triangle
    f2.write("\n")
    f2.close()

print "output vtk data files for volumes"
for i, f in enumerate(vol_list):
    f2 = open(mdir+fname+"_"+vol_list[i]+".vtk", 'w')
    f2.write("# vtk DataFile Version 2.0\n")
    f2.write("mesh data\n")
    f2.write("ASCII\n")
    f2.write("DATASET UNSTRUCTURED_GRID\n")
    f2.write("POINTS "+str(pcount)+" float\n")  # overkill, all points!
    for v in xyz:
        f2.write(str(v[0]-35.33)+' '+str(35.33-v[1])+' '+str(12.36-v[2])+'\n')
    f2.write("CELLS "+str(vol_count[i])+" "+str(vol_count[i]*5)+"\n")
    for v in vols:
        if v[4] == f:
            f2.write("4 "+str(v[0]-1)+' '+str(v[1]-1)+' '+str(v[2]-1)+' '+str(v[3]-1)+'\n')
    f2.write("CELL_TYPES "+str(vol_count[i])+"\n")
    for t in range(vol_count[i]):
        f2.write("10 ")  # VTK cell type 10 = tetrahedron
    f2.write("\n")
    f2.close()
from shapely.geometry import Point

from geocoon.sql import read_sql
from geocoon.core import GeoDataFrame, PointSeries

import unittest
from unittest import mock


class SQLTestCase(unittest.TestCase):
    """
    Test SQL GeoCoon SQL routines.
    """
    @mock.patch('pandas.io.sql.read_sql')
    def test_read_sql(self, f_sql):
        """
        Test SQL data frame read
        """
        # Stub the pandas-level read with a frame whose geometry column
        # holds three WKB-encoded points.
        wkb_points = [Point(v, v).wkb for v in (1, 2, 3)]
        frame = GeoDataFrame({
            'a': PointSeries(wkb_points),
            'b': list(range(3)),
        })
        f_sql.return_value = frame[['a', 'b']]

        result = read_sql('query', 'con', geom_col='a')

        # The geometry column comes back decoded as a PointSeries of Points.
        self.assertEqual(PointSeries, type(result.a))
        self.assertEqual(Point, type(result.a[0]))
        self.assertEqual(3, len(result.index))
        self.assertTrue(all([1, 2, 3] == result.a.x))
        self.assertTrue(all([1, 2, 3] == result.a.y))
import csv
import decimal
import os
import datetime

from stocker.common.events import EventStreamNew, EventStockOpen, EventStockClose
from stocker.common.orders import OrderBuy, OrderSell
from stocker.common.utils import Stream


class CompanyProcessor(object):
    """Reads per-day CSV transaction dumps for a single company (Python 2)."""

    def __init__(self, dirname, company_id):
        # Each company's files live in <dirname>/<company_id>/<date>.csv.
        self.dirname = os.path.join(dirname, company_id)
        self.company_id = company_id

    def get_dates(self):
        """Return the date strings (file basenames) available for this company."""
        files = [os.path.splitext(fi)[0] for fi in os.walk(self.dirname).next()[2]]
        return files

    def get_row(self, date):
        """Yield (csv_row, company_id) tuples for one day, oldest first.

        The file is read in reverse order; rows whose description column
        (index 5) does not start with 'TRANSAKCJA' are skipped, as are
        short rows.  A missing file yields nothing.
        """
        filename = os.path.join(self.dirname, date) + ".csv"
        try:
            with open(filename, 'r') as f:
                for row in reversed(list(csv.reader(f, delimiter=';'))):
                    try:
                        desc = row[5]
                        if desc.startswith('TRANSAKCJA'):
                            yield (row, self.company_id)
                    except IndexError:
                        pass
        except IOError as e:
            return


class Processor(object):
    """Merges all companies' daily transactions into one ordered event stream."""

    def build_stream(self, dirname_in, filename_out):
        """Convert the CSV tree under dirname_in into a Stream at filename_out."""
        self.stream = Stream()
        self.stream.begin(filename_out)
        self.__process_companies(dirname_in)
        self.stream.end()

    def __process_companies(self, dirname):
        # One CompanyProcessor per immediate subdirectory.
        companies = []
        for company in os.walk(dirname).next()[1]:
            companies.append(CompanyProcessor(dirname, company))

        # Union of all trading dates, processed in chronological order.
        dates_set = set()
        for company in companies:
            dates_set.update(company.get_dates())
        dates_ordered = sorted(dates_set, key=lambda date: datetime.datetime.strptime(date, "%Y-%m-%d"))
        for date in dates_ordered:
            self.__process_date(date, companies)

    def __process_date(self, date, companies):
        # k-way merge: keep one pending row per live generator and always
        # emit the row with the earliest HH:MM:SS timestamp next.
        rows = []
        correct_generators = []
        correct_day = False
        generators = [company.get_row(date) for company in companies]
        # Prime each generator with its first row; companies with no
        # transactions on this date are dropped immediately.
        for generator in generators:
            try:
                row, company_id = generator.next()
                row = (company_id, row, generator)
                rows.append(row)
                correct_generators.append(generator)
            except StopIteration as e:
                pass
        if correct_generators:  # correct day (have transactions)
            correct_day = True

        if correct_day:
            # Market opens at 09:00 on any day with transactions.
            self.stream.add_event(EventStockOpen(
                datetime.datetime.combine(datetime.datetime.strptime(date, "%Y-%m-%d"), datetime.time(9, 0))))

        # main loop, multiplexing rows
        while correct_generators:
            # row tuple layout: (company_id, csv_row, generator);
            # csv_row[0] is the transaction time "H:M:S".
            row_data = min(rows, key=lambda row: datetime.datetime.strptime(row[1][0], "%H:%M:%S"))
            rows.remove(row_data)
            company_id, row, generator = row_data
            self.__process_row(row, date, company_id)
            # Refill from the same generator, or retire it when exhausted.
            try:
                row, company_id = generator.next()
                row = (company_id, row, generator)
                rows.append(row)
            except StopIteration as e:
                correct_generators.remove(generator)

        if correct_day:
            # Market closes at 18:00.
            self.stream.add_event(EventStockClose(
                datetime.datetime.combine(datetime.datetime.strptime(date, "%Y-%m-%d"), datetime.time(18, 0))))

    def __process_row(self, row, date, company_id):
        # Column layout: [0]=time, [1]=price (comma decimal), [3]=amount.
        amount = int(row[3])
        limit_price = decimal.Decimal(row[1].replace(',', '.'))
        timestamp = datetime.datetime.strptime("%s %s" % (date, row[0]), "%Y-%m-%d %H:%M:%S")
        expiration_date = timestamp + datetime.timedelta(days=1)
        # Each transaction is mirrored as a matching buy and sell order.
        self.stream.add_event(
            EventStreamNew(timestamp, OrderBuy(company_id, amount, limit_price, expiration_date)))
        self.stream.add_event(
            EventStreamNew(timestamp, OrderSell(company_id, amount, limit_price, expiration_date)))
import sys sys.path.append('../') from toolbox.hreaders import token_readers as reader from toolbox.hreducers import list_reducer as reducer SOLO_FACTURA = False def reduction(x,y): v1 = x.split(',') v2 = y.split(',') r = x if int(v1[1])>=int(v2[1]) else y return r _reader = reader.Token_reader("\t",1) _reducer = reducer.List_reducer(reduction) #x: previous reduction result, y: next element if SOLO_FACTURA: for line in sys.stdin: key, value = _reader.read_all(line) K,V = _reducer.reduce(key,value) if K: print '{}\t{}'.format(V.split(',')[0],V.split(',')[1]) V = _reducer.out.split(',') print '{}\t{}'.format(V[0],V[1]) else: for line in sys.stdin: key, value = _reader.read_all(line) K,V = _reducer.reduce(key,value) if K: print '{}\t{}'.format(K,V) print '{}\t{}'.format(key,V)
# Python 2 script: render the abook address book (~/.abook/addressbook,
# INI format) as a PDF table ("phello.pdf") using ReportLab Platypus,
# starting a new page whenever the leading letter of the surname changes.
from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, Table, TableStyle, PageBreak
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.rl_config import defaultPageSize
from reportlab.lib.units import cm
import operator
import os
import ConfigParser
import string

config = ConfigParser.ConfigParser()
config.read(os.environ["HOME"] + "/.abook/addressbook")
# The 'format' section is abook metadata, not a contact.
config.remove_section('format')

PAGE_HEIGHT=defaultPageSize[1]; PAGE_WIDTH=defaultPageSize[0]
styles = getSampleStyleSheet()
buchstabe = "A"  # current alphabetical group letter ("Buchstabe" = letter)

Title = "Hello world"
pageinfo = "platypus example"


def Pages(canvas, doc):
    # Page-decoration callback for doc.build; currently draws nothing.
    canvas.saveState()
    canvas.restoreState()


def go(buchstabe):
    """Build phello.pdf from the parsed address book.

    *buchstabe* is the starting group letter; a page break is inserted each
    time the first letter of a contact's surname differs from it.
    """
    doc = SimpleDocTemplate("phello.pdf")
    Story = []
    style = styles["Normal"]
    addresses=[]
    for s in config.sections():
        # nb: name/address block, ub: phone/e-mail block, mb: custom block
        # (all built as mini-HTML for Paragraph).
        nb=""
        ub=""
        mb=""
        if config.has_option(s,'name'):
            nb = "<b>" + config.get(s,'name') + "</b><br/>"
            worte=config.get(s,'name').split()
            print len(worte)
            # Surname heuristic: second word when present, else the only word.
            # NOTE(review): breaks for names with >2 words or "Last, First".
            if len(worte)<2:
                nachname=worte[0]
            else:
                nachname=worte[1]
            anfangsbuchstabe=nachname[0:1]
            # New initial letter: flush the accumulated table and page-break.
            if anfangsbuchstabe!=buchstabe:
                buchstabe=anfangsbuchstabe
                print buchstabe
                p = Table(addresses)
                p.setStyle(TableStyle([('VALIGN',(0,0),(-1,-1),"TOP"),
                                       ('ALIGN',(0,-1),(0,-1),'RIGHT')]))
                Story.append(p)
                Story.append(PageBreak())
                addresses=[]
        if config.has_option(s,'address'):
            nb = nb + config.get(s,'address') + "<br/>"
        if config.has_option(s,'zip'):
            nb = nb + config.get(s,'zip') + " "
        if config.has_option(s,'city'):
            nb = nb + config.get(s,'city') + "<br/>"
        if config.has_option(s,'state'):
            nb = nb + config.get(s,'state') + " - "
        if config.has_option(s,'country'):
            nb = nb + config.get(s,'country') + "<br/>"
        nb = nb +"<br/>"
        if config.has_option(s,'phone'):
            ub= "Fon: " + config.get(s,'phone') + "<br/>"
        if config.has_option(s,'mobile'):
            ub= ub + "Mobi: " + config.get(s,'mobile') + "<br/>"
        if config.has_option(s,'email'):
            # abook stores multiple addresses comma-separated.
            ub= ub + config.get(s,'email').replace(',','<br/>') + "<br/>"
        ub=ub+"<br/>"
        if config.has_option(s,'custom3'):
            mb= config.get(s,'custom3') + "<br/>"
        mb=mb+"<br/>"
        nameblock = Paragraph(nb,style)
        numberblock = Paragraph(ub,style)
        middleblock = Paragraph(mb,style)
        addresses.append([nameblock,middleblock,numberblock])
    # Flush the final letter group.
    p = Table(addresses)
    p.setStyle(TableStyle([('VALIGN',(0,0),(-1,-1),"TOP"),
                           ('ALIGN',(0,-1),(0,-1),'RIGHT')]))
    Story.append(p)
    doc.build(Story, onFirstPage=Pages, onLaterPages=Pages)


go(buchstabe)
r""" csection.py -- Create a tree of contents, organized by sections and inside sections the exercises unique_name. AUTHOR: - Pedro Cruz (2012-01): initial version - Pedro Cruz (2016-03): improvment for smc An exercise could contain um its %summary tag line a description of section in form:: %sumary section descriptive text; subsection descriptive text; etc The class transform contents of some MegUA database into a tree of sections specifying exercises as leaves. Then, this tree can be flushed out to some file or output system. STRUTURE SAMPLE:: contents -> { 'Section1': Section('Section1',0), 'Section2': Section('Section2',0) } For each Section object see below in this file. A brief description is: * a SectionClassifier is the "book" made with keys (chapter names) that are keys of a dictionary. * SectionClassifier is a dictionary: keys are the chapter names and the values are Section objects. * a Section object is defined by * a name (the key of the SectionClassifiers appears again in sec_name) * level (0 if it is top level sections: chapters, and so on) * a list of exercises beloging to the section and * a dictionary of subsections (again Section objects) * Section = (sec_name, level, [list of exercises names], dict( subsections ) ) EXAMPLES: Test with: :: sage -t csection.py Create or edit a database: :: sage: from megua.megbook import MegBook sage: meg = MegBook(r'_input/csection.sqlite') Save a new or changed exercise :: sage: txt=r''' ....: %Summary Primitives; Imediate primitives; Trigonometric ....: ....: Here, is a summary. ....: ....: %Problem Some Name ....: What is the primitive of $a x + b@()$ ? ....: ....: %Answer ....: The answer is $prim+C$, for $C in \mathbb{R}$. ....: ....: class E28E28_pimtrig_001(ExerciseBase): ....: pass ....: ''' sage: meg.save(txt) ------------------------------- Instance of: E28E28_pimtrig_001 ------------------------------- ==> Summary: Here, is a summary. ==> Problem instance What is the primitive of $a x + b$ ? 
==> Answer instance The answer is $prim+C$, for $C in \mathbb{R}$. sage: txt=r''' ....: %Summary Primitives; Imediate primitives; Trigonometric ....: ....: Here, is a summary. ....: ....: %Problem Some Name2 ....: What is the primitive of $a x + b@()$ ? ....: ....: %Answer ....: The answer is $prim+C$, for $C in \mathbb{R}$. ....: ....: class E28E28_pimtrig_002(ExerciseBase): ....: pass ....: ''' sage: meg.save(txt) ------------------------------- Instance of: E28E28_pimtrig_002 ------------------------------- ==> Summary: Here, is a summary. ==> Problem instance What is the primitive of $a x + b$ ? ==> Answer instance The answer is $prim+C$, for $C in \mathbb{R}$. sage: txt=r''' ....: %Summary Primitives; Imediate primitives; Polynomial ....: ....: Here, is a summary. ....: ....: %Problem Some Problem 1 ....: What is the primitive of $a x + b@()$ ? ....: ....: %Answer ....: The answer is $prim+C$, for $C in \mathbb{R}$. ....: ....: class E28E28_pdirect_001(ExerciseBase): ....: pass ....: ''' sage: meg.save(txt) ------------------------------- Instance of: E28E28_pdirect_001 ------------------------------- ==> Summary: Here, is a summary. ==> Problem instance What is the primitive of $a x + b$ ? ==> Answer instance The answer is $prim+C$, for $C in \mathbb{R}$. sage: txt=r''' ....: %Summary ....: ....: Here, is a summary. ....: ....: %Problem ....: What is the primitive of $a x + b@()$ ? ....: ....: %Answer ....: The answer is $prim+C$, for $C in \mathbb{R}$. ....: ....: class E28E28_pdirect_003(ExerciseBase): ....: pass ....: ''' sage: meg.save(txt) Each exercise can belong to a section/subsection/subsubsection. Write sections using ';' in the '%summary' line. For ex., '%summary Section; Subsection; Subsubsection'. <BLANKLINE> Each problem can have a suggestive name. Write in the '%problem' line a name, for ex., '%problem The Fish Problem'. <BLANKLINE> Check exercise E28E28_pdirect_003 for the above warnings. 
    -------------------------------
    Instance of: E28E28_pdirect_003
    -------------------------------
    ==> Summary:
    Here, is a summary.
    ==> Problem instance
    What is the primitive of $a x + b$ ?
    ==> Answer instance
    The answer is $prim+C$, for $C in \mathbb{R}$.

Travel down the tree sections:

::

    sage: s = SectionClassifier(meg.megbook_store)
    sage: s.textprint()
    Primitives
     Imediate primitives
      Polynomial
      > E28E28_pdirect_001
      Trigonometric
      > E28E28_pimtrig_001
      > E28E28_pimtrig_002
    E28E28_pdirect
    > E28E28_pdirect_003

Testing a recursive iterator:

::

    sage: meg = MegBook("_input/paula.sqlite")
    sage: s = SectionClassifier(meg.megbook_store)
    sage: for section in s.section_iterator():
    ....:     print section

"""


import collections

from megua.localstore import ExIter


class SectionClassifier:
    """
    Classify the exercises of a MegBook store into a tree of Section
    objects, keyed at top level by chapter name (see module docstring).
    """

    def __init__(self,megbook_store,max_level=4,debug=False,exerset=None):
        #save megstore reference
        self.megbook_store = megbook_store
        # Deepest section level retained; deeper path components are cut.
        self.max_level = max_level
        #Exercise set or none for all
        # NOTE(review): the 'debug' argument is accepted but never used.
        self.exercise_set = exerset
        #dictionary of sections: top-level section name -> Section
        self.contents = dict()
        self.classify()

    def classify(self):
        """
        Classify by sections: walk every exercise row and insert its
        unique_name into the section tree derived from 'sections_text'.
        """
        for row in ExIter(self.megbook_store):
            # Optionally restrict to an explicit exercise set.
            if self.exercise_set and not row['unique_name'] in self.exercise_set:
                continue
            #get a list in form ["section", "subsection", "subsubsection", ...]
            sec_list = str_to_list(row['sections_text'])
            if sec_list == [] or sec_list == [u'']:
                # No %summary sections given: file the exercise under the
                # prefix of its unique_name (see first_part below).
                sec_list = [ first_part(row['unique_name']) ]
            #sec_list contain at least one element.
            if not sec_list[0] in self.contents:
                self.contents[sec_list[0]] = Section(sec_list[0])
            #sec_list contains less than `max_level` levels
            subsec_list = sec_list[1:self.max_level]
            self.contents[sec_list[0]].add(row['unique_name'],subsec_list)

    def textprint(self):
        """
        Textual print of all the contents.
        """
        for c in self.contents:
            self.contents[c].textprint()

    def section_iterator(self):
        r"""
        Iterate the whole tree, top sections first (alphabetical), then
        each section's subsections depth-first.

        OUTPUT:

        - an iterator yielding (secname, sorted exercises)
        """
        # A stack-based alternative to the traverse_tree method above.
        od_top = collections.OrderedDict(sorted(self.contents.items()))
        stack = []
        for secname,section in od_top.iteritems():
            stack.append(section)
        while stack:
            section_top = stack.pop(0) #remove left element
            yield section_top
            # Push this section's (sorted) subsections to the front so they
            # are visited before the remaining siblings.
            od_sub = collections.OrderedDict(sorted(section_top.subsections.items()))
            desc = []
            for secname,section in od_sub.iteritems():
                desc.append(section)
            stack[:0] = desc # add elements from desc list at left (":0")


class Section:
    r"""
    Section = (sec_name, level, [list of exercises names], dict( subsections ) )
    """

    def __init__(self,sec_name,level=0):
        self.sec_name = sec_name
        # 0 for chapters; incremented for each nesting level.
        self.level = level
        #Exercises of this section (self).
        self.exercises = []
        #This section (self) can have subsections.
        self.subsections = dict()

    def __str__(self):
        return self.level*" " + self.sec_name.encode("utf8") + " has " + str(len(self.exercises))

    def __repr__(self):
        return self.level*" " + self.sec_name.encode("utf8") + " has " + str(len(self.exercises))

    def add(self,exname,sections):
        r"""
        Recursively add exercise *exname* under the subsection path
        *sections*; an empty path files it in this section (kept sorted).
        """
        if sections == []:
            self.exercises.append(exname)
            self.exercises.sort()
            return
        if not sections[0] in self.subsections:
            self.subsections[sections[0]] = Section(sections[0],self.level+1)
        self.subsections[sections[0]].add(exname,sections[1:])

    def textprint(self):
        """
        Textual print of the contents of this section and, recursively,
        of the subsections (indentation = nesting level).
        """
        sp = " "*self.level
        print sp + self.sec_name
        for e in self.exercises:
            print sp+r"> "+e
        for sub in self.subsections:
            self.subsections[sub].textprint()


def str_to_list(s):
    """
    Convert::

       'section description; subsection description; subsubsection description'

    into::

       [ 'section description', 'subsection description', 'subsubsection description']
    """
    sl = s.split(';')
    for i in range(len(sl)):
        sl[i] = sl[i].strip()
    return sl


def first_part(s):
    """
    Usually exercise are named like `E12X34_name_001` and this routine
    extracts `E12X34` or `top` if no underscore is present.
    """
    p = s.find("_")
    # Cut at the second underscore; with fewer than two underscores the
    # string is returned unchanged (or 'top' when empty).
    p = s.find("_",p+1)
    if p!=-1:
        s = s[:p]
    if s=='':
        s = 'top'
    return s
# Icom ICF clone-mode protocol support (Python 2: string-based byte handling,
# ``except X, e`` syntax).  Implements frame parsing, clone-in/clone-out and
# the bank models shared by Icom radio drivers.
import struct
import re
import time
import logging

from chirp import chirp_common, errors, util, memmap
from chirp.settings import RadioSetting, RadioSettingGroup, \
    RadioSettingValueBoolean, RadioSettings

LOG = logging.getLogger(__name__)

# Clone-protocol command bytes.
CMD_CLONE_OUT = 0xE2
CMD_CLONE_IN = 0xE3
CMD_CLONE_DAT = 0xE4
CMD_CLONE_END = 0xE5

# Optional file object used to capture outgoing clone traffic (debugging).
SAVE_PIPE = None


class IcfFrame:
    """A single ICF communication frame"""
    src = 0        # source address byte (0xEE=PC, 0xEF=Radio)
    dst = 0        # destination address byte
    cmd = 0        # command byte (one of the CMD_* values or 0xE0/0xE1/0xE6)
    payload = ""   # raw payload bytes between header and 0xFD terminator

    def __str__(self):
        addrs = {0xEE: "PC", 0xEF: "Radio"}
        cmds = {0xE0: "ID",
                0xE1: "Model",
                0xE2: "Clone out",
                0xE3: "Clone in",
                0xE4: "Clone data",
                0xE5: "Clone end",
                0xE6: "Clone result"}
        return "%s -> %s [%s]:\n%s" % (addrs[self.src], addrs[self.dst],
                                       cmds[self.cmd],
                                       util.hexprint(self.payload))

    def __init__(self):
        pass


def parse_frame_generic(data):
    """Parse an ICF frame of unknown type from the beginning of @data.

    Returns (frame, remaining_data); (None, data) when no 0xFD terminator
    has arrived yet.  Assumes data starts with the 0xFE 0xFE preamble.
    """
    frame = IcfFrame()
    frame.src = ord(data[2])
    frame.dst = ord(data[3])
    frame.cmd = ord(data[4])
    try:
        end = data.index("\xFD")
    except ValueError:
        # Terminator not received yet - caller should keep buffering.
        return None, data
    frame.payload = data[5:end]
    return frame, data[end+1:]


class RadioStream:
    """A class to make reading a stream of IcfFrames easier"""
    def __init__(self, pipe):
        self.pipe = pipe
        self.data = ""  # unparsed bytes buffered from the pipe

    def _process_frames(self):
        """Parse as many complete frames as possible out of self.data."""
        if not self.data.startswith("\xFE\xFE"):
            LOG.error("Out of sync with radio:\n%s" % util.hexprint(self.data))
            raise errors.InvalidDataError("Out of sync with radio")
        elif len(self.data) < 5:
            return []  # Not enough data for a full frame
        frames = []
        while self.data:
            try:
                cmd = ord(self.data[4])
            except IndexError:
                break  # Out of data
            try:
                frame, rest = parse_frame_generic(self.data)
                if not frame:
                    break
                elif frame.src == 0xEE and frame.dst == 0xEF:
                    # PC echo, ignore
                    pass
                else:
                    frames.append(frame)
                self.data = rest
            except errors.InvalidDataError, e:
                LOG.error("Failed to parse frame (cmd=%i): %s" % (cmd, e))
                return []
        return frames

    def get_frames(self, nolimit=False):
        """Read any pending frames from the stream.

        With nolimit=False, returns early once >128 bytes containing a
        frame terminator have accumulated, so callers can report status.
        """
        while True:
            _data = self.pipe.read(64)
            if not _data:
                break
            else:
                self.data += _data
            if not nolimit and len(self.data) > 128 and "\xFD" in self.data:
                break  # Give us a chance to do some status
            if len(self.data) > 1024:
                break  # Avoid an endless loop of chewing garbage
        if not self.data:
            return []
        return self._process_frames()


def get_model_data(radio, mdata="\x00\x00\x00\x00"):
    """Query the @radio for its model data"""
    send_clone_frame(radio, 0xe0, mdata, raw=True)
    stream = RadioStream(radio.pipe)
    frames = stream.get_frames()
    if len(frames) != 1:
        raise errors.RadioError("Unexpected response from radio")
    return frames[0].payload


def get_clone_resp(pipe, length=None, max_count=None):
    """Read the response to a clone frame.

    Reads byte-by-byte until @length bytes arrive (if given), a frame
    terminator (0xFD) is seen, or @max_count reads have been made.
    """
    def exit_criteria(buf, length, cnt, max_count):
        """Stop reading a clone response if we have enough data or encounter
        the end of a frame"""
        if max_count is not None:
            if cnt >= max_count:
                return True
        if length is None:
            return buf.endswith("\xfd")
        else:
            return len(buf) == length

    resp = ""
    cnt = 0
    while not exit_criteria(resp, length, cnt, max_count):
        resp += pipe.read(1)
        cnt += 1
    return resp


def send_clone_frame(radio, cmd, data, raw=False, checksum=False):
    """Send a clone frame with @cmd and @data to the @radio.

    The payload encoding (raw vs BCD, optional checksum) is delegated to
    radio.get_payload().  Returns the full frame that was written.
    """
    payload = radio.get_payload(data, raw, checksum)
    frame = "\xfe\xfe\xee\xef%s%s\xfd" % (chr(cmd), payload)
    if SAVE_PIPE:
        LOG.debug("Saving data...")
        SAVE_PIPE.write(frame)
    # LOG.debug("Sending:\n%s" % util.hexprint(frame))
    # LOG.debug("Sending:\n%s" % util.hexprint(hed[6:]))
    if cmd == 0xe4:
        # Uncomment to avoid cloning to the radio
        # return frame
        pass
    radio.pipe.write(frame)
    if radio.MUNCH_CLONE_RESP:
        # Do max 2*len(frame) read(1) calls
        get_clone_resp(radio.pipe, max_count=2*len(frame))
    return frame


def process_data_frame(radio, frame, _mmap):
    """Process a data frame, adding the payload to @_mmap.

    Returns (start_addr, end_addr) of the region written.  Frames for
    maps >= 0x10000 carry a 4-byte address; smaller maps use 2 bytes.
    """
    _data = radio.process_frame_payload(frame.payload)
    # Checksum logic added by Rick DeWitt, 9/2019, issue # 7075
    if len(_mmap) >= 0x10000:    # This map size not tested for checksum
        saddr, = struct.unpack(">I", _data[0:4])
        length, = struct.unpack("B", _data[4])
        data = _data[5:5+length]
        sumc, = struct.unpack("B", _data[5+length])
        addr1, = struct.unpack("B", _data[0])
        addr2, = struct.unpack("B", _data[1])
        addr3, = struct.unpack("B", _data[2])
        addr4, = struct.unpack("B", _data[3])
    else:   # But this one has been tested for raw mode radio (IC-2730)
        saddr, = struct.unpack(">H", _data[0:2])
        length, = struct.unpack("B", _data[2])
        data = _data[3:3+length]
        sumc, = struct.unpack("B", _data[3+length])
        addr1, = struct.unpack("B", _data[0])
        addr2, = struct.unpack("B", _data[1])
        addr3 = 0
        addr4 = 0
    # Checksum is the two's complement of the byte sum of address, length
    # and data, truncated to 8 bits.
    cs = addr1 + addr2 + addr3 + addr4 + length
    for byte in data:
        cs += ord(byte)
    vx = ((cs ^ 0xFFFF) + 1) & 0xFF
    if sumc != vx:
        LOG.error("Bad checksum in address %04X frame: %02x "
                  "calculated, %02x sent!" % (saddr, vx, sumc))
        raise errors.InvalidDataError(
            "Checksum error in download! "
            "Try disabling High Speed Clone option in Settings.")
    try:
        _mmap[saddr] = data
    except IndexError:
        # NOTE(review): 'bytes' here is the Python builtin, not a byte
        # count — this log line prints the type object; likely meant
        # ``length``.  Left unchanged (doc-only pass).
        LOG.error("Error trying to set %i bytes at %05x (max %05x)" %
                  (bytes, saddr, len(_mmap)))
    return saddr, saddr + length


def start_hispeed_clone(radio, cmd):
    """Send the magic incantation to the radio to go fast"""
    buf = ("\xFE" * 20) + \
        "\xEE\xEF\xE8" + \
        radio.get_model() + \
        "\x00\x00\x02\x01\xFD"
    LOG.debug("Starting HiSpeed:\n%s" % util.hexprint(buf))
    radio.pipe.write(buf)
    radio.pipe.flush()
    resp = radio.pipe.read(128)
    LOG.debug("Response:\n%s" % util.hexprint(resp))
    LOG.info("Switching to 38400 baud")
    radio.pipe.baudrate = 38400
    buf = ("\xFE" * 14) + \
        "\xEE\xEF" + \
        chr(cmd) + \
        radio.get_model()[:3] + \
        "\x00\xFD"
    LOG.debug("Starting HiSpeed Clone:\n%s" % util.hexprint(buf))
    radio.pipe.write(buf)
    radio.pipe.flush()


def _clone_from_radio(radio):
    """Drive a full download: verify model, then stream data frames into
    a fresh MemoryMap, reporting status via radio.status_fn."""
    md = get_model_data(radio)

    if md[0:4] != radio.get_model():
        LOG.info("This model: %s" % util.hexprint(md[0:4]))
        LOG.info("Supp model: %s" % util.hexprint(radio.get_model()))
        raise errors.RadioError("I can't talk to this model")

    if radio.is_hispeed():
        start_hispeed_clone(radio, CMD_CLONE_OUT)
    else:
        send_clone_frame(radio, CMD_CLONE_OUT, radio.get_model(), raw=True)

    LOG.debug("Sent clone frame")

    stream = RadioStream(radio.pipe)

    addr = 0
    _mmap = memmap.MemoryMap(chr(0x00) * radio.get_memsize())
    last_size = 0
    while True:
        frames = stream.get_frames()
        if not frames:
            break

        for frame in frames:
            if frame.cmd == CMD_CLONE_DAT:
                src, dst = process_data_frame(radio, frame, _mmap)
                if last_size != (dst - src):
                    LOG.debug("ICF Size change from %i to %i at %04x" %
                              (last_size, dst - src, src))
                    last_size = dst - src
                if addr != src:
                    LOG.debug("ICF GAP %04x - %04x" % (addr, src))
                addr = dst
            elif frame.cmd == CMD_CLONE_END:
                LOG.debug("End frame (%i):\n%s" %
                          (len(frame.payload), util.hexprint(frame.payload)))
                LOG.debug("Last addr: %04x" % addr)

        if radio.status_fn:
            status = chirp_common.Status()
            status.msg = "Cloning from radio"
            status.max = radio.get_memsize()
            status.cur = addr
            radio.status_fn(status)

    return _mmap


def clone_from_radio(radio):
    """Do a full clone out of the radio's memory"""
    try:
        return _clone_from_radio(radio)
    except Exception, e:
        raise errors.RadioError("Failed to communicate with the radio: %s" % e)


def send_mem_chunk(radio, start, stop, bs=32):
    """Send a single chunk of the radio's memory from @start-@stop
    in blocks of @bs bytes, updating radio.status_fn as it goes."""
    _mmap = radio.get_mmap()

    status = chirp_common.Status()
    status.msg = "Cloning to radio"
    status.max = radio.get_memsize()

    for i in range(start, stop, bs):
        if i + bs < stop:
            size = bs
        else:
            size = stop - i

        # Address width depends on total map size (see process_data_frame).
        if radio.get_memsize() >= 0x10000:
            chunk = struct.pack(">IB", i, size)
        else:
            chunk = struct.pack(">HB", i, size)
        chunk += _mmap[i:i+size]

        send_clone_frame(radio,
                         CMD_CLONE_DAT,
                         chunk,
                         raw=False,
                         checksum=True)

        if radio.status_fn:
            status.cur = i+bs
            radio.status_fn(status)

    return True


def _clone_to_radio(radio):
    """Drive a full upload and return True when the radio reports success."""
    global SAVE_PIPE

    # Uncomment to save out a capture of what we actually write to the radio
    # SAVE_PIPE = file("pipe_capture.log", "w", 0)

    md = get_model_data(radio)

    if md[0:4] != radio.get_model():
        raise errors.RadioError("I can't talk to this model")

    # This mimics what the Icom software does, but isn't required and just
    # takes longer
    # md = get_model_data(radio, mdata=md[0:2]+"\x00\x00")
    # md = get_model_data(radio, mdata=md[0:2]+"\x00\x00")

    stream = RadioStream(radio.pipe)

    if radio.is_hispeed():
        start_hispeed_clone(radio, CMD_CLONE_IN)
    else:
        send_clone_frame(radio, CMD_CLONE_IN, radio.get_model(), raw=True)

    frames = []

    for start, stop, bs in radio.get_ranges():
        if not send_mem_chunk(radio, start, stop, bs):
            break

    frames += stream.get_frames()

    send_clone_frame(radio, CMD_CLONE_END, radio.get_endframe(), raw=True)

    if SAVE_PIPE:
        SAVE_PIPE.close()
        SAVE_PIPE = None

    # NOTE(review): if no frame ever arrives, ``result`` stays unbound and
    # the final statement raises NameError instead of the intended
    # RadioError; also the loop keeps polling even after a result arrives.
    for i in range(0, 10):
        try:
            frames += stream.get_frames(True)
            result = frames[-1]
        except IndexError:
            LOG.debug("Waiting for clone result...")
            time.sleep(0.5)

    if len(frames) == 0:
        raise errors.RadioError("Did not get clone result from radio")

    return result.payload[0] == '\x00'


def clone_to_radio(radio):
    """Initiate a full memory clone out to @radio"""
    try:
        return _clone_to_radio(radio)
    except Exception, e:
        logging.exception("Failed to communicate with the radio")
        raise errors.RadioError("Failed to communicate with the radio: %s" % e)


def convert_model(mod_str):
    """Convert an ICF-style model string (hex digits) into what we get
    from the radio (raw bytes)."""
    data = ""
    for i in range(0, len(mod_str), 2):
        hexval = mod_str[i:i+2]
        intval = int(hexval, 16)
        data += chr(intval)
    return data


def convert_data_line(line):
    """Convert an ICF data line to raw memory format.

    Lines of 38 chars carry a 2-byte address (small memory); longer lines
    carry a 4-byte address.  Comment lines starting with '#' yield "".
    """
    if line.startswith("#"):
        return ""
    line = line.strip()

    if len(line) == 38:
        # Small memory (< 0x10000)
        size = int(line[4:6], 16)
        data = line[6:]
    else:
        # Large memory (>= 0x10000)
        size = int(line[8:10], 16)
        data = line[10:]

    _mmap = ""
    i = 0
    while i < (size * 2):
        try:
            val = int("%s%s" % (data[i], data[i+1]), 16)
            i += 2
            _mmap += struct.pack("B", val)
        except ValueError, e:
            LOG.debug("Failed to parse byte: %s" % e)
            break

    return _mmap


def read_file(filename):
    """Read an ICF file and return the model string and memory data"""
    f = file(filename)

    mod_str = f.readline()
    dat = f.readlines()

    model = convert_model(mod_str.strip())

    _mmap = ""
    for line in dat:
        if not line.startswith("#"):
            _mmap += convert_data_line(line)

    return model, memmap.MemoryMap(_mmap)


def is_9x_icf(filename):
    """Returns True if @filename is an IC9x ICF file"""
    f = file(filename)
    mdata = f.read(8)
    f.close()

    return mdata in ["30660000", "28880000"]


def is_icf_file(filename):
    """Returns True if @filename is an ICF file"""
    f = file(filename)
    data = f.readline()
    data += f.readline()
    f.close()

    data = data.replace("\n", "").replace("\r", "")

    # ICF files begin with an 8-hex-digit model number followed by '#'.
    return bool(re.match("^[0-9]{8}#", data))


class IcomBank(chirp_common.Bank):
    """A bank that works for all Icom radios"""
    # Integral index of the bank (not to be confused with per-memory
    # bank indexes
    index = 0


class IcomNamedBank(IcomBank):
    """A bank with an adjustable name"""
    def set_name(self, name):
        """Set the name of the bank"""
        pass


class IcomBankModel(chirp_common.BankModel):
    """Icom radios all have pretty much the same simple bank model. This
    central implementation can, with a few icom-specific radio interfaces
    serve most/all of them"""

    def get_num_mappings(self):
        return self._radio._num_banks

    def get_mappings(self):
        banks = []
        for i in range(0, self._radio._num_banks):
            # Banks are lettered A, B, C, ...
            index = chr(ord("A") + i)
            bank = self._radio._bank_class(self, index, "BANK-%s" % index)
            bank.index = i
            banks.append(bank)
        return banks

    def add_memory_to_mapping(self, memory, bank):
        self._radio._set_bank(memory.number, bank.index)

    def remove_memory_from_mapping(self, memory, bank):
        if self._radio._get_bank(memory.number) != bank.index:
            raise Exception("Memory %i not in bank %s. Cannot remove." %
                            (memory.number, bank))

        self._radio._set_bank(memory.number, None)

    def get_mapping_memories(self, bank):
        memories = []
        for i in range(*self._radio.get_features().memory_bounds):
            if self._radio._get_bank(i) == bank.index:
                memories.append(self._radio.get_memory(i))
        return memories

    def get_memory_mappings(self, memory):
        index = self._radio._get_bank(memory.number)
        if index is None:
            return []
        else:
            return [self.get_mappings()[index]]


class IcomIndexedBankModel(IcomBankModel,
                           chirp_common.MappingModelIndexInterface):
    """Generic bank model for Icom radios with indexed banks"""
    def get_index_bounds(self):
        return self._radio._bank_index_bounds

    def get_memory_index(self, memory, bank):
        return self._radio._get_bank_index(memory.number)

    def set_memory_index(self, memory, bank, index):
        if bank not in self.get_memory_mappings(memory):
            raise Exception("Memory %i is not in bank %s" % (memory.number,
                                                             bank))

        if index not in range(*self._radio._bank_index_bounds):
            raise Exception("Invalid index")
        self._radio._set_bank_index(memory.number, index)

    def get_next_mapping_index(self, bank):
        indexes = []
        for i in range(*self._radio.get_features().memory_bounds):
            if self._radio._get_bank(i) == bank.index:
                indexes.append(self._radio._get_bank_index(i))

        # Return the first unused slot number in this bank.
        for i in range(0, 256):
            if i not in indexes:
                return i

        raise errors.RadioError("Out of slots in this bank")


def compute_checksum(data):
    """Two's-complement 8-bit checksum over the byte sum of @data."""
    cs = 0
    for byte in data:
        cs += ord(byte)
    return ((cs ^ 0xFFFF) + 1) & 0xFF


class IcomCloneModeRadio(chirp_common.CloneModeRadio):
    """Base class for Icom clone-mode radios"""
    VENDOR = "Icom"
    BAUDRATE = 9600

    # Ideally, the driver should read clone response after each clone frame
    # is sent, but for some reason it hasn't behaved this way for years.
    # So not to break the existing tested drivers the MUNCH_CLONE_RESP flag
    # was added. It's False by default which brings the old behavior,
    # i.e. clone response is not read. The expectation is that new Icom
    # drivers will use MUNCH_CLONE_RESP = True and old drivers will be
    # gradually migrated to this. Once all Icom drivers will use
    # MUNCH_CLONE_RESP = True, this flag will be removed.
    MUNCH_CLONE_RESP = False

    _model = "\x00\x00\x00\x00"  # 4-byte model string
    _endframe = ""               # Model-unique ending frame
    _ranges = []                 # Ranges of the mmap to send to the radio
    _num_banks = 10              # Most simple Icoms have 10 banks, A-J
    _bank_index_bounds = (0, 99)
    _bank_class = IcomBank
    _can_hispeed = False

    @classmethod
    def is_hispeed(cls):
        """Returns True if the radio supports hispeed cloning"""
        return cls._can_hispeed

    @classmethod
    def get_model(cls):
        """Returns the Icom model data for this radio"""
        return cls._model

    @classmethod
    def get_endframe(cls):
        """Returns the magic clone end frame for this radio"""
        return cls._endframe

    @classmethod
    def get_ranges(cls):
        """Returns the ranges this radio likes to have in a clone"""
        return cls._ranges

    def process_frame_payload(self, payload):
        """Convert BCD-encoded data to raw"""
        bcddata = payload
        data = ""
        i = 0
        while i+1 < len(bcddata):
            try:
                val = int("%s%s" % (bcddata[i], bcddata[i+1]), 16)
                i += 2
                data += struct.pack("B", val)
            except ValueError, e:
                LOG.error("Failed to parse byte: %s" % e)
                break
        return data

    def get_payload(self, data, raw, checksum):
        """Returns the data with optional checksum BCD-encoded for the radio"""
        if raw:
            return data
        payload = ""
        for byte in data:
            payload += "%02X" % ord(byte)
        if checksum:
            payload += "%02X" % compute_checksum(data)
        return payload

    def sync_in(self):
        self._mmap = clone_from_radio(self)
        self.process_mmap()

    def sync_out(self):
        clone_to_radio(self)

    def get_bank_model(self):
        rf = self.get_features()
        if rf.has_bank:
            if rf.has_bank_index:
                return IcomIndexedBankModel(self)
            else:
                return IcomBankModel(self)
        else:
            return None

    # Icom-specific bank routines
    def _get_bank(self, loc):
        """Get the integral bank index of memory @loc, or None"""
        raise Exception("Not implemented")

    def _set_bank(self, loc, index):
        """Set the integral bank index of memory @loc to @index, or
        no bank if None"""
        raise Exception("Not implemented")

    def get_settings(self):
        return make_speed_switch_setting(self)

    def set_settings(self, settings):
        return honor_speed_switch_setting(self, settings)


def flip_high_order_bit(data):
    """Return @data as a list of chars with bit 7 of each byte inverted."""
    return [chr(ord(d) ^ 0x80) for d in list(data)]


def escape_raw_byte(byte):
    """Escapes a raw byte for sending to the radio"""
    # Certain bytes are used as control characters to the radio, so if one of
    # these bytes is present in the stream to the radio, it gets escaped as
    # 0xff followed by (byte & 0x0f)
    if ord(byte) > 0xf9:
        return "\xff%s" % (chr(ord(byte) & 0xf))
    return byte


def unescape_raw_bytes(escaped_data):
    """Unescapes raw bytes from the radio."""
    data = ""
    i = 0
    while i < len(escaped_data):
        byte = escaped_data[i]
        if byte == '\xff':
            if i + 1 >= len(escaped_data):
                raise errors.InvalidDataError(
                    "Unexpected escape character at end of data")
            i += 1
            byte = chr(0xf0 | ord(escaped_data[i]))
        data += byte
        i += 1
    return data


class IcomRawCloneModeRadio(IcomCloneModeRadio):
    """Subclass for Icom clone-mode radios using the raw data protocol."""

    def process_frame_payload(self, payload):
        """Payloads from a raw-clone-mode radio are already in raw format."""
        return unescape_raw_bytes(payload)

    def get_payload(self, data, raw, checksum):
        """Returns the data with optional checksum in raw format."""
        if checksum:
            cs = chr(compute_checksum(data))
        else:
            cs = ""
        payload = "%s%s" % (data, cs)
        # Escape control characters.
        escaped_payload = [escape_raw_byte(b) for b in payload]
        return "".join(escaped_payload)

    def sync_in(self):
        # The radio returns all the bytes with the high-order bit flipped.
        _mmap = clone_from_radio(self)
        _mmap = flip_high_order_bit(_mmap.get_packed())
        self._mmap = memmap.MemoryMap(_mmap)
        self.process_mmap()

    def get_mmap(self):
        _data = flip_high_order_bit(self._mmap.get_packed())
        return memmap.MemoryMap(_data)


class IcomLiveRadio(chirp_common.LiveRadio):
    """Base class for an Icom Live-mode radio"""
    VENDOR = "Icom"
    BAUD_RATE = 38400

    _num_banks = 26              # Most live Icoms have 26 banks, A-Z
    _bank_index_bounds = (0, 99)
    _bank_class = IcomBank

    def get_bank_model(self):
        rf = self.get_features()
        if rf.has_bank:
            if rf.has_bank_index:
                return IcomIndexedBankModel(self)
            else:
                return IcomBankModel(self)
        else:
            return None


def make_speed_switch_setting(radio):
    """Build the driver-options settings group exposing the hi-speed
    clone toggle (empty dict when the radio cannot do hi-speed)."""
    if not radio.__class__._can_hispeed:
        return {}
    drvopts = RadioSettingGroup("drvopts", "Driver Options")
    top = RadioSettings(drvopts)
    rs = RadioSetting("drv_clone_speed", "Use Hi-Speed Clone",
                      RadioSettingValueBoolean(radio._can_hispeed))
    drvopts.append(rs)
    return top


def honor_speed_switch_setting(radio, settings):
    """Apply the drv_clone_speed setting back onto the radio class."""
    for element in settings:
        if element.get_name() == "drvopts":
            return honor_speed_switch_setting(radio, element)
        if element.get_name() == "drv_clone_speed":
            radio.__class__._can_hispeed = element.value.get_value()
            return
import pygame


class EzMenu:
    """A minimal keyboard-driven menu for pygame.

    Each option is a ``(label, callback)`` pair; UP/DOWN move the
    highlight, RETURN invokes the selected option's callback.
    """

    def __init__(self, *options):
        self.options = options
        self.x = 0
        self.y = 0
        self.font = pygame.font.Font(None, 32)
        self.option = 0               # index of the highlighted entry
        self.width = 1
        self.color = [0, 0, 0]        # normal text colour
        self.hcolor = [255, 0, 0]     # highlight colour
        self.height = len(self.options) * self.font.get_height()
        # Widen the menu to fit the longest label.
        for entry in self.options:
            rendered = self.font.render(entry[0], 2, (0, 0, 0))
            if rendered.get_width() > self.width:
                self.width = rendered.get_width()

    def draw(self, surface):
        """Render every option onto *surface*, highlighting the selection."""
        for index, entry in enumerate(self.options):
            colour = self.hcolor if index == self.option else self.color
            rendered = self.font.render(entry[0], 2, colour)
            # Track the widest rendered label so centring stays correct
            # even if the font changed after construction.
            if rendered.get_width() > self.width:
                self.width = rendered.get_width()
            position = ((self.x + self.width / 2) - rendered.get_width() / 2,
                        self.y + index * (self.font.get_height() + 4))
            surface.blit(rendered, position)

    def update(self, events):
        """Process *events*: UP/DOWN navigate, RETURN selects."""
        for event in events:
            if event.type != pygame.KEYDOWN:
                continue
            if event.key == pygame.K_DOWN:
                self.option += 1
            if event.key == pygame.K_UP:
                self.option -= 1
            if event.key == pygame.K_RETURN:
                self.options[self.option][1]()
            # Wrap the selection around either end of the menu.
            if self.option > len(self.options) - 1:
                self.option = 0
            if self.option < 0:
                self.option = len(self.options) - 1

    def set_pos(self, x, y):
        """Place the menu's top-left corner at (x, y)."""
        self.x = x
        self.y = y

    def set_font(self, font):
        """Use *font* for all subsequent rendering."""
        self.font = font

    def set_highlight_color(self, color):
        """Set the colour of the selected entry."""
        self.hcolor = color

    def set_normal_color(self, color):
        """Set the colour of unselected entries."""
        self.color = color

    def center_at(self, x, y):
        """Centre the whole menu on the point (x, y)."""
        self.x = x - (self.width / 2)
        self.y = y - (self.height / 2)
""" WSGI config for GoodDog project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault("DJANGO_SETTINGS_MODULE", "GoodDog.settings") application = get_wsgi_application()
"""Keywords (from "graminit.c") This file is automatically generated; please don't muck it up! To update the symbols in this file, 'cd' to the top directory of the python source tree after building the interpreter and run: python Lib/keyword.py """ __all__ = ["iskeyword", "kwlist"] kwlist = [ 'and', 'assert', 'break', 'class', 'continue', 'def', 'del', 'elif', 'else', 'except', 'exec', 'finally', 'for', 'from', 'global', 'if', 'import', 'in', 'is', 'lambda', 'not', 'or', 'pass', 'print', 'raise', 'return', 'try', 'while', 'yield', ] kwdict = {} for keyword in kwlist: kwdict[keyword] = 1 iskeyword = kwdict.has_key def main(): import sys, re args = sys.argv[1:] iptfile = args and args[0] or "Python/graminit.c" if len(args) > 1: optfile = args[1] else: optfile = "Lib/keyword.py" # scan the source file for keywords fp = open(iptfile) strprog = re.compile('"([^"]+)"') lines = [] while 1: line = fp.readline() if not line: break if line.find('{1, "') > -1: match = strprog.search(line) if match: lines.append(" '" + match.group(1) + "',\n") fp.close() lines.sort() # load the output skeleton from the target fp = open(optfile) format = fp.readlines() fp.close() # insert the lines of keywords try: start = format.index("#--start keywords--\n") + 1 end = format.index("#--end keywords--\n") format[start:end] = lines except ValueError: sys.stderr.write("target does not contain format markers\n") sys.exit(1) # write the output file fp = open(optfile, 'w') fp.write(''.join(format)) fp.close() if __name__ == "__main__": main()
# Tell Django which AppConfig class to use for the ``users`` app
# (legacy pre-Django-3.2 application-configuration hook).
default_app_config = 'users.apps.UserConfig'
"""Calculate exact solutions for the zero dimensional LLG as given by [Mallinson2000] """ from __future__ import division from __future__ import absolute_import from math import sin, cos, tan, log, atan2, acos, pi, sqrt import scipy as sp import matplotlib.pyplot as plt import functools as ft import simpleode.core.utils as utils def calculate_switching_time(magnetic_parameters, p_start, p_now): """Calculate the time taken to switch from polar angle p_start to p_now with the magnetic parameters given. """ # Should never quite get to pi/2 # if p_now >= pi/2: # return sp.inf # Cache some things to simplify the expressions later H = magnetic_parameters.H(None) Hk = magnetic_parameters.Hk() alpha = magnetic_parameters.alpha gamma = magnetic_parameters.gamma # Calculate the various parts of the expression prefactor = ((alpha**2 + 1)/(gamma * alpha)) \ * (1.0 / (H**2 - Hk**2)) a = H * log(tan(p_now/2) / tan(p_start/2)) b = Hk * log((H - Hk*cos(p_start)) / (H - Hk*cos(p_now))) c = Hk * log(sin(p_now) / sin(p_start)) # Put everything together return prefactor * (a + b + c) def calculate_azimuthal(magnetic_parameters, p_start, p_now): """Calculate the azimuthal angle corresponding to switching from p_start to p_now with the magnetic parameters given. """ def azi_into_range(azi): a = azi % (2*pi) if a < 0: a += 2*pi return a alpha = magnetic_parameters.alpha no_range_azi = (-1/alpha) * log(tan(p_now/2) / tan(p_start/2)) return azi_into_range(no_range_azi) def generate_dynamics(magnetic_parameters, start_angle=pi/18, end_angle=17*pi/18, steps=1000): """Generate a list of polar angles then return a list of corresponding m directions (in spherical polar coordinates) and switching times. 
""" mag_params = magnetic_parameters # Construct a set of solution positions pols = sp.linspace(start_angle, end_angle, steps) azis = [calculate_azimuthal(mag_params, start_angle, p) for p in pols] sphs = [utils.SphPoint(1.0, azi, pol) for azi, pol in zip(azis, pols)] # Calculate switching times for these positions times = [calculate_switching_time(mag_params, start_angle, p) for p in pols] return (sphs, times) def plot_dynamics(magnetic_parameters, start_angle=pi/18, end_angle=17*pi/18, steps=1000): """Plot exact positions given start/finish angles and magnetic parameters. """ sphs, times = generate_dynamics(magnetic_parameters, start_angle, end_angle, steps) sphstitle = "Path of m for " + str(magnetic_parameters) \ + "\n (starting point is marked)." utils.plot_sph_points(sphs, title=sphstitle) timestitle = "Polar angle vs time for " + str(magnetic_parameters) utils.plot_polar_vs_time(sphs, times, title=timestitle) plt.show() def calculate_equivalent_dynamics(magnetic_parameters, polars): """Given a list of polar angles (and some magnetic parameters) calculate what the corresponding azimuthal angles and switching times (from the first angle) should be. """ start_angle = polars[0] f_times = ft.partial(calculate_switching_time, magnetic_parameters, start_angle) exact_times = [f_times(p) for p in polars] f_azi = ft.partial(calculate_azimuthal, magnetic_parameters, start_angle) exact_azis = [f_azi(p) for p in polars] return exact_times, exact_azis def plot_vs_exact(magnetic_parameters, ts, ms): # Extract lists of the polar coordinates m_as_sph_points = map(utils.array2sph, ms) pols = [m.pol for m in m_as_sph_points] azis = [m.azi for m in m_as_sph_points] # Calculate the corresponding exact dynamics exact_times, exact_azis = \ calculate_equivalent_dynamics(magnetic_parameters, pols) # Plot plt.figure() plt.plot(ts, pols, '--', exact_times, pols) plt.figure() plt.plot(pols, azis, '--', pols, exact_azis) plt.show()
class Sbs:
    """Editor for a Space Engineers ``.sbs`` save file.

    Loads the save (XML) plus its companion ``.sbc`` checkpoint, and offers
    reward insertion, trash (floating-object/derelict-grid) cleanup, and
    write-back of the modified tree.
    """

    def __init__(self, sbsFilename, sbc_filename, newSbsFilename):
        import xml.etree.ElementTree as ET
        import Sbc
        # Companion checkpoint file; used to map entity IDs to players.
        self.mySbc = Sbc.Sbc(sbc_filename)
        self.sbsTree = ET.parse(sbsFilename)
        self.sbsRoot = self.sbsTree.getroot()
        # Fully-qualified attribute name of xsi:type on XML elements.
        self.XSI_TYPE = "{http://www.w3.org/2001/XMLSchema-instance}type"
        self.newSbsFilename = newSbsFilename

    def findPlayerBySteamID(self, steam_id):
        """Return the player dict matching *steam_id*, or False if not found
        (also False for the sentinel id 0)."""
        if (steam_id == 0):
            return False
        print("looking for player with steamID of %s" % steam_id)
        ourPlayerDict = self.mySbc.getPlayerDict()
        for player in ourPlayerDict:
            # print playerDict[player]['steamID']
            if ourPlayerDict[player]['steamID'] == steam_id:
                return ourPlayerDict[player]
        # if we don't find the user
        return False

    def giveReward(self, rewardOwner, rewardType, rewardAmount):
        """
        This method will hunt down the first cargo container owned by <Owner>
        matching their ingame ID, and with with "CustomName" of "LOOT" and
        place the rewards in it
        """
        import xml.etree.ElementTree as ET
        print("trying to give %s %s units of %s"
              % (rewardOwner, rewardAmount, rewardType))
        for sectorObjects in self.sbsRoot.iter('SectorObjects'):
            for entityBase in sectorObjects.iter('MyObjectBuilder_EntityBase'):
                # EntityId = entityBase.find('EntityId')
                # print ("checking entityID %s" % EntityId.text)
                gridSize = entityBase.find('GridSizeEnum')
                # TODO+: some kind of warning if we have a reward to give,
                # but can't find this user's LOOT container
                if hasattr(gridSize, 'text'):
                    cubeBlocks = entityBase.find('CubeBlocks')
                    for myCubeBlock in cubeBlocks.iter('MyObjectBuilder_CubeBlock'):
                        owner = myCubeBlock.find("Owner")
                        EntityId = myCubeBlock.find('EntityId')
                        customName = myCubeBlock.find('CustomName')
                        # Only cargo containers owned by the reward recipient
                        # and carrying a custom name are candidates.
                        if hasattr(owner, 'text') and owner.text == rewardOwner and myCubeBlock.get(self.XSI_TYPE) == "MyObjectBuilder_CargoContainer" and hasattr(customName, 'text'):
                            if "LOOT" in customName.text:
                                print("I found a cargo container owned by %s with entityID of %s and name of %s" % (owner.text, EntityId.text, customName.text))
                                componentContainer = myCubeBlock.find('ComponentContainer')
                                components = componentContainer.find('Components')
                                componentData = components.find('ComponentData')
                                component = componentData.find('Component')
                                items = component.find('Items')
                                # Count existing inventory items so the new
                                # item gets the next ItemId.
                                itemCount = 0
                                for myInventoryItems in items.iter('MyObjectBuilder_InventoryItem'):
                                    itemCount += 1
                                print("planning to add %s of %s into it as item %s" % (rewardAmount, rewardType, itemCount))
                                # Target structure being appended:
                                # <MyObjectBuilder_InventoryItem>
                                #   <Amount>200</Amount>
                                #   <PhysicalContent xsi:type="MyObjectBuilder_Ore">
                                #     <SubtypeName>Uranium</SubtypeName>     ## from rewardType
                                #   </PhysicalContent>
                                #   <ItemId>4</ItemId>                       ## from itemCount
                                #   <AmountDecimal>200</AmountDecimal>       ## from rewardAmount
                                # </MyObjectBuilder_InventoryItem>
                                # myCubeBlock.append((ET.fromstring('<MyObjectBuilder_InventoryItem><Amount>123456789</Amount></MyObjectBuilder_InventoryItem>')))
                                inventoryItem = ET.SubElement(items, 'MyObjectBuilder_InventoryItem')
                                amount = ET.SubElement(inventoryItem, 'Amount')
                                amount.text = str(rewardAmount)
                                physicalContent = ET.SubElement(inventoryItem, 'PhysicalContent')
                                physicalContent.set(self.XSI_TYPE, 'MyObjectBuilder_Ore')
                                subtypeName = ET.SubElement(physicalContent, 'SubtypeName')
                                subtypeName.text = rewardType
                                itemId = ET.SubElement(inventoryItem, 'ItemId')
                                itemId.text = str(itemCount)
                                amountDecimal = ET.SubElement(inventoryItem, 'AmountDecimal')
                                amountDecimal.text = str(rewardAmount)
                                nextItemId = component.find('nextItemId')
                                nextItemId.text = str(itemCount + 1)
        # FIXME: this makes a mess of the html, figure out a way to clean it up?

    def removeFloaters(self):
        """Delete floating objects, backpacks and derelict grids.

        Grids failing the viability checks (enough thrusters, power, control,
        gyro, an owner) are first renamed with a ``[DEL.WRN]`` prefix; a grid
        already carrying the prefix is removed.  Returns
        (removedCount, warnCount).
        """
        import xml.etree.ElementTree as ET
        removedCount = 0
        warnCount = 0
        for sectorObjects in self.sbsRoot.iter('SectorObjects'):
            for entityBase in sectorObjects.iter('MyObjectBuilder_EntityBase'):
                cubeGridID = entityBase.find('EntityId')
                gridSizeEnum = entityBase.find('GridSizeEnum')
                objectType = entityBase.get(self.XSI_TYPE)
                isStatic = entityBase.find('IsStatic')
                # FIXME: this does not do what I thought it did. Tested with
                # simple station, and it isn't set as static when I build it
                # from scratch.
                # TODO: only way I can see to easily fix is check for
                # <Forward x="-0" y="-0" z="-1" /> for static things
                # print cubeGridID.text if hasattr(cubeGridID, 'text') else 'not defined'
                if hasattr(cubeGridID, 'text'):
                    print("Grid EntityID: %s " % cubeGridID.text)
                else:
                    print("FIXME: no gridID")
                # print ("\t is objectType %s" % objectType )
                if hasattr(isStatic, 'text'):
                    # this is a base, all of our checks are null and void.
                    # Bases don't float or cost me CPU
                    print("\t skipping trash checks because this IsStatic")
                    continue
                if hasattr(gridSizeEnum, 'text'):
                    # is a grid, small or large
                    gridName = entityBase.find('DisplayName').text
                    print("\t is a grid size %s %s" % (gridSizeEnum.text, gridName))
                    # if the name contains DEL.WRN
                    if "[DEL.WRN]" in gridName:
                        print("\t ALREADY HAD DEL.WRN in the NAME, GOODBYE")
                        sectorObjects.remove(entityBase)
                        removedCount += 1
                    else:
                        # it doesn't have a DEL WRN yet, lets check for our rules
                        # TODO: look through the whole entityBase for 6 thrusters,
                        # a power supply, and at least one block not owned by pirates
                        thrusterCount = 0
                        powerSource = 0
                        controlSurface = 0
                        gyroCount = 0
                        turretCount = 0
                        ownerCount = 0
                        ownedThings = 0
                        ownerList = []
                        cubeBlocks = entityBase.find('CubeBlocks')
                        for myCubeBlock in cubeBlocks.iter('MyObjectBuilder_CubeBlock'):
                            owner = myCubeBlock.find("Owner")
                            # subtype = myCubeBlock.find('SubtypeName')
                            cubeType = myCubeBlock.get(self.XSI_TYPE)
                            entityID = myCubeBlock.find("EntityId")
                            # print ("\t\tTODO: cubeType of: %s" % cubeType)
                            # Tally the block categories used by the
                            # viability check below.
                            if "Thrust" in cubeType:
                                thrusterCount += 1
                            elif "Cockpit" in cubeType:
                                controlSurface += 1
                            elif "Reactor" in cubeType:
                                powerSource += 1
                            elif "SolarPanel" in cubeType:
                                powerSource += 1
                            elif "RemoteControl" in cubeType:
                                controlSurface += 1
                            elif "Gyro" in cubeType:
                                gyroCount += 1
                            elif "Turret" in cubeType:
                                turretCount += 1
                            if hasattr(owner, 'text'):
                                # print ("\tOwner: %s" % owner.text)
                                if owner.text not in ownerList:
                                    ownerList.append(owner.text)
                                    ownerCount += 1
                                ownedThings += 1
                                # TODO: this is how many blocks have an owner,
                                # above is distinct owners of this grid
                        print("\t totals: %s %s %s %s %s %s %s" % (thrusterCount, powerSource, controlSurface, gyroCount, turretCount, ownerCount, len(ownerList)))
                        # TODO: if it fails all my tests,
                        # [CHECK] set name to [DEL.WRN]
                        # set ShowOnHUD to True ## can't, this is per cube. Ignore this.
                        if (thrusterCount < 6 or controlSurface < 1 or powerSource < 1 or gyroCount < 1 or ownerCount < 1):
                            print("\tWARNING: THIS GRID IS DUE TO DELETE")
                            gridNameToUpdate = entityBase.find('DisplayName')
                            gridNameToUpdate.text = "[DEL.WRN]" + gridNameToUpdate.text
                            print("\tname is now: %s" % gridNameToUpdate.text)
                            warnCount += 1
                            for myCubeBlock in cubeBlocks.iter('MyObjectBuilder_CubeBlock'):
                                # set all DeformationRatio to 1 (right up under
                                # owner) <DeformationRatio>0.5</DeformationRatio>
                                deformationElement = ET.SubElement(myCubeBlock, "DeformationRatio")
                                deformationElement.text = ".77"
                                # myCubeBlock.append('DeformationRatio', '.77')
                else:
                    # Not a grid: handle the other entity types individually.
                    if (objectType == "MyObjectBuilder_FloatingObject"):
                        print("\t GOODBYE")
                        sectorObjects.remove(entityBase)
                        removedCount += 1
                    elif (objectType == "MyObjectBuilder_ReplicableEntity"):
                        # print ("\t Backpack!")
                        backPackName = entityBase.find('Name')
                        if hasattr(backPackName, 'text'):
                            print("\t Backpackname: %s" % backPackName.text)
                        print("\t GOODBYE")
                        sectorObjects.remove(entityBase)
                        removedCount += 1
                    elif (objectType == "MyObjectBuilder_VoxelMap"):
                        voxelStorageName = entityBase.find('StorageName')
                        if hasattr(voxelStorageName, 'text'):
                            print("\t voxelStorageName: %s" % voxelStorageName.text)
                    elif (objectType == "MyObjectBuilder_Character"):
                        # oops, someone was online
                        # entityID matches CharacterEntityId in the sbc
                        entityID = entityBase.find('EntityId').text
                        # steamID
                        print("\t looking for %s entityID in playerDict" % entityID)
                        thisPlayersDict = self.findPlayerBySteamID(entityID)
                        # returns False if we didn't have this players steamID
                        # in the sbc, meaning they weren't online
                        if (thisPlayersDict is not False and entityID is not False):
                            print("\t Sorry player: %s %s" % (entityID, thisPlayersDict["username"]))
                        else:
                            print("\tFIXME: this player was online, but I don't have their steamID of %s in the sbc" % entityID)
                    else:
                        print("\t ##### has no grid size")
        # print ("writing tree out to %s" % newSbsFileName)
        # tree = ET.ElementTree(sbsRoot)
        # sbsRoot.attrib["xmlns:xsd"]="http://www.w3.org/2001/XMLSchema"
        # tree.write(newSbsFileName, encoding='utf-8', xml_declaration=True)
        return (removedCount, warnCount)

    def writeFile(self):
        """Serialise the (possibly modified) tree to newSbsFilename."""
        import xml.etree.ElementTree as ET
        print("writing tree out to %s" % self.newSbsFilename)
        tree = ET.ElementTree(self.sbsRoot)
        # Re-attach the xsd namespace declaration that ElementTree drops.
        self.sbsRoot.attrib["xmlns:xsd"] = "http://www.w3.org/2001/XMLSchema"
        tree.write(self.newSbsFilename, encoding='utf-8', xml_declaration=True)
# Base-class plumbing shared by all APSync modules.  Each module runs as its
# own process, drains an input queue on a daemon thread and reports back to
# the supervisor over an output queue.  (Legacy Python 2 syntax throughout.)
from multiprocessing import Process, Event
import threading
import time
import signal, select
import traceback
import setproctitle

from APSyncFramework.utils.common_utils import PeriodicEvent
from APSyncFramework.utils.json_utils import ping, json_wrap_with_target
from APSyncFramework.utils.file_utils import read_config, write_config

class APModule(Process):
    '''The base class for all modules'''
    def __init__(self, in_queue, out_queue, name, description = None):
        # in_queue / out_queue: multiprocessing queues used to talk to the
        # supervising process.  name: used as the process title and as the
        # identifier sent with every ping.
        super(APModule, self).__init__()
        # Both SIGINT and SIGTERM trigger a graceful unload.
        signal.signal(signal.SIGINT, self.exit_gracefully)
        signal.signal(signal.SIGTERM, self.exit_gracefully)
        self.daemon = True
        self.config_list= [] # overwrite this list
        self.config_changed = False
        self.config = read_config()
        self.start_time = time.time()
        self.last_ping = None
        # Set once to make run() and in_queue_handling() stop their loops.
        self.needs_unloading = Event()
        self.lock = threading.Lock()
        self.in_queue = in_queue
        self.out_queue = out_queue
        self.name = name
        # Rate-limited ping: at most one every three seconds (see send_ping).
        self.ping = PeriodicEvent(frequency = 1.0/3.0, event = self.send_ping)
        self.in_queue_thread = threading.Thread(target=self.in_queue_handling,
                                                args = (self.lock,))
        self.in_queue_thread.daemon = True
        setproctitle.setproctitle(self.name)
        if description is None:
            self.description = "APSync {0} process".format(self.name)
        else:
            self.description = description

    # NOTE(review): mutable default argument.  Safe here because config_list
    # is only read (len() then rebinding), but config_list=None would be the
    # conventional spelling.
    def update_config(self, config_list = []):
        # Pull each (name, default) pair into self.config, then persist any
        # change back to disk, merging in keys that only exist on disk.
        if len(config_list):
            self.config_list = config_list
        for (var_name, var_default) in self.config_list:
            self.set_config(var_name, var_default)

        if self.config_changed:
            # TODO: send a msg to the webserver to update / reload the current page
            self.log('At least one of your cloudsync settings was missing or has been updated, please reload the webpage if open.', 'INFO')
            self.config_changed = False
            config_on_disk = read_config()
            for k in config_on_disk.keys():
                if not k in self.config:
                    self.config[k] = config_on_disk[k]
            write_config(self.config)

    def send_ping(self):
        # Liveness heartbeat consumed by the supervisor.
        self.out_queue.put_nowait(ping(self.name, self.pid))

    def exit_gracefully(self, signum, frame):
        # Signal handler shim (SIGINT / SIGTERM) - delegates to unload().
        self.unload()

    def unload(self):
        # Run module-specific cleanup, then flag all loops to stop.
        print self.name, 'called unload'
        self.unload_callback()
        self.needs_unloading.set()

    def unload_callback(self):
        ''' overload to perform any module specific cleanup'''
        pass

    def run(self):
        # Process entry point: start the queue-draining thread and keep
        # calling main() until an unload is requested.  Exceptions from
        # main() are logged and swallowed so the loop keeps running.
        if self.in_queue_thread is not None:
            self.in_queue_thread.start()
        while not self.needs_unloading.is_set():
            try:
                self.main()
            except:
                print ("FATAL: module ({0}) exited while multiprocessing".format(self.name))
                traceback.print_exc() # TODO: logging here
        print self.name, 'main finished'

    def main(self):
        # Overload with the module's work loop body.
        pass

    def in_queue_handling(self, lock=None):
        # Daemon thread: wait (0.1 s timeout) for data on the in_queue and
        # dispatch it.  Uses the queue's private _reader pipe with select so
        # the wait can be interrupted cheaply.
        while not self.needs_unloading.is_set():
            (inputready,outputready,exceptready) = select.select([self.in_queue._reader],[],[],0.1)
            for s in inputready:
                while not self.in_queue.empty():
                    # drain the queue
                    data = self.in_queue.get_nowait()
                    if isinstance(data, Unload):
                        self.unload()
                    else:
                        # do something useful with the data...
                        self.process_in_queue_data(data)
            # Periodic, rate-limited heartbeat.
            self.ping.trigger()
        print self.name, 'in queue finished'

    def process_in_queue_data(self, data):
        # Overload to consume messages addressed to this module.
        pass

    def log(self, message, level = 'INFO'):
        # Route a log record to the logging module via the out_queue.
        self.out_queue.put_nowait(json_wrap_with_target({'msg':message, 'level':level}, target = 'logging'))

    def set_config(self, var_name, var_default):
        # Ensure var_name exists in self.config; mark config_changed when the
        # key was missing or its value differs from the default-resolved one.
        # NOTE(review): the return inside finally would also swallow any
        # exception raised in the try body; harmless here because the bare
        # except already catches everything, but worth tidying.
        new_val = self.config.get(var_name, var_default)
        try:
            cur_val = self.config[var_name]
            if new_val != cur_val:
                self.config_changed = True
        except:
            self.config_changed = True
        finally:
            self.config[var_name] = new_val
            return new_val

class Unload():
    # Sentinel message: putting an instance on a module's in_queue makes
    # in_queue_handling() call unload().
    def __init__(self, name):
        # NOTE(review): the name parameter is accepted but never stored -
        # presumably self.name = name was intended; confirm against senders.
        self.ack = False
from ....model.util.HelperModule import get_partial_index

# imports needed for type checking
from ....model.DioptasModel import DioptasModel
from ....widgets.integration import IntegrationWidget
from ....widgets.plot_widgets.ImgWidget import IntegrationImgWidget


class PhaseInCakeController(object):
    """
    PhaseInCakeController handles all the interaction between the phase controls and the plotted lines in the cake view.
    """

    def __init__(self, integration_widget, dioptas_model):
        """
        :param integration_widget: Reference to an IntegrationWidget
        :param dioptas_model: reference to DioptasModel object

        :type integration_widget: IntegrationWidget
        :type dioptas_model: DioptasModel
        """
        self.model = dioptas_model
        self.phase_model = self.model.phase_model
        self.integration_widget = integration_widget
        self.cake_view_widget = integration_widget.integration_image_widget.cake_view  # type: IntegrationImgWidget
        self.connect()

    def connect(self):
        """Connects the phase-model signals to the cake-view update handlers."""
        # Consistently go through self.phase_model (it is the same object as
        # self.model.phase_model, which the original code used in places).
        self.phase_model.phase_added.connect(self.add_phase_plot)
        self.phase_model.phase_removed.connect(self.cake_view_widget.del_cake_phase)

        self.phase_model.phase_changed.connect(self.update_phase_lines)
        self.phase_model.phase_changed.connect(self.update_phase_color)
        self.phase_model.phase_changed.connect(self.update_phase_visible)

        self.phase_model.reflection_added.connect(self.reflection_added)
        self.phase_model.reflection_deleted.connect(self.reflection_deleted)

    def get_phase_position_and_intensities(self, ind, clip=True):
        """
        Obtains the positions and intensities for lines of a phase with an index ind within the cake view.

        No clipping is used for the first call to add the CakePhasePlot to the ImgWidget. Subsequent calls are used with
        clipping. Thus, only lines within the cake_tth are returned. The visibility of each line is then estimated in
        the ImgWidget based on the length of the clipped and not clipped lists.

        :param ind: the index of the phase
        :param clip: whether or not the lists should be clipped. Clipped means that lines with positions outside of
                     the cake_tth axis are omitted; when not clipping they are reported at position 0 instead.
        :return: line_positions, line_intensities
        """
        if self.model.cake_tth is None:
            cake_tth = self.model.calibration_model.tth
        else:
            cake_tth = self.model.cake_tth
        reflections_tth = self.phase_model.get_phase_line_positions(
            ind, 'tth', self.model.calibration_model.wavelength * 1e10)
        reflections_intensities = [reflex[1] for reflex in self.phase_model.reflections[ind]]

        cake_line_positions = []
        cake_line_intensities = []

        # Use a dedicated loop variable: the original code reused 'ind' here,
        # shadowing the phase-index parameter.
        for ref_ind, tth in enumerate(reflections_tth):
            pos_ind = get_partial_index(cake_tth, tth)
            if pos_ind is not None:
                # +0.5 centers the line on the cake image pixel.
                cake_line_positions.append(pos_ind + 0.5)
                cake_line_intensities.append(reflections_intensities[ref_ind])
            elif clip is False:
                cake_line_positions.append(0)
                cake_line_intensities.append(reflections_intensities[ref_ind])

        return cake_line_positions, cake_line_intensities

    def add_phase_plot(self):
        """Adds the most recently added phase (index -1) to the cake view, unclipped."""
        cake_line_positions, cake_line_intensities = self.get_phase_position_and_intensities(-1, False)
        self.cake_view_widget.add_cake_phase(cake_line_positions, cake_line_intensities,
                                             self.phase_model.phase_colors[-1])

    def update_phase_lines(self, ind):
        """Recomputes and pushes the (clipped) line positions/intensities for phase ind."""
        cake_line_positions, cake_line_intensities = self.get_phase_position_and_intensities(ind)
        self.cake_view_widget.update_phase_intensities(ind, cake_line_positions, cake_line_intensities)

    def update_phase_color(self, ind):
        """Applies the current model color of phase ind to its cake lines."""
        self.cake_view_widget.set_cake_phase_color(ind, self.phase_model.phase_colors[ind])

    def update_phase_visible(self, ind):
        """Shows phase ind only while the cake image mode and the phases button are active."""
        if self.phase_model.phase_visible[ind] and self.integration_widget.img_mode == 'Cake' and \
                self.integration_widget.img_phases_btn.isChecked():
            self.cake_view_widget.show_cake_phase(ind)
        else:
            self.cake_view_widget.hide_cake_phase(ind)

    def reflection_added(self, ind):
        """Adds an (empty) line item for a new reflection of phase ind."""
        self.cake_view_widget.phases[ind].add_line()

    def reflection_deleted(self, phase_ind, reflection_ind):
        """Removes the line item of a deleted reflection."""
        self.cake_view_widget.phases[phase_ind].delete_line(reflection_ind)
import sys import os import shlex extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.pngmath', 'sphinx.ext.ifconfig', 'sphinx.ext.viewcode', ] templates_path = ['_templates'] source_suffix = '.rst' master_doc = 'index' project = u'last_letter' copyright = u'2014, George Zogopoulos - Papaliakos' author = u'George Zogopoulos - Papaliakos' version = '0.5' release = '0.5' language = None exclude_patterns = [] pygments_style = 'sphinx' todo_include_todos = True html_theme = 'sphinx_rtd_theme' html_theme_path = ["_static/themes", ] html_logo = None html_static_path = ['_static'] html_domain_indices = False html_use_index = False html_show_sourcelink = False html_show_sphinx = False html_show_copyright = False htmlhelp_basename = 'last_letter_doc' latex_elements = { } latex_documents = [ (master_doc, 'last_letter.tex', u'last_letter Documentation', u'George Zogopoulos - Papaliakos', 'manual'), ] man_pages = [ (master_doc, 'last_letter', u'last_letter Documentation', [author], 1) ] texinfo_documents = [ (master_doc, 'last_letter', u'last_letter Documentation', author, 'last_letter', 'A collection of ROS packages for UAV simulation and autopilot development.', 'Miscellaneous'), ] epub_title = project epub_author = author epub_publisher = author epub_copyright = copyright epub_exclude_files = ['search.html']
""" An implementation of the time frequency phase misfit and adjoint source after Fichtner et al. (2008). :copyright: Lion Krischer (krischer@geophysik.uni-muenchen.de), 2013 :license: GNU General Public License, Version 3 (http://www.gnu.org/copyleft/gpl.html) """ import warnings import numexpr as ne import numpy as np import obspy from obspy.signal.interpolation import lanczos_interpolation from lasif import LASIFAdjointSourceCalculationError from lasif.adjoint_sources import time_frequency, utils eps = np.spacing(1) def adsrc_tf_phase_misfit(t, data, synthetic, min_period, max_period, plot=False, max_criterion=7.0): """ :rtype: dictionary :returns: Return a dictionary with three keys: * adjoint_source: The calculated adjoint source as a numpy array * misfit: The misfit value * messages: A list of strings giving additional hints to what happened in the calculation. """ # Assumes that t starts at 0. Pad your data if that is not the case - # Parts with zeros are essentially skipped making it fairly efficient. assert t[0] == 0 messages = [] # Internal sampling interval. Some explanations for this "magic" number. # LASIF's preprocessing allows no frequency content with smaller periods # than min_period / 2.2 (see function_templates/preprocesssing_function.py # for details). Assuming most users don't change this, this is equal to # the Nyquist frequency and the largest possible sampling interval to # catch everything is min_period / 4.4. # # The current choice is historic as changing does (very slightly) chance # the calculated misfit and we don't want to disturb inversions in # progress. The difference is likely minimal in any case. We might have # same aliasing into the lower frequencies but the filters coupled with # the TF-domain weighting will get rid of them in essentially all # realistically occurring cases. 
dt_new = max(float(int(min_period / 3.0)), t[1] - t[0]) # New time axis ti = utils.matlab_range(t[0], t[-1], dt_new) # Make sure its odd - that avoid having to deal with some issues # regarding frequency bin interpolation. Now positive and negative # frequencies will always be all symmetric. Data is assumed to be # tapered in any case so no problem are to be expected. if not len(ti) % 2: ti = ti[:-1] # Interpolate both signals to the new time axis - this massively speeds # up the whole procedure as most signals are highly oversampled. The # adjoint source at the end is re-interpolated to the original sampling # points. original_data = data original_synthetic = synthetic data = lanczos_interpolation( data=data, old_start=t[0], old_dt=t[1] - t[0], new_start=t[0], new_dt=dt_new, new_npts=len(ti), a=8, window="blackmann") synthetic = lanczos_interpolation( data=synthetic, old_start=t[0], old_dt=t[1] - t[0], new_start=t[0], new_dt=dt_new, new_npts=len(ti), a=8, window="blackmann") original_time = t t = ti # ------------------------------------------------------------------------- # Compute time-frequency representations # Window width is twice the minimal period. 
width = 2.0 * min_period # Compute time-frequency representation of the cross-correlation _, _, tf_cc = time_frequency.time_frequency_cc_difference( t, data, synthetic, width) # Compute the time-frequency representation of the synthetic tau, nu, tf_synth = time_frequency.time_frequency_transform(t, synthetic, width) # ------------------------------------------------------------------------- # compute tf window and weighting function # noise taper: down-weight tf amplitudes that are very low tf_cc_abs = np.abs(tf_cc) m = tf_cc_abs.max() / 10.0 # NOQA weight = ne.evaluate("1.0 - exp(-(tf_cc_abs ** 2) / (m ** 2))") nu_t = nu.T # highpass filter (periods longer than max_period are suppressed # exponentially) weight *= (1.0 - np.exp(-(nu_t * max_period) ** 2)) # lowpass filter (periods shorter than min_period are suppressed # exponentially) nu_t_large = np.zeros(nu_t.shape) nu_t_small = np.zeros(nu_t.shape) thres = (nu_t <= 1.0 / min_period) nu_t_large[np.invert(thres)] = 1.0 nu_t_small[thres] = 1.0 weight *= (np.exp(-10.0 * np.abs(nu_t * min_period - 1.0)) * nu_t_large + nu_t_small) # normalisation weight /= weight.max() # computation of phase difference, make quality checks and misfit --------- # Compute the phase difference. # DP = np.imag(np.log(m + tf_cc / (2 * m + np.abs(tf_cc)))) DP = np.angle(tf_cc) # Attempt to detect phase jumps by taking the derivatives in time and # frequency direction. 0.7 is an emperical value. 
abs_weighted_DP = np.abs(weight * DP) _x = abs_weighted_DP.max() # NOQA test_field = ne.evaluate("weight * DP / _x") criterion_1 = np.sum([np.abs(np.diff(test_field, axis=0)) > 0.7]) criterion_2 = np.sum([np.abs(np.diff(test_field, axis=1)) > 0.7]) criterion = np.sum([criterion_1, criterion_2]) # Compute the phase misfit dnu = nu[1] - nu[0] i = ne.evaluate("sum(weight ** 2 * DP ** 2)") # inserted by Nienke Blom, 22-11-2016 weighted_DP = ne.evaluate("weight * DP") phasediff_integral = float(ne.evaluate("sum(weighted_DP * dnu * dt_new)")) mean_delay = np.mean(weighted_DP) wDP = weighted_DP.flatten() wDP_thresh = wDP[abs(wDP) > 0.1 * max(wDP, key=lambda x: abs(x))] median_delay = np.median(wDP_thresh) max_delay = max(wDP, key=lambda x: abs(x)) phase_misfit = np.sqrt(i * dt_new * dnu) # Sanity check. Should not occur. if np.isnan(phase_misfit): msg = "The phase misfit is NaN." raise LASIFAdjointSourceCalculationError(msg) # The misfit can still be computed, even if not adjoint source is # available. if criterion > max_criterion: warning = ("Possible phase jump detected. Misfit included. No " "adjoint source computed. 
Criterion: %.1f - Max allowed " "criterion: %.1f" % (criterion, max_criterion)) warnings.warn(warning) messages.append(warning) ret_dict = { "adjoint_source": None, "misfit_value": phase_misfit, "details": {"messages": messages, #"weighted_DP": weighted_DP, #"weight": weight, #"DP": DP, "mean_delay": mean_delay, # added NAB 30-8-2017 "phasediff_integral": phasediff_integral, # added NAB 22-11-2016, edited 30-8-2017 "median_delay": median_delay, # added NAB 22-11-2016, edited 30-8-2017 "max_delay": max_delay} # added NAB 31-8-2017 } return ret_dict # Make kernel for the inverse tf transform idp = ne.evaluate( "weight ** 2 * DP * tf_synth / (m + abs(tf_synth) ** 2)") # Invert tf transform and make adjoint source ad_src, it, I = time_frequency.itfa(tau, idp, width) # Interpolate both signals to the new time axis ad_src = lanczos_interpolation( # Pad with a couple of zeros in case some where lost in all # these resampling operations. The first sample should not # change the time. data=np.concatenate([ad_src.imag, np.zeros(100)]), old_start=tau[0], old_dt=tau[1] - tau[0], new_start=original_time[0], new_dt=original_time[1] - original_time[0], new_npts=len(original_time), a=8, window="blackmann") # Divide by the misfit and change sign. ad_src /= (phase_misfit + eps) ad_src = -1.0 * np.diff(ad_src) / (t[1] - t[0]) # Taper at both ends. Exploit ObsPy to not have to deal with all the # nasty things. ad_src = \ obspy.Trace(ad_src).taper(max_percentage=0.05, type="hann").data # Reverse time and add a leading zero so the adjoint source has the # same length as the input time series. ad_src = ad_src[::-1] ad_src = np.concatenate([[0.0], ad_src]) # Plot if requested. ------------------------------------------------------ if plot: import matplotlib as mpl import matplotlib.pyplot as plt plt.style.use("seaborn-whitegrid") from lasif.colors import get_colormap if isinstance(plot, mpl.figure.Figure): fig = plot else: fig = plt.gcf() # Manually set-up the axes for full control. 
l, b, w, h = 0.1, 0.05, 0.80, 0.22 rect = l, b + 3 * h, w, h waveforms_axis = fig.add_axes(rect) rect = l, b + h, w, 2 * h tf_axis = fig.add_axes(rect) rect = l, b, w, h adj_src_axis = fig.add_axes(rect) rect = l + w + 0.02, b, 1.0 - (l + w + 0.02) - 0.05, 4 * h cm_axis = fig.add_axes(rect) # Plot the weighted phase difference. weighted_phase_difference = (DP * weight).transpose() mappable = tf_axis.pcolormesh( tau, nu, weighted_phase_difference, vmin=-1.0, vmax=1.0, cmap=get_colormap("tomo_full_scale_linear_lightness_r"), shading="gouraud", zorder=-10) tf_axis.grid(True) tf_axis.grid(True, which='minor', axis='both', linestyle='-', color='k') cm = fig.colorbar(mappable, cax=cm_axis) cm.set_label("Phase difference in radian", fontsize="large") # Various texts on the time frequency domain plot. text = "Misfit: %.4f" % phase_misfit tf_axis.text(x=0.99, y=0.02, s=text, transform=tf_axis.transAxes, fontsize="large", color="#C25734", fontweight=900, verticalalignment="bottom", horizontalalignment="right") txt = "Weighted Phase Difference - red is a phase advance of the " \ "synthetics" tf_axis.text(x=0.99, y=0.95, s=txt, fontsize="large", color="0.1", transform=tf_axis.transAxes, verticalalignment="top", horizontalalignment="right") if messages: message = "\n".join(messages) tf_axis.text(x=0.99, y=0.98, s=message, transform=tf_axis.transAxes, bbox=dict(facecolor='red', alpha=0.8), verticalalignment="top", horizontalalignment="right") # Adjoint source. adj_src_axis.plot(original_time, ad_src[::-1], color="0.1", lw=2, label="Adjoint source (non-time-reversed)") adj_src_axis.legend() # Waveforms. waveforms_axis.plot(original_time, original_data, color="0.1", lw=2, label="Observed") waveforms_axis.plot(original_time, original_synthetic, color="#C11E11", lw=2, label="Synthetic") waveforms_axis.legend() # Set limits for all axes. 
tf_axis.set_ylim(0, 2.0 / min_period) tf_axis.set_xlim(0, tau[-1]) adj_src_axis.set_xlim(0, tau[-1]) waveforms_axis.set_xlim(0, tau[-1]) waveforms_axis.set_ylabel("Velocity [m/s]", fontsize="large") tf_axis.set_ylabel("Period [s]", fontsize="large") adj_src_axis.set_xlabel("Seconds since event", fontsize="large") # Hack to keep ticklines but remove the ticks - there is probably a # better way to do this. waveforms_axis.set_xticklabels([ "" for _i in waveforms_axis.get_xticks()]) tf_axis.set_xticklabels(["" for _i in tf_axis.get_xticks()]) _l = tf_axis.get_ylim() _r = _l[1] - _l[0] _t = tf_axis.get_yticks() _t = _t[(_l[0] + 0.1 * _r < _t) & (_t < _l[1] - 0.1 * _r)] tf_axis.set_yticks(_t) tf_axis.set_yticklabels(["%.1fs" % (1.0 / _i) for _i in _t]) waveforms_axis.get_yaxis().set_label_coords(-0.08, 0.5) tf_axis.get_yaxis().set_label_coords(-0.08, 0.5) fig.suptitle("Time Frequency Phase Misfit and Adjoint Source", fontsize="xx-large") ret_dict = { "adjoint_source": ad_src, "misfit_value": phase_misfit, "details": {"messages": messages, #"weighted_DP": weighted_DP, #"weight": weight, #"DP": DP, "mean_delay": mean_delay, # added NAB 30-8-2017 "phasediff_integral": phasediff_integral, # added NAB 22-11-2016, edited 30-8-2017 "median_delay": median_delay, # added NAB 22-11-2016, edited 30-8-2017 "max_delay": max_delay} # added NAB 31-8-2017 } return ret_dict
import json
import random

import requests

from plugin import create_plugin
from message import SteelyMessage

HELP_STR = """
Request your favourite bible quotes, right to the chat.

Usage:
    /bible - Random quote
    /bible Genesis 1:3 - Specific verse
    /bible help - This help text

Verses are specified in the format {book} {chapter}:{verse}

TODO: Book acronyms, e.g. Gen -> Genesis
TODO: Verse ranges, e.g. Genesis 1:1-3
"""
BIBLE_FILE = "plugins/bible/en_kjv.json"
BIBLE_URL = 'https://raw.githubusercontent.com/thiagobodruk/bible/master/json/en_kjv.json'

plugin = create_plugin(name='bible', author='CianLR', help=HELP_STR)
# Populated by plugin_setup(): list of books, each a dict with at least a
# 'name' and a 'chapters' list of verse lists.
bible = None
book_to_index = {}


def make_book_to_index(bible):
    """Map each lower-cased book name to its index in the bible list."""
    return {book['name'].lower(): i for i, book in enumerate(bible)}


@plugin.setup()
def plugin_setup():
    """Load the bible from disk; on failure download and cache a fresh copy.

    Returns None on success, or an error string for the plugin framework.
    """
    global bible, book_to_index
    try:
        # utf-8-sig strips the BOM the upstream JSON ships with.
        # Use a context manager so the handle is closed (the original
        # open(...).read() leaked it).
        with open(BIBLE_FILE, encoding='utf-8-sig') as f:
            bible = json.load(f)
        book_to_index = make_book_to_index(bible)
        return
    except (OSError, ValueError, KeyError, TypeError):
        # Missing or corrupt local copy - fall through to a fresh download.
        # (Deliberately narrow: no longer swallows KeyboardInterrupt etc.)
        pass
    # We've tried nothing and we're all out of ideas, download a new bible.
    try:
        bible = json.loads(
            requests.get(BIBLE_URL).content.decode('utf-8-sig'))
    except Exception as e:
        return "Error loading bible: " + str(e)
    book_to_index = make_book_to_index(bible)
    with open(BIBLE_FILE, 'w') as f:
        json.dump(bible, f)


@plugin.listen(command='bible help')
def help_command(bot, message: SteelyMessage, **kwargs):
    """Send the usage text back to the requesting thread."""
    bot.sendMessage(
        HELP_STR,
        thread_id=message.thread_id, thread_type=message.thread_type)


def is_valid_quote(book, chapter, verse):
    """Bounds-check a (book, chapter, verse) triple of 0-based indices."""
    return (0 <= book < len(bible) and
            0 <= chapter < len(bible[book]['chapters']) and
            0 <= verse < len(bible[book]['chapters'][chapter]))


def get_quote(book, chapter, verse):
    """Format the verse at 0-based indices with its 1-based human reference."""
    return "{}\n - {} {}:{}".format(
        bible[book]["chapters"][chapter][verse],
        bible[book]["name"],
        chapter + 1,
        verse + 1)


def get_quote_from_ref(book_name, ref):
    """Resolve a '{book} {chapter}:{verse}' reference.

    Returns either the formatted quote or a user-facing error string.
    """
    if book_name.lower() not in book_to_index:
        return "Could not find book name: " + book_name
    book_i = book_to_index[book_name.lower()]
    if len(ref.split(':')) != 2:
        return 'Reference not in form "Book Chapter:Passage"'
    chapter, verse = ref.split(':')
    if not chapter.isnumeric():
        return "Chapter must be an int"
    chapter_i = int(chapter) - 1
    if not verse.isnumeric():
        return "Passage must be an int"
    verse_i = int(verse) - 1
    if not is_valid_quote(book_i, chapter_i, verse_i):
        return "Verse or chapter out of range"
    return get_quote(book_i, chapter_i, verse_i)


@plugin.listen(command='bible [book] [passage]')
def passage_command(bot, message: SteelyMessage, **kwargs):
    """Send the requested passage, or a uniformly random verse if none given."""
    if 'passage' not in kwargs:
        book = random.randrange(len(bible))
        chapter = random.randrange(len(bible[book]["chapters"]))
        verse = random.randrange(len(bible[book]["chapters"][chapter]))
        bot.sendMessage(
            get_quote(book, chapter, verse),
            thread_id=message.thread_id, thread_type=message.thread_type)
    else:
        bot.sendMessage(
            get_quote_from_ref(kwargs['book'], kwargs['passage']),
            thread_id=message.thread_id, thread_type=message.thread_type)
""" This program decodes the Motorola SmartNet II trunking protocol from the control channel Tune it to the control channel center freq, and it'll spit out the decoded packets. In what format? Who knows. Based on your AIS decoding software, which is in turn based on the gr-pager code and the gr-air code. """ from gnuradio import gr, gru, blks2, optfir, digital from gnuradio import audio from gnuradio import eng_notation from gnuradio import uhd from fsk_demod import fsk_demod from optparse import OptionParser from gnuradio.eng_option import eng_option from gnuradio import smartnet import time import gnuradio.gr.gr_threading as _threading import csv class top_block_runner(_threading.Thread): def __init__(self, tb): _threading.Thread.__init__(self) self.setDaemon(1) self.tb = tb self.done = False self.start() def run(self): self.tb.run() self.done = True class my_top_block(gr.top_block): def __init__(self, options, queue): gr.top_block.__init__(self) if options.filename is not None: self.fs = gr.file_source(gr.sizeof_gr_complex, options.filename) self.rate = options.rate else: self.u = uhd.usrp_source(options.addr, io_type=uhd.io_type.COMPLEX_FLOAT32, num_channels=1) if options.subdev is not None: self.u.set_subdev_spec(options.subdev, 0) self.u.set_samp_rate(options.rate) self.rate = self.u.get_samp_rate() # Set the antenna if(options.antenna): self.u.set_antenna(options.antenna, 0) self.centerfreq = options.centerfreq print "Tuning to: %fMHz" % (self.centerfreq - options.error) if not(self.tune(options.centerfreq - options.error)): print "Failed to set initial frequency" if options.gain is None: #set to halfway g = self.u.get_gain_range() options.gain = (g.start()+g.stop()) / 2.0 print "Setting gain to %i" % options.gain self.u.set_gain(options.gain) self.u.set_bandwidth(options.bandwidth) print "Samples per second is %i" % self.rate self._syms_per_sec = 3600; options.samples_per_second = self.rate options.syms_per_sec = self._syms_per_sec options.gain_mu = 0.01 
options.mu=0.5 options.omega_relative_limit = 0.3 options.syms_per_sec = self._syms_per_sec options.offset = options.centerfreq - options.freq print "Control channel offset: %f" % options.offset self.demod = fsk_demod(options) self.start_correlator = gr.correlate_access_code_tag_bb("10101100", 0, "smartnet_preamble") #should mark start of packet self.smartnet_deinterleave = smartnet.deinterleave() self.smartnet_crc = smartnet.crc(queue) if options.filename is None: self.connect(self.u, self.demod) else: self.connect(self.fs, self.demod) self.connect(self.demod, self.start_correlator, self.smartnet_deinterleave, self.smartnet_crc) #hook up the audio patch if options.audio: self.audiorate = 48000 self.audiotaps = gr.firdes.low_pass(1, self.rate, 8000, 2000, gr.firdes.WIN_HANN) self.prefilter_decim = int(self.rate / self.audiorate) #might have to use a rational resampler for audio print "Prefilter decimation: %i" % self.prefilter_decim self.audio_prefilter = gr.freq_xlating_fir_filter_ccf(self.prefilter_decim, #decimation self.audiotaps, #taps 0, #freq offset self.rate) #sampling rate #on a trunked network where you know you will have good signal, a carrier power squelch works well. real FM receviers use a noise squelch, where #the received audio is high-passed above the cutoff and then fed to a reverse squelch. If the power is then BELOW a threshold, open the squelch. 
self.squelch = gr.pwr_squelch_cc(options.squelch, #squelch point alpha = 0.1, #wat ramp = 10, #wat gate = False) self.audiodemod = blks2.fm_demod_cf(self.rate/self.prefilter_decim, #rate 1, #audio decimation 4000, #deviation 3000, #audio passband 4000, #audio stopband 1, #gain 75e-6) #deemphasis constant #the filtering removes FSK data woobling from the subaudible channel (might be able to combine w/lpf above) self.audiofilttaps = gr.firdes.high_pass(1, self.audiorate, 300, 50, gr.firdes.WIN_HANN) self.audiofilt = gr.fir_filter_fff(1, self.audiofilttaps) self.audiogain = gr.multiply_const_ff(options.volume) self.audiosink = audio.sink (self.audiorate, "") self.mute() if options.filename is None: self.connect(self.u, self.audio_prefilter) else: self.connect(self.fs, self.audio_prefilter) self.connect(self.audio_prefilter, self.squelch, self.audiodemod, self.audiofilt, self.audiogain, self.audiosink) #here we set up the low-pass filter for audio subchannel data decoding. gain of 10, decimation of 10. 
def tune(self, freq): result = self.u.set_center_freq(freq) return True def tuneoffset(self, target_freq, rffreq): #print "Setting offset; target freq is %f, Center freq is %f" % (target_freq, rffreq) self.audio_prefilter.set_center_freq(rffreq-target_freq*1e6) def setvolume(self, vol): self.audiogain.set_k(vol) def mute(self): self.setvolume(0) def unmute(self, volume): self.setvolume(volume) def getfreq(chanlist, cmd): if chanlist is None: if cmd < 0x2d0: freq = float(cmd * 0.025 + 851.0125) else: freq = None else: if chanlist.get(str(cmd), None) is not None: freq = float(chanlist[str(cmd)]) else: freq = None return freq def parsefreq(s, chanlist): retfreq = None [address, groupflag, command] = s.split(",") command = int(command) address = int(address) & 0xFFF0 groupflag = bool(groupflag) if chanlist is None: if command < 0x2d0: retfreq = getfreq(chanlist, command) else: if chanlist.get(str(command), None) is not None: #if it falls into the channel somewhere retfreq = getfreq(chanlist, command) return [retfreq, address] # mask so the squelch opens up on the entire group def parse(s, shorttglist, longtglist, chanlist, elimdupes): #this is the main parser. 
it takes in commands in the form "address,command" (no quotes of course) and outputs text via print #it is also responsible for using the talkgroup list, if any [address, groupflag, command] = s.split(",") command = int(command) address = int(address) lookupaddr = address & 0xFFF0 groupflag = bool(groupflag) if longtglist is not None and longtglist.get(str(lookupaddr), None) is not None: longname = longtglist[str(lookupaddr)] #the mask is to screen out extra status bits, which we can add in later (see the RadioReference.com wiki on SmartNet Type II) else: longname = None if shorttglist is not None and shorttglist.get(str(lookupaddr), None) is not None: shortname = shorttglist[str(lookupaddr)] else: shortname = None retval = None if command == 0x30B and groupflag is True and lastmsg.get("command", None) == 0x308 and address & 0x2000 and address & 0x0800: retval = "SysID: Sys #" + hex(lastmsg["address"]) + " on " + str(getfreq(chanlist, address & 0x3FF)) else: if getfreq(chanlist, command) is not None and dupes.get(command, None) != address: retval = "Freq assignment: " + str(shortname) + " (" + str(address) + ")" + " @ " + str(getfreq(chanlist, command)) + " (" + str(longname) + ")" if elimdupes is True: dupes[command] = address lastlastmsg = lastmsg lastmsg["command"]=command lastmsg["address"]=address return retval def main(): # Create Options Parser: parser = OptionParser (option_class=eng_option, conflict_handler="resolve") expert_grp = parser.add_option_group("Expert") parser.add_option("-f", "--freq", type="eng_float", default=866.9625e6, help="set control channel frequency to MHz [default=%default]", metavar="FREQ") parser.add_option("-c", "--centerfreq", type="eng_float", default=867.5e6, help="set center receive frequency to MHz [default=%default]. 
Set to center of 800MHz band for best results") parser.add_option("-g", "--gain", type="int", default=None, help="set RF gain", metavar="dB") parser.add_option("-b", "--bandwidth", type="eng_float", default=3e6, help="set bandwidth of DBS RX frond end [default=%default]") parser.add_option("-F", "--filename", type="string", default=None, help="read data from filename rather than USRP") parser.add_option("-t", "--tgfile", type="string", default="sf_talkgroups.csv", help="read in CSV-formatted talkgroup list for pretty printing of talkgroup names") parser.add_option("-C", "--chanlistfile", type="string", default="motochan14.csv", help="read in list of Motorola channel frequencies (improves accuracy of frequency decoding) [default=%default]") parser.add_option("-e", "--allowdupes", action="store_false", default=True, help="do not eliminate duplicate records (produces lots of noise)") parser.add_option("-E", "--error", type="eng_float", default=0, help="enter an offset error to compensate for USRP clock inaccuracy") parser.add_option("-u", "--audio", action="store_true", default=False, help="output audio on speaker") parser.add_option("-m", "--monitor", type="int", default=None, help="monitor a specific talkgroup") parser.add_option("-v", "--volume", type="eng_float", default=0.2, help="set volume gain for audio output [default=%default]") parser.add_option("-s", "--squelch", type="eng_float", default=28, help="set audio squelch level (default=%default, play with it)") parser.add_option("-s", "--subdev", type="string", help="UHD subdev spec", default=None) parser.add_option("-A", "--antenna", type="string", default=None, help="select Rx Antenna where appropriate") parser.add_option("-r", "--rate", type="eng_float", default=64e6/18, help="set sample rate [default=%default]") parser.add_option("-a", "--addr", type="string", default="", help="address options to pass to UHD") #receive_path.add_options(parser, expert_grp) (options, args) = parser.parse_args () if len(args) 
!= 0: parser.print_help(sys.stderr) sys.exit(1) if options.tgfile is not None: tgreader=csv.DictReader(open(options.tgfile), quotechar='"') shorttglist = {"0": 0} longtglist = {"0": 0} for record in tgreader: shorttglist[record['tgnum']] = record['shortname'] longtglist[record['tgnum']] = record['longname'] else: shorttglist = None longtglist = None if options.chanlistfile is not None: clreader=csv.DictReader(open(options.chanlistfile), quotechar='"') chanlist={"0": 0} for record in clreader: chanlist[record['channel']] = record['frequency'] else: chanlist = None # build the graph queue = gr.msg_queue(10) tb = my_top_block(options, queue) runner = top_block_runner(tb) global dupes dupes = {0: 0} global lastmsg lastmsg = {"command": 0x0000, "address": 0x0000} global lastlastmsg lastlastmsg = lastmsg currentoffset = 0 updaterate = 10 #tb.setvolume(options.volume) #tb.mute() try: while 1: if not queue.empty_p(): msg = queue.delete_head() # Blocking read sentence = msg.to_string() s = parse(sentence, shorttglist, longtglist, chanlist, options.allowdupes) if s is not None: print s if options.audio: [newfreq, newaddr] = parsefreq(sentence, chanlist) if newfreq == currentoffset and newaddr != (options.monitor & 0xFFF0): tb.mute() if newaddr == (options.monitor & 0xFFF0): #the mask is to allow listening to all "flags" within a talkgroup: emergency, broadcast, etc. tb.unmute(options.volume) if newfreq is not None and newfreq != currentoffset: print "Changing freq to %f" % newfreq currentoffset = newfreq tb.tuneoffset(newfreq, options.centerfreq) elif runner.done: break else: time.sleep(1.0/updaterate) except KeyboardInterrupt: tb.stop() runner = None if __name__ == '__main__': main()
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.mixins import UserPassesTestMixin
from django.views.generic import ListView
from django.views import View
from django.db.models import Q
import posgradmin.models as models
from posgradmin import authorization as auth
from django.conf import settings
from django.shortcuts import render, HttpResponseRedirect
import posgradmin.forms as forms
from dal import autocomplete
from django.urls import reverse
from django.forms.models import model_to_dict
from pdfrw import PdfReader, PdfWriter, PageMerge
from django.template.loader import render_to_string
from sh import pandoc, mkdir
from tempfile import NamedTemporaryFile
import datetime
from django.utils.text import slugify
from .settings import BASE_DIR, MEDIA_ROOT, MEDIA_URL


class AcademicoAutocomplete(LoginRequiredMixin, UserPassesTestMixin,
                            autocomplete.Select2QuerySetView):
    """Select2 autocomplete of accredited (or candidate) professors.

    Restricted to logged-in users who are themselves accredited academics.
    """

    login_url = settings.APP_PREFIX + 'accounts/login/'

    def test_func(self):
        # Only accredited academics (or professor candidates) may query.
        if auth.is_academico(self.request.user):
            if self.request.user.academico.acreditacion in [
                    'D', 'M', 'P', 'E', 'candidato profesor']:
                return True
        return False

    def get_queryset(self):
        qs = models.Academico.objects.filter(
            Q(acreditacion='candidato profesor')
            | Q(acreditacion='P')
            | Q(acreditacion='M')
            | Q(acreditacion='D')
            | Q(acreditacion='E'))
        if self.q:
            # Match typed prefix on first name or substring of last name.
            qs = qs.filter(Q(user__first_name__istartswith=self.q)
                           | Q(user__last_name__icontains=self.q))
        return qs


class ProponerAsignatura(LoginRequiredMixin, UserPassesTestMixin, View):
    """Let an accredited academic propose a new optional course subject."""

    login_url = settings.APP_PREFIX + 'accounts/login/'
    template = 'posgradmin/proponer_asignatura.html'
    form_class = forms.AsignaturaModelForm

    def test_func(self):
        # NOTE(review): unlike the other views, 'E' is absent here — confirm
        # whether that exclusion is intentional.
        if auth.is_academico(self.request.user):
            if self.request.user.academico.acreditacion in [
                    'D', 'M', 'P', 'candidato profesor']:
                return True
        return False

    def _breadcrumbs(self):
        # Shared by GET and the invalid-form branch of POST.
        return ((settings.APP_PREFIX + 'inicio/', 'Inicio'),
                ('', 'Proponer Asignatura'))

    def get(self, request, *args, **kwargs):
        form = self.form_class(
            initial={'academicos': [request.user.academico, ]})
        return render(request, self.template,
                      {'title': 'Proponer Asignatura',
                       'breadcrumbs': self._breadcrumbs(),
                       'form': form})

    def post(self, request, *args, **kwargs):
        form = self.form_class(request.POST, request.FILES)
        if form.is_valid():
            a = models.Asignatura(
                asignatura=request.POST['asignatura'],
                tipo='Optativa',
                estado='propuesta',
                programa=request.FILES['programa'])
            a.save()
            return HttpResponseRedirect(reverse('inicio'))
        else:
            print(form.errors)
            # BUGFIX: the original returned None here, which makes Django
            # raise "view didn't return an HttpResponse"; re-render instead.
            return render(request, self.template,
                          {'title': 'Proponer Asignatura',
                           'breadcrumbs': self._breadcrumbs(),
                           'form': form})


class SolicitaCurso(LoginRequiredMixin, UserPassesTestMixin, View):
    """Request a course (Curso) for a given call (ConvocatoriaCurso)."""

    login_url = settings.APP_PREFIX + 'accounts/login/'
    template = 'posgradmin/solicita_curso.html'
    form_class = forms.CursoModelForm

    def test_func(self):
        if auth.is_academico(self.request.user):
            if self.request.user.academico.acreditacion in [
                    'D', 'M', 'P', 'E', 'candidato profesor']:
                return True
        return False

    def _breadcrumbs(self, convocatoria):
        return ((settings.APP_PREFIX + 'inicio/', 'Inicio'),
                (reverse('elige_asignatura', args=[convocatoria.id, ]),
                 "Convocatoria para cursos %s-%s" % (convocatoria.year,
                                                    convocatoria.semestre)))

    def get(self, request, *args, **kwargs):
        convocatoria = models.ConvocatoriaCurso.objects.get(
            pk=int(kwargs['pk']))
        if convocatoria.status == 'cerrada':
            # Closed call: nothing to request.
            return HttpResponseRedirect(reverse('mis_cursos'))
        asignatura = models.Asignatura.objects.get(pk=int(kwargs['as_id']))
        form = self.form_class(
            initial={'academicos': [request.user.academico, ]})
        return render(request, self.template,
                      {'title': 'Solicitar curso',
                       'breadcrumbs': self._breadcrumbs(convocatoria),
                       'convocatoria': convocatoria,
                       'asignatura': asignatura,
                       'form': form})

    def post(self, request, *args, **kwargs):
        convocatoria = models.ConvocatoriaCurso.objects.get(
            pk=int(kwargs['pk']))
        if convocatoria.status == 'cerrada':
            return HttpResponseRedirect(reverse('mis_cursos'))
        asignatura = models.Asignatura.objects.get(pk=int(kwargs['as_id']))
        form = self.form_class(request.POST)
        if form.is_valid():
            curso = models.Curso(
                convocatoria=convocatoria,
                asignatura=asignatura,
                year=convocatoria.year,
                semestre=convocatoria.semestre,
                sede=request.POST['sede'],
                aula=request.POST['aula'],
                horario=request.POST['horario'])
            curso.save()  # must exist before M2M rows can be added
            for ac_id in request.POST.getlist('academicos'):
                ac = models.Academico.objects.get(pk=int(ac_id))
                curso.academicos.add(ac)
            # The requesting professor always participates.
            curso.academicos.add(request.user.academico)
            curso.save()
            return HttpResponseRedirect(reverse('mis_cursos'))
        else:
            # BUGFIX: the original fell through and returned None on an
            # invalid form; re-render the form with its errors instead.
            return render(request, self.template,
                          {'title': 'Solicitar curso',
                           'breadcrumbs': self._breadcrumbs(convocatoria),
                           'convocatoria': convocatoria,
                           'asignatura': asignatura,
                           'form': form})


class CursoView(LoginRequiredMixin, UserPassesTestMixin, View):
    """Edit an existing course; only its participating professors may."""

    login_url = settings.APP_PREFIX + 'accounts/login/'
    template = 'posgradmin/solicita_curso.html'
    form_class = forms.CursoModelForm

    def test_func(self):
        curso = models.Curso.objects.get(pk=int(self.kwargs['pk']))
        if auth.is_academico(self.request.user):
            if self.request.user.academico.acreditacion in [
                    'D', 'M', 'P', 'E', 'candidato profesor']:
                if self.request.user.academico in curso.academicos.all():
                    return True
        return False

    def get(self, request, *args, **kwargs):
        curso = models.Curso.objects.get(pk=int(kwargs['pk']))
        form = self.form_class(initial=model_to_dict(curso))
        breadcrumbs = ((reverse('inicio'), 'Inicio'),
                       (reverse('mis_cursos'), "Mis cursos"))
        return render(request, self.template,
                      {'title': 'Editar curso',
                       'breadcrumbs': breadcrumbs,
                       'convocatoria': curso.convocatoria,
                       'asignatura': curso.asignatura,
                       'form': form})

    def post(self, request, *args, **kwargs):
        curso = models.Curso.objects.get(pk=int(kwargs['pk']))
        convocatoria = curso.convocatoria
        if convocatoria.status == 'cerrada':
            # No edits once the call is closed.
            return HttpResponseRedirect(reverse('mis_cursos'))
        form = self.form_class(request.POST)
        if form.is_valid():
            curso.sede = request.POST['sede']
            curso.aula = request.POST['aula']
            curso.horario = request.POST['horario']
            curso.save()
            # Replace the professor set with the submitted one.
            curso.academicos.clear()
            for ac_id in request.POST.getlist('academicos'):
                ac = models.Academico.objects.get(pk=int(ac_id))
                curso.academicos.add(ac)
            curso.save()
            return HttpResponseRedirect(reverse('mis_cursos'))
        else:
            # BUGFIX: the original returned None on an invalid form.
            breadcrumbs = ((reverse('inicio'), 'Inicio'),
                           (reverse('mis_cursos'), "Mis cursos"))
            return render(request, self.template,
                          {'title': 'Editar curso',
                           'breadcrumbs': breadcrumbs,
                           'convocatoria': curso.convocatoria,
                           'asignatura': curso.asignatura,
                           'form': form})


class CursoConstancia(LoginRequiredMixin, UserPassesTestMixin, View):
    """Generate a PDF certificate for a guest professor's participation."""

    login_url = settings.APP_PREFIX + 'accounts/login/'
    template = 'posgradmin/curso_constancia.html'
    form_class = forms.CursoConstancia

    def test_func(self):
        curso = models.Curso.objects.get(pk=int(self.kwargs['pk']))
        if auth.is_academico(self.request.user):
            if self.request.user.academico.acreditacion in [
                    'D', 'M', 'P', 'E', 'candidato profesor']:
                if self.request.user.academico in curso.academicos.all():
                    return True
        return False

    def get(self, request, *args, **kwargs):
        curso = models.Curso.objects.get(pk=int(kwargs['pk']))
        form = self.form_class(initial=model_to_dict(curso))
        breadcrumbs = ((reverse('inicio'), 'Inicio'),
                       (reverse('mis_cursos'), "Mis cursos"))
        return render(request, self.template,
                      {'title': 'Emitir constancia de participación',
                       'breadcrumbs': breadcrumbs,
                       'convocatoria': curso.convocatoria,
                       'asignatura': curso.asignatura,
                       'form': form})

    def post(self, request, *args, **kwargs):
        # NOTE(review): this handler trusts request.POST directly and never
        # validates form_class — malformed dates raise ValueError; consider
        # validating. Left as-is to preserve behavior.
        curso = models.Curso.objects.get(pk=int(kwargs['pk']))
        profesor_invitado = request.POST['profesor_invitado']
        fecha_participacion = datetime.date(
            int(request.POST['fecha_de_participación_year']),
            int(request.POST['fecha_de_participación_month']),
            int(request.POST['fecha_de_participación_day']))
        with NamedTemporaryFile(mode='r+', encoding='utf-8') as carta_md:
            # Render the certificate as markdown, convert with pandoc, then
            # stamp it onto the letterhead page with pdfrw.
            carta_md.write(
                render_to_string('posgradmin/constancia_curso.md',
                                 {'fecha': datetime.date.today(),
                                  'profesor_invitado': profesor_invitado,
                                  'tema': request.POST['tema'],
                                  'curso': curso,
                                  'fecha_participacion': fecha_participacion,
                                  'profesor': request.user.get_full_name()
                                  }))
            carta_md.seek(0)
            outdir = '%s/perfil-academico/%s/' % (MEDIA_ROOT,
                                                  request.user.academico.id)
            tmpname = 'cursoplain_%s_%s.pdf' % (curso.id,
                                                slugify(profesor_invitado))
            final_name = tmpname.replace('cursoplain', 'constancia_curso')
            mkdir("-p", outdir)
            pandoc(carta_md.name, output=outdir + tmpname)
            C = PdfReader(outdir + tmpname)
            M = PdfReader(BASE_DIR + '/docs/membrete_pcs.pdf')
            w = PdfWriter()
            merger = PageMerge(M.pages[0])
            merger.add(C.pages[0]).render()
            w.write(outdir + final_name, M)
        return HttpResponseRedirect(
            MEDIA_URL + "perfil-academico/%s/%s" % (request.user.academico.id,
                                                    final_name))


class CursoConstanciaEstudiante(LoginRequiredMixin, UserPassesTestMixin, View):
    """Generate a PDF certificate for a guest student of a course."""

    login_url = settings.APP_PREFIX + 'accounts/login/'
    template = 'posgradmin/curso_constancia.html'
    form_class = forms.CursoConstanciaEstudiante

    def test_func(self):
        curso = models.Curso.objects.get(pk=int(self.kwargs['pk']))
        if auth.is_academico(self.request.user):
            if self.request.user.academico.acreditacion in [
                    'D', 'M', 'P', 'E', 'candidato profesor']:
                if self.request.user.academico in curso.academicos.all():
                    return True
        return False

    def get(self, request, *args, **kwargs):
        curso = models.Curso.objects.get(pk=int(kwargs['pk']))
        form = self.form_class(initial=model_to_dict(curso))
        breadcrumbs = ((reverse('inicio'), 'Inicio'),
                       (reverse('mis_cursos'), "Mis cursos"))
        return render(request, self.template,
                      {'title': 'Emitir constancia para estudiante',
                       'breadcrumbs': breadcrumbs,
                       'convocatoria': curso.convocatoria,
                       'asignatura': curso.asignatura,
                       'form': form})

    def post(self, request, *args, **kwargs):
        curso = models.Curso.objects.get(pk=int(kwargs['pk']))
        # BUGFIX: breadcrumbs was undefined in the invalid-form branch of the
        # original, raising NameError; define it up front.
        breadcrumbs = ((reverse('inicio'), 'Inicio'),
                       (reverse('mis_cursos'), "Mis cursos"))
        form = self.form_class(request.POST, request.FILES)
        if form.is_valid():
            estudiante_invitado = request.POST['estudiante_invitado']
            calificacion = request.POST['calificacion']
            with NamedTemporaryFile(mode='r+', encoding='utf-8') as carta_md:
                # Markdown -> pandoc PDF -> merged onto the letterhead.
                carta_md.write(
                    render_to_string(
                        'posgradmin/constancia_curso_estudiante.md',
                        {'fecha': datetime.date.today(),
                         'estudiante_invitado': estudiante_invitado,
                         'calificacion': calificacion,
                         'curso': curso,
                         'profesor': request.user.get_full_name()
                         }))
                carta_md.seek(0)
                outdir = '%s/perfil-academico/%s/' % (
                    MEDIA_ROOT, request.user.academico.id)
                tmpname = 'cursoplain_%s_%s.pdf' % (
                    curso.id, slugify(estudiante_invitado))
                final_name = tmpname.replace('cursoplain',
                                             'constancia_curso')
                mkdir("-p", outdir)
                pandoc(carta_md.name, output=outdir + tmpname)
                C = PdfReader(outdir + tmpname)
                M = PdfReader(BASE_DIR + '/docs/membrete_pcs.pdf')
                w = PdfWriter()
                merger = PageMerge(M.pages[0])
                merger.add(C.pages[0]).render()
                w.write(outdir + final_name, M)
            return HttpResponseRedirect(
                MEDIA_URL + "perfil-academico/%s/%s" % (
                    request.user.academico.id, final_name))
        else:
            return render(request, self.template,
                          {'title': 'Emitir constancia para estudiante',
                           'breadcrumbs': breadcrumbs,
                           'convocatoria': curso.convocatoria,
                           'asignatura': curso.asignatura,
                           'form': form})


class EligeAsignatura(LoginRequiredMixin, UserPassesTestMixin, View):
    """List the optional subjects available under a course call."""

    login_url = settings.APP_PREFIX + 'accounts/login/'
    template = 'posgradmin/elige_asignatura.html'

    def test_func(self):
        return auth.is_academico(self.request.user)

    def get(self, request, *args, **kwargs):
        pk = int(kwargs['pk'])
        convocatoria = models.ConvocatoriaCurso.objects.get(pk=pk)
        # Optional subjects that are accepted or still under proposal.
        asignaturas = models.Asignatura.objects.filter(
            Q(tipo='Optativa')
            & (Q(estado='aceptada') | Q(estado='propuesta')))
        breadcrumbs = ((settings.APP_PREFIX + 'inicio/', 'Inicio'),
                       ('', "Convocatoria para cursos %s-%s" % (
                           convocatoria.year, convocatoria.semestre)))
        return render(request, self.template,
                      {'title': 'Asignaturas',
                       'breadcrumbs': breadcrumbs,
                       'asignaturas': asignaturas,
                       'convocatoria': convocatoria,
                       })


class MisEstudiantesView(LoginRequiredMixin, UserPassesTestMixin, ListView):
    """List the students supervised by the logged-in academic."""

    login_url = settings.APP_PREFIX + 'accounts/login/'

    def test_func(self):
        return auth.is_academico(self.request.user)

    model = models.Estudiante
    template_name = 'posgradmin/mis_estudiantes_list.html'

    def get_queryset(self):
        return self.request.user.academico.estudiantes()


class MisCursos(LoginRequiredMixin, UserPassesTestMixin, ListView):
    """List the courses the logged-in academic participates in."""

    login_url = settings.APP_PREFIX + 'accounts/login/'

    def test_func(self):
        return auth.is_academico(self.request.user)

    model = models.Curso
    template_name = 'posgradmin/mis_cursos_list.html'

    def get_queryset(self):
        return self.request.user.academico.curso_set.all()

    def get_context_data(self, **kwargs):
        # Templates build links to generated certificate PDFs.
        ctxt = super(MisCursos, self).get_context_data(**kwargs)
        ctxt['MEDIA_URL'] = MEDIA_URL
        return ctxt
# Auto-generated UI module (pyuic4 style) for the server administration
# main window.  Do not hand-edit widget construction order: signal wiring
# and connectSlotsByName depend on the object names set below.
from PyQt4 import QtCore, QtGui

try:
    # PyQt4 API v1 provides QString; API v2 / Python 3 strings pass through.
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    _fromUtf8 = lambda s: s


class Ui_MainWindow(object):
    def setupUi(self, MainWindow):
        """Build the widget tree and wire signals onto MainWindow.

        MainWindow must provide the slots referenced at the bottom
        (execAdvancedUserManager, execYearEnd, execYearNew, showArchBackup,
        showStudentsManager, showTeachersManager).
        """
        MainWindow.setObjectName(_fromUtf8("MainWindow"))
        MainWindow.resize(1024, 768)
        self.centralwidget = QtGui.QWidget(MainWindow)
        self.centralwidget.setEnabled(True)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.centralwidget.sizePolicy().hasHeightForWidth())
        self.centralwidget.setSizePolicy(sizePolicy)
        self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
        self.verticalLayout = QtGui.QVBoxLayout(self.centralwidget)
        self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
        # --- header row: logo, title/server-id/year column, stamp image ---
        self.horizontalLayout_2 = QtGui.QHBoxLayout()
        self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
        self.label_3 = QtGui.QLabel(self.centralwidget)
        self.label_3.setMaximumSize(QtCore.QSize(200, 200))
        self.label_3.setText(_fromUtf8(""))
        self.label_3.setPixmap(QtGui.QPixmap(_fromUtf8(":/logo/pixmaps/logo.jpg")))
        self.label_3.setScaledContents(True)
        self.label_3.setObjectName(_fromUtf8("label_3"))
        self.horizontalLayout_2.addWidget(self.label_3)
        self.verticalLayout_2 = QtGui.QVBoxLayout()
        self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
        self.label_2 = QtGui.QLabel(self.centralwidget)
        font = QtGui.QFont()
        font.setPointSize(20)
        self.label_2.setFont(font)
        self.label_2.setAlignment(QtCore.Qt.AlignCenter)
        self.label_2.setObjectName(_fromUtf8("label_2"))
        self.verticalLayout_2.addWidget(self.label_2)
        # Server id label: blue text when enabled, grey when disabled.
        self.labelServerId = QtGui.QLabel(self.centralwidget)
        palette = QtGui.QPalette()
        brush = QtGui.QBrush(QtGui.QColor(0, 0, 255))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
        brush = QtGui.QBrush(QtGui.QColor(0, 0, 255))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
        brush = QtGui.QBrush(QtGui.QColor(0, 0, 255))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
        brush = QtGui.QBrush(QtGui.QColor(0, 0, 255))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
        brush = QtGui.QBrush(QtGui.QColor(118, 116, 113))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
        brush = QtGui.QBrush(QtGui.QColor(118, 116, 113))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
        self.labelServerId.setPalette(palette)
        font = QtGui.QFont()
        font.setPointSize(16)
        font.setBold(True)
        font.setWeight(75)
        self.labelServerId.setFont(font)
        self.labelServerId.setAlignment(QtCore.Qt.AlignCenter)
        self.labelServerId.setObjectName(_fromUtf8("labelServerId"))
        self.verticalLayout_2.addWidget(self.labelServerId)
        # Current school-year label, same blue/grey palette scheme.
        self.labelYear = QtGui.QLabel(self.centralwidget)
        palette = QtGui.QPalette()
        brush = QtGui.QBrush(QtGui.QColor(0, 0, 255))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
        brush = QtGui.QBrush(QtGui.QColor(0, 0, 255))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
        brush = QtGui.QBrush(QtGui.QColor(118, 116, 113))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
        self.labelYear.setPalette(palette)
        font = QtGui.QFont()
        font.setPointSize(37)
        font.setBold(True)
        font.setWeight(75)
        self.labelYear.setFont(font)
        self.labelYear.setTextFormat(QtCore.Qt.PlainText)
        self.labelYear.setAlignment(QtCore.Qt.AlignCenter)
        self.labelYear.setObjectName(_fromUtf8("labelYear"))
        self.verticalLayout_2.addWidget(self.labelYear)
        self.horizontalLayout_2.addLayout(self.verticalLayout_2)
        self.label = QtGui.QLabel(self.centralwidget)
        self.label.setMaximumSize(QtCore.QSize(200, 200))
        self.label.setText(_fromUtf8(""))
        self.label.setPixmap(QtGui.QPixmap(_fromUtf8(":/logo/pixmaps/Stampa-silicone-tondo-fi55.png")))
        self.label.setScaledContents(True)
        self.label.setObjectName(_fromUtf8("label"))
        self.horizontalLayout_2.addWidget(self.label)
        self.verticalLayout.addLayout(self.horizontalLayout_2)
        # Horizontal separator under the header.
        self.line = QtGui.QFrame(self.centralwidget)
        self.line.setFrameShadow(QtGui.QFrame.Raised)
        self.line.setLineWidth(4)
        self.line.setFrameShape(QtGui.QFrame.HLine)
        self.line.setFrameShadow(QtGui.QFrame.Sunken)
        self.line.setObjectName(_fromUtf8("line"))
        self.verticalLayout.addWidget(self.line)
        spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
        self.verticalLayout.addItem(spacerItem)
        # --- main action row: five large tool buttons ---
        self.horizontalLayout = QtGui.QHBoxLayout()
        self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
        self.btnNewYear = QtGui.QToolButton(self.centralwidget)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Maximum)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(11)
        sizePolicy.setHeightForWidth(self.btnNewYear.sizePolicy().hasHeightForWidth())
        self.btnNewYear.setSizePolicy(sizePolicy)
        self.btnNewYear.setMinimumSize(QtCore.QSize(0, 200))
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.btnNewYear.setFont(font)
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap(_fromUtf8(":/img/pixmaps/planner.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.btnNewYear.setIcon(icon)
        self.btnNewYear.setIconSize(QtCore.QSize(128, 128))
        self.btnNewYear.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon)
        self.btnNewYear.setAutoRaise(False)
        self.btnNewYear.setArrowType(QtCore.Qt.NoArrow)
        self.btnNewYear.setObjectName(_fromUtf8("btnNewYear"))
        self.horizontalLayout.addWidget(self.btnNewYear)
        self.btnCloseYear = QtGui.QToolButton(self.centralwidget)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(11)
        sizePolicy.setHeightForWidth(self.btnCloseYear.sizePolicy().hasHeightForWidth())
        self.btnCloseYear.setSizePolicy(sizePolicy)
        self.btnCloseYear.setMinimumSize(QtCore.QSize(0, 200))
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.btnCloseYear.setFont(font)
        self.btnCloseYear.setAutoFillBackground(False)
        icon1 = QtGui.QIcon()
        icon1.addPixmap(QtGui.QPixmap(_fromUtf8(":/img/pixmaps/save.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.btnCloseYear.setIcon(icon1)
        self.btnCloseYear.setIconSize(QtCore.QSize(128, 128))
        self.btnCloseYear.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon)
        self.btnCloseYear.setObjectName(_fromUtf8("btnCloseYear"))
        self.horizontalLayout.addWidget(self.btnCloseYear)
        self.btnTeachers = QtGui.QToolButton(self.centralwidget)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Maximum)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(11)
        sizePolicy.setHeightForWidth(self.btnTeachers.sizePolicy().hasHeightForWidth())
        self.btnTeachers.setSizePolicy(sizePolicy)
        self.btnTeachers.setMinimumSize(QtCore.QSize(0, 200))
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.btnTeachers.setFont(font)
        icon2 = QtGui.QIcon()
        icon2.addPixmap(QtGui.QPixmap(_fromUtf8(":/img/pixmaps/education.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.btnTeachers.setIcon(icon2)
        self.btnTeachers.setIconSize(QtCore.QSize(128, 128))
        self.btnTeachers.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon)
        self.btnTeachers.setObjectName(_fromUtf8("btnTeachers"))
        self.horizontalLayout.addWidget(self.btnTeachers)
        self.btnStudents = QtGui.QToolButton(self.centralwidget)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Maximum)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(11)
        sizePolicy.setHeightForWidth(self.btnStudents.sizePolicy().hasHeightForWidth())
        self.btnStudents.setSizePolicy(sizePolicy)
        self.btnStudents.setMinimumSize(QtCore.QSize(0, 200))
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.btnStudents.setFont(font)
        self.btnStudents.setStyleSheet(_fromUtf8(""))
        icon3 = QtGui.QIcon()
        icon3.addPixmap(QtGui.QPixmap(_fromUtf8(":/img/pixmaps/System-users.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.btnStudents.setIcon(icon3)
        self.btnStudents.setIconSize(QtCore.QSize(128, 128))
        self.btnStudents.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon)
        self.btnStudents.setObjectName(_fromUtf8("btnStudents"))
        self.horizontalLayout.addWidget(self.btnStudents)
        self.btnAdvanced = QtGui.QToolButton(self.centralwidget)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Maximum)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(11)
        sizePolicy.setHeightForWidth(self.btnAdvanced.sizePolicy().hasHeightForWidth())
        self.btnAdvanced.setSizePolicy(sizePolicy)
        self.btnAdvanced.setMinimumSize(QtCore.QSize(0, 200))
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.btnAdvanced.setFont(font)
        icon4 = QtGui.QIcon()
        icon4.addPixmap(QtGui.QPixmap(_fromUtf8(":/img/pixmaps/advanced_options.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.btnAdvanced.setIcon(icon4)
        self.btnAdvanced.setIconSize(QtCore.QSize(128, 128))
        self.btnAdvanced.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon)
        self.btnAdvanced.setObjectName(_fromUtf8("btnAdvanced"))
        self.horizontalLayout.addWidget(self.btnAdvanced)
        self.verticalLayout.addLayout(self.horizontalLayout)
        spacerItem1 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
        self.verticalLayout.addItem(spacerItem1)
        MainWindow.setCentralWidget(self.centralwidget)
        # --- status bar, menu bar, menus and actions ---
        self.statusbar = QtGui.QStatusBar(MainWindow)
        self.statusbar.setObjectName(_fromUtf8("statusbar"))
        MainWindow.setStatusBar(self.statusbar)
        self.menubar = QtGui.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 1024, 29))
        self.menubar.setObjectName(_fromUtf8("menubar"))
        self.menuImpostazioni = QtGui.QMenu(self.menubar)
        self.menuImpostazioni.setEnabled(False)
        self.menuImpostazioni.setObjectName(_fromUtf8("menuImpostazioni"))
        self.menuHelp = QtGui.QMenu(self.menubar)
        self.menuHelp.setEnabled(False)
        self.menuHelp.setObjectName(_fromUtf8("menuHelp"))
        self.menuArchivi = QtGui.QMenu(self.menubar)
        self.menuArchivi.setObjectName(_fromUtf8("menuArchivi"))
        MainWindow.setMenuBar(self.menubar)
        self.actionAbout = QtGui.QAction(MainWindow)
        self.actionAbout.setObjectName(_fromUtf8("actionAbout"))
        self.actionPreferenze = QtGui.QAction(MainWindow)
        self.actionPreferenze.setObjectName(_fromUtf8("actionPreferenze"))
        self.actionArchivioAnniPrec = QtGui.QAction(MainWindow)
        self.actionArchivioAnniPrec.setObjectName(_fromUtf8("actionArchivioAnniPrec"))
        self.menuImpostazioni.addAction(self.actionPreferenze)
        self.menuHelp.addAction(self.actionAbout)
        self.menuArchivi.addAction(self.actionArchivioAnniPrec)
        self.menubar.addAction(self.menuArchivi.menuAction())
        self.menubar.addAction(self.menuImpostazioni.menuAction())
        self.menubar.addAction(self.menuHelp.menuAction())

        self.retranslateUi(MainWindow)
        # Old-style signal/slot wiring: slots live on the MainWindow subclass.
        QtCore.QObject.connect(self.btnAdvanced, QtCore.SIGNAL(_fromUtf8("clicked()")), MainWindow.execAdvancedUserManager)
        QtCore.QObject.connect(self.btnCloseYear, QtCore.SIGNAL(_fromUtf8("clicked()")), MainWindow.execYearEnd)
        QtCore.QObject.connect(self.btnNewYear, QtCore.SIGNAL(_fromUtf8("clicked()")), MainWindow.execYearNew)
        QtCore.QObject.connect(self.actionArchivioAnniPrec, QtCore.SIGNAL(_fromUtf8("triggered()")), MainWindow.showArchBackup)
        QtCore.QObject.connect(self.btnStudents, QtCore.SIGNAL(_fromUtf8("clicked()")), MainWindow.showStudentsManager)
        QtCore.QObject.connect(self.btnTeachers, QtCore.SIGNAL(_fromUtf8("clicked()")), MainWindow.showTeachersManager)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Install the (Italian) display strings on all widgets."""
        MainWindow.setWindowTitle(QtGui.QApplication.translate("MainWindow", "MainWindow", None, QtGui.QApplication.UnicodeUTF8))
        self.label_2.setText(QtGui.QApplication.translate("MainWindow", "Pannello di Amministrazione del Server", None, QtGui.QApplication.UnicodeUTF8))
        self.labelServerId.setText(QtGui.QApplication.translate("MainWindow", "TextLabel", None, QtGui.QApplication.UnicodeUTF8))
        self.labelYear.setText(QtGui.QApplication.translate("MainWindow", "Anno -", None, QtGui.QApplication.UnicodeUTF8))
        self.btnNewYear.setText(QtGui.QApplication.translate("MainWindow", "Nuovo Anno", None, QtGui.QApplication.UnicodeUTF8))
        self.btnCloseYear.setText(QtGui.QApplication.translate("MainWindow", "Chiusura Anno", None, QtGui.QApplication.UnicodeUTF8))
        self.btnTeachers.setText(QtGui.QApplication.translate("MainWindow", "Gestione Insegnanti", None, QtGui.QApplication.UnicodeUTF8))
        self.btnStudents.setText(QtGui.QApplication.translate("MainWindow", "Gestione Alunni", None, QtGui.QApplication.UnicodeUTF8))
        self.btnAdvanced.setText(QtGui.QApplication.translate("MainWindow", "Gestione Avanzata", None, QtGui.QApplication.UnicodeUTF8))
        self.menuImpostazioni.setTitle(QtGui.QApplication.translate("MainWindow", "Impostazioni", None, QtGui.QApplication.UnicodeUTF8))
        self.menuHelp.setTitle(QtGui.QApplication.translate("MainWindow", "Help", None, QtGui.QApplication.UnicodeUTF8))
        self.menuArchivi.setTitle(QtGui.QApplication.translate("MainWindow", "Archivi", None, QtGui.QApplication.UnicodeUTF8))
        self.actionAbout.setText(QtGui.QApplication.translate("MainWindow", "About", None, QtGui.QApplication.UnicodeUTF8))
        self.actionPreferenze.setText(QtGui.QApplication.translate("MainWindow", "Preferenze", None, QtGui.QApplication.UnicodeUTF8))
        self.actionArchivioAnniPrec.setText(QtGui.QApplication.translate("MainWindow", "Archivio anni precedenti", None, QtGui.QApplication.UnicodeUTF8))

# Compiled Qt resource module providing the :/logo and :/img pixmaps.
import classerman_rc
import frappe
from frappe.model.document import Document
from frappe.website.utils import delete_page_cache


class Homepage(Document):
    """Controller for the Homepage document that drives the website
    landing page."""

    def validate(self):
        """Default the description and invalidate the cached home page."""
        if not self.description:
            self.description = frappe._("This is an example website auto-generated from ERPNext")
        # Any edit to this document changes the rendered page, so drop
        # the cached copy of the 'home' route.
        delete_page_cache('home')

    def setup_items(self):
        """Fill the 'products' child table with up to 3 website Items."""
        for d in frappe.get_all('Item', fields=['name', 'item_name', 'description', 'image'],
            filters={'show_in_website': 1}, limit=3):

            doc = frappe.get_doc('Item', d.name)
            if not doc.route:  # set missing route
                # NOTE(review): presumably saving makes the Item controller
                # generate its website route — confirm against the Item model.
                doc.save()
            self.append('products', dict(item_code=d.name, item_name=d.item_name,
                description=d.description, image=d.image))
import abc

from ..utils import OrderedDict
from ..utils import enum

# Closed set of machine architectures the front end understands.
Architecture = enum('Test', 'X86', 'X8664', 'Mips', 'Arm', 'Generic',
                    enum_type='Architecture')


class Register(object):
    """A single named CPU register.

    Subclasses supply size() (width in bytes) and value(); this base
    class holds the name and renders the value as fixed-width hex.
    """

    # width-in-bytes -> zero-padded hex format string.
    _register_fmt = {16: '0x%032lX',
                     10: '0x%020lX',
                     8: '0x%016lX',
                     4: '0x%08lX',
                     2: '0x%04lX',
                     1: '0x%02lX'}

    def __init__(self, name):
        self._name = name

    def name(self):
        """Return the register name (e.g. 'eax')."""
        return self._name

    def size(self):
        """Return the register width in bytes.  Subclass responsibility."""
        raise NotImplementedError

    def value(self):
        """Return the current integer value, or None when unavailable."""
        raise NotImplementedError

    def str(self):
        """Render the value as zero-padded hex; dashes when unknown."""
        if self.value() is not None:
            return self._register_fmt[self.size()] % self.value()
        chars_per_byte = 2
        # One dash per hex digit keeps columns aligned with real values.
        # (The original wrapped this single string in a pointless ''.join.)
        return '-' * (self.size() * chars_per_byte)


def create_static_register(register):
    """Snapshot a live register into an object with frozen size/value.

    The returned instance subclasses the original register's type, so it
    still satisfies isinstance checks, but size() and value() return the
    values captured at call time.
    """
    class StaticRegister(type(register), object):
        def __init__(self, name):
            super(StaticRegister, self).__init__(name)
            self._size = register.size()
            self._value = register.value()

        def size(self):
            return self._size

        def value(self):
            return self._value
    return StaticRegister(register.name())


class Cpu(object):
    """Abstract CPU: an ordered mapping of register groups to registers."""
    __metaclass__ = abc.ABCMeta

    def __init__(self, cpu_factory, registers):
        # Preserve group ordering and, within each group, register ordering.
        self._registers = OrderedDict()
        for group, register_list in registers.iteritems():
            group_registers = OrderedDict(
                [(x.name(), cpu_factory.create_register(self, x))
                 for x in register_list])
            self._registers[group] = group_registers

    @classmethod
    @abc.abstractmethod
    def architecture(cls):
        """Return the Architecture enum value this CPU implements."""
        raise NotImplementedError

    def register(self, name):
        """Look up a register by name across all groups; None if absent."""
        for register_dict in self._registers.itervalues():
            if name in register_dict:
                return register_dict[name]
        return None

    def registers(self):
        """Iterate over (group, registers-OrderedDict) pairs."""
        return self._registers.iteritems()

    @abc.abstractmethod
    def stack_pointer(self):
        raise NotImplementedError

    @abc.abstractmethod
    def program_counter(self):
        raise NotImplementedError


class CpuFactory(object):
    """Abstract factory producing Cpu and Register instances."""
    __metaclass__ = abc.ABCMeta

    def create_cpu(self, architecture):
        """Instantiate the registered Cpu subclass for architecture."""
        assert architecture in _cpu_map, \
            'unsupported architecture: %r' % (architecture,)
        # Index directly instead of the original .get(..., None)(self):
        # with asserts disabled (-O) that degenerated into a confusing
        # "'NoneType' object is not callable"; a KeyError is explicit.
        return _cpu_map[architecture](self)

    @abc.abstractmethod
    def create_register(self, cpu, register):
        """Wrap a raw register description for the given cpu."""
        raise NotImplementedError


class CpuRepository(object):
    """Cache of Cpu instances, one per architecture."""

    def __init__(self, cpu_factory):
        self._cpu_factory = cpu_factory
        self._cpus = {}

    def get_cpu(self, architecture):
        """Return the cached Cpu for architecture, creating it on demand."""
        if architecture in self._cpus:
            return self._cpus[architecture]
        cpu = self._cpu_factory.create_cpu(architecture)
        self._cpus[architecture] = cpu
        return cpu


def register_cpu(cls):
    """Class decorator: register a Cpu subclass under its architecture()."""
    _cpu_map[cls.architecture()] = cls
    return cls


# architecture -> Cpu subclass, populated via @register_cpu.
_cpu_map = {}
"""Sick Beard torrent provider for the IPTorrents private tracker.

Handles login, building season/episode search strings, scraping the
search-result HTML, and exposing an RSS-style cache (Python 2 module).
"""
import re
import traceback
import datetime
import urlparse
import sickbeard
import generic

from sickbeard.common import Quality
from sickbeard import logger
from sickbeard import tvcache
from sickbeard import db
from sickbeard import classes
from sickbeard import helpers
from sickbeard import show_name_helpers
from sickbeard.exceptions import ex, AuthException
from sickbeard import clients
from lib import requests
from lib.requests import exceptions
from sickbeard.bs4_parser import BS4Parser
from lib.unidecode import unidecode
from sickbeard.helpers import sanitizeSceneName
from sickbeard.show_name_helpers import allPossibleShowNames


class IPTorrentsProvider(generic.TorrentProvider):
    """Torrent provider that scrapes the IPTorrents web search pages."""

    def __init__(self):
        generic.TorrentProvider.__init__(self, "IPTorrents")

        # Capabilities / user-configurable settings (filled in by the UI).
        self.supportsBacklog = True
        self.enabled = False
        self.username = None
        self.password = None
        self.ratio = None
        self.freeleech = False

        self.cache = IPTorrentsCache(self)

        # 'search' is filled with (category flags, freeleech flag, query).
        self.urls = {'base_url': 'https://www.iptorrents.com',
                     'login': 'https://www.iptorrents.com/torrents/',
                     'search': 'https://www.iptorrents.com/torrents/?%s%s&q=%s&qf=ti',
                     }

        self.url = self.urls['base_url']

        # TV category filter flags passed on the search URL query string.
        self.categorie = 'l73=1&l78=1&l66=1&l65=1&l79=1&l5=1&l4=1'

    def isEnabled(self):
        """Return True when the user has enabled this provider."""
        return self.enabled

    def imageName(self):
        """Icon file name shown in the providers list."""
        return 'iptorrents.png'

    def getQuality(self, item, anime=False):
        """Derive the video quality from the release name (item[0])."""
        quality = Quality.sceneQuality(item[0], anime)
        return quality

    def _checkAuth(self):
        """Raise AuthException unless both username and password are set."""
        if not self.username or not self.password:
            raise AuthException("Your authentication credentials for " + self.name + " are missing, check your config.")

        return True

    def _doLogin(self):
        """POST the login form; return True on success, False otherwise."""
        login_params = {'username': self.username,
                        'password': self.password,
                        'login': 'submit',
                        }

        try:
            response = self.session.post(self.urls['login'], data=login_params, timeout=30, verify=False)
        except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError), e:
            logger.log(u'Unable to connect to ' + self.name + ' provider: ' + ex(e), logger.ERROR)
            return False

        # 'tries left' text or a bare 'IPT' title indicate the login page
        # was re-served, i.e. the credentials were rejected.
        if re.search('tries left', response.text) \
                or re.search('<title>IPT</title>', response.text) \
                or response.status_code == 401:
            logger.log(u'Invalid username or password for ' + self.name + ', Check your settings!', logger.ERROR)
            return False

        return True

    def _get_season_search_strings(self, ep_obj):
        """Build the 'Season' search strings for every known show name."""
        search_string = {'Season': []}
        for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
            if ep_obj.show.air_by_date or ep_obj.show.sports:
                # Air-by-date / sports shows are searched by air year.
                ep_string = show_name + ' ' + str(ep_obj.airdate).split('-')[0]
            elif ep_obj.show.anime:
                ep_string = show_name + ' ' + "%d" % ep_obj.scene_absolute_number
            else:
                ep_string = show_name + ' S%02d' % int(ep_obj.scene_season)  #1) showName SXX

            search_string['Season'].append(ep_string)

        return [search_string]

    def _get_episode_search_strings(self, ep_obj, add_string=''):
        """Build the 'Episode' search strings for one episode.

        add_string is appended to standard-numbering queries (e.g.
        'PROPER|REPACK' from findPropers).
        """
        search_string = {'Episode': []}

        if not ep_obj:
            return []

        if self.show.air_by_date:
            for show_name in set(allPossibleShowNames(self.show)):
                ep_string = sanitizeSceneName(show_name) + ' ' + \
                            str(ep_obj.airdate).replace('-', '|')
                search_string['Episode'].append(ep_string)
        elif self.show.sports:
            for show_name in set(allPossibleShowNames(self.show)):
                ep_string = sanitizeSceneName(show_name) + ' ' + \
                            str(ep_obj.airdate).replace('-', '|') + '|' + \
                            ep_obj.airdate.strftime('%b')
                search_string['Episode'].append(ep_string)
        elif self.show.anime:
            for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
                ep_string = sanitizeSceneName(show_name) + ' ' + \
                            "%i" % int(ep_obj.scene_absolute_number)
                search_string['Episode'].append(ep_string)
        else:
            for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
                ep_string = show_name_helpers.sanitizeSceneName(show_name) + ' ' + \
                            sickbeard.config.naming_ep_type[2] % {'seasonnumber': ep_obj.scene_season,
                                                                  'episodenumber': ep_obj.scene_episode} + ' %s' % add_string

                search_string['Episode'].append(re.sub('\s+', ' ', ep_string))

        return [search_string]

    def _doSearch(self, search_params, search_mode='eponly', epcount=0, age=0):
        """Run the search queries and scrape the result pages.

        search_params maps a mode ('Season'/'Episode'/'RSS') to a list of
        query strings; returns a list of (title, download_url) tuples.
        """
        results = []
        items = {'Season': [], 'Episode': [], 'RSS': []}

        freeleech = '&free=on' if self.freeleech else ''

        if not self._doLogin():
            return results

        for mode in search_params.keys():
            for search_string in search_params[mode]:

                if isinstance(search_string, unicode):
                    search_string = unidecode(search_string)

                # URL with 50 tv-show results, or max 150 if adjusted in IPTorrents profile
                searchURL = self.urls['search'] % (self.categorie, freeleech, search_string)
                searchURL += ';o=seeders' if mode != 'RSS' else ''

                logger.log(u"" + self.name + " search page URL: " + searchURL, logger.DEBUG)

                data = self.getURL(searchURL)
                if not data:
                    continue

                try:
                    # Strip <button> elements that confuse the HTML parser.
                    data = re.sub(r'(?im)<button.+?<[\/]button>', '', data, 0)
                    with BS4Parser(data, features=["html5lib", "permissive"]) as html:
                        if not html:
                            logger.log(u"Invalid HTML data: " + str(data), logger.DEBUG)
                            continue

                        if html.find(text='No Torrents Found!'):
                            logger.log(u"No results found for: " + search_string + " (" + searchURL + ")", logger.DEBUG)
                            continue

                        torrent_table = html.find('table', attrs={'class': 'torrents'})
                        torrents = torrent_table.find_all('tr') if torrent_table else []

                        #Continue only if one Release is found
                        if len(torrents) < 2:
                            logger.log(u"The Data returned from " + self.name + " do not contains any torrent",
                                       logger.WARNING)
                            continue

                        # Row 0 is the table header; data rows follow.
                        for result in torrents[1:]:

                            try:
                                torrent = result.find_all('td')[1].find('a')
                                torrent_name = torrent.string
                                torrent_download_url = self.urls['base_url'] + (result.find_all('td')[3].find('a'))['href']
                                torrent_details_url = self.urls['base_url'] + torrent['href']
                                torrent_seeders = int(result.find('td', attrs={'class': 'ac t_seeders'}).string)
                                ## Not used, perhaps in the future ##
                                #torrent_id = int(torrent['href'].replace('/details.php?id=', ''))
                                #torrent_leechers = int(result.find('td', attrs = {'class' : 'ac t_leechers'}).string)
                            except (AttributeError, TypeError):
                                continue

                            # Filter unseeded torrent and torrents with no name/url
                            if mode != 'RSS' and torrent_seeders == 0:
                                continue

                            if not torrent_name or not torrent_download_url:
                                continue

                            item = torrent_name, torrent_download_url
                            logger.log(u"Found result: " + torrent_name + " (" + torrent_details_url + ")",
                                       logger.DEBUG)
                            items[mode].append(item)

                except Exception, e:
                    logger.log(u"Failed parsing " + self.name + " Traceback: " + traceback.format_exc(), logger.ERROR)

            results += items[mode]

        return results

    def _get_title_and_url(self, item):
        """Normalize a scraped (title, url) pair for the search engine."""
        title, url = item

        if title:
            title = u'' + title
            title = title.replace(' ', '.')

        if url:
            url = str(url).replace('&amp;', '&')

        return (title, url)

    def findPropers(self, search_date=datetime.datetime.today()):
        """Search for PROPER/REPACK releases of recently aired episodes.

        NOTE(review): the default is evaluated once at import time, so
        search_date is the module load time unless the caller passes a value.
        """
        results = []

        myDB = db.DBConnection()
        sqlResults = myDB.select(
            'SELECT s.show_name, e.showid, e.season, e.episode, e.status, e.airdate FROM tv_episodes AS e' +
            ' INNER JOIN tv_shows AS s ON (e.showid = s.indexer_id)' +
            ' WHERE e.airdate >= ' + str(search_date.toordinal()) +
            ' AND (e.status IN (' + ','.join([str(x) for x in Quality.DOWNLOADED]) + ')' +
            ' OR (e.status IN (' + ','.join([str(x) for x in Quality.SNATCHED]) + ')))'
        )

        if not sqlResults:
            return []

        for sqlshow in sqlResults:
            self.show = helpers.findCertainShow(sickbeard.showList, int(sqlshow["showid"]))
            if self.show:
                curEp = self.show.getEpisode(int(sqlshow["season"]), int(sqlshow["episode"]))

                searchString = self._get_episode_search_strings(curEp, add_string='PROPER|REPACK')

                for item in self._doSearch(searchString[0]):
                    title, url = self._get_title_and_url(item)
                    results.append(classes.Proper(title, url, datetime.datetime.today(), self.show))

        return results

    def seedRatio(self):
        """Seed ratio configured for this provider (may be None)."""
        return self.ratio


class IPTorrentsCache(tvcache.TVCache):
    """RSS-style cache backed by an empty-query provider search."""

    def __init__(self, provider):

        tvcache.TVCache.__init__(self, provider)

        # Only poll IPTorrents every 10 minutes max
        self.minTime = 10

    def _getRSSData(self):
        # An empty search string returns the latest torrents ('RSS' mode).
        search_params = {'RSS': ['']}
        return {'entries': self.provider._doSearch(search_params)}


provider = IPTorrentsProvider()
from collections import defaultdict


class Solution(object):
    def minWindow(self, S, T):
        """
        :type S: str
        :type T: str
        :rtype: str

        Minimum window of S containing T as a subsequence, found with a
        single left-to-right scan over S in O(len(S) * len(T)) time.
        """
        # occ[ch] lists, newest-first, the offsets p with T[p + 1] == ch
        # (p == -1 stands for "ch matches the first character of T").
        occ = defaultdict(list)
        for pos, ch in enumerate(T, -1):
            occ[ch].append(pos)
        for positions in occ.values():
            positions.reverse()

        # window_start[j] = start index in S of the latest match of T[:j+1]
        # seen so far; the final slot (index -1) is scratch space holding
        # the current scan position (start of a zero-length match).
        window_start = [None] * (len(T) + 1)
        best_lo, best_hi = float('-inf'), 0
        for idx, ch in enumerate(S):
            window_start[-1] = idx
            # Propagate matches in descending p order so each occurrence of
            # ch extends at most one prefix per scan step.
            for p in occ[ch]:
                if window_start[p] is not None:
                    window_start[p + 1] = window_start[p]
            # A full match ends here; keep it if strictly shorter.
            if (ch == T[-1] and window_start[-2] is not None
                    and idx - window_start[-2] < best_hi - best_lo):
                best_lo, best_hi = window_start[-2], idx
        return '' if best_lo < 0 else S[best_lo:best_hi + 1]


print(Solution().minWindow("cnhczmccqouqadqtmjjzl", "mm"))
from django.conf.urls import patterns, include, url

# URL configuration for the web interface (pre-Django-1.8 style: views are
# given as dotted-path strings via ``patterns('', ...)``).
urlpatterns = patterns('',
    # The dashboard serves both the site root and /dashboard/.
    url(r'^$', 'webinterface.view.dashboard.main'),
    url(r'^dashboard/$', 'webinterface.view.dashboard.main'),

    # Each remaining section exposes a page view plus an /ajax/ endpoint.
    url(r'^login/$', 'webinterface.view.login.main'),
    url(r'^login/ajax/$', 'webinterface.view.login.ajax'),

    url(r'^settings/$', 'webinterface.view.settings.main'),
    url(r'^settings/ajax/$', 'webinterface.view.settings.ajax'),

    url(r'^orders/$', 'webinterface.view.orders.main'),
    url(r'^orders/ajax/$', 'webinterface.view.orders.ajax'),
)
import numpy


class DifferentialEvolutionAbstract:
    """Abstract base class for differential-evolution (DE) optimizers.

    Holds the DE hyper-parameters (mutation factor ``f`` and crossover
    probability ``p``), the population state, and the best-cost history.
    Subclasses are expected to implement :meth:`iteration` and
    :meth:`optimize`.
    """

    # Class-level defaults; instances overwrite these via the setters below.
    amount_of_individuals = None  # population size
    f = None                      # mutation factor (differential weight)
    p = None                      # crossover probability
    end_method = None             # termination criterion name

    def __init__(self, min_element=-1, max_element=1):
        # Box constraints applied to every coordinate of every individual.
        self.min_element = min_element
        self.max_element = max_element
        self.f = 0.5
        self.p = 0.9
        self.func = None              # objective function to minimize
        self.population = None        # array (amount_of_individuals, dim)
        self.func_population = None   # objective value per individual
        self.dim = 0                  # search-space dimensionality
        self.child_funcs = None
        self.cost_list = []           # recorded best cost per iteration
        self.end_method = 'max_iter'

    def set_amount_of_individuals(self, amount_of_individuals):
        """Set the population size."""
        self.amount_of_individuals = amount_of_individuals

    def set_params(self, f, p):
        """Set the mutation factor ``f`` and crossover probability ``p``."""
        self.f = f
        self.p = p

    def set_end_method(self, end_method):
        """Set the termination criterion (e.g. ``'max_iter'``)."""
        self.end_method = end_method

    def create_population(self):
        """Create the initial population.

        Each individual is drawn uniformly from
        ``[min_element, max_element]`` per coordinate; returns an array of
        shape ``(amount_of_individuals, dim)``.
        """
        return numpy.array([
            numpy.random.uniform(self.min_element, self.max_element, self.dim)
            for _ in range(self.amount_of_individuals)
        ])

    def choose_best_individual(self):
        """Return the individual with the lowest objective value.

        Uses ``numpy.argmin`` instead of ``list(...).index(min(...))``:
        one pass, no list round-trip; ties still resolve to the first
        minimum, matching the previous behavior.
        """
        return self.population[int(numpy.argmin(self.func_population))]

    def iteration(self):
        """Perform one DE generation; placeholder for subclasses."""
        return []

    def optimize(self, func, dim, end_cond, debug_pop_print=-1):
        """Run the optimization loop; placeholder for subclasses."""
        return []

    def return_cost_list(self):
        """Return the recorded best-cost history."""
        return self.cost_list
# Python 2 plotting script: aligns NMC cathode and graphite anode half-cell
# OCV curves against measured full-cell C/60 data, then plots the curves and
# their SOC gradients ("Full and Half Cell OCV" / anode-alignment figures).
import pickle
from matplotlib import pyplot as plt
plt.style.use('classic')
import matplotlib as mpl
fs = 12.      # base font size
fw = 'bold'   # base font weight
mpl.rc('lines', linewidth=2., color='k')
mpl.rc('font', size=fs, weight=fw, family='Arial')
mpl.rc('legend', fontsize='small')
import numpy


def grad(x, u):
    # Numerical derivative du/dx via numpy.gradient (central differences).
    return numpy.gradient(u) / numpy.gradient(x)


date = '20160519'
# Machine-specific data roots; adjust when running elsewhere.
base = '/home/mk-sim-linux/Battery_TempGrad/Python/batt_simulation/battsimpy/'
base_dir = '/home/mk-sim-linux/Battery_TempGrad/JournalPaper2/Paper2/ocv_unif35/'
fig_dir = '/home/mk-sim-linux/Battery_TempGrad/JournalPaper3/modeling_paper_p3/figs/'

# Literature half-cell OCV datasets (CSV: composition x, potential U).
nmc_rest_523 = numpy.loadtxt(base + 'data/Model_nmc/Model_Pars/solid/thermodynamics/2012Yang_523NMC_dchg_restOCV.csv', delimiter=',')
nmc_cby25_111 = numpy.loadtxt(base + 'data/Model_nmc/Model_Pars/solid/thermodynamics/2012Wu_NMC111_Cby25_dchg.csv', delimiter=',')
nmc_YangWu_mix = numpy.loadtxt(base + 'data/Model_nmc/Model_Pars/solid/thermodynamics/YangWuMix_NMC_20170607.csv', delimiter=',')
lfp_prada_dchg = numpy.loadtxt(base + 'data/Model_v1/Model_Pars/solid/thermodynamics/2012Prada_LFP_U_dchg.csv', delimiter=',')
graph_hess_dchg = numpy.loadtxt(base + 'data/Model_nmc/Model_Pars/solid/thermodynamics/Ua_cell4Fit_NMC_2012Yang_refx.csv', delimiter=',')  #graphite_Hess_discharge_x.csv

# Cathode curve (x flipped to SOC convention) and anode curve.
xin, Uin = 1. - nmc_YangWu_mix[:, 0], nmc_YangWu_mix[:, 1]
xin2, Uin2 = graph_hess_dchg[:, 0], graph_hess_dchg[:, 1]  #-0.025

# Pickled slow-OCV full-cell measurement for this cell and date.
pfiles2 = [base_dir + 'slowOCVdat_cell4_slow_ocv_' + date + '.p', ]
d = pickle.load(open(pfiles2[0], 'rb'))

# Normalize capacity to SOC in [0, 1]; discharge voltage curve.
max_cap = numpy.amax(d['interp']['cap'])
x_cell, U_cell = 1 - numpy.array(d['interp']['cap']) / max_cap * 1., d['interp']['dchg']['volt']

# Manually tuned stretch/shift factors mapping electrode composition to
# full-cell SOC (previous trial values kept in the trailing comments).
scale_x = 1.42  # 1.55
shift_x = -.03  #-.12
scale_x2 = 1 / .8  #1./0.83 #
shift_x2 = -.06  #-.035

figres = 300
figname = base_dir + 'ocv-plots_' + date + '.pdf'
sty = ['-', '--']
fsz = (190. / 25.4, 120. / 25.4)  # figure size in inches (from mm)
f1, axes = plt.subplots(1, 2, figsize=fsz)
a1, a2 = axes

# Left panel: measured full-cell OCV plus aligned half-cell curves.
a1.plot(x_cell, U_cell, '-b', label='Cell C/60 Data')
a1.plot(xin * scale_x + shift_x, Uin, '-g', label='Cathode')
a1.plot(xin2 * scale_x2 + shift_x2, Uin2, '-k', label='Anode')

# numpy.interp needs ascending x; flip the cathode data when descending.
if xin[1] < xin[0]:
    Uc = numpy.interp(x_cell, numpy.flipud(xin * scale_x + shift_x), numpy.flipud(Uin))
else:
    Uc = numpy.interp(x_cell, xin * scale_x + shift_x, Uin)
Ua = numpy.interp(x_cell, xin2 * scale_x2 + shift_x2, Uin2)

# Reconstructed full-cell OCV = cathode minus anode potential.
a1.plot(x_cell, Uc - Ua, ':k', label='U$_{cell}$ fit')

# Derived anode potential and composition grids (computed, not plotted here).
Ua_out = Uc - U_cell
xa_out = (x_cell - shift_x2) / scale_x2
yin = 1. - xin

# Electrode utilization windows implied by the alignment factors.
xc_lo = 1. - (-shift_x / scale_x)
xc_hi = 1. - (1. - shift_x) / scale_x
xa_lo = (-shift_x2 / scale_x2)
xa_hi = (1. - shift_x2) / scale_x2

print 'xc_lo, xc_hi:', xc_lo, xc_hi
print 'xa_lo, xa_hi:', xa_lo, xa_hi

a1.set_xlabel('State of Charge', fontsize=fs, fontweight=fw)
a1.set_ylabel('Voltage vs. Li [V]', fontsize=fs, fontweight=fw)
a1.set_title('Full and Half Cell OCV', fontsize=fs, fontweight=fw)
a1.legend(loc='best')
a1.set_axisbelow(True)
a1.grid(color='gray')

# Right panel: SOC gradients used to judge the anode alignment.
a2.plot(x_cell, grad(x_cell, U_cell), label=r'$\frac{\partial U_{cell}}{\partial SOC}$')
a2.plot(x_cell, -grad(x_cell, Ua), label=r'$\frac{\partial U_{anode}}{\partial SOC}$')
a2.set_xlabel('State of Charge', fontsize=fs, fontweight=fw)
a2.set_ylabel('$\partial U / \partial SOC$', fontsize=fs, fontweight=fw)
a2.set_title('OCV Gradients for Anode Alignment', fontsize=fs, fontweight=fw)
a2.legend(loc='best')
a2.set_axisbelow(True)
a2.grid(color='gray')
a2.set_ylim([-0.1, 1.5])

plt.suptitle('NMC/C$_6$ Half Cell OCV Alignment', fontsize=fs, fontweight=fw)
plt.tight_layout(rect=[0, 0.03, 1, 0.97])

plt.show()
../../../../share/pyshared/jockey/xorg_driver.py
"""Holds all pytee logic."""
""" unit test for filters module author: Michael Grupp This file is part of evo (github.com/MichaelGrupp/evo). evo is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. evo is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with evo. If not, see <http://www.gnu.org/licenses/>. """ import math import unittest import numpy as np from evo.core import filters from evo.core import lie_algebra as lie POSES_1 = [ lie.se3(np.eye(3), np.array([0, 0, 0])), lie.se3(np.eye(3), np.array([0, 0, 0.5])), lie.se3(np.eye(3), np.array([0, 0, 0])), lie.se3(np.eye(3), np.array([0, 0, 1])) ] POSES_2 = [ lie.se3(np.eye(3), np.array([0, 0, 0])), lie.se3(np.eye(3), np.array([0, 0, 0.5])), lie.se3(np.eye(3), np.array([0, 0, 0.99])), lie.se3(np.eye(3), np.array([0, 0, 1.0])) ] POSES_3 = [ lie.se3(np.eye(3), np.array([0, 0, 0.0])), lie.se3(np.eye(3), np.array([0, 0, 0.9])), lie.se3(np.eye(3), np.array([0, 0, 0.99])), lie.se3(np.eye(3), np.array([0, 0, 0.999])), lie.se3(np.eye(3), np.array([0, 0, 0.9999])), lie.se3(np.eye(3), np.array([0, 0, 0.99999])), lie.se3(np.eye(3), np.array([0, 0, 0.999999])), lie.se3(np.eye(3), np.array([0, 0, 0.9999999])) ] POSES_4 = [ lie.se3(np.eye(3), np.array([0, 0, 0])), lie.se3(np.eye(3), np.array([0, 0, 1])), lie.se3(np.eye(3), np.array([0, 0, 1])), lie.se3(np.eye(3), np.array([0, 0, 1])) ] class TestFilterPairsByPath(unittest.TestCase): def test_poses1_all_pairs(self): target_path = 1.0 tol = 0.0 id_pairs = filters.filter_pairs_by_path(POSES_1, target_path, tol, all_pairs=True) self.assertEqual(id_pairs, [(0, 2), (2, 3)]) def 
test_poses1_wrong_target(self): target_path = 2.5 tol = 0.0 id_pairs = filters.filter_pairs_by_path(POSES_1, target_path, tol, all_pairs=True) self.assertEqual(id_pairs, []) def test_poses2_all_pairs_low_tolerance(self): target_path = 1.0 tol = 0.001 id_pairs = filters.filter_pairs_by_path(POSES_2, target_path, tol, all_pairs=True) self.assertEqual(id_pairs, [(0, 3)]) def test_convergence_all_pairs(self): target_path = 1.0 tol = 0.2 id_pairs = filters.filter_pairs_by_path(POSES_3, target_path, tol, all_pairs=True) self.assertEqual(id_pairs, [(0, 7)]) axis = np.array([1, 0, 0]) POSES_5 = [ lie.se3(lie.so3_exp(axis * 0.0), np.array([0, 0, 0])), lie.se3(lie.so3_exp(axis * math.pi), np.array([0, 0, 0])), lie.se3(lie.so3_exp(axis * 0.0), np.array([0, 0, 0])), lie.se3(lie.so3_exp(axis * math.pi / 3), np.array([0, 0, 0])), lie.se3(lie.so3_exp(axis * math.pi), np.array([0, 0, 0])) ] TRANSFORM = lie.random_se3() POSES_5_TRANSFORMED = [TRANSFORM.dot(p) for p in POSES_5] axis = np.array([1, 0, 0]) p0 = lie.se3(lie.so3_exp(axis * 0.0), np.array([0, 0, 0])) pd = lie.se3(lie.so3_exp(axis * (math.pi / 3.)), np.array([1, 2, 3])) p1 = np.dot(p0, pd) p2 = np.dot(p1, pd) p3 = np.dot(p2, pd) POSES_6 = [p0, p1, p2, p3, p3] POSES_6_TRANSFORMED = [TRANSFORM.dot(p) for p in POSES_6] class TestFilterPairsByAngle(unittest.TestCase): def test_poses5(self): tol = 0.001 expected_result = [(0, 1), (1, 2), (2, 4)] # Result should be unaffected by global transformation. 
for poses in (POSES_5, POSES_5_TRANSFORMED): target_angle = math.pi - tol id_pairs = filters.filter_pairs_by_angle(poses, target_angle, tol, all_pairs=False) self.assertEqual(id_pairs, expected_result) # Check for same result when using degrees: target_angle = np.rad2deg(target_angle) id_pairs = filters.filter_pairs_by_angle(poses, target_angle, tol, all_pairs=False, degrees=True) self.assertEqual(id_pairs, expected_result) def test_poses5_all_pairs(self): tol = 0.01 expected_result = [(0, 1), (0, 4), (1, 2), (2, 4)] # Result should be unaffected by global transformation. for poses in (POSES_5, POSES_5_TRANSFORMED): target_angle = math.pi id_pairs = filters.filter_pairs_by_angle(poses, target_angle, tol, all_pairs=True) self.assertEqual(id_pairs, expected_result) # Check for same result when using degrees: target_angle = np.rad2deg(target_angle) id_pairs = filters.filter_pairs_by_angle(poses, target_angle, tol, all_pairs=True, degrees=True) self.assertEqual(id_pairs, expected_result) def test_poses6(self): tol = 0.001 target_angle = math.pi - tol expected_result = [(0, 3)] # Result should be unaffected by global transformation. for poses in (POSES_6, POSES_6_TRANSFORMED): id_pairs = filters.filter_pairs_by_angle(poses, target_angle, tol, all_pairs=False) self.assertEqual(id_pairs, expected_result) def test_poses6_all_pairs(self): target_angle = math.pi tol = 0.001 expected_result = [(0, 3), (0, 4)] # Result should be unaffected by global transformation. for poses in (POSES_6, POSES_6_TRANSFORMED): id_pairs = filters.filter_pairs_by_angle(poses, target_angle, tol, all_pairs=True) self.assertEqual(id_pairs, expected_result) if __name__ == '__main__': unittest.main(verbosity=2)
# Curses-style text UI for the PiTimer relay scheduler (Python 2 module:
# uses ``string.atoi`` and ``operator.div``, both removed in Python 3).
import string
import operator
import datetime
import SystemTime
import Schedule
import ScheduleItem

# Top-level UI states (which screen is active).
STATE_MAIN_MENU = 0
STATE_ADD_SCHEDULE = 1
STATE_DEL_SCHEDULE = 2
STATE_RELAY_STATES = 3
STATE_SCHEDULE = 4
STATE_SET_SYSTEM_TIME = 5
STATE_SHUTDOWN = 6

# Sub-mode within a state (normal input vs. yes/no confirmation).
MODE_STANDARD = 0
MODE_CONFIRM = 1


class UserInterface:
    """Keypad-driven menu system rendered on a 20-column display."""

    def __init__(self, NewWIndow, NewThisSchedule, NewThisRelays):
        # NOTE(review): self.Mode is first assigned in SetInterfaceState();
        # screens that read it assume SetInterfaceState ran first — confirm.
        self.ThisWindow = NewWIndow
        self.ThisSchedule = NewThisSchedule
        self.ThisRelays = NewThisRelays
        self.ThisSystemTime = SystemTime.SystemTime()
        self.DisplaySplash()
        self.InputBuffer = ""     # digits typed so far for masked input
        self.SelectPos = 0        # cursor position in schedule lists
        self.SelectID = 0         # schedule item id under the cursor
        self.InterfaceState = STATE_MAIN_MENU

    def DisplaySplash(self):
        """Show the startup splash screen."""
        self.ThisWindow.clear()
        self.ThisWindow.refresh()
        print("{:^20}".format("PiTimer") + "\r")
        print("{:^20}".format("2015-06-23") + "\r")
        print("{:^20}".format("Version 1.00") + "\r")
        print("{:^20}".format("(C) Jason Birch") + "\r")
        self.ThisWindow.refresh()

    def KeyPress(self, KeyCode):
        """Dispatch a key press to the handler for the current state."""
        Result = KeyCode
        if self.InterfaceState == STATE_MAIN_MENU:
            Result = self.KeysMainMenu(KeyCode)
        elif self.InterfaceState == STATE_ADD_SCHEDULE:
            Result = self.KeysAddSchedule(KeyCode)
        elif self.InterfaceState == STATE_DEL_SCHEDULE:
            Result = self.KeysDelSchedule(KeyCode)
        elif self.InterfaceState == STATE_SCHEDULE:
            Result = self.KeysSchedule(KeyCode)
        elif self.InterfaceState == STATE_RELAY_STATES:
            Result = self.KeysRelayStates(KeyCode)
        elif self.InterfaceState == STATE_SET_SYSTEM_TIME:
            Result = self.KeysSetSystemTime(KeyCode)
        return Result

    def DisplayRefresh(self):
        """Redraw the screen for the current state."""
        if self.InterfaceState == STATE_MAIN_MENU:
            self.DisplayMainMenu()
        elif self.InterfaceState == STATE_ADD_SCHEDULE:
            self.DisplayAddSchedule()
        elif self.InterfaceState == STATE_DEL_SCHEDULE:
            self.DisplayDelSchedule()
        elif self.InterfaceState == STATE_SCHEDULE:
            self.DisplaySchedule()
        elif self.InterfaceState == STATE_RELAY_STATES:
            self.DisplayRelayStates()
        elif self.InterfaceState == STATE_SET_SYSTEM_TIME:
            self.DisplaySetSystemTime()

    def SetInterfaceState(self, NewInterfaceState):
        """Switch screens: reset input/selection state, then draw."""
        self.Mode = MODE_STANDARD
        self.InputBuffer = ""
        self.SelectPos = 0
        self.SelectID = 0
        self.InterfaceState = NewInterfaceState
        if self.InterfaceState == STATE_MAIN_MENU:
            self.DisplayMainMenu()
        elif self.InterfaceState == STATE_ADD_SCHEDULE:
            self.DisplayAddSchedule()
        elif self.InterfaceState == STATE_DEL_SCHEDULE:
            self.DisplayDelSchedule()
        elif self.InterfaceState == STATE_SCHEDULE:
            self.DisplaySchedule()
        elif self.InterfaceState == STATE_RELAY_STATES:
            self.DisplayRelayStates()
        elif self.InterfaceState == STATE_SET_SYSTEM_TIME:
            self.DisplaySetSystemTime()

    def GetMaskedInput(self, Mask, Input):
        """Merge typed digits into *Mask*, replacing '#' placeholders."""
        InputCount = 0
        Result = ""
        for Char in Mask:
            if Char == "#" and len(Input) > InputCount:
                Result += Input[InputCount:InputCount + 1]
                InputCount += 1
            else:
                Result += Char
        return Result

    def KeyMaskedInput(self, Mask, Input, KeyCode):
        """Append a digit to *Input* (bounded by the mask's '#' count)
        or delete the last digit on key 127 (DEL/backspace)."""
        if len(Input) < Mask.count("#") and KeyCode >= ord("0") and KeyCode <= ord("9"):
            Input += chr(KeyCode)
        elif KeyCode == 127 and len(Input) > 0:
            Input = Input[:-1]
        return Input

    def DisplayMainMenu(self):
        """Draw the main menu with the current system time."""
        self.ThisWindow.clear()
        self.ThisWindow.refresh()
        print("{:>20}".format(self.ThisSystemTime.SystemTimeString()) + "\r")
        print("{:^20}".format("1 Add     4 Schedule") + "\r")
        print("{:^20}".format("2 Delete 5 Set Time") + "\r")
        print("{:^20}".format("3 Relays 6 Shutdown") + "\r")
        self.ThisWindow.refresh()

    def KeysMainMenu(self, KeyCode):
        """Main-menu keys: digits select a screen; 6 requests shutdown
        (signalled by returning 27/ESC to the caller)."""
        Result = KeyCode
        if KeyCode == ord("1"):
            self.SetInterfaceState(STATE_ADD_SCHEDULE)
        if KeyCode == ord("2"):
            self.SetInterfaceState(STATE_DEL_SCHEDULE)
        if KeyCode == ord("3"):
            self.SetInterfaceState(STATE_RELAY_STATES)
        if KeyCode == ord("4"):
            self.SetInterfaceState(STATE_SCHEDULE)
        if KeyCode == ord("5"):
            self.SetInterfaceState(STATE_SET_SYSTEM_TIME)
        if KeyCode == ord("6"):
            Result = 27
        return Result

    def DisplayRelayStates(self):
        """Show the current relay on/off states."""
        self.ThisWindow.clear()
        self.ThisWindow.refresh()
        self.ThisRelays.DisplayRelayStates()
        self.ThisWindow.refresh()

    def KeysRelayStates(self, KeyCode):
        """Relay screen: Enter (10) returns to the main menu."""
        Result = KeyCode
        if KeyCode == 10:
            self.SetInterfaceState(STATE_MAIN_MENU)
        return Result

    def DisplayAddSchedule(self):
        """Show the add-schedule form with the digits typed so far."""
        self.ThisWindow.clear()
        self.ThisWindow.refresh()
        print("{:^20}".format("ADD SCHEDULE") + "\r")
        print(self.GetMaskedInput("####-##-## ##:##:##\r\nPeriod ### ##:##:##\r\nRelay ## State #\r", self.InputBuffer))
        self.ThisWindow.refresh()

    def KeysAddSchedule(self, KeyCode):
        """Collect masked digits; on Enter with a full buffer, parse the
        date/time, period, relay and state fields and add the schedule.

        NOTE(review): ``string.atoi`` and ``operator.div`` are Python 2
        only (``int()`` and ``//`` are the portable equivalents) — confirm
        before any Python 3 migration.
        """
        Result = KeyCode
        self.InputBuffer = self.KeyMaskedInput("####-##-## ##:##:## ### ##:##:## ## #", self.InputBuffer, KeyCode)
        if KeyCode == 10:
            if len(self.InputBuffer) == 26:
                UserInput = self.GetMaskedInput("####-##-## ##:##:## ### ##:##:## ## #", self.InputBuffer)
                # Fixed-position fields: [36:37] relay state digit.
                RelayState = {
                    "0": ScheduleItem.RELAY_OFF,
                    "1": ScheduleItem.RELAY_ON,
                    "2": ScheduleItem.RELAY_TOGGLE,
                }.get(UserInput[36:37], ScheduleItem.RELAY_TOGGLE)
                # Period = DDD days + HH:MM:SS, normalized to days+seconds.
                PeriodSeconds = string.atoi(UserInput[30:32]) + 60 * string.atoi(UserInput[27:29]) + 60 * 60 * string.atoi(UserInput[24:26]) + 24 * 60 * 60 * string.atoi(UserInput[20:23])
                PeriodDays = operator.div(PeriodSeconds, 24 * 60 * 60)
                PeriodSeconds = operator.mod(PeriodSeconds, 24 * 60 * 60)
                try:
                    self.ThisSchedule.AddSchedule(string.atoi(UserInput[33:35]), datetime.datetime(string.atoi(UserInput[0:4]), string.atoi(UserInput[5:7]), string.atoi(UserInput[8:10]), string.atoi(UserInput[11:13]), string.atoi(UserInput[14:16]), string.atoi(UserInput[17:19])), RelayState, datetime.timedelta(PeriodDays, PeriodSeconds))
                except:
                    # Invalid date/field values are silently ignored.
                    print("")
                    self.ThisWindow.refresh()
            self.SetInterfaceState(STATE_MAIN_MENU)
        return Result

    def DisplayDelSchedule(self):
        """Show the delete-schedule list or the confirmation prompt."""
        self.ThisWindow.clear()
        self.ThisWindow.refresh()
        if self.Mode == MODE_STANDARD:
            print("{:^20}".format("DELETE SCHEDULE") + "\r")
            print("\r")
            if self.ThisSchedule.GetItemCount():
                # DisplaySchedule returns the id of the highlighted item.
                self.SelectID = self.ThisSchedule.DisplaySchedule(self.SelectPos, 1)
            else:
                print("{:^20}".format("Empty") + "\r")
        elif self.Mode == MODE_CONFIRM:
            print("{:^20}".format("DELETE SCHEDULE") + "\r")
            print("\r")
            print("{:^20}".format("ARE YOU SURE?") + "\r")
            print("{:^20}".format("(4=N, 6=Y)") + "\r")
        self.ThisWindow.refresh()

    def KeysDelSchedule(self, KeyCode):
        """Navigate (1/2/3 up, 0/7/8/9 down), Enter asks to confirm,
        DEL cancels; in confirm mode 6 deletes and 4 aborts."""
        Result = KeyCode
        if self.Mode == MODE_STANDARD:
            if (KeyCode == ord("1") or KeyCode == ord("2") or KeyCode == ord("3")) and self.SelectPos > 0:
                self.SelectPos -= 1
            elif (KeyCode == ord("0") or KeyCode == ord("7") or KeyCode == ord("8") or KeyCode == ord("9")) and self.SelectPos < self.ThisSchedule.GetItemCount() - 1:
                self.SelectPos += 1
            if KeyCode == 10:
                if self.ThisSchedule.GetItemCount():
                    self.Mode = MODE_CONFIRM
                else:
                    self.SetInterfaceState(STATE_MAIN_MENU)
            if KeyCode == 127:
                self.SetInterfaceState(STATE_MAIN_MENU)
        elif self.Mode == MODE_CONFIRM:
            if KeyCode == ord("4"):
                self.SetInterfaceState(STATE_MAIN_MENU)
            elif KeyCode == ord("6"):
                self.ThisSchedule.DelSchedule(self.SelectID)
                self.SetInterfaceState(STATE_MAIN_MENU)
        return Result

    def DisplaySchedule(self):
        """Show the read-only schedule browser."""
        self.ThisWindow.clear()
        self.ThisWindow.refresh()
        if self.ThisSchedule.GetItemCount():
            self.ThisSchedule.DisplaySchedule(self.SelectPos, 2)
        else:
            print("\r")
            print("{:^20}".format("Empty") + "\r")
        self.ThisWindow.refresh()

    def KeysSchedule(self, KeyCode):
        """Schedule browser: same navigation keys; Enter exits."""
        Result = KeyCode
        if (KeyCode == ord("1") or KeyCode == ord("2") or KeyCode == ord("3")) and self.SelectPos > 0:
            self.SelectPos -= 1
        elif (KeyCode == ord("0") or KeyCode == ord("7") or KeyCode == ord("8") or KeyCode == ord("9")) and self.SelectPos < self.ThisSchedule.GetItemCount() - 1:
            self.SelectPos += 1
        elif KeyCode == 10:
            self.SetInterfaceState(STATE_MAIN_MENU)
        return Result

    def DisplaySetSystemTime(self):
        """Show the set-system-time entry form."""
        self.ThisWindow.clear()
        self.ThisWindow.refresh()
        print("{:^20}".format("SET SYSTEM TIME") + "\r")
        print(self.GetMaskedInput("####-##-## ##:##:##\r", self.InputBuffer))
        self.ThisWindow.refresh()

    def KeysSetSystemTime(self, KeyCode):
        """Collect masked digits; on Enter with a complete date-time,
        apply it via SystemTime and return to the main menu."""
        Result = KeyCode
        self.InputBuffer = self.KeyMaskedInput("####-##-## ##:##:##", self.InputBuffer, KeyCode)
        if KeyCode == 10:
            if len(self.InputBuffer) == 14:
                self.ThisSystemTime.SetSystemTime(self.GetMaskedInput("####-##-## ##:##:##", self.InputBuffer))
            self.SetInterfaceState(STATE_MAIN_MENU)
        return Result
import sys,os


class Solution():
    def reverse(self, x):
        """Reverse the decimal digits of a 32-bit signed integer.

        Returns 0 when the reversed value would overflow the signed
        32-bit range [-2**31, 2**31 - 1]; otherwise returns the reversed
        integer with the original sign.
        """
        sign = 1
        if x < 0:
            sign = -1
            x = x * -1
        # Reverse the digit string in one step instead of a char-by-char loop.
        num_rev = int(str(x)[::-1])
        # Clamp to the 32-bit signed range; note -2**31 itself is valid,
        # so the negative bound uses a strict '>' against 2**31.
        if sign == 1 and num_rev > 2 ** 31 - 1:
            return 0
        if sign == -1 and num_rev > 2 ** 31:
            return 0
        return num_rev * sign


my_sol = Solution()
# print(expr) is valid in both Python 2 and 3; the old bare print
# statement made this file a SyntaxError under Python 3.
print(my_sol.reverse(123))
__author__ = 'LIWEI240'

"""
Constants definition
"""


class Const(object):
    """Namespace class grouping project-wide constants."""

    class RetCode(object):
        """Integer return codes shared across modules."""
        OK = 0             # success
        InvalidParam = -1  # caller supplied a bad argument
        NotExist = -2      # requested item was not found
        ParseError = -3    # input could not be parsed
from modules.base_module import RanaModule
import cairo
from time import time
from math import pi


def getModule(*args, **kwargs):
    """Module factory entry point used by the Rana module loader."""
    return ClickMenu(*args, **kwargs)


class ClickMenu(RanaModule):
    """Overlay info on the map"""

    def __init__(self, *args, **kwargs):
        RanaModule.__init__(self, *args, **kwargs)
        self.lastWaypoint = "(none)"      # name of the most recent waypoint
        self.lastWaypointAddTime = 0      # epoch seconds of its creation
        self.messageLingerTime = 2        # seconds the notification shows

    def handleMessage(self, message, messageType, args):
        """On 'addWaypoint', create a waypoint via the waypoints module
        and remember it for the overlay notification."""
        if message == "addWaypoint":
            m = self.m.get("waypoints", None)
            if m is not None:
                self.lastWaypoint = m.newWaypoint()
                self.lastWaypointAddTime = time()

    def drawMapOverlay(self, cr):
        """Draw an overlay on top of the map, showing various information
        about position etc."""
        # waypoins will be done in another way, so this is disabled for the time being
        #        (x,y,w,h) = self.get('viewport')
        #
        #        dt = time() - self.lastWaypointAddTime
        #        if(dt > 0 and dt < self.messageLingerTime):
        #            self.drawNewWaypoint(cr, x+0.5*w, y+0.5*h, w*0.3)
        #        else:
        #            m = self.m.get('clickHandler', None)
        #            if(m != None):
        #                m.registerXYWH(x+0.25*w,y+0.25*h,w*0.5,h*0.5, "clickMenu:addWaypoint")

    def drawNewWaypoint(self, cr, x, y, size):
        """Draw a filled circle at (x, y) with the last waypoint's name
        centered on it, using the supplied cairo context."""
        text = self.lastWaypoint
        cr.set_font_size(200)
        extents = cr.text_extents(text)
        (w, h) = (extents[2], extents[3])  # rendered text width/height
        cr.set_source_rgb(0, 0, 0.5)
        cr.arc(x, y, size, 0, 2 * pi)
        cr.fill()
        # Center the label on the circle.
        x1 = x - 0.5 * w
        y1 = y + 0.5 * h
        border = 20
        cr.set_source_rgb(1, 1, 1)
        cr.move_to(x1, y1)
        cr.show_text(text)
        cr.fill()
""" Created on Sun Sep 17 22:06:52 2017 Based on: print_MODFLOW_inputs_res_NWT.m @author: gcng """ import numpy as np import MODFLOW_NWT_lib as mf # functions to write individual MODFLOW files import os # os functions from ConfigParser import SafeConfigParser parser = SafeConfigParser() parser.read('settings.ini') LOCAL_DIR = parser.get('settings', 'local_dir') GSFLOW_DIR = LOCAL_DIR + "/GSFLOW" sw_2005_NWT = 2 # 1 for MODFLOW-2005; 2 for MODFLOW-NWT algorithm (both can be # carried out with MODFLOW-NWT code) fl_BoundConstH = 0 # 1 for const head at high elev boundary, needed for numerical # convergence for AGU2016 poster. Maybe resolved with MODFLOW-NWT? if sw_2005_NWT == 1: # MODFLOW input files GSFLOW_indir = GSFLOW_DIR + '/inputs/MODFLOW_2005/' # MODFLOW output files GSFLOW_outdir = GSFLOW_DIR + '/outputs/MODFLOW_2005/' elif sw_2005_NWT == 2: # MODFLOW input files GSFLOW_indir = GSFLOW_DIR + '/inputs/MODFLOW_NWT/' # MODFLOW output files GSFLOW_outdir = GSFLOW_DIR + '/outputs/MODFLOW_NWT/' infile_pre = 'test2lay_py'; NLAY = 2; DZ = [100, 50] # [NLAYx1] [m] ***testing perlen_tr = 365*30 + np.ceil(365*30/4) # [d], includes leap years; ok if too long (I think, but maybe run time is longer?) 
GIS_indir = GSFLOW_DIR + '/DataToReadIn/GIS/'; fil_res_in = '' # empty string to not use restart file surfz_fil = GIS_indir + 'topo.asc' mask_fil = GIS_indir + 'basinmask_dischargept.asc' reach_fil = GIS_indir + 'reach_data.txt' segment_fil_all = [GIS_indir + 'segment_data_4A_INFORMATION_Man.csv', GIS_indir + 'segment_data_4B_UPSTREAM_Man.csv', GIS_indir + 'segment_data_4C_DOWNSTREAM_Man.csv'] if not os.path.isdir(GSFLOW_indir): os.makedirs(GSFLOW_indir) if not os.path.isdir(GSFLOW_outdir): os.makedirs(GSFLOW_outdir) mf.write_dis_MOD2_f(GSFLOW_indir, infile_pre, surfz_fil, NLAY, DZ, perlen_tr); mf.write_ba6_MOD3_2(GSFLOW_indir, infile_pre, mask_fil, fl_BoundConstH); # list this below write_dis_MOD2_f if sw_2005_NWT == 1: mf.write_lpf_MOD2_f2_2(GSFLOW_indir, infile_pre, surfz_fil, NLAY); elif sw_2005_NWT == 2: # MODFLOW-NWT files mf.write_upw_MOD2_f2_2(GSFLOW_indir, infile_pre, surfz_fil, NLAY); mf.NWT_write_file(GSFLOW_indir, infile_pre); mf.make_uzf3_f_2(GSFLOW_indir, infile_pre, surfz_fil, mask_fil); mf.make_sfr2_f_Mannings(GSFLOW_indir, infile_pre, reach_fil, segment_fil_all); # list this below write_dis_MOD2_f mf.write_OC_PCG_MOD_f(GSFLOW_indir, infile_pre, perlen_tr); mf.write_nam_MOD_f2_NWT(GSFLOW_indir, GSFLOW_outdir, infile_pre, fil_res_in, sw_2005_NWT);
""" """ __version__ = "$Id$" import EasyDialogs valid_responses = { 1:'yes', 0:'no', -1:'cancel', } response = EasyDialogs.AskYesNoCancel('Select an option') print 'You selected:', valid_responses[response]
from gi.repository import Gtk
from gi.repository import GdkPixbuf

from GTG.core.tag import ALLTASKS_TAG
from GTG.gtk.colors import get_colored_tags_markup, rgba_to_hex
from GTG.backends.backend_signals import BackendSignals


class BackendsTree(Gtk.TreeView):
    """
    Gtk.TreeView that shows the currently loaded backends.

    Rows live in a Gtk.ListStore whose columns are indexed by the
    COLUMN_* constants below; ``backendid_to_iter`` maps a backend id to
    its row iterator for quick updates.
    """

    # Liststore column indices.
    COLUMN_BACKEND_ID = 0  # never shown, used for internal lookup.
    COLUMN_ICON = 1
    COLUMN_TEXT = 2  # holds the backend "human-readable" name
    COLUMN_TAGS = 3

    def __init__(self, backendsdialog):
        """
        Constructor, just initializes the gtk widgets

        @param backendsdialog: a reference to the dialog in which this is
                               loaded
        """
        super().__init__()
        self.dialog = backendsdialog
        self.req = backendsdialog.get_requester()
        self._init_liststore()
        self._init_renderers()
        self._init_signals()
        self.refresh()

    def refresh(self):
        """refreshes the Gtk.Liststore"""
        self.backendid_to_iter = {}
        self.liststore.clear()

        # Sort backends
        # 1, put default backend on top
        # 2, sort backends by human name
        backends = list(self.req.get_all_backends(disabled=True))
        backends = sorted(backends,
                          key=lambda backend: (not backend.is_default(),
                                               backend.get_human_name()))

        for backend in backends:
            self.add_backend(backend)
            # Render enabled/disabled state for the freshly added row.
            self.on_backend_state_changed(None, backend.get_id())

    def on_backend_added(self, sender, backend_id):
        """
        Signal callback executed when a new backend is loaded

        @param sender: not used, only here to let this function be used as
                       a callback
        @param backend_id: the id of the backend to add
        """
        # Add
        backend = self.req.get_backend(backend_id)
        if not backend:
            return
        self.add_backend(backend)
        self.refresh()
        # Select
        self.select_backend(backend_id)
        # Update its enabled state
        self.on_backend_state_changed(None, backend.get_id())

    def add_backend(self, backend):
        """
        Adds a new backend row to the list

        @param backend: the backend object to add (ignored if falsy)
        """
        if backend:
            backend_iter = self.liststore.append([
                backend.get_id(),
                self.dialog.get_pixbuf_from_icon_name(backend.get_icon(), 16),
                backend.get_human_name(),
                self._get_markup_for_tags(backend.get_attached_tags()),
            ])
            # Remember the row iter so later state changes can find it.
            self.backendid_to_iter[backend.get_id()] = backend_iter

    def on_backend_state_changed(self, sender, backend_id):
        """
        Signal callback executed when a backend is enabled/disabled.

        @param sender: not used, only here to let this function be used as
                       a callback
        @param backend_id: the id of the backend whose row to update
        """
        if backend_id in self.backendid_to_iter:
            b_iter = self.backendid_to_iter[backend_id]
            b_path = self.liststore.get_path(b_iter)
            backend = self.req.get_backend(backend_id)
            backend_name = backend.get_human_name()
            if backend.is_enabled():
                text = backend_name
            else:
                # Grey out the name of a disabled backend.
                # FIXME This snippet is on more than 2 places!!!
                # FIXME create a function which takes a widget and
                # flag and returns color as #RRGGBB
                style_context = self.get_style_context()
                color = style_context.get_color(Gtk.StateFlags.INSENSITIVE)
                color = rgba_to_hex(color)
                text = f"<span color='{color}'>{backend_name}</span>"
            self.liststore[b_path][self.COLUMN_TEXT] = text

            # Also refresh the tags
            new_tags = self._get_markup_for_tags(backend.get_attached_tags())
            self.liststore[b_path][self.COLUMN_TAGS] = new_tags

    def _get_markup_for_tags(self, tag_names):
        """Given a list of tags names, generates the pango markup to render
        that list with the tag colors used in GTG

        @param tag_names: the list of the tags (strings)
        @return str: the pango markup string
        """
        # The "all tasks" pseudo-tag means no restriction, so show nothing.
        if ALLTASKS_TAG in tag_names:
            tags_txt = ""
        else:
            tags_txt = get_colored_tags_markup(self.req, tag_names)
        return "<small>" + tags_txt + "</small>"

    def remove_backend(self, backend_id):
        """
        Removes a backend from the treeview, and selects the first (to show
        something in the configuration panel

        @param backend_id: the id of the backend to remove
        """
        if backend_id in self.backendid_to_iter:
            self.liststore.remove(self.backendid_to_iter[backend_id])
            del self.backendid_to_iter[backend_id]
            self.select_backend()

    def _init_liststore(self):
        """Creates the liststore"""
        # Column types match COLUMN_BACKEND_ID/ICON/TEXT/TAGS above.
        self.liststore = Gtk.ListStore(object, GdkPixbuf.Pixbuf, str, str)
        self.set_model(self.liststore)

    def _init_renderers(self):
        """Initializes the cell renderers"""
        # We hide the columns headers
        self.set_headers_visible(False)
        # For the backend icon
        pixbuf_cell = Gtk.CellRendererPixbuf()
        tvcolumn_pixbuf = Gtk.TreeViewColumn('Icon', pixbuf_cell)
        tvcolumn_pixbuf.add_attribute(pixbuf_cell, 'pixbuf', self.COLUMN_ICON)
        self.append_column(tvcolumn_pixbuf)
        # For the backend name
        text_cell = Gtk.CellRendererText()
        tvcolumn_text = Gtk.TreeViewColumn('Name', text_cell)
        tvcolumn_text.add_attribute(text_cell, 'markup', self.COLUMN_TEXT)
        self.append_column(tvcolumn_text)
        # The name is editable in place; see cell_edited_callback.
        text_cell.connect('edited', self.cell_edited_callback)
        text_cell.set_property('editable', True)
        # For the backend tags
        tags_cell = Gtk.CellRendererText()
        tvcolumn_tags = Gtk.TreeViewColumn('Tags', tags_cell)
        tvcolumn_tags.add_attribute(tags_cell, 'markup', self.COLUMN_TAGS)
        self.append_column(tvcolumn_tags)

    def cell_edited_callback(self, text_cell, path, new_text):
        """If a backend name is changed, it saves the changes in the Backend

        @param text_cell: not used. The Gtk.CellRendererText that emitted the
                          signal. Only here because it's passed by the signal
        @param path: the Gtk.TreePath of the edited cell
        @param new_text: the new name of the backend
        """
        # we strip everything not permitted in backend names
        new_text = ''.join(c for c in new_text if (c.isalnum() or
                                                   c in [" ", "-", "_"]))
        selected_iter = self.liststore.get_iter(path)
        # update the backend name
        backend_id = self.liststore.get_value(selected_iter,
                                              self.COLUMN_BACKEND_ID)
        backend = self.dialog.get_requester().get_backend(backend_id)
        if backend:
            backend.set_human_name(new_text)
            # update the text in the liststore
            self.liststore.set(selected_iter, self.COLUMN_TEXT, new_text)

    def _init_signals(self):
        """Initializes the backends and gtk signals """
        self.connect("cursor-changed", self.on_select_row)
        _signals = BackendSignals()
        _signals.connect(_signals.BACKEND_ADDED, self.on_backend_added)
        _signals.connect(_signals.BACKEND_STATE_TOGGLED,
                         self.on_backend_state_changed)

    def on_select_row(self, treeview=None):
        """When a row is selected, displays the corresponding editing panel

        @var treeview: not used
        """
        self.dialog.on_backend_selected(self.get_selected_backend_id())

    def _get_selected_path(self):
        """
        Helper function to get the selected path

        @return Gtk.TreePath : returns exactly one path for the selected
                               object or None
        """
        selection = self.get_selection()
        if selection:
            model, selected_paths = self.get_selection().get_selected_rows()
            if selected_paths:
                return selected_paths[0]
        return None

    def select_backend(self, backend_id=None):
        """
        Selects the backend corresponding to backend_id.
        If backend_id is none, refreshes the current configuration panel.

        @param backend_id: the id of the backend to select
        """
        selection = self.get_selection()
        if backend_id in self.backendid_to_iter:
            backend_iter = self.backendid_to_iter[backend_id]
            if selection:
                selection.select_iter(backend_iter)
        else:
            if self._get_selected_path():
                # We just reselect the currently selected entry
                self.on_select_row()
            else:
                # If nothing is selected, we select the first entry
                if selection:
                    selection.select_path("0")
                    self.dialog.on_backend_selected(
                        self.get_selected_backend_id())

    def get_selected_backend_id(self):
        """
        returns the selected backend id, or none

        @return string: the selected backend id (or None)
        """
        selected_path = self._get_selected_path()
        if not selected_path:
            return None
        selected_iter = self.liststore.get_iter(selected_path)
        return self.liststore.get_value(selected_iter,
                                        self.COLUMN_BACKEND_ID)
import math
import time
from browser import doc
import browser.timer


class Point(object):
    """A 2-D point that can draw itself on an HTML5 canvas 2D context."""

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def drawMe(self, g, r):
        """Draw the point on context g as a circle of radius r with
        cross-hair lines through the center.

        BUG FIX: the original passed the undefined name ``true`` (JavaScript
        spelling) as the anticlockwise flag of arc(); Python needs ``True``.
        """
        self.g = g
        self.r = r
        self.g.save()
        self.g.moveTo(self.x, self.y)
        self.g.beginPath()
        # Circle of radius r marking the point's position.
        self.g.arc(self.x, self.y, self.r, 0, 2*math.pi, True)
        self.g.moveTo(self.x, self.y)
        self.g.lineTo(self.x + self.r, self.y)
        self.g.moveTo(self.x, self.y)
        self.g.lineTo(self.x - self.r, self.y)
        self.g.moveTo(self.x, self.y)
        self.g.lineTo(self.x, self.y + self.r)
        self.g.moveTo(self.x, self.y)
        self.g.lineTo(self.x, self.y - self.r)
        self.g.restore()
        self.g.stroke()

    def Eq(self, pt):
        """Copy the coordinates of pt into this point."""
        self.x = pt.x
        self.y = pt.y

    def setPoint(self, px, py):
        """Set the coordinates directly."""
        self.x = px
        self.y = py

    def distance(self, pt):
        """Return the Euclidean distance from this point to pt."""
        self.pt = pt
        x = self.x - self.pt.x
        y = self.y - self.pt.y
        return math.sqrt(x * x + y * y)

    def tag(self, g):
        """Label the point with its coordinates as text on context g."""
        self.g = g
        self.g.beginPath()
        self.g.fillText("%d, %d"%(self.x, self.y),self.x, self.y)
        self.g.stroke()


class Line(object):
    """A line segment between two Points; p1 is the tail, p2 the head."""

    def __init__(self, p1, p2):
        self.p1 = p1
        self.p2 = p2
        # The first point is the line's tail...
        self.Tail = self.p1
        # ...and the second point is its head.
        self.Head = self.p2
        # Length of the segment.
        self.length = math.sqrt(math.pow(self.p2.x - self.p1.x, 2) +
                                math.pow(self.p2.y - self.p1.y, 2))

    def setPP(self, p1, p2):
        """Redefine the line from two end points (head/tail/length reset)."""
        self.p1 = p1
        self.p2 = p2
        self.Tail = self.p1
        self.Head = self.p2
        self.length = math.sqrt(math.pow(self.p2.x - self.p1.x, 2) +
                                math.pow(self.p2.y - self.p1.y, 2))

    def setRT(self, r, t):
        """With the tail already fixed, place the head at polar offset
        (r, t) from the tail."""
        self.r = r
        self.t = t
        x = self.r * math.cos(self.t)
        y = self.r * math.sin(self.t)
        self.Tail.Eq(self.p1)
        self.Head.setPoint(self.Tail.x + x, self.Tail.y + y)

    def getR(self):
        """Return the length of the line from its x and y components."""
        x = self.p1.x - self.p2.x
        y = self.p1.y - self.p2.y
        return math.sqrt(x * x + y * y)

    def getT(self):
        """Return the line's angle against the positive x axis, in
        (-pi, pi] as defined by atan2(y, x)."""
        x = self.p2.x - self.p1.x
        y = self.p2.y - self.p1.y
        if (math.fabs(x) < math.pow(10, -100)):
            # Essentially vertical: return +/- pi/2 by the sign of y.
            if(y < 0.0):
                return (-math.pi/2)
            else:
                return (math.pi/2)
        else:
            return math.atan2(y, x)

    def setTail(self, pt):
        """Move the tail to pt and translate the head by (self.x, self.y).

        NOTE(review): this reads self.x/self.y, which no Line method ever
        sets — presumably callers assign them first; confirm before use.
        """
        self.pt = pt
        self.Tail.Eq(pt)
        self.Head.setPoint(self.pt.x + self.x, self.pt.y + self.y)

    def getHead(self):
        """Return the head point."""
        return self.Head

    def getTail(self):
        """Return the tail point."""
        return self.Tail

    def drawMe(self, g):
        """Stroke the segment on context g."""
        self.g = g
        self.g.beginPath()
        self.g.moveTo(self.p1.x, self.p1.y)
        self.g.lineTo(self.p2.x, self.p2.y)
        self.g.stroke()

    def test(self):
        return ("this is pure test to Inherit")


class Link(Line):
    """A mechanical link: two end holes joined by two parallel edges,
    drawn in a rotated local frame where the link lies on the y axis."""

    def __init__(self, p1, p2):
        self.p1 = p1
        self.p2 = p2
        self.length = math.sqrt(math.pow((self.p2.x - self.p1.x), 2) +
                                math.pow((self.p2.y - self.p1.y), 2))

    def drawMe(self, g):
        """Draw the link on canvas 2D context g.

        BUG FIX: the four arc() calls below passed the undefined name
        ``true``; corrected to Python ``True``.
        """
        self.g = g
        hole = 5     # radius of the pin holes at both ends
        radius = 10  # half-width of the link body
        length = self.getR()
        # Save the canvas state, then move into a local frame with the
        # origin at p1 and the link lying along the +y axis.
        self.g.save()
        self.g.translate(self.p1.x, self.p1.y)
        self.g.rotate(-math.pi*0.5 + self.getT())
        # Hole at the tail end.
        self.g.beginPath()
        self.g.moveTo(0, 0)
        self.g.arc(0, 0, hole, 0, 2*math.pi, True)
        self.g.stroke()
        # Hole at the head end.
        self.g.moveTo(0, length)
        self.g.beginPath()
        self.g.arc(0, length, hole, 0, 2*math.pi, True)
        self.g.stroke()
        # Rounded cap at the tail plus the right edge.
        self.g.moveTo(0, 0)
        self.g.beginPath()
        self.g.arc(0, 0, radius, 0, math.pi, True)
        self.g.moveTo(0 + radius, 0)
        self.g.lineTo(0 + radius, 0 + length)
        self.g.stroke()
        # Rounded cap at the head plus the left edge.
        self.g.moveTo(0, 0 + length)
        self.g.beginPath()
        self.g.arc(0, 0 + length, radius, math.pi, 0, True)
        self.g.moveTo(0 - radius, 0 + length)
        self.g.lineTo(0 - radius, 0)
        self.g.stroke()
        self.g.restore()
        # Label the head point in world coordinates.
        self.g.beginPath()
        self.g.fillStyle = "red"
        self.g.font = "bold 18px sans-serif"
        self.g.fillText("%d, %d"%(self.p2.x, self.p2.y),self.p2.x, self.p2.y)
        self.g.stroke()
        # NOTE(review): second restore() after a single save() — kept from
        # the original; confirm whether it is intentional.
        self.g.restore()


class Triangle(object):
    """A triangle on vertices p1, p2, p3.  Side ``lenpX`` is the side
    opposite vertex pX; angle ``apX`` is the interior angle at pX."""

    def __init__(self, p1, p2, p3):
        self.p1 = p1
        self.p2 = p2
        self.p3 = p3

    def getLenp3(self):
        """Length of the side opposite p3 (segment p1-p2)."""
        p1 = self.p1
        ret = p1.distance(self.p2)
        return ret

    def getLenp1(self):
        """Length of the side opposite p1 (segment p2-p3)."""
        p2 = self.p2
        ret = p2.distance(self.p3)
        return ret

    def getLenp2(self):
        """Length of the side opposite p2 (segment p1-p3)."""
        p1 = self.p1
        ret = p1.distance(self.p3)
        return ret

    # Interior angles via the law of cosines.
    def getAp1(self):
        """Interior angle at vertex p1."""
        ret = math.acos(((self.getLenp2() * self.getLenp2() +
                          self.getLenp3() * self.getLenp3()) -
                         self.getLenp1() * self.getLenp1()) /
                        (2 * self.getLenp2() * self.getLenp3()))
        return ret

    def getAp2(self):
        """Interior angle at vertex p2."""
        ret = math.acos(((self.getLenp1() * self.getLenp1() +
                          self.getLenp3() * self.getLenp3()) -
                         self.getLenp2() * self.getLenp2()) /
                        (2 * self.getLenp1() * self.getLenp3()))
        return ret

    def getAp3(self):
        """Interior angle at vertex p3."""
        ret = math.acos(((self.getLenp1() * self.getLenp1() +
                          self.getLenp2() * self.getLenp2()) -
                         self.getLenp3() * self.getLenp3()) /
                        (2 * self.getLenp1() * self.getLenp2()))
        return ret

    def drawMe(self, g):
        """Draw the three vertices and the three sides on context g."""
        self.g = g
        r = 5
        # Draw the three vertices.
        self.p1.drawMe(self.g, r)
        self.p2.drawMe(self.g, r)
        self.p3.drawMe(self.g, r)
        line1 = Line(self.p1, self.p2)
        line2 = Line(self.p1, self.p3)
        line3 = Line(self.p2, self.p3)
        # Draw the three sides.
        line1.drawMe(self.g)
        line2.drawMe(self.g)
        line3.drawMe(self.g)

    def setSSS(self, lenp3, lenp1, lenp2):
        """Define the triangle by its three side lengths; the three
        interior angles follow from the law of cosines."""
        self.lenp3 = lenp3
        self.lenp1 = lenp1
        self.lenp2 = lenp2
        self.ap1 = math.acos(((self.lenp2 * self.lenp2 +
                               self.lenp3 * self.lenp3) -
                              self.lenp1 * self.lenp1) /
                             (2 * self.lenp2 * self.lenp3))
        self.ap2 = math.acos(((self.lenp1 * self.lenp1 +
                               self.lenp3 * self.lenp3) -
                              self.lenp2 * self.lenp2) /
                             (2 * self.lenp1 * self.lenp3))
        self.ap3 = math.acos(((self.lenp1 * self.lenp1 +
                               self.lenp2 * self.lenp2) -
                              self.lenp3 * self.lenp3) /
                             (2 * self.lenp1 * self.lenp2))

    def setSAS(self, lenp3, ap2, lenp1):
        """Define the triangle by two sides and the included angle; the
        third side follows from the law of cosines."""
        self.lenp3 = lenp3
        self.ap2 = ap2
        self.lenp1 = lenp1
        self.lenp2 = math.sqrt((self.lenp3 * self.lenp3 +
                                self.lenp1 * self.lenp1) -
                               2 * self.lenp3 * self.lenp1 * math.cos(self.ap2))

    def setSaSS(self, lenp2, lenp3, lenp1):
        """Return the angle at p1 given three side lengths, handling the
        two degenerate collinear cases.  Equivalent to SSS(AB, BC, CA)."""
        self.lenp2 = lenp2
        self.lenp3 = lenp3
        self.lenp1 = lenp1
        if(self.lenp1 > (self.lenp2 + self.lenp3)):
            # Angle CAB is 180 degrees: collinear with A between B and C.
            ret = math.pi
        else :
            if((self.lenp1 < (self.lenp2 - self.lenp3)) or
               (self.lenp1 < (self.lenp3 - self.lenp2))):
                # Angle CAB is 0: collinear with A outside segment BC.
                ret = 0.0
            else :
                # Law of cosines for angle CAB.
                ret = math.acos(((self.lenp2 * self.lenp2 +
                                  self.lenp3 * self.lenp3) -
                                 self.lenp1 * self.lenp1) /
                                (2 * self.lenp2 * self.lenp3))
        return ret

    def getSSS(self):
        """Return the three side lengths [lenp1, lenp2, lenp3]."""
        temp = []
        temp.append( self.getLenp1() )
        temp.append( self.getLenp2() )
        temp.append( self.getLenp3() )
        return temp

    def getAAA(self):
        """Return the three interior angles [ap1, ap2, ap3]."""
        temp = []
        temp.append( self.getAp1() )
        temp.append( self.getAp2() )
        temp.append( self.getAp3() )
        return temp

    def getASASAS(self):
        """Return alternating angles and sides [ap1, lenp1, ap2, lenp2,
        ap3, lenp3]."""
        temp = []
        temp.append(self.getAp1())
        temp.append(self.getLenp1())
        temp.append(self.getAp2())
        temp.append(self.getLenp2())
        temp.append(self.getAp3())
        temp.append(self.getLenp3())
        return temp

    def setPPSS(self, p1, p3, lenp1, lenp3):
        """Given the two fixed points p1 and p3 and the two side lengths
        lenp1 and lenp3, return [x, y] of the middle point p2
        (two-link inverse kinematics)."""
        temp = []
        self.p1 = p1
        self.p3 = p3
        self.lenp1 = lenp1
        self.lenp3 = lenp3
        # bp3 is the angle beside p3 point, cp3 is the angle for line23,
        # p2 is the output.
        line31 = Line(p3, p1)
        self.lenp2 = line31.getR()
        # Angle at p3 via the law of cosines.
        ap3 = math.acos(((self.lenp1 * self.lenp1 +
                          self.lenp2 * self.lenp2) -
                         self.lenp3 * self.lenp3) /
                        (2 * self.lenp1 * self.lenp2))
        bp3 = line31.getT()
        cp3 = bp3 - ap3
        temp.append(p3.x + self.lenp1*math.cos(cp3))  # p2.x
        temp.append(p3.y + self.lenp1*math.sin(cp3))  # p2.y
        return temp


def tag(g, p):
    # Placeholder callback: intentionally does nothing.
    None


def draw():
    """Animation step: advance the crank angle, recompute the linkage and
    redraw all links."""
    global theta
    context.clearRect(0, 0, canvas.width, canvas.height)
    line1.drawMe(context)
    line2.drawMe(context)
    line3.drawMe(context)
    theta += dx
    # Crank: p2 rotates around the fixed pivot p1.
    p2.x = p1.x + line1.length*math.cos(theta*degree)
    p2.y = p1.y - line1.length*math.sin(theta*degree)
    # Coupler point p3 follows from the two fixed link lengths.
    p3.x, p3.y = triangle2.setPPSS(p2, p4, link2_len, link3_len)
    p1.tag(context)


# --- scene constants and four-bar linkage set-up -----------------------
x = 10
y = 10
r = 10
theta = 0                 # crank angle in degrees, advanced by dx per frame
degree = math.pi/180.0
dx = 2
dy = 4
p1 = Point(150, 100)      # fixed pivot of the crank
p2 = Point(150, 200)      # crank tip
p3 = Point(300, 300)      # coupler point (recomputed each frame)
p4 = Point(350, 100)      # second fixed pivot
line1 = Link(p1, p2)
line2 = Link(p2, p3)
line3 = Link(p3, p4)
line4 = Link(p1, p4)
line5 = Link(p2, p4)
# Link lengths stay constant while the mechanism moves.
link2_len = p2.distance(p3)
link3_len = p3.distance(p4)
triangle1 = Triangle(p1, p2, p4)
triangle2 = Triangle(p2, p3, p4)
canvas = doc["plotarea"]
context = canvas.getContext("2d")
# Flip the y axis so the origin sits at the bottom-left corner.
context.translate(0, canvas.height)
context.scale(1, -1)
# Redraw every 10 ms.
browser.timer.set_interval(draw, 10)
import time timeformat='%H:%M:%S' def begin_banner(): print '' print '[*] swarm starting at '+time.strftime(timeformat,time.localtime()) print '' def end_banner(): print '' print '[*] swarm shutting down at '+time.strftime(timeformat,time.localtime()) print ''
""" <div id="content"> <div style="text-align:center;" class="print"><img src="images/print_page_logo.png" alt="projecteuler.net" style="border:none;" /></div> <h2>Number letter counts</h2><div id="problem_info" class="info"><h3>Problem 17</h3><span>Published on Friday, 17th May 2002, 06:00 pm; Solved by 88413; Difficulty rating: 5%</span></div> <div class="problem_content" role="problem"> <p>If the numbers 1 to 5 are written out in words: one, two, three, four, five, then there are 3 + 3 + 5 + 4 + 4 = 19 letters used in total.</p> <p>If all the numbers from 1 to 1000 (one thousand) inclusive were written out in words, how many letters would be used? </p> <br /> <p class="note"><b>NOTE:</b> Do not count spaces or hyphens. For example, 342 (three hundred and forty-two) contains 23 letters and 115 (one hundred and fifteen) contains 20 letters. The use of "and" when writing out numbers is in compliance with British usage.</p> </div><br /> <br /></div> """ s={0:"",1:"one",2:"two",3:"three",4:"four",5:"five",6:"six",7:"seven",8:"eight",9:"nine",10:"ten",11:"eleven",12:"twelve",13:"thirteen",14:"fourteen",15:"fifteen",16:"sixteen",17:"seventeen",18:"eighteen",19:"nineteen",20:"twenty",30:"thirty",40:"forty",50:"fifty",60:"sixty",70:"seventy",80:"eighty",90:"ninety"} for i in range(1,1000): if(not i in s.keys()): if(i<100): s[i]=s[i/10*10]+s[i%10] else: s[i]=s[i/100]+"hundred" if(i%100): s[i]+="and"+s[i%100] s[1000]="onethousand" total=0; for i in s.values(): total+=len(i) print total
import gammalib
import cscripts
from testing import test


class Test(test):
    """
    Test class for csiactobs script

    This test class makes unit tests for the csiactobs script by using it
    from the command line and from Python.
    """

    # Constructor
    def __init__(self):
        """
        Constructor
        """
        # Call base class constructor
        test.__init__(self)

        # Set data members: IACT data store and run list shipped with the
        # test data directory.
        self._datapath = self._datadir + '/iactdata'
        self._runlist = self._datadir + '/iact_runlist.dat'

        # Return
        return

    # Set test functions
    def set(self):
        """
        Set all test functions
        """
        # Set test name
        self.name('csiactobs')

        # Append tests
        self.append(self._test_cmd, 'Test csiactobs on command line')
        self.append(self._test_python, 'Test csiactobs from Python')

        # Return
        return

    # Test csiactobs on command line
    def _test_cmd(self):
        """
        Test csiactobs on the command line
        """
        # Set script name
        csiactobs = self._script('csiactobs')

        # Setup csiactobs command
        cmd = csiactobs+' datapath="'+self._datapath+'"'+ \
                        ' prodname="unit-test"'+ \
                        ' infile="'+self._runlist+'"'+ \
                        ' bkgpars=1'+\
                        ' outobs="csiactobs_obs_cmd1.xml"'+ \
                        ' outmodel="csiactobs_bgd_cmd1.xml"'+ \
                        ' logfile="csiactobs_cmd1.log" chatter=1'

        # Check if execution was successful
        self.test_assert(self._execute(cmd) == 0,
                         'Check successful execution from command line')

        # Check observation definition XML file
        self._check_obsdef('csiactobs_obs_cmd1.xml', 6)

        # Check model definition XML file
        self._check_moddef('csiactobs_bgd_cmd1.xml', 6)

        # Setup csiactobs command with a datapath that does not exist
        # NOTE(review): "debug=yes" appears twice below — presumably a
        # copy-paste slip; the duplicate is harmless.
        cmd = csiactobs+' datapath="data_path_that_does_not_exist"'+ \
                        ' prodname="unit-test"'+ \
                        ' infile="'+self._runlist+'"'+ \
                        ' bkgpars=1'+\
                        ' outobs="csiactobs_obs_cmd2.xml"'+ \
                        ' outmodel="csiactobs_bgd_cmd2.xml"'+ \
                        ' logfile="csiactobs_cmd2.log" debug=yes debug=yes'+ \
                        ' chatter=1'

        # Check if execution failed
        self.test_assert(self._execute(cmd, success=False) != 0,
                         'Check invalid input datapath when executed from command line')

        # Setup csiactobs command with an unknown production name
        cmd = csiactobs+' datapath="'+self._datapath+'"'+ \
                        ' prodname="unit-test-doesnt-exist"'+ \
                        ' infile="'+self._runlist+'"'+ \
                        ' bkgpars=1'+\
                        ' outobs="csiactobs_obs_cmd3.xml"'+ \
                        ' outmodel="csiactobs_bgd_cmd3.xml"'+ \
                        ' logfile="csiactobs_cmd3.log" debug=yes debug=yes'+ \
                        ' chatter=1'

        # Check if execution failed
        self.test_assert(self._execute(cmd, success=False) != 0,
                         'Check invalid input prodname when executed from command line')

        # Check csiactobs --help
        self._check_help(csiactobs)

        # Return
        return

    # Test csiactobs from Python
    def _test_python(self):
        """
        Test csiactobs from Python
        """
        # Allocate empty csiactobs script
        iactobs = cscripts.csiactobs()

        # Check that empty csiactobs script has an empty observation container
        # and energy boundaries
        self.test_value(iactobs.obs().size(), 0,
                        'Check that empty csiactobs has an empty observation container')
        self.test_value(iactobs.ebounds().size(), 0,
                        'Check that empty csiactobs has empty energy bins')

        # Check that saving saves an empty model definition file
        iactobs['outobs'] = 'csiactobs_obs_py0.xml'
        iactobs['outmodel'] = 'csiactobs_bgd_py0.xml'
        iactobs['logfile'] = 'csiactobs_py0.log'
        iactobs.logFileOpen()
        iactobs.save()

        # Check empty observation definition XML file
        self._check_obsdef('csiactobs_obs_py0.xml', 0)

        # Check empty model definition XML file
        self._check_moddef('csiactobs_bgd_py0.xml', 0)

        # Check that clearing does not lead to an exception or segfault
        #iactobs.clear()

        # Set-up csiactobs
        iactobs = cscripts.csiactobs()
        iactobs['datapath'] = self._datapath
        iactobs['prodname'] = 'unit-test'
        iactobs['infile'] = self._runlist
        iactobs['bkgpars'] = 1
        iactobs['outobs'] = 'csiactobs_obs_py1.xml'
        iactobs['outmodel'] = 'csiactobs_bgd_py1.xml'
        iactobs['logfile'] = 'csiactobs_py1.log'
        iactobs['chatter'] = 2

        # Run csiactobs script and save run list
        iactobs.logFileOpen()   # Make sure we get a log file
        iactobs.run()
        iactobs.save()

        # Check observation definition XML file
        self._check_obsdef('csiactobs_obs_py1.xml', 6)

        # Check model definition XML file
        self._check_moddef('csiactobs_bgd_py1.xml', 6)

        # Create test runlist
        runlist = ['15000','15001']

        # Set-up csiactobs using a runlist with 2 background parameters
        iactobs = cscripts.csiactobs()
        iactobs['datapath'] = self._datapath
        iactobs['prodname'] = 'unit-test'
        iactobs['bkgpars'] = 2
        iactobs['outobs'] = 'csiactobs_obs_py2.xml'
        iactobs['outmodel'] = 'csiactobs_bgd_py2.xml'
        iactobs['logfile'] = 'csiactobs_py2.log'
        iactobs['chatter'] = 3
        iactobs.runlist(runlist)

        # Run csiactobs script and save run list
        iactobs.logFileOpen()   # Make sure we get a log file
        iactobs.run()
        iactobs.save()

        # Test return functions
        self.test_value(iactobs.obs().size(), 2,
                        'Check number of observations in container')
        self.test_value(iactobs.ebounds().size(), 0,
                        'Check number of energy boundaries')

        # Check observation definition XML file
        self._check_obsdef('csiactobs_obs_py2.xml',2)

        # Check model definition XML file
        self._check_moddef('csiactobs_bgd_py2.xml',2)

        # Set-up csiactobs with a large number of free parameters and "aeff"
        # background
        iactobs = cscripts.csiactobs()
        iactobs['datapath'] = self._datapath
        iactobs['prodname'] = 'unit-test'
        iactobs['infile'] = self._runlist
        iactobs['bkgpars'] = 8
        iactobs['bkg_mod_hiera'] = 'aeff'
        iactobs['outobs'] = 'csiactobs_obs_py3.xml'
        iactobs['outmodel'] = 'csiactobs_bgd_py3.xml'
        iactobs['logfile'] = 'csiactobs_py3.log'
        iactobs['chatter'] = 4

        # Execute csiactobs script
        iactobs.execute()

        # Check observation definition XML file
        self._check_obsdef('csiactobs_obs_py3.xml',6)

        # Check model definition XML file
        self._check_moddef('csiactobs_bgd_py3.xml',6)

        # Set-up csiactobs with a "gauss" background and "inmodel" parameter
        iactobs = cscripts.csiactobs()
        iactobs['datapath'] = self._datapath
        iactobs['inmodel'] = self._model
        iactobs['prodname'] = 'unit-test'
        iactobs['infile'] = self._runlist
        iactobs['bkgpars'] = 1
        iactobs['bkg_mod_hiera'] = 'gauss'
        iactobs['outobs'] = 'NONE'
        iactobs['outmodel'] = 'NONE'
        iactobs['logfile'] = 'csiactobs_py4.log'
        iactobs['chatter'] = 4

        # Run csiactobs script
        iactobs.logFileOpen()   # Make sure we get a log file
        iactobs.run()

        # Check number of observations
        self.test_value(iactobs.obs().size(), 6,
                        'Check number of observations in container')

        # Check number of models
        self.test_value(iactobs.obs().models().size(), 8,
                        'Check number of models in container')

        # Set-up csiactobs with an "irf" background and "inmodel" parameter
        # NOTE(review): this run reuses the 'csiactobs_py4.log' file name
        # from the previous run, overwriting its log — confirm intended.
        iactobs = cscripts.csiactobs()
        iactobs['datapath'] = self._datapath
        iactobs['inmodel'] = self._model
        iactobs['prodname'] = 'unit-test'
        iactobs['infile'] = self._runlist
        iactobs['bkgpars'] = 1
        iactobs['bkg_mod_hiera'] = 'irf'
        iactobs['outobs'] = 'NONE'
        iactobs['outmodel'] = 'NONE'
        iactobs['logfile'] = 'csiactobs_py4.log'
        iactobs['chatter'] = 4

        # Run csiactobs script
        iactobs.logFileOpen()   # Make sure we get a log file
        iactobs.run()

        # Check number of observations
        self.test_value(iactobs.obs().size(), 5,
                        'Check number of observations in container')

        # Check number of models
        self.test_value(iactobs.obs().models().size(), 7,
                        'Check number of models in container')

        # Return
        return

    # Check observation definition XML file
    def _check_obsdef(self, filename, obs_expected):
        """
        Check observation definition XML file

        @param filename: name of the observation definition XML file
        @param obs_expected: expected number of observations in the file
        """
        # Load observation definition XML file
        obs = gammalib.GObservations(filename)

        # Check number of observations
        self.test_value(obs.size(), obs_expected,
                        'Check for '+str(obs_expected)+' observations in XML file')

        # If there are observations in the XML file then check their content
        if obs_expected > 0:

            # Get response
            rsp = obs[0].response()

            # Test response: event file plus the four IRF components of the
            # first observation must point at the expected files/extensions.
            self.test_value(obs[0].eventfile().file(),
                            'events_0.fits.gz',
                            'Check event file name')
            self.test_value(obs[0].eventfile().extname(),
                            'EVENTS',
                            'Check event extension name')
            self.test_value(rsp.aeff().filename().file(),
                            'irf_file.fits.gz',
                            'Check effective area file name')
            self.test_value(rsp.aeff().filename().extname(),
                            'EFFECTIVE AREA',
                            'Check effective area extension name')
            self.test_value(rsp.psf().filename().file(),
                            'irf_file.fits.gz',
                            'Check point spread function file name')
            self.test_value(rsp.psf().filename().extname(),
                            'POINT SPREAD FUNCTION',
                            'Check point spread function extension name')
            self.test_value(rsp.edisp().filename().file(),
                            'irf_file.fits.gz',
                            'Check energy dispersion file name')
            self.test_value(rsp.edisp().filename().extname(),
                            'ENERGY DISPERSION',
                            'Check energy dispersion extension name')
            self.test_value(rsp.background().filename().file(),
                            'irf_file.fits.gz',
                            'Check background file name')
            self.test_value(rsp.background().filename().extname(),
                            'BACKGROUND',
                            'Check background extension name')

        # Return
        return

    # Check model XML file
    def _check_moddef(self, filename, models_expected):
        """
        Check model definition XML file

        @param filename: name of the model definition XML file
        @param models_expected: expected number of models in the file
        """
        # Load model definition XML file
        models = gammalib.GModels(filename)

        # Check number of models
        self.test_value(models.size(), models_expected,
                        'Check for '+str(models_expected)+' models in XML file')

        # Return
        return
r""" Simulation of standard multiple stochastic integrals, both Ito and Stratonovich I_{ij}(t) = \int_{0}^{t}\int_{0}^{s} dW_i(u) dW_j(s) (Ito) J_{ij}(t) = \int_{0}^{t}\int_{0}^{s} \circ dW_i(u) \circ dW_j(s) (Stratonovich) These multiple integrals I and J are important building blocks that will be used by most of the higher-order algorithms that integrate multi-dimensional SODEs. We first implement the method of Kloeden, Platen and Wright (1992) to approximate the integrals by the first n terms from the series expansion of a Brownian bridge process. By default using n=5. Finally we implement the method of Wiktorsson (2001) which improves on the previous method by also approximating the tail-sum distribution by a multivariate normal distribution. References: P. Kloeden, E. Platen and I. Wright (1992) The approximation of multiple stochastic integrals M. Wiktorsson (2001) Joint Characteristic Function and Simultaneous Simulation of Iterated Ito Integrals for Multiple Independent Brownian Motions """ import numpy as np numpy_version = list(map(int, np.version.short_version.split('.'))) if numpy_version >= [1,10,0]: broadcast_to = np.broadcast_to else: from ._broadcast import broadcast_to def deltaW(N, m, h): """Generate sequence of Wiener increments for m independent Wiener processes W_j(t) j=0..m-1 for each of N time intervals of length h. Returns: dW (array of shape (N, m)): The [n, j] element has the value W_j((n+1)*h) - W_j(n*h) """ return np.random.normal(0.0, np.sqrt(h), (N, m)) def _t(a): """transpose the last two axes of a three axis array""" return a.transpose((0, 2, 1)) def _dot(a, b): r""" for rank 3 arrays a and b, return \sum_k a_ij^k . b_ik^l (no sum on i) i.e. 
This is just normal matrix multiplication at each point on first axis """ return np.einsum('ijk,ikl->ijl', a, b) def _Aterm(N, h, m, k, dW): """kth term in the sum of Wiktorsson2001 equation (2.2)""" sqrt2h = np.sqrt(2.0/h) Xk = np.random.normal(0.0, 1.0, (N, m, 1)) Yk = np.random.normal(0.0, 1.0, (N, m, 1)) term1 = _dot(Xk, _t(Yk + sqrt2h*dW)) term2 = _dot(Yk + sqrt2h*dW, _t(Xk)) return (term1 - term2)/k def Ikpw(dW, h, n=5): """matrix I approximating repeated Ito integrals for each of N time intervals, based on the method of Kloeden, Platen and Wright (1992). Args: dW (array of shape (N, m)): giving m independent Weiner increments for each time step N. (You can make this array using sdeint.deltaW()) h (float): the time step size n (int, optional): how many terms to take in the series expansion Returns: (A, I) where A: array of shape (N, m, m) giving the Levy areas that were used. I: array of shape (N, m, m) giving an m x m matrix of repeated Ito integral values for each of the N time intervals. """ N = dW.shape[0] m = dW.shape[1] if dW.ndim < 3: dW = dW.reshape((N, -1, 1)) # change to array of shape (N, m, 1) if dW.shape[2] != 1 or dW.ndim > 3: raise(ValueError) A = _Aterm(N, h, m, 1, dW) for k in range(2, n+1): A += _Aterm(N, h, m, k, dW) A = (h/(2.0*np.pi))*A I = 0.5*(_dot(dW, _t(dW)) - np.diag(h*np.ones(m))) + A dW = dW.reshape((N, -1)) # change back to shape (N, m) return (A, I) def Jkpw(dW, h, n=5): """matrix J approximating repeated Stratonovich integrals for each of N time intervals, based on the method of Kloeden, Platen and Wright (1992). Args: dW (array of shape (N, m)): giving m independent Weiner increments for each time step N. (You can make this array using sdeint.deltaW()) h (float): the time step size n (int, optional): how many terms to take in the series expansion Returns: (A, J) where A: array of shape (N, m, m) giving the Levy areas that were used. 
        J: array of shape (N, m, m) giving an m x m matrix of repeated
           Stratonovich integral values for each of the N time intervals.
    """
    m = dW.shape[1]
    A, I = Ikpw(dW, h, n)
    # Ito -> Stratonovich conversion: only the diagonal differs, by h/2.
    J = I + 0.5*h*np.eye(m).reshape((1, m, m))
    return (A, J)


def _vec(A):
    """ Linear operator _vec() from Wiktorsson2001 p478

    Args:
      A: a rank 3 array of shape N x m x n, giving a matrix A[j] for each
        interval of time j in 0..N-1

    Returns:
      array of shape N x mn x 1, made by stacking the columns of matrix A[j]
      on top of each other, for each j in 0..N-1
    """
    N, m, n = A.shape
    # order='F' (column-major) stacks columns, matching the vec() operator.
    return A.reshape((N, m*n, 1), order='F')


def _unvec(vecA, m=None):
    """inverse of _vec() operator"""
    N = vecA.shape[0]
    if m is None:
        # Recover m assuming a square matrix was vec'd (shape[1] == m*m):
        # sqrt(m^2 + 0.25) truncates to m for any positive integer m.
        m = np.sqrt(vecA.shape[1] + 0.25).astype(np.int64)
    return vecA.reshape((N, m, -1), order='F')


def _kp(a, b):
    """Special case Kronecker tensor product of a[i] and b[i] at each
    time interval i for i = 0 .. N-1
    It is specialized for the case where both a and b are shape N x m x 1
    """
    if a.shape != b.shape or a.shape[-1] != 1:
        raise(ValueError)
    N = a.shape[0]
    # take the outer product over the last two axes, then reshape:
    return np.einsum('ijk,ilk->ijkl', a, b).reshape(N, -1, 1)


def _kp2(A, B):
    """Special case Kronecker tensor product of A[i] and B[i] at each
    time interval i for i = 0 .. N-1
    Specialized for the case A and B rank 3 with A.shape[0]==B.shape[0]
    """
    N = A.shape[0]
    if B.shape[0] != N:
        raise(ValueError)
    newshape1 = A.shape[1]*B.shape[1]
    return np.einsum('ijk,ilm->ijlkm', A, B).reshape(N, newshape1, -1)


def _P(m):
    """Returns m^2 x m^2 permutation matrix that swaps rows i and j where
    j = 1 + m((i - 1) mod m) + (i - 1) div m, for i = 1 .. m^2
    """
    P = np.zeros((m**2,m**2), dtype=np.int64)
    for i in range(1, m**2 + 1):
        j = 1 + m*((i - 1) % m) + (i - 1)//m
        P[i-1, j-1] = 1
    return P


def _K(m):
    """ matrix K_m from Wiktorsson2001 """
    M = m*(m - 1)//2
    K = np.zeros((M, m**2), dtype=np.int64)
    row = 0
    # Each j places an (m-j) identity block selecting the strictly
    # lower-triangular entries.
    for j in range(1, m):
        col = (j - 1)*m + j
        s = m - j
        K[row:(row+s), col:(col+s)] = np.eye(s)
        row += s
    return K


def _AtildeTerm(N, h, m, k, dW, Km0, Pm0):
    """kth term in the sum for Atilde (Wiktorsson2001 p481, 1st eqn)"""
    M = m*(m-1)//2
    # Fresh independent standard normals for each term k.
    Xk = np.random.normal(0.0, 1.0, (N, m, 1))
    Yk = np.random.normal(0.0, 1.0, (N, m, 1))
    factor1 = np.dot(Km0, Pm0 - np.eye(m**2))
    factor1 = broadcast_to(factor1, (N, M, m**2))
    factor2 = _kp(Yk + np.sqrt(2.0/h)*dW, Xk)
    return _dot(factor1, factor2)/k


def _sigmainf(N, h, m, dW, Km0, Pm0):
    r"""Asymptotic covariance matrix \Sigma_\infty Wiktorsson2001 eqn (4.5)"""
    M = m*(m-1)//2
    Im = broadcast_to(np.eye(m), (N, m, m))
    IM = broadcast_to(np.eye(M), (N, M, M))
    Ims0 = np.eye(m**2)
    factor1 = broadcast_to((2.0/h)*np.dot(Km0, Ims0 - Pm0), (N, M, m**2))
    factor2 = _kp2(Im, _dot(dW, _t(dW)))
    factor3 = broadcast_to(np.dot(Ims0 - Pm0, Km0.T), (N, m**2, M))
    return 2*IM + _dot(_dot(factor1, factor2), factor3)


def _a(n):
    r""" \sum_{n+1}^\infty 1/k^2 """
    # pi^2/6 is the full Basel sum; subtract the first n terms.
    return np.pi**2/6.0 - sum(1.0/k**2 for k in range(1, n+1))


def Iwik(dW, h, n=5):
    """matrix I approximating repeated Ito integrals for each of N
    time intervals, using the method of Wiktorsson (2001).

    Args:
      dW (array of shape (N, m)): giving m independent Weiner increments for
        each time step N. (You can make this array using sdeint.deltaW())
      h (float): the time step size
      n (int, optional): how many terms to take in the series expansion

    Returns:
      (Atilde, I) where
        Atilde: array of shape (N,m(m-1)//2,1) giving the area integrals used.
        I: array of shape (N, m, m) giving an m x m matrix of repeated Ito
        integral values for each of the N time intervals.
    """
    N = dW.shape[0]
    m = dW.shape[1]
    if dW.ndim < 3:
        dW = dW.reshape((N, -1, 1)) # change to array of shape (N, m, 1)
    if dW.shape[2] != 1 or dW.ndim > 3:
        raise(ValueError)
    if m == 1:
        # Single Wiener process: I_11 = (dW^2 - h)/2 exactly; no areas needed.
        return (np.zeros((N, 1, 1)), (dW*dW - h)/2.0)
    Pm0 = _P(m)
    Km0 = _K(m)
    M = m*(m-1)//2
    # Partial sum of the series for the Levy areas.
    Atilde_n = _AtildeTerm(N, h, m, 1, dW, Km0, Pm0)
    for k in range(2, n+1):
        Atilde_n += _AtildeTerm(N, h, m, k, dW, Km0, Pm0)
    Atilde_n = (h/(2.0*np.pi))*Atilde_n # approximation after n terms
    S = _sigmainf(N, h, m, dW, Km0, Pm0)
    normdW2 = np.sum(np.abs(dW)**2, axis=1)
    radical = np.sqrt(1.0 + normdW2/h).reshape((N, 1, 1))
    IM = broadcast_to(np.eye(M), (N, M, M))
    Im = broadcast_to(np.eye(m), (N, m, m))
    Ims0 = np.eye(m**2)
    sqrtS = (S + 2.0*radical*IM)/(np.sqrt(2.0)*(1.0 + radical))
    G = np.random.normal(0.0, 1.0, (N, M, 1))
    # Gaussian correction approximating the truncated tail terms k > n.
    tailsum = h/(2.0*np.pi)*_a(n)**0.5*_dot(sqrtS, G)
    Atilde = Atilde_n + tailsum # our final approximation of the areas
    factor3 = broadcast_to(np.dot(Ims0 - Pm0, Km0.T), (N, m**2, M))
    vecI = 0.5*(_kp(dW, dW) - _vec(h*Im)) + _dot(factor3, Atilde)
    I = _unvec(vecI)
    dW = dW.reshape((N, -1)) # change back to shape (N, m)
    return (Atilde, I)


def Jwik(dW, h, n=5):
    """matrix J approximating repeated Stratonovich integrals for each of N
    time intervals, using the method of Wiktorsson (2001).

    Args:
      dW (array of shape (N, m)): giving m independent Weiner increments for
        each time step N. (You can make this array using sdeint.deltaW())
      h (float): the time step size
      n (int, optional): how many terms to take in the series expansion

    Returns:
      (Atilde, J) where
        Atilde: array of shape (N,m(m-1)//2,1) giving the area integrals used.
        J: array of shape (N, m, m) giving an m x m matrix of repeated
        Stratonovich integral values for each of the N time intervals.
    """
    m = dW.shape[1]
    Atilde, I = Iwik(dW, h, n)
    # Ito -> Stratonovich conversion: only the diagonal differs, by h/2.
    J = I + 0.5*h*np.eye(m).reshape((1, m, m))
    return (Atilde, J)
content_template = """<?xml version="1.0" encoding="utf-8"?> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <title></title> </head> <body> </body> </html>""" toc_ncx = u"""<?xml version="1.0" encoding="utf-8"?> <!DOCTYPE ncx PUBLIC "-//NISO//DTD ncx 2005-1//EN" "http://www.daisy.org/z3986/2005/ncx-2005-1.dtd"> <ncx xmlns="http://www.daisy.org/z3986/2005/ncx/" version="2005-1"> <head> <meta name="dtb:uid" content="{book.uuid}" /> <meta name="dtb:depth" content="{book.toc_root.maxlevel}" /> <meta name="dtb:totalPageCount" content="0" /> <meta name="dtb:maxPageNumber" content="0" /> </head> <docTitle> <text>{book.title}</text> </docTitle> {navmap} </ncx>""" container_xml = """<?xml version="1.0" encoding="UTF-8" standalone="no"?> <container xmlns="urn:oasis:names:tc:opendocument:xmlns:container" version="1.0"> <rootfiles> <rootfile full-path="OEBPS/content.opf" media-type="application/oebps-package+xml"/> </rootfiles> </container> """
import logging from scap.model.oval_5.defs.windows.TestType import TestType logger = logging.getLogger(__name__) class Process58TestElement(TestType): MODEL_MAP = { 'tag_name': 'process58_test', }
def main(): """Instantiate a DockerStats object and collect stats.""" print('Docker Service Module') if __name__ == '__main__': main()
import capstone import _any_capstone dis = capstone.Cs(capstone.CS_ARCH_ARM, capstone.CS_MODE_ARM) def PROCESSOR_ENTRY(): return _any_capstone.Processor("arm_32", dis)
"""Perform preprocessing and generate raytrace exec scripts for one focal plane. For documentation using the python_control for ImSim/PhoSim version <= v.3.0.x, see README.v3.0.x.txt. For documentation using the python_control for ImSim/PhoSim version == v.3.2.x, see README.txt. The behavior of this script differs depending on the version of ImSim/PhoSim. For versions <= v3.0.x, it functions like the original fullFocalplane.py and calls AllChipsScriptGenerator.makeScripts() to generate a script and some tarballs that can in turn be executed to run the preprocessing step (which in turn calls AllChipsScriptGenerator) to generate shells scripts and tarballs for performing the raytrace stage. See README.v3.0.x.txt for more info. The behavior for ImSim/PhoSim version == 3.2.x is to run the preprocessing step directly through the class PhosimManager.PhosimPrepreprocessor (which in turn calls phosim.py in the phosin.git repository). After the preprocessing is complete, PhosimPreprocessor generates shell scripts for the raytrace phase. A few notes on options: --skip_atmoscreens: Use this to optionally skip the step to generate atmosphere screens during preprocessing and instead perform this operation at the start of the raytrace phase. This is useful in distributed environments where the cost of transferring the atmosphere screens to the compute node is higher than recalculating them. --logtostderr: (only v3.2.x and higher) By default, log output from python_controls is done via the python logging module, and directed to either log_dir in the imsim_config_file or /tmp/fullFocalplane.log if log_dir is not specified. This option overrides this behavior and prints logging information to stdout. Note: output from phosim.py and the phosim binaries are still printed to stdout. TODO(gardnerj): Add stdout log redirect TODO(gardnerj): Support sensor_ids argument for phosim.py. TODO(gardnerj): Support not running e2adc step. 
""" from __future__ import with_statement import ConfigParser from distutils import version import logging from optparse import OptionParser # Can't use argparse yet, since we must work in 2.5 import os import sys from AllChipsScriptGenerator import AllChipsScriptGenerator import PhosimManager import PhosimUtil import PhosimVerifier import ScriptWriter __author__ = 'Jeff Gardner (gardnerj@phys.washington.edu)' logger = logging.getLogger(__name__) def DoPreprocOldVersion(trimfile, policy, extra_commands, scheduler, sensor_id): """Do preprocessing for v3.1.0 and earlier. Args: trimfile: Full path to trim metadata file. policy: ConfigParser object from python_controls config file. extra_commands: Full path to extra commands or 'extraid' file. scheduler: Name of scheduler (currently, just 'csh' is supported). sensor_id: If not '', run just this single sensor ID. Returns: 0 (success) """ with PhosimUtil.WithTimer() as t: # Determine the pre-processing scheduler so that we know which class to use if scheduler == 'csh': scriptGenerator = AllChipsScriptGenerator(trimfile, policy, extra_commands) scriptGenerator.makeScripts(sensor_id) elif scheduler == 'pbs': scriptGenerator = AllChipsScriptGenerator_Pbs(trimfile, policy, extra_commands) scriptGenerator.makeScripts(sensor_id) elif scheduler == 'exacycle': print 'Exacycle funtionality not added yet.' return 1 else: print 'Scheduler "%s" unknown. Use -h or --help for help.' % scheduler t.LogWall('makeScripts') return 0 def DoPreproc(trimfile, imsim_config_file, extra_commands, scheduler, skip_atmoscreens=False, keep_scratch_dirs=False): """Do preprocessing for v3.2.0 and later. Args: trimfile: Full path to trim metadata file. imsim_config_file: Full path to the python_controls config file. extra_commands: Full path to extra commands or 'extraid' file. scheduler: Name of scheduler (currently, just 'csh' is supported). skip_atmoscreens: Generate atmosphere screens in raytrace stage instead of preprocessing stage. 
keep_scratch_dirs: Do not delete the working directories at the end of execution. Returns: 0 upon success, 1 upon failure. """ if scheduler == 'csh': preprocessor = PhosimManager.Preprocessor(imsim_config_file, trimfile, extra_commands) elif scheduler == 'pbs': # Construct PhosimPreprocessor with PBS-specific ScriptWriter preprocessor = PhosimManager.Preprocessor( imsim_config_file, trimfile, extra_commands, script_writer_class=ScriptWriter.PbsRaytraceScriptWriter) # Read in PBS-specific config policy = ConfigParser.RawConfigParser() policy.read(imsim_config_file) preprocessor.script_writer.ParsePbsConfig(policy) else: logger.critical('Unknown scheduler: %s. Use -h or --help for help', scheduler) return 1 preprocessor.InitExecEnvironment() with PhosimUtil.WithTimer() as t: if not preprocessor.DoPreprocessing(skip_atmoscreens=skip_atmoscreens): logger.critical('DoPreprocessing() failed.') return 1 t.LogWall('DoPreprocessing') exec_manifest_fn = 'execmanifest_raytrace_%s.txt' % preprocessor.focalplane.observationID files_to_stage = preprocessor.ArchiveRaytraceInputByExt(exec_archive_name=exec_manifest_fn) if not files_to_stage: logger.critical('Output archive step failed.') return 1 with PhosimUtil.WithTimer() as t: preprocessor.StageOutput(files_to_stage) t.LogWall('StageOutput') if not keep_scratch_dirs: preprocessor.Cleanup() verifier = PhosimVerifier.PreprocVerifier(imsim_config_file, trimfile, extra_commands) missing_files = verifier.VerifySharedOutput() if missing_files: logger.critical('Verification failed with the following files missing:') for fn in missing_files: logger.critical(' %s', fn) sys.stderr.write('Verification failed with the following files missing:\n') for fn in missing_files: sys.stderr.write(' %s\n', fn) else: logger.info('Verification completed successfully.') return 0 def ConfigureLogging(trimfile, policy, log_to_stdout, imsim_config_file, extra_commands=None): """Configures logger. If log_to_stdout, the logger will write to stdout. 
Otherwise, it will write to: 'log_dir' in the config file, if present /tmp/fullFocalplane.log if 'log_dir' is not present. Stdout from phosim.py and PhoSim binaries always goes to stdout. """ if log_to_stdout: log_fn = None else: if policy.has_option('general', 'log_dir'): # Log to file in log_dir obsid, filter_num = PhosimManager.ObservationIdFromTrimfile( trimfile, extra_commands=options.extra_commands) log_dir = os.path.join(policy.get('general', 'log_dir'), obsid) log_fn = os.path.join(log_dir, 'fullFocalplane_%s.log' % obsid) else: log_fn = '/tmp/fullFocalplane.log' PhosimUtil.ConfigureLogging(policy.getint('general', 'debug_level'), logfile_fullpath=log_fn) params_str = 'trimfile=%s\nconfig_file=%s\n' % (trimfile, imsim_config_file) if extra_commands: params_str += 'extra_commands=%s\n' % extra_commands PhosimUtil.WriteLogHeader(__file__, params_str=params_str) def main(trimfile, imsim_config_file, extra_commands, skip_atmoscreens, keep_scratch_dirs, sensor_ids, log_to_stdout=False): """ Run the fullFocalplanePbs.py script, populating it with the correct user and cluster job submission information from an LSST policy file. 
""" policy = ConfigParser.RawConfigParser() policy.read(imsim_config_file) if policy.has_option('general', 'phosim_version'): phosim_version = policy.get('general', 'phosim_version') else: phosim_version = '3.0.1' ConfigureLogging(trimfile, policy, log_to_stdout, imsim_config_file, extra_commands) # print 'Running fullFocalPlane on: ', trimfile logger.info('Running fullFocalPlane on: %s ', trimfile) # print 'Using Imsim/Phosim version', phosim_version logger.info('Using Imsim/Phosim version %s', phosim_version) # Must pass absolute paths to imsim/phosim workers if not os.path.isabs(trimfile): trimfile = os.path.abspath(trimfile) if not os.path.isabs(imsim_config_file): imsim_config_file = os.path.abspath(imsim_config_file) if not os.path.isabs(extra_commands): extra_commands = os.path.abspath(extra_commands) scheduler = policy.get('general','scheduler2') if version.LooseVersion(phosim_version) < version.LooseVersion('3.1.0'): if len(sensor_ids.split('|')) > 1: logger.critical('Multiple sensors not supported in version < 3.1.0.') return 1 sensor_id = '' if sensor_ids == 'all' else sensor_ids return DoPreprocOldVersion(trimfile, policy, extra_commandsm,scheduler, sensor_id) elif version.LooseVersion(phosim_version) > version.LooseVersion('3.2.0'): if sensor_ids != 'all': logger.critical('Single exposure mode is currently not supported for' ' phosim > 3.2.0') return 1 return DoPreproc(trimfile, imsim_config_file, extra_commands, scheduler, skip_atmoscreens=skip_atmoscreens, keep_scratch_dirs=keep_scratch_dirs) logger.critical('Unsupported phosim version %s', phosim_version) return 1 if __name__ == '__main__': usage = 'usage: %prog trimfile imsim_config_file [options]' parser = OptionParser(usage=usage) parser.add_option('-a', '--skip_atmoscreens', dest='skip_atmoscreens', action='store_true', default=False, help='Generate atmospheric screens in raytrace stage instead' ' of preprocessing stage.') parser.add_option('-c', '--command', dest='extra_commands', help='Extra 
commands filename.') parser.add_option('-k', '--keep_scratch', dest='keep_scratch_dirs', action='store_true', default=False, help='Do not cleanup working directories.' ' (version 3.2.x and higher only).') parser.add_option('-l', '--logtostdout', dest='log_to_stdout', action='store_true', default=False, help='Write logging output to stdout instead of log file' ' (version 3.2.x and higher only).') parser.add_option('-s', '--sensor', dest='sensor_ids', default='all', help='Specify a list of sensor ids to use delimited by "|",' ' or use "all" for all.') (options, args) = parser.parse_args() if len(args) != 2: print 'Incorrect number of arguments. Use -h or --help for help.' print usage quit() trimfile = args[0] imsim_config_file = args[1] sys.exit(main(trimfile, imsim_config_file, options.extra_commands, options.skip_atmoscreens, options.keep_scratch_dirs, options.sensor_ids, options.log_to_stdout))
from svgpathtools import svg2paths, wsvg import numpy as np import uArmRobot import time serialport = "/dev/ttyACM0" # for linux like system myRobot = uArmRobot.robot(serialport,0) # user 0 for firmware < v4 and use 1 for firmware v4 myRobot.debug = True # Enable / Disable debug output on screen, by default disabled myRobot.connect() myRobot.mode(1) # Set mode to Normal paths, attributes = svg2paths('drawing.svg') scale = .25 steps_per_seg = 3 coords = [] x_offset = 200 height = 90 draw_speed = 1000 for i in range(len(paths)): path = paths[i] attribute = attributes[i] # A crude check for whether a path should be drawn. Does it have a style defined? if 'style' in attribute: for seg in path: segcoords = [] for p in range(steps_per_seg+1): cp = seg.point(float(p)/float(steps_per_seg)) segcoords.append([-np.real(cp)*scale+x_offset, np.imag(cp)*scale]) coords.append(segcoords) myRobot.goto(coords[0][0][0], coords[0][0][1], height, 6000) for seg in coords: myRobot.goto(seg[0][0], seg[0][1], height, 6000) time.sleep(0.15) for p in seg: myRobot.goto_laser(p[0], p[1], height, draw_speed) myRobot.goto(coords[0][0][0], coords[0][0][1], height, 6000)
import pymongo
from pymongo.errors import AutoReconnect

from lai.db.base import DBBase
from lai.database import UPDATE_PROCESS, COMMIT_PROCESS
from lai.database import DatabaseException, NotFoundError
from lai import Document


class DBMongo(DBBase):
    """MongoDB storage backend for lai documents.

    Documents live in the ``docs`` collection; counters and sync
    bookkeeping live in the ``internal`` collection.
    """

    def __init__(self, name, host='127.0.0.1', port=27017):
        # Connection parameters only; connect() must be called before use.
        self.name = name
        self.host = host
        self.port = port

    def connect(self):
        """Open the client connection and select the database."""
        try:
            # NOTE(review): pymongo.Connection is the legacy client class;
            # modern pymongo uses MongoClient.
            self.connection = pymongo.Connection(self.host, self.port)
            self.db = self.connection[self.name]
        except AutoReconnect:
            raise DatabaseException("It's not possible connect to the database")

    def get_next_id(self):
        """Atomically increment and return the local id counter."""
        try:
            query = {'_id': 'last_id'}
            update = {'$inc': {'id': 1}}
            fn = self.db.internal.find_and_modify
            # upsert creates the counter on first use; new=True returns the
            # post-increment value instead of the pre-update document.
            row = fn(query, update, upsert=True, new=True)
        except Exception as e:
            raise DatabaseException(e)
        return row['id']

    def search(self, regex):
        """Return Documents whose content or description matches regex
        (case-insensitive, multiline)."""
        try:
            spec = {'$or': [{'data.content': {'$regex': regex, '$options': 'im'}},
                            {'data.description': {'$regex': regex, '$options': 'im'}}]}
            fields = {'_id': 0}
            cur = self.db.docs.find(spec, fields)
        except Exception as e:
            raise DatabaseException(e)
        return [Document(**row) for row in cur]

    def get(self, id, pk='id', deleted=False):
        """Fetch one Document by primary key.

        Args:
            id: key value; coerced to int when pk == 'id'.
            pk: 'id' (local id) or another key field such as 'sid'.
            deleted: when True, also match logically deleted documents
                (those whose 'data' field was removed).

        Raises:
            NotFoundError: if no matching document exists.
        """
        try:
            if pk == 'id':
                id = int(id)
            if deleted:
                spec = {pk: id}
            else:
                # Documents without a 'data' field are considered deleted.
                spec = {pk: id, 'data': {'$exists': 1}}
            fields = {'_id': 0}
            row = self.db.docs.find_one(spec, fields)
        except Exception as e:
            raise DatabaseException(e)
        if row:
            return Document(**row)
        raise NotFoundError('%s %s not found' % (pk, id))

    def getall(self):
        """Return all non-deleted Documents ordered by tid ascending."""
        try:
            spec = {'data': {'$exists': 1}}
            fields = {'_id': 0}
            sort = [('tid', 1)]
            cur = self.db.docs.find(spec, fields, sort=sort)
        except Exception as e:
            raise DatabaseException(e)
        return [Document(**row) for row in cur]

    def save(self, doc):
        """Insert doc if it has no id yet, otherwise update it."""
        if doc.id:
            return self.update(doc)
        else:
            return self.insert(doc)

    def insert(self, doc, synced=False):
        """Assign a fresh local id and insert doc; return the Document."""
        doc.id = self.get_next_id()
        doc.synced = synced
        try:
            self.db.docs.insert(doc)
        except Exception as e:
            raise DatabaseException(e)
        return doc

    def update(self, doc, process=None):
        """Update a stored document.

        The ``process`` argument selects the semantics:
            None           -- plain local edit; marks the doc unsynced.
            UPDATE_PROCESS -- apply a server-side update (matched by sid);
                              inserts when the sid is unknown locally.
            COMMIT_PROCESS -- record a successful commit (matched by id).

        Raises:
            DatabaseException: for an unknown process or if no row matched.
        """
        if process is None:
            pk = 'id'
            id = doc.id
            doc.synced = False
            set = doc
        elif process == UPDATE_PROCESS:
            if self.db.docs.find({'sid': doc.sid}).count() == 0:
                return self.insert(doc, synced=True)
            pk = 'sid'
            id = doc.sid
            doc.synced = not doc.merged() # must be commited if was merged
            doc.merged(False)
            set = {'tid': doc.tid, 'data': doc.data,
                   'user': doc.user, 'public': doc.public,
                   'synced': doc.synced}
        elif process == COMMIT_PROCESS:
            pk = 'id'
            id = doc.id
            doc.synced = True
            set = {'sid': doc.sid, 'tid': doc.tid, 'synced': doc.synced}
        else:
            raise DatabaseException('Incorrect update process')
        try:
            rs = self.db.docs.update({pk: id}, {'$set': set}, safe=True)
            assert rs['n'] == 1
        except Exception as e:
            raise DatabaseException(e)
        return doc

    def delete(self, doc):
        """Delete a document.

        Documents never synced to a server (no sid) are removed outright;
        synced ones are logically deleted by clearing 'data' so the deletion
        can still be propagated.
        """
        if doc.id is None:
            raise DatabaseException('Document does not have id')
        if doc.sid is None:
            try:
                rs = self.db.docs.remove({'id': doc.id}, safe=True)
                assert rs['n'] == 1
            except Exception as e:
                raise DatabaseException(e)
            return None
        doc.data = None
        return self.update(doc)

    def save_last_sync(self, ids, process):
        """Remember which document ids took part in the last sync step."""
        try:
            spec = {'_id': 'last_sync'}
            document = {'$set': {process: ids}}
            self.db.internal.update(spec, document, upsert=True)
        except Exception as e:
            raise DatabaseException(e)

    def get_docs_to_commit(self):
        """Return raw rows (dicts) of all documents not yet synced."""
        try:
            spec = {'synced': False}
            fields = {'_id': 0}
            cur = self.db.docs.find(spec, fields)
        except Exception as e:
            raise DatabaseException(e)
        return list(cur)

    def get_last_tid(self):
        """Return the highest transaction id stored locally, or 0."""
        try:
            spec = {'tid': {'$gt': 0}}
            sort = [('tid', -1)]
            row = self.db.docs.find_one(spec, sort=sort)
        except Exception as e:
            raise DatabaseException(e)
        if row:
            return row['tid']
        return 0

    def status(self):
        """Summarize sync state: updated, committed and pending documents."""
        docs = {'updated': [],
                'committed': [],
                'to_commit': []}
        row = self.db.internal.find_one({'_id': 'last_sync'})
        if row and 'update' in row:
            for id in row['update']:
                docs['updated'].append(self.get(id, deleted=True))
        if row and 'commit' in row:
            for id in row['commit']:
                docs['committed'].append(self.get(id, deleted=True))
        to_commit = self.get_docs_to_commit()
        for row in to_commit:
            doc = Document(**row)
            docs['to_commit'].append(doc)
        return docs

    def __str__(self):
        return "%s://%s:%s/%s" % ('mongo', self.host, self.port, self.name)
from matplotlib import pyplot as plt path = "C:/Temp/mnisterrors/chunk" + str(input("chunk: ")) + ".txt" with open(path, "r") as f: errorhistory = [float(line.rstrip('\n')) for line in f] plt.plot(errorhistory) plt.show()
from numpy import * from scipy.optimize import root def eps(omk): return omk**2/(2+omk**2) def om_k(omc): khi=arcsin(omc) return sqrt(6*sin(khi/3)/omc-2) omc=0.88 print 'omc=',omc,' omk=',om_k(omc)
import sys
import os

# Split per-sample chromHMM 15-state mnemonics BED files into one BED file
# per (sample, chromatin-state mark) combination.

output_dir = "erc2-chromatin15state-all-files"
if not os.path.exists(output_dir):
    sys.stderr.write("Creating dir [%s]...\n" % (output_dir))
    os.makedirs(output_dir)

prefix = "/home/cbreeze/for_Alex"
suffix = "_15_coreMarks_mnemonics.bed"

# The 15 chromHMM core-marks state mnemonics; these match the state_call
# column of the input BED files.
marks = [
    '1_TssA',
    '2_TssAFlnk',
    '3_TxFlnk',
    '4_Tx',
    '5_TxWk',
    '6_EnhG',
    '7_Enh',
    '8_ZNF/Rpts',
    '9_Het',
    '10_TssBiv',
    '11_BivFlnk',
    '12_EnhBiv',
    '13_ReprPC',
    '14_ReprPCWk',
    '15_Quies'
]

# Renamed from 'all' (which shadowed the builtin).  Note E060 and E064 are
# deliberately absent from the original list.
samples = [
    'E001', 'E002', 'E003', 'E004', 'E005', 'E006', 'E007', 'E008',
    'E009', 'E010', 'E011', 'E012', 'E013', 'E014', 'E015', 'E016',
    'E017', 'E018', 'E019', 'E020', 'E021', 'E022', 'E023', 'E024',
    'E025', 'E026', 'E027', 'E028', 'E029', 'E030', 'E031', 'E032',
    'E033', 'E034', 'E035', 'E036', 'E037', 'E038', 'E039', 'E040',
    'E041', 'E042', 'E043', 'E044', 'E045', 'E046', 'E047', 'E048',
    'E049', 'E050', 'E051', 'E052', 'E053', 'E054', 'E055', 'E056',
    'E057', 'E058', 'E059', 'E061', 'E062', 'E063', 'E065', 'E066',
    'E067', 'E068', 'E069', 'E070', 'E071', 'E072', 'E073', 'E074',
    'E075', 'E076', 'E077', 'E078', 'E079', 'E080', 'E081', 'E082',
    'E083', 'E084', 'E085', 'E086', 'E087', 'E088', 'E089', 'E090',
    'E091', 'E092', 'E093', 'E094', 'E095', 'E096', 'E097', 'E098',
    'E099', 'E100', 'E101', 'E102', 'E103', 'E104', 'E105', 'E106',
    'E107', 'E108', 'E109', 'E110', 'E111', 'E112', 'E113', 'E114',
    'E115', 'E116', 'E117', 'E118', 'E119', 'E120', 'E121', 'E122',
    'E123', 'E124', 'E125', 'E126', 'E127', 'E128', 'E129'
]

for sample in samples:
    fns = {}
    fhs = {}
    # Set up output file handles for all combinations of per-sample and marks.
    # '/' in a mark (8_ZNF/Rpts) is replaced so it cannot split the filename
    # into a bogus directory.
    for mark in marks:
        fns[mark] = os.path.join(output_dir,
                                 "%s_%s.bed" % (sample, mark.replace('/', '-')))
        sys.stderr.write("Setting up output handle to [%s]...\n" % (fns[mark]))
        fhs[mark] = open(fns[mark], "w")
    try:
        # Split the per-sample mnemonics file into per-sample, per-mark files.
        psm_fn = "%s/%s%s" % (prefix, sample, suffix)
        sys.stderr.write("Reading PSM [%s]...\n" % (psm_fn))
        with open(psm_fn, "r") as psm_fh:
            for line in psm_fh:
                # Renamed 'chr' -> 'chrom' (shadowed the builtin).
                (chrom, start, stop, state_call) = line.strip().split('\t')
                fhs[state_call].write('\t'.join([chrom, start, stop]) + '\n')
    finally:
        # Close handles even if reading/parsing the input fails, so no
        # output file is leaked mid-run.
        for mark in marks:
            sys.stderr.write("Closing output handle to [%s]...\n" % (fns[mark]))
            fhs[mark].close()
from ._common import * from .rethinkdb import RethinkDBPipe from .mongodb import MongoDBPipe
import pandas as pd from larray.core.array import Array from larray.inout.pandas import from_frame __all__ = ['read_stata'] def read_stata(filepath_or_buffer, index_col=None, sort_rows=False, sort_columns=False, **kwargs) -> Array: r""" Reads Stata .dta file and returns an Array with the contents Parameters ---------- filepath_or_buffer : str or file-like object Path to .dta file or a file handle. index_col : str or None, optional Name of column to set as index. Defaults to None. sort_rows : bool, optional Whether or not to sort the rows alphabetically (sorting is more efficient than not sorting). This only makes sense in combination with index_col. Defaults to False. sort_columns : bool, optional Whether or not to sort the columns alphabetically (sorting is more efficient than not sorting). Defaults to False. Returns ------- Array See Also -------- Array.to_stata Notes ----- The round trip to Stata (Array.to_stata followed by read_stata) loose the name of the "column" axis. Examples -------- >>> read_stata('test.dta') # doctest: +SKIP {0}\{1} row country sex 0 0 BE F 1 1 FR M 2 2 FR F >>> read_stata('test.dta', index_col='row') # doctest: +SKIP row\{1} country sex 0 BE F 1 FR M 2 FR F """ df = pd.read_stata(filepath_or_buffer, index_col=index_col, **kwargs) return from_frame(df, sort_rows=sort_rows, sort_columns=sort_columns)
"""Provides a way to hook GunGame messages.""" from core import AutoUnload from .manager import message_manager class MessageHook(AutoUnload): """Decorator used to register message hooks.""" def __init__(self, message_name): """Store the message name.""" self.message_name = message_name self.callback = None def __call__(self, callback): """Store the callback and register the hook.""" self.callback = callback message_manager.hook_message(self.message_name, self.callback) def _unload_instance(self): """Unregister the message hook.""" message_manager.unhook_message(self.message_name, self.callback) class MessagePrefixHook(AutoUnload): """Decorator used to register message prefix hooks.""" def __init__(self, message_prefix): """Store the message prefix.""" self.message_prefix = message_prefix self.callback = None def __call__(self, callback): """Store the callback and register the hook.""" self.callback = callback message_manager.hook_prefix(self.message_prefix, self.callback) def _unload_instance(self): """Unregister the message prefix hook.""" message_manager.unhook_prefix(self.message_prefix, self.callback)
""" NodeChains are sequential orders of :mod:`~pySPACE.missions.nodes` .. image:: ../../graphics/node_chain.png :width: 500 There are two main use cases: * the application for :mod:`~pySPACE.run.launch_live` and the :mod:`~pySPACE.environments.live` using the default :class:`NodeChain` and * the benchmarking with :mod:`~pySPACE.run.launch` using the :class:`BenchmarkNodeChain` with the :mod:`~pySPACE.missions.operations.node_chain` operation. .. seealso:: - :mod:`~pySPACE.missions.nodes` - :ref:`node_list` - :mod:`~pySPACE.missions.operations.node_chain` operation .. image:: ../../graphics/launch_live.png :width: 500 .. todo:: Documentation This module extends/reimplements the original MDP flow class and has some additional methods like reset(), save() etc. Furthermore it supports the construction of NodeChains and also running them inside nodes in parallel. MDP is distributed under the following BSD license:: This file is part of Modular toolkit for Data Processing (MDP). All the code in this package is distributed under the following conditions: Copyright (c) 2003-2012, MDP Developers <mdp-toolkit-devel@lists.sourceforge.net> All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the Modular toolkit for Data Processing (MDP) nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ import sys import os if __name__ == '__main__': # add root of the code to system path file_path = os.path.dirname(os.path.abspath(__file__)) pyspace_path = file_path[:file_path.rfind('pySPACE')-1] if not pyspace_path in sys.path: sys.path.append(pyspace_path) import cPickle import gc import logging import multiprocessing import shutil import socket import time import uuid import yaml import pySPACE from pySPACE.tools.filesystem import create_directory from pySPACE.tools.socket_utils import talk, inform from pySPACE.tools.conversion import python2yaml, replace_parameters_and_convert, replace_parameters import copy import warnings import traceback import numpy class CrashRecoveryException(Exception): """Class to handle crash recovery """ def __init__(self, *args): """Allow crash recovery. Arguments: (error_string, crashing_obj, parent_exception) The crashing object is kept in self.crashing_obj The triggering parent exception is kept in ``self.parent_exception``. """ errstr = args[0] self.crashing_obj = args[1] self.parent_exception = args[2] # ?? 
python 2.5: super(CrashRecoveryException, self).__init__(errstr) super(CrashRecoveryException,self).__init__(self, errstr) def dump(self, filename = None): """ Save a pickle dump of the crashing object on filename. If filename is None, the crash dump is saved on a file created by the tempfile module. Return the filename. """ import cPickle import tempfile if filename is None: (fd, filename)=tempfile.mkstemp(suffix=".pic", prefix="NodeChainCrash_") fl = os.fdopen(fd, 'w+b', -1) else: fl = open(filename, 'w+b', -1) cPickle.dump(self.crashing_obj, fl) fl.close() return filename class NodeChainException(Exception): """Base class for exceptions in node chains.""" pass class NodeChainExceptionCR(CrashRecoveryException, NodeChainException): """Class to handle crash recovery """ def __init__(self, *args): """Allow crash recovery. Arguments: (error_string, flow_instance, parent_exception) The triggering parent exception is kept in self.parent_exception. If ``flow_instance._crash_recovery`` is set, save a crash dump of flow_instance on the file self.filename """ CrashRecoveryException.__init__(self, *args) rec = self.crashing_obj._crash_recovery errstr = args[0] if rec: if isinstance(rec, str): name = rec else: name = None name = CrashRecoveryException.dump(self, name) dumpinfo = '\nA crash dump is available on: "%s"' % name self.filename = name errstr = errstr+dumpinfo Exception.__init__(self, errstr) class NodeChain(object): """ Reimplement/overwrite mdp.Flow methods e.g., for supervised learning """ def __init__(self, node_sequence, crash_recovery=False, verbose=False): """ Creates the NodeChain based on the node_sequence .. note:: The NodeChain cannot be executed before not all trainable nodes have been trained, i.e. self.trained() == True. """ self._check_nodes_consistency(node_sequence) self.flow = node_sequence self.verbose = verbose self.set_crash_recovery(crash_recovery) # Register the direct predecessor of a node as its input # (i.e. 
we assume linear flows) for i in range(len(node_sequence) - 1): node_sequence[i+1].register_input_node(node_sequence[i]) self.use_test_data = False # set a default run number self[-1].set_run_number(0) # give this flow a unique identifier self.id = str(uuid.uuid4()) self.handler = None self.store_intermediate_results = True def train(self, data_iterators=None): """ Train NodeChain with data from iterator or source node The method can proceed in two different ways: * If no data is provided, it is checked that the first node of the flow is a source node. If that is the case, the data provided by this node is passed forward through the flow. During this forward propagation, the flow is trained. The request of the data is done in the last node. * If a list of data iterators is provided, then it is checked that no source and split nodes are contained in the NodeChain. these nodes only include already a data handling and should not be used, when training is done in different way. Furthermore, split nodes are relevant for benchmarking. One iterator for each node has to be given. If only one is given, or no list, it is mapped to a list with the same iterator for each node. .. note:: The iterator approach is normally not used in pySPACE, because pySPACE supplies the data with special source nodes and is doing the training automatically without explicit calls on data samples. The approach came with MDP. .. todo:: The iterator approach needs some use cases and testings, especially, because it is not used in the normal setting. """ if data_iterators is not None: # Check if no source and split nodes are contained in the node chain assert(not self[0].is_source_node()), \ "Node chains with source nodes cannot be trained "\ "with external data_iterators!" for node in self: assert(not node.is_split_node()), \ "Node chains with split nodes cannot be trained "\ "with external data_iterators!" 
# prepare iterables if not type(data_iterators) == list: data_iterators = [data_iterators] * len(self.flow) elif not len(data_iterators)==len(self.flow): data_iterators = [data_iterators] * len(self.flow) # Delegate to iterative training self.iter_train(data_iterators) else: # Use the pySPACE train semantic and not MDP type # Check if the first node of the node chain is a source node assert(self[0].is_source_node()), \ "Training of a node chain without source node requires a "\ "data_iterator argument!" # Training is accomplished by requesting the iterator # of the last node of the chain. This node will recursively call # the train method of all its predecessor nodes. # As soon as the first element is yielded the node has been trained. for _ in self[-1].request_data_for_training( use_test_data=self.use_test_data): return def iter_train(self, data_iterables): """ Train all trainable nodes in the NodeChain with data from iterator *data_iterables* is a list of iterables, one for each node in the chain. The iterators returned by the iterables must return data arrays that are then used for the node training (so the data arrays are the data for the nodes). Note that the data arrays are processed by the nodes which are in front of the node that gets trained, so the data dimension must match the input dimension of the first node. If a node has only a single training phase then instead of an iterable you can alternatively provide an iterator (including generator-type iterators). For nodes with multiple training phases this is not possible, since the iterator cannot be restarted after the first iteration. For more information on iterators and iterables see http://docs.python.org/library/stdtypes.html#iterator-types . In the special case that *data_iterables* is one single array, it is used as the data array *x* for all nodes and training phases. 
        Instead of a data array *x* the iterators can also return a list or
        tuple, where the first entry is *x* and the following are args for
        the training of the node (e.g., for supervised training).
        """
        data_iterables = self._train_check_iterables(data_iterables)

        # train each Node successively
        for i in range(len(self.flow)):
            if self.verbose:
                print "Training node #%d (%s)" % (i, str(self.flow[i]))
            self._train_node(data_iterables[i], i)
        if self.verbose:
            print "Training finished"
        self._close_last_node()

    def trained(self):
        """ Returns whether the complete training is finished,
        i.e. if all nodes have been trained.
        """
        # No remaining training phase on the last node means the whole
        # chain has been trained (training propagates backwards).
        return self[-1].get_remaining_train_phase() == 0

    def execute(self, data_iterators=None):
        """ Process the data through all nodes """
        if data_iterators is not None:
            # Delegate to super class
            return self.iter_execute(data_iterators)
        else:  # Use the evaluate semantic
            # Check if the first node of the flow is a source node
            assert (self[0].is_source_node()), \
                "Evaluation of a node chain without source node requires a " \
                "data_iterator argument!"
            # This is accomplished by calling the request_data_for_testing
            # method of the last node of the chain. This node will recursively
            # call the request_data_for_testing method of all its predecessor
            # nodes
            return self[-1].process()

    def iter_execute(self, iterable, nodenr = None):
        """ Process the data through all nodes in the chain till *nodenr*

        'iterable' is an iterable or iterator (note that a list is also an
        iterable), which returns data arrays that are used as input.
        Alternatively, one can specify one data array as input.

        If 'nodenr' is specified, the flow is executed only up to
        node nr. 'nodenr'. This is equivalent to 'flow[:nodenr+1](iterable)'.

        .. note:: In contrary to MDP, results are not concatenated
                  to one big object. Each data object remains separate.
""" if isinstance(iterable, numpy.ndarray): return self._execute_seq(iterable, nodenr) res = [] empty_iterator = True for x in iterable: empty_iterator = False res.append(self._execute_seq(x, nodenr)) if empty_iterator: errstr = ("The execute data iterator is empty.") raise NodeChainException(errstr) return res def _inc_train(self, data, class_label=None): """ Iterate through the nodes to train them """ for node in self: if node.is_retrainable() and not node.buffering and hasattr(node, "_inc_train"): if not node.retraining_phase: node.retraining_phase=True node.start_retraining() node._inc_train(data,class_label) if not (hasattr(self, "buffering") and self.buffering): data = node.execute(data) else: # workaround to inherit meta data self.buffering = False data = node.execute(data) self.buffering = True def save(self, filename, protocol = -1): """ Save a pickled representation to *filename* If *filename* is None, return a string. .. note:: the pickled NodeChain is not guaranteed to be upward or backward compatible. .. note:: Having C-Code in the node might cause problems with saving. Therefore, the code has special handling for the LibSVMClassifierNode. .. todo:: Intrinsic node methods for storing should be used. .. 
seealso:: :func:`store_node_chain`
        """
        # Special-case: multinomial LibSVM classifiers store their C model
        # in a side file next to the pickle.
        if self[-1].__class__.__name__ in ["LibSVMClassifierNode"] \
                and self[-1].multinomial:
            indx = filename.find(".pickle")
            if indx != -1:
                self[-1].save_model(filename[0:indx]+'.model')
            else:
                self[-1].save_model(filename+'.model')
        import cPickle

        odict = self.__dict__.copy() # copy the dict since we change it
        # Remove other non-pickable stuff
        remove_keys=[]
        k = 0  # NOTE(review): unused variable
        for key, value in odict.iteritems():
            if key == "input_node" or key == "flow":
                continue
            try:
                cPickle.dumps(value)
            except (ValueError, TypeError, cPickle.PicklingError):
                remove_keys.append(key)
        for key in remove_keys:
            odict.pop(key)
        # NOTE(review): this rebinds the LIVE instance dict, so unpicklable
        # attributes are permanently lost on the saved-from object, not only
        # in the pickle — confirm this side effect is intended.
        self.__dict__ = odict
        if filename is None:
            return cPickle.dumps(self, protocol)
        else:
            # if protocol != 0 open the file in binary mode
            if protocol != 0:
                mode = 'wb'
            else:
                mode = 'w'
            flh = open(filename , mode)
            cPickle.dump(self, flh, protocol)
            flh.close()

    def get_output_type(self, input_type, as_string=True):
        """ Returns the output type of the entire node chain

        Recursively iterate over nodes in flow
        """
        output = input_type
        for i in range(len(self.flow)):
            if i == 0:
                output = self.flow[i].get_output_type(
                    input_type, as_string=True)
            else:
                output = self.flow[i].get_output_type(output, as_string=True)
        if as_string:
            return output
        else:
            return self.string_to_class(output)

    @staticmethod
    def string_to_class(string_encoding):
        """ given a string variable, outputs a class instance

        e.g. obtaining a TimeSeries
        """
        from pySPACE.resources.data_types.time_series import TimeSeries
        from pySPACE.resources.data_types.feature_vector import FeatureVector
        from pySPACE.resources.data_types.prediction_vector import PredictionVector
        if "TimeSeries" in string_encoding:
            return TimeSeries
        elif "PredictionVector" in string_encoding:
            return PredictionVector
        elif "FeatureVector" in string_encoding:
            return FeatureVector
        else:
            raise NotImplementedError

    def _propagate_exception(self, exception, nodenr):
        # capture exception. 
the traceback of the error is printed and a
        # new exception, containing the identity of the node in the NodeChain
        # is raised. Allow crash recovery.
        (etype, val, tb) = sys.exc_info()
        prev = ''.join(traceback.format_exception(exception.__class__,
                                                  exception,tb))
        act = "\n! Exception in node #%d (%s):\n" % (nodenr,
                                                     str(self.flow[nodenr]))
        errstr = ''.join(('\n', 40*'-', act, 'Node Traceback:\n', prev, 40*'-'))
        raise NodeChainExceptionCR(errstr, self, exception)

    def _train_node(self, data_iterable, nodenr):
        """ Train a single node in the flow.

        nodenr -- index of the node in the flow
        """
        node = self.flow[nodenr]
        if (data_iterable is not None) and (not node.is_trainable()):
            # attempted to train a node although it is not trainable.
            # raise a warning and continue with the next node.
            # wrnstr = "\n! Node %d is not trainable" % nodenr + \
            #         "\nYou probably need a 'None' iterable for"+\
            #         " this node. Continuing anyway."
            #warnings.warn(wrnstr, UserWarning)
            return
        elif (data_iterable is None) and node.is_training():
            # None instead of iterable is passed to a training node
            err_str = ("\n! Node %d is training"
                       " but instead of iterable received 'None'." % nodenr)
            raise NodeChainException(err_str)
        elif (data_iterable is None) and (not node.is_trainable()):
            # skip training if node is not trainable
            return

        try:
            train_arg_keys = self._get_required_train_args(node)
            train_args_needed = bool(len(train_arg_keys))
            ## We leave the last training phase open for the
            ## CheckpointFlow class.
            ## Checkpoint functions must close it explicitly if needed!
            ## Note that the last training_phase is closed
            ## automatically when the node is executed.
            # One pass of this loop trains one training phase of the node;
            # the iterable is restarted for every phase.
            while True:
                empty_iterator = True
                for x in data_iterable:
                    empty_iterator = False
                    # the arguments following the first are passed only to the
                    # currently trained node, allowing the implementation of
                    # supervised nodes
                    if (type(x) is tuple) or (type(x) is list):
                        arg = x[1:]
                        x = x[0]
                    else:
                        arg = ()
                    # check if the required number of arguments was given
                    if train_args_needed:
                        if len(train_arg_keys) != len(arg):
                            err = ("Wrong number of arguments provided by " +
                                   "the iterable for node #%d " % nodenr +
                                   "(%d needed, %d given).\n" %
                                   (len(train_arg_keys), len(arg)) +
                                   "List of required argument keys: " +
                                   str(train_arg_keys))
                            raise NodeChainException(err)
                    # filter x through the previous nodes
                    if nodenr > 0:
                        x = self._execute_seq(x, nodenr-1)
                    # train current node
                    node.train(x, *arg)
                if empty_iterator:
                    # distinguish "exhausted iterator reused for a second
                    # phase" from "iterator empty from the start"
                    if node.get_current_train_phase() == 1:
                        err_str = ("The training data iteration for node "
                                   "no. %d could not be repeated for the "
                                   "second training phase, you probably "
                                   "provided an iterator instead of an "
                                   "iterable." % (nodenr+1))
                        raise NodeChainException(err_str)
                    else:
                        err_str = ("The training data iterator for node "
                                   "no. %d is empty." % (nodenr+1))
                        raise NodeChainException(err_str)
                self._stop_training_hook()
                # close the previous training phase
                node.stop_training()
                if node.get_remaining_train_phase() > 0:
                    continue
                else:
                    break
        except self.flow[-1].TrainingFinishedException, e:
            # attempted to train a node although its training phase is already
            # finished. raise a warning and continue with the next node.
            wrnstr = ("\n! Node %d training phase already finished"
                      " Continuing anyway."
                      % nodenr)
            warnings.warn(wrnstr, UserWarning)
        except NodeChainExceptionCR, e:
            # this exception was already propagated,
            # probably during the execution of a node upstream in the flow
            (exc_type, val) = sys.exc_info()[:2]
            prev = ''.join(traceback.format_exception_only(e.__class__, e))
            prev = prev[prev.find('\n')+1:]
            act = "\nWhile training node #%d (%s):\n" % (nodenr,
                                                         str(self.flow[nodenr]))
            err_str = ''.join(('\n', 40*'=', act, prev, 40*'='))
            raise NodeChainException(err_str)
        except Exception, e:
            # capture any other exception occurred during training.
            self._propagate_exception(e, nodenr)

    def _stop_training_hook(self):
        """Hook method that is called before stop_training is called."""
        pass

    @staticmethod
    def _get_required_train_args(node):
        """Return arguments in addition to self and x for node.train.

        Arguments that have a default value are ignored.
        """
        import inspect
        train_arg_spec = inspect.getargspec(node.train)
        train_arg_keys = train_arg_spec[0][2:]  # ignore self, x
        if train_arg_spec[3]:
            # subtract arguments with a default value
            train_arg_keys = train_arg_keys[:-len(train_arg_spec[3])]
        return train_arg_keys

    def _train_check_iterables(self, data_iterables):
        """Return the data iterables after some checks and sanitizing.

        Note that this method does not distinguish between iterables and
        iterators, so this must be taken care of later.
        """
        # verifies that the number of iterables matches that of
        # the signal nodes and multiplies them if needed.
flow = self.flow # # if a single array is given wrap it in a list of lists, # # note that a list of 2d arrays is not valid # if isinstance(data_iterables, numpy.ndarray): # data_iterables = [[data_iterables]] * len(flow) if not isinstance(data_iterables, list): err_str = ("'data_iterables' must be either a list of " "iterables or an array, but got %s" % str(type(data_iterables))) raise NodeChainException(err_str) # check that all elements are iterable for i, iterable in enumerate(data_iterables): if (iterable is not None) and (not hasattr(iterable, '__iter__')): err = ("Element number %d in the data_iterables" " list is not an iterable." % i) raise NodeChainException(err) # check that the number of data_iterables is correct if len(data_iterables) != len(flow): err_str = ("%d data iterables specified," " %d needed" % (len(data_iterables), len(flow))) raise NodeChainException(err_str) return data_iterables def _close_last_node(self): if self.verbose: print "Close the training phase of the last node" try: self.flow[-1].stop_training() except self.flow[-1].TrainingFinishedException: pass except Exception, e: self._propagate_exception(e, len(self.flow)-1) def set_crash_recovery(self, state = True): """Set crash recovery capabilities. When a node raises an Exception during training, execution, or inverse execution that the flow is unable to handle, a NodeChainExceptionCR is raised. If crash recovery is set, a crash dump of the flow instance is saved for later inspection. The original exception can be found as the 'parent_exception' attribute of the NodeChainExceptionCR instance. - If 'state' = False, disable crash recovery. - If 'state' is a string, the crash dump is saved on a file with that name. - If 'state' = True, the crash dump is saved on a file created by the tempfile module. 
""" self._crash_recovery = state def _execute_seq(self, x, nodenr = None): """ Executes input data 'x' through the nodes 0..'node_nr' included If no *nodenr* is specified, the complete node chain is used for processing. """ flow = self.flow if nodenr is None: nodenr = len(flow)-1 for node_index in range(nodenr+1): try: x = flow[node_index].execute(x) except Exception, e: self._propagate_exception(e, node_index) return x def copy(self, protocol=None): """Return a deep copy of the flow. The protocol parameter should not be used. """ import copy if protocol is not None: warnings.warn("protocol parameter to copy() is ignored", DeprecationWarning, stacklevel=2) return copy.deepcopy(self) def __call__(self, iterable, nodenr = None): """Calling an instance is equivalent to call its 'execute' method.""" return self.iter_execute(iterable, nodenr=nodenr) ###### string representation def __str__(self): nodes = ', '.join([str(x) for x in self.flow]) return '['+nodes+']' def __repr__(self): # this should look like a valid Python expression that # could be used to recreate an object with the same value # eval(repr(object)) == object name = type(self).__name__ pad = len(name)+2 sep = ',\n'+' '*pad nodes = sep.join([repr(x) for x in self.flow]) return '%s([%s])' % (name, nodes) ###### private container methods def __len__(self): return len(self.flow) def _check_dimension_consistency(self, out, inp): """Raise ValueError when both dimensions are set and different.""" if ((out and inp) is not None) and out != inp: errstr = "dimensions mismatch: %s != %s" % (str(out), str(inp)) raise ValueError(errstr) def _check_nodes_consistency(self, flow = None): """Check the dimension consistency of a list of nodes.""" if flow is None: flow = self.flow len_flow = len(flow) for i in range(1, len_flow): out = flow[i-1].output_dim inp = flow[i].input_dim self._check_dimension_consistency(out, inp) def _check_value_type_isnode(self, value): if not isinstance(value, 
                          pySPACE.missions.nodes.base.BaseNode):
            raise TypeError("flow item must be Node instance")

    def __getitem__(self, key):
        # slicing returns a new (consistency-checked) chain of the same class
        if isinstance(key, slice):
            flow_slice = self.flow[key]
            self._check_nodes_consistency(flow_slice)
            return self.__class__(flow_slice)
        else:
            return self.flow[key]

    def __setitem__(self, key, value):
        if isinstance(key, slice):
            [self._check_value_type_isnode(item) for item in value]
        else:
            self._check_value_type_isnode(value)

        # make a copy of list
        flow_copy = list(self.flow)
        flow_copy[key] = value
        # check dimension consistency
        self._check_nodes_consistency(flow_copy)
        # if no exception was raised, accept the new sequence
        self.flow = flow_copy

    def __delitem__(self, key):
        # make a copy of list
        flow_copy = list(self.flow)
        del flow_copy[key]
        # check dimension consistency
        self._check_nodes_consistency(flow_copy)
        # if no exception was raised, accept the new sequence
        self.flow = flow_copy

    def __contains__(self, item):
        return self.flow.__contains__(item)

    def __iter__(self):
        return self.flow.__iter__()

    def __add__(self, other):
        # append other to self
        if isinstance(other, NodeChain):
            flow_copy = list(self.flow).__add__(other.flow)
            # check dimension consistency
            self._check_nodes_consistency(flow_copy)
            # if no exception was raised, accept the new sequence
            return self.__class__(flow_copy)
        elif isinstance(other, pySPACE.missions.nodes.base.BaseNode):
            flow_copy = list(self.flow)
            flow_copy.append(other)
            # check dimension consistency
            self._check_nodes_consistency(flow_copy)
            # if no exception was raised, accept the new sequence
            return self.__class__(flow_copy)
        else:
            err_str = ('can only concatenate flow or node'
                       ' (not \'%s\') to flow' % (type(other).__name__))
            raise TypeError(err_str)

    def __iadd__(self, other):
        # append other to self
        if isinstance(other, NodeChain):
            self.flow += other.flow
        elif isinstance(other, pySPACE.missions.nodes.base.BaseNode):
            self.flow.append(other)
        else:
            err_str = ('can only concatenate flow or node'
                       ' (not \'%s\') to flow' % (type(other).__name__))
            raise TypeError(err_str)
        self._check_nodes_consistency(self.flow)
        return self

    ###### public container methods

    def append(self, x):
        """flow.append(node) -- append node to flow end"""
        # routed through __setitem__ so consistency checks run
        self[len(self):len(self)] = [x]

    def extend(self, x):
        """flow.extend(iterable) -- extend flow by appending
        elements from the iterable"""
        if not isinstance(x, NodeChain):
            err_str = ('can only concatenate flow'
                       ' (not \'%s\') to flow' % (type(x).__name__))
            raise TypeError(err_str)
        self[len(self):len(self)] = x

    def insert(self, i, x):
        """flow.insert(index, node) -- insert node before index"""
        self[i:i] = [x]

    def pop(self, i = -1):
        """flow.pop([index]) -> node -- remove and return node at index
        (default last)"""
        x = self[i]
        del self[i]
        return x

    def reset(self):
        """ Reset the flow and obey permanent_attributes where available

        Method was moved to the end of class code, due to program environment
        problems which needed the __getitem__ method beforehand.
        """
        for i in range(len(self)):
            self[i].reset()


class BenchmarkNodeChain(NodeChain):
    """ This subclass overwrites the train method in order to provide
    a more convenient way of doing supervised learning.

    Furthermore, it contains a benchmark method
    that can be used for benchmarking. This includes logging, setting of
    run numbers, delivering the result collection, handling of source and
    sink nodes, ...
    :Author: Jan Hendrik Metzen (jhm@informatik.uni-bremen.de)
    :Created: 2008/08/18
    """

    def __init__(self, node_sequence):
        """ Creates the BenchmarkNodeChain based on the node_sequence """
        super(BenchmarkNodeChain, self).__init__(node_sequence)
        # Each BenchmarkNodeChain must start with an source node
        # and end with a sink node
        assert(self[0].is_source_node()), \
            "A benchmark flow must start with a source node"
        assert(self[-1].is_sink_node()), \
            "A benchmark flow must end with a sink node"

    def use_next_split(self):
        """ Use the next split of the data into training and test data

        This method is useful for pySPACE-benchmarking
        """
        # This is handled by calling use_next_split() of the last node of
        # the flow which will recursively call predecessor nodes in the flow
        # until a node is found that handles the splitting
        return self[-1].use_next_split()

    def benchmark(self, input_collection, run=0,
                  persistency_directory=None, store_node_chain=False):
        """ Perform the benchmarking of this data flow with the given collection

        Benchmarking is accomplished by iterating through all splits of the
        data into training and test data.

        **Parameters**:

          :input_collection:
              A sequence of data/label-tuples that serves as a generator or a
              BaseDataset which contains the data to be processed.

          :run:
              The current run which defines all random seeds within the flow.

          :persistency_directory:
              Optional information of the nodes as well as the trained
              node chain (if *store_node_chain* is not False) are stored to the
              given *persistency_directory*.

          :store_node_chain:
              If True the trained flow is stored to *persistency_directory*.
              If *store_node_chain* is a tuple of length 2---lets say (i1,i2)--
              only the subflow starting at the i1-th node and ending at the
              (i2-1)-th node is stored. This may be useful when the stored
              flow should be used in an ensemble.
""" # Inform the first node of this flow about the input collection if hasattr(input_collection,'__iter__'): # assume a generator is given self[0].set_generator(input_collection) else: # assume BaseDataset self[0].set_input_dataset(input_collection) # Inform all nodes recursively about the number of the current run self[-1].set_run_number(int(run)) # set temp file folder if persistency_directory != None: self[-1].set_temp_dir(persistency_directory+os.sep+"temp_dir") split_counter = 0 # For every split of the dataset while True: # As long as more splits are available # Compute the results for the current split # by calling the method on its last node self[-1].process_current_split() if persistency_directory != None: if store_node_chain: self.store_node_chain(persistency_directory + os.sep + \ "node_chain_sp%s.pickle" % split_counter, store_node_chain) # Store nodes that should be persistent self.store_persistent_nodes(persistency_directory) # If no more splits are available if not self.use_next_split(): break split_counter += 1 # print "Input benchmark" # print gc.get_referrers(self[0].collection) # During the flow numerous pointers are put to the flow but they are # not deleted. So memory is not given free, which can be seen by the # upper comment. Therefore we now free the input collection and only # then the gc collector can free the memory. Otherwise under not yet # found reasons, the pointers to the input collection will remain even # between processes. if hasattr(input_collection,'__iter__'): self[0].set_generator(None) else: self[0].set_input_dataset(None) gc.collect() # Return the result collection of this flow return self[-1].get_result_dataset() def __call__(self, iterable=None, train_instances=None, runs=[]): """ Call *execute* or *benchmark* and return (id, PerformanceResultSummary) If *iterable* is given, calling an instance is equivalent to call its 'execute' method. 
        If *train_instances* and *runs* are given, 'benchmark'
        is called for every run number specified and results are merged.
        This is useful for e.g. parallel execution of subflows with the
        multiprocessing module, since instance methods can not be
        serialized in Python but whole objects.
        """
        if iterable != None:
            return self.execute(iterable)
        elif train_instances != None and runs != []:  # parallelization case
            # we have to reinitialize logging cause otherwise deadlocks occur
            # when parallelization is done via multiprocessing.Pool
            self.prepare_logging()
            for ind, run in enumerate(runs):
                result = self.benchmark(train_instances, run=run)
                if ind == 0:
                    result_collection = result
                else:
                    result_collection.data.update(result.data)
                # reset node chain for new training if another call of
                # :func:`benchmark` is expected.
                if not ind == len(runs) - 1:
                    self.reset()
            self.clean_logging()
            return (self.id, result_collection)
        else:
            import warnings
            warnings.warn("__call__ methods needs at least one parameter (data)")
            return None

    def store_node_chain(self, result_dir, store_node_chain):
        """ Pickle this flow into *result_dir* for later usage"""
        # NOTE(review): eval() on a string parameter — acceptable only while
        # *store_node_chain* comes from trusted configuration, never from
        # untrusted input.
        if isinstance(store_node_chain,basestring):
            store_node_chain = eval(store_node_chain)
        if isinstance(store_node_chain,tuple):
            assert(len(store_node_chain) == 2)
            # Keep only subflow starting at the i1-th node and ending at the
            # (i2-1) node.
            flow = NodeChain(self.flow[store_node_chain[0]:store_node_chain[1]])
        elif isinstance(store_node_chain,list):
            # Keep only nodes with indices contained in the list
            # nodes have to be copied, otherwise input_node-refs of current
            # flow are overwritten
            from copy import copy
            store_node_list = [copy(node) for ind, node in enumerate(self.flow) \
                               if ind in store_node_chain]
            flow = NodeChain(store_node_list)
        else:
            # Per default, get rid of source and sink nodes
            flow = NodeChain(self.flow[1:-1])
        # NOTE(review): the saved input_node is never restored afterwards;
        # the local variable is unused once flow.save() returns.
        input_node = flow[0].input_node
        flow[0].input_node = None
        flow.save(result_dir)

    def prepare_logging(self):
        """ Set up logging

        This method is only needed if one forks subflows, i.e. to execute
        them via multiprocessing.Pool
        """
        # Prepare remote logging
        root_logger = logging.getLogger("%s-%s" % (socket.gethostname(),
                                                   os.getpid()))
        root_logger.setLevel(logging.DEBUG)
        root_logger.propagate = False

        if len(root_logger.handlers)==0:
            self.handler = logging.handlers.SocketHandler(socket.gethostname(),
                        logging.handlers.DEFAULT_TCP_LOGGING_PORT)
            root_logger.addHandler(self.handler)

    def clean_logging(self):
        """ Remove logging handlers if existing

        Call this method only if you have called *prepare_logging* before.
""" # Remove potential logging handlers if self.handler is not None: self.handler.close() root_logger = logging.getLogger("%s-%s" % (socket.gethostname(), os.getpid())) root_logger.removeHandler(self.handler) def store_persistent_nodes(self, result_dir): """ Store all nodes that should be persistent """ # For all node for index, node in enumerate(self): # Store them in the result dir if they enabled storing node.store_state(result_dir, index) class NodeChainFactory(object): """ Provide static methods to create and instantiate data flows :Author: Jan Hendrik Metzen (jhm@informatik.uni-bremen.de) :Created: 2009/01/26 """ @staticmethod def flow_from_yaml(Flow_Class, flow_spec): """ Creates a Flow object Reads from the given *flow_spec*, which should be a valid YAML specification of a NodeChain object, and returns this dataflow object. **Parameters** :Flow_Class: The class name of node chain to create. Valid are 'NodeChain' and 'BenchmarkNodeChain'. :flow_spec: A valid YAML specification stream; this could be a file object, a string representation of the YAML file or the Python representation of the YAML file (list of dicts) """ from pySPACE.missions.nodes.base_node import BaseNode # Reads and parses the YAML file if necessary if type(flow_spec) != list: dataflow_spec = yaml.load(flow_spec) else: dataflow_spec = flow_spec node_sequence = [] # For all nodes of the flow for node_spec in dataflow_spec: # Use factory method to create node node_obj = BaseNode.node_from_yaml(node_spec) # Append this node to the sequence of node node_sequence.append(node_obj) # Check if the nodes have to cache their outputs for index, node in enumerate(node_sequence): # If a node is trainable, it uses the outputs of its input node # at least twice, so we have to cache. if node.is_trainable(): node_sequence[index - 1].set_permanent_attributes(caching = True) # Split node might also request the data from their input nodes # (once for each split), depending on their implementation. 
We # assume the worst case and activate caching if node.is_split_node(): node_sequence[index - 1].set_permanent_attributes(caching = True) # Create the flow based on the node sequence and the given flow class # and return it return Flow_Class(node_sequence) @staticmethod def instantiate(template, parametrization): """ Instantiate a template recursively for the given parameterization Instantiate means to replace the parameter in the template by the chosen value. **Parameters** :template: A dictionary with key-value pairs, where values might contain parameter keys which have to be replaced. A typical example of a template would be a Python representation of a node read from YAML. :parametrization: A dictionary with parameter names as keys and exact one value for this parameter as value. """ instance = {} for key, value in template.iteritems(): if value in parametrization.keys(): # Replacement instance[key] = parametrization[value] elif isinstance(value, dict): # Recursive call instance[key] = NodeChainFactory.instantiate(value, parametrization) elif isinstance(value, basestring): # String replacement for param_key, param_value in parametrization.iteritems(): try: value = value.replace(param_key, repr(param_value)) except: value = value.replace(param_key, python2yaml(param_value)) instance[key] = value elif hasattr(value, "__iter__"): # Iterate over all items in sequence instance[key] = [] for iter_item in value: if iter_item in parametrization.keys(): # Replacement instance[key].append(parametrization[iter_item]) elif isinstance(iter_item, dict): instance[key].append(NodeChainFactory.instantiate( iter_item, parametrization)) elif isinstance(value, basestring): # String replacement for param_key, param_value in parametrization.iteritems(): try: iter_item = iter_item.replace(param_key, repr(param_value)) except: iter_item = iter_item.replace( param_key, python2yaml(param_value)) instance[key] = value else: instance[key].append(iter_item) else: # Not parameterized 
                instance[key] = value
        return instance

    @staticmethod
    def replace_parameters_in_node_chain(node_chain_template, parametrization):
        # Dispatch on the template type: lists are instantiated node by node,
        # strings get plain text substitution, everything else passes through.
        node_chain_template = copy.copy(node_chain_template)
        if parametrization == {}:
            return node_chain_template
        elif type(node_chain_template) == list:
            return [NodeChainFactory.instantiate(
                template=node,parametrization=parametrization)
                for node in node_chain_template]
        elif isinstance(node_chain_template, basestring):
            node_chain_template = \
                replace_parameters(node_chain_template, parametrization)
        return node_chain_template


class SubflowHandler(object):
    """ Interface for nodes to generate and execute subflows (subnode-chains)

    A subflow means a node chain used inside a node for processing data.

    This class provides functions that can be used by nodes to generate and
    execute subflows. It serves thereby as a communication daemon to the
    backend (if it is used).

    Most important when inheriting from this class is that the subclass MUST
    be a node. The reason is that this class uses node functionality, e.g.
    logging, the *temp_dir*-variable and so on.

    **Parameters**

        :processing_modality:
            One of the valid strings: 'backend', 'serial', 'local'.

            :backend:
                The current backends modality is used. This is implemented
                at the moment only for 'LoadlevelerBackend' and
                'LocalBackend'.

            :serial:
                All subflows are executed sequentially, i.e. one after the
                other.

            :local:
                Subflows are executed in a Pool using *pool_size* cpus. This
                may be also needed when no backend is used.

            (*optional, default: 'serial'*)

        :pool_size:
            If a parallelization is based on using several processes on a
            local system in parallel, e.g. option 'backend' and
            :class:`pySPACEMulticoreBackend` or option 'local',
            the number of worker processes for subflow evaluation has to be
            specified.

            .. note:: When using the LocalBackend, there is also the
                      possibility to specify the pool size of parallel
                      executed processes, e.g. data sets. Your total number
                      of cpu's should be pool size (pySPACE) +
                      pool size (subflows).
            (*optional, default: 2*)

        :batch_size:
            If parallelization of subflow execution is done together with the
            :class:`~pySPACE.environments.backends.ll_backend.LoadLevelerBackend`,
            *batch_size* determines how many subflows are executed in one
            serial LoadLeveler job. This option is useful if execution of a
            single subflow is really short (range of seconds) since there is
            significant overhead in creating new jobs.

            (*optional, default: 1*)

    :Author: Anett Seeland (anett.seeland@dfki.de)
    :Created: 2012/09/04
    :LastChange: 2012/11/06 batch_size option added
    """

    def __init__(self, processing_modality='serial', pool_size=2, batch_size=1,
                 **kwargs):
        self.modality = processing_modality
        self.pool_size = int(pool_size)
        self.batch_size = int(batch_size)
        # a flag to send pool_size / batch_size only once to the backend
        self.already_send = False
        self.backend_com = None
        self.backend_name = None
        # to indicate the end of a message received over a socket
        self.end_token = '!END!'

        # unknown modality: warn and degrade gracefully to serial execution
        if processing_modality not in ["serial", "local", "backend"]:
            import warnings
            warnings.warn("Processing modality not found! Serial mode is used!")
            self.modality = 'serial'

    @staticmethod
    def generate_subflow(flow_template, parametrization=None, flow_class=None):
        """ Return a *flow_class* object of the given *flow_template*

        This methods wraps two function calls (NodeChainFactory.instantiate
        and NodeChainFactory.flow_from_yaml.

        **Parameters**

            :flow_template:
                List of dicts - a valid representation of a node chain.
                Alternatively, a YAML-String representation could be used,
                which simplifies parameter replacement.

            :parametrization:
                A dictionary with parameter names as keys and exact one value
                for this parameter as value.
                Passed to NodeChainFactory.instantiate

                (*optional, default: None*)

            :flow_class:
                The flow class name of which an object should be returned

                (*optional, default: BenchmarkNodeChain*)
        """
        if flow_class is None:
            flow_class = BenchmarkNodeChain
        flow_spec = NodeChainFactory.replace_parameters_in_node_chain(
            flow_template,parametrization)
        # create a new Benchmark flow
        flow = NodeChainFactory.flow_from_yaml(flow_class, flow_spec)
        return flow

    def execute_subflows(self, train_instances, subflows, run_numbers=None):
        """ Execute subflows and return result collection.

        **Parameters**

            :training_instances:
                List of training instances which should be used to execute
                *subflows*.

            :subflows:
                List of BenchmarkNodeChain objects.

                ..note:: Note that every subflow object is stored in memory!

            :run_numbers:
                All subflows will be executed with every run_number specified
                in this list. If None, the current self.run_number (from the
                node class) is used.

                (*optional, default: None*)
        """
        if run_numbers == None:
            run_numbers = [self.run_number]
        # in case of serial backend, modality is mapped to serial
        # in the other case communication must be set up and
        # jobs need to be submitted to backend
        if self.modality == 'backend':
            self.backend_com = pySPACE.configuration.backend_com
            if not self.backend_com is None:
                # ask for backend_name
                # create a socket and keep it alive as long as possible since
                # handshaking costs really time
                client_socket = socket.socket(socket.AF_INET,
                                              socket.SOCK_STREAM)
                client_socket.connect(self.backend_com)
                client_socket, self.backend_name = talk('name' + self.end_token,
                                                        client_socket,
                                                        self.backend_com)
            else:
                import warnings #necessary for serial backend!
                warnings.warn("Seems that no backend is used! Modality of subflow execution "\
                              "has to be specified! 
Assuming serial backend.") self.backend_name = 'serial' self._log("Preparing subflows for backend execution.") if self.backend_name in ['loadl','mcore'] : # we have to pickle training instances and store it on disk store_path = os.path.join(self.temp_dir, "sp%d" % self.current_split) create_directory(store_path) filename = os.path.join(store_path, "subflow_data.pickle") if not os.path.isfile(filename): cPickle.dump(train_instances, open(filename,'wb'), protocol=cPickle.HIGHEST_PROTOCOL) subflows_to_compute = [subflows[ind].id for ind in \ range(len(subflows))] if self.backend_name == 'loadl': # send batch_size to backend if not already done if not self.already_send: client_socket = inform("subflow_batchsize;%d%s" % \ (self.batch_size, self.end_token), client_socket, self.backend_com) self.already_send = True for subflow in subflows: cPickle.dump(subflow, open(os.path.join(store_path, subflow.id+".pickle"),"wb"), protocol=cPickle.HIGHEST_PROTOCOL) send_flows = subflows_to_compute else: # backend_name == mcore # send pool_size to backend if not already done if not self.already_send: client_socket = inform("subflow_poolsize;%d%s" % \ (self.pool_size, self.end_token), client_socket, self.backend_com) self.already_send = True # send flow objects via socket send_flows = [cPickle.dumps(subflow, cPickle.HIGHEST_PROTOCOL) \ for subflow in subflows] # inform backend client_socket,msg = talk('execute_subflows;%s;%d;%s;%s%s' % \ (store_path, len(subflows), str(send_flows), str(run_numbers), self.end_token), client_socket, self.backend_com) time.sleep(10) not_finished_subflows = set(subflows_to_compute) while len(not_finished_subflows) != 0: # ask backend for finished jobs client_socket, msg = talk('is_ready;%d;%s%s' % \ (len(not_finished_subflows), str(not_finished_subflows), self.end_token), client_socket, self.backend_com) # parse message finished_subflows = eval(msg) #should be a set # set difference not_finished_subflows -= finished_subflows time.sleep(10) if 
self.backend_name == 'loadl': # read results and delete store_dir result_pattern = os.path.join(store_path, '%s_result.pickle') result_collections = [cPickle.load(open(result_pattern % \ subflows[ind].id,'rb')) for ind in range(len(subflows))] # ..todo:: check if errors have occurred and if so do not delete! shutil.rmtree(store_path) else: # backend_name == mcore # ask backend to send results client_socket, msg = talk("send_results;%s!END!" % \ subflows_to_compute, client_socket, self.backend_com) # should be a list of collections results = eval(msg) result_collections = [cPickle.loads(result) for result in results] self._log("Finished subflow execution.") client_socket.shutdown(socket.SHUT_RDWR) client_socket.close() return result_collections elif self.backend_name == 'serial': # do the same as modality=='serial' self.modality = 'serial' else: # e.g. mpi backend : import warnings warnings.warn("Subflow Handling with %s backend not supported,"\ " serial-modality is used!" % self.backend_name) self.modality = 'serial' if self.modality == 'serial': # serial execution # .. note:: the here executed flows can not store anything. # meta data of result collection is NOT updated! results = [subflow(train_instances=train_instances, runs=run_numbers) for subflow in subflows] result_collections = [result[1] for result in results] return result_collections else: # modality local, e.g. usage without backend in application case self._log("Subflow Handler starts processes in pool.") pool = multiprocessing.Pool(processes=self.pool_size) results = [pool.apply_async(func=subflow, kwds={"train_instances": train_instances, "runs": run_numbers}) \ for subflow in subflows] pool.close() self._log("Waiting for parallel processes to finish.") pool.join() result_collections = [result.get()[1] for result in results] del pool return result_collections
import pandas as pd

# Load the advertising dataset and emit the TV budget column as a plain
# Python list (same file, same variable name, same printed output).
advertising = pd.read_csv('Advertising.csv')
tv_budget_x = advertising['TV'].tolist()
print(tv_budget_x)
from django.contrib import admin
""" Created on Mon Jul 22 17:01:36 2019 @author: raf """ from pdb import set_trace as stop import copy import numpy as np from collections import OrderedDict import string as st import os import pandas as pd from vison.datamodel import cdp from vison.support import files from vison.fpa import fpa as fpamod from vison.metatests.metacal import MetaCal from vison.plot import plots_fpa as plfpa from vison.support import vcal, utils from vison.datamodel import core as vcore from vison.ogse import ogse from vison.inject import lib as ilib import matplotlib.cm as cm from matplotlib import pyplot as plt plt.switch_backend('TkAgg') from matplotlib.colors import Normalize cols2keep = [ 'test', 'sn_ccd1', 'sn_ccd2', 'sn_ccd3', 'sn_roe', 'sn_rpsu', 'exptime', 'vstart', 'vend', 'rdmode', 'flushes', 'siflsh', 'siflsh_p', 'swellw', 'swelldly', 'inisweep', 'cdpu_clk', 'chinj', 'chinj_on', 'chinj_of', 'id_wid', 'id_dly', 'chin_dly', 'v_tpump', 's_tpump', 'v_tp_mod', 's_tp_mod', 'v_tp_cnt', 's_tp_cnt', 'dwell_v', 'dwell_s', 'toi_fl', 'toi_tp', 'toi_ro', 'toi_ch', 'motr', 'motr_cnt', 'motr_siz', 'source', 'wave', 'mirr_on', 'mirr_pos', 'R1C1_TT', 'R1C1_TB', 'R1C2_TT', 'R1C2_TB', 'R1C3_TT', 'R1C3_TB', 'IDL', 'IDH', 'IG1_1_T', 'IG1_2_T', 'IG1_3_T', 'IG1_1_B', 'IG1_2_B', 'IG1_3_B', 'IG2_T', 'IG2_B', 'OD_1_T', 'OD_2_T', 'OD_3_T', 'OD_1_B', 'OD_2_B', 'OD_3_B', 'RD_T', 'RD_B', 'time', 'HK_CCD1_TEMP_T', 'HK_CCD2_TEMP_T', 'HK_CCD3_TEMP_T', 'HK_CCD1_TEMP_B', 'HK_CCD2_TEMP_B', 'HK_CCD3_TEMP_B', 'HK_CCD1_OD_T', 'HK_CCD2_OD_T', 'HK_CCD3_OD_T', 'HK_CCD1_OD_B', 'HK_CCD2_OD_B', 'HK_CCD3_OD_B', 'HK_COMM_RD_T', 'HK_COMM_RD_B', 'HK_CCD1_IG1_T', 'HK_CCD2_IG1_T', 'HK_CCD3_IG1_T', 'HK_CCD1_IG1_B', 'HK_CCD2_IG1_B', 'HK_CCD3_IG1_B', 'HK_COMM_IG2_T', 'HK_COMM_IG2_B', 'HK_FPGA_BIAS_ID2', 'HK_VID_PCB_TEMP_T', 'HK_VID_PCB_TEMP_B', 'HK_RPSU_TEMP1', 'HK_FPGA_PCB_TEMP_T', 'HK_FPGA_PCB_TEMP_B', 'HK_RPSU_TEMP_2', 'HK_RPSU_28V_PRI_I', 'chk_NPIXOFF', 'chk_NPIXSAT', 'offset_pre', 'offset_ove', 'std_pre', 'std_ove'] 
class MetaChinj01(MetaCal):
    """Aggregation and reporting of CHINJ01 (charge injection) test results
    across the focal plane: parses per-block results, fits metadata, builds
    injection-vs-IG1 curves and profiles, and dumps figures / CDPs.

    Fixes in this revision (behavior-preserving for existing callers):
      * get_injprof_xlsx_cdp tested the wrong variable (`CDP_header` instead
        of `inCDP_header`), raising TypeError when called with the default.
      * three accidental trailing commas turned a meta value and two plot
        titles into 1-tuples.
      * _get_XYdict_PROFS raised NameError on `norm` when doNorm=False.
      * the ZOOM plots indexed label dicts with a stale loop variable.
      * `os.system('mkdir ...')` replaced with os.makedirs (no shell).
    """

    def __init__(self, **kwargs):
        """Initialize parser state and load per-block PTC01 gains."""
        super(MetaChinj01, self).__init__(**kwargs)

        self.testnames = ['CHINJ01']
        self.incols = cols2keep
        self.ParsedTable = OrderedDict()

        allgains = files.cPickleRead(kwargs['cdps']['gain'])

        self.cdps['GAIN'] = OrderedDict()
        for block in self.blocks:
            self.cdps['GAIN'][block] = allgains[block]['PTC01'].copy()

        self.products['METAFIT'] = OrderedDict()
        self.products['VERPROFILES'] = OrderedDict()
        self.products['HORPROFILES'] = OrderedDict()

        self.init_fignames()
        self.init_outcdpnames()

    def parse_single_test(self, jrep, block, testname, inventoryitem):
        """Parse one CHINJ01 test run into a flat table, caching the
        meta-fit results and the charge-injection profiles as products."""

        NCCDs = len(self.CCDs)
        NQuads = len(self.Quads)
        session = inventoryitem['session']

        CCDkeys = ['CCD%i' % CCD for CCD in self.CCDs]

        IndexS = vcore.vMultiIndex([vcore.vIndex('ix', vals=[0])])

        IndexCQ = vcore.vMultiIndex([vcore.vIndex('ix', vals=[0]),
                                     vcore.vIndex('CCD', vals=self.CCDs),
                                     vcore.vIndex('Quad', vals=self.Quads)])

        #idd = copy.deepcopy(inventoryitem['dd'])
        sidd = self.parse_single_test_gen(jrep, block, testname, inventoryitem)

        # TEST SPECIFIC
        # TO BE ADDED:
        #   OFFSETS: pre, img, ove
        #   RON: pre, img, ove
        #   REFERENCES TO PROFILES

        CHAMBER = sidd.meta['inputs']['CHAMBER']
        CHAMBER_key = CHAMBER[0]
        chamber_v = np.array([CHAMBER_key])
        sidd.addColumn(chamber_v, 'CHAMBERKEY', IndexS, ix=0)

        block_v = np.array([block])
        sidd.addColumn(block_v, 'BLOCK', IndexS, ix=0)

        test_v = np.array([jrep + 1])
        sidd.addColumn(test_v, 'REP', IndexS, ix=0)

        test_v = np.array([session])
        sidd.addColumn(test_v, 'SESSION', IndexS, ix=0)

        test_v = np.array([testname])
        sidd.addColumn(test_v, 'TEST', IndexS, ix=0)

        productspath = os.path.join(inventoryitem['resroot'], 'products')

        # meta-fit (injection-curve model) results, cached under a key
        # unique to (test, block, session, repetition)
        metafitcdp_pick = os.path.join(
            productspath, os.path.split(sidd.products['METAFIT_CDP'])[-1])
        metafitcdp = files.cPickleRead(metafitcdp_pick)
        metafit = copy.deepcopy(metafitcdp['data']['ANALYSIS'])
        metafitkey = '%s_%s_%s_%i' % (testname, block, session, jrep + 1)
        self.products['METAFIT'][metafitkey] = copy.deepcopy(metafit)
        metafitkey_v = np.array([metafitkey])
        sidd.addColumn(metafitkey_v, 'METAFIT', IndexS, ix=0)

        metacdp_pick = os.path.join(productspath, os.path.split(
            sidd.products['META_CDP'])[-1])  # change to META_CDP
        metacdp = files.cPickleRead(metacdp_pick)
        meta = metacdp['data']['ANALYSIS']  # this is a pandas DataFrame

        tmp_v_CQ = np.zeros((1, NCCDs, NQuads))

        bgd_adu_v = tmp_v_CQ.copy()
        ig1_thresh_v = tmp_v_CQ.copy()
        ig1_notch_v = tmp_v_CQ.copy()
        slope_v = tmp_v_CQ.copy()
        n_adu_v = tmp_v_CQ.copy()

        for iCCD, CCDk in enumerate(CCDkeys):
            for kQ, Q in enumerate(self.Quads):
                ixloc = np.where((meta['CCD'] == iCCD + 1) &
                                 (meta['Q'] == kQ + 1))
                bgd_adu_v[0, iCCD, kQ] = meta['BGD_ADU'][ixloc[0][0]]
                ig1_thresh_v[0, iCCD, kQ] = meta['IG1_THRESH'][ixloc[0][0]]
                ig1_notch_v[0, iCCD, kQ] = meta['IG1_NOTCH'][ixloc[0][0]]
                slope_v[0, iCCD, kQ] = meta['S'][ixloc[0][0]]
                n_adu_v[0, iCCD, kQ] = meta['N_ADU'][ixloc[0][0]]

        sidd.addColumn(bgd_adu_v, 'FIT_BGD_ADU', IndexCQ)
        sidd.addColumn(ig1_thresh_v, 'FIT_IG1_THRESH', IndexCQ)
        sidd.addColumn(ig1_notch_v, 'FIT_IG1_NOTCH', IndexCQ)
        sidd.addColumn(slope_v, 'FIT_SLOPE', IndexCQ)
        sidd.addColumn(n_adu_v, 'FIT_N_ADU', IndexCQ)

        # charge injection profiles

        verprofspick = os.path.join(
            productspath, os.path.split(sidd.products['PROFS_ALCOL'])[-1])
        verprofs = files.cPickleRead(verprofspick)
        vprofkey = '%s_%s_%s_%i' % (testname, block, session, jrep + 1)
        self.products['VERPROFILES'][vprofkey] = verprofs.copy()
        vprofskeys_v = np.zeros((1), dtype='U50')
        vprofskeys_v[0] = vprofkey
        sidd.addColumn(vprofskeys_v, 'VERPROFS_KEY', IndexS)

        horprofspick = os.path.join(
            productspath, os.path.split(sidd.products['PROFS_ALROW'])[-1])
        horprofs = files.cPickleRead(horprofspick)
        hprofkey = '%s_%s_%s_%i' % (testname, block, session, jrep + 1)
        self.products['HORPROFILES'][hprofkey] = horprofs.copy()
        hprofskeys_v = np.zeros((1), dtype='U50')
        hprofskeys_v[0] = hprofkey
        sidd.addColumn(hprofskeys_v, 'HORPROFS_KEY', IndexS)

        # flatten sidd to table
        sit = sidd.flattentoTable()

        return sit

    def _get_extractor_NOTCH_fromPT(self, units):
        """Return an extractor callable giving the median notch injection
        level for (block, CCD, Q), in 'ADU' or electrons ('E')."""
        def _extract_NOTCH_fromPT(PT, block, CCDk, Q):
            ixblock = self.get_ixblock(PT, block)
            column = 'FIT_N_ADU_%s_Quad%s' % (CCDk, Q)

            if units == 'ADU':
                unitsConvFactor = 1
            elif units == 'E':
                # gain [e-/ADU] from the PTC01 products
                unitsConvFactor = self.cdps['GAIN'][block][CCDk][Q][0]

            Notch = np.nanmedian(PT[column][ixblock]) * unitsConvFactor
            return Notch

        return _extract_NOTCH_fromPT

    def _get_injcurve(self, _chfitdf, ixCCD, ixQ, IG1raw, gain):
        """Evaluate the fitted injection-vs-IG1 model for one quadrant and
        return the injection level in kilo-electrons."""
        ixsel = np.where((_chfitdf['CCD'] == ixCCD) & (_chfitdf['Q'] == ixQ))

        pars = ['BGD', 'K', 'XT', 'XN', 'A', 'N']
        trans = dict(BGD='b', K='k', XT='xt', XN='xN', A='a', N='N')

        parsdict = dict()
        for par in pars:
            parsdict[trans[par]] = _chfitdf[par].values[ixsel][0]

        parsdict['IG1'] = IG1raw.copy()

        # model output is a fraction of full well; scale to 16-bit ADU
        inj = ilib.f_Inj_vs_IG1_ReLU(**parsdict) * 2.**16  # ADU
        inj_kel = inj * gain / 1.E3

        return inj_kel

    def _get_CHIG1_MAP_from_PT(self, kind='CAL'):
        """Build an FPA map of injection curves vs. IG1 ('CAL'ibrated or
        'RAW' voltages) from the parsed table."""
        CHIG1MAP = OrderedDict()
        CHIG1MAP['labelkeys'] = self.Quads

        PT = self.ParsedTable['CHINJ01']
        column = 'METAFIT'

        # IG1 sampling grid of the fitted model
        IG1s = [2.5, 6.75]
        dIG1 = 0.05
        NIG1 = (IG1s[1] - IG1s[0]) / dIG1 + 1
        IG1raw = np.arange(NIG1) * dIG1 + IG1s[0]

        for jY in range(self.NSLICES_FPA):
            for iX in range(self.NCOLS_FPA):
                Ckey = 'C_%i%i' % (jY + 1, iX + 1)
                CHIG1MAP[Ckey] = OrderedDict()

                locator = self.fpa.FPA_MAP[Ckey]
                block = locator[0]
                CCDk = locator[1]
                jCCD = int(CCDk[-1])

                ixblock = np.where(PT['BLOCK'] == block)

                # blocks without data get empty curve containers
                if len(ixblock[0]) == 0:
                    CHIG1MAP[Ckey] = OrderedDict(x=OrderedDict(),
                                                 y=OrderedDict())
                    for Q in self.Quads:
                        CHIG1MAP[Ckey]['x'][Q] = []
                        CHIG1MAP[Ckey]['y'][Q] = []
                    continue

                _chkey = PT[column][ixblock][0]
                _chfitdf = self.products['METAFIT'][_chkey]

                _ccd_chfitdict = OrderedDict(x=OrderedDict(),
                                             y=OrderedDict())

                for kQ, Q in enumerate(self.Quads):
                    roeVCal = self.roeVCals[block]
                    IG1cal = roeVCal.fcal_HK(IG1raw, 'IG1', jCCD, Q)
                    gain = self.cdps['GAIN'][block][CCDk][Q][0]
                    inj_kel = self._get_injcurve(_chfitdf, jCCD, kQ + 1,
                                                 IG1raw, gain)

                    if kind == 'CAL':
                        _IG1 = IG1cal.copy()
                    elif kind == 'RAW':
                        _IG1 = IG1raw.copy()

                    _ccd_chfitdict['x'][Q] = _IG1.copy()
                    _ccd_chfitdict['y'][Q] = inj_kel.copy()

                CHIG1MAP[Ckey] = _ccd_chfitdict.copy()

        return CHIG1MAP

    def _get_XYdict_INJ(self, kind='CAL'):
        """Build an x/y dict of injection curves (one entry per
        block/CCD/quadrant) for single-panel plotting."""
        x = dict()
        y = dict()

        PT = self.ParsedTable['CHINJ01']
        column = 'METAFIT'

        IG1s = [2.5, 6.75]
        dIG1 = 0.05
        NIG1 = (IG1s[1] - IG1s[0]) / dIG1 + 1
        IG1raw = np.arange(NIG1) * dIG1 + IG1s[0]

        labelkeys = []

        for block in self.flight_blocks:
            ixblock = np.where(PT['BLOCK'] == block)
            ch_key = PT[column][ixblock][0]
            chfitdf = self.products['METAFIT'][ch_key]

            for iCCD, CCD in enumerate(self.CCDs):
                CCDk = 'CCD%i' % CCD
                for kQ, Q in enumerate(self.Quads):
                    roeVCal = self.roeVCals[block]
                    IG1cal = roeVCal.fcal_HK(IG1raw, 'IG1', iCCD + 1, Q)
                    gain = self.cdps['GAIN'][block][CCDk][Q][0]

                    if kind == 'CAL':
                        _IG1 = IG1cal.copy()
                    elif kind == 'RAW':
                        _IG1 = IG1raw.copy()

                    pkey = '%s_%s_%s' % (block, CCDk, Q)
                    inj_kel = self._get_injcurve(chfitdf, iCCD + 1, kQ + 1,
                                                 IG1raw, gain)

                    x[pkey] = _IG1.copy()
                    y[pkey] = inj_kel.copy()
                    labelkeys.append(pkey)

        CHdict = dict(x=x, y=y, labelkeys=labelkeys)

        return CHdict

    def _extract_INJCURVES_PAR_fromPT(self, PT, block, CCDk, Q):
        """Return the fitted injection-curve parameters for one quadrant as
        a dict of formatted strings (for the JSON CDP)."""
        ixblock = self.get_ixblock(PT, block)
        column = 'METAFIT'
        ch_key = PT[column][ixblock][0]
        chfitdf = self.products['METAFIT'][ch_key]

        ixCCD = ['CCD1', 'CCD2', 'CCD3'].index(CCDk) + 1
        ixQ = ['E', 'F', 'G', 'H'].index(Q) + 1

        ixsel = np.where((chfitdf['CCD'] == ixCCD) & (chfitdf['Q'] == ixQ))

        pars = ['BGD', 'K', 'XT', 'XN', 'A', 'N']
        trans = dict(BGD='b', K='k', XT='xt', XN='xN', A='a', N='N')

        parsdict = dict()
        for par in pars:
            parsdict[trans[par]] = '%.3e' % chfitdf[par].values[ixsel][0]

        return parsdict

    def _get_XYdict_PROFS(self, proftype, IG1=4.5, Quads=None, doNorm=False,
                          xrangeNorm=None):
        """Build an x/y dict of injection profiles ('HOR' or 'VER') at the
        given IG1, optionally normalized by the (windowed) median."""
        if Quads is None:
            Quads = self.Quads

        x = dict()
        y = dict()
        labelkeys = []

        PT = self.ParsedTable['CHINJ01']

        profcol = '%sPROFS_KEY' % proftype.upper()
        prodkey = '%sPROFILES' % proftype.upper()

        for block in self.flight_blocks:
            ixsel = np.where(PT['BLOCK'] == block)
            prof_key = PT[profcol][ixsel][0]

            i_Prof = self.products[prodkey][prof_key].copy()

            IG1key = 'IG1_%.2fV' % IG1

            for iCCD, CCD in enumerate(self.CCDs):
                CCDk = 'CCD%i' % CCD
                for kQ, Q in enumerate(Quads):
                    pkey = '%s_%s_%s' % (block, CCDk, Q)

                    _pcq = i_Prof['data'][CCDk][Q].copy()

                    _x = _pcq['x'][IG1key].copy()
                    _y = _pcq['y'][IG1key].copy()

                    x[pkey] = _x
                    if doNorm:
                        if xrangeNorm is not None:
                            norm = np.nanmedian(_y[xrangeNorm[0]:xrangeNorm[1]])
                        else:
                            norm = np.nanmedian(_y)
                        y[pkey] = _y / norm
                    else:
                        # fixed: previously referenced an undefined `norm`
                        # when doNorm was False (NameError)
                        y[pkey] = _y
                    labelkeys.append(pkey)

        Pdict = dict(x=x, y=y, labelkeys=labelkeys)

        return Pdict

    def init_fignames(self):
        """Declare output figure paths under self.figspath."""
        if not os.path.exists(self.figspath):
            os.makedirs(self.figspath)  # fixed: was os.system('mkdir ...')

        self.figs['NOTCH_ADU_MAP'] = os.path.join(
            self.figspath, 'NOTCH_ADU_MAP.png')
        self.figs['NOTCH_ELE_MAP'] = os.path.join(
            self.figspath, 'NOTCH_ELE_MAP.png')
        self.figs['CHINJ01_curves_IG1_RAW'] = os.path.join(
            self.figspath, 'CHINJ01_CURVES_IG1_RAW.png')
        self.figs['CHINJ01_curves_IG1_CAL'] = os.path.join(
            self.figspath, 'CHINJ01_CURVES_IG1_CAL.png')
        self.figs['CHINJ01_curves_MAP_IG1_CAL'] = os.path.join(
            self.figspath, 'CHINJ01_CURVES_MAP_IG1_CAL.png')

        for proftype in ['ver', 'hor']:
            for ccdhalf in ['top', 'bot']:
                figkey = 'PROFS_%s_%s' % (proftype.upper(), ccdhalf.upper())
                self.figs[figkey] = os.path.join(
                    self.figspath,
                    'CHINJ01_%s_%s_PROFILES.png' % (proftype.upper(),
                                                    ccdhalf.upper()))

        for ccdhalf in ['top', 'bot']:
            figkey = 'PROFS_ver_%s_ZOOM' % (ccdhalf.upper(),)
            self.figs[figkey] = os.path.join(
                self.figspath,
                'CHINJ01_ver_%s_ZOOM_PROFILES.png' % (ccdhalf.upper(),))

    def init_outcdpnames(self):
        """Declare output CDP file names under self.cdpspath."""
        if not os.path.exists(self.cdpspath):
            os.makedirs(self.cdpspath)  # fixed: was os.system('mkdir ...')

        self.outcdps['INJCURVES'] = 'CHINJ01_INJCURVES_PAR.json'
        self.outcdps['INJPROF_XLSX_HOR'] = 'CHINJ01_INJPROFILES_HOR.xlsx'
        self.outcdps['INJPROF_XLSX_VER'] = 'CHINJ01_INJPROFILES_VER.xlsx'
        self.outcdps['INJPROF_FITS_HOR'] = 'CHINJ01_INJPROFILES_HOR.fits'
        self.outcdps['INJPROF_FITS_VER'] = 'CHINJ01_INJPROFILES_VER.fits'

    def _extract_NUNHOR_fromPT(self, PT, block, CCDk, Q):
        """Return the non-uniformity (rms/mean, percent) of the horizontal
        injection profile at IG1=4.5 V for one quadrant."""
        IG1 = 4.5
        ixblock = self.get_ixblock(PT, block)
        profcol = 'HORPROFS_KEY'
        prodkey = 'HORPROFILES'

        prof_key = PT[profcol][ixblock][0]
        i_Prof = self.products[prodkey][prof_key].copy()

        IG1key = 'IG1_%.2fV' % IG1
        _pcq = i_Prof['data'][CCDk][Q].copy()
        _y = _pcq['y'][IG1key].copy()

        return np.nanstd(_y) / np.nanmean(_y) * 100.

    def _get_injprof_dfdict(self, direction, pandice=False):
        """Collect injection profiles ('hor'/'ver') for all blocks into
        dicts (optionally pandas DataFrames); also return sorted IG1
        values."""
        injprofs = OrderedDict()
        Quads = self.Quads

        PT = self.ParsedTable['CHINJ01']

        profcol = '{}PROFS_KEY'.format(direction.upper())
        prodkey = '{}PROFILES'.format(direction.upper())

        for ib, block in enumerate(self.flight_blocks):
            injprofs[block] = OrderedDict()
            ixsel = np.where(PT['BLOCK'] == block)
            prof_key = PT[profcol][ixsel][0]
            i_Prof = self.products[prodkey][prof_key].copy()

            if ib == 0:
                # derive the sorted list of IG1 settings once, from the
                # first block (assumed identical across blocks)
                rawIG1keys = list(i_Prof['data']['CCD1']['E']['x'].keys())
                IG1values = [float(item.replace('IG1_', '').replace('V', ''))
                             for item in rawIG1keys]
                _order = np.argsort(IG1values)
                IG1keys = np.array(rawIG1keys)[_order].tolist()
                IG1values = np.array(IG1values)[_order].tolist()

            for IG1key in IG1keys:
                for iCCD, CCD in enumerate(self.CCDs):
                    CCDk = 'CCD%i' % CCD
                    Ckey = self.fpa.get_Ckey_from_BlockCCD(block, CCD)
                    for kQ, Q in enumerate(Quads):
                        _pcq = i_Prof['data'][CCDk][Q].copy()
                        _x = _pcq['x'][IG1key].copy()
                        _y = _pcq['y'][IG1key].copy()
                        #_y /= np.nanmedian(_y)
                        if iCCD == 0 and kQ == 0:
                            injprofs[block]['pixel'] = _x.copy()
                        injprofs[block]['%s_%s_%s' %
                                        (Ckey, Q, IG1key)] = _y.copy()

        if pandice:
            for block in self.flight_blocks:
                injprofs[block] = pd.DataFrame.from_dict(injprofs[block])

        return injprofs, IG1values

    def get_injprof_xlsx_cdp(self, direction, inCDP_header=None):
        """Build the XLSX CDP with the injection profiles along
        *direction* ('hor'/'ver')."""
        CDP_header = OrderedDict()
        # fixed: the condition used to test CDP_header (always truthy),
        # so CDP_header.update(None) raised TypeError when inCDP_header
        # was left at its default
        if inCDP_header is not None:
            CDP_header.update(inCDP_header)

        cdpname = self.outcdps['INJPROF_XLSX_%s' % direction.upper()]
        path = self.cdpspath

        injprof_cdp = cdp.Tables_CDP()
        injprof_cdp.rootname = os.path.splitext(cdpname)[0]
        injprof_cdp.path = path

        injprofs_meta = OrderedDict()

        injprofs, IG1values = self._get_injprof_dfdict(direction,
                                                       pandice=True)

        injprofs_meta['IG1'] = IG1values.__repr__()
        #injprofs_meta['norm'] = 'median'

        injprof_cdp.ingest_inputs(data=injprofs.copy(),
                                  meta=injprofs_meta.copy(),
                                  header=CDP_header.copy())

        injprof_cdp.init_wb_and_fillAll(
            header_title='CHINJ01: INJPROFS-%s' % direction.upper())

        return injprof_cdp

    def get_injprof_fits_cdp(self, direction, inCDP_header=None):
        """Build the FITS CDP with the injection profiles along
        *direction* ('hor'/'ver')."""
        CDP_header = OrderedDict()
        if inCDP_header is not None:
            CDP_header.update(inCDP_header)

        cdpname = self.outcdps['INJPROF_FITS_%s' % direction.upper()]
        path = self.cdpspath

        injprof_cdp = cdp.FitsTables_CDP()
        injprof_cdp.rootname = os.path.splitext(cdpname)[0]
        injprof_cdp.path = path

        injprofs_meta = OrderedDict()

        injprofs, IG1values = self._get_injprof_dfdict(direction,
                                                       pandice=False)

        injprofs_meta['IG1'] = IG1values.__repr__()
        #injprofs_meta['norm'] = 'median'

        CDP_header = self.FITSify_CDP_header(CDP_header)

        injprof_cdp.ingest_inputs(data=injprofs.copy(),
                                  meta=injprofs_meta.copy(),
                                  header=CDP_header.copy())

        injprof_cdp.init_HL_and_fillAll()

        injprof_cdp.hdulist[0].header.insert(
            list(CDP_header.keys())[0],
            ('title', 'CHINJ01: INJPROFS-%s' % direction.upper()))

        return injprof_cdp

    def dump_aggregated_results(self):
        """Produce all aggregated CHINJ01 figures, CDPs and report
        entries."""

        if self.report is not None:
            self.report.add_Section(keyword='dump',
                                    Title='Aggregated Results', level=0)

            self.add_DataAlbaran2Report()

        function, module = utils.get_function_module()
        CDP_header = self.CDP_header.copy()
        CDP_header.update(dict(function=function, module=module))
        CDP_header['DATE'] = self.get_time_tag()

        # Histogram of Slopes [ADU/electrons]
        # Histogram of Notch [ADU/electrons]
        # Histogram of IG1_THRESH

        # Injection level vs. Calibrated IG1, MAP
        CURVES_IG1CAL_MAP = self._get_CHIG1_MAP_from_PT(kind='CAL')

        figkey1 = 'CHINJ01_curves_MAP_IG1_CAL'
        figname1 = self.figs[figkey1]

        self.plot_XYMAP(CURVES_IG1CAL_MAP, **dict(
            suptitle='Charge Injection Curves - Calibrated IG1',
            doLegend=True,
            ylabel='Inj [kel]',
            xlabel='IG1 [V]',
            corekwargs=dict(E=dict(linestyle='-', marker='', color='r'),
                            F=dict(linestyle='-', marker='', color='g'),
                            G=dict(linestyle='-', marker='', color='b'),
                            H=dict(linestyle='-', marker='', color='m')),
            figname=figname1
        ))

        if self.report is not None:
            self.addFigure2Report(
                figname1, figkey=figkey1,
                caption='CHINJ01: Charge injection level [ke-] as a function of '
                        'calibrated IG1 voltage.',
                texfraction=0.7)

        # saving charge injection parameters to a json CDP

        ICURVES_PAR_MAP = self.get_FPAMAP_from_PT(
            self.ParsedTable['CHINJ01'],
            extractor=self._extract_INJCURVES_PAR_fromPT)

        ic_header = OrderedDict()
        ic_header['title'] = 'Injection Curves Parameters'
        ic_header['test'] = 'CHINJ01'
        ic_header.update(CDP_header)

        ic_meta = OrderedDict()
        # fixed: a trailing comma made this value a 1-tuple
        ic_meta['units'] = '/2^16 ADU'
        ic_meta['model'] = 'I=b+1/(1+exp(-K(IG1-XT))) * (-A*(IG1-XN)[IG1<XN] + N)'
        ic_meta['structure'] = ''

        ic_cdp = cdp.Json_CDP(rootname=self.outcdps['INJCURVES'],
                              path=self.cdpspath)

        ic_cdp.ingest_inputs(data=ICURVES_PAR_MAP,
                             header=ic_header,
                             meta=ic_meta)
        ic_cdp.savehardcopy()

        # Injection level vs. Calibrated IG1, single plot

        IG1CAL_Singledict = self._get_XYdict_INJ(kind='CAL')

        figkey2 = 'CHINJ01_curves_IG1_CAL'
        figname2 = self.figs[figkey2]

        IG1CAL_kwargs = dict(
            title='Charge Injection Curves - Calibrated IG1',
            doLegend=False,
            xlabel='IG1 (Calibrated) [V]',
            ylabel='Injection [kel]',
            figname=figname2)

        corekwargs = dict()
        for block in self.flight_blocks:
            for iCCD in self.CCDs:
                corekwargs['%s_CCD%i_E' % (block, iCCD)] = dict(
                    linestyle='-', marker='', color='#FF4600')  # red
                corekwargs['%s_CCD%i_F' % (block, iCCD)] = dict(
                    linestyle='-', marker='', color='#61FF00')  # green
                corekwargs['%s_CCD%i_G' % (block, iCCD)] = dict(
                    linestyle='-', marker='', color='#00FFE0')  # cyan
                corekwargs['%s_CCD%i_H' % (block, iCCD)] = dict(
                    linestyle='-', marker='', color='#1700FF')  # blue
        IG1CAL_kwargs['corekwargs'] = corekwargs.copy()

        self.plot_XY(IG1CAL_Singledict, **IG1CAL_kwargs)

        if self.report is not None:
            self.addFigure2Report(
                figname2, figkey=figkey2,
                caption='CHINJ01: Charge injection level [ke-] as a function of '
                        'calibrated IG1 voltage.',
                texfraction=0.7)

        # Injection level vs. Non-Calibrated IG1, single plot

        IG1RAW_Singledict = self._get_XYdict_INJ(kind='RAW')

        figkey3 = 'CHINJ01_curves_IG1_RAW'
        figname3 = self.figs[figkey3]

        IG1RAW_kwargs = dict(
            title='Charge Injection Curves - RAW IG1',
            doLegend=False,
            xlabel='IG1 (RAW) [V]',
            ylabel='Injection [kel]',
            figname=figname3)

        corekwargs = dict()
        for block in self.flight_blocks:
            for iCCD in self.CCDs:
                corekwargs['%s_CCD%i_E' % (block, iCCD)] = dict(
                    linestyle='-', marker='', color='#FF4600')  # red
                corekwargs['%s_CCD%i_F' % (block, iCCD)] = dict(
                    linestyle='-', marker='', color='#61FF00')  # green
                corekwargs['%s_CCD%i_G' % (block, iCCD)] = dict(
                    linestyle='-', marker='', color='#00FFE0')  # cyan
                corekwargs['%s_CCD%i_H' % (block, iCCD)] = dict(
                    linestyle='-', marker='', color='#1700FF')  # blue
        IG1RAW_kwargs['corekwargs'] = corekwargs.copy()

        self.plot_XY(IG1RAW_Singledict, **IG1RAW_kwargs)

        if self.report is not None:
            self.addFigure2Report(
                figname3, figkey=figkey3,
                caption='CHINJ01: Charge injection level [ke-] as a function of '
                        'Non-calibrated IG1 voltage.',
                texfraction=0.7)

        # Notch level vs. calibrated IG2
        # Notch level vs. calibrated IDL
        # Notch level vs. calibrated OD

        # Notch injection map, ADUs

        NOTCHADUMAP = self.get_FPAMAP_from_PT(
            self.ParsedTable['CHINJ01'],
            extractor=self._get_extractor_NOTCH_fromPT(units='ADU'))

        figkey4 = 'NOTCH_ADU_MAP'
        figname4 = self.figs[figkey4]

        self.plot_SimpleMAP(NOTCHADUMAP, **dict(
            suptitle='CHINJ01: NOTCH INJECTION [ADU]',
            ColorbarText='ADU',
            figname=figname4))

        if self.report is not None:
            self.addFigure2Report(
                figname4, figkey=figkey4,
                caption='CHINJ01: notch injection level, in ADU.',
                texfraction=0.7)

        # Notch injection map, ELECTRONs

        NOTCHEMAP = self.get_FPAMAP_from_PT(
            self.ParsedTable['CHINJ01'],
            extractor=self._get_extractor_NOTCH_fromPT(units='E'))

        figkey5 = 'NOTCH_ELE_MAP'
        figname5 = self.figs[figkey5]

        self.plot_SimpleMAP(NOTCHEMAP, **dict(
            suptitle='CHINJ01: NOTCH INJECTION [ELECTRONS]',
            ColorbarText='electrons',
            figname=figname5))

        if self.report is not None:
            self.addFigure2Report(
                figname5, figkey=figkey5,
                caption='CHINJ01: notch injection level, in electrons.',
                texfraction=0.7)

        # Average injection profiles

        IG1profs = 4.5
        xlabels_profs = dict(hor='column [pix]',
                             ver='row [pix]')
        ylabels_profs = dict(hor='Injection level [Normalized]',
                             ver='Injection level [ADU]',)

        proftypes = ['hor', 'ver']
        ccdhalves = ['top', 'bot']

        BLOCKcolors = cm.rainbow(np.linspace(0, 1, len(self.flight_blocks)))

        pointcorekwargs = dict()
        for jblock, block in enumerate(self.flight_blocks):
            jcolor = BLOCKcolors[jblock]
            for iCCD in self.CCDs:
                for kQ in self.Quads:
                    pointcorekwargs['%s_CCD%i_%s' % (block, iCCD, kQ)] = dict(
                        linestyle='', marker='.', color=jcolor, ms=2.0)

        for ccdhalf in ccdhalves:

            if ccdhalf == 'top':
                _Quads = ['G', 'H']
            elif ccdhalf == 'bot':
                _Quads = ['E', 'F']

            for proftype in proftypes:

                if proftype == 'hor':
                    xrangeNorm = None
                elif proftype == 'ver':
                    xrangeNorm = [10, 20]

                XY_profs = self._get_XYdict_PROFS(
                    proftype=proftype, IG1=IG1profs, Quads=_Quads,
                    doNorm=True, xrangeNorm=xrangeNorm)

                figkey6 = 'PROFS_%s_%s' % (proftype.upper(),
                                           ccdhalf.upper())
                figname6 = self.figs[figkey6]

                # fixed: a trailing comma made the title a 1-tuple
                title = 'CHINJ01: Direction: %s, CCDHalf: %s' % \
                    (proftype.upper(), ccdhalf.upper())

                if proftype == 'ver':
                    xlim = [0, 50]
                    ylim = None
                elif proftype == 'hor':
                    xlim = None
                    ylim = [0.5, 1.5]

                profkwargs = dict(
                    title=title,
                    doLegend=False,
                    xlabel=xlabels_profs[proftype],
                    xlim=xlim,
                    ylim=ylim,
                    ylabel=ylabels_profs[proftype],
                    figname=figname6,
                    corekwargs=pointcorekwargs)

                self.plot_XY(XY_profs, **profkwargs)

                if proftype == 'ver':
                    captemp = 'CHINJ01: Average (normalized) injection profiles in vertical direction (along CCD columns) ' +\
                        'for IG1=%.2fV. Only the 2 channels in the CCD %s-half are shown ' +\
                        '(%s, %s). Each colour corresponds to a ' +\
                        'different block (2x3 quadrant-channels in each colour).'
                elif proftype == 'hor':
                    captemp = 'CHINJ01: Average injection profiles in horizontal direction (along CCD rows) ' +\
                        'for IG1=%.2fV. The profiles have been normalized by the median injection level. ' +\
                        'Only the 2 channels in the CCD %s-half are shown (%s, %s). Each colour corresponds to a ' +\
                        'different block (2x3 quadrant-channels in each colour).'

                if self.report is not None:
                    self.addFigure2Report(
                        figname6, figkey=figkey6,
                        caption=captemp % (IG1profs, ccdhalf,
                                           _Quads[0], _Quads[1]),
                        texfraction=0.7)

        # Average injection vertical profiles, zoomed in to highlight
        # non-perfect charge injection shut-down.

        pointcorekwargs = dict()
        for jblock, block in enumerate(self.flight_blocks):
            jcolor = BLOCKcolors[jblock]
            for iCCD in self.CCDs:
                for kQ in self.Quads:
                    pointcorekwargs['%s_CCD%i_%s' % (block, iCCD, kQ)] = dict(
                        linestyle='', marker='.', color=jcolor, ms=2.0)

        for ccdhalf in ccdhalves:

            if ccdhalf == 'top':
                _Quads = ['G', 'H']
            elif ccdhalf == 'bot':
                _Quads = ['E', 'F']

            XY_profs = self._get_XYdict_PROFS(
                proftype='ver', IG1=IG1profs, Quads=_Quads,
                doNorm=True, xrangeNorm=[10, 20])

            figkey7 = 'PROFS_ver_%s_ZOOM' % (ccdhalf.upper(),)
            figname7 = self.figs[figkey7]

            # fixed: a trailing comma made the title a 1-tuple
            title = 'CHINJ01: Direction: ver, CCDHalf: %s, ZOOM-in' % \
                (ccdhalf.upper(),)

            xlim = [25, 50]
            ylim = [0, 4.e-3]

            profkwargs = dict(
                title=title,
                doLegend=False,
                # fixed: labels were indexed with the stale loop variable
                # `proftype` (happened to be 'ver' after the loop above)
                xlabel=xlabels_profs['ver'],
                xlim=xlim,
                ylim=ylim,
                ylabel=ylabels_profs['ver'],
                figname=figname7,
                corekwargs=pointcorekwargs)

            self.plot_XY(XY_profs, **profkwargs)

            captemp = 'CHINJ01: Average injection profiles in vertical direction (along CCD columns) ' +\
                'for IG1=%.2fV. Only the 2 channels in the CCD %s-half are shown ' +\
                '(%s, %s). Each colour corresponds to a ' +\
                'different block (2x3 quadrant-channels in each colour). Zoomed in ' +\
                'to highlight injection shutdown profile.'

            if self.report is not None:
                self.addFigure2Report(
                    figname7, figkey=figkey7,
                    caption=captemp % (IG1profs, ccdhalf,
                                       _Quads[0], _Quads[1]),
                    texfraction=0.7)

        # creating and saving INJ PROFILES CDPs.

        for direction in ['hor', 'ver']:
            _injprof_xlsx_cdp = self.get_injprof_xlsx_cdp(
                direction=direction, inCDP_header=CDP_header)
            _injprof_xlsx_cdp.savehardcopy()

            _injprof_fits_cdp = self.get_injprof_fits_cdp(
                direction=direction, inCDP_header=CDP_header)
            _injprof_fits_cdp.savehardcopy()

        # reporting non-uniformity of injection lines to report

        if self.report is not None:
            NUN_HOR = self.get_FPAMAP_from_PT(
                self.ParsedTable['CHINJ01'],
                extractor=self._extract_NUNHOR_fromPT)

            nun_cdpdict = dict(
                caption='CHINJ01: Non-Uniformity of the injection lines, rms, as percentage.',
                valformat='%.2f')

            ignore = self.add_StdQuadsTable2Report(Matrix=NUN_HOR,
                                                   cdpdict=nun_cdpdict)
""" MagPy IAGA02 input filter Written by Roman Leonhardt June 2012 - contains test, read and write function """ from __future__ import print_function from __future__ import unicode_literals from __future__ import absolute_import from __future__ import division from io import open from magpy.stream import * MISSING_DATA = 99999 NOT_REPORTED = 88888 def isIAGA(filename): """ Checks whether a file is ASCII IAGA 2002 format. """ try: temp = open(filename, 'rt').readline() except: return False try: if not temp.startswith(' Format'): return False if not 'IAGA-2002' in temp: return False except: return False return True def readIAGA(filename, headonly=False, **kwargs): """ Reading IAGA2002 format data. """ starttime = kwargs.get('starttime') endtime = kwargs.get('endtime') debug = kwargs.get('debug') getfile = True array = [[] for key in KEYLIST] fh = open(filename, 'rt') # read file and split text into channels stream = DataStream() # Check whether header infromation is already present headers = {} data = [] key = None try: # get day from filename (platform independent) theday = extractDateFromString(filename)[0] day = datetime.strftime(theday,"%Y-%m-%d") # Select only files within eventually defined time range if starttime: if not datetime.strptime(day,'%Y-%m-%d') >= datetime.strptime(datetime.strftime(stream._testtime(starttime),'%Y-%m-%d'),'%Y-%m-%d'): getfile = False if endtime: if not datetime.strptime(day,'%Y-%m-%d') <= datetime.strptime(datetime.strftime(stream._testtime(endtime),'%Y-%m-%d'),'%Y-%m-%d'): getfile = False except: logging.warning("Could not identify typical IAGA date for %s. 
Reading all ...".format(filename)) getfile = True if getfile: loggerlib.info('Read: %s Format: %s ' % (filename, "IAGA2002")) dfpos = KEYLIST.index('df') for line in fh: if line.isspace(): # blank line continue elif line.startswith(' '): # data info infoline = line[:-4] key = infoline[:23].strip() val = infoline[23:].strip() if key.find('Source') > -1: if not val == '': stream.header['StationInstitution'] = val if key.find('Station') > -1: if not val == '': stream.header['StationName'] = val if key.find('IAGA') > -1: if not val == '': stream.header['StationIAGAcode'] = val stream.header['StationID'] = val if key.find('Latitude') > -1: if not val == '': stream.header['DataAcquisitionLatitude'] = val if key.find('Longitude') > -1: if not val == '': stream.header['DataAcquisitionLongitude'] = val if key.find('Elevation') > -1: if not val == '': stream.header['DataElevation'] = val if key.find('Format') > -1: if not val == '': stream.header['DataFormat'] = val if key.find('Reported') > -1: if not val == '': stream.header['DataComponents'] = val if key.find('Orientation') > -1: if not val == '': stream.header['DataSensorOrientation'] = val if key.find('Digital') > -1: if not val == '': stream.header['DataDigitalSampling'] = val if key.find('Interval') > -1: if not val == '': stream.header['DataSamplingFilter'] = val if key.startswith(' #'): if key.find('# V-Instrument') > -1: if not val == '': stream.header['SensorID'] = val elif key.find('# PublicationDate') > -1: if not val == '': stream.header['DataPublicationDate'] = val else: print ("formatIAGA: did not import optional header info {a}".format(a=key)) if key.find('Data Type') > -1: if not val == '': if val[0] in ['d','D']: stream.header['DataPublicationLevel'] = '4' elif val[0] in ['q','Q']: stream.header['DataPublicationLevel'] = '3' elif val[0] in ['p','P']: stream.header['DataPublicationLevel'] = '2' else: stream.header['DataPublicationLevel'] = '1' if key.find('Publication Date') > -1: if not val == '': 
stream.header['DataPublicationDate'] = val elif line.startswith('DATE'): # data header colsstr = line.lower().split() varstr = '' for it, elem in enumerate(colsstr): if it > 2: varstr += elem[-1] varstr = varstr[:4] stream.header["col-x"] = varstr[0].upper() stream.header["col-y"] = varstr[1].upper() stream.header["col-z"] = varstr[2].upper() stream.header["unit-col-x"] = 'nT' stream.header["unit-col-y"] = 'nT' stream.header["unit-col-z"] = 'nT' stream.header["unit-col-f"] = 'nT' if varstr.endswith('g'): stream.header["unit-col-df"] = 'nT' stream.header["col-df"] = 'G' stream.header["col-f"] = 'F' else: stream.header["col-f"] = 'F' if varstr in ['dhzf','dhzg']: #stream.header["col-x"] = 'H' #stream.header["col-y"] = 'D' #stream.header["col-z"] = 'Z' stream.header["unit-col-y"] = 'deg' stream.header['DataComponents'] = 'HDZF' elif varstr in ['ehzf','ehzg']: #stream.header["col-x"] = 'H' #stream.header["col-y"] = 'E' #stream.header["col-z"] = 'Z' stream.header['DataComponents'] = 'HEZF' elif varstr in ['dhif','dhig']: stream.header["col-x"] = 'I' stream.header["col-y"] = 'D' stream.header["col-z"] = 'F' stream.header["unit-col-x"] = 'deg' stream.header["unit-col-y"] = 'deg' stream.header['DataComponents'] = 'IDFF' elif varstr in ['hdzf','hdzg']: #stream.header["col-x"] = 'H' #stream.header["col-y"] = 'D' stream.header["unit-col-y"] = 'deg' #stream.header["col-z"] = 'Z' stream.header['DataComponents'] = 'HDZF' else: #stream.header["col-x"] = 'X' #stream.header["col-y"] = 'Y' #stream.header["col-z"] = 'Z' stream.header['DataComponents'] = 'XYZF' elif headonly: # skip data for option headonly continue elif line.startswith('%'): pass else: # data entry - may be written in multiple columns # row beinhaltet die Werte eine Zeile # transl. row values contains a line row=[] # Verwende das letzte Zeichen von "line" nicht, d.h. line[:-1], # da darin der Zeilenumbruch "\n" steht # transl. Do not use the last character of "line", d.h. 
line [:-1], # since this is the line break "\n" for val in line[:-1].split(): # nur nicht-leere Spalten hinzufuegen # transl. Just add non-empty columns if val.strip()!="": row.append(val.strip()) # Baue zweidimensionales Array auf # transl. Build two-dimensional array array[0].append( date2num(datetime.strptime(row[0]+'-'+row[1],"%Y-%m-%d-%H:%M:%S.%f")) ) if float(row[3]) >= NOT_REPORTED: row[3] = np.nan if float(row[4]) >= NOT_REPORTED: row[4] = np.nan if float(row[5]) >= NOT_REPORTED: row[5] = np.nan if varstr in ['dhzf','dhzg']: array[1].append( float(row[4]) ) array[2].append( float(row[3])/60.0 ) array[3].append( float(row[5]) ) elif varstr in ['ehzf','ehzg']: array[1].append( float(row[4]) ) array[2].append( float(row[3]) ) array[3].append( float(row[5]) ) elif varstr in ['dhif','dhig']: array[1].append( float(row[5])/60.0 ) array[2].append( float(row[3])/60.0 ) array[3].append( float(row[6]) ) elif varstr in ['hdzf','hdzg']: array[1].append( float(row[3]) ) array[2].append( float(row[4])/60.0 ) array[3].append( float(row[5]) ) else: array[1].append( float(row[3]) ) array[2].append( float(row[4]) ) array[3].append( float(row[5]) ) try: if float(row[6]) < NOT_REPORTED: if varstr[-1]=='f': array[4].append(float(elem[6])) elif varstr[-1]=='g' and varstr=='xyzg': array[4].append(np.sqrt(float(row[3])**2+float(row[4])**2+float(row[5])**2) - float(row[6])) array[dfpos].append(float(row[6])) elif varstr[-1]=='g' and varstr in ['hdzg','dhzg','ehzg']: array[4].append(np.sqrt(float(row[3])**2+float(row[5])**2) - float(row[6])) array[dfpos].append(float(row[6])) elif varstr[-1]=='g' and varstr in ['dhig']: array[4].append(float(row[6])) array[dfpos].append(float(row[6])) else: raise ValueError else: array[4].append(float('nan')) except: if not float(row[6]) >= NOT_REPORTED: array[4].append(float(row[6])) else: array[4].append(float('nan')) #data.append(row) fh.close() for idx, elem in enumerate(array): array[idx] = np.asarray(array[idx]) stream = 
DataStream([LineStruct()],stream.header,np.asarray(array)) sr = stream.samplingrate() return stream def writeIAGA(datastream, filename, **kwargs): """ Writing IAGA2002 format data. """ mode = kwargs.get('mode') useg = kwargs.get('useg') def OpenFile(filename, mode='w'): if sys.version_info >= (3,0,0): f = open(filename, mode, newline='') else: f = open(filename, mode+'b') return f if os.path.isfile(filename): if mode == 'skip': # skip existing inputs exst = read(path_or_url=filename) datastream = mergeStreams(exst,datastream,extend=True) myFile= OpenFile(filename) elif mode == 'replace': # replace existing inputs exst = read(path_or_url=filename) datastream = mergeStreams(datastream,exst,extend=True) myFile= OpenFile(filename) elif mode == 'append': myFile= OpenFile(filename,mode='a') else: # overwrite mode #os.remove(filename) ?? necessary ?? myFile= OpenFile(filename) else: myFile= OpenFile(filename) header = datastream.header datacomp = header.get('DataComponents'," ") if datacomp in ['hez','HEZ','hezf','HEZF','hezg','HEZG']: order = [1,0,2] datacomp = 'EHZ' elif datacomp in ['hdz','HDZ','hdzf','HDZF','hdzg','HDZG']: order = [1,0,2] datacomp = 'DHZ' elif datacomp in ['idf','IDF','idff','IDFF','idfg','IDFG']: order = [1,3,0] datacomp = 'DHI' elif datacomp in ['xyz','XYZ','xyzf','XYZF','xyzg','XYZG']: order = [0,1,2] datacomp = 'XYZ' elif datacomp in ['ehz','EHZ','ehzf','EHZF','ehzg','EHZG']: order = [0,1,2] datacomp = 'EHZ' elif datacomp in ['dhz','DHZ','dhzf','DHZF','dhzg','DHZG']: order = [0,1,2] datacomp = 'DHZ' elif datacomp in ['dhi','DHI','dhif','DHIF','dhig','DHIG']: order = [0,1,2] datacomp = 'DHI' else: order = [0,1,2] datacomp = 'XYZ' find = KEYLIST.index('f') findg = KEYLIST.index('df') if len(datastream.ndarray[findg]) > 0: useg = True if len(datastream.ndarray[find]) > 0: if not useg: datacomp = datacomp+'F' else: datacomp = datacomp+'G' else: datacomp = datacomp+'F' publevel = str(header.get('DataPublicationLevel'," ")) if publevel == '2': publ = 
'Provisional' elif publevel == '3': publ = 'Quasi-definitive' elif publevel == '4': publ = 'Definitive' else: publ = 'Variation' proj = header.get('DataLocationReference','') longi = header.get('DataAcquisitionLongitude',' ') lati = header.get('DataAcquisitionLatitude',' ') if not longi=='' or lati=='': if proj == '': pass else: if proj.find('EPSG:') > 0: epsg = int(proj.split('EPSG:')[1].strip()) if not epsg==4326: longi,lati = convertGeoCoordinate(float(longi),float(lati),'epsg:'+str(epsg),'epsg:4326') line = [] if not mode == 'append': #if header.get('Elevation') > 0: # print(header) line.append(' Format %-15s IAGA-2002 %-34s |\n' % (' ',' ')) line.append(' Source of Data %-7s %-44s |\n' % (' ',header.get('StationInstitution'," ")[:44])) line.append(' Station Name %-9s %-44s |\n' % (' ', header.get('StationName'," ")[:44])) line.append(' IAGA Code %-12s %-44s |\n' % (' ',header.get('StationIAGAcode'," ")[:44])) line.append(' Geodetic Latitude %-4s %-44s |\n' % (' ',str(lati)[:44])) line.append(' Geodetic Longitude %-3s %-44s |\n' % (' ',str(longi)[:44])) line.append(' Elevation %-12s %-44s |\n' % (' ',str(header.get('DataElevation'," "))[:44])) line.append(' Reported %-13s %-44s |\n' % (' ',datacomp)) line.append(' Sensor Orientation %-3s %-44s |\n' % (' ',header.get('DataSensorOrientation'," ").upper()[:44])) line.append(' Digital Sampling %-5s %-44s |\n' % (' ',str(header.get('DataDigitalSampling'," "))[:44])) line.append(' Data Interval Type %-3s %-44s |\n' % (' ',(str(header.get('DataSamplingRate'," "))+' ('+header.get('DataSamplingFilter'," ")+')')[:44])) line.append(' Data Type %-12s %-44s |\n' % (' ',publ[:44])) if not header.get('DataPublicationDate','') == '': line.append(' {a:<20} {b:<45s}|\n'.format(a='Publication date',b=str(header.get('DataPublicationDate'))[:10])) # Optional header part: skipopt = False if not skipopt: if not header.get('SensorID','') == '': line.append(' #{a:<20} {b:<45s}|\n'.format(a='V-Instrument',b=header.get('SensorID')[:44])) 
if not header.get('SecondarySensorID','') == '': line.append(' #{a:<20} {b:<45s}|\n'.format(a='F-Instrument',b=header.get('SecondarySensorID')[:44])) if not header.get('StationMeans','') == '': try: meanlist = header.get('StationMeans') # Assume something like H:xxxx,D:xxx,Z:xxxx meanlist = meanlist.split(',') for me in meanlist: if me.startswith('H'): hval = me.split(':') line.append(' #{a:<20} {b:<45s}|\n'.format(a='Approx H',b=hval[1])) except: pass line.append(' #{a:<20} {b:<45s}|\n'.format(a='File created by',b='MagPy '+magpyversion)) iagacode = header.get('StationIAGAcode',"") line.append('DATE TIME DOY %8s %9s %9s %9s |\n' % (iagacode+datacomp[0],iagacode+datacomp[1],iagacode+datacomp[2],iagacode+datacomp[3])) try: myFile.writelines(line) # Write header sequence of strings to a file except IOError: pass try: line = [] ndtype = False if len(datastream.ndarray[0]) > 0: ndtype = True fulllength = datastream.length()[0] # Possible types: DHIF, DHZF, XYZF, or DHIG, DHZG, XYZG #datacomp = 'EHZ' #datacomp = 'DHZ' #datacomp = 'DHI' #datacomp = 'XYZ' xmult = 1.0 ymult = 1.0 zmult = 1.0 xind = order[0]+1 yind = order[1]+1 zind = order[2]+1 if len(datastream.ndarray[xind]) == 0 or len(datastream.ndarray[yind]) == 0 or len(datastream.ndarray[zind]) == 0: print("writeIAGA02: WARNING! Data missing in X, Y or Z component! Writing anyway...") find = KEYLIST.index('f') if datacomp.startswith('DHZ'): xmult = 60.0 elif datacomp.startswith('DHI'): xmult = 60.0 zmult = 60.0 for i in range(fulllength): if not ndtype: elem = datastream[i] xval = elem.x yval = elem.y zval = elem.z fval = elem.f timeval = elem.time else: if len(datastream.ndarray[xind]) > 0: xval = datastream.ndarray[xind][i]*xmult else: xval = NOT_REPORTED if len(datastream.ndarray[yind]) > 0: yval = datastream.ndarray[yind][i] if order[1] == '3': yval = datastream.ndarray[yind][i]*np.cos(datastream.ndarray[zind][i]*np.pi/180.) 
else: yval = NOT_REPORTED if len(datastream.ndarray[zind]) > 0: zval = datastream.ndarray[zind][i]*zmult else: zval = NOT_REPORTED if len(datastream.ndarray[find]) > 0: if not useg: fval = datastream.ndarray[find][i] else: fval = np.sqrt(xval**2+yval**2+zval**2)-datastream.ndarray[find][i] else: fval = NOT_REPORTED timeval = datastream.ndarray[0][i] row = '' try: row = datetime.strftime(num2date(timeval).replace(tzinfo=None),"%Y-%m-%d %H:%M:%S.%f") row = row[:-3] doi = datetime.strftime(num2date(timeval).replace(tzinfo=None), "%j") row += ' %s' % str(doi) except: row = '' pass if isnan(xval): row += '%13.2f' % MISSING_DATA else: row += '%13.2f' % xval if isnan(yval): row += '%10.2f' % MISSING_DATA else: row += '%10.2f' % yval if isnan(zval): row += '%10.2f' % MISSING_DATA else: row += '%10.2f' % zval if isnan(fval): row += '%10.2f' % MISSING_DATA else: row += '%10.2f' % fval line.append(row + '\n') try: myFile.writelines( line ) pass finally: myFile.close() except IOError: return False pass return True
class Solution: def majorityElement(self, nums): """ :type nums: List[int] :rtype: List[int] """ num1, cnt1 = 0, 0 num2, cnt2 = 1, 0 for num in nums: if num == num1: cnt1 += 1 elif num == num2: cnt2 += 1 else: if cnt1 == 0: num1, cnt1 = num, 1 elif cnt2 == 0: num2, cnt2 = num, 1 else: cnt1, cnt2 = cnt1 - 1, cnt2 - 1 return [num for num in (num1, num2) if nums.count(num) > len(nums) // 3]
import pandas as pd from sklearn.datasets import load_boston from sklearn.linear_model import LinearRegression from sklearn.tree import DecisionTreeRegressor from sklearn.cross_validation import train_test_split from sklearn.metrics import mean_squared_error from .auto_segment_FEMPO import BasicSegmenter_FEMPO def demo(X = None, y = None, test_size = 0.1): if X == None: boston = load_boston() X = pd.DataFrame(boston.data) y = pd.DataFrame(boston.target) base_estimator = DecisionTreeRegressor(max_depth = 5) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size) print X_train.shape # If you want to compare with BaggingRegressor. # bench = BaggingRegressor(base_estimator = base_estimator, n_estimators = 10, max_samples = 1, oob_score = True).fit(X_train, y_train) # print bench.score(X_test, y_test) # print mean_squared_error(bench.predict(X_test), y_test) clf = BasicSegmenterEG_FEMPO(ngen=30,init_sample_percentage = 1, n_votes=10, n = 10, base_estimator = base_estimator, unseen_x = X_test, unseen_y = y_test) clf.fit(X_train, y_train) print clf.score(X_test,y_test) y = clf.predict(X_test) print mean_squared_error(y, y_test) print y.shape return clf, X_test, y_test
from os.path import join, abspath, dirname, exists import os import errno import shutil from tempfile import mkdtemp import subprocess import urllib2 import logging import sys import datetime import re from landsat.search import Search from landsat.ndvi import NDVIWithManualColorMap logging.basicConfig(stream=sys.stderr, level=logging.DEBUG) current_date = datetime.datetime.now().date() print 'Current date is:', current_date sub_date = current_date - datetime.timedelta(days=1) print 'Subtract date is:', sub_date search = Search() try: search_results = search.search(paths_rows='177,025', start_date=sub_date, end_date=current_date) search_string = str(search_results.get('results')) search_list = re.compile('\w+').findall(search_string) scene_id = str(search_list.pop(5)) print scene_id l = len(scene_id) print l except Exception: raise SystemExit('Closing...') url_red = 'http://landsat-pds.s3.amazonaws.com/L8/177/025/' + scene_id + '/' + scene_id + '_B4.TIF' url_nir = 'http://landsat-pds.s3.amazonaws.com/L8/177/025/' + scene_id + '/' + scene_id + '_B5.TIF' red_file = scene_id + '_B4.TIF' nir_file = scene_id + '_B5.TIF' ndvi_file = scene_id + '_NDVI.TIF' print 'Filenames builded succsessfuly' base_dir = os.getcwd() temp_folder = join(base_dir, "temp_folder") scene_folder = join(temp_folder, scene_id) if not os.path.exists(temp_folder): os.makedirs(temp_folder) if not os.path.exists(scene_folder): os.makedirs(scene_folder) file_name = url_red.split('/')[-1] u = urllib2.urlopen(url_red) f = open("temp_folder/"+scene_id+"/"+file_name, 'wb') meta = u.info() file_size = int(meta.getheaders("Content-Length")[0]) print "Downloading: %s Bytes: %s" % (file_name, file_size) file_size_dl = 0 block_sz = 8192 while True: buffer = u.read(block_sz) if not buffer: break file_size_dl += len(buffer) f.write(buffer) status = r"%10d [%3.2f%%]" % (file_size_dl, file_size_dl * 100. 
/ file_size) status = status + chr(8)*(len(status)+1) print status, f.close() file_name = url_nir.split('/')[-1] u = urllib2.urlopen(url_nir) f = open("temp_folder/"+scene_id+"/"+file_name, 'wb') meta = u.info() file_size = int(meta.getheaders("Content-Length")[0]) print "Downloading: %s Bytes: %s" % (file_name, file_size) file_size_dl = 0 block_sz = 8192 while True: buffer = u.read(block_sz) if not buffer: break file_size_dl += len(buffer) f.write(buffer) status = r"%10d [%3.2f%%]" % (file_size_dl, file_size_dl * 100. / file_size) status = status + chr(8)*(len(status)+1) print status, f.close() nd = NDVIWithManualColorMap(path=temp_folder+"/"+scene_id, dst_path=temp_folder) print nd.run() subprocess.call(["gdalbuildvrt", "-a_srs", "EPSG:3857", "NDVImap.vrt", "temp_folder/"+scene_id+"/"+ndvi_file]) shutil.rmtree("ndvi_tiles", ignore_errors=True) subprocess.call(["./gdal2tilesp.py", "-w", "none", "-s EPSG:3857", "-p", "mercator", "-z 8-12", "--format=PNG", "--processes=4", "-o", "tms", "NDVImap.vrt", "ndvi_tiles"]) shutil.rmtree("temp_folder", ignore_errors=True) os.remove("NDVImap.vrt") print 'All temporary data was succsessfully removed' raise SystemExit('Closing...')
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoAlertPresentException
import unittest, time, re


class DownloadEnteredDataTest(unittest.TestCase):
    """Selenium-IDE-style browser test: open KoBoCAT, navigate to a known
    form, request an XLS export and click the resulting download link.

    NOTE(review): this is Python 2 code (xrange).  The test references
    self.DEFAULT_WAIT_SECONDS and self.check_timeout(), neither of which is
    defined in this class - presumably injected by a harness/base class;
    as-is the test would raise AttributeError.  TODO confirm.
    """

    def setUp(self):
        # Fresh Firefox session per test, generous implicit wait.
        self.driver = webdriver.Firefox()
        self.driver.implicitly_wait(30)
        self.base_url = "http://kc.kbtdev.org/"
        self.verificationErrors = []
        self.accept_next_alert = True

    def test_download_entered_data(self):
        # Open KoBoCAT.
        driver = self.driver
        driver.get(self.base_url + "")

        # Assert that our form's title is in the list of projects and follow its link.
        self.assertTrue(self.is_element_present(By.LINK_TEXT, "Selenium test form title."))
        driver.find_element_by_link_text("Selenium test form title.").click()

        # Wait for and click the "Download data" link.
        # (poll once per second until found, else fail via for/else)
        for _ in xrange(self.DEFAULT_WAIT_SECONDS):
            self.check_timeout('Waiting for "Download data" link.')
            try:
                if self.is_element_present(By.LINK_TEXT, "Download data"):
                    break
            except:
                pass
            time.sleep(1)
        else:
            self.fail("time out")
        driver.find_element_by_link_text("Download data").click()

        # Wait for and click the "XLS" link.
        for _ in xrange(self.DEFAULT_WAIT_SECONDS):
            self.check_timeout('Waiting for "XLS" link.')
            try:
                if self.is_element_present(By.LINK_TEXT, "XLS"):
                    break
            except:
                pass
            time.sleep(1)
        else:
            self.fail("time out")
        driver.find_element_by_link_text("XLS").click()

        # Wait for the download page's header and ensure it contains the word "excel" (case insensitive).
        for _ in xrange(self.DEFAULT_WAIT_SECONDS):
            self.check_timeout('Waiting for download page\'s header.')
            try:
                if self.is_element_present(By.CSS_SELECTOR, ".data-page__header"):
                    break
            except:
                pass
            time.sleep(1)
        else:
            self.fail("time out")
        self.assertIsNotNone(re.compile('excel', re.IGNORECASE).search(driver.find_element_by_css_selector(".data-page__header").text))

        # Wait for the export progress status.
        for _ in xrange(self.DEFAULT_WAIT_SECONDS):
            self.check_timeout('Waiting for the export progress status.')
            try:
                if self.is_element_present(By.CSS_SELECTOR, ".refresh-export-progress"):
                    break
            except:
                pass
            time.sleep(1)
        else:
            self.fail("time out")

        # Wait (a little more than usual) for the export's download link and click it.
        for _ in xrange(30):
            self.check_timeout('Waiting for the export\'s download link.')
            try:
                if re.search(r"^Selenium_test_form_title_[\s\S]*$", driver.find_element_by_css_selector("#forms-table a").text):
                    break
            except:
                pass
            time.sleep(1)
        else:
            self.fail("time out")
        driver.find_element_by_css_selector("#forms-table a").click()

    def is_element_present(self, how, what):
        # True iff the locator (how, what) matches an element on the page.
        try:
            self.driver.find_element(by=how, value=what)
        except NoSuchElementException:
            return False
        return True

    def is_alert_present(self):
        # NOTE(review): switch_to_alert() is deprecated in modern Selenium
        # (use driver.switch_to.alert); kept as-is for the pinned version.
        try:
            self.driver.switch_to_alert()
        except NoAlertPresentException:
            return False
        return True

    def close_alert_and_get_its_text(self):
        # Accept or dismiss (per self.accept_next_alert) and return the text.
        try:
            alert = self.driver.switch_to_alert()
            alert_text = alert.text
            if self.accept_next_alert:
                alert.accept()
            else:
                alert.dismiss()
            return alert_text
        finally:
            self.accept_next_alert = True

    def tearDown(self):
        self.driver.quit()
        self.assertEqual([], self.verificationErrors)


if __name__ == "__main__":
    unittest.main()
""" ARC Computing Element """ __RCSID__ = "58c42fc (2013-07-07 22:54:57 +0200) Andrei Tsaregorodtsev <atsareg@in2p3.fr>" import os import stat import tempfile from types import StringTypes from DIRAC import S_OK, S_ERROR from DIRAC.Resources.Computing.ComputingElement import ComputingElement from DIRAC.Core.Utilities.Grid import executeGridCommand CE_NAME = 'ARC' MANDATORY_PARAMETERS = [ 'Queue' ] class ARCComputingElement( ComputingElement ): ############################################################################# def __init__( self, ceUniqueID ): """ Standard constructor. """ ComputingElement.__init__( self, ceUniqueID ) self.ceType = CE_NAME self.submittedJobs = 0 self.mandatoryParameters = MANDATORY_PARAMETERS self.pilotProxy = '' self.queue = '' self.outputURL = 'gsiftp://localhost' self.gridEnv = '' self.ceHost = self.ceName if 'Host' in self.ceParameters: self.ceHost = self.ceParameters['Host'] if 'GridEnv' in self.ceParameters: self.gridEnv = self.ceParameters['GridEnv'] ############################################################################# def _addCEConfigDefaults( self ): """Method to make sure all necessary Configuration Parameters are defined """ # First assure that any global parameters are loaded ComputingElement._addCEConfigDefaults( self ) def __writeXRSL( self, executableFile ): """ Create the JDL for submission """ workingDirectory = self.ceParameters['WorkingDirectory'] fd, name = tempfile.mkstemp( suffix = '.xrsl', prefix = 'ARC_', dir = workingDirectory ) diracStamp = os.path.basename( name ).replace( '.xrsl', '' ).replace( 'ARC_', '' ) xrslFile = os.fdopen( fd, 'w' ) xrsl = """ &(executable="%(executable)s") (inputFiles=(%(executable)s "%(executableFile)s")) (stdout="%(diracStamp)s.out") (stderr="%(diracStamp)s.err") (outputFiles=("%(diracStamp)s.out" "") ("%(diracStamp)s.err" "")) """ % { 'executableFile':executableFile, 'executable':os.path.basename( executableFile ), 'diracStamp':diracStamp } xrslFile.write( xrsl ) 
xrslFile.close() return name, diracStamp def _reset( self ): self.queue = self.ceParameters['Queue'] self.gridEnv = self.ceParameters['GridEnv'] ############################################################################# def submitJob( self, executableFile, proxy, numberOfJobs = 1 ): """ Method to submit job """ self.log.verbose( "Executable file path: %s" % executableFile ) if not os.access( executableFile, 5 ): os.chmod( executableFile, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH + stat.S_IXOTH ) batchIDList = [] stampDict = {} i = 0 while i < numberOfJobs: i += 1 xrslName, diracStamp = self.__writeXRSL( executableFile ) cmd = ['arcsub', '-j', self.ceParameters['JobListFile'], '-c', '%s' % self.ceHost, '%s' % xrslName ] result = executeGridCommand( self.proxy, cmd, self.gridEnv ) os.unlink( xrslName ) if not result['OK']: break if result['Value'][0] != 0: break pilotJobReference = result['Value'][1].strip() if pilotJobReference and pilotJobReference.startswith('Job submitted with jobid:'): pilotJobReference = pilotJobReference.replace('Job submitted with jobid:', '').strip() batchIDList.append( pilotJobReference ) stampDict[pilotJobReference] = diracStamp else: break #os.unlink( executableFile ) if batchIDList: result = S_OK( batchIDList ) result['PilotStampDict'] = stampDict else: result = S_ERROR('No pilot references obtained from the glite job submission') return result def killJob( self, jobIDList ): """ Kill the specified jobs """ workingDirectory = self.ceParameters['WorkingDirectory'] fd, name = tempfile.mkstemp( suffix = '.list', prefix = 'KillJobs_', dir = workingDirectory ) jobListFile = os.fdopen( fd, 'w' ) jobList = list( jobIDList ) if type( jobIDList ) in StringTypes: jobList = [ jobIDList ] for job in jobList: jobListFile.write( job+'\n' ) cmd = ['arckill', '-c', self.ceHost, '-i', name] result = executeGridCommand( self.proxy, cmd, self.gridEnv ) os.unlink( name ) if not result['OK']: return result if result['Value'][0] != 0: 
return S_ERROR( 'Failed kill job: %s' % result['Value'][0][1] ) return S_OK() def getCEStatus( self ): """ Method to return information on running and pending jobs. """ cmd = ['arcstat', '-c', self.ceHost, '-j', self.ceParameters['JobListFile'] ] result = executeGridCommand( self.proxy, cmd, self.gridEnv ) resultDict = {} if not result['OK']: return result if result['Value'][0] == 1 and result['Value'][1] == "No jobs\n": result = S_OK() result['RunningJobs'] = 0 result['WaitingJobs'] = 0 result['SubmittedJobs'] = 0 return result if result['Value'][0]: if result['Value'][2]: return S_ERROR(result['Value'][2]) else: return S_ERROR('Error while interrogating CE status') if result['Value'][1]: resultDict = self.__parseJobStatus( result['Value'][1] ) running = 0 waiting = 0 for ref in resultDict: status = resultDict[ref] if status == 'Scheduled': waiting += 1 if status == 'Running': running += 1 result = S_OK() result['RunningJobs'] = running result['WaitingJobs'] = waiting result['SubmittedJobs'] = 0 return result def __parseJobStatus( self, commandOutput ): """ """ resultDict = {} lines = commandOutput.split('\n') ln = 0 while ln < len( lines ): if lines[ln].startswith( 'Job:' ): jobRef = lines[ln].split()[1] ln += 1 line = lines[ln].strip() stateARC = '' if line.startswith( 'State' ): stateARC = line.replace( 'State:','' ).strip() line = lines[ln+1].strip() exitCode = None if line.startswith( 'Exit Code' ): line = line.replace( 'Exit Code:','' ).strip() exitCode = int( line ) # Evaluate state now if stateARC in ['Accepted','Preparing','Submitting','Queuing','Hold']: resultDict[jobRef] = "Scheduled" elif stateARC in ['Running','Finishing']: resultDict[jobRef] = "Running" elif stateARC in ['Killed','Deleted']: resultDict[jobRef] = "Killed" elif stateARC in ['Finished','Other']: if exitCode is not None: if exitCode == 0: resultDict[jobRef] = "Done" else: resultDict[jobRef] = "Failed" else: resultDict[jobRef] = "Failed" elif stateARC in ['Failed']: resultDict[jobRef] = 
"Failed" else: self.log.warn( "Unknown state %s for job %s" % ( stateARC, jobRef ) ) elif lines[ln].startswith( "WARNING: Job information not found:" ): jobRef = lines[ln].replace( 'WARNING: Job information not found:', '' ).strip() resultDict[jobRef] = "Scheduled" ln += 1 return resultDict def getJobStatus( self, jobIDList ): """ Get the status information for the given list of jobs """ workingDirectory = self.ceParameters['WorkingDirectory'] fd, name = tempfile.mkstemp( suffix = '.list', prefix = 'StatJobs_', dir = workingDirectory ) jobListFile = os.fdopen( fd, 'w' ) jobTmpList = list( jobIDList ) if type( jobIDList ) in StringTypes: jobTmpList = [ jobIDList ] jobList = [] for j in jobTmpList: if ":::" in j: job = j.split(":::")[0] else: job = j jobList.append( job ) jobListFile.write( job+'\n' ) cmd = ['arcstat', '-c', self.ceHost, '-i', name, '-j', self.ceParameters['JobListFile']] result = executeGridCommand( self.proxy, cmd, self.gridEnv ) os.unlink( name ) resultDict = {} if not result['OK']: self.log.error( 'Failed to get job status', result['Message'] ) return result if result['Value'][0]: if result['Value'][2]: return S_ERROR(result['Value'][2]) else: return S_ERROR('Error while interrogating job statuses') if result['Value'][1]: resultDict = self.__parseJobStatus( result['Value'][1] ) if not resultDict: return S_ERROR('No job statuses returned') # If CE does not know about a job, set the status to Unknown for job in jobList: if not resultDict.has_key( job ): resultDict[job] = 'Unknown' return S_OK( resultDict ) def getJobOutput( self, jobID, localDir = None ): """ Get the specified job standard output and error files. If the localDir is provided, the output is returned as file in this directory. Otherwise, the output is returned as strings. 
""" if jobID.find( ':::' ) != -1: pilotRef, stamp = jobID.split( ':::' ) else: pilotRef = jobID stamp = '' if not stamp: return S_ERROR( 'Pilot stamp not defined for %s' % pilotRef ) arcID = os.path.basename(pilotRef) if "WorkingDirectory" in self.ceParameters: workingDirectory = os.path.join( self.ceParameters['WorkingDirectory'], arcID ) else: workingDirectory = arcID outFileName = os.path.join( workingDirectory, '%s.out' % stamp ) errFileName = os.path.join( workingDirectory, '%s.err' % stamp ) cmd = ['arcget', '-j', self.ceParameters['JobListFile'], pilotRef ] result = executeGridCommand( self.proxy, cmd, self.gridEnv ) output = '' if result['OK']: if not result['Value'][0]: outFile = open( outFileName, 'r' ) output = outFile.read() outFile.close() os.unlink( outFileName ) errFile = open( errFileName, 'r' ) error = errFile.read() errFile.close() os.unlink( errFileName ) else: error = '\n'.join( result['Value'][1:] ) return S_ERROR( error ) else: return S_ERROR( 'Failed to retrieve output for %s' % jobID ) return S_OK( ( output, error ) )
import re

# Decode a 13x25 character grid that was stored as a single line written in
# an inward clockwise spiral, then render it with block-drawing glyphs.

# FIX: the original ran three chained re.sub() calls per cell; a single
# translation table does the same single-character mapping in one pass.
GLYPH_MAP = str.maketrans({"¦": "█", "¯": "▀", "_": "▄"})

a = [[0 for x in range(25)] for y in range(13)]

# FIX: close the input file deterministically (was left open).
with open("../distrib/spiral.txt", "r") as f:
    s = f.readline().strip()

# Direction vectors for right, down, left, up (row delta dx, column delta dy).
dx, dy = [0, 1, 0, -1], [1, 0, -1, 0]
x, y = 0, -1
l = 0
# 13+13-1 spiral legs alternate between horizontal (even i) and vertical
# (odd i) runs whose lengths shrink as the spiral tightens; together they
# cover all 13*25 cells exactly once.
for i in range(13 + 13 - 1):
    if i % 2 == 0:
        steps = (25 + 25 - i) // 2
    else:
        steps = (13 + 13 - i) // 2
    for j in range(steps):
        x += dx[i % 4]
        y += dy[i % 4]
        a[x][y] = s[l]
        l = l + 1
        # (FIX: dropped the old 'c' counter - it was incremented but never read)

for row in a:
    for k in row:
        print(k.translate(GLYPH_MAP), end="")
    print()
from __future__ import (absolute_import, division, print_function, unicode_literals) from os import path from mantid import logger class WorkspaceLoader(object): @staticmethod def load_workspaces(directory, workspaces_to_load): """ The method that is called to load in workspaces. From the given directory and the workspace names provided. :param directory: String or string castable object; The project directory :param workspaces_to_load: List of Strings; of the workspaces to load """ if workspaces_to_load is None: return from mantid.simpleapi import Load # noqa for workspace in workspaces_to_load: try: Load(path.join(directory, (workspace + ".nxs")), OutputWorkspace=workspace) except Exception: logger.warning("Couldn't load file in project: " + workspace + ".nxs")
"""Script for plotting distributions of epitopes per site for two sets of sites. Uses matplotlib. Designed to analyze output of epitopefinder_getepitopes.py. Written by Jesse Bloom.""" import os import sys import random import epitopefinder.io import epitopefinder.plot def main(): """Main body of script.""" random.seed(1) # seed random number generator in case P values are being computed if not epitopefinder.plot.PylabAvailable(): raise ImportError("Cannot import matplotlib / pylab, which are required by this script.") # output is written to out, currently set to standard out out = sys.stdout out.write("Beginning execution of epitopefinder_plotdistributioncomparison.py\n") # read input file and parse arguments args = sys.argv[1 : ] if len(args) != 1: raise IOError("Script must be called with exactly one argument specifying the input file") infilename = sys.argv[1] if not os.path.isfile(infilename): raise IOError("Failed to find infile %s" % infilename) d = epitopefinder.io.ParseInfile(open(infilename)) out.write("\nRead input arguments from %s\n" % infilename) out.write('Read the following key / value pairs:\n') for (key, value) in d.iteritems(): out.write("%s %s\n" % (key, value)) plotfile = epitopefinder.io.ParseStringValue(d, 'plotfile').strip() epitopesbysite1_list = [] epitopesbysite2_list = [] for (xlist, xf) in [(epitopesbysite1_list, 'epitopesfile1'), (epitopesbysite2_list, 'epitopesfile2')]: epitopesfile = epitopefinder.io.ParseFileList(d, xf) if len(epitopesfile) != 1: raise ValueError("%s specifies more than one file" % xf) epitopesfile = epitopesfile[0] for line in open(epitopesfile).readlines()[1 : ]: if not (line.isspace() or line[0] == '#'): (site, n) = line.split(',') (site, n) = (int(site), int(n)) xlist.append(n) if not xlist: raise ValueError("%s failed to specify information for any sites" % xf) set1name = epitopefinder.io.ParseStringValue(d, 'set1name') set2name = epitopefinder.io.ParseStringValue(d, 'set2name') title = 
epitopefinder.io.ParseStringValue(d, 'title').strip() if title.upper() in ['NONE', 'FALSE']: title = None pvalue = epitopefinder.io.ParseStringValue(d, 'pvalue') if pvalue.upper() in ['NONE', 'FALSE']: pvalue = None pvaluewithreplacement = None else: pvalue = int(pvalue) pvaluewithreplacement = epitopefinder.io.ParseBoolValue(d, 'pvaluewithreplacement') if pvalue < 1: raise ValueError("pvalue must be >= 1") if len(epitopesbysite2_list) >= len(epitopesbysite1_list): raise ValueError("You cannot use pvalue since epitopesbysite2_list is not a subset of epitopesbysite1_list -- it does not contain fewer sites with specified epitope counts.") ymax = None if 'ymax' in d: ymax = epitopefinder.io.ParseFloatValue(d, 'ymax') out.write('\nNow creating the plot file %s\n' % plotfile) epitopefinder.plot.PlotDistributionComparison(epitopesbysite1_list, epitopesbysite2_list, set1name, set2name, plotfile, 'number of epitopes', 'fraction of sites', title, pvalue, pvaluewithreplacement, ymax=ymax) out.write("\nScript is complete.\n") if __name__ == '__main__': main() # run the script
# Configuration for one echoRD generic-test job run.
mcinif = 'mcini_gen2'                        # model-initialisation module name
runname = 'gen_test2111b'                    # identifier for this run
mcpick = 'gen_test2b.pickle'                 # pickled model setup to load
pathdir = '/beegfs/work/ka_oj4748/echoRD'    # echoRD source tree
wdir = '/beegfs/work/ka_oj4748/gen_tests'    # working directory for outputs
update_prec = 0.04                           # precipitation update value
update_mf = False                            # moisture-field update disabled
update_part = 500                            # particle-count update value

import sys

# Make the echoRD package importable from its install location.
sys.path.append(pathdir)
import run_echoRD as rE

rE.echoRD_job(
    mcinif=mcinif,
    mcpick=mcpick,
    runname=runname,
    wdir=wdir,
    pathdir=pathdir,
    update_prec=update_prec,
    update_mf=update_mf,
    update_part=update_part,
    hdf5pick=False,
)
""" Created on Fri Dec 18 14:11:31 2015 @author: Martin Friedl """ from datetime import date import numpy as np from Patterns.GrowthTheoryCell import make_theory_cell from Patterns.GrowthTheoryCell_100_3BranchDevices import make_theory_cell_3br from Patterns.GrowthTheoryCell_100_4BranchDevices import make_theory_cell_4br from gdsCAD_py3.core import Cell, Boundary, CellArray, Layout, Path from gdsCAD_py3.shapes import Box, Rectangle, Label from gdsCAD_py3.templates100 import Wafer_GridStyle, dashed_line WAFER_ID = 'XXXX' # CHANGE THIS FOR EACH DIFFERENT WAFER PATTERN = 'SQ1.2' putOnWafer = True # Output full wafer or just a single pattern? HighDensity = False # High density of triangles? glbAlignmentMarks = False tDicingMarks = 10. # Dicing mark line thickness (um) rotAngle = 0. # Rotation angle of the membranes wafer_r = 25e3 waferVer = '100 Membranes Multi-Use v1.2'.format(int(wafer_r / 1000)) waferLabel = waferVer + '\n' + date.today().strftime("%d%m%Y") l_smBeam = 0 l_lgBeam = 1 l_drawing = 100 class MBE100Wafer(Wafer_GridStyle): """ A 2" wafer divided into square cells """ def __init__(self, name, cells=None): Wafer_GridStyle.__init__(self, name=name, cells=cells, block_gap=1200.) 
# The placement of the wafer alignment markers am_x = 1.5e4 am_y = 1.5e4 self.align_pts = np.array([am_x, am_y]) self.align_pts = np.vstack((self.align_pts, self.align_pts * (-1, 1))) # Reflect about y-axis self.align_pts = np.vstack((self.align_pts, self.align_pts * (1, -1))) # Reflect about x-axis self.wafer_r = 25e3 self.block_size = np.array([10e3, 10e3]) self._place_blocks(radius=self.wafer_r + 5e3) # if glbAlignmentMarks: # self.add_aligment_marks(l_lgBeam) # self.add_orientation_text(l_lgBeam) # self.add_dicing_marks() # l_lgBeam, mkWidth=mkWidth Width of dicing marks self.add_blocks() self.add_wafer_outline(layers=l_drawing) self.add_dashed_dicing_marks(layers=[l_lgBeam]) self.add_block_labels(layers=[l_lgBeam]) self.add_prealignment_markers(layers=[l_lgBeam]) self.add_tem_membranes([0.08, 0.012, 0.028, 0.044], 2000, 1, l_smBeam) self.add_theory_cells() self.add_chip_labels() # self.add_blockLabels(l_lgBeam) # self.add_cellLabels(l_lgBeam) bottom = np.array([0, -self.wafer_r * 0.9]) # top = np.array([0, -1]) * bottom self.add_waferLabel(waferLabel, l_drawing, pos=bottom) def add_block_labels(self, layers): txtSize = 800 for (i, pt) in enumerate(self.block_pts): origin = (pt + np.array([0.5, 0.5])) * self.block_size blk_lbl = self.blockcols[pt[0]] + self.blockrows[pt[1]] for l in layers: txt = Label(blk_lbl, txtSize, layer=l) bbox = txt.bounding_box offset = np.array(pt) txt.translate(-np.mean(bbox, 0)) # Center text around origin lbl_cell = Cell("lbl_" + blk_lbl) lbl_cell.add(txt) origin += np.array([0, 0]) self.add(lbl_cell, origin=origin) def add_dashed_dicing_marks(self, layers): if type(layers) is not list: layers = [layers] width = 10. 
/ 2 dashlength = 2000 r = self.wafer_r rng = np.floor(self.wafer_r / self.block_size).astype(int) dmarks = Cell('DIC_MRKS') for l in layers: for x in np.arange(-rng[0], rng[0] + 1) * self.block_size[0]: y = np.sqrt(r ** 2 - x ** 2) vm = dashed_line([x, y], [x, -y], dashlength, width, layer=l) dmarks.add(vm) for y in np.arange(-rng[1], rng[1] + 1) * self.block_size[1]: x = np.sqrt(r ** 2 - y ** 2) hm = dashed_line([x, y], [-x, y], dashlength, width, layer=l) dmarks.add(hm) self.add(dmarks) def add_prealignment_markers(self, layers, mrkr_size=7): if mrkr_size % 2 == 0: # Number is even, but we need odd numbers mrkr_size += 1 if type(layers) is not list: layers = [layers] for l in layers: rect_size = 10. # 10 um large PAMM rectangles marker_rect = Rectangle([-rect_size / 2., -rect_size / 2.], [rect_size / 2., rect_size / 2.], layer=l) marker = Cell('10umMarker') marker.add(marker_rect) # Make one arm of the PAMM array marker_arm = Cell('PAMM_Arm') # Define the positions of the markers, they increase in spacing by 1 um each time: mrkr_positions = [75 * n + (n - 1) * n // 2 for n in range(1, (mrkr_size - 1) // 2 + 1)] for pos in mrkr_positions: marker_arm.add(marker, origin=[pos, 0]) # Build the final PAMM Marker pamm_cell = Cell('PAMM_Marker') pamm_cell.add(marker) # Center marker pamm_cell.add(marker_arm) # Right arm pamm_cell.add(marker_arm, rotation=180) # Left arm pamm_cell.add(marker_arm, rotation=90) # Top arm pamm_cell.add(marker_arm, rotation=-90) # Bottom arm for pos in mrkr_positions: pamm_cell.add(marker_arm, origin=[pos, 0], rotation=90) # Top arms pamm_cell.add(marker_arm, origin=[-pos, 0], rotation=90) pamm_cell.add(marker_arm, origin=[pos, 0], rotation=-90) # Bottom arms pamm_cell.add(marker_arm, origin=[-pos, 0], rotation=-90) # Make the 4 tick marks that mark the center of the array h = 30. w = 100. 
tick_mrk = Rectangle([-w / 2., -h / 2.], [w / 2, h / 2.], layer=l) tick_mrk_cell = Cell("TickMark") tick_mrk_cell.add(tick_mrk) pos = mrkr_positions[-1] + 75 + w / 2. pamm_cell.add(tick_mrk_cell, origin=[pos, 0]) pamm_cell.add(tick_mrk_cell, origin=[-pos, 0]) pamm_cell.add(tick_mrk_cell, origin=[0, pos], rotation=90) pamm_cell.add(tick_mrk_cell, origin=[0, -pos], rotation=90) center_x, center_y = (5000, 5000) for block in self.blocks: block.add(pamm_cell, origin=(center_x + 2000, center_y)) block.add(pamm_cell, origin=(center_x - 2000, center_y)) def add_tem_membranes(self, widths, length, pitch, layer): tem_membranes = Cell('TEM_Membranes') n = 5 curr_y = 0 for width in widths: membrane = Path([(-length / 2., 0), (length / 2., 0)], width=width, layer=layer) membrane_cell = Cell('Membrane_w{:.0f}'.format(width * 1000)) membrane_cell.add(membrane) membrane_array = CellArray(membrane_cell, 1, n, (0, pitch)) membrane_array_cell = Cell('MembraneArray_w{:.0f}'.format(width * 1000)) membrane_array_cell.add(membrane_array) tem_membranes.add(membrane_array_cell, origin=(0, curr_y)) curr_y += n * pitch n2 = 3 tem_membranes2 = Cell('Many_TEM_Membranes') tem_membranes2.add(CellArray(tem_membranes, 1, n2, (0, n * len(widths) * pitch))) center_x, center_y = (5000, 5000) for block in self.blocks: block.add(tem_membranes2, origin=(center_x, center_y + 2000)) def add_theory_cells(self): theory_cells = Cell('TheoryCells') theory_cells.add(make_theory_cell(wafer_orient='100'), origin=(-400, 0)) theory_cells.add(make_theory_cell_3br(), origin=(0, 0)) theory_cells.add(make_theory_cell_4br(), origin=(400, 0)) center_x, center_y = (5000, 5000) for block in self.blocks: block.add(theory_cells, origin=(center_x, center_y - 2000)) def add_chip_labels(self): wafer_lbl = PATTERN + '\n' + WAFER_ID text = Label(wafer_lbl, 20., layer=l_lgBeam) text.translate(tuple(np.array(-text.bounding_box.mean(0)))) # Center justify label chip_lbl_cell = Cell('chip_label') chip_lbl_cell.add(text) center_x, 
center_y = (5000, 5000) for block in self.blocks: block.add(chip_lbl_cell, origin=(center_x, center_y - 2850)) class Frame(Cell): """ Make a frame for writing to with ebeam lithography Params: -name of the frame, just like when naming a cell -size: the size of the frame as an array [xsize,ysize] """ def __init__(self, name, size, border_layers): if not (type(border_layers) == list): border_layers = [border_layers] Cell.__init__(self, name) self.size_x, self.size_y = size # Create the border of the cell for l in border_layers: self.border = Box( (-self.size_x / 2., -self.size_y / 2.), (self.size_x / 2., self.size_y / 2.), 1, layer=l) self.add(self.border) # Add border to the frame self.align_markers = None def make_align_markers(self, t, w, position, layers, joy_markers=False, camps_markers=False): if not (type(layers) == list): layers = [layers] top_mk_cell = Cell('AlignmentMark') for l in layers: if not joy_markers: am0 = Rectangle((-w / 2., -w / 2.), (w / 2., w / 2.), layer=l) rect_mk_cell = Cell("RectMarker") rect_mk_cell.add(am0) top_mk_cell.add(rect_mk_cell) elif joy_markers: crosspts = [(0, 0), (w / 2., 0), (w / 2., t), (t, t), (t, w / 2), (0, w / 2), (0, 0)] crosspts.extend(tuple(map(tuple, (-np.array(crosspts)).tolist()))) am0 = Boundary(crosspts, layer=l) # Create gdsCAD shape joy_mk_cell = Cell("JOYMarker") joy_mk_cell.add(am0) top_mk_cell.add(joy_mk_cell) if camps_markers: emw = 20. 
# 20 um e-beam marker width camps_mk = Rectangle((-emw / 2., -emw / 2.), (emw / 2., emw / 2.), layer=l) camps_mk_cell = Cell("CAMPSMarker") camps_mk_cell.add(camps_mk) top_mk_cell.add(camps_mk_cell, origin=[100., 100.]) top_mk_cell.add(camps_mk_cell, origin=[100., -100.]) top_mk_cell.add(camps_mk_cell, origin=[-100., 100.]) top_mk_cell.add(camps_mk_cell, origin=[-100., -100.]) self.align_markers = Cell("AlignMarkers") self.align_markers.add(top_mk_cell, origin=np.array(position) * np.array([1, -1])) self.align_markers.add(top_mk_cell, origin=np.array(position) * np.array([-1, -1])) self.align_markers.add(top_mk_cell, origin=np.array(position) * np.array([1, 1])) self.align_markers.add(top_mk_cell, origin=np.array(position) * np.array([-1, 1])) self.add(self.align_markers) def make_slit_array(self, _pitches, spacing, _widths, _lengths, rot_angle, array_height, array_width, array_spacing, layers): if not (type(layers) == list): layers = [layers] if not (type(_pitches) == list): _pitches = [_pitches] if not (type(_lengths) == list): _lengths = [_lengths] if not (type(_widths) == list): _widths = [_widths] manyslits = i = j = None for l in layers: i = -1 j = -1 manyslits = Cell("SlitArray") pitch = _pitches[0] for length in _lengths: j += 1 i = -1 for width in _widths: # for pitch in pitches: i += 1 if i % 3 == 0: j += 1 # Move to array to next line i = 0 # Restart at left pitch_v = pitch / np.cos(np.deg2rad(rot_angle)) # widthV = width / np.cos(np.deg2rad(rotAngle)) nx = int(array_width / (length + spacing)) ny = int(array_height / pitch_v) # Define the slits slit = Cell("Slits") rect = Rectangle((-length / 2., -width / 2.), (length / 2., width / 2.), layer=l) rect = rect.copy().rotate(rot_angle) slit.add(rect) slits = CellArray(slit, nx, ny, (length + spacing, pitch_v)) slits.translate((-(nx - 1) * (length + spacing) / 2., -(ny - 1) * pitch_v / 2.)) slit_array = Cell("SlitArray") slit_array.add(slits) text = Label('w/p/l\n%i/%i/%i' % (width * 1000, pitch, length), 5, 
layer=l) lbl_vertical_offset = 1.35 if j % 2 == 0: text.translate( tuple(np.array(-text.bounding_box.mean(0)) + np.array(( 0, -array_height / lbl_vertical_offset)))) # Center justify label else: text.translate( tuple(np.array(-text.bounding_box.mean(0)) + np.array(( 0, array_height / lbl_vertical_offset)))) # Center justify label slit_array.add(text) manyslits.add(slit_array, origin=((array_width + array_spacing) * i, ( array_height + 2. * array_spacing) * j - array_spacing / 2.)) self.add(manyslits, origin=(-i * (array_width + array_spacing) / 2, -(j + 1.5) * ( array_height + array_spacing) / 2)) lgField = Frame("LargeField", (2000., 2000.), []) # Create the large write field lgField.make_align_markers(20., 200., (850., 850.), l_lgBeam, joy_markers=True, camps_markers=True) widths = [0.004, 0.008, 0.012, 0.016, 0.028, 0.044] pitches = [1.0, 2.0] lengths = [10., 20.] smFrameSize = 400 slitColumnSpacing = 3. smField1 = Frame("SmallField1", (smFrameSize, smFrameSize), []) smField1.make_align_markers(2., 20., (180., 180.), l_lgBeam, joy_markers=True) smField1.make_slit_array(pitches[0], slitColumnSpacing, widths, lengths[0], rotAngle, 100, 100, 30, l_smBeam) smField2 = Frame("SmallField2", (smFrameSize, smFrameSize), []) smField2.make_align_markers(2., 20., (180., 180.), l_lgBeam, joy_markers=True) smField2.make_slit_array(pitches[0], slitColumnSpacing, widths, lengths[1], rotAngle, 100, 100, 30, l_smBeam) smField3 = Frame("SmallField3", (smFrameSize, smFrameSize), []) smField3.make_align_markers(2., 20., (180., 180.), l_lgBeam, joy_markers=True) smField3.make_slit_array(pitches[1], slitColumnSpacing, widths, lengths[0], rotAngle, 100, 100, 30, l_smBeam) smField4 = Frame("SmallField4", (smFrameSize, smFrameSize), []) smField4.make_align_markers(2., 20., (180., 180.), l_lgBeam, joy_markers=True) smField4.make_slit_array(pitches[1], slitColumnSpacing, widths, lengths[1], rotAngle, 100, 100, 30, l_smBeam) centerAlignField = Frame("CenterAlignField", (smFrameSize, 
smFrameSize), []) centerAlignField.make_align_markers(2., 20., (180., 180.), l_lgBeam, joy_markers=True) topCell = Cell("TopCell") topCell.add(lgField) smFrameSpacing = 400 # Spacing between the three small frames dx = smFrameSpacing + smFrameSize dy = smFrameSpacing + smFrameSize topCell.add(smField1, origin=(-dx / 2., dy / 2.)) topCell.add(smField2, origin=(dx / 2., dy / 2.)) topCell.add(smField3, origin=(-dx / 2., -dy / 2.)) topCell.add(smField4, origin=(dx / 2., -dy / 2.)) topCell.add(centerAlignField, origin=(0., 0.)) topCell.spacing = np.array([4000., 4000.]) layout = Layout('LIBRARY') if putOnWafer: # Fit as many patterns on a 2inch wafer as possible wafer = MBE100Wafer('MembranesWafer', cells=[topCell]) layout.add(wafer) else: # Only output a single copy of the pattern (not on a wafer) layout.add(topCell) layout.show() filestring = str(waferVer) + '_' + WAFER_ID + '_' + date.today().strftime("%d%m%Y") + ' dMark' + str(tDicingMarks) filename = filestring.replace(' ', '_') + '.gds' layout.save(filename) cell_layout = Layout('LIBRARY') cell_layout.add(wafer.blocks[0]) cell_layout.save(filestring.replace(' ', '_') + '_block' + '.gds') layout_field = Layout('LIBRARY') layout_field.add(topCell) layout_field.save(filestring.replace(' ', '_') + '_2mmField.gds')
from rest_framework import serializers

from .models import CustomerWallet


class CustomerWalletSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer exposing the public fields of a CustomerWallet."""

    class Meta:
        model = CustomerWallet
        fields = (
            "wallet_id",
            "msisdn",
            "balance",
            "type",
            "status",
        )
import urllib2 import urllib from BeautifulSoup import BeautifulSoup import smtplib import ConfigParser config = ConfigParser.ConfigParser() config.read('config.cfg') user = config.get('data','user') password = config.get('data','password') fromaddr = config.get('data','fromaddr') toaddr = config.get('data','toaddr') smtpserver = config.get('data','smtp_server') login_page='https://bugs.archlinux.org/index.php?do=authenticate' msg = "To: %s \nFrom: %s \nSubject: Bug Mail\n\n" % (toaddr,fromaddr) msg += 'Unassigned bugs \n\n' o = urllib2.build_opener( urllib2.HTTPCookieProcessor() ) urllib2.install_opener( o ) p = urllib.urlencode( { 'user_name': user, 'password': password, 'remember_login' : 'on',} ) f = o.open(login_page, p) data = f.read() url = "https://bugs.archlinux.org/index.php?string=&project=1&search_name=&type%5B%5D=&sev%5B%5D=&pri%5B%5D=&due%5B%5D=0&reported%5B%5D=&cat%5B%5D=&status%5B%5D=1&percent%5B%5D=&opened=&dev=&closed=&duedatefrom=&duedateto=&changedfrom=&changedto=&openedfrom=&openedto=&closedfrom=&closedto=&do=index" url2= "https://bugs.archlinux.org/index.php?string=&project=5&search_name=&type%5B%5D=&sev%5B%5D=&pri%5B%5D=&due%5B%5D=0&reported%5B%5D=&cat%5B%5D=&status%5B%5D=1&percent%5B%5D=&opened=&dev=&closed=&duedatefrom=&duedateto=&changedfrom=&changedto=&openedfrom=&openedto=&closedfrom=&closedto=&do=index" def parse_bugtrackerpage(url,count=1): print url # open bugtracker / parse page = urllib2.urlopen(url) soup = BeautifulSoup(page) data = soup.findAll('td',{'class':'task_id'}) msg = "" pages = False # Is there another page with unassigned bugs if soup.findAll('a',{'id': 'next' }) == []: page = False else: print soup.findAll('a',{'id': 'next'}) count += 1 pages = True print count # print all found bugs for f in data: title = f.a['title'].replace('Assigned |','') title = f.a['title'].replace('| 0%','') msg += '* [https://bugs.archlinux.org/task/%s FS#%s] %s \n' % (f.a.string,f.a.string,title) if pages == True: new = "%s&pagenum=%s" % 
(url,count) msg += parse_bugtrackerpage(new,count) return msg msg += '\n\nArchlinux: \n\n' msg += parse_bugtrackerpage(url) msg += '\n\nCommunity: \n\n' msg += parse_bugtrackerpage(url2) msg = msg.encode("utf8") server = smtplib.SMTP(smtpserver) server.sendmail(fromaddr, toaddr,msg) server.quit()
import os
import json
import collections
import datetime
from flask import Flask, request, current_app, make_response, session, escape, Response, jsonify
from flask_jwt_extended import JWTManager, jwt_required, create_access_token, get_jwt_identity
from flask_socketio import SocketIO
from neo4j.v1 import GraphDatabase, basic_auth
from lib.crossDomain import crossdomain
import simplekv.memory
import eventlet

# Application settings, loaded once at import time.
# NOTE(review): the file handle from open() is never closed and the trailing
# semicolon is redundant — harmless but worth tidying in a code change.
config = json.load(open('./config.json'));

UPLOAD_FOLDER = os.path.dirname(os.path.realpath(__file__)) + "/uploads"

# Created unbound here so blueprints can import it; bound to the app below.
x_socketio = SocketIO()

def create_app():
    """Application factory: build and configure the Flask app.

    Sets JWT/upload configuration from config.json, opens a Neo4j driver,
    registers all API blueprints, and attaches the shared SocketIO object.

    :return: tuple (app, jwt) — the Flask app and its JWTManager.
    """
    app = Flask(__name__)
    app.debug = True
    app.config['SECRET_KEY'] = config['auth_secret']
    app.config['JWT_BLACKLIST_ENABLED'] = False
    app.config['JWT_BLACKLIST_STORE'] = simplekv.memory.DictStore()
    app.config['JWT_BLACKLIST_TOKEN_CHECKS'] = 'all'
    # Short-lived access tokens; clients are expected to refresh.
    app.config['JWT_ACCESS_TOKEN_EXPIRES'] = datetime.timedelta(minutes=15)
    app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER

    driver = GraphDatabase.driver(config['database_url'], auth=basic_auth(config['database_user'],config['database_pass']))
    # NOTE(review): db_session is never used in this function; presumably the
    # blueprints open their own sessions — confirm before removing.
    db_session = driver.session()

    # start jwt service
    jwt = JWTManager(app)

    # Import blueprints
    # (Imported here, not at module top, to avoid circular imports with
    # modules that themselves import this package.)
    from auth import auth_blueprint
    from banner import banner_blueprint
    from people import people_blueprint
    from organizations import organizations_blueprint
    from repos import repositories_blueprint
    from schema import schema_blueprint
    from data import data_blueprint
    from search import search_blueprint
    from upload import upload_blueprint
    from export import export_blueprint
    from list import list_blueprint
    from .sockets import sockets as socket_blueprint

    # register API modules
    app.register_blueprint(banner_blueprint)
    app.register_blueprint(auth_blueprint)
    app.register_blueprint(people_blueprint)
    app.register_blueprint(organizations_blueprint)
    app.register_blueprint(repositories_blueprint)
    app.register_blueprint(schema_blueprint)
    app.register_blueprint(search_blueprint)
    app.register_blueprint(data_blueprint)
    app.register_blueprint(upload_blueprint)
    app.register_blueprint(socket_blueprint)
    app.register_blueprint(export_blueprint)
    app.register_blueprint(list_blueprint)

    x_socketio.init_app(app)

    return app, jwt
import sys
sys.path.append('/var/www/html/valumodel.com/scripts/dcf')

from calc_dcf import calc_dcf


def create_dcf(req, tax_rate, growth_rate_1_year_out, sga_of_sales, da_of_sales, capex_of_sales, nwc_of_sales, levered_beta, current_yield, exit_multiple, ticker):
    """Validate the web-form DCF inputs and run the valuation for *ticker*.

    Percentage inputs are converted to fractions (divided by 100); beta and
    the exit multiple are taken as-is. On any malformed number or ticker an
    HTML error page is returned instead of running the model.

    :param req: request object (unused here, kept for the caller's signature).
    :return: HTML string — either calc_dcf's output or an error page.
    """
    # (assumption label, raw form value, divisor) for every numeric input;
    # dividing by 1.0 leaves beta and the exit multiple unchanged.
    numeric_fields = [
        ('Tax Rate', tax_rate, 100.0),
        ('Growth Rate 1 year out', growth_rate_1_year_out, 100.0),
        ('SGA % of sales', sga_of_sales, 100.0),
        ('D&A % of sales', da_of_sales, 100.0),
        ('CAPEX % of sales', capex_of_sales, 100.0),
        ('NWC % of sales', nwc_of_sales, 100.0),
        ('Levered Beta', levered_beta, 1.0),
        ('Current Yield', current_yield, 100.0),
        ('Exit Multiple', exit_multiple, 1.0),
    ]
    assumptions = {}
    try:
        for label, raw, divisor in numeric_fields:
            assumptions[label] = float(raw) / divisor
    except ValueError:
        return '<!doctype html><html><body><h1>Invalid DCF Input. Please try again.</h1></body></html>'

    symbol = ticker.split(' ')[0]
    if not symbol.isalnum():
        return '<!doctype html><html><body><h1>Invalid Ticker. Please try again.</h1></body></html>'

    return calc_dcf(assumptions, symbol.upper())
import os import re import gettext import locale import threading # libsearchfilter_toggle starts thread libsearchfilter_loop import operator import gtk import gobject import pango import ui import misc import formatting import mpdhelper as mpdh from consts import consts import breadcrumbs def library_set_data(album=None, artist=None, genre=None, year=None, path=None): if album is not None: album = unicode(album) if artist is not None: artist = unicode(artist) if genre is not None: genre = unicode(genre) if year is not None: year = unicode(year) if path is not None: path = unicode(path) return (album, artist, genre, year, path) def library_get_data(data, *args): name_to_index = {'album': 0, 'artist': 1, 'genre': 2, 'year': 3, 'path': 4} # Data retrieved from the gtktreeview model is not in # unicode anymore, so convert it. retlist = [unicode(data[name_to_index[arg]]) if data[name_to_index[arg]] \ else None for arg in args] if len(retlist) == 1: return retlist[0] else: return retlist class Library(object): def __init__(self, config, mpd, artwork, TAB_LIBRARY, album_filename, settings_save, filtering_entry_make_red, filtering_entry_revert_color, filter_key_pressed, on_add_item, connected, on_library_button_press, new_tab, get_multicd_album_root_dir): self.artwork = artwork self.config = config self.mpd = mpd self.librarymenu = None # cyclic dependency, set later self.album_filename = album_filename self.settings_save = settings_save self.filtering_entry_make_red = filtering_entry_make_red self.filtering_entry_revert_color = filtering_entry_revert_color self.filter_key_pressed = filter_key_pressed self.on_add_item = on_add_item self.connected = connected self.on_library_button_press = on_library_button_press self.get_multicd_album_root_dir = get_multicd_album_root_dir self.NOTAG = _("Untagged") self.VAstr = _("Various Artists") self.search_terms = [_('Artist'), _('Title'), _('Album'), _('Genre'), _('Filename'), _('Everything')] self.search_terms_mpd = ['artist', 
'title', 'album', 'genre', 'file', 'any'] self.libfilterbox_cmd_buf = None self.libfilterbox_cond = None self.libfilterbox_source = None self.prevlibtodo_base = None self.prevlibtodo_base_results = None self.prevlibtodo = None self.save_timeout = None self.libsearch_last_tooltip = None self.lib_view_filesystem_cache = None self.lib_view_artist_cache = None self.lib_view_genre_cache = None self.lib_view_album_cache = None self.lib_list_genres = None self.lib_list_artists = None self.lib_list_albums = None self.lib_list_years = None self.view_caches_reset() self.libraryvbox = gtk.VBox() self.library = ui.treeview() self.library_selection = self.library.get_selection() self.breadcrumbs = breadcrumbs.CrumbBox() self.breadcrumbs.props.spacing = 2 expanderwindow2 = ui.scrollwindow(add=self.library) self.searchbox = gtk.HBox() self.searchcombo = ui.combo(items=self.search_terms) self.searchcombo.set_tooltip_text(_("Search terms")) self.searchtext = ui.entry() self.searchtext.set_tooltip_text(_("Search library")) self.searchbutton = ui.button(img=ui.image(stock=gtk.STOCK_CANCEL), h=self.searchcombo.size_request()[1]) self.searchbutton.set_no_show_all(True) self.searchbutton.hide() self.searchbutton.set_tooltip_text(_("End Search")) self.libraryview = ui.button(relief=gtk.RELIEF_NONE) self.libraryview.set_tooltip_text(_("Library browsing view")) # disabled as breadcrumbs replace this: self.searchbox.pack_start(ui.label(_("Search:")), False, False, 3) self.searchbox.pack_start(self.searchtext, True, True, 2) self.searchbox.pack_start(self.searchcombo, False, False, 2) self.searchbox.pack_start(self.searchbutton, False, False, 2) self.libraryvbox.pack_start(self.breadcrumbs, False, False, 2) self.libraryvbox.pack_start(expanderwindow2, True, True) self.libraryvbox.pack_start(self.searchbox, False, False, 2) self.tab = new_tab(self.libraryvbox, gtk.STOCK_HARDDISK, TAB_LIBRARY, self.library) # Assign some pixbufs for use in self.library self.openpb2 = 
self.library.render_icon(gtk.STOCK_OPEN, gtk.ICON_SIZE_LARGE_TOOLBAR) self.harddiskpb2 = self.library.render_icon(gtk.STOCK_HARDDISK, gtk.ICON_SIZE_LARGE_TOOLBAR) self.openpb = self.library.render_icon(gtk.STOCK_OPEN, gtk.ICON_SIZE_MENU) self.harddiskpb = self.library.render_icon(gtk.STOCK_HARDDISK, gtk.ICON_SIZE_MENU) self.albumpb = gtk.gdk.pixbuf_new_from_file_at_size( album_filename, consts.LIB_COVER_SIZE, consts.LIB_COVER_SIZE) self.genrepb = self.library.render_icon('gtk-orientation-portrait', gtk.ICON_SIZE_LARGE_TOOLBAR) self.artistpb = self.library.render_icon('artist', gtk.ICON_SIZE_LARGE_TOOLBAR) self.sonatapb = self.library.render_icon('sonata', gtk.ICON_SIZE_MENU) # list of the library views: (id, name, icon name, label) self.VIEWS = [ (consts.VIEW_FILESYSTEM, 'filesystem', gtk.STOCK_HARDDISK, _("Filesystem")), (consts.VIEW_ALBUM, 'album', 'album', _("Albums")), (consts.VIEW_ARTIST, 'artist', 'artist', _("Artists")), (consts.VIEW_GENRE, 'genre', gtk.STOCK_ORIENTATION_PORTRAIT, _("Genres")), ] self.library_view_assign_image() self.library.connect('row_activated', self.on_library_row_activated) self.library.connect('button_press_event', self.on_library_button_press) self.library.connect('key-press-event', self.on_library_key_press) self.library.connect('query-tooltip', self.on_library_query_tooltip) expanderwindow2.connect('scroll-event', self.on_library_scrolled) self.libraryview.connect('clicked', self.library_view_popup) self.searchtext.connect('key-press-event', self.libsearchfilter_key_pressed) self.searchtext.connect('activate', self.libsearchfilter_on_enter) self.searchbutton.connect('clicked', self.on_search_end) self.libfilter_changed_handler = self.searchtext.connect( 'changed', self.libsearchfilter_feed_loop) searchcombo_changed_handler = self.searchcombo.connect( 'changed', self.on_library_search_combo_change) # Initialize library data and widget self.libraryposition = {} self.libraryselectedpath = {} 
        # --- continuation of __init__ (def starts before this chunk) ---
        # Restore the last-used search type without firing the changed handler:
        self.searchcombo.handler_block(searchcombo_changed_handler)
        self.searchcombo.set_active(self.config.last_search_num)
        self.searchcombo.handler_unblock(searchcombo_changed_handler)
        # Model columns: 0 = icon pixbuf, 1 = opaque library data object,
        # 2 = markup string shown to the user (also the interactive-search column).
        self.librarydata = gtk.ListStore(gtk.gdk.Pixbuf, gobject.TYPE_PYOBJECT,
                                         str)
        self.library.set_model(self.librarydata)
        self.library.set_search_column(2)
        self.librarycell = gtk.CellRendererText()
        self.librarycell.set_property("ellipsize", pango.ELLIPSIZE_END)
        self.libraryimg = gtk.CellRendererPixbuf()
        self.librarycolumn = gtk.TreeViewColumn()
        self.librarycolumn.pack_start(self.libraryimg, False)
        self.librarycolumn.pack_start(self.librarycell, True)
        self.librarycolumn.set_attributes(self.libraryimg, pixbuf=0)
        self.librarycolumn.set_attributes(self.librarycell, markup=2)
        self.librarycolumn.set_sizing(gtk.TREE_VIEW_COLUMN_AUTOSIZE)
        self.library.append_column(self.librarycolumn)
        self.library_selection.set_mode(gtk.SELECTION_MULTIPLE)

    def get_libraryactions(self):
        """Return gtk action tuples (one per library view) for the UI manager."""
        return [(name + 'view', icon, label, None, None,
                 self.on_libraryview_chosen)
                for _view, name, icon, label in self.VIEWS]

    def get_model(self):
        """Return the library ListStore."""
        return self.librarydata

    def get_widgets(self):
        """Return the top-level container widget of the library pane."""
        return self.libraryvbox

    def get_treeview(self):
        """Return the library TreeView widget."""
        return self.library

    def get_selection(self):
        """Return the TreeSelection of the library view."""
        return self.library_selection

    def set_librarymenu(self, librarymenu):
        """Attach the externally-built context menu to the view button."""
        self.librarymenu = librarymenu
        self.librarymenu.attach_to_widget(self.libraryview, None)

    def library_view_popup(self, button):
        """Pop up the view-selection menu anchored below *button*."""
        self.librarymenu.popup(None, None, self.library_view_position_menu, 1,
                               0, button)

    def library_view_position_menu(self, _menu, button):
        """Menu position func: place the menu just under *button*."""
        x, y, _width, height = button.get_allocation()
        return (self.config.x + x, self.config.y + y + height, True)

    def on_libraryview_chosen(self, action):
        """Switch the library to the view named by *action* and reset state.

        Clears saved scroll positions/selections, rebrowses from the root,
        and deselects everything.
        """
        if self.search_visible():
            self.on_search_end(None)
        if action.get_name() == 'filesystemview':
            self.config.lib_view = consts.VIEW_FILESYSTEM
        elif action.get_name() == 'artistview':
            self.config.lib_view = consts.VIEW_ARTIST
        elif action.get_name() == 'genreview':
            self.config.lib_view = consts.VIEW_GENRE
        elif action.get_name() == 'albumview':
            self.config.lib_view = consts.VIEW_ALBUM
        self.library.grab_focus()
        self.library_view_assign_image()
        self.libraryposition = {}
        self.libraryselectedpath = {}
        self.library_browse(self.library_set_data(path="/"))
        try:
            if len(self.librarydata) > 0:
                self.library_selection.unselect_range(
                    (0,), (len(self.librarydata) - 1,))
        except:
            pass
        gobject.idle_add(self.library.scroll_to_point, 0, 0)

    def library_view_assign_image(self):
        """Update the view button's icon and label to match the current view."""
        _view, _name, icon, label = [v for v in self.VIEWS
                                     if v[0] == self.config.lib_view][0]
        self.libraryview.set_image(ui.image(stock=icon))
        self.libraryview.set_label(" " + label)

    def view_caches_reset(self):
        """Drop all cached view/list data.

        We should call this on first load and whenever mpd is updated.
        """
        self.lib_view_filesystem_cache = None
        self.lib_view_artist_cache = None
        self.lib_view_genre_cache = None
        self.lib_view_album_cache = None
        self.lib_list_genres = None
        self.lib_list_artists = None
        self.lib_list_albums = None
        self.lib_list_years = None

    def on_library_scrolled(self, _widget, _event):
        """Scroll handler: schedule an artwork refresh for visible rows."""
        try:
            # Use gobject.idle_add so that we can get the visible
            # state of the treeview
            gobject.idle_add(self._on_library_scrolled)
        except:
            pass

    def _on_library_scrolled(self):
        """Idle callback: ask the artwork module to update visible rows."""
        if not self.config.show_covers:
            return
        # This avoids a warning about a NULL node in get_visible_range
        if not self.library.props.visible:
            return
        vis_range = self.library.get_visible_range()
        if vis_range is None:
            return
        try:
            start_row = int(vis_range[0][0])
            end_row = int(vis_range[1][0])
        except IndexError:
            # get_visible_range failed
            return
        self.artwork.library_artwork_update(self.librarydata, start_row,
                                            end_row, self.albumpb)

    def library_browse(self, _widget=None, root=None):
        # Populates the library list with entries
        if not self.connected():
            return
        # Fall back to the root node when no (valid) target was given:
        if root is None or (self.config.lib_view == consts.VIEW_FILESYSTEM \
                and self.library_get_data(root, 'path') is None):
            root = self.library_set_data(path="/")
        if self.config.wd is None or (self.config.lib_view == \
                consts.VIEW_FILESYSTEM and \
                self.library_get_data(self.config.wd, 'path') is None):
            self.config.wd = self.library_set_data(path="/")
        prev_selection = []
        prev_selection_root = False
        prev_selection_parent = False
        if root == self.config.wd:
            # This will happen when the database is updated. So, lets save
            # the current selection in order to try to re-select it after
            # the update is over.
            model, selected = self.library_selection.get_selected_rows()
            for path in selected:
                if model.get_value(model.get_iter(path), 2) == "/":
                    prev_selection_root = True
                elif model.get_value(model.get_iter(path), 2) == "..":
                    prev_selection_parent = True
                else:
                    prev_selection.append(model.get_value(
                        model.get_iter(path), 1))
            self.libraryposition[self.config.wd] = \
                self.library.get_visible_rect()[1]
            path_updated = True
        else:
            path_updated = False
        new_level = self.library_get_data_level(root)
        curr_level = self.library_get_data_level(self.config.wd)
        # The logic below is more consistent with, e.g., thunar.
        if new_level > curr_level:
            # Save position and row for where we just were if we've
            # navigated into a sub-directory:
            self.libraryposition[self.config.wd] = \
                self.library.get_visible_rect()[1]
            model, rows = self.library_selection.get_selected_rows()
            if len(rows) > 0:
                data = self.librarydata.get_value(
                    self.librarydata.get_iter(rows[0]), 2)
                if not data in ("..", "/"):
                    self.libraryselectedpath[self.config.wd] = rows[0]
        elif (self.config.lib_view == consts.VIEW_FILESYSTEM and \
                root != self.config.wd) \
                or (self.config.lib_view != consts.VIEW_FILESYSTEM and
                    new_level != curr_level):
            # If we've navigated to a parent directory, don't save
            # anything so that the user will enter that subdirectory
            # again at the top position with nothing selected
            self.libraryposition[self.config.wd] = 0
            self.libraryselectedpath[self.config.wd] = None
        # In case sonata is killed or crashes, we'll save the library state
        # in 5 seconds (first removing any current settings_save timeouts)
        if self.config.wd != root:
            try:
                gobject.source_remove(self.save_timeout)
            except:
                pass
            self.save_timeout = gobject.timeout_add(5000, self.settings_save)
        self.config.wd = root
        self.library.freeze_child_notify()
        self.librarydata.clear()
        # Populate treeview with data; if a node turns out to be empty, walk
        # up toward the root until something is found (or we hit the top):
        bd = []
        while len(bd) == 0:
            if self.config.lib_view == consts.VIEW_FILESYSTEM:
                bd = self.library_populate_filesystem_data(
                    self.library_get_data(self.config.wd, 'path'))
            elif self.config.lib_view == consts.VIEW_ALBUM:
                album, artist, year = self.library_get_data(
                    self.config.wd, 'album', 'artist', 'year')
                if album is not None:
                    bd = self.library_populate_data(artist=artist, album=album,
                                                    year=year)
                else:
                    bd = self.library_populate_toplevel_data(albumview=True)
            elif self.config.lib_view == consts.VIEW_ARTIST:
                artist, album, year = self.library_get_data(
                    self.config.wd, 'artist', 'album', 'year')
                if artist is not None and album is not None:
                    bd = self.library_populate_data(artist=artist, album=album,
                                                    year=year)
                elif artist is not None:
                    bd = self.library_populate_data(artist=artist)
                else:
                    bd = self.library_populate_toplevel_data(artistview=True)
            elif self.config.lib_view == consts.VIEW_GENRE:
                genre, artist, album, year = self.library_get_data(
                    self.config.wd, 'genre', 'artist', 'album', 'year')
                if genre is not None and artist is not None and album is \
                        not None:
                    bd = self.library_populate_data(genre=genre, artist=artist,
                                                    album=album, year=year)
                elif genre is not None and artist is not None:
                    bd = self.library_populate_data(genre=genre, artist=artist)
                elif genre is not None:
                    bd = self.library_populate_data(genre=genre)
                else:
                    bd = self.library_populate_toplevel_data(genreview=True)
            if len(bd) == 0:
                # Nothing found; go up a level until we reach the top level
                # or results are found
                last_wd = self.config.wd
                self.config.wd = self.library_get_parent()
                if self.config.wd == last_wd:
                    break
        for _sort, path in bd:
            self.librarydata.append(path)
        self.library.thaw_child_notify()
        # Scroll back to set view for current dir:
        self.library.realize()
        gobject.idle_add(self.library_set_view, not path_updated)
        if len(prev_selection) > 0 or prev_selection_root or \
                prev_selection_parent:
            # Retain pre-update selection:
            self.library_retain_selection(prev_selection, prev_selection_root,
                                          prev_selection_parent)
        # Update library artwork as necessary
        self.on_library_scrolled(None, None)
        self.update_breadcrumbs()

    def update_breadcrumbs(self):
        """Rebuild the breadcrumb button bar for the current location."""
        # remove previous buttons
        for b in self.breadcrumbs:
            self.breadcrumbs.remove(b)
        # add the views button first
        b = ui.button(text=_(" v "), can_focus=False, relief=gtk.RELIEF_NONE)
        b.connect('clicked', self.library_view_popup)
        self.breadcrumbs.pack_start(b, False, False)
        b.show()
        # add the ellipsis explicitly XXX make this unnecessary
        b = ui.label("...")
        self.breadcrumbs.pack_start(b, False, False)
        b.show()
        # find info for current view
        view, _name, icon, label = [v for v in self.VIEWS
                                    if v[0] == self.config.lib_view][0]
        # the first crumb is the root of the current view
        crumbs = [(label, icon, None, self.library_set_data(path='/'))]
        # rest of the crumbs are specific to the view
        if view == consts.VIEW_FILESYSTEM:
            path = self.library_get_data(self.config.wd, 'path')
            if path and path != '/':
                parts = path.split('/')
            else:
                parts = []  # no crumbs for /
            # append a crumb for each part
            for i, part in enumerate(parts):
                partpath = '/'.join(parts[:i + 1])
                target = self.library_set_data(path=partpath)
                crumbs.append((part, gtk.STOCK_OPEN, None, target))
        else:
            if view == consts.VIEW_ALBUM:
                # We don't want to show an artist button in album view
                keys = 'genre', 'album'
                nkeys = 2
            else:
                keys = 'genre', 'artist', 'album'
                nkeys = 3
            parts = self.library_get_data(self.config.wd, *keys)
            # append a crumb for each part
            for i, key, part in zip(range(nkeys), keys, parts):
                if part is None:
                    continue
                # py2: zip() returns a list, so slicing is valid here
                partdata = dict(zip(keys, parts)[:i + 1])
                target = self.library_set_data(**partdata)
                pb, icon = None, None
                if key == 'album':
                    # Album artwork, with self.alumbpb as a backup:
                    artist, album, path = self.library_get_data(
                        self.config.wd, 'artist', 'album', 'path')
                    cache_data = self.library_set_data(artist=artist,
                                                       album=album, path=path)
                    pb = self.artwork.get_library_artwork_cached_pb(cache_data,
                                                                    None)
                    if pb is None:
                        icon = 'album'
                elif key == 'artist':
                    icon = 'artist'
                else:
                    icon = gtk.STOCK_ORIENTATION_PORTRAIT
                crumbs.append((part, icon, pb, target))
        # add a button for each crumb
        for crumb in crumbs:
            text, icon, pb, target = crumb
            text = misc.escape_html(text)
            if crumb is crumbs[-1]:
                text = "<b>%s</b>" % text
            label = ui.label(markup=text)
            if icon:
                image = ui.image(stock=icon)
            elif pb:
                pb = pb.scale_simple(16, 16, gtk.gdk.INTERP_HYPER)
                image = ui.image(pb=pb)
            # NOTE(review): if a crumb has neither icon nor pb, `image` is
            # stale/undefined here — confirm every crumb carries one or the
            # other.
            b = breadcrumbs.CrumbButton(image, label)
            if crumb is crumbs[-1]:
                # FIXME makes the button request minimal space:
                # label.props.ellipsize = pango.ELLIPSIZE_END
                b.props.active = True
            # FIXME why doesn't the tooltip show?
            b.set_tooltip_text(label.get_label())
            b.connect('toggled', self.library_browse, target)
            self.breadcrumbs.pack_start(b, False, False)
            b.show_all()

    def library_populate_add_parent_rows(self):
        """Return the '/' and '..' navigation rows (currently disabled)."""
        return []  # disabled as breadcrumbs replace these
        # Unreachable legacy implementation kept for reference:
        if self.config.lib_view == consts.VIEW_FILESYSTEM:
            bd = [('0', [self.harddiskpb, self.library_set_data(path='/'),
                         '/'])]
            bd += [('1', [self.openpb, self.library_set_data(path='..'),
                          '..'])]
        else:
            bd = [('0', [self.harddiskpb2, self.library_set_data(path='/'),
                         '/'])]
            bd += [('1', [self.openpb2, self.library_set_data(path='..'),
                          '..'])]
        return bd

    def library_populate_filesystem_data(self, path):
        """List all dirs/files at *path* as (sortkey, row) pairs."""
        bd = []
        if path == '/' and self.lib_view_filesystem_cache is not None:
            # Use cache if possible...
            bd = self.lib_view_filesystem_cache
        else:
            for item in self.mpd.lsinfo(path):
                if 'directory' in item:
                    name = mpdh.get(item, 'directory').split('/')[-1]
                    data = self.library_set_data(path=mpdh.get(item,
                                                               'directory'))
                    # 'd'/'f' sort prefixes keep directories before files
                    bd += [('d' + unicode(name).lower(),
                            [self.openpb, data, misc.escape_html(name)])]
                elif 'file' in item:
                    data = self.library_set_data(path=mpdh.get(item, 'file'))
                    bd += [('f' + unicode(mpdh.get(item, 'file')).lower(),
                            [self.sonatapb, data,
                             formatting.parse(self.config.libraryformat, item,
                                              True)])]
            bd.sort(key=operator.itemgetter(0))
            if path != '/' and len(bd) > 0:
                bd = self.library_populate_add_parent_rows() + bd
            if path == '/':
                self.lib_view_filesystem_cache = bd
        return bd

    def library_get_toplevel_cache(self, genreview=False, artistview=False,
                                   albumview=False):
        """Return the cached top-level rows for the requested view, or None.

        Before returning, refreshes any placeholder album pixbufs from the
        artwork cache.
        """
        if genreview and self.lib_view_genre_cache is not None:
            bd = self.lib_view_genre_cache
        elif artistview and self.lib_view_artist_cache is not None:
            bd = self.lib_view_artist_cache
        elif albumview and self.lib_view_album_cache is not None:
            bd = self.lib_view_album_cache
        else:
            return None
        # Check if we can update any artwork:
        for _sort, info in bd:
            pb = info[0]
            if pb == self.albumpb:
                artist, album, path = self.library_get_data(info[1], 'artist',
                                                            'album', 'path')
                key = self.library_set_data(path=path, artist=artist,
                                            album=album)
                pb2 = self.artwork.get_library_artwork_cached_pb(key, None)
                if pb2 is not None:
                    info[0] = pb2
        return bd

    def library_populate_toplevel_data(self, genreview=False, artistview=False,
                                       albumview=False):
        """Build (or fetch cached) top-level rows for genre/artist/album view."""
        bd = self.library_get_toplevel_cache(genreview, artistview, albumview)
        if bd is not None:
            # We have our cached data, woot.
            return bd
        bd = []
        if genreview or artistview:
            # Only for artist/genre views, album view is handled differently
            # since multiple artists can have the same album name
            if genreview:
                items = self.library_return_list_items('genre')
                pb = self.genrepb
            else:
                items = self.library_return_list_items('artist')
                pb = self.artistpb
            if not (self.NOTAG in items):
                items.append(self.NOTAG)
            for item in items:
                if genreview:
                    playtime, num_songs = self.library_return_count(genre=item)
                    data = self.library_set_data(genre=item)
                else:
                    playtime, num_songs = self.library_return_count(
                        artist=item)
                    data = self.library_set_data(artist=item)
                if num_songs > 0:
                    display = misc.escape_html(item)
                    display += self.add_display_info(num_songs,
                                                     int(playtime) / 60)
                    bd += [(misc.lower_no_the(item), [pb, data, display])]
        elif albumview:
            albums = []
            untagged_found = False
            for item in self.mpd.listallinfo('/'):
                if 'file' in item and 'album' in item:
                    album = mpdh.get(item, 'album')
                    artist = mpdh.get(item, 'artist', self.NOTAG)
                    year = mpdh.get(item, 'date', self.NOTAG)
                    path = self.get_multicd_album_root_dir(
                        os.path.dirname(mpdh.get(item, 'file')))
                    data = self.library_set_data(album=album, artist=artist,
                                                 year=year, path=path)
                    albums.append(data)
                    if album == self.NOTAG:
                        untagged_found = True
            if not untagged_found:
                albums.append(self.library_set_data(album=self.NOTAG))
            albums = misc.remove_list_duplicates(albums, case=False)
            albums = self.list_identify_VA_albums(albums)
            for item in albums:
                album, artist, year, path = self.library_get_data(
                    item, 'album', 'artist', 'year', 'path')
                playtime, num_songs = self.library_return_count(artist=artist,
                                                                album=album,
                                                                year=year)
                if num_songs > 0:
                    data = self.library_set_data(artist=artist, album=album,
                                                 year=year, path=path)
                    display = misc.escape_html(album)
                    if artist and year and len(artist) > 0 and len(year) > 0 \
                            and artist != self.NOTAG and year != self.NOTAG:
                        display += " <span weight='light'>(%s, %s)</span>" \
                                % (misc.escape_html(artist),
                                   misc.escape_html(year))
                    elif artist and len(artist) > 0 and artist != self.NOTAG:
                        display += " <span weight='light'>(%s)</span>" \
                                % misc.escape_html(artist)
                    elif year and len(year) > 0 and year != self.NOTAG:
                        display += " <span weight='light'>(%s)</span>" \
                                % misc.escape_html(year)
                    display += self.add_display_info(num_songs,
                                                     int(playtime) / 60)
                    bd += [(misc.lower_no_the(album),
                            [self.albumpb, data, display])]
        # py2 list.sort(cmp, key): locale-aware sort on the sort key
        bd.sort(locale.strcoll, key=operator.itemgetter(0))
        if genreview:
            self.lib_view_genre_cache = bd
        elif artistview:
            self.lib_view_artist_cache = bd
        elif albumview:
            self.lib_view_album_cache = bd
        return bd

    def list_identify_VA_albums(self, albums):
        """Collapse runs of same album/year/path with differing artists.

        If at least consts.NUM_ARTISTS_FOR_VA consecutive entries share
        album, year and path but have distinct artists, they are merged
        into one entry attributed to self.VAstr ("Various Artists").
        Mutates and returns *albums* (assumed pre-sorted so that candidate
        entries are adjacent — TODO confirm with caller).
        """
        for i in range(len(albums)):
            if i + consts.NUM_ARTISTS_FOR_VA - 1 > len(albums) - 1:
                break
            VA = False
            for j in range(1, consts.NUM_ARTISTS_FOR_VA):
                if unicode(self.library_get_data(albums[i], 'album')).lower() \
                        != unicode(self.library_get_data(albums[i + j],
                                                         'album')).lower() or \
                        self.library_get_data(albums[i], 'year') != \
                        self.library_get_data(albums[i + j], 'year') or \
                        self.library_get_data(albums[i], 'path') != \
                        self.library_get_data(albums[i + j], 'path'):
                    break
                if unicode(self.library_get_data(albums[i], 'artist')) == \
                        unicode(self.library_get_data(albums[i + j],
                                                      'artist')):
                    # Same artist twice: drop the duplicate and re-check
                    albums.pop(i + j)
                    break
                if j == consts.NUM_ARTISTS_FOR_VA - 1:
                    VA = True
            if VA:
                album, year, path = self.library_get_data(albums[i], 'album',
                                                          'year', 'path')
                artist = self.VAstr
                albums[i] = self.library_set_data(album=album, artist=artist,
                                                  year=year, path=path)
                # Remove the now-redundant per-artist entries:
                j = 1
                while i + j <= len(albums) - 1:
                    if unicode(self.library_get_data(
                            albums[i], 'album')).lower() == \
                            unicode(self.library_get_data(
                                albums[i + j], 'album')).lower() \
                            and self.library_get_data(albums[i], 'year') == \
                            self.library_get_data(albums[i + j], 'year'):
                        albums.pop(i + j)
                    else:
                        break
        return albums

    def get_VAstr(self):
        """Return the localized 'Various Artists' placeholder string."""
        return self.VAstr

    def library_populate_data(self, genre=None, artist=None, album=None,
                              year=None):
        # Create treeview model info
        bd = []
        if genre is not None and artist is None and album is None:
            # Artists within a genre
            artists = self.library_return_list_items('artist', genre=genre)
            if len(artists) > 0:
                if not self.NOTAG in artists:
                    artists.append(self.NOTAG)
                for artist in artists:
                    playtime, num_songs = self.library_return_count(
                        genre=genre, artist=artist)
                    if num_songs > 0:
                        display = misc.escape_html(artist)
                        display += self.add_display_info(num_songs,
                                                         int(playtime) / 60)
                        data = self.library_set_data(genre=genre,
                                                     artist=artist)
                        bd += [(misc.lower_no_the(artist),
                                [self.artistpb, data, display])]
        elif artist is not None and album is None:
            # Albums/songs within an artist and possibly genre
            # Albums first:
            if genre is not None:
                albums = self.library_return_list_items('album', genre=genre,
                                                        artist=artist)
            else:
                albums = self.library_return_list_items('album',
                                                        artist=artist)
            for album in albums:
                if genre is not None:
                    years = self.library_return_list_items('date', genre=genre,
                                                           artist=artist,
                                                           album=album)
                else:
                    years = self.library_return_list_items('date',
                                                           artist=artist,
                                                           album=album)
                if not self.NOTAG in years:
                    years.append(self.NOTAG)
                for year in years:
                    if genre is not None:
                        playtime, num_songs = self.library_return_count(
                            genre=genre, artist=artist, album=album, year=year)
                        if num_songs > 0:
                            files = self.library_return_list_items(
                                'file', genre=genre, artist=artist,
                                album=album, year=year)
                            path = os.path.dirname(files[0])
                            data = self.library_set_data(genre=genre,
                                                         artist=artist,
                                                         album=album,
                                                         year=year, path=path)
                    else:
                        playtime, num_songs = self.library_return_count(
                            artist=artist, album=album, year=year)
                        if num_songs > 0:
                            files = self.library_return_list_items(
                                'file', artist=artist, album=album, year=year)
                            path = os.path.dirname(files[0])
                            data = self.library_set_data(artist=artist,
                                                         album=album,
                                                         year=year, path=path)
                    if num_songs > 0:
                        cache_data = self.library_set_data(artist=artist,
                                                           album=album,
                                                           path=path)
                        display = misc.escape_html(album)
                        if year and len(year) > 0 and year != self.NOTAG:
                            display += " <span weight='light'>(%s)</span>" \
                                    % misc.escape_html(year)
                        display += self.add_display_info(num_songs,
                                                         int(playtime) / 60)
                        # '9999' sorts untagged years after all real years
                        ordered_year = year
                        if ordered_year == self.NOTAG:
                            ordered_year = '9999'
                        pb = self.artwork.get_library_artwork_cached_pb(
                            cache_data, self.albumpb)
                        bd += [(ordered_year + misc.lower_no_the(album),
                                [pb, data, display])]
            # Now, songs not in albums:
            bd += self.library_populate_data_songs(genre, artist, self.NOTAG,
                                                   None)
        else:
            # Songs within an album, artist, year, and possibly genre
            bd += self.library_populate_data_songs(genre, artist, album, year)
        if len(bd) > 0:
            bd = self.library_populate_add_parent_rows() + bd
        bd.sort(locale.strcoll, key=operator.itemgetter(0))
        return bd

    def library_populate_data_songs(self, genre, artist, album, year):
        """Return song rows matching the given tag filters."""
        bd = []
        if genre is not None:
            songs, _playtime, _num_songs = \
                    self.library_return_search_items(genre=genre,
                                                     artist=artist,
                                                     album=album, year=year)
        else:
            songs, _playtime, _num_songs = self.library_return_search_items(
                artist=artist, album=album, year=year)
        for song in songs:
            data = self.library_set_data(path=mpdh.get(song, 'file'))
            track = mpdh.get(song, 'track', '99', False, 2)
            disc = mpdh.get(song, 'disc', '99', False, 2)
            try:
                bd += [('f' + disc + track + misc.lower_no_the(
                    mpdh.get(song, 'title')),
                        [self.sonatapb, data,
                         formatting.parse(self.config.libraryformat, song,
                                          True)])]
            except:
                # Fall back to the filename when the title is missing/broken
                bd += [('f' + disc + track + \
                        unicode(mpdh.get(song, 'file')).lower(),
                        [self.sonatapb, data,
                         formatting.parse(self.config.libraryformat, song,
                                          True)])]
        return bd

    def library_return_list_items(self, itemtype, genre=None, artist=None,
                                  album=None, year=None, ignore_case=True):
        # Returns all items of tag 'itemtype', in alphabetical order,
        # using mpd's 'list'. If searchtype is passed, use
        # a case insensitive search, via additional 'list'
        # queries, since using a single 'list' call will be
        # case sensitive.
        results = []
        searches = self.library_compose_list_count_searchlist(genre, artist,
                                                              album, year)
        if len(searches) > 0:
            for s in searches:
                # If we have untagged tags (''), use search instead
                # of list because list will not return anything.
                if '' in s:
                    items = []
                    songs, playtime, num_songs = \
                            self.library_return_search_items(genre, artist,
                                                             album, year)
                    for song in songs:
                        items.append(mpdh.get(song, itemtype))
                else:
                    items = self.mpd.list(itemtype, *s)
                for item in items:
                    if len(item) > 0:
                        results.append(item)
        else:
            # No filters given: plain 'list' over the whole library
            if genre is None and artist is None and album is None and year \
                    is None:
                for item in self.mpd.list(itemtype):
                    if len(item) > 0:
                        results.append(item)
        if ignore_case:
            results = misc.remove_list_duplicates(results, case=False)
        results.sort(locale.strcoll)
        return results

    def library_return_count(self, genre=None, artist=None, album=None,
                             year=None):
        # Because mpd's 'count' is case sensitive, we have to
        # determine all equivalent items (case insensitive) and
        # call 'count' for each of them. Using 'list' + 'count'
        # involves much less data to be transferred back and
        # forth than to use 'search' and count manually.
        searches = self.library_compose_list_count_searchlist(genre, artist,
                                                              album, year)
        playtime = 0
        num_songs = 0
        for s in searches:
            if '' in s and self.mpd.version <= (0, 13):
                # Can't return count for empty tags, use search instead:
                # NOTE(review): this branch *assigns* playtime/num_songs
                # instead of accumulating — confirm whether intentional.
                _results, playtime, num_songs = \
                        self.library_return_search_items(
                            genre=genre, artist=artist, album=album, year=year)
            else:
                count = self.mpd.count(*s)
                playtime += mpdh.get(count, 'playtime', 0, True)
                num_songs += mpdh.get(count, 'songs', 0, True)
        return (playtime, num_songs)

    def library_compose_list_count_searchlist_single(self, search, typename,
                                                     cached_list, searchlist):
        """Extend *searchlist* with (typename, value) pairs matching *search*.

        Matching is case-insensitive against *cached_list* (fetched and
        cached on demand). Returns (new searchlist, cached_list);
        the searchlist is None when the term provably has no matches.
        """
        s = []
        # 'Various Artists' is synthetic, never a real mpd tag value:
        skip_type = (typename == 'artist' and search == self.VAstr)
        if search is not None and not skip_type:
            if search == self.NOTAG:
                itemlist = [search, '']
            else:
                itemlist = []
                if cached_list is None:
                    cached_list = self.library_return_list_items(
                        typename, ignore_case=False)
                    # This allows us to match untagged items
                    cached_list.append('')
                for item in cached_list:
                    if unicode(item).lower() == unicode(search).lower():
                        itemlist.append(item)
            if len(itemlist) == 0:
                # There should be no results!
                return None, cached_list
            for item in itemlist:
                # Cartesian-extend existing search tuples with this term:
                if len(searchlist) > 0:
                    for item2 in searchlist:
                        s.append(item2 + (typename, item))
                else:
                    s.append((typename, item))
        else:
            s = searchlist
        return s, cached_list

    def library_compose_list_count_searchlist(self, genre=None, artist=None,
                                              album=None, year=None):
        """Build case-expanded search tuples for list/count queries."""
        s = []
        s, self.lib_list_genres = \
                self.library_compose_list_count_searchlist_single(
                    genre, 'genre', self.lib_list_genres, s)
        if s is None:
            return []
        s, self.lib_list_artists = \
                self.library_compose_list_count_searchlist_single(
                    artist, 'artist', self.lib_list_artists, s)
        if s is None:
            return []
        s, self.lib_list_albums = \
                self.library_compose_list_count_searchlist_single(
                    album, 'album', self.lib_list_albums, s)
        if s is None:
            return []
        s, self.lib_list_years = \
                self.library_compose_list_count_searchlist_single(
                    year, 'date', self.lib_list_years, s)
        if s is None:
            return []
        return s

    def library_compose_search_searchlist_single(self, search, typename,
                                                 searchlist):
        """Like the list/count variant but without case expansion."""
        s = []
        skip_type = (typename == 'artist' and search == self.VAstr)
        if search is not None and not skip_type:
            if search == self.NOTAG:
                itemlist = [search, '']
            else:
                itemlist = [search]
            for item in itemlist:
                if len(searchlist) > 0:
                    for item2 in searchlist:
                        s.append(item2 + (typename, item))
                else:
                    s.append((typename, item))
        else:
            s = searchlist
        return s

    def library_compose_search_searchlist(self, genre=None, artist=None,
                                          album=None, year=None):
        """Build search tuples for mpd 'search' queries."""
        s = []
        s = self.library_compose_search_searchlist_single(genre, 'genre', s)
        s = self.library_compose_search_searchlist_single(album, 'album', s)
        s = self.library_compose_search_searchlist_single(artist, 'artist', s)
        s = self.library_compose_search_searchlist_single(year, 'date', s)
        return s

    def library_return_search_items(self, genre=None, artist=None, album=None,
                                    year=None):
        # Returns all mpd items, using mpd's 'search', along with
        # playtime and num_songs.
        searches = self.library_compose_search_searchlist(genre, artist,
                                                          album, year)
        for s in searches:
            args_tuple = tuple(map(str, s))
            playtime = 0
            num_songs = 0
            results = []
            if '' in s and self.mpd.version <= (0, 13):
                # Can't search for empty tags, search broader and
                # filter instead:
                # Strip empty tag args from tuple:
                pos = list(args_tuple).index('')
                strip_type = list(args_tuple)[pos - 1]
                new_lst = []
                for i, item in enumerate(list(args_tuple)):
                    if i != pos and i != pos - 1:
                        new_lst.append(item)
                args_tuple = tuple(new_lst)
            else:
                strip_type = None
            if len(args_tuple) == 0:
                return None, 0, 0
            items = self.mpd.search(*args_tuple)
            if items is not None:
                for item in items:
                    if strip_type is None or (strip_type is not None and not \
                            strip_type in item.keys()):
                        match = True
                        pos = 0
                        # Ensure that if, e.g., "foo" is searched,
                        # "foobar" isn't returned too
                        for arg in args_tuple[::2]:
                            if arg in item and \
                                    unicode(mpdh.get(item, arg)).upper() != \
                                    unicode(args_tuple[pos + 1]).upper():
                                match = False
                                break
                            pos += 2
                        if match:
                            results.append(item)
                            num_songs += 1
                            playtime += mpdh.get(item, 'time', 0, True)
            # NOTE(review): returns inside the loop, so only the first
            # search tuple is ever evaluated — confirm whether intentional.
            return (results, int(playtime), num_songs)

    def add_display_info(self, num_songs, playtime):
        """Return the small markup suffix '(N songs, M minutes)'."""
        return "\n<small><span weight='light'>%s %s, %s %s</span></small>" \
                % (num_songs, gettext.ngettext('song', 'songs', num_songs),
                   playtime, gettext.ngettext('minute', 'minutes', playtime))

    def library_retain_selection(self, prev_selection, prev_selection_root,
                                 prev_selection_parent):
        """Re-select the rows that were selected before a model refresh."""
        # Unselect everything:
        if len(self.librarydata) > 0:
            self.library_selection.unselect_range(
                (0,), (len(self.librarydata) - 1,))
        # Now attempt to retain the selection from before the update:
        for value in prev_selection:
            for row in self.librarydata:
                if value == row[1]:
                    self.library_selection.select_path(row.path)
                    break
        if prev_selection_root:
            self.library_selection.select_path((0,))
        if prev_selection_parent:
            self.library_selection.select_path((1,))

    def library_set_view(self, select_items=True):
        # select_items should be false if the same directory has merely
        # been refreshed (updated)
        try:
            if self.config.wd in self.libraryposition:
                self.library.scroll_to_point(
                    -1, self.libraryposition[self.config.wd])
            else:
                self.library.scroll_to_point(0, 0)
        except:
            self.library.scroll_to_point(0, 0)
        # Select and focus previously selected item
        if select_items:
            if self.config.wd in self.libraryselectedpath:
                try:
                    if self.libraryselectedpath[self.config.wd]:
                        self.library_selection.select_path(
                            self.libraryselectedpath[self.config.wd])
                        self.library.grab_focus()
                except:
                    pass

    def library_set_data(self, *args, **kwargs):
        """Thin wrapper over the module-level library_set_data."""
        return library_set_data(*args, **kwargs)

    def library_get_data(self, data, *args):
        """Thin wrapper over the module-level library_get_data."""
        return library_get_data(data, *args)

    def library_get_data_level(self, data):
        """Return how deep *data* sits in the current view's hierarchy."""
        if self.config.lib_view == consts.VIEW_FILESYSTEM:
            # Returns the number of directories down:
            if library_get_data(data, 'path') == '/':
                # Every other path doesn't start with "/", so
                # start the level numbering at -1
                return -1
            else:
                return library_get_data(data, 'path').count("/")
        else:
            # Returns the number of items stored in data, excluding
            # the path:
            level = 0
            album, artist, genre, year = library_get_data(
                data, 'album', 'artist', 'genre', 'year')
            for item in [album, artist, genre, year]:
                if item is not None:
                    level += 1
            return level

    def on_library_key_press(self, widget, event):
        """Activate the cursor row when Return is pressed."""
        if event.keyval == gtk.gdk.keyval_from_name('Return'):
            self.on_library_row_activated(widget, widget.get_cursor()[0])
            return True

    def on_library_query_tooltip(self, widget, x, y, keyboard_mode, tooltip):
        """Show song/path tooltips while a search is active."""
        if keyboard_mode or not self.search_visible():
            widget.set_tooltip_text(None)
            return False
        bin_x, bin_y = widget.convert_widget_to_bin_window_coords(x, y)
        pathinfo = widget.get_path_at_pos(bin_x, bin_y)
        if not pathinfo:
            widget.set_tooltip_text(None)
            # If the user hovers over an empty row and then back to
            # a row with a search result, this will ensure the tooltip
            # shows up again:
            gobject.idle_add(self.library_search_tooltips_enable, widget, x, y,
                             keyboard_mode, None)
            return False
        treepath, _col, _x2, _y2 = pathinfo
        i = self.librarydata.get_iter(treepath[0])
        path = misc.escape_html(self.library_get_data(
            self.librarydata.get_value(i, 1), 'path'))
        song = self.librarydata.get_value(i, 2)
        new_tooltip = "<b>%s:</b> %s\n<b>%s:</b> %s" \
                % (_("Song"), song, _("Path"), path)
        if new_tooltip != self.libsearch_last_tooltip:
            self.libsearch_last_tooltip = new_tooltip
            self.library.set_property('has-tooltip', False)
            gobject.idle_add(self.library_search_tooltips_enable, widget, x, y,
                             keyboard_mode, tooltip)
            gobject.idle_add(widget.set_tooltip_markup, new_tooltip)
            return
        self.libsearch_last_tooltip = new_tooltip
        return False  # api says we should return True, but this doesn't work?

    def library_search_tooltips_enable(self, widget, x, y, keyboard_mode,
                                       tooltip):
        """Re-enable tooltips and optionally re-run the query handler."""
        self.library.set_property('has-tooltip', True)
        if tooltip is not None:
            self.on_library_query_tooltip(widget, x, y, keyboard_mode, tooltip)

    def on_library_row_activated(self, _widget, path, _column=0):
        """Either enqueue a song or descend into the activated row."""
        if path is None:
            # Default to last item in selection:
            _model, selected = self.library_selection.get_selected_rows()
            if len(selected) >= 1:
                path = selected[0]
            else:
                return
        value = self.librarydata.get_value(self.librarydata.get_iter(path), 1)
        icon = self.librarydata.get_value(self.librarydata.get_iter(path), 0)
        if icon == self.sonatapb:
            # Song found, add item
            self.on_add_item(self.library)
        elif value == self.library_set_data(path=".."):
            self.library_browse_parent(None)
        else:
            self.library_browse(None, value)

    def library_get_parent(self):
        """Return the library data node one level above the current dir."""
        if self.config.lib_view == consts.VIEW_ALBUM:
            value = self.library_set_data(path="/")
        elif self.config.lib_view == consts.VIEW_ARTIST:
            album, artist = self.library_get_data(self.config.wd, 'album',
                                                  'artist')
            if album is not None:
                value = self.library_set_data(artist=artist)
            else:
                value = self.library_set_data(path="/")
        elif self.config.lib_view == consts.VIEW_GENRE:
            album, artist, genre = self.library_get_data(
                self.config.wd, 'album', 'artist', 'genre')
            if album is not None:
                value = self.library_set_data(genre=genre, artist=artist)
            elif artist is not None:
                value = self.library_set_data(genre=genre)
            else:
                value = self.library_set_data(path="/")
        else:
            # Filesystem view: strip the last path component
            newvalue = '/'.join(
                self.library_get_data(self.config.wd,
                                      'path').split('/')[:-1]) or '/'
            value = self.library_set_data(path=newvalue)
        return value

    def library_browse_parent(self, _action):
        """Navigate up one level when the library has focus (no search)."""
        if not self.search_visible():
            if self.library.is_focus():
                value = self.library_get_parent()
                self.library_browse(None, value)
                return True

    def not_parent_is_selected(self):
        # Returns True if something is selected and it's not
        # ".." or "/":
        model, rows = self.library_selection.get_selected_rows()
        for path in rows:
            i = model.get_iter(path)
            value = model.get_value(i, 2)
            if value != ".." and value != "/":
                return True
        return False

    def get_path_child_filenames(self, return_root, selected_only=True):
        # If return_root=True, return main directories whenever possible
        # instead of individual songs in order to reduce the number of
        # mpd calls we need to make. We won't want this behavior in some
        # instances, like when we want all end files for editing tags
        items = []
        if selected_only:
            model, rows = self.library_selection.get_selected_rows()
        else:
            model = self.librarydata
            rows = [(i,) for i in range(len(model))]
        for path in rows:
            i = model.get_iter(path)
            pb = model.get_value(i, 0)
            data = model.get_value(i, 1)
            value = model.get_value(i, 2)
            if value != ".." and value != "/":
                album, artist, year, genre, path = self.library_get_data(
                    data, 'album', 'artist', 'year', 'genre', 'path')
                if path is not None and album is None and artist is None and \
                        year is None and genre is None:
                    if pb == self.sonatapb:
                        # File
                        items.append(path)
                    else:
                        # Directory
                        if not return_root:
                            items += self.library_get_path_files_recursive(
                                path)
                        else:
                            items.append(path)
                else:
                    # Tag-based node: resolve to matching files via search
                    results, _playtime, _num_songs = \
                            self.library_return_search_items(
                                genre=genre, artist=artist, album=album,
                                year=year)
                    for item in results:
                        items.append(mpdh.get(item, 'file'))
        # Make sure we don't have any EXACT duplicates:
        items = misc.remove_list_duplicates(items, case=True)
        return items

    def library_get_path_files_recursive(self, path):
        """Return every file under *path*, recursing into subdirectories."""
        results = []
        for item in self.mpd.lsinfo(path):
            if 'directory' in item:
                results = results + self.library_get_path_files_recursive(
                    mpdh.get(item, 'directory'))
            elif 'file' in item:
                results.append(mpdh.get(item, 'file'))
        return results

    def on_library_search_combo_change(self, _combo=None):
        """Persist the chosen search type and restart the live search."""
        self.config.last_search_num = self.searchcombo.get_active()
        if not self.search_visible():
            return
        self.prevlibtodo = ""
        self.prevlibtodo_base = "__"
        self.libsearchfilter_feed_loop(self.searchtext)

    def on_search_end(self, _button, move_focus=True):
        """Close the search bar if it is open."""
        if self.search_visible():
            self.libsearchfilter_toggle(move_focus)

    def search_visible(self):
        """True while the search UI is showing."""
        return self.searchbutton.get_property('visible')

    def libsearchfilter_toggle(self, move_focus):
        """Open or close the live search, starting/stopping its worker thread."""
        if not self.search_visible() and self.connected():
            self.library.set_property('has-tooltip', True)
            ui.show(self.searchbutton)
            self.prevlibtodo = 'foo'
            self.prevlibtodo_base = "__"
            self.prevlibtodo_base_results = []
            # extra thread for background search work,
            # synchronized with a condition and its internal mutex
            self.libfilterbox_cond = threading.Condition()
            self.libfilterbox_cmd_buf = self.searchtext.get_text()
            qsearch_thread = threading.Thread(
                target=self.libsearchfilter_loop)
            qsearch_thread.setDaemon(True)
            qsearch_thread.start()
        elif self.search_visible():
            ui.hide(self.searchbutton)
            # Clear the entry without re-triggering the filter:
            self.searchtext.handler_block(self.libfilter_changed_handler)
            self.searchtext.set_text("")
            self.searchtext.handler_unblock(self.libfilter_changed_handler)
            self.libsearchfilter_stop_loop()
            self.library_browse(root=self.config.wd)
            if move_focus:
                self.library.grab_focus()

    def libsearchfilter_feed_loop(self, editable):
        """Debounce entry changes before waking the search thread."""
        if not self.search_visible():
            self.libsearchfilter_toggle(None)
        # Lets only trigger the searchfilter_loop if 200ms pass
        # without a change in gtk.Entry
        # NOTE(review): comment says 200ms but the timeout below is 300ms.
        try:
            gobject.source_remove(self.libfilterbox_source)
        except:
            pass
        self.libfilterbox_source = gobject.timeout_add(
            300, self.libsearchfilter_start_loop, editable)

    def libsearchfilter_start_loop(self, editable):
        """Hand the current entry text to the worker thread."""
        self.libfilterbox_cond.acquire()
        self.libfilterbox_cmd_buf = editable.get_text()
        self.libfilterbox_cond.notifyAll()
        self.libfilterbox_cond.release()

    def libsearchfilter_stop_loop(self):
        """Signal the worker thread to exit via the QUIT sentinel."""
        self.libfilterbox_cond.acquire()
        self.libfilterbox_cmd_buf = '$$$QUIT###'
        self.libfilterbox_cond.notifyAll()
        self.libfilterbox_cond.release()

    def libsearchfilter_loop(self):
        """Worker thread: wait for entry text and dispatch searches.

        Protocol via self.libfilterbox_cmd_buf sentinels:
        '$$$DONE###' = idle/consumed, '$$$QUIT###' = terminate.
        """
        while True:
            # copy the last command or pattern safely
            self.libfilterbox_cond.acquire()
            try:
                while(self.libfilterbox_cmd_buf == '$$$DONE###'):
                    self.libfilterbox_cond.wait()
                todo = self.libfilterbox_cmd_buf
                self.libfilterbox_cond.release()
            except:
                todo = self.libfilterbox_cmd_buf
            searchby = self.search_terms_mpd[self.config.last_search_num]
            if self.prevlibtodo != todo:
                if todo == '$$$QUIT###':
                    gobject.idle_add(self.filtering_entry_revert_color,
                                     self.searchtext)
                    return
                elif len(todo) > 1:
                    gobject.idle_add(self.libsearchfilter_do_search, searchby,
                                     todo)
                elif len(todo) == 0:
                    gobject.idle_add(self.filtering_entry_revert_color,
                                     self.searchtext)
                    self.libsearchfilter_toggle(False)
                else:
                    gobject.idle_add(self.filtering_entry_revert_color,
                                     self.searchtext)
            self.libfilterbox_cond.acquire()
            self.libfilterbox_cmd_buf = '$$$DONE###'
            try:
                self.libfilterbox_cond.release()
            except:
                pass
            self.prevlibtodo = todo
def libsearchfilter_do_search(self, searchby, todo):
    """Filter the library view down to the rows matching *todo*.

    A coarse MPD search on the first two characters of the query seeds
    self.prevlibtodo_base_results; successive keystrokes that extend the
    same prefix are then refined client-side with regexes (a "subsearch").
    The library model is updated in place under freeze/thaw notify, and
    the final UI touches are queued via gobject.idle_add so they run on
    the GTK main loop.
    """
    if not self.prevlibtodo_base in todo:
        # Do library search based on first two letters:
        self.prevlibtodo_base = todo[:2]
        self.prevlibtodo_base_results = self.mpd.search(searchby, self.prevlibtodo_base)
        subsearch = False
    else:
        # Query still starts with the cached prefix: reuse the MPD results.
        subsearch = True

    # Now, use filtering similar to playlist filtering:
    # this may take some seconds... and we'll escape the search text
    # because we'll be searching for a match in items that are also escaped
    #
    # Note that the searching is not order specific. That is, "foo bar"
    # will match on "fools bar" and "barstool foo".
    todos = todo.split(" ")
    regexps = []
    for i in range(len(todos)):
        todos[i] = misc.escape_html(todos[i])
        todos[i] = re.escape(todos[i])
        # '.*' prefix: each term may match anywhere in the field.
        todos[i] = '.*' + todos[i].lower()
        regexps.append(re.compile(todos[i]))

    matches = []
    if searchby != 'any':
        # Match every term against the single requested metadata field.
        for row in self.prevlibtodo_base_results:
            is_match = True
            for regexp in regexps:
                if not regexp.match(unicode(mpdh.get(row, searchby)).lower()):
                    is_match = False
                    break
            if is_match:
                matches.append(row)
    else:
        # 'any': concatenate all metadata values and match terms against that.
        for row in self.prevlibtodo_base_results:
            allstr = " ".join(mpdh.get(row, meta) for meta in row)
            is_match = True
            for regexp in regexps:
                if not regexp.match(unicode(allstr).lower()):
                    is_match = False
                    break
            if is_match:
                matches.append(row)

    if subsearch and len(matches) == len(self.librarydata):
        # nothing changed..
        return

    # Rebuild the ListStore rows in place; freeze notifications while we do.
    self.library.freeze_child_notify()
    currlen = len(self.librarydata)
    bd = [[self.sonatapb, self.library_set_data(path=mpdh.get(item, 'file')),
           formatting.parse(self.config.libraryformat, item, True)]
          for item in matches if 'file' in item]
    # Py2 cmp-style sort by the formatted display string (column 2).
    bd.sort(locale.strcoll, key=operator.itemgetter(2))
    for i, item in enumerate(bd):
        if i < currlen:
            # Overwrite existing rows cell-by-cell, only when changed.
            j = self.librarydata.get_iter((i, ))
            for index in range(len(item)):
                if item[index] != self.librarydata.get_value(j, index):
                    self.librarydata.set_value(j, index, item[index])
        else:
            self.librarydata.append(item)
    # Remove excess items...
    newlen = len(bd)
    if newlen == 0:
        self.librarydata.clear()
    else:
        # Delete from the end backwards so earlier indices stay valid.
        for i in range(currlen - newlen):
            j = self.librarydata.get_iter((currlen - 1 - i,))
            self.librarydata.remove(j)
    self.library.thaw_child_notify()

    if len(matches) == 0:
        # No hits: tint the entry red as feedback.
        gobject.idle_add(self.filtering_entry_make_red, self.searchtext)
    else:
        gobject.idle_add(self.library.set_cursor, '0')
        gobject.idle_add(self.filtering_entry_revert_color, self.searchtext)

def libsearchfilter_key_pressed(self, widget, event):
    """Forward key presses in the search entry to the shared filter handler."""
    self.filter_key_pressed(widget, event, self.library)

def libsearchfilter_on_enter(self, _entry):
    """Activate the currently selected library row when Enter is pressed."""
    self.on_library_row_activated(None, None)

def libsearchfilter_set_focus(self):
    """Give keyboard focus to the search entry (deferred to the main loop)."""
    gobject.idle_add(self.searchtext.grab_focus)

def libsearchfilter_get_style(self):
    """Return the gtk style of the search entry widget."""
    return self.searchtext.get_style()
from django.shortcuts import render


def about(request):
    """Render the static 'about' page."""
    context = {}
    return render(request, "about.html", context)


def location(request):
    """Render the static 'location' page."""
    context = {}
    return render(request, "location.html", context)


def failure(request):
    """Render the static 'failure' page."""
    context = {}
    return render(request, "failure.html", context)
import json
import urllib
import urllib2


def shorten(url):
    """POST *url* to the goo.gl endpoint and return the shortened URL."""
    endpoint = 'http://goo.gl/api/url?url=%s' % urllib.quote(url)
    request = urllib2.Request(endpoint, data='')
    request.add_header('User-Agent', 'toolbar')
    response = urllib2.urlopen(request)
    payload = json.load(response)
    return payload['short_url']
# -*- coding: utf-8 -*-
import wx
import os
import wx.xrc
import modules.baz.cDatabase as cDatabase
import linecache


class PubDialog ( wx.Dialog ):
    """Dialog for adding and editing publications.

    The widget layout follows the wxFormBuilder-generated style of the
    rest of the project; all persistence goes through ``cDatabase``.
    """

    ## Constructor
    def __init__( self ):
        wx.Dialog.__init__ ( self, None, id = wx.ID_ANY, title = u"Zarządzanie Publikacjami", pos = wx.DefaultPosition, size = wx.Size( 450,430 ), style = wx.DEFAULT_DIALOG_STYLE )

        self.session = cDatabase.connectDatabase()
        self.listType = []
        # Populate self.listType exactly once.  The original code called
        # getType() a second time at the very end of __init__, which doubled
        # every entry in the list; that duplicate call has been removed.
        self.getType()

        ico = wx.Icon('icon/pub.ico', wx.BITMAP_TYPE_ICO)
        self.SetIcon(ico)

        self.SetSizeHintsSz( wx.DefaultSize, wx.DefaultSize )

        bSizer1 = wx.BoxSizer( wx.VERTICAL )
        bSizer28 = wx.BoxSizer( wx.VERTICAL )
        bSizer21 = wx.BoxSizer( wx.VERTICAL )

        self.m_staticText1 = wx.StaticText( self, wx.ID_ANY, u"Dodawanie Publikacji", wx.DefaultPosition, wx.DefaultSize, wx.ALIGN_CENTRE|wx.ST_NO_AUTORESIZE )
        self.m_staticText1.Wrap( -1 )
        bSizer21.Add( self.m_staticText1, 0, wx.EXPAND|wx.ALL, 5 )
        bSizer28.Add( bSizer21, 0, wx.EXPAND|wx.ALIGN_CENTER_HORIZONTAL, 5 )
        bSizer1.Add( bSizer28, 0, wx.EXPAND, 5 )

        bSizer26 = wx.BoxSizer( wx.HORIZONTAL )
        bSizer15 = wx.BoxSizer( wx.VERTICAL )

        # Title row
        bSizer3 = wx.BoxSizer( wx.HORIZONTAL )
        self.m_staticText2 = wx.StaticText( self, wx.ID_ANY, u"Tytuł:", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_staticText2.Wrap( -1 )
        bSizer3.Add( self.m_staticText2, 1, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
        self.m_textCtrl2 = wx.TextCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 145,-1 ), 0 )
        bSizer3.Add( self.m_textCtrl2, 0, wx.BOTTOM|wx.RIGHT|wx.LEFT, 5 )
        bSizer15.Add( bSizer3, 0, wx.EXPAND|wx.ALIGN_CENTER_HORIZONTAL, 5 )

        # Authors row
        bSizer5 = wx.BoxSizer( wx.HORIZONTAL )
        self.m_staticText4 = wx.StaticText( self, wx.ID_ANY, u"Autorzy:", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_staticText4.Wrap( -1 )
        bSizer5.Add( self.m_staticText4, 1, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
        self.m_textCtrl4 = wx.TextCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 145,-1 ), 0 )
        bSizer5.Add( self.m_textCtrl4, 0, wx.BOTTOM|wx.RIGHT|wx.LEFT, 5 )
        bSizer15.Add( bSizer5, 0, wx.EXPAND, 5 )

        # Citations row
        bSizer4 = wx.BoxSizer( wx.HORIZONTAL )
        self.m_staticText3 = wx.StaticText( self, wx.ID_ANY, u"Cytowania:", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_staticText3.Wrap( -1 )
        bSizer4.Add( self.m_staticText3, 1, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
        self.m_textCtrl3 = wx.TextCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 145,-1 ), 0 )
        bSizer4.Add( self.m_textCtrl3, 0, wx.BOTTOM|wx.RIGHT|wx.LEFT, 5 )
        bSizer15.Add( bSizer4, 0, wx.ALIGN_CENTER_HORIZONTAL|wx.EXPAND, 5 )

        # Publication-type row (choices come from type.txt via getType())
        bSizer6 = wx.BoxSizer( wx.HORIZONTAL )
        self.m_staticText5 = wx.StaticText( self, wx.ID_ANY, u"Typ:", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_staticText5.Wrap( -1 )
        bSizer6.Add( self.m_staticText5, 1, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
        m_choice1Choices = self.listType
        self.m_choice1 = wx.Choice( self, wx.ID_ANY, wx.DefaultPosition, wx.Size( 145,-1 ), m_choice1Choices, 0 )
        self.m_choice1.SetSelection( 0 )
        bSizer6.Add( self.m_choice1, 0, wx.BOTTOM|wx.RIGHT|wx.LEFT, 5 )
        bSizer15.Add( bSizer6, 0, wx.EXPAND, 5 )

        # Year row
        bSizer7 = wx.BoxSizer( wx.HORIZONTAL )
        self.m_staticText6 = wx.StaticText( self, wx.ID_ANY, u"Rok:", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_staticText6.Wrap( -1 )
        bSizer7.Add( self.m_staticText6, 1, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
        self.m_textCtrl5 = wx.TextCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 145,-1 ), 0 )
        bSizer7.Add( self.m_textCtrl5, 0, wx.BOTTOM|wx.RIGHT|wx.LEFT, 5 )
        bSizer15.Add( bSizer7, 0, wx.EXPAND, 5 )

        # DOI row
        bSizer8 = wx.BoxSizer( wx.HORIZONTAL )
        self.m_staticText7 = wx.StaticText( self, wx.ID_ANY, u"DOI:", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_staticText7.Wrap( -1 )
        bSizer8.Add( self.m_staticText7, 1, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
        self.m_textCtrl6 = wx.TextCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 145,-1 ), 0 )
        bSizer8.Add( self.m_textCtrl6, 0, wx.BOTTOM|wx.RIGHT|wx.LEFT, 5 )
        bSizer15.Add( bSizer8, 0, wx.EXPAND, 5 )

        # "Other key" row
        bSizer29 = wx.BoxSizer( wx.HORIZONTAL )
        self.m_staticText9 = wx.StaticText( self, wx.ID_ANY, u"Inny klucz:", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_staticText9.Wrap( -1 )
        bSizer29.Add( self.m_staticText9, 1, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
        self.m_textCtrl7 = wx.TextCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 145,-1 ), 0 )
        bSizer29.Add( self.m_textCtrl7, 0, wx.BOTTOM|wx.RIGHT|wx.LEFT, 5 )
        bSizer15.Add( bSizer29, 0, wx.EXPAND, 5 )

        # Publisher row (choices come from the database)
        bSizer9 = wx.BoxSizer( wx.HORIZONTAL )
        self.m_staticText8 = wx.StaticText( self, wx.ID_ANY, u"Wydawca:", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_staticText8.Wrap( -1 )
        bSizer9.Add( self.m_staticText8, 1, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
        m_choice2Choices = cDatabase.getJournalName(self.session)
        self.m_choice2 = wx.Choice( self, wx.ID_ANY, wx.DefaultPosition, wx.Size( 145,-1 ), m_choice2Choices, 0 )
        bSizer9.Add( self.m_choice2, 0, wx.BOTTOM|wx.RIGHT|wx.LEFT, 5 )
        bSizer15.Add( bSizer9, 0, wx.EXPAND, 5 )

        # Source row
        bSizer17 = wx.BoxSizer( wx.HORIZONTAL )
        self.m_staticText10 = wx.StaticText( self, wx.ID_ANY, u"Źródło:", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_staticText10.Wrap( -1 )
        bSizer17.Add( self.m_staticText10, 1, wx.ALL, 5 )
        self.m_textCtrl71 = wx.TextCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 145,-1 ), 0 )
        bSizer17.Add( self.m_textCtrl71, 0, wx.BOTTOM|wx.RIGHT|wx.LEFT, 5 )
        bSizer15.Add( bSizer17, 1, wx.EXPAND, 5 )

        # Ministerial-list points row
        bSizer18 = wx.BoxSizer( wx.HORIZONTAL )
        self.m_staticText99 = wx.StaticText( self, wx.ID_ANY, u"LMCP:", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_staticText99.Wrap( -1 )
        bSizer18.Add( self.m_staticText99, 1, wx.ALL, 5 )
        self.m_textCtrl99 = wx.TextCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 145,-1 ), 0 )
        self.m_textCtrl99.SetToolTipString( u"Ilość punktów na liście ministerialnej" )
        bSizer18.Add( self.m_textCtrl99, 0, wx.BOTTOM|wx.RIGHT|wx.LEFT, 5 )
        bSizer15.Add( bSizer18, 1, wx.EXPAND, 5 )

        # JCR flag row
        bSizer19 = wx.BoxSizer( wx.HORIZONTAL )
        self.m_staticText98 = wx.StaticText( self, wx.ID_ANY, u"JCR:", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_staticText98.Wrap( -1 )
        bSizer19.Add( self.m_staticText98, 1, wx.ALL, 5 )
        m_choice3Choices = ['True', 'False']
        self.m_choice3 = wx.Choice( self, wx.ID_ANY, wx.DefaultPosition, wx.Size( 145,-1 ), m_choice3Choices, 0 )
        bSizer19.Add( self.m_choice3, 0, wx.BOTTOM|wx.RIGHT|wx.LEFT, 5 )
        bSizer15.Add( bSizer19, 1, wx.EXPAND, 5 )

        bSizer26.Add( bSizer15, 1, wx.EXPAND, 5 )

        # Author check-list (right-hand column)
        bSizer23 = wx.BoxSizer( wx.VERTICAL )
        bSizer10 = wx.BoxSizer( wx.VERTICAL )
        m_checkList3Choices = cDatabase.getUserName(self.session)
        self.m_checkList3 = wx.CheckListBox( self, wx.ID_ANY, wx.DefaultPosition, wx.Size( 200,281 ), m_checkList3Choices, 0 )
        self.m_checkList3.SetToolTipString( u"Powiąż autorów z publikacją" )
        bSizer10.Add( self.m_checkList3, 0, wx.EXPAND|wx.BOTTOM|wx.RIGHT|wx.LEFT, 5 )
        bSizer23.Add( bSizer10, 0, wx.EXPAND, 5 )
        bSizer26.Add( bSizer23, 1, wx.EXPAND, 5 )
        bSizer1.Add( bSizer26, 0, wx.EXPAND, 5 )

        # Notes box
        bSizer55 = wx.BoxSizer( wx.HORIZONTAL )
        self.m_textCtrl55 = wx.TextCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( -1,50 ), wx.TE_MULTILINE )
        self.m_textCtrl55.SetToolTipString( u"Notatki do publikacji" )
        bSizer55.Add( self.m_textCtrl55, 1, wx.ALL|wx.EXPAND, 5 )
        bSizer1.Add( bSizer55, 0, wx.EXPAND, 5 )

        # Buttons + hidden URL label slots
        bSizer11 = wx.BoxSizer( wx.HORIZONTAL )
        self.m_button1 = wx.Button( self, wx.ID_ANY, u"Dodaj", wx.DefaultPosition, wx.DefaultSize, 0 )
        bSizer11.Add( self.m_button1, 0, wx.ALL|wx.EXPAND, 5 )
        self.m_button3 = wx.Button( self, wx.ID_ANY, u"Zatwierdź", wx.DefaultPosition, wx.DefaultSize, 0 )
        bSizer11.Add( self.m_button3, 0, wx.ALL, 5 )
        self.m_button4 = wx.Button( self, wx.ID_ANY, u"Zamknij", wx.DefaultPosition, wx.DefaultSize, 0 )
        bSizer11.Add( self.m_button4, 0, wx.ALL, 5 )
        self.m_staticText11 = wx.StaticText( self, wx.ID_ANY, u"", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_staticText11.Wrap( -1 )
        bSizer11.Add( self.m_staticText11, 1, wx.ALL, 5 )
        self.m_staticText12 = wx.StaticText( self, wx.ID_ANY, u"", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_staticText12.Wrap( -1 )
        bSizer11.Add( self.m_staticText12, 1, wx.ALL, 5 )
        bSizer1.Add( bSizer11, 0, wx.ALIGN_RIGHT, 5 )

        self.SetSizer( bSizer1 )
        self.Layout()
        self.Centre( wx.BOTH )

        # "Zatwierdź" and the URL labels are only shown in edit mode.
        self.m_button3.Hide()
        self.m_staticText11.Hide()
        self.m_staticText12.Hide()

        self.m_button1.Bind(wx.EVT_BUTTON, self.addPubValue)
        self.m_button4.Bind(wx.EVT_BUTTON, self.close)
        self.m_button3.Bind(wx.EVT_BUTTON, self.editPubValue)

    ## getType documentation
    # @param self object pointer
    #
    # @return void
    # Loads the publication types from the 'type.txt' file into self.listType.
    def getType(self):
        # The previous implementation opened the file just to count its lines
        # (leaking the handle) and then re-read each line via linecache.
        # Reading once with a context manager is equivalent: readlines()
        # keeps the trailing newlines, exactly like linecache.getline did.
        with open('type.txt', 'rU') as f:
            self.listType.extend(f.readlines())

    ## editPubValue documentation
    # @param self object pointer
    # @param event event that triggered the handler
    #
    # @return void
    # Sends an update request for the currently edited publication.
    def editPubValue(self, event):
        # Collect the edited values from the dialog controls.
        # The header label is "<n>. <title>"; keep only the title part.
        tmp = self.m_staticText1.GetLabel()
        tmp = tmp.split('. ', 1)
        t0 = tmp[1]
        t1 = self.m_textCtrl2.GetValue()
        t2 = self.m_textCtrl4.GetValue()
        t3 = self.m_textCtrl3.GetValue()
        t4 = self.m_choice1.GetStringSelection()
        t5 = self.m_textCtrl5.GetValue()
        t6 = self.m_textCtrl6.GetValue()
        t7 = self.m_textCtrl7.GetValue()
        t8 = self.m_choice2.GetStringSelection()
        t10 = self.m_textCtrl71.GetValue()
        t11 = self.m_textCtrl99.GetValue()   # ministerial-list points
        t12 = self.m_choice3.GetStringSelection()   # JCR flag
        t13 = self.m_textCtrl55.GetValue()   # note
        # Pre-check the authors already linked to this publication
        # (called for its side effect on the check-list).
        cDatabase.editItemAuthor(self.session, t0)
        t9 = self.getCheckUser()   # IDs of the checked authors
        # Map the selected journal name to its database ID.
        tmp = cDatabase.getJournalNameID(self.session)
        if t8 != u'':
            t8 = tmp[t8]
        else:
            t8 = None
        t = (t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13)
        # Mandatory fields must not be empty.
        if t1 != '' and t2 != '' and t3 != '' and t5 != '':
            cDatabase.editPubData(self.session, t, t0)
            wx.MessageBox(u'Zauktualizowano wartości!', u'Sukces', wx.OK | wx.ICON_INFORMATION)
        else:
            wx.MessageBox(u'Nie podana nazwy grupy \nlub nie wybrano autorów.', u'Bład', wx.OK | wx.ICON_INFORMATION)
        self.Destroy()

    ## addPubValue documentation
    # @param self object pointer
    # @param event event that triggered the handler
    #
    # @return void
    # Sends a request to add a new publication.
    def addPubValue(self, event):
        # Collect the values from the dialog controls.
        tx1 = self.m_textCtrl2.GetValue()    # title
        tx2 = self.m_textCtrl4.GetValue()    # authors
        tx3 = self.m_textCtrl3.GetValue()    # citations
        tx4 = self.m_choice1.GetStringSelection()   # type
        tx5 = self.m_textCtrl5.GetValue()    # year
        tx6 = self.m_textCtrl6.GetValue()    # doi
        tx9 = self.m_textCtrl7.GetValue()    # other key
        tx7 = self.m_choice2.GetStringSelection()   # publisher name
        tx8 = self.getCheckUser()            # author IDs
        tx10 = self.m_textCtrl71.GetValue()  # source
        tx11 = self.m_staticText11.GetLabel()   # publication URL
        tx12 = self.m_staticText12.GetLabel()   # citations URL
        tx13 = self.m_textCtrl99.GetValue()  # ministerial-list points
        tx14 = self.m_choice3.GetStringSelection()  # JCR flag
        tx15 = self.m_textCtrl55.GetValue()  # note
        # Map the selected journal name to its database ID.
        tmp = cDatabase.getJournalNameID(self.session)
        if tx7 != u'':
            tx7 = tmp[tx7]
        else:
            tx7 = None
        t = (tx1, tx2, tx3, tx4, tx5, tx6, tx9, tx7, tx8, tx11, tx12, tx10, tx13, tx14, tx15)
        # Mandatory fields must not be empty.
        if tx1 != '' and tx2 != '' and tx3 != '' and tx5 != '':
            cDatabase.addPubData(self.session, t)
        else:
            wx.MessageBox(u'Pola "Tytuł, Autor, Cytowania, Rok" sa wymagane!', u'Bład', wx.OK | wx.ICON_INFORMATION)
        self.Destroy()

    ## getCheckUser documentation
    # @param self object pointer
    #
    # @return list IDs of the authors linked to the publication
    # Collects the database IDs of all checked authors.
    def getCheckUser(self):
        result = []
        guser = cDatabase.getUserName(self.session)
        t = cDatabase.getUserNameID(self.session)
        for i in range(len(guser)):
            if self.m_checkList3.IsChecked(i):
                # 'user_id' (was 'id') avoids shadowing the builtin.
                user_id = t[guser[i]]
                result.append(user_id)
        return result

    ## close documentation
    # @param self object pointer
    # @param event event that triggered the handler
    #
    # @return void
    # Closes the publication-management dialog.
    def close(self, event):
        """Close the publication dialog."""
        self.Destroy()


if __name__ == "__main__":
    app = wx.App(False)
    controller = PubDialog()
    controller.Show()
    app.MainLoop()
__author__ = 'xiaoxiaol' import numpy as np import pylab as pl import scipy import pandas as pd import seaborn as sns import os import sys, getopt from scipy.cluster import hierarchy import platform from scipy.stats.stats import pearsonr import scipy.stats as stats from PIL import Image import glob from sklearn.metrics import silhouette_samples, silhouette_score import math from sklearn.cluster import AffinityPropagation from sklearn import metrics from itertools import cycle ZSCORE_OUTLIER_THRESHOLD = 5 sns.set_context("poster") def zscore(features, remove_outlier=0): zscores = scipy.stats.zscore(features, 0) # zscores = normalizeFeatures(features) return zscores def copySnapshots(df_in, snapshots_dir, output_dir): if not os.path.exists(output_dir): os.mkdir(output_dir) swc_files = df_in['swc_file_name'] if len(swc_files) > 0: for afile in swc_files: filename = snapshots_dir + '/' + afile.split('/')[-1] + '.BMP' if os.path.exists(filename): os.system("cp " + filename + " " + output_dir + "/\n") return def assemble_screenshots(input_dir, output_image_file_name, size): files = glob.glob(input_dir + "/*.BMP") assemble_image = Image.new("RGB", (size * len(files),size)) y = 0 for infile in files: im = Image.open(infile) im.thumbnail((size, size), Image.ANTIALIAS) assemble_image.paste(im, (y, 0)) y += size assemble_image.save(output_image_file_name) return def generateLinkerFileFromDF(df_in, output_ano_file, strip_path=False, swc_path=None): swc_files = df_in['swc_file_name'] if len(swc_files) > 0: with open(output_ano_file, 'w') as outf: for afile in swc_files: if swc_path is not None: filename = swc_path + '/'+afile else: filename = afile if strip_path: filename = afile.split('/')[-1] line = 'SWCFILE=' + filename + '\n' outf.write(line) outf.close() return return g def plot_confusion_matrix(cm, xlabel, ylabel, xnames, ynames, title='Confusion matrix', cmap=pl.cm.Blues): pl.grid(False) pl.imshow(cm, interpolation = 'none',cmap=cmap) pl.title(title) pl.colorbar() 
tick_marksx = np.arange(len(xnames)) tick_marksy = np.arange(len(ynames)) pl.xticks(tick_marksx, xnames) pl.yticks(tick_marksy, ynames) pl.tight_layout() pl.ylabel(ylabel) pl.xlabel(xlabel) def heatmap_plot_zscore_ivscc(df_zscore_features, df_all, output_dir, title=None): # Create a custom palette for dendrite_type colors dendrite_types = [np.nan, 'aspiny', 'sparsely spiny', 'spiny'] # dendrite_type_pal = sns.color_palette("coolwarm", len(dendrite_types)) dendrite_type_pal = sns.color_palette(["gray","black","purple","red"]) dendrite_type_lut = dict(zip(dendrite_types, dendrite_type_pal)) dendrite_type_colors = df_all['dendrite_type'].map(dendrite_type_lut) # Create a custom palette for creline colors cre_lines = np.unique(df_all['cre_line']) print cre_lines cre_lines = ['Pvalb-IRES-Cre','Sst-IRES-Cre','Gad2-IRES-Cre', 'Htr3a-Cre_NO152', 'Nr5a1-Cre', 'Ntsr1-Cre','Rbp4-Cre_KL100' ,'Rorb-IRES2-Cre-D', 'Scnn1a-Tg2-Cre', 'Scnn1a-Tg3-Cre','Slc17a6-IRES-Cre','Cux2-CreERT2'] cre_line_pal = sns.color_palette("BrBG", len(cre_lines)) cre_line_lut = dict(zip(cre_lines, cre_line_pal)) # map creline type to color cre_line_colors = df_all['cre_line'].map(cre_line_lut) # layers = np.unique(df_all['layer']) # layer_pal = sns.light_palette("green", len(layers)) # layer_lut = dict(zip(layers, layer_pal)) # layer_colors = df_all['layer'].map(layer_lut) # # only if types are available # types = np.unique(df_all['types']) # #reorder # types = ['NGC','multipolar','symm', 'bitufted','bipolar','tripod', 'Martinotti','cortico-cortical', 'cortico-thal','non-tufted', 'short-thick-tufted', 'tufted','thick-tufted'] # type_pal = sns.color_palette("coolwarm", len(types))# sns.diverging_palette(220, 20, n=len(types))# sns.color_palette("husl", len(types)) # type_lut = dict(zip(types, type_pal)) # type_colors = df_all['types'].map(type_lut) # Create a custom colormap for the heatmap values #cmap = sns.diverging_palette(240, 10, as_cmap=True) linkage = hierarchy.linkage(df_zscore_features, 
method='ward', metric='euclidean') data = df_zscore_features.transpose() row_linkage = hierarchy.linkage(data, method='ward', metric='euclidean') feature_order = hierarchy.leaves_list(row_linkage) #print data.index matchIndex = [data.index[x] for x in feature_order] #print matchIndex data = data.reindex(matchIndex) g = sns.clustermap(data, row_cluster = False, col_linkage=linkage, method='ward', metric='euclidean', linewidths = 0.0,col_colors = [cre_line_colors,dendrite_type_colors], cmap = sns.cubehelix_palette(light=1, as_cmap=True),figsize=(40,20)) #g.ax_heatmap.xaxis.set_xticklabels() pl.setp(g.ax_heatmap.xaxis.get_majorticklabels(), rotation=90 ) pl.setp(g.ax_heatmap.yaxis.get_majorticklabels(), rotation=0) pl.subplots_adjust(left=0.1, bottom=0.5, right=0.9, top=0.95) # !!!!! #pl.tight_layout( fig, h_pad=20.0, w_pad=20.0) if title: pl.title(title) location ="best" num_cols=1 # Legend for row and col colors for label in cre_lines: g.ax_row_dendrogram.bar(0, 0, color=cre_line_lut[label], label=label, linewidth=0.0) g.ax_row_dendrogram.legend(loc=location, ncol=num_cols,borderpad=0) for i in range(3): g.ax_row_dendrogram.bar(0, 0, color = "white", label=" ", linewidth=0) g.ax_row_dendrogram.legend(loc=location, ncol=num_cols, borderpad=0.0) # for label in layers: # pl.bar(0, 0, color=layer_lut[label], label=label, linewidth=1) # pl.legend(loc="left", ncol=2,borderpad=0.5) # # for label in types: # g.ax_row_dendrogram.bar(0, 0, color=type_lut[label], label=label,linewidth=0) # g.ax_row_dendrogram.legend(loc=location, ncol=num_cols,borderpad=0.0) # # # g.ax_row_dendrogram.bar(0, 0, color = "white", label=" ", linewidth=0) # g.ax_row_dendrogram.legend(loc=location, ncol=num_cols, borderpad=0.0) for label in dendrite_types: g.ax_row_dendrogram.bar(0, 0, color = dendrite_type_lut[label], label=label, linewidth=0) g.ax_row_dendrogram.legend(loc=location, ncol= num_cols, borderpad=0.0) filename = output_dir + '/zscore_feature_heatmap.png' pl.savefig(filename, dpi=300) 
#pl.show() print("save zscore matrix heatmap figure to :" + filename) pl.close() return linkage def heatmap_plot_zscore_bbp(df_zscore_features, df_all, output_dir, title=None): print "heatmap plot" metric ='m-type' mtypes = np.unique(df_all[metric]) print mtypes mtypes_pal = sns.color_palette("hls", len(mtypes)) mtypes_lut = dict(zip(mtypes, mtypes_pal)) # map creline type to color mtypes_colors = df_all[metric].map(mtypes_lut) layers = np.unique(df_all['layer']) layer_pal = sns.light_palette("green", len(layers)) layers_lut = dict(zip(layers, layer_pal)) layer_colors = df_all['layer'].map(layers_lut) # Create a custom colormap for the heatmap values #cmap = sns.diverging_palette(240, 10, as_cmap=True) linkage = hierarchy.linkage(df_zscore_features, method='ward', metric='euclidean') data = df_zscore_features.transpose() row_linkage = hierarchy.linkage(data, method='ward', metric='euclidean') feature_order = hierarchy.leaves_list(row_linkage) #print data.index matchIndex = [data.index[x] for x in feature_order] #print matchIndex data = data.reindex(matchIndex) g = sns.clustermap(data, row_cluster = False, col_linkage=linkage, method='ward', metric='euclidean', linewidths = 0.0,col_colors = [mtypes_colors,layer_colors], cmap = sns.cubehelix_palette(light=1, as_cmap=True),figsize=(40,20)) #g.ax_heatmap.xaxis.set_xticklabels() pl.setp(g.ax_heatmap.xaxis.get_majorticklabels(), rotation=90 ) pl.setp(g.ax_heatmap.yaxis.get_majorticklabels(), rotation=0) pl.subplots_adjust(left=0.1, bottom=0.5, right=0.9, top=0.95) # !!!!! 
#pl.tight_layout( fig, h_pad=20.0, w_pad=20.0) if title: pl.title(title) location ="best" num_cols=1 # Legend for row and col colors for label in mtypes: g.ax_row_dendrogram.bar(0, 0, color=mtypes_lut[label], label=label, linewidth=0.0) g.ax_row_dendrogram.legend(loc=location, ncol=num_cols,borderpad=0) for i in range(3): g.ax_row_dendrogram.bar(0, 0, color = "white", label=" ", linewidth=0) g.ax_row_dendrogram.legend(loc=location, ncol=num_cols, borderpad=0.0) for label in layers: g.ax_row_dendrogram.bar(0, 0, color=layers_lut[label], label=label, linewidth=0.0) g.ax_row_dendrogram.legend(loc=location, ncol=num_cols,borderpad=0) filename = output_dir + '/zscore_feature_heatmap.png' pl.savefig(filename, dpi=300) #pl.show() print("save zscore matrix heatmap figure to :" + filename) pl.close() return linkage def remove_correlated_features(df_all, feature_names, coef_threshold=0.98): num_features = len(feature_names) removed_names = [] for i in range(num_features): if not feature_names[i] in removed_names: a = df_all[feature_names[i]].astype(float) for j in range(i + 1, num_features): if not feature_names[j] in removed_names: b = df_all[feature_names[j]].astype(float) corrcoef = pearsonr(a, b) if (corrcoef[0] > coef_threshold): removed_names.append(feature_names[j]) print("highly correlated:[" + feature_names[i] + ", " + feature_names[j] + " ]") subset_features_names = feature_names.tolist() for i in range(len(removed_names)): if removed_names[i] in subset_features_names: print ("remove " + removed_names[i]) subset_features_names.remove(removed_names[i]) return np.asarray(subset_features_names) def delta(ck, cl): values = np.ones([len(ck), len(cl)]) * 10000 for i in range(0, len(ck)): for j in range(0, len(cl)): values[i, j] = np.linalg.norm(ck[i] - cl[j]) return np.min(values) def big_delta(ci): values = np.zeros([len(ci), len(ci)]) for i in range(0, len(ci)): for j in range(0, len(ci)): values[i, j] = np.linalg.norm(ci[i] - ci[j]) return np.max(values) def 
dunn(k_list): """ Dunn index [CVI] Parameters ---------- k_list : list of np.arrays A list containing a numpy array for each cluster |c| = number of clusters c[K] is np.array([N, p]) (N : number of samples in cluster K, p : sample dimension) """ deltas = np.ones([len(k_list), len(k_list)]) * 1000000 big_deltas = np.zeros([len(k_list), 1]) l_range = range(0, len(k_list)) for k in l_range: for l in (l_range[0:k] + l_range[k + 1:]): deltas[k, l] = delta(k_list[k], k_list[l]) big_deltas[k] = big_delta(k_list[k]) di = np.min(deltas) / np.max(big_deltas) return di def cluster_specific_features(df_all, assign_ids, feature_names, output_csv_fn): #student t to get cluster specific features labels=[] clusters = np.unique(assign_ids) num_cluster = len(clusters) df_pvalues = pd.DataFrame(index = feature_names, columns = clusters) for cluster_id in clusters: ids_a = np.nonzero(assign_ids == cluster_id)[0] # starting from 0 ids_b = np.nonzero(assign_ids != cluster_id)[0] # starting from 0 labels.append("C"+str(cluster_id) + "("+ str(len(ids_a))+")" ) for feature in feature_names: a = df_all.iloc[ids_a][feature] b = df_all.iloc[ids_b][feature] t_stats,pval = stats.ttest_ind(a,b,equal_var=False) df_pvalues.loc[feature,cluster_id] = -np.log10(pval) df_pvalues.to_csv(output_csv_fn) ### visulaize df_pvalues.index.name = "Features" df_pvalues.columns.name ="Clusters" d=df_pvalues[df_pvalues.columns].astype(float) g = sns.heatmap(data=d,linewidths=0.1) # cmap =sns.color_palette("coolwarm",7, as_cmap=True)) g.set_xticklabels(labels) pl.yticks(rotation=0) pl.xticks(rotation=90) pl.subplots_adjust(left=0.5, right=0.9, top=0.9, bottom=0.1) pl.title('-log10(P value)') filename = output_csv_fn + '.png' pl.savefig(filename, dpi=300) #pl.show() pl.close() return df_pvalues def get_zscore_features(df_all, feature_names, out_file, REMOVE_OUTLIER=0, zscore_threshold=ZSCORE_OUTLIER_THRESHOLD): # if remove_outlier ==0 , just clip at threshold featureArray = df_all[feature_names].astype(float) 
featureArray.fillna(0,inplace=True) ### might introduce some bias normalized = zscore(featureArray) # normalized = featureArray # normalized[~np.isnan(featureArray)] = zscore(featureArray[~np.isnan(featureArray)]) num_outliers = np.count_nonzero(normalized < -zscore_threshold) + np.count_nonzero( normalized > zscore_threshold) print("Found %d |z score| > %f in zscore matrix :" % (num_outliers, zscore_threshold) ) df_all_modified = df_all df_outliers = pd.DataFrame() if num_outliers > 0: if not REMOVE_OUTLIER: # just clip normalized[normalized < -zscore_threshold] = -zscore_threshold normalized[normalized > zscore_threshold] = zscore_threshold # else: # outliers_l = np.nonzero(normalized < -zscore_threshold) # outliers_h = np.nonzero(normalized > zscore_threshold) # outlier_index = np.unique((np.append(outliers_l[0], outliers_h[0]))) # # # remove outlier rows # df_all_modified = df_all_modified.drop(df_all_modified.index[outlier_index]) # normalized = np.delete(normalized, outlier_index, 0) # # # re-zscoring and clipping # # m_featureArray = df_all_modified[feature_names].astype(float) # # normalized = zscore(m_featureArray) # # normalized[normalized < -zscore_threshold] = -zscore_threshold # # normalized[normalized > zscore_threshold] = zscore_threshold # # # print("Removed %d outlier neurons" % len(outlier_index)) # # df_outliers = df_all.iloc[outlier_index] df_z = pd.DataFrame(normalized) df_z.columns = feature_names df_z.index = df_all['swc_file_name'] if out_file: df_z.to_csv(out_file, index=True) print("save to " + out_file ) if (df_z.shape[0] != df_all_modified.shape[0]): print ("error: the sample size of the zscore and the original table does not match!") return df_z, df_all_modified, df_outliers def output_single_cluster_results(df_cluster, output_dir, output_prefix, snapshots_dir=None, swc_path = None): csv_file = output_dir + '/' + output_prefix + '.csv' df_cluster.to_csv(csv_file, index=False) ano_file = output_dir + '/' + output_prefix + '.ano' 
    generateLinkerFileFromDF(df_cluster, ano_file, False, swc_path)

    # Copy the Vaa3D .bmp snapshots for this cluster and tile them into a
    # single overview image; best-effort, prints a notice if no dir given.
    if (snapshots_dir):
        copySnapshots(df_cluster, snapshots_dir, output_dir + '/' + output_prefix)
        assemble_screenshots(output_dir + '/' + output_prefix, output_dir + '/' + output_prefix + '_assemble.png', 128)
    else:
        print "no bmp copying from:", snapshots_dir
    return


def output_clusters(assign_ids, df_zscores, df_all, feature_names, output_dir, snapshots_dir=None):
    """Write per-cluster results (csv + ano + optional snapshots) to output_dir.

    assign_ids: one cluster label per row of df_all / df_zscores.
    Returns a list of per-cluster z-score value arrays (used for the Dunn index).
    """
    if not os.path.exists(output_dir):
        os.mkdir(output_dir)

    # Persist the specimen -> cluster assignment table.
    df_assign_id = pd.DataFrame()
    df_assign_id['specimen_name'] = df_all['specimen_name']
    df_assign_id['cluster_id'] = assign_ids
    df_assign_id.to_csv(output_dir + "/cluster_id.csv", index=False)

    clusters = np.unique(assign_ids)
    num_cluster = len(clusters)
    cluster_list = []  # for dunn index calculation
    print("There are %d clusters in total" % num_cluster)

    df_cluster = pd.DataFrame()
    df_zscore_cluster = pd.DataFrame()
    for i in clusters:
        ids = np.nonzero(assign_ids == i)[0]  # starting from 0
        df_cluster = df_all.iloc[ids]
        print(" %d neurons in cluster %d" % (df_cluster.shape[0], i))
        output_single_cluster_results(df_cluster, output_dir, "/cluster_" + str(i), snapshots_dir)
        df_zscore_cluster = df_zscores.iloc[ids]
        csv_file2 = output_dir + '/cluster_zscore_' + str(i) + '.csv'
        df_zscore_cluster.to_csv(csv_file2, index=False)
        cluster_list.append(df_zscore_cluster.values)

    ## pick the cluster-specific features and plot histograms
    cluster_specific_features(df_all, assign_ids, feature_names, output_dir + '/pvalues.csv')
    return cluster_list


def ward_cluster(df_all, feature_names, max_cluster_num, output_dir, snapshots_dir=None, RemoveOutliers=0, datasetType='ivscc'):
    """Hierarchical (Ward) clustering on z-scored features.

    Flattens the dendrogram into at most max_cluster_num clusters, writes all
    per-cluster outputs, and returns (linkage matrix, z-score DataFrame).
    NOTE(review): output_dir is wiped with "rm -r <dir>/*" when it exists.
    """
    print("\n\n\n *************** ward computation, max_cluster = %d *************:" % max_cluster_num)

    if not os.path.exists(output_dir):
        os.mkdir(output_dir)
    else:
        os.system("rm -r " + output_dir + '/*')

    #### similarity plots
    # df_simMatrix = distance_matrix(df_all, feature_names, output_dir + "/morph_features_similarity_matrix.csv", 1)
    # # visualize heatmap using ward on similarity matrix
    # out = heatmap_plot_distancematrix(df_simMatrix, df_all, output_dir, "Similarity")
    # linkage = out.dendrogram_row.calculated_linkage

    ##### z-score feature plots; outliers (if removed) get their own result dir
    df_zscores, df_all_outlier_removed, df_outliers = get_zscore_features(df_all, feature_names, output_dir + '/zscore.csv', RemoveOutliers)
    if (df_outliers.shape[0] > 0):
        output_single_cluster_results(df_outliers, output_dir, "outliers", snapshots_dir)

    # The heatmap helpers also compute the row linkage used for clustering.
    if datasetType == 'ivscc':
        linkage = heatmap_plot_zscore_ivscc(df_zscores, df_all_outlier_removed, output_dir, "feature zscores")
    if datasetType == 'bbp':
        linkage = heatmap_plot_zscore_bbp(df_zscores, df_all_outlier_removed, output_dir, "feature zscores")

    assignments = hierarchy.fcluster(linkage, max_cluster_num, criterion="maxclust")
    #hierarchy.dendrogram(linkage)

    ## put assignments into ano files and csv files
    clusters_list = output_clusters(assignments, df_zscores, df_all_outlier_removed, feature_names, output_dir, snapshots_dir)
    dunn_index = dunn(clusters_list)
    print("dunn index is %f" % dunn_index)
    return linkage, df_zscores


def silhouette_clusternumber(linkage, df_zscores, output_dir="."):
    """Silhouette analysis for determining the number of clusters.

    Sweeps 2..29 clusters on the given linkage and saves the average
    silhouette coefficient curve as a PDF.
    """
    print("Silhouettee analysis:")
    scores = []
    for n_clusters in range(2, 30):
        assignments = hierarchy.fcluster(linkage, n_clusters, criterion="maxclust")
        silhouette_avg = silhouette_score(df_zscores, assignments)
        print("For n_clusters =", n_clusters, "The average silhouette_score is :", silhouette_avg)
        scores.append(silhouette_avg)

    # plot silhouette vs. cluster count
    pl.figure()
    pl.plot(range(2, 30), scores, "*-")
    pl.xlabel("cluster number")
    pl.ylabel("average sihouettee coefficient")
    pl.savefig(output_dir + '/sihouettee_clusternumber.pdf')
    #pl.show()
    pl.close()
    return


def dunnindex_clusternumber(linkage, df_zscores, output_dir="."):
    """Dunn-index analysis over 2..29 flat clusters; saves the curve as PDF."""
    index_list = []
    for n_clusters in range(2, 30):
        assignments = hierarchy.fcluster(linkage, n_clusters, criterion="maxclust")

        df_assign_id = pd.DataFrame()
        df_assign_id['cluster_id'] = assignments

        clusters = np.unique(assignments)
        num_cluster = len(clusters)
        cluster_list = []  # for dunn index calculation

        df_cluster = pd.DataFrame()
        df_zscore_cluster = pd.DataFrame()
        for i in clusters:
            ids = np.nonzero(assignments == i)[0]  # starting from 0
            df_zscore_cluster = df_zscores.iloc[ids]
            cluster_list.append(df_zscore_cluster.values)

        dunn_index = dunn(cluster_list)
        index_list.append(dunn_index)

    pl.figure()
    pl.plot(range(2, 30), index_list, "*-")
    pl.xlabel("cluster number")
    pl.ylabel("dunn index")
    pl.savefig(output_dir + '/dunnindex_clusternumber.pdf')
    #pl.show()
    return


def affinity_propagation(df_all, feature_names, output_dir, snapshots_dir=None, RemoveOutliers=0):
    """Affinity-propagation clustering on decorrelated, z-scored features.

    Returns (number of clusters found, dunn index).
    NOTE(review): output_dir is wiped with "rm -r <dir>/*" when it exists;
    df_zscores.as_matrix() is deprecated pandas API (pre-0.23 era code).
    """
    ###### Affinity Propogation ##############
    print("\n\n\n *************** affinity propogation computation ****************:")

    # Drop features correlated above 0.95 before clustering.
    redundancy_removed_features_names = remove_correlated_features(df_all, feature_names, 0.95)
    print(" The %d features that are not closely correlated are %s" % (
        len(redundancy_removed_features_names), redundancy_removed_features_names))

    if not os.path.exists(output_dir):
        os.mkdir(output_dir)
    else:
        os.system("rm -r " + output_dir + '/*')

    # Compute Affinity Propagation
    df_zscores, df_all_outlier_removed, df_outliers = get_zscore_features(df_all, redundancy_removed_features_names, None, RemoveOutliers)
    if (df_outliers.shape[0] > 0):
        output_single_cluster_results(df_outliers, output_dir, "outliers", snapshots_dir)

    X = df_zscores.as_matrix()
    af = AffinityPropagation().fit(X)
    cluster_centers_indices = af.cluster_centers_indices_
    labels = af.labels_
    labels = labels + 1  # the default labels start from 0; to be consistent with ward, add 1 so that it starts from 1

    clusters_list = output_clusters(labels, df_zscores, df_all_outlier_removed, redundancy_removed_features_names, output_dir, snapshots_dir)
    dunn_index = dunn(clusters_list)
    print("dunn index is %f" % dunn_index)
    return len(np.unique(labels)), dunn_index


def run_ward_cluster(df_features, feature_names, num_clusters, output_dir, output_postfix):
    """Decorrelate features, run ward_cluster, then silhouette analysis.

    Returns the list of decorrelated feature names actually used.
    """
    redundancy_removed_features_names = remove_correlated_features(df_features, feature_names, 0.95)
    print(" The %d features that are not closely correlated are %s" % (
        len(redundancy_removed_features_names), redundancy_removed_features_names))

    #num_clusters, dunn_index1 = affinity_propagation(merged, redundancy_removed_features_names, output_dir + '/ap' + postfix, swc_screenshot_folder, REMOVE_OUTLIERS)
    linkage, df_zscore = ward_cluster(df_features, redundancy_removed_features_names, num_clusters, output_dir + '/ward' + output_postfix)
    silhouette_clusternumber(linkage, df_zscore, output_dir + '/ward' + output_postfix)

    return redundancy_removed_features_names


def main():
    """Driver: load the 0108 feature csv, split spiny/aspiny, Ward-cluster aspiny.

    All paths are hard-coded to one lab data layout (see data_DIR below).
    """
    ######################################################################################################################
    data_DIR = "/data/mat/xiaoxiaol/data/lims2/pw_aligned_1223"
    #default_all_feature_merged_file = data_DIR + '/keith_features_23dec.csv'

    # drop outliers, edit dendrite_type, creline
    #df_features = pd.read_csv(data_DIR +'/0107_new_features.csv')
    #df_features = df_features[df_features['QC status'] != "Outlier"]
    #
    # parse creline info from specimen_name
    #df_features.dropnas()
    # crelines=[]
    # swc_file_names=[]
    # for i in range(df_features.shape[0]):
    #     sn=df_features['specimen_name'][i]
    #     fn = df_features['specimen_name'][i].split('/')[-1]
    #     cl=sn.split(';')[0]
    #     crelines.append(cl)
    #     swc_file_names.append(fn)
    # df_features['cre_line'] = pd.Series(crelines)
    # df_features['swc_file_name'] = pd.Series(swc_file_names)
    # df_features.to_csv(data_DIR+'/filtered_w_cre.csv')

    input_csv_file = data_DIR + '/0108/0108_features.csv'
    out_dir = data_DIR + '/0108/clustering_results/no_GMI'
    default_swc_screenshot_folder = data_DIR + "/figures/pw_aligned_bmps"
    #######################################################################################################################
    swc_screenshot_folder = default_swc_screenshot_folder
    method = "all"
    SEL_FEATURE = "all"

    if not os.path.exists(out_dir):
        os.mkdir(out_dir)

    ########################################################
    all_feature_file = input_csv_file
    #########################################################

    # Metadata columns carried through alongside the numeric features.
    meta_feature_names = np.array(['specimen_name', 'specimen_id', 'dendrite_type', 'cre_line', 'region_info', 'filename', 'swc_file_name'])

    basal_feature_names = np.array(['basal_average_bifurcation_angle_local', 'basal_average_bifurcation_angle_remote', 'basal_average_contraction', 'basal_average_fragmentation',
                                    'basal_max_branch_order', 'basal_max_euclidean_distance', 'basal_max_path_distance',
                                    'basal_nodes_over_branches', 'basal_number_of_bifurcations',
                                    'basal_number_of_branches', 'basal_number_of_stems', 'basal_number_of_tips', 'basal_overall_depth', 'basal_overall_height',
                                    'basal_overall_width', 'basal_total_length', 'bb_first_moment_x_basal', 'bb_first_moment_y_basal', 'bb_first_moment_z_basal',
                                    'kg_soma_depth',
                                    'basal_moment1', 'basal_moment10', 'basal_moment11', 'basal_moment12', 'basal_moment13', 'basal_moment2',
                                    'basal_moment3', 'basal_moment4',
                                    'basal_moment5', 'basal_moment6', 'basal_moment7', 'basal_moment8', 'basal_moment9'])
    # excluded: 'basal_total_surface','basal_total_volume','basal_soma_surface','basal_number_of_nodes','basal_average_diameter',
    # 'basal_moment1','basal_moment10','basal_moment11','basal_moment12','basal_moment13','basal_moment14','basal_moment2','basal_moment3','basal_moment4',
    # 'basal_moment5','basal_moment6','basal_moment7','basal_moment8','basal_moment9','basal_average_parent_daughter_ratio'

    apical_feature_names = np.array(['apical_average_bifurcation_angle_local', 'apical_average_bifurcation_angle_remote', 'apical_average_contraction',
                                     'apical_average_fragmentation', 'apical_max_branch_order', 'apical_max_euclidean_distance',
                                     'apical_max_path_distance', 'apical_nodes_over_branches', 'apical_number_of_bifurcations', 'apical_number_of_branches',
                                     'apical_number_of_tips', 'apical_overall_depth', 'apical_overall_height', 'apical_overall_width', 'apical_total_length',
                                     'kg_branch_mean_from_centroid_z_apical',
                                     'kg_branch_stdev_from_centroid_z_apical',
                                     'kg_centroid_over_farthest_branch_apical',
                                     'kg_centroid_over_farthest_neurite_apical',
                                     'kg_centroid_over_radial_dist_apical',
                                     'kg_mean_over_centroid',
                                     'kg_mean_over_farthest_branch_apical',
                                     'kg_mean_over_farthest_neurite_apical',
                                     'kg_mean_over_radial_dist_apical',
                                     'kg_mean_over_stdev',
                                     'kg_num_branches_over_radial_dist_apical',
                                     'kg_num_outer_apical_branches',
                                     'kg_outer_mean_from_center_z_apical',
                                     'kg_outer_mean_over_stdev',
                                     'kg_outer_stdev_from_center_z_apical',
                                     'kg_peak_over_moment_z_apical',
                                     'kg_radial_dist_over_moment_z_apical',
                                     'kg_soma_depth'])
    #, 'apical_number_of_nodes'
    # ])#'apical_soma_surface', 'apical_total_surface','apical_total_volume','apical_average_diameter','apical_moment1','apical_moment10','apical_moment11','apical_moment12','apical_moment13','apical_moment14',
    # 'apical_moment2','apical_moment3','apical_moment4','apical_moment5','apical_moment6','apical_moment7','apical_moment8','apical_moment9','apical_average_parent_daughter_ratio','apical_number_of_stems?? always 1',

    bbp_feature_names = np.array(['bb_first_moment_apical', 'bb_first_moment_basal', 'bb_first_moment_dendrite', 'bb_first_moment_x_apical', 'bb_first_moment_x_basal',
                                  'bb_first_moment_x_dendrite', 'bb_first_moment_y_apical', 'bb_first_moment_y_basal', 'bb_first_moment_y_dendrite', 'bb_first_moment_z_apical',
                                  'bb_first_moment_z_basal', 'bb_first_moment_z_dendrite', 'bb_max_branch_order_apical', 'bb_max_branch_order_basal', 'bb_max_branch_order_dendrite',
                                  'bb_max_path_length_apical', 'bb_max_path_length_basal', 'bb_max_path_length_dendrite', 'bb_max_radial_distance_apical', 'bb_max_radial_distance_basal',
                                  'bb_max_radial_distance_dendrite', 'bb_mean_trunk_diameter_apical', 'bb_mean_trunk_diameter_basal', 'bb_mean_trunk_diameter_dendrite',
                                  'bb_number_branches_apical', 'bb_number_branches_basal', 'bb_number_branches_dendrite', 'bb_number_neurites_apical', 'bb_number_neurites_basal',
                                  'bb_number_neurites_dendrite', 'bb_second_moment_apical', 'bb_second_moment_basal', 'bb_second_moment_dendrite', 'bb_second_moment_x_apical',
                                  'bb_second_moment_x_basal', 'bb_second_moment_x_dendrite', 'bb_second_moment_y_apical', 'bb_second_moment_y_basal', 'bb_second_moment_y_dendrite',
                                  'bb_second_moment_z_apical', 'bb_second_moment_z_basal', 'bb_second_moment_z_dendrite', 'bb_total_length_apical', 'bb_total_length_basal',
                                  'bb_total_length_dendrite'])
    # excluded: 'bb_total_surface_area_apical','bb_total_volume_basal','bb_total_volume_apical','bb_total_volume_dendrite','bb_total_surface_area_basal','bb_total_surface_area_dendrite'

    #selected_features = ['max_euclidean_distance', 'num_stems', 'num_bifurcations', 'average_contraction', 'parent_daughter_ratio']
    #tmp = np.append(meta_feature_names, basal_feature_names)
    all_dendritic_feature_names = np.append(basal_feature_names, apical_feature_names)  #bbp_feature_names
    spiny_feature_names = apical_feature_names
    aspiny_feature_names = basal_feature_names

    df_features = pd.read_csv(all_feature_file)
    print df_features.columns
    df_features[all_dendritic_feature_names] = df_features[all_dendritic_feature_names].astype(float)

    print "There are %d neurons in this dataset" % df_features.shape[0]
    print "Dendrite types: ", np.unique(df_features['dendrite_type'])

    # df_features_all = df_features[np.append(meta_feature_names,all_dendritic_feature_names)]
    # df_features_all.to_csv(data_DIR+'/0108/all_dendrite_features.csv')

    # Split by dendrite type; 'sparsely spiny' is grouped with 'aspiny'.
    df_groups = df_features.groupby(['dendrite_type'])
    df_spiny = df_groups.get_group('spiny')
    # df_w_spiny = df_spiny[np.append(meta_feature_names,spiny_feature_names)]
    # df_w_spiny.to_csv(data_DIR +'/0108/spiny_features.csv', index=False)

    df_aspiny = pd.concat([df_groups.get_group('aspiny'), df_groups.get_group('sparsely spiny')], axis=0)
    # df_w_aspiny = df_aspiny[np.append(meta_feature_names,aspiny_feature_names)]
    # df_w_aspiny.to_csv(data_DIR +'/0108/aspiny_features.csv', index=False)
    print "There are %d neurons are aspiny " % df_aspiny.shape[0]
    print "There are %d neurons are spiny\n\n" % df_spiny.shape[0]

    feature_names = all_dendritic_feature_names
    method = "ward"
    REMOVE_OUTLIERS = 0
    postfix = "_" + SEL_FEATURE
    postfix += "_ol_clipped"

    #run_ward_cluster(df_features, feature_names, num_clusters,output_postfix):
    # num_clusters, dunn_index1 = affinity_propagation(df_aspiny, aspiny_feature_names,
    #                                                  out_dir + '/ap_aspiny' + postfix,
    #                                                  None, REMOVE_OUTLIERS)
    # print "spiny ap:"
    # print num_clusters
    #
    # num_clusters, dunn_index1 = affinity_propagation(df_spiny, spiny_feature_names,
    #                                                  out_dir + '/ap_spiny' + postfix,
    #                                                  None, REMOVE_OUTLIERS)
    # print "aspiny ap:"
    # print num_clusters
    # exit()

    redundancy_removed_features = run_ward_cluster(df_aspiny, aspiny_feature_names, num_clusters = 6 , output_dir = out_dir, output_postfix= '_aspiny' + postfix)
    df_w_aspiny = df_aspiny[np.append(meta_feature_names, redundancy_removed_features)]
    df_w_aspiny.to_csv(data_DIR + '/0108/aspiny_selected_features.csv', index=False)

    # # # df_spiny.fillna(0,inplace=True)
    # redundancy_removed_features = run_ward_cluster(df_spiny, spiny_feature_names, num_clusters = 9 ,output_dir = out_dir, output_postfix='_spiny'+ postfix)
    # df_w_spiny = df_spiny[np.append(meta_feature_names,redundancy_removed_features)]
    # df_w_spiny.to_csv(data_DIR +'/0108/spiny_selected_features.csv', index=False)
    #


if __name__ == "__main__":
    main()
from xboxdrv_parser import Controller
from time import sleep
import argparse
import os
import sys
sys.path.append(os.path.abspath("../../.."))
from util.communication.grapevine import Communicator
from robosub_settings import settings


def main (args):
    """Poll the gamepad and publish normalized movement packets forever.

    args: argparse namespace with .module_name (Grapevine module) and
    .epoch (sleep seconds between cycles).
    """
    com = Communicator (args.module_name)
    # Map raw axes (0..255) onto the named controls, normalized to -1..1.
    controller = Controller (["X1", "Y1", "X2", "Y2", "R2", "L2"], ["right/left", "forward/backward", "yaw", "pitch", "up", "down"], (0, 255), (-1, 1))

    while True:
        control_packet = controller.get_values ()
        try:
            outgoing_packet = {"right/left": 0.0, "forward/backward": 0.0, "yaw": 0.0, "pitch": 0.0, "up/down": 0.0, "roll": 0.0}

            # Further parse controller values here

            # Controller's sticks Y axis are switched
            control_packet["forward/backward"] = -control_packet["forward/backward"]
            control_packet["pitch"] = -control_packet["pitch"]

            # Up and Down are not -1 to 1. Just 0 - 1
            # (triggers rest at -1, so remap each to its own half-range).
            control_packet["up"] = controller.map_range(control_packet["up"], -1, 1, 0, 1)
            control_packet["down"] = controller.map_range(control_packet["down"], -1, 1, 0, -1)

            # Transferring to outgoing packet; up/down triggers are summed
            # into a single signed axis.
            outgoing_packet["forward/backward"] = control_packet["forward/backward"]
            outgoing_packet["right/left"] = control_packet["right/left"]
            outgoing_packet["up/down"] = control_packet["up"] + control_packet["down"]
            outgoing_packet["yaw"] = control_packet["yaw"]
            outgoing_packet["pitch"] = control_packet["pitch"]
            #outgoing_packet["roll"] = control_packet["roll"]
            outgoing_packet["roll"] = 0.0

            # Controller sticks are not centered very well: dead-band any
            # axis below 0.10 to zero.
            # TODO: Find a better way to do this (short of getting a new controller)
            for key in outgoing_packet.keys ():
                if abs (outgoing_packet[key]) < .10:
                    outgoing_packet[key] = 0.0

            print outgoing_packet

            Fuzzy_Sets = {"Fuzzy_Sets": outgoing_packet}
            com.publish_message (Fuzzy_Sets)
        except KeyError as i:
            # A control may be missing from an early/partial packet; skip
            # this cycle rather than crash.
            pass

        sleep (args.epoch)


def commandline():
    """Parse command-line options (-e/--epoch, -m/--module_name)."""
    parser = argparse.ArgumentParser(description='Mock module.')
    parser.add_argument('-e', '--epoch', type=float, default=0.1, help='Sleep time per cycle.')
    parser.add_argument('-m', '--module_name', type=str, default='movement/fuzzification', help='Module name.')
    return parser.parse_args()


if __name__ == '__main__':
    args = commandline()
    main(args)
""" Integration test: permit call """ import os import sys myPath = os.path.dirname(os.path.abspath(__file__)) sys.path.insert(0, myPath + '/../../') import logging import nose from nose.tools import * import inte_testutils from telewall.core.model import TelephoneNumber from telewall.core.util import sleep_until logging.basicConfig(filename='/tmp/telewall-inte.log', level=logging.DEBUG) logging.getLogger('telewall').setLevel(logging.DEBUG) LOG = logging.getLogger(__name__) def test_Anruf_erlauben(): u = inte_testutils.TestUtil() u.unblock_callerid(TelephoneNumber('0790000001')) call = u.make_call_to_incoming(callerid='0790000001') LOG.info('call: %s', call) sleep_until(lambda: 'Ringing' in call.get_call_states() or 'Up' in call.get_call_states(), 5) call.hangup() states = call.get_call_states() LOG.info('states: %s', states) assert_true('Ringing' in states, 'Das analoge Telefon sollte angerufen worden sein, aber es gab keinen "Ringing" Status.') call.stop() if __name__ == '__main__': nose.runmodule()
from __future__ import absolute_import

import argparse
import os
import sys

import yarrharr


def main(argv=sys.argv[1:]):
    """Command-line entry point for the Yarrharr feed reader.

    Parses --version, points Django at yarrharr.settings, and starts the
    application. NOTE(review): the default argv is captured once at import
    time (common for entry points, but it ignores later sys.argv changes).
    """
    parser = argparse.ArgumentParser(description="Yarrharr feed reader")
    parser.add_argument("--version", action="version", version=yarrharr.__version__)
    parser.parse_args(argv)

    os.environ["DJANGO_SETTINGS_MODULE"] = "yarrharr.settings"

    # Imported here so Django settings are configured before app import.
    from yarrharr.application import run
    run()
from datetime import date
from itertools import starmap

from hscommon.testutil import eq_

from ...model.amount import Amount
from ...model.currency import USD
from ...model.entry import Entry
from ...model.transaction import Transaction
from ...plugin.base_import_bind import ReferenceBind


def create_entry(entry_date, description, reference):
    """Build a one-split Entry of 1 USD carrying the given import reference."""
    txn = Transaction(entry_date, description=description, amount=Amount(1, USD))
    split = txn.splits[0]
    split.reference = reference
    return Entry(split, split.amount, 0, 0, 0)


def test_typical_situation():
    # Verify that ReferenceBind.match_entries() returns expected entries in a
    # typical situation: we only match entries with the same reference.
    plugin = ReferenceBind()
    DATE = date(2017, 10, 10)
    existing_entries = list(starmap(create_entry, [
        (DATE, 'e1', 'ref1'),
        (DATE, 'e2', 'ref2'),
    ]))
    imported_entries = list(starmap(create_entry, [
        (DATE, 'i1', 'ref1'),
        (DATE, 'i2', 'ref3'),
    ]))
    matches = plugin.match_entries(None, None, None, existing_entries, imported_entries)
    # Only the 'ref1' pair matches; weight 0.99 is the plugin's fixed score.
    EXPECTED = [('e1', 'i1', True, 0.99)]
    result = [(m.existing.description, m.imported.description, m.will_import, m.weight) for m in matches]
    eq_(result, EXPECTED)


def test_reconciled_entry():
    # Reconciled entries are matched, but with will_import = False
    plugin = ReferenceBind()
    DATE = date(2017, 10, 10)
    existing = create_entry(DATE, 'e1', 'ref1')
    existing.split.reconciliation_date = DATE
    imported = create_entry(DATE, 'i1', 'ref1')
    matches = plugin.match_entries(None, None, None, [existing], [imported])
    EXPECTED = [('e1', 'i1', False, 0.99)]
    result = [(m.existing.description, m.imported.description, m.will_import, m.weight) for m in matches]
    eq_(result, EXPECTED)


def test_match_first_only():
    # If two entries have the same reference, we only get one match (we don't
    # care which, it's not really supposed to happen...).
    plugin = ReferenceBind()
    DATE = date(2017, 10, 10)
    existing_entries = list(starmap(create_entry, [
        (DATE, 'e1', 'ref1'),
    ]))
    imported_entries = list(starmap(create_entry, [
        (DATE, 'i1', 'ref1'),
        (DATE, 'i2', 'ref1'),
    ]))
    matches = plugin.match_entries(None, None, None, existing_entries, imported_entries)
    eq_(len(matches), 1)
from django.core.management.base import BaseCommand, CommandError
from django.core import management
from django.db.models import Count

from scoping.models import *


class Command(BaseCommand):
    """Check a query file: count downloaded records and per-year documents."""

    help = 'check a query file - how many records'

    def add_arguments(self, parser):
        """Register the required query id positional argument."""
        parser.add_argument('qid', type=int)

    def handle(self, *args, **options):
        """Count 'TY  -'-style RIS records in the query's result file and
        print the per-publication-year document counts stored in the DB.
        """
        qid = options['qid']
        q = Query.objects.get(pk=qid)

        # Each RIS record starts with a 'TY - ' tag; counting occurrences
        # gives the number of downloaded documents.
        p = 'TY - '

        # BUGFIX: was `q.query_file.name is not ''` — identity comparison
        # against a string literal is never a reliable emptiness test (and
        # raises SyntaxWarning on modern CPython). Use truthiness instead.
        if q.query_file.name:
            fpath = q.query_file.path
        else:
            if q.database == "scopus":
                fname = 's_results.txt'
            else:
                fname = 'results.txt'
            # NOTE(review): `settings` is not imported explicitly here; it
            # presumably comes via `from scoping.models import *` — verify.
            fpath = f'{settings.QUERY_DIR}/{qid}/{fname}'

        with open(fpath, 'r') as f:
            c = f.read().count(p)

        print('\n{} documents in downloaded file\n'.format(c))

        # Per-year breakdown of documents already linked to this query.
        if q.doc_set.count() > 0:
            yts = q.doc_set.values('PY').annotate(
                n=Count('pk')
            )
            for y in yts:
                print('{} documents in {}'.format(y['n'], y['PY']))
""" IfExp astroid node An if statement written in an expression form. Attributes: - test (Node) - Holds a single node such as Compare. - Body (List[Node]) - A list of nodes that will execute if the condition passes. - orelse (List[Node]) - The else clause. Example: - test -> True - Body -> [x = 1] - orelse -> [0] """ x = 1 if True else 0
import pilas

# Read saved configuration: level, screen mode and language, one per line.
# NOTE(review): readline() keeps the trailing newline; the "ES" comparison
# below presumably relies on the last line having none — verify datos.txt.
archi = open('datos.txt', 'r')
nivel = archi.readline()
pantalla = archi.readline()
idioma = archi.readline()
archi.close()

# Load the localized string table (menu1..menu5, menu_aviso, ...).
if idioma == "ES":
    from modulos.ES import *
else:
    from modulos.EN import *


class EscenaMenu(pilas.escena.Base):
    "Presentation scene where the game options are chosen."

    def __init__(self, musica=False):
        pilas.escena.Base.__init__(self)
        self.musica = musica  # whether background music is enabled

    def iniciar(self):
        """Set up background, hint text, menu, per-frame task and music."""
        pilas.fondos.Fondo("data/guarida.jpg")
        pilas.avisar(menu_aviso)
        self.crear_el_menu_principal()
        # Poll self.act every 0.1 s to detect when the menu has slid away.
        pilas.mundo.agregar_tarea(0.1, self.act)
        self.sonido = pilas.sonidos.cargar("data/menu.ogg")
        self.sonido.reproducir(repetir=True)

    def crear_el_menu_principal(self):
        """Build the main menu actor with its five localized options."""
        opciones = [
            (menu1, self.comenzar_a_jugar),
            (menu2, self.mostrar_ayuda_del_juego),
            (menu3, self.mostrar_historia),
            (menu4, self.mostrar_opciones),
            (menu5, self.salir_del_juego)
        ]
        # Translucent backdrop behind the menu.
        self.trans = pilas.actores.Actor("data/trans.png")
        self.trans.x = -155
        self.trans.arriba = 85
        self.menu = pilas.actores.Menu(opciones, x=-150, y=70, color_normal=pilas.colores.negro, color_resaltado=pilas.colores.rojo)
        self.menu.x = -150

    def act(self):
        """Periodic task: once the menu slid off-screen (x == -500), switch
        to the scene chosen in self.donde. Returning True keeps the task
        alive; False (after starting the game) cancels it.
        """
        if self.menu.x == -500:
            if self.donde == "jugar":
                self.sonido.detener()
                import escena_niveles
                pilas.cambiar_escena(escena_niveles.EscenaNiveles())
                return False
            elif self.donde == "historia":
                self.sonido.detener()
                import escena_historia
                pilas.cambiar_escena(escena_historia.Historia())
            elif self.donde == "ayuda":
                self.sonido.detener()
                import escena_ayuda
                pilas.cambiar_escena(escena_ayuda.Ayuda())
            elif self.donde == "opciones":
                self.sonido.detener()
                import escena_opciones
                pilas.cambiar_escena(escena_opciones.Opciones())
        return True

    def mostrar_historia(self):
        # Slide menu and backdrop off-screen, then act() opens the story.
        self.menu.x = [-500]
        self.trans.x = [-500]
        self.donde = "historia"

    def mostrar_opciones(self):
        # Slide menu off-screen, then act() opens the options scene.
        self.menu.x = [-500]
        self.trans.x = [-500]
        self.donde = "opciones"

    def comenzar_a_jugar(self):
        # Slide menu off-screen, then act() starts the level-select scene.
        self.menu.x = [-500]
        self.trans.x = [-500]
        self.donde = "jugar"

    def mostrar_ayuda_del_juego(self):
        # Slide menu off-screen, then act() opens the help scene.
        self.menu.x = [-500]
        self.trans.x = [-500]
        self.donde = "ayuda"

    def salir_del_juego(self):
        # Quit the game immediately.
        pilas.terminar()