text
stringlengths
0
1.05M
meta
dict
import matplotlib as mpl mpl.use('agg') import neurokernel.LPU.utils.visualizer as vis import networkx as nx # Temporary fix for bug in networkx 1.8: nx.readwrite.gexf.GEXF.convert_bool = {'false':False, 'False':False, 'true':True, 'True':True} #starts up the visualizer code V = vis.visualizer() #takes in the input file as defined in data, and plots it V.add_LPU('./data/simple_input.h5', LPU='Input') V.add_plot({'type': 'waveform', 'ids': [[0]]}, 'input_Input') #takes in the spike data/potential from the neuron output and plots it V.add_LPU('simple_output_spike.h5', './data/simple_lpu.gexf.gz', 'Simple LPU (Spikes)') #the [0,1] under ids should print both hh and leaky V.add_plot({'type':'raster', 'ids': {0: [0]}, 'yticks': [0], 'yticklabels': [0]}, 'Simple LPU (Spikes)','Output') V.add_LPU('simple_output_gpot.h5', './data/simple_lpu.gexf.gz', 'Simple LPU (Graded Potential)') V.add_plot({'type': 'waveform', 'ids': {0:[0]}}, 'Simple LPU (Graded Potential)', 'Output') V.add_plot({'type': 'waveform', 'ids': {0:[1]}}, 'Simple LPU (Graded Potential)', 'Output') V.add_plot({'type': 'waveform', 'ids': {0:[2]}}, 'Simple LPU (Graded Potential)', 'Output') #vars for plots #how often it updates V._update_interval = 50 #rows and colums to plot in terms of size V.rows = 5 V.cols = 1 #self explantory V.fontsize = 10 #V.out_filename = 'simple_output.avi' #V.codec = 'libtheora' #time step V.dt = 0.0001 #Changes the sizes on the axis V.xlim = [0, 1.0] V.ylim = [-70.0, 10.0] #figure size V.figsize = (16, 9) V.title = "Simple LPU Testing RK4 Models" #runs the visualizer V.run('simple_output.png', 120)
{ "repo_name": "cerrno/neurokernel", "path": "examples/testLPU/visualize_testLPU.py", "copies": "1", "size": "1818", "license": "bsd-3-clause", "hash": -5919871173636297000, "line_mean": 24.9714285714, "line_max": 71, "alpha_frac": 0.6287128713, "autogenerated": false, "ratio": 2.7883435582822087, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.8765123594297494, "avg_score": 0.030386567056942926, "num_lines": 70 }
import re

from .tlds import tlds

__all__ = ['AbsUrl', 'RelUrl', 'UrlException', 'UrlParseException']


class UrlException(Exception):
    pass


class UrlParseException(Exception):
    pass


class AbsUrl():
    """An absolute url of the form scheme://host[/path][?query].

    The host is split into subdomain / hostname / tld where possible;
    extend() resolves a relative url against this one.
    """

    # Raw strings throughout: the original used plain literals, so escapes
    # such as "\." and "\?" were invalid string escape sequences (a
    # DeprecationWarning since Python 3.6, SyntaxWarning from 3.12).
    # The scheme class is also fixed: the original "[a-zA-Z0-9+-\.]"
    # accidentally created a '+'-to-'.' character range that matched ','.
    regex = re.compile(
        r"^([a-zA-Z][a-zA-Z0-9+.-]+)://" +                      # scheme (RFC 3986)
        r"(?:(?:" +
        r"(?:(" +
        r"(?:[a-zA-Z0-9][a-zA-Z0-9-]{0,64}\.)*" +               # subdomain
        r"(?:[a-zA-Z0-9][a-zA-Z0-9-]{0,63})" +                  # hostname
        r"(?:\.[a-zA-Z]{2,3}(?:\.[a-zA-Z]{2,3})?)" +            # tld
        r")|(" +
        r"(?:[a-zA-Z][a-zA-Z0-9-]{0,63})" +                     # local hostname
        r"))" +
        r")|(" +
        r"[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}" +     # ip address
        r"))" +
        r"[/?]?.*$")                                            # rest of the url

    def __init__(self, url):
        self._url = url
        self._tld = None
        self._hostname = None
        self._subdomain = None
        self._scheme = None
        self._valid = None
        self.parse()

    def parse(self):
        """Split the url into scheme / subdomain / hostname / tld.

        Sets valid=False (instead of raising) when the url does not match
        the grammar at all; raises UrlParseException when a dotted name
        matches but has no usable domain part.
        """
        match = self.regex.match(self._url)
        if match is None:
            self._valid = False
            return

        self._valid = True
        self._scheme = match.group(1)

        if match.group(2) is not None:
            # Dotted domain name: peel tld, then hostname, rest is subdomain.
            parts = match.group(2).split('.')
            # Length check is a dirty fix for tlds which are also valid
            # domain names (e.g. two-part tlds such as "co.uk").
            if len(parts[-2]) <= 3 and parts[-2] in tlds:
                self._tld = parts[-2] + '.' + parts[-1]
                del parts[-1]
                del parts[-1]
            else:
                self._tld = parts[-1]
                del parts[-1]

            if len(parts) == 0:
                raise UrlParseException('%s does not have a valid domain name'%self._url)

            self._hostname = parts[-1]
            del parts[-1]
            if len(parts) > 0:
                self._subdomain = '.'.join(parts)

        elif match.group(3) is not None:
            # Bare local hostname (no dots).
            self._hostname = match.group(3)

        elif match.group(4) is not None:
            # Dotted-quad ip address.
            self._hostname = match.group(4)

    def get_hostname(self):
        return self._hostname

    def get_tld(self):
        return self._tld

    def get_domain(self):
        return self._hostname + (('.' + self._tld) if self._tld is not None else '')

    def get_subdomain(self):
        return self._subdomain

    def get_scheme(self):
        return self._scheme

    def is_valid(self):
        return self._valid

    def extend(self, url):
        """Resolve the relative url *url* against this absolute url.

        Supports protocol-relative (//host/..), root-relative (/path) and
        path-relative (../, ./) forms. Raises UrlException when this url is
        invalid or *url* climbs above the host root.
        """
        if not self._valid:
            raise UrlException('base url invalid')

        rel_url = RelUrl(url)

        if rel_url.protocol_relative:
            extended_url = self._scheme + '://' + rel_url.url
            return extended_url

        # Skip past "scheme://" when searching for path separators.
        search_start_pos = len(self._scheme) + 3

        end_pos = None
        if rel_url.root_relative:
            end_pos = self._url.find('/', search_start_pos)
        else:
            end_pos = self._url.rfind('/', search_start_pos)

        if end_pos < 0:
            end_pos = self._url.find('?', search_start_pos)
        if end_pos < 0:
            end_pos = len(self._url)

        if rel_url.root_relative:
            extended_url = self._url[0:end_pos] + '/' + rel_url.url
            return extended_url

        base_url = self._url[0:end_pos]
        base_parts = base_url.split('/')

        # base_parts always contains at least ['scheme:', '', host].
        if rel_url.up > (len(base_parts) - 3):
            raise UrlException('invalid number of levels up')

        base_parts = base_parts[0:len(base_parts)-rel_url.up]
        extended_url = '/'.join(base_parts) + '/' + rel_url.url
        return extended_url

    hostname = property(get_hostname)
    tld = property(get_tld)
    domain = property(get_domain)
    subdomain = property(get_subdomain)
    scheme = property(get_scheme)
    valid = property(is_valid)


class RelUrl():
    """A relative url; classifies it and strips the relative prefix."""

    def __init__(self, url):
        self._url = url
        self._up = 0              # number of "../" levels
        self._root_rel = False    # starts with "/"
        self._proto_rel = False   # starts with "//"
        self.parse()

    def parse(self):
        """Strip and record the leading relative markers of the url."""
        if self._url.startswith('//'):
            self._proto_rel = True
            self._url = self._url[2:]
            return

        if self._url.startswith('/'):
            # url relative to root
            self._root_rel = True
            self._url = self._url[1:]
            return

        while self._url.startswith('../'):
            self._up += 1
            self._url = self._url[3:]

        while self._url.startswith('./'):
            self._url = self._url[2:]

        if self._url.startswith('/'):
            # clean any extraneous front slashes
            self._url = self._url[1:]

    def get_url(self):
        return self._url

    def get_up(self):
        return self._up

    def get_root_rel(self):
        return self._root_rel

    def get_proto_rel(self):
        return self._proto_rel

    url = property(get_url)
    up = property(get_up)
    root_relative = property(get_root_rel)
    protocol_relative = property(get_proto_rel)
{ "repo_name": "amol9/mutils", "path": "redlib/net/urls.py", "copies": "2", "size": "4233", "license": "mit", "hash": -6798766798204427000, "line_mean": 19.6487804878, "line_max": 102, "alpha_frac": 0.5979210961, "autogenerated": false, "ratio": 2.7013401403956605, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.8977689734788102, "avg_score": 0.06431430034151167, "num_lines": 205 }
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# NOTE: the shebang must be the first line of the file; the original placed
# __author__ above it, which disabled it.
__author__ = 'amryf'

from ctypes import *
import time

from .ic_grabber_dll import IC_GrabberDLL
from .ic_exception import IC_Exception
from .ic_property import IC_Property
from . import ic_structures as structs
from IPython import embed  # kept: imported by the original module

GrabberHandlePtr = POINTER(structs.GrabberHandle)

# "typedefs": index positions map to the DLL's file-type / color-format enums.
IMG_FILETYPE = ['FILETYPE_BMP', 'FILETYPE_JPG']
COLOR_FORMAT = ['Y800', 'RGB24', 'RGB32', 'UYVY', 'Y16', 'NONE']

# C function type for the frame-ready callback.
# Defined outside of the class so it can wrap an unbound function.
C_FRAME_READY_CALLBACK = CFUNCTYPE(None, GrabberHandlePtr, POINTER(c_ubyte),
                                   c_ulong, c_void_p)


class IC_Camera(object):
    """Wrapper around a single IC Imaging Control camera grabber handle."""

    @property
    def callback_registered(self):
        # True once register_frame_ready_callback() has been called.
        return self._callback_registered

    def __init__(self, setting_file=None, show_dialog=False):
        """Create a grabber, optionally via a device dialog or a state file.

        :param setting_file: string -- path to a saved device-state file.
        :param show_dialog: boolean -- show the interactive device dialog.
        """
        self._handle = IC_GrabberDLL.create_grabber()
        if show_dialog:
            self._handle = IC_GrabberDLL.device_selection_dialog(self._handle)
        if setting_file:
            self._handle = IC_GrabberDLL.load_device_state(self._handle,
                                                           c_char_p(setting_file.encode()))
        if not self._handle:
            raise IC_Exception(-1)

        self._callback_registered = False
        # Frame-ready state shared with the DLL callback.
        self._frame = {'num': -1,
                       'ready': False}

    def __getattr__(self, attr):
        # Expose device properties (gain, exposure, ...) as attributes.
        if attr in IC_Property.get_all_property_names():
            return IC_Property(self._handle, attr)
        else:
            raise AttributeError(attr)

    def save_device_state(self, filename):
        """
        Save camera settings to a file.

        :param filename: string -- file name for the settings.
        """
        err = IC_GrabberDLL.save_device_state(self._handle,
                                              c_char_p(filename.encode()))
        if err != 1:
            # Was a bare Exception; IC_Exception for consistency with the
            # rest of the class.
            raise IC_Exception(err)

    def open(self):
        """
        Open the camera device, required for most functions.
        """
        # NOTE(review): self._unique_device_name is never assigned in this
        # class -- confirm it is set elsewhere before open() is used.
        err = IC_GrabberDLL.open_device_by_unique_name(self._handle,
                                                       self._unique_device_name)
        if err != 1:
            raise IC_Exception(err)

    def close(self):
        """
        Close the camera device.
        """
        IC_GrabberDLL.close_device(self._handle)

    def is_open(self):
        """
        Check if the camera device is currently open.

        :returns: boolean -- True if camera is open.
        """
        return bool(IC_GrabberDLL.is_dev_valid(self._handle))

    def show_property_dialog(self):
        """
        Show the property dialog for the device.
        """
        err = IC_GrabberDLL.show_property_dialog(self._handle)
        if err != 1:
            raise IC_Exception(err)

    def list_property_names(self):
        """Return the names of all known video/camera properties."""
        return IC_Property.get_all_property_names()

    def reset_properties(self):
        """
        Reset all properties to their default values.

        If a property has automation, the automatic will be enabled.
        If the device supports external trigger, the external trigger will
        be disabled.
        """
        return IC_GrabberDLL.reset_properties(self._handle)

    def list_video_formats(self):
        """
        :returns: list -- available video formats.
        """
        # Up to 40 format strings of at most 80 chars each.
        vf_list = ((c_char * 80) * 40)()

        num_vfs = IC_GrabberDLL.list_video_formats(self._handle,
                                                   byref(vf_list),
                                                   c_int(80))
        if num_vfs < 0:
            raise IC_Exception(num_vfs)

        return [vf.value for vf in vf_list if vf.value]

    def get_video_norm_count(self):
        """
        Get the number of the available video norm formats for the current
        device. A video capture device must have been opened before this call.

        :returns: int -- number of available video norms.
        """
        vn_count = IC_GrabberDLL.get_video_norm_count(self._handle)
        if vn_count < 0:
            raise IC_Exception(vn_count)
        return vn_count

    def get_video_norm(self, norm_index):
        """
        Get a string representation of the video norm specified by norm_index.
        norm_index must be between 0 and get_video_norm_count().

        :returns: string -- name of video norm of specified index.
        """
        # DLL says need to call this first for it to work.
        num_vns = self.get_video_norm_count()
        if norm_index >= num_vns:
            raise IC_Exception(-102)

        vn = IC_GrabberDLL.get_video_norm(self._handle, c_int(norm_index))
        if vn is None:
            raise IC_Exception(-104)
        return vn

    def get_video_format_count(self):
        """
        Get the number of the available video formats for the current device.
        A video capture device must have been opened before this call.

        :returns: int -- number of available video formats.
        """
        vf_count = IC_GrabberDLL.get_video_format_count(self._handle)
        if vf_count < 0:
            raise IC_Exception(vf_count)
        return vf_count

    def get_video_format(self, format_index):
        """
        Get a string representation of the video format specified by
        format_index. format_index must be between 0 and
        get_video_format_count().
        """
        # DLL says need to call this first for it to work.
        num_vfs = self.get_video_format_count()
        if format_index >= num_vfs:
            raise IC_Exception(-103)

        vf = IC_GrabberDLL.get_video_format(self._handle, c_int(format_index))
        if vf is None:
            raise IC_Exception(-105)
        return vf

    def set_video_format(self, video_format):
        """
        Set a video format for the device. Must be supported.

        :param video_format: string -- video format to use.
        """
        err = IC_GrabberDLL.set_video_format(self._handle,
                                             c_char_p(video_format.encode()))
        if err != 1:
            raise IC_Exception(err)

    def set_video_norm(self, video_norm):
        """
        Set the video norm format.

        :param video_norm: string -- video norm to use.
        """
        err = IC_GrabberDLL.set_video_norm(self._handle,
                                           c_char_p(video_norm.encode()))
        if err != 1:
            raise IC_Exception(err)

    def get_video_format_width(self):
        """Return the width of the current video format in pixels."""
        return IC_GrabberDLL.get_video_format_width(self._handle)

    def get_video_format_height(self):
        """Return the height of the current video format in pixels."""
        return IC_GrabberDLL.get_video_format_height(self._handle)

    def get_format(self):
        """Return the current color format (index into COLOR_FORMAT)."""
        return IC_GrabberDLL.get_format(self._handle)

    def set_format(self, color_format):
        """Set the color format (index into COLOR_FORMAT)."""
        err = IC_GrabberDLL.set_format(self._handle, c_int(color_format))
        if err != 1:
            raise IC_Exception(err)

    def remove_overlay(self, enable_overlay=False):
        """Enable or disable the overlay bitmap."""
        err = IC_GrabberDLL.remove_overlay(self._handle,
                                           c_int(enable_overlay))
        # NOTE(review): this raises whenever the DLL returns anything at
        # all -- confirm remove_overlay's return convention.
        if err is not None:
            raise IC_Exception(err)

    def is_triggerable(self):
        """Return True if the device supports external triggering."""
        return bool(IC_GrabberDLL.is_trigger_available(self._handle))

    def get_frame_rate(self):
        """Return the current frame rate."""
        return IC_GrabberDLL.get_frame_rate(self._handle)

    def set_frame_rate(self, frame_rate):
        """Set the frame rate."""
        err = IC_GrabberDLL.set_frame_rate(self._handle, c_float(frame_rate))
        if err != 1:
            raise IC_Exception(err)

    def enable_trigger(self, enable):
        """
        Enable or disable camera triggering.

        :param enable: boolean -- True to enable the trigger, False to disable.
        """
        err = IC_GrabberDLL.enable_trigger(self._handle, c_int(int(enable)))
        if err != 1:
            #raise IC_Exception(err)
            pass  # todo, always raises false error for some reason...?

    def enable_continuous_mode(self, enable):
        """
        Enable or disable continuous mode.

        :param enable: boolean -- True to enable continuous mode,
                       False to disable.
        """
        # The DLL flag has inverted polarity with respect to this API.
        actual = not enable
        err = IC_GrabberDLL.set_continuous_mode(self._handle,
                                                c_int(int(actual)))
        if err != 1:
            #raise IC_Exception(err)
            pass  # todo, always raises false error for some reason...?

    def send_trigger(self):
        """
        Send a software trigger to fire the device when in triggered mode.
        """
        err = IC_GrabberDLL.software_trigger(self._handle)
        if err != 1:
            raise IC_Exception(err)

    def prepare_live(self, show_display=False):
        """
        Prepare the device for live video.
        """
        err = IC_GrabberDLL.prepare_live(self._handle,
                                         c_int(int(show_display)))
        if err != 1:
            raise IC_Exception(err)

    def start_live(self, show_display=False):
        """
        Start the live video.
        """
        err = IC_GrabberDLL.start_live(self._handle,
                                       c_int(int(show_display)))
        if err != 1:
            # Propagate the real DLL error code (was hard-coded to -1).
            raise IC_Exception(err)

    def suspend_live(self):
        """
        Suspend the live video and put into a prepared state.
        """
        err = IC_GrabberDLL.suspend_live(self._handle)
        if err != 1:
            raise IC_Exception(err)

    def stop_live(self):
        """
        Stop the live video.
        """
        IC_GrabberDLL.stop_live(self._handle)

    def get_image_description(self):
        """
        Get image info.

        :returns: tuple -- (image width, image height, image depth,
                  color format).
        """
        img_width = c_long()
        img_height = c_long()
        img_depth = c_int()
        color_format = c_int()

        err = IC_GrabberDLL.get_image_description(self._handle,
                                                  byref(img_width),
                                                  byref(img_height),
                                                  byref(img_depth),
                                                  byref(color_format),
                                                  )
        return (img_width.value,
                img_height.value,
                img_depth.value,
                color_format.value)

    def snap_image(self, timeout=1000):
        """
        Snap an image. Device must be set to live mode and a format must
        be set.

        :param timeout: int -- time out in milliseconds.
        """
        err = IC_GrabberDLL.snap_image(self._handle, c_int(timeout))
        if err != 1:
            raise IC_Exception(err)

    def get_image_ptr(self):
        """
        Get image buffer from camera.

        :returns: ctypes pointer -- pointer to image data.
        """
        img_ptr = IC_GrabberDLL.get_image_ptr(self._handle)
        if img_ptr is None:
            raise IC_Exception(-1)
        return img_ptr

    def get_image_data(self):
        """
        Get image data.

        :returns: tuple -- (ctypes.c_ubyte array, width, height, depth).
        """
        image_size = self.get_image_description()[:3]
        img_width = image_size[0]
        img_height = image_size[1]
        # Bits per pixel -> bytes per pixel. Must be integer division:
        # "/ 8" yields a float in Python 3, which breaks both the buffer
        # size below and (c_ubyte * buffer_size).
        img_depth = image_size[2] // 8
        buffer_size = img_width * img_height * img_depth * sizeof(c_uint8)

        img_ptr = self.get_image_ptr()
        data = cast(img_ptr, POINTER(c_ubyte * buffer_size))
        return (data.contents, img_width, img_height, img_depth)

    def save_image(self, filename, filetype=1, jpeq_quality=75):
        """
        Save the contents of the last snapped image into a file.

        :param filename: string -- filename to name saved file.
        :param filetype: int -- 0 = BMP, 1 = JPEG.
        :param jpeq_quality: int -- JPEG file quality, 0-100.
        """
        err = IC_GrabberDLL.save_image(self._handle,
                                       c_char_p(filename.encode()),
                                       c_int(filetype),
                                       c_long(jpeq_quality))
        if err != 1:
            raise IC_Exception(err)

    # Generate the callback function so it is not a bound method
    # (cb_func cannot have the self parameter).
    def _get_callback_func(self):
        def cb_func(handle_ptr, p_data, frame_num, data):
            # Runs on the DLL's thread; only flips the shared frame flags.
            self._frame['ready'] = True
            self._frame['num'] = frame_num
        return C_FRAME_READY_CALLBACK(cb_func)

    def register_frame_ready_callback(self):
        """
        Register the frame ready callback with the device.
        """
        # Keep a reference to prevent garbage collection of the callback.
        self._rfrc_func = self._get_callback_func()

        # Register callback function with the DLL. Instead of passing a
        # pointer to a variable (3rd param) we set the flag ourselves.
        IC_GrabberDLL.set_frame_ready_callback(self._handle,
                                               self._rfrc_func,
                                               None)
        self._callback_registered = True

    def reset_frame_ready(self):
        """
        Reset the frame ready flag to False, generally so that
        wait_til_frame_ready() can be called again.
        """
        self._frame['ready'] = False
        self._frame['num'] = -1

    def wait_til_frame_ready(self, timeout=0):
        """
        Wait until the device announces a frame as being ready.
        Requires register_frame_ready_callback() being called.

        :param timeout: int -- timeout in milliseconds. Set to 0 for no
                        timeout.

        :returns: int -- frame number that was announced as ready.
        """
        if timeout:
            # time.clock() was removed in Python 3.8; perf_counter() is
            # the documented replacement for interval timing.
            start = time.perf_counter()
            elapsed = (time.perf_counter() - start) * 1000
            while not self._frame['ready'] and elapsed < timeout:
                time.sleep(0.001)
                elapsed = (time.perf_counter() - start) * 1000
        else:
            while not self._frame['ready']:
                time.sleep(0.001)
        return self._frame['num']
{ "repo_name": "amryfitra/icpy3", "path": "icpy3/ic_camera.py", "copies": "1", "size": "15862", "license": "mit", "hash": -5741563388565190000, "line_mean": 31.7051546392, "line_max": 105, "alpha_frac": 0.5535871895, "autogenerated": false, "ratio": 3.8668941979522184, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9901023823851258, "avg_score": 0.0038915127201921626, "num_lines": 485 }
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'amryf'

from .ic_grabber_dll import IC_GrabberDLL
from .ic_camera import IC_Camera
from .ic_exception import IC_Exception
from IPython import embed


class IC_ImagingControl(object):
    """Thin lifecycle wrapper for the IC Imaging Control library."""

    def init_library(self):
        """Initialise the IC Imaging Control library."""
        # Remember every camera we hand out so close_library() can
        # release the underlying grabber handles.
        self._devices = []
        # No license key is needed anymore.
        status = IC_GrabberDLL.init_library(None)
        if status != 1:
            raise IC_Exception(status)

    def get_device_by_dialog(self):
        """Open a camera chosen interactively via the device dialog."""
        cam = IC_Camera(show_dialog=True)
        self._devices.append(cam)
        return cam

    def get_device_by_file(self, filename):
        """Open a camera configured from a saved device-state file."""
        cam = IC_Camera(setting_file=filename)
        self._devices.append(cam)
        return cam

    def close_library(self):
        """
        Close the IC Imaging Control library, and close and release all
        references to camera devices.
        """
        # Release the grabber handles of cameras we created -- but only
        # those, since we own the reference to them.
        for cam in self._devices:
            IC_GrabberDLL.release_grabber(cam._handle)
        self._devices = None

        IC_GrabberDLL.close_library()
{ "repo_name": "amryfitra/icpy3", "path": "icpy3/ic_imaging_control.py", "copies": "1", "size": "1381", "license": "mit", "hash": 8231136200852709000, "line_mean": 28.3829787234, "line_max": 101, "alpha_frac": 0.6249094859, "autogenerated": false, "ratio": 3.8254847645429364, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9937125342640345, "avg_score": 0.002653781560518363, "num_lines": 47 }
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# NOTE: shebang moved to the first line (the original had __author__ above
# it, which disabled it).
__author__ = 'amryf'

from ctypes import *

from .ic_grabber_dll import IC_GrabberDLL
from .ic_exception import IC_Exception


class IC_Property(object):
    """A single video or camera property (gain, exposure, ...) of a device.

    Dispatches to the matching "video" or "camera" family of DLL calls
    based on the property name.
    """

    @property
    def available(self):
        """True if the device supports this property."""
        # DLL returns an int boolean.
        iav = self._avail_funcs[self._prop_type](self._handle,
                                                 c_int(self._prop_index))
        return bool(iav)

    @property
    def auto_available(self):
        """True if the device can automate this property."""
        iaa = self._auto_avail_funcs[self._prop_type](self._handle,
                                                      c_int(self._prop_index))
        return bool(iaa)

    @property
    def range(self):
        """
        Get valid range of values for the property.

        :returns: tuple -- (range min, range max).
        """
        rmin = c_long()
        rmax = c_long()
        err = self._range_funcs[self._prop_type](self._handle,
                                                 c_int(self._prop_index),
                                                 byref(rmin),
                                                 byref(rmax))
        if err != 1:
            raise IC_Exception(err)
        else:
            return (rmin.value, rmax.value)

    @property
    def min(self):
        """Minimum valid value of the property."""
        return self.range[0]

    @property
    def max(self):
        """Maximum valid value of the property."""
        return self.range[1]

    @property
    def value(self):
        """Current value of the property."""
        val = c_long()
        err = self._get_value_funcs[self._prop_type](self._handle,
                                                     c_int(self._prop_index),
                                                     byref(val))
        if err != 1:
            raise IC_Exception(err)
        else:
            return val.value

    @value.setter
    def value(self, val):
        """Set the property value (disables automation first)."""
        # Turn off auto first, otherwise the manual value is ignored.
        self.auto = False
        err = self._set_value_funcs[self._prop_type](self._handle,
                                                     c_int(self._prop_index),
                                                     c_long(val))
        if err != 1:
            raise IC_Exception(err)

    @property
    def auto(self):
        """True if the property is currently automated by the device."""
        aut = c_int()
        err = self._get_auto_funcs[self._prop_type](self._handle,
                                                    c_int(self._prop_index),
                                                    byref(aut))
        if err != 1:
            raise IC_Exception(err)
        else:
            return bool(aut.value)

    @auto.setter
    def auto(self, aut):
        """Enable or disable automation of the property."""
        err = self._set_auto_funcs[self._prop_type](self._handle,
                                                    c_int(self._prop_index),
                                                    c_long(int(aut)))
        if err != 1:
            raise IC_Exception(err)

    @property
    def type(self):
        """Either 'video' or 'camera'."""
        return self._prop_type

    @staticmethod
    def get_video_property_names():
        """Names of the video properties, in DLL index order."""
        return ['brightness',
                'contrast',
                'hue',
                'saturation',
                'sharpness',
                'gamma',
                'colorenable',
                'whitebalance',
                'blacklightcompensation',
                'gain']

    @staticmethod
    def get_camera_property_names():
        """Names of the camera properties, in DLL index order."""
        return ['pan',
                'tilt',
                'roll',
                'zoom',
                'exposure',
                'iris',
                'focus']

    @staticmethod
    def get_all_property_names():
        """All known property names (video first, then camera)."""
        return IC_Property.get_video_property_names() + IC_Property.get_camera_property_names()

    def __init__(self, handle, name):
        """
        :param handle: grabber handle of the owning device.
        :param name: string -- property name; must be one of
                     get_all_property_names().
        :raises IC_Exception: when *name* is not a known property.
        """
        self._handle = handle
        self._prop_name = name

        # Dispatch tables: one DLL entry point per property family.
        self._avail_funcs = {
            'video': IC_GrabberDLL.is_video_property_available,
            'camera': IC_GrabberDLL.is_camera_property_available}

        self._auto_avail_funcs = {
            'video': IC_GrabberDLL.is_video_property_auto_available,
            'camera': IC_GrabberDLL.is_camera_property_auto_available}

        self._range_funcs = {
            'video': IC_GrabberDLL.video_property_get_range,
            'camera': IC_GrabberDLL.camera_property_get_range}

        self._get_value_funcs = {
            'video': IC_GrabberDLL.get_video_property,
            'camera': IC_GrabberDLL.get_camera_property}

        self._set_value_funcs = {
            'video': IC_GrabberDLL.set_video_property,
            'camera': IC_GrabberDLL.set_camera_property}

        self._get_auto_funcs = {
            'video': IC_GrabberDLL.get_auto_video_property,
            'camera': IC_GrabberDLL.get_auto_camera_property}

        self._set_auto_funcs = {
            'video': IC_GrabberDLL.enable_auto_video_property,
            'camera': IC_GrabberDLL.enable_auto_camera_property}

        vid_props = IC_Property.get_video_property_names()
        cam_props = IC_Property.get_camera_property_names()
        if name in vid_props:
            self._prop_type = 'video'
            self._prop_index = vid_props.index(name)
        elif name in cam_props:
            self._prop_type = 'camera'
            self._prop_index = cam_props.index(name)
        else:
            # BUG FIX: the original raised IC_Exception(todo) where `todo`
            # was an undefined name, so this path crashed with NameError
            # instead of reporting the bad property name.
            raise IC_Exception(-1)
{ "repo_name": "amryfitra/icpy3", "path": "icpy3/ic_property.py", "copies": "1", "size": "5730", "license": "mit", "hash": 3459619793558446000, "line_mean": 29, "line_max": 99, "alpha_frac": 0.4385689354, "autogenerated": false, "ratio": 4.137184115523466, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5075753050923466, "avg_score": null, "num_lines": null }
__author__ = 'amw'

import numpy as np
from sklearn.gaussian_process import GaussianProcess


class GaussianProcessInterpolator:
    """Fits a Gaussian process to (point, value) observations and
    predicts values (with mean-squared error) at new points."""

    def __init__(self, observations):
        # observations: sequence of (point, result) pairs.
        self.observations = observations
        self.gaussian_process = GaussianProcess(corr='cubic',
                                                theta0=1e-2,
                                                thetaL=1e-4,
                                                thetaU=1e-1,
                                                random_start=100)
        self._compute_model()

    def _compute_model(self):
        """Fit the GP to the stored observations."""
        points = [pair[0] for pair in self.observations]
        values = [pair[1] for pair in self.observations]

        X = np.atleast_2d(points)
        y = np.array(values).T

        self.gaussian_process.fit(X, y)

    def compute_prediction(self, observation_points):
        """Predict values at *observation_points*.

        :returns: tuple -- (predicted values, mean squared error).
        """
        X = np.atleast_2d(observation_points)
        prediction, MSE = self.gaussian_process.predict(X, eval_MSE=True)
        return prediction, MSE
{ "repo_name": "ClockworkOrigins/m2etis", "path": "configurator/configurator/interpolation/GaussianProcessInterpolator.py", "copies": "1", "size": "1116", "license": "apache-2.0", "hash": -540534466864054400, "line_mean": 28.3684210526, "line_max": 118, "alpha_frac": 0.6944444444, "autogenerated": false, "ratio": 3.9857142857142858, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5180158730114286, "avg_score": null, "num_lines": null }
__author__ = 'Amy'

import os, zipfile
from .utils import makeDir
from library.core.utils import compress_dir, unzip

# Resources inside each plugin directory that must NOT be packed into the
# distribution zip (runtime data, logs, generated keys, ...).
no_zip_res = {
    'mongodb-2.4.5': [
        "data/",
        "logs/",
    ],
    "mysql-5.1": [
        'data/',
    ],
    "openssl-1.9.8": [
        'certs/',
    ],
    'nginx-1.5.12': [
        # BUG FIX: the original list was missing the commas between these
        # two strings, so Python silently concatenated them into one
        # nonsensical path and neither key file was excluded.
        'conf/certs/ptphp.com.key',
        'conf/certs/ptphp.net.key',
    ]
}


def get_zip_version(name, dir):
    """Return the next version number for plugin *name* in *dir*.

    Scans existing "<name>-<n>.zip" files and returns max(n) + 1, or 1
    when none exist.
    """
    path = os.path.join(dir, name)
    print("get version :" + path)
    versions = []
    for entry in os.listdir(dir):
        if name in entry and ".zip" in entry:
            versions.append(int(entry.replace(name + "-", "").replace(".zip", "")))
    if versions:
        latest = sorted(versions, reverse=True)[0]
    else:
        latest = 0
    return latest + 1


def zip_local_all(dir):
    """Zip every plugin directory found under <dir>/local."""
    dir_local = os.path.join(dir, "local")
    for plugin_name in os.listdir(dir_local):
        zip_local(dir, plugin_name)


def zip_local(dir, name):
    """Zip plugin *name* from <dir>/local into <dir>/local_zip,
    skipping the resources listed in no_zip_res."""
    no_zip_res_plugin = []
    if name in no_zip_res.keys():
        print(name)
        no_zip_res_plugin = no_zip_res[name]
    dir_local = os.path.join(dir, "local")
    dir_local_zip = os.path.join(dir, "local_zip")
    version = get_zip_version(name, dir_local_zip)
    plugin_name_zip_file_name = name + "-" + str(version) + ".zip"
    print(plugin_name_zip_file_name)
    plugin_name_zip_file_name_path = os.path.join(dir_local_zip, plugin_name_zip_file_name)
    path_root = os.path.join(dir_local, name)
    if not os.path.isdir(path_root):
        print("no exists : " + path_root)
        return
    # compress_dir packs relative to the cwd, so chdir into the plugin.
    os.chdir(path_root)
    compress_dir("./", plugin_name_zip_file_name_path, no_zip_res_plugin)


def unzip_local(dir, zip_name):
    """Unpack *zip_name* into <dir>/<basename-without-.zip>, unless that
    directory already exists."""
    (dirname, filename) = os.path.split(zip_name)
    print((dirname, filename))
    dir = os.path.join(dir, filename.replace(".zip", ""))
    print(dir)
    if os.path.exists(dir):
        print("exists : " + dir)
        return
    if os.path.exists(zip_name):
        unzip(zip_name, dir)
    else:
        print("no exsits : " + zip_name)
{ "repo_name": "ptphp/PtServer", "path": "library/core/dist_zip.py", "copies": "1", "size": "2005", "license": "bsd-3-clause", "hash": 8255496568933224000, "line_mean": 24.3797468354, "line_max": 90, "alpha_frac": 0.5845386534, "autogenerated": false, "ratio": 3.024132730015083, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4108671383415083, "avg_score": null, "num_lines": null }
__author__ = 'Anand Madhavan'

# TODO(Anand) Remove this from pants proper when a code adjoinment mechanism exists
# or ok if/when thriftstore is open sourced as well

import os
import re
import subprocess

from collections import defaultdict

from twitter.common import log
from twitter.common.collections import OrderedSet
from twitter.common.dirutil import safe_mkdir

from twitter.pants import is_jvm
from twitter.pants.targets import JavaLibrary, JavaThriftstoreDMLLibrary, JavaThriftLibrary
from twitter.pants.tasks import TaskError
from twitter.pants.tasks.binary_utils import select_binary
from twitter.pants.tasks.code_gen import CodeGen


class ThriftstoreDMLGen(CodeGen):
  """Pants code-gen task that runs the thriftstore-admin tool to generate
  Java sources from JavaThriftstoreDMLLibrary targets, inserting synthetic
  JavaThriftLibrary/JavaLibrary targets into the build graph."""

  @classmethod
  def setup_parser(cls, option_group, args, mkflag):
    # Registers the --outdir flag for overriding the generated-code dir.
    option_group.add_option(mkflag("outdir"), dest="thriftstore_gen_create_outdir",
                            help="Emit thriftstore generated code in to this directory.")

  def __init__(self, context):
    CodeGen.__init__(self, context)
    # Path of the thriftstore-admin binary, from pants config.
    self.thriftstore_admin = context.config.get('thriftstore-dml-gen', 'thriftstore-admin')

    # Output dir: command-line flag wins over the configured workdir.
    self.output_dir = (context.options.thriftstore_gen_create_outdir or
                       context.config.get('thriftstore-dml-gen', 'workdir'))
    self.verbose = context.config.getbool('thriftstore-dml-gen', 'verbose')

    def create_javadeps():
      # Resolve the configured java dependencies of all generated code.
      gen_info = context.config.getlist('thriftstore-dml-gen', 'javadeps')
      deps = OrderedSet()
      for dep in gen_info:
        deps.update(context.resolve(dep))
      return deps

    def is_thriftstore_dml_instance(target):
      return isinstance(target, JavaThriftstoreDMLLibrary)

    # Resolved java library targets go in javadeps
    self.javadeps = create_javadeps()

    self.gen_thriftstore_java_dir = os.path.join(self.output_dir, 'gen-thriftstore-java')

    def insert_java_dml_targets():
      # Maps each DML target to its synthetic generated JavaLibrary.
      self.gen_dml_jls = {}
      # Create a synthetic java library for each dml target
      for dml_lib_target in context.targets(is_thriftstore_dml_instance):
        # Add one JavaThriftLibrary target
        # (first arg is the dir sources are relative to).
        thrift_dml_lib = self.context.add_new_target(dml_lib_target.target_base,
                                                     JavaThriftLibrary,
                                                     name=dml_lib_target.id,
                                                     sources=dml_lib_target.sources)
        # Add one generated JavaLibrary target (whose sources we will fill in later on)
        java_dml_lib = self.context.add_new_target(self.gen_thriftstore_java_dir,
                                                   JavaLibrary,
                                                   name=dml_lib_target.id,
                                                   sources=[],
                                                   dependencies=self.javadeps)
        java_dml_lib.id = dml_lib_target.id + '.thriftstore_dml_gen'
        java_dml_lib.add_label('codegen')
        java_dml_lib.update_dependencies([thrift_dml_lib])
        self.gen_dml_jls[dml_lib_target] = java_dml_lib
      # Rewire everything that depended on a DML target to also depend
      # on its generated JavaLibrary.
      for dependee, dmls in context.dependants(is_thriftstore_dml_instance).items():
        jls = map(lambda dml: self.gen_dml_jls[dml], dmls)
        dependee.update_dependencies(jls)

    insert_java_dml_targets()

  def invalidate_for(self):
    # NOTE(review): set('java') yields the character set {'j','a','v'},
    # not {'java'} -- confirm whether {'java'} was intended.
    return set('java')

  def is_gentarget(self, target):
    return isinstance(target, JavaThriftstoreDMLLibrary)

  def is_forced(self, lang):
    # Generation is always forced, regardless of language.
    return True

  def genlangs(self):
    return dict(java=is_jvm)

  def genlang(self, lang, targets):
    """Run thriftstore-admin dml over the sources of *targets*."""
    bases, sources = self._calculate_sources(targets)

    safe_mkdir(self.gen_thriftstore_java_dir)

    args = [
      self.thriftstore_admin,
      'dml',
      '-o', self.gen_thriftstore_java_dir
    ]

    if self.verbose:
      args.append('-verbose')

    args.extend(sources)
    self.context.log.debug('Executing: %s' % ' '.join(args))
    result = subprocess.call(args)
    if result!=0:
      raise TaskError()

  def _calculate_sources(self, thrift_targets):
    """Collect (target bases, absolute source paths) over the transitive
    closure of *thrift_targets*."""
    bases = set()
    sources = set()
    def collect_sources(target):
      if self.is_gentarget(target):
        bases.add(target.target_base)
        sources.update(os.path.join(target.target_base, source) for source in target.sources)
    for target in thrift_targets:
      target.walk(collect_sources)
    return bases, sources

  def createtarget(self, lang, gentarget, dependees):
    if lang == 'java':
      return self._create_java_target(gentarget)
    else:
      raise TaskError('Unrecognized thrift gen lang: %s' % lang)

  def _calculate_genfiles(self, source):
    """Ask thriftstore-admin which classes *source* will generate."""
    args = [
      self.thriftstore_admin,
      'parse',
      source
    ]
    self.context.log.debug('Executing: %s' % ' '.join(args))

    p = subprocess.Popen(args,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
    output, error = p.communicate()
    if p.wait() != 0:
      raise TaskError

    # One generated class per non-blank output line.
    thriftstore_classes = filter(lambda s: s.strip() != '', output.split('\n'))
    return thriftstore_classes

  def _create_java_target(self, target):
    """Fill in the sources of the synthetic JavaLibrary for *target* and
    return it."""
    genfiles = []
    for source in target.sources:
      genfiles.extend(self._calculate_genfiles(os.path.join(target.target_base, source)))
    self.gen_dml_jls[target].sources = genfiles
    return self.gen_dml_jls[target]
{ "repo_name": "foursquare/commons-old", "path": "src/python/twitter/pants/tasks/thriftstore_dml_gen.py", "copies": "1", "size": "5285", "license": "apache-2.0", "hash": -6438109501552313000, "line_mean": 35.198630137, "line_max": 116, "alpha_frac": 0.6467360454, "autogenerated": false, "ratio": 3.6983904828551433, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9805213948882391, "avg_score": 0.007982515874550412, "num_lines": 146 }
"""
Muon - simulates n product electrons from muon decays and counts the number of
spark events with a given number of sparks.

Each Muon instance holds:
    muon_energy - initial energy of the incident muon (expected range
                  [60 MeV, 140 MeV]).
    sparks      - list of [number_of_sparks, number_of_spark_events] pairs,
                  e.g. [[0, 32], [1, 23], ...]; spark counts run 0..7.
    residuals   - sum over spark counts 3..7 of
                  ((simulated - experimental events) / error)^2, since
                  experimental data exist only for 3..7 sparks.

Fixed class variables:
    t_p    - aluminum plate thickness (0.9525 cm).
    t_g    - spark gap thickness (0.635 cm).
    R      - aluminum plate radius (7.62 cm).
    X0     - radiation length of aluminum (8.9 cm).
    E_crit - energy at which ionization and bremsstrahlung stopping powers
             are equal (45 MeV).

The main method is spark(), which builds the spark-event histogram; z0, r,
theta, phi and E_e are sampled uniformly via the random library.
"""

__author__ = 'Anand'

from math import pi, log, cos, sin, sqrt, floor
from random import uniform


class Muon(object):

    # Class Variables #
    t_p = 0.9525    # aluminum plate thickness (cm)
    t_g = 0.635     # spark gap thickness (cm)
    R = 7.62        # aluminum plate radius (cm)
    X0 = 8.9        # radiation length of aluminum (cm)
    E_crit = 45     # critical energy of aluminum (MeV)

    # Constructor #
    def __init__(self, muon_energy, n):
        """Simulate n decay electrons at the given incident muon energy and
        record the normalized spark histogram and its residual."""
        self.muon_energy = muon_energy
        self.sparks = Muon.spark(muon_energy, n)
        self.residuals = Muon.residual(self.sparks)

    # Methods #
    @staticmethod
    def l(E_e):
        """Shower length (cm) of an electron with energy E_e (MeV)."""
        return Muon.X0 * log(1 + E_e / Muon.E_crit)

    @staticmethod
    def l_esc(r, phi, theta):
        """Effective path length (cm) before the electron escapes the chamber
        radially, scaled by the plate-filling fraction t_p / (t_p + t_g)."""
        return (-r * cos(phi) + sqrt((r ** 2) * (cos(phi) ** 2) + (Muon.R ** 2 - r ** 2))) / sin(theta) * (
            Muon.t_p / (Muon.t_p + Muon.t_g))

    @staticmethod
    def n_s(theta, z0, l):
        """Number of sparks: plates crossed by a track of length l starting at
        depth z0 with polar angle theta."""
        return 1 + floor((l * cos(theta) - z0) / Muon.t_p)

    @staticmethod
    def n_sim(r, muon_energy, E_e):
        """Weight of one simulated event (decay-spectrum factor times r)."""
        return r * ((muon_energy * E_e) ** 2) * (3 - 4 * E_e / muon_energy)

    @staticmethod
    def bin(n_s, n_sim, sparks):
        """Add the event weight n_sim to spark-count bin n_s.

        Replaces the original eight-branch if chain with a single bounds-checked
        update; out-of-range counts are silently dropped, exactly as before.
        """
        if 0 <= n_s <= 7:
            sparks[int(n_s)][1] += n_sim

    @staticmethod
    def spark(muon_energy, n):
        """Simulate n decay electrons and return the spark histogram,
        normalized so the event weights sum to 43."""
        sparks = [[0, 0], [1, 0], [2, 0], [3, 0], [4, 0], [5, 0], [6, 0], [7, 0]]

        # Generating sparks list.
        for i in range(0, n):
            # Randomly assign a value of z0 = [0, t_p].
            z0 = uniform(0, Muon.t_p)

            # Randomly assign a value of r = [0, R/2].
            r = uniform(0, Muon.R / 2)

            # Randomly assign a value of theta = [0, pi/2].
            # Note: incidentally, none of the sampled values are exactly 0 or pi/2.
            theta = uniform(0, pi / 2)

            # Randomly assign a value of phi = [0, 2*pi].
            phi = uniform(0, 2 * pi)

            # Electron energy E_e = [0, muon_energy/2].
            E_e = uniform(0, muon_energy / 2)

            # Track length is the shower length, capped by the escape length.
            l = Muon.l(E_e)
            l_esc = Muon.l_esc(r, phi, theta)
            if l > l_esc:
                l = l_esc

            # Accumulate the event weight in the matching spark bin.
            n_s = Muon.n_s(theta, z0, l)
            n_sim = Muon.n_sim(r, muon_energy, E_e)
            Muon.bin(n_s, n_sim, sparks)

        # Normalize so the weights sum to 43 (the experimental event total);
        # guard against an empty histogram (e.g. n == 0) to avoid a
        # ZeroDivisionError.
        count = 0
        for i in range(0, 8):
            count += sparks[i][1]
        if count > 0:
            for i in range(0, 8):
                sparks[i][1] *= 43 / count
        return sparks

    @staticmethod
    def residual(sparks):
        """Sum of squared, error-weighted residuals over spark counts 3..7."""
        data = [3.949275, 7.137681, 13.514493, 15.615942, 4.021739]
        errors = [2.119565, 2.807971, 3.804348, 3.985507, 2.119565]
        count = 0
        for i in range(5):
            count += ((data[i] - sparks[i + 3][1]) ** 2) / (errors[i] ** 2)
        return count


######################################################
# MuonDriver - instantiates Muon objects, each with
# muon_energy, sparks, and residuals attributes.
# Guarded so importing the module does not run the
# (long) simulation or write output files.
######################################################
if __name__ == "__main__":
    Muon_Energy = []
    for energy in range(90, 111):
        Muon_Energy.append([energy, Muon(energy, 100000).residuals])
        print("Muon Energy: ", energy, "MeV")

    with open("excel.txt", "w") as f:
        f.write(str(Muon_Energy))
{ "repo_name": "adyavanapalli/Muon-Mass", "path": "rand_deg/Muon_Rand.py", "copies": "1", "size": "5283", "license": "mit", "hash": -8764700675439529000, "line_mean": 30.6407185629, "line_max": 107, "alpha_frac": 0.5371947757, "autogenerated": false, "ratio": 3.1149764150943398, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.415217119079434, "avg_score": null, "num_lines": null }
"""
Muon - simulates n product electrons from muon decays and counts the number of
spark events with a given number of sparks.  (PYTHON VERSION 3.4.3)

Each Muon instance holds:
    muon_energy - initial energy of the incident muon (expected range
                  [60 MeV, 140 MeV]).
    sparks      - list of [number_of_sparks, number_of_spark_events] pairs,
                  e.g. [[0, 32], [1, 23], ...]; spark counts run 0..7.
    residuals   - mean over spark counts 3..7 of
                  ((simulated - experimental events) / error)^2, since
                  experimental data exist only for 3..7 sparks.

Fixed class variables:
    t_p       - aluminum plate thickness (0.9525 cm).
    t_g       - spark gap thickness (0.635 cm).
    R         - aluminum plate radius (7.62 cm).
    theta_max - maximum product-electron polar angle (pi/6).
    X0        - radiation length of aluminum (8.9 cm).
    E_crit    - energy at which ionization and bremsstrahlung stopping powers
                are equal (45 MeV).

The main method is spark(), which builds the spark-event histogram; z0, r,
theta, phi and E_e are sampled uniformly via the random library.
"""

__author__ = 'Anand'

from math import pi, log, cos, sin, sqrt, floor
from random import uniform


class Muon(object):

    # Class Variables #
    t_p = 0.9525        # aluminum plate thickness (cm)
    t_g = 0.635         # spark gap thickness (cm)
    R = 7.62            # aluminum plate radius (cm)
    theta_max = pi / 6  # maximum product-electron polar angle
    X0 = 8.9            # radiation length of aluminum (cm)
    E_crit = 45         # critical energy of aluminum (MeV)

    # Constructor #
    def __init__(self, muon_energy, n):
        """Simulate n decay electrons at the given incident muon energy and
        record the normalized spark histogram and its residual."""
        self.muon_energy = muon_energy
        self.sparks = Muon.spark(muon_energy, n)
        self.residuals = Muon.residual(self.sparks)

    # Methods #
    @staticmethod
    def l(E_e):
        """Shower length (cm) of an electron with energy E_e (MeV)."""
        return Muon.X0 * log(1 + E_e / Muon.E_crit)

    @staticmethod
    def l_esc(r, phi, theta):
        """Effective path length (cm) before the electron escapes the chamber
        radially, scaled by the plate-filling fraction t_p / (t_p + t_g)."""
        return (-r * cos(phi) + sqrt((r ** 2) * (cos(phi) ** 2) + (Muon.R ** 2 - r ** 2))) / sin(theta) * (
            Muon.t_p / (Muon.t_p + Muon.t_g))

    @staticmethod
    def n_s(theta, z0, l):
        """Number of sparks: plates crossed by a track of length l starting at
        depth z0 with polar angle theta."""
        return 1 + floor((l * cos(theta) - z0) / Muon.t_p)

    @staticmethod
    def n_sim(r, muon_energy, E_e):
        """Weight of one simulated event (decay-spectrum factor times r)."""
        return r * ((muon_energy * E_e) ** 2) * (3 - 4 * E_e / muon_energy)

    @staticmethod
    def bin(n_s, n_sim, sparks):
        """Add the event weight n_sim to spark-count bin n_s.

        Replaces the original eight-branch elif chain with a single
        bounds-checked update; out-of-range counts are silently dropped,
        exactly as before.
        """
        if 0 <= n_s <= 7:
            sparks[int(n_s)][1] += n_sim

    @staticmethod
    def spark(muon_energy, n):
        """Simulate n decay electrons and return the spark histogram,
        normalized so the event weights sum to 43."""
        sparks = [[0, 0], [1, 0], [2, 0], [3, 0], [4, 0], [5, 0], [6, 0], [7, 0]]

        # Generating sparks list.
        for i in range(0, n):
            # Randomly assign a value of z0 = [0, t_p].
            z0 = uniform(0, Muon.t_p)

            # Randomly assign a value of r = [0, R/2].
            r = uniform(0, Muon.R / 2)

            # Randomly assign a value of theta = [0, theta_max].
            theta = uniform(0, Muon.theta_max)

            # Randomly assign a value of phi = [0, 2*pi].
            phi = uniform(0, 2 * pi)

            # Electron energy E_e = [0, muon_energy/2].
            E_e = uniform(0, muon_energy / 2)

            # Track length is the shower length, capped by the escape length.
            l = Muon.l(E_e)
            l_esc = Muon.l_esc(r, phi, theta)
            if l > l_esc:
                l = l_esc

            # Accumulate the event weight in the matching spark bin.
            n_s = Muon.n_s(theta, z0, l)
            n_sim = Muon.n_sim(r, muon_energy, E_e)
            Muon.bin(n_s, n_sim, sparks)

        # Normalize so the weights sum to 43 (the experimental event total);
        # guard against an empty histogram (e.g. n == 0) to avoid a
        # ZeroDivisionError.
        count = 0
        for i in range(0, 8):
            count += sparks[i][1]
        if count > 0:
            for i in range(0, 8):
                sparks[i][1] *= 43 / count
        return sparks

    @staticmethod
    def residual(sparks):
        """Mean of squared, error-weighted residuals over spark counts 3..7."""
        data = [3.949275, 7.137681, 13.514493, 15.615942, 4.021739]
        errors = [2.119565, 2.807971, 3.804348, 3.985507, 2.119565]
        count = 0
        for i in range(5):
            count += ((data[i] - sparks[i + 3][1]) ** 2) / (errors[i] ** 2) / 5
        return count


######################################################
# MuonDriver - instantiates Muon objects, each with
# muon_energy, sparks, and residuals attributes.
# Guarded so importing the module does not run the
# (long) simulation or write output files.
######################################################
if __name__ == "__main__":
    Muon_Energy = []
    for energy in range(1040, 1081):
        Muon_Energy.append([energy / 10, Muon(energy / 10, 100000).residuals])
        print("Muon Energy: ", energy / 10, "MeV")

    with open("excel5.txt", "w") as f:
        f.write(str(Muon_Energy))
{ "repo_name": "adyavanapalli/Muon-Mass", "path": "final_data/Muon.py", "copies": "1", "size": "5350", "license": "mit", "hash": -7055505224381448000, "line_mean": 30.4764705882, "line_max": 107, "alpha_frac": 0.5407476636, "autogenerated": false, "ratio": 3.101449275362319, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.912893558511604, "avg_score": 0.002652270769255764, "num_lines": 170 }
__author__ = 'Anand'

"""
This class will act like the game administrator. It should be connected by the person handling the tournament.
This 'player' will manage the current tournament, game, number of players, etc...
"""
# NOTE(review): __author__ is assigned twice; this second assignment overwrites
# the 'Anand' value above.
__author__ = 'Paul Council'

import time
from ClientPackage.GameMasterClient import *
from AvailablePlayers.GMPlayer import *
from AvailablePlayers.TestPlayer1 import *
from AvailablePlayers.BEPCPlayer import *
from AvailablePlayers.TestPlayer2 import *

# Server room address. NOTE(review): this script performs live network I/O at
# import time -- running it connects to HOST immediately.
HOST = "150.250.190.192"

# Build the game-master 'player' and wrap it in the client that talks to the
# server room.
my_player = GMPlayer()
game_controller = GameMasterClient(my_player)
# player1 = TestPlayer1()
# player2 = TestPlayer1()
# client1 = PlayerClient(player1)
# client2 = PlayerClient(player2)

# attempt to connect game controller to the server room
game_controller.client_connect(host=HOST)
# client1.client_connect(host=HOST)
# client2.client_connect(host=HOST)
#
# verify connection
game_controller.verify_connection()
# client1.verify_connection()
# client2.verify_connection()

# TODO set the tournament style
# game_controller.set_tournament()

# TODO set the game to play
# game_controller.set_game()

# open the room for registration
game_controller.open_tournament_registration()

# time.sleep(10)

# allow 1 minute for registration before closing
# client1.register_player()
# client2.register_player()
# client1.verify_registration()
# client2.verify_registration()

# close the room for registration
# game_controller.close_tournament_registration()

# Configure and start the game: mark it active and cap it at 3 rounds.
game_controller.set_game_status(True)
game_controller.set_max_rounds(3)
game_controller.get_game_status()

# Give players time to join before listing them.
time.sleep(10)
# get list of ready players
game_controller.list_registered_players()

# create player pairs based on ID
# game_controller.create_match_pairs()
#game_controller.create_all_available_matches()
game_controller.close_tournament_registration()
time.sleep(2)
# client1.submit_move()
# #time.sleep(2)
# client2.submit_move()
#
# time.sleep(2)
#
# #this was being called above and it wasn't working
#
#game_controller.create_match_pairs()
#
# # generate all matches we can using the player_id pairs from above
# # game_controller.create_all_available_matches()
# time.sleep(2)
# #time.sleep(15)
#
# # run all ready matches
# game_controller.run_available_matches()
#
#
# client1.get_round_results()
# client2.get_round_results()

#for x in range(0,6):
    #client1.submit_move()
    #client2.submit_move()
    #time.sleep(2)
    #game_controller.run_available_matches()
    #time.sleep(2)
    #client1.get_round_results()
    #client2.get_round_results()
    #time.sleep(2)

# close the connection
game_controller.close_connection()
# client1.close_connection()
# client2.close_connection()
{ "repo_name": "PaulieC/sprint3-Council", "path": "ClientPackage/EasyGameController.py", "copies": "1", "size": "2661", "license": "apache-2.0", "hash": -903276169993125400, "line_mean": 25.8787878788, "line_max": 98, "alpha_frac": 0.7534761368, "autogenerated": false, "ratio": 3.249084249084249, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4502560385884249, "avg_score": null, "num_lines": null }
__author__ = 'Anand Patil, anand.prabhakar.patil@gmail.com'

from pymc import *


def find_generations(stochastics):
    """
    A generation is the set of stochastic variables that only has parents in
    previous generations.

    Partitions the given stochastics into a list of such generations, root
    generation first.  NOTE(review): uses set operators ('&', '-') on
    'stochastics', so it must be passed as a set -- confirm at call sites.
    """
    generations = []

    # Find root generation: stochastics that are nobody's child within the set.
    generations.append(set())
    all_children = set()
    for s in stochastics:
        all_children.update(s.extended_children & stochastics)
    generations[0] = stochastics - all_children

    # Find subsequent _generations
    children_remaining = True
    gen_num = 0
    while children_remaining:
        gen_num += 1

        # Find children of last generation
        generations.append(set())
        for s in generations[gen_num-1]:
            generations[gen_num].update(s.extended_children & stochastics)

        # Take away stochastics that have parents in the current generation.
        thisgen_children = set()
        for s in generations[gen_num]:
            thisgen_children.update(s.extended_children & stochastics)
        generations[gen_num] -= thisgen_children

        # Stop when no subsequent _generations remain
        if len(thisgen_children) == 0:
            children_remaining = False
    return generations


def ravel_submodel(stochastic_list):
    """
    Takes a list of stochastics and returns:
        - Indices corresponding to each,
        - Length of each,
        - Slices corresponding to each,
        - Total length,
    """
    N_stochastics = len(stochastic_list)
    stochastic_indices = []
    stochastic_len = np.zeros(N_stochastics, dtype=int)
    slices = np.zeros(N_stochastics, dtype=object)
    _len = 0

    for i in xrange(len(stochastic_list)):

        stochastic = stochastic_list[i]

        # Inspect shapes of all stochastics and create stochastic slices.
        if isinstance(stochastic.value, np.ndarray):
            stochastic_len[i] = len(np.ravel(stochastic.value))
        else:
            stochastic_len[i] = 1
        slices[i] = slice(_len, _len + stochastic_len[i])
        _len += stochastic_len[i]

        # Record indices that correspond to each stochastic:
        # one (stochastic, flat_index) pair per scalar element of its value.
        for j in xrange(len(np.ravel(stochastic.value))):
            stochastic_indices.append((stochastic, j))

    return stochastic_indices, stochastic_len, slices, _len


def set_ravelled_stochastic_values(vec, stochastics, slices):
    # Writes slices of the flat vector 'vec' back into each stochastic's value,
    # restoring its original shape.
    # NOTE(review): 'slices' is indexed by stochastic object here, whereas
    # ravel_submodel returns an array indexed by position -- callers of this
    # function appear to need a mapping keyed by stochastic; confirm.
    for stochastic in stochastics:
        stochastic.value = vec[slices[stochastic]].reshape(np.shape(stochastic.value))


def find_children_and_parents(stochastic_list):
    # 'children': stochastics in the list none of whose children are in the
    # list (i.e. the frontier towards the rest of the model).
    # 'parents': stochastics in the list none of whose parents are in the list.
    children = []
    parents = []
    for s in stochastic_list:
        if len(s.extended_children) > 0:
            if all([not child in stochastic_list for child in s.extended_children]):
                children.append(s)
        if all([not parent in stochastic_list for parent in s.extended_parents]):
            parents.append(s)

    return set(children), set(parents)


def order_stochastic_list(stochastics):
    # Flattens the generations from find_generations into a single list,
    # last generation first.
    generations = find_generations(stochastics)
    out = []
    for generation in generations[::-1]:
        out += list(generation)
    return out
{ "repo_name": "matthew-brett/pymc", "path": "pymc/sandbox/graphical_utils.py", "copies": "1", "size": "3112", "license": "mit", "hash": -1992872839540568600, "line_mean": 29.213592233, "line_max": 86, "alpha_frac": 0.6452442159, "autogenerated": false, "ratio": 3.7314148681055155, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.986672175157044, "avg_score": 0.0019874664870150912, "num_lines": 103 }
__author__ = 'Anand Patil, anand.prabhakar.patil@gmail.com' from pymc.NormalApproximation import * import pymc as pm import numpy as np class EM(MAP): """ N = EM(input, sampler, db='ram', eps=.001, diff_order = 5) Normal approximation to the posterior of a model via the EM algorithm. Useful methods: draw: Draws values for all stochastic variables using normal approximation revert_to_max: Sets all stochastic variables to mean value under normal approximation fit: Finds the normal approximation. Useful attributes (after fit() is called): mu[p1, p2, ...]: Returns the posterior mean vector of stochastic variables p1, p2, ... C[p1, p2, ...]: Returns the posterior covariance of stochastic variables p1, p2, ... logp: Returns the log-probability of the model logp_at_max: Returns the maximum log-probability of the model len: The number of free stochastic variables in the model ('k' in AIC and BIC) data_len: The number of datapoints used ('n' in BIC) AIC: Akaike's Information Criterion for the model BIC: Bayesian Information Criterion for the model :Arguments: input: As for Model sampler: Should be a Sampler instance handling a submodel of input. The variables in sampler will be integrated out; only the marginal probability of the other variables in input will be maximized. The 'expectation' step will be computed using samples obtained from the sampler. db: A database backend. eps: 'h' for computing numerical derivatives. May be a dictionary keyed by stochastic variable as well as a scalar. diff_order: The order of the approximation used to compute derivatives. 
:SeeAlso: Model, NormApprox, Sampler, scipy.optimize """ def __init__(self, input, sampler, db='ram', eps=.001, diff_order = 5, verbose=0, tune_interval=10): Q = pm.Container(input) new_input = (Q.nodes | sampler.nodes) - sampler.stochastics MAP.__init__(self, input=new_input, eps=eps, diff_order=diff_order) self.tune_interval = tune_interval self.verbose = verbose self.sampler = sampler # Figure out which stochastics' log-probabilities need to be averaged. self.stochastics_to_integrate = set() for s in self.stochastics: mb = s.markov_blanket if any([other_s in mb for other_s in sampler.stochastics]): self.stochastics_to_integrate.add(s) def fit(self, iterlim=1000, tol=.0001, na_method = 'fmin', na_iterlim=1000, na_tol=.0001, sa_iter = 10000, sa_burn=1000, sa_thin=10): """ N.fit(iterlim=1000, tol=.0001, na_method='fmin', na_iterlim=1000, na_tol=.0001, sa_iter = 10000, sa_burn=1000, sa_thin=10) Arguments 'iterlim' and 'tol' control the top-level EM iteration. Arguments beginning with 'na' are passed to NormApprox.fit() during the M steps. Arguments beginning with 'sa' are passed to self.sampler during the E-steps. The 'E' step consists of running the sampler, which will keep a trace. In the 'M' step, the log-probability of variables in the sampler's Markov blanket are averaged and combined with the log-probabilities of self's other variables to produce a joint log-probability. This quantity is maximized. """ logps = [] for i in xrange(iterlim): # E step self.sampler.sample(sa_iter, sa_burn, sa_thin, verbose = self.verbose, tune_interval = self.tune_interval) # M step MAP.fit(self, method = na_method, iterlim=na_iterlim, tol=na_tol, post_fit_computations=False, verbose=self.verbose) logps.append(self.logp) if abs(logps[i-1] - logps[i])<= tol: print 'EM algorithm converged' break if i == iterlim-1: print 'EM algorithm: Maximum number of iterations exceeded.' 
self.post_fit_computations() def i_logp(self, index): """ Evaluates the log-probability of the Markov blanket of a stochastic owning a particular index. Used for computing derivatives. Averages over the sampler's trace for variables in the sampler's Markov blanket. """ all_relevant_stochastics = set() p,i = self.stochastic_indices[index] logps = [] # If needed, run an MCMC loop and use those samples. if p in self.stochastics_to_integrate: for i in xrange(sampler.db.length): sampler.remember(i-1) try: logps.append(p.logp + np.sum([child.logp for child in self.extended_children[p]])) except ZeroProbability: return -Inf return mean(logps) # Otherwise, just return the log-probability of the Markov blanket. else: try: return p.logp + np.sum([child.logp for child in self.extended_children[p]]) except pm.ZeroProbability: return -np.Inf def func(self, p): """ The function that gets passed to the optimizers. """ self._set_stochastics(p) logps = [] for i in xrange(sampler.db.length): sampler.remember(i-1) try: logps.append(self.logp) except pm.ZeroProbability: return np.Inf return -np.mean(logps) class SEM(EM, NormApprox): """ Normal approximation via SEM algorithm """ pass
{ "repo_name": "matthew-brett/pymc", "path": "pymc/sandbox/EM.py", "copies": "1", "size": "5716", "license": "mit", "hash": -2909140508251264000, "line_mean": 37.8843537415, "line_max": 128, "alpha_frac": 0.616340098, "autogenerated": false, "ratio": 3.9017064846416383, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9999858434747971, "avg_score": 0.0036376295787332196, "num_lines": 147 }
__author__ = 'Anand Patil, anand.prabhakar.patil@gmail.com'

"""
Dirichlet process classes:

- DPRealization: A Dirichlet process realization. Based on stick-breaking
    representation, but step methods should use other representations.
    Attributes:
    - atoms: A list containing the atom locations.
    Methods:
    - rand(m): Returns m random values.
    - logp(x): A function returning the log-probability of x.

DP: A stochastic valued as a DP realization.
DPDraw: A stochastic distributed as a DP object's value.

Neal cite: Markov chain random methods for Dirichlet process mixture models.

Also study up on Gibbs sampler.

This should all be written in Pyrex eventually. Many things are screaming for
optimization. The C++ vector class would be helpful too but that would have to
be swigged.
"""

import numpy as np
from copy import copy
from pymc import *
# NOTE(review): several unqualified names below (rbeta, rdirichlet, rnormal,
# normal_like, log, Inf, flib, Stochastic, lam_dtrm, uniform, flib.rcat) come
# from pymc's star import.


def draws_to_atoms(draws):
    """
    atoms, n = draws_to_atoms(draws)

    atoms is a list of the unique elements in draws, and n is a list of their
    corresponding multiplicities.

    Needs optimization badly I'm sure.
    """
    atoms = []
    n = []
    for element in np.atleast_1d(draws):
        match=False
        # Linear scan over atoms found so far; all(...) makes the comparison
        # work for array-valued elements as well as scalars.
        for i in xrange(len(atoms)):
            if all(element == atoms[i]):
                n[i] += 1
                match=True
                break
        if not match:
            atoms.append(element)
            n.append(1)
    return atoms, n


try:
    import pylab as pl

    def plot_atoms(DPr):
        """
        plot_atoms(DPr)

        Plots the atoms of DP realization DPr.
        Base measure must be over the real line.
        """
        # NOTE(review): 'plot' is unqualified; it resolves via the module-level
        # 'from pylab import *' further down this file -- confirm.
        for pair in zip(DPr.atoms, DPr.n):
            plot([pair[0], pair[0]], [0,pair[1]], 'k-')
except ImportError:
    pass


class DPRealization(object):
    """
    A Dirichlet process realization. This is based on the stick-breaking
    representation rather than the Chinese restaurant process in order to
    provide a logp method. Step methods are free to use the Chinese restaurant
    process, though.

    Arguments:
        - basemeas: The base measure. Must be a function which, when called
          with argument n, returns a value.
        - nu: The whatever parameter.
        - draws (optional): DPRealization can be initialized conditional on
          previous draws. Useful for Gibbs sampling, maybe MH too.
        - basemeas_params: The parameters of the base measure.

    Methods:
        - rand(m): Returns m random values.
        - logp(x): Returns the log-probability of x.
    """
    # NOTE(review): 'draws=[]' is a mutable default argument; it is not
    # mutated in __init__, so instances do not share state, but callers
    # should not rely on that.
    def __init__(self, basemeas_rand, nu, draws=[], **basemeas_params):

        # The base measure and its parameters.
        self.basemeas_rand = basemeas_rand
        self.basemeas_params = basemeas_params

        # The tightness parameter.
        self.nu = np.float(nu)

        if len(draws)>0:
            atoms, n = draws_to_atoms(draws)

            # The number of draws from each atom.
            self.n = n
            # The values of the atoms.
            self.atoms = atoms

            # Need to triple-check that this is OK!
            # The probability masses of the atoms.
            mass_sofar = rbeta(sum(n), nu)
            if len(n) > 1:
                self.mass = list((rdirichlet(n) * mass_sofar).squeeze())
            else:
                self.mass = [mass_sofar]

            self.mass_sofar = mass_sofar
            # Running product of (1 - mass) over all atoms: the stick length
            # remaining for future atoms.
            self.mass_prod = 1.
            for m in self.mass:
                self.mass_prod *= (1.-m)
        else:
            # No conditioning draws: start with an empty (unbroken) stick.
            self.n = []
            self.atoms = []
            self.mass = []
            self.mass_sofar = 0.
            self.mass_prod = 1.

    def logp(self, value):
        """
        F.logp(x)

        Returns the log of the probability mass assigned to x.

        Returns -Inf if x is not in self.atoms; this behavior is fine for
        continuous base distributions but incorrect for discrete.
        """
        logp_out = 0.
        value = np.atleast_1d(value)

        for val_now in value:
            match=False
            for i in xrange(len(self.atoms)):
                if all(val_now == self.atoms[i]):
                    logp_out += log(self.mass[i])
                    match=True
                    break
            if not match:
                return -Inf

        return logp_out

    def rand(self, m=1):
        """
        F.rand(m=1)

        Returns m values from the random probability distribution.
        """
        draws = np.empty(m, dtype=float)

        for i in xrange(m):
            # Draw from existing atoms
            if np.random.random() < self.mass_sofar:
                # Categorical draw over the existing atoms, with their masses
                # renormalized to sum to one.
                atom_index = int(flib.rcat(np.asarray(self.mass) / self.mass_sofar,0,1,1))
                new_draw = self.atoms[atom_index]
                self.n[atom_index] += 1
            # Make new atom
            else:
                new_draw = self.basemeas_rand(**self.basemeas_params)
                self.atoms.append(new_draw)
                self.n.append(1)

                # Stick-breaking: the new atom takes a Beta(1, nu) fraction of
                # the remaining stick.
                new_mass = self.mass_prod * rbeta(1, self.nu)
                self.mass.append(new_mass)
                self.mass_prod *= 1.-new_mass
                self.mass_sofar += new_mass
            draws[i] = new_draw

        # For a single draw, return a scalar rather than a length-1 array.
        if m==1:
            draws = draws[0]
        return draws


class DP(Stochastic):
    """
    value: A DP realization.
    Parents: 'alpha': concentration parameter, 'base': base probability
    distribution. Base parent must have random() and logp() methods (must be
    an actual distribution object).

    Should get intrinsic set of clusters. Step methods will update them with
    the children. A new value should be created conditional on the intrinsic
    clusters every time a parent is updated.
    """
    def __init__(self, name, basemeas_rand, basemeas_logp, nu, doc=None, trace=True, value=None, cache_depth=2, plot=False, verbose=0, **basemeas_params):

        self.basemeas_logp = basemeas_logp
        self.basemeas_rand = basemeas_rand
        self.basemeas_params = basemeas_params

        parents = {}
        parents['basemeas_logp'] = basemeas_logp
        parents['basemeas_rand'] = basemeas_rand
        parents['basemeas_params'] = basemeas_params
        parents['nu'] = nu

        # The DP itself carries no log-probability; 0 keeps pymc happy.
        def dp_logp_fun(value, **parents):
            return 0.
            # raise ValueError, 'DPStochastic objects have no logp attribute'

        # NOTE(review): basemeas_logp is accepted but unused here; only the
        # random part of the base measure is needed to draw a realization.
        def dp_random_fun(basemeas_logp, basemeas_rand, nu, basemeas_params):
            return DPRealization(basemeas_rand, nu, **basemeas_params)

        # If value argument provided, read off intrinsic clusters.
        # If clusters argument provided, well store them.
        # If no clusters argument provided, propose from prior all over the place.
        Stochastic.__init__(self,
                            logp=dp_logp_fun,
                            random=dp_random_fun,
                            doc=doc,
                            name=name,
                            parents=parents,
                            trace=trace,
                            value=value,
                            dtype=np.object,
                            rseed=True,
                            observed=False,
                            cache_depth=cache_depth,
                            plot=plot,
                            verbose=verbose)


class DPDraw(Stochastic):
    """
    value: An array of values. May want to hide these in the step method, but
    many step methods need them so it's probably better to keep them here:
        N: length of value.
        N_clusters: number of clusters.
        clusters: values of clusters, length-N list.
        cluster_multiplicities: multiplicities of clusters.

    Note may want to make these things their own Stochastics, in case people
    want to have Deterministics etc. depending on them or to trace them.

    Parent: 'dist': a DPStochastic.

    logp: product of base logp evaluated on each cluster (each cluster appears
    only once regardless of multiplicity) plus some function of alpha and the
    number of clusters.
    """
    def __init__( self, name, DP, N=1, doc=None, trace=True, observed=False, cache_depth=2, plot=True, verbose = 0):

        # Number of draws this stochastic holds.
        self.N = N

        def DP_logp_fun(value, dist):
            return dist.logp(value)

        def DP_random_fun(dist):
            return dist.rand(N)

        Stochastic.__init__(self,
                            logp = DP_logp_fun,
                            doc=doc,
                            name=name,
                            parents={'dist': DP},
                            random = DP_random_fun,
                            trace=trace,
                            value=None,
                            dtype=float,
                            rseed=True,
                            observed=observed,
                            cache_depth=cache_depth,
                            plot=plot,
                            verbose = verbose)

        # Deterministic exposing the (atoms, multiplicities) of the draws.
        self.clusters = lam_dtrm('clusters',lambda draws=self: draws_to_atoms(draws))


from numpy.testing import *
from pylab import *

# NOTE(review): NumpyTestCase/NumpyTest are from a long-deprecated
# numpy.testing API; these checks also produce matplotlib plots as a visual
# sanity check rather than asserting numeric results.
class test_DP(NumpyTestCase):
    def check_correspondence(self):
        # Compare a finite-dimensional Dirichlet approximation against draws
        # from the stick-breaking DPRealization.
        x_d = linspace(-5.,5.,1000)
        dx = x_d[1] - x_d[0]
        nu = 10
        p = nu * dx/sqrt(2.*pi)*exp(-x_d**2)
        DP_approx = rdirichlet(p).squeeze()
        DP_approx = hstack((DP_approx, 1.-sum(DP_approx)))

        true_DP = DPRealization(rnormal, nu, mu=0,tau=1)
        true_DP.rand(1000)

        clf()
        subplot(2,1,1)
        plot(x_d, DP_approx,'k.',markersize=8)
        subplot(2,1,2)
        plot_atoms(true_DP)

    def check_draws(self):
        D = DPRealization(rnormal,100,mu=-10,tau=.1)
        draws = D.rand(1000)
        clf()
        hist(draws)

    def check_stochastics(self):
        S = DP('S', rnormal,normal_like, 100, mu=10, tau=.1)
        q = DPDraw('q', S, N=1000)
        clf()
        hist(q.value)

if __name__=='__main__':
    NumpyTest().run()


"""
Note:
If you could get a distribution for the multiplicities of the currently-
found clusters in a DP, could you give its children a logp attribute? Then
you could do something like with the GP: give the DPStochastic an intrinsic
set of clusters unrelated to its children, assess its logp using only its
intrinsic clusters, etc.

Yes, you can easily do this. Give the DP object its intrinsic clusters, and
let the step methods treat those as the things that are really participating
in the model even though from the user's perspective the entire DP is
participating.
"""

# Old random method
# val = []
# N = len(self.atoms)
#
# # Initialize. Optimization 1: keep running sum.
# if N>0:
#     sum_n = np.sum(self.n)
# else:
#     sum_n = 0
#
# float_sumn = np.float(sum_n)
#
# for i in xrange(m):
#
#     # Optimization 2: update cumulative sum on the fly.
#     self.tables = np.cumsum(self.n)
#
#     # Maybe draw a new atom
#     if uniform() > float_sumn / (float_sumn+self.nu):
#         new_val = self.basemeas_rand(**self.basemeas_params)
#         self.atoms.append(new_val)
#         self.n.append(1)
#         N = N + 1
#
#     # Otherwise draw from one of the existing algorithms
#     else:
#         # Optimization 3: Draw uniforms ahead of time.
#         # DON'T use the same uniform for checking new atom
#         # creation AND for finding which old atom to draw from,
#         # you'll introduce painful bias.
#
#         unif = uniform() * float_sumn
#         for i in xrange(N):
#             if unif < self.tables[i]:
#                 new_val = self.atoms[i]
#                 self.n[i] = self.n[i]+1
#                 break
#
#     float_sumn = float_sumn + 1.
#     val.append(new_val)
#
# if m>1:
#     return array(val, dtype=float)
# else:
#     return val[0]
{ "repo_name": "matthew-brett/pymc", "path": "pymc/sandbox/DP/DP.py", "copies": "1", "size": "11854", "license": "mit", "hash": 6247141589990228000, "line_mean": 27.912195122, "line_max": 123, "alpha_frac": 0.561329509, "autogenerated": false, "ratio": 3.7751592356687897, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9810019562990167, "avg_score": 0.005293836335724701, "num_lines": 410 }
from fast_givens import fg import pymc as pm import numpy as np from ortho_basis import OrthogonalBasis __all__ = ['fast_givens', 'GivensStepper'] def fast_givens(o,i,j,t): "Givens rotates the matrix o." if i==j: raise ValueError, 'i must be different from j.' oc = o.copy('F') fg(o,oc,i+1,j+1,t) return oc class GivensStepper(pm.Metropolis): """docstring for GivensStepper""" def __init__(self, o, kappa=1.): pm.Metropolis.__init__(self, o) self.o = o self.adaptive_scale_factor = 1./kappa def propose(self): t_p = pm.rvon_mises(0, 1./self.adaptive_scale_factor) i_p = np.random.randint(self.o.n-1) j_p = np.random.randint(i_p+1, self.o.n) self.o.value = fast_givens(self.o.value, i_p, j_p, t_p) def tune(self, *args, **kwargs): if self.adaptive_scale_factor>=1e6: return False else: return pm.Metropolis.tune(self, *args, **kwargs) @staticmethod def competence(o): if isinstance(o, OrthogonalBasis): if o.value.shape[0] > 1: return 3 else: return 0 else: return 0
{ "repo_name": "apatil/covariance-prior", "path": "cov_prior/givens_step.py", "copies": "1", "size": "1366", "license": "mit", "hash": 5670105406355458000, "line_mean": 26.34, "line_max": 63, "alpha_frac": 0.541727672, "autogenerated": false, "ratio": 3.1474654377880182, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9055777551915939, "avg_score": 0.026683111574415923, "num_lines": 50 }
import pymc as pm import numpy as np __all__ = ['OrthogonalBasis','check_orthogonality','covariance'] def check_orthogonality(value, tol=1e-10): """ Returns 0 if the matrix is orthogonal (up to tolerance), -inf if it is not. You can use this in potentials. """ if np.abs(np.dot(value,value.T) - np.eye(value.shape[0])).max() > tol: return -np.inf else: return 0 class OrthogonalBasis(pm.Stochastic): "An orthogonal basis, stored in the columns of a matrix." def __init__(self, name, n, constrain=True, *args, **kwargs): self.n = n if constrain: lpf = check_orthogonality else: lpf = lambda value: 0 pm.Stochastic.__init__(self, lpf, 'An orthonormal basis', name, {}, dtype=np.dtype('float'), value=np.array(np.eye(n),order='F'), *args, **kwargs) def ev_to_cov(o,v): return np.dot(o*v,o.T) def covariance(name, v, o=None, doc="A covariance matrix", constrain=True, *args, **kwds): """ Deterministic converting a vector of eigenvalues and an orthogonal basis to a covariance matrix. If with_potential, also returns a potential enforcing orthogonality. """ if o is None: o = OrthogonalBasis(name + '_eigenvectors', len(v.value), constrain=constrain) c = pm.Deterministic(eval=ev_to_cov, name=name, parents={'o': o, 'v': v}, doc=doc, *args, **kwds) return o,c
{ "repo_name": "apatil/covariance-prior", "path": "cov_prior/ortho_basis.py", "copies": "1", "size": "1564", "license": "mit", "hash": -5487920617320456000, "line_mean": 33.0217391304, "line_max": 154, "alpha_frac": 0.6061381074, "autogenerated": false, "ratio": 3.2857142857142856, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.43918523931142855, "avg_score": null, "num_lines": null }
import matplotlib.pyplot as pl import numpy as np def symmetric(sorted_streams, stream_bounds): """Symmetric baseline""" lb, ub = np.min(stream_bounds[:,0,:],axis=0), np.max(stream_bounds[:,1,:],axis=0) return .5*(lb+ub) def pos_only(sorted_streams, stream_bounds): """Lumps will only be positive""" lb, ub = np.min(stream_bounds[:,0,:],axis=0), np.max(stream_bounds[:,1,:],axis=0) return lb def zero(sorted_streams, stream_bounds): """Zero baseline""" return np.zeros(stream_bounds.shape[2]) def min_weighted_wiggles(sorted_streams, stream_bounds): """Baseline recommended by Byron and Wattenberg""" lb, ub = np.min(stream_bounds[:,0,:],axis=0), np.max(stream_bounds[:,1,:],axis=0) weight = ub-lb sorted_streams = np.abs(sorted_streams) for i in xrange(len(sorted_streams)): sorted_streams[i,:] *= (-1)**i cusum_f = np.vstack((np.zeros(sorted_streams.shape[1]), np.cumsum(sorted_streams[:-1,:], axis=0))) f_prime = np.diff(sorted_streams, axis=1) cusum_f_prime = np.diff(cusum_f, axis=1) g_prime = np.hstack(([0],-np.sum((f_prime*.5 + cusum_f_prime)*sorted_streams[:,1:],axis=0) / weight[1:])) g_prime[np.where(weight==0)] = 0 g = np.cumsum(g_prime) return g def stacked_graph(streams, cmap=pl.cm.bone, color_seq='linear', baseline_fn=min_weighted_wiggles): """ Produces stacked graphs using matplotlib. Reference: 'Stacked graphs- geometry & aesthetics' by Byron and Wattenberg http://www.leebyron.com/else/streamgraph/download.php?file=stackedgraphs_byron_wattenberg.pdf Parameters: - streams: A list of time-series of positive values. Each element must be of the same length. - cmap: A matplotlib color map. Defaults to 'bone'. - colo_seq: 'linear' or 'random'. - baseline_fn: Current options are symmetric, pos_only, zero and min_weighted_wiggles. 
""" # Sort by onset times onset_times = [np.where(np.abs(stream)>0)[0][0] for stream in streams] order = np.argsort(onset_times) streams = np.asarray(streams) sorted_streams = streams[order] t = np.arange(streams.shape[1]) # Establish bounds stream_bounds = [np.vstack((np.zeros(streams.shape[1]), sorted_streams[0])), np.vstack((-sorted_streams[1], (np.zeros(streams.shape[1]))))] side = -1 for stream in sorted_streams[2:]: side *= -1 if side==1: stream_bounds.append(np.vstack((stream_bounds[-2][1], stream_bounds[-2][1]+stream))) else: stream_bounds.append(np.vstack((stream_bounds[-2][0]-stream, stream_bounds[-2][0]))) stream_bounds = np.array(stream_bounds) # Compute baseline baseline = baseline_fn(sorted_streams, stream_bounds) # Choose colors t_poly = np.hstack((t,t[::-1])) if color_seq=='linear': colors = np.linspace(0,1,streams.shape[1]) elif color_seq=='random': colors = np.random.random(size=streams.shape[1]) else: raise ValueError, 'Color sequence %s unrecognized'%color_seq # Plot pl.axis('off') for i in xrange(len(stream_bounds)): bound = stream_bounds[i] color = cmap(colors[i]) pl.fill(t_poly, np.hstack((bound[0]-baseline,(bound[1]-baseline)[::-1])), facecolor=color, linewidth=0.,edgecolor='none') # Demo if __name__ == '__main__': pl.clf() N_dsets = 50 T = 100 amp = 1 fade = .15 dsets = [] for i in xrange(N_dsets): this_dset = np.zeros(T) t_onset = np.random.randint(.9*T)-T/3 if t_onset >= 0: remaining_t = np.arange(T-t_onset) else: remaining_t = np.arange(T)-t_onset this_dset[max(t_onset,0):]=np.exp(-.15*np.random.gamma(10,.1)*remaining_t)\ * remaining_t * np.random.gamma(6,.2)# * np.cos(-fade*remaining_t*np.random.gamma(10,.1))**2 dsets.append(this_dset) stacked_graph(dsets, baseline_fn = min_weighted_wiggles, color_seq='random')
{ "repo_name": "ActiveState/code", "path": "recipes/Python/576633_Stacked_graphs_using_matplotlib/recipe-576633.py", "copies": "1", "size": "4165", "license": "mit", "hash": 3249772835440081000, "line_mean": 37.2110091743, "line_max": 129, "alpha_frac": 0.6040816327, "autogenerated": false, "ratio": 3.2187017001545595, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.43227833328545595, "avg_score": null, "num_lines": null }
import json import io import os import re from hl7apy.parser import parse_message from hl7apy.exceptions import UnsupportedVersion #receives the name of the file and reads the messages in the file def readMessageFile(filename): #read the file message = open(filename, 'r').read() print("Step 1: File read successfully") return message #This method splits the 3 messages based on the blank line that is between messages def splitMessages(strmsg): messageslist = re.split('\\n\\n', strmsg) print("Step 2: Messages splitted successfully") return messageslist def hl7StrToDictionary(hl7string, use_long_name=True): """ Takes a string parameter and converts it to a Dictionary :param hl7string: HL7 string that is passed to the method :returns: A dictionary representation of the HL7 message """ hl7string = hl7string.replace("\n", "\r") try : m = parse_message(hl7string) except UnsupportedVersion: print(" Error! : The specified version in the file is unsurpoted.") print(" Kindly change the version number in the text file to 2.5") #We create a dictionary to ensure it is json serializable return hl7MessageToDictionary(m, use_long_name=use_long_name) def hl7MessageToDictionary(m, use_long_name=True): """Convert an HL7 message to a dictionary """ if m.children: d = {} for c in m.children: name = c.name.lower() if use_long_name: name = c.long_name.lower() if c.long_name else name dictified = hl7MessageToDictionary(c, use_long_name=use_long_name) if name in d: if not isinstance(d[name], list): d[name] = [d[name]] d[name].append(dictified) else: d[name] = dictified return d else: return m.to_er7() def writeJsonFile(dictionary): # Write JSON file try: to_unicode = unicode except NameError: to_unicode = str #we want to write all messages into one file so we append #to file first and then delete any previously writen file with io.open('ml7tojson.json', 'a', encoding='utf8') as outfile: str_ = json.dumps(dictionary, indent=4, sort_keys=False, separators=(',', ':'), ensure_ascii=False) 
outfile.write(to_unicode(str_)) #read messages from file strmsg = readMessageFile("HL7_Final.hl7") #split the messages based on the blank line between messages msgList = splitMessages(strmsg) #lets remove a previously writen json file if the file exists, delete it if os.path.isfile('ml7tojson.json'): os.remove('ml7tojson.json') print("Step 3: Previous json file deleted Successfully") #Loop through the message to handle each message at a time for message in msgList: # Convert it to a dictionary d = hl7StrToDictionary(message) #write JSON file writeJsonFile(d) print ("A jason file with the message has been created")
{ "repo_name": "AnaniSkywalker/HL7_Parser", "path": "Hl7_Parser.py", "copies": "1", "size": "3239", "license": "mit", "hash": -6582345172317627000, "line_mean": 34.2065217391, "line_max": 83, "alpha_frac": 0.6236492745, "autogenerated": false, "ratio": 4.0538172715894865, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5177466546089486, "avg_score": null, "num_lines": null }
import os, io def get_file_path(filename): return os.path.abspath(os.path.join(os.getcwd(), filename)) def read_file(file): return io.IOBase.readable(file) def main(): fileName = input("Please Enter the name of your file: ".upper()) file = get_file_path(fileName) if fileName == "alt1.csv".lower(): fileOne = get_file_path(fileName) file = open(fileOne, 'r') if len(read_file(file).columns) == 4: header = io.FileIO.read(file, delimiter=",") header['One'] = 'a' header['Two'] = 'b' header['Three'] = 'c' header['Four'] = 'd' elif fileName == "alt2.csv".lower(): filetwo = get_file_path(fileName) file = open(filetwo, 'r') if len(read_file(file).columns) ==4: header = io.FileIO.read(file, delimiter=",") header["TWO"][header["ONE"] == 1] = 3 header["TWO"][header["ONE"] == 2] = 4 header["TWO"][header["ONE"] == 3] = 6 header["ONE"][header["TWO"] == 3] = 1 header["ONE"][header["TWO"] == 4] = 2 header["ONE"][header["TWO"] == 6] = 3 elif fileName == "alt3.csv".lower(): fileThree = get_file_path(fileName) file = open(fileThree, 'r') if len(read_file(file).columns) == 7: header = io.FileIO.read(file, delimiter=":") header['One'] = 1 header['Two'] = 2 header['Three'] = 3 header['Four'] = 4 header['Five'] = 5 header['Six'] = 6 header['Seven'] = 7 if __name__== "__main__": main()
{ "repo_name": "FourthCohortAwesome/NightThree", "path": "NightThree.py", "copies": "1", "size": "1662", "license": "mit", "hash": 976967313248532600, "line_mean": 30.9615384615, "line_max": 68, "alpha_frac": 0.5024067389, "autogenerated": false, "ratio": 3.4625, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9429776571641185, "avg_score": 0.007026033451762894, "num_lines": 52 }
import os, sys, glob, pdb, scipy, scipy.misc import numpy as N import cv2 as cv2 import random import matplotlib.pyplot as plt import matplotlib.cm as cm import matplotlib as mpl import pylab import pickle as pickle from dataset import * # For color_mask_img function from mpl_toolkits.axes_grid1 import make_axes_locatable import matplotlib.gridspec as gridspec from skimage import data, color, io, img_as_float #from mlabwrap import mlab ################## Image Sequence Data ################ class NavcamData(Dataset): # Containes the load, init and plot functions # for sequence of image dataset - uses sol number _VL_SIFT_ = 0 def __init__(self, input_folder=None, sol_number=None, init_sols=None, scaleInvariant=None): self.input_folder = None self.sol_number = None self.init_sols = None self.dataset_folder = os.path.join(input_folder, 'sol%.4d' % sol_number) self.datafiles = [] self.init_data_folder = [] self.data_files_count = 0 self.img_label_split = [0] self.data_split = [0] self.selections = [] self.init_sols = [] self.priority = 0 self.score = [] self.shadow_score = [] self.met_score = [] self.select_rect = [] self.rand_score = [] self.shadow_rand_score = [] self.met_rand_score = [] self.rec_features = {} self.orig_features = {} self.loc = {} self.zoom_window = {} # Variables from TCData self.feature_string = ('dsift') # Used for extracting sub images to extract features self.winsize = 100 self.nbins = 101 self.scaleInvariant = scaleInvariant if ~(input_folder is None): self.input_folder = input_folder if ~(sol_number is None): self.sol_number = sol_number if ~(init_sols is None): self.init_sols = init_sols if ~(scaleInvariant is None): self.scaleInvariant = scaleInvariant # Data folder for analysis print('Input Data') for i,data_file in enumerate(glob.glob('%s/*eff*.img.jpg'%(self.dataset_folder))): print(data_file) self.datafiles.append(data_file) if not scaleInvariant: pkl_file = data_file.split('.')[0] + '.pkl' else: pkl_file = data_file.split('.')[0] + 
'.si' if not i: # Initialized for the first run and extended thereafter Dataset.__init__(self, pkl_file, 'tc-sol%d-prior%s' % (self.sol_number, len(self.init_sols))) # pkl_file[pkl_file.rfind('/')+1:pkl_file.rfind('_')+1]) if not scaleInvariant: (self.data, self.labels, feature_string, self.width, self.height, \ self.winsize, self.nbins) = self.read_ppm(data_file, pkl_file) else: (self.data, self.labels, feature_string, self.width, self.height, \ self.winsize, self.nbins) = self.si_read_ppm(data_file, pkl_file) self.npixels = self.width * self.height self.xlabel = 'Grayscale intensity' self.ylabel = 'Probability' self.xvals = scipy.arange(self.data.shape[0]).reshape(-1,1) self.img_label_split.extend([len(self.labels)]) self.data_split.extend([self.data.shape[1]]) self.selections.append(N.zeros((self.height, self.width))) self.select_rect.append({}) self.width = N.array([self.width]) self.height = N.array([self.height]) self.xvals = N.array([self.xvals]) continue if not scaleInvariant: extracted_features = self.read_ppm(data_file, pkl_file) else: extracted_features = self.si_read_ppm(data_file, pkl_file) self.extend(extracted_features) self.data_files_count = self.data_files_count + 1 self.selections.append(N.zeros((self.height[i], self.width[i]))) self.select_rect.append({}) # Data folder for initialization print('Init Data Folders') for init_sol in init_sols: init_dataset_folder = os.path.join(input_folder, 'sol%.4d' % init_sol) print(init_dataset_folder) if os.path.isdir(init_dataset_folder): for init_data_file in glob.glob('%s/*eff*.img.jpg'%(init_dataset_folder)): self.initfilename = init_data_file if not scaleInvariant: init_pkl_file = init_data_file.split('.')[0] + '.pkl' else: init_pkl_file = init_data_file.split('.')[0] + '.si' if not scaleInvariant: (initdata, labels, features_string, width, height, \ winsize, nbins) = self.read_ppm(init_data_file, init_pkl_file) else: (initdata, labels, features_string, width, height, \ winsize, nbins) = 
self.si_read_ppm(init_data_file, init_pkl_file) if not len(self.initdata): self.initdata = initdata else: self.initdata = N.concatenate((self.initdata, initdata),axis=1) @classmethod def extract_sift(cls, rawfilename, winsize, nbins): """read_ppm(rawfilename, filename) Read in raw pixel data from rawfilename (.ppm). Create a histogram around each pixel to become the feature vector for that obsevation (pixel). Pickle the result and save it to filename. Note: does NOT update object fields. Follow this with a call to readin(). """ if cls._VL_SIFT_: # VLSIFT matlab im = Image.open(rawfilename) (width, height) = im.size mlab.bb_sift(N.array(im), 'temp.mat') sift_features = scipy.io.loadmat('temp.mat') kp = sift_features['f_'] sift_features = sift_features['d_'] sift_features = scipy.concatenate((sift_features.transpose(), kp[2:4].transpose()), 1).transpose() labels = []; for ikp in kp.transpose(): (x,y) = ikp[0:2] labels += ['(%d,%d)' % (y,x)] else: #Opencv SIFT img = cv2.imread(rawfilename) gray= cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) height, width = gray.shape # Computing SIFT sift = cv2.SIFT(edgeThreshold = 3) kp, des = sift.detectAndCompute(gray,None) labels = [] sift_features = N.transpose(des) scale_angle = [] for ikp in kp: (x,y) = ikp.pt scale_angle.append([ikp.size/12, ikp.angle]) labels += ['(%d,%d)' % (y,x)] scale_angle = N.array(scale_angle) sift_features = scipy.concatenate((sift_features.transpose(), scale_angle), 1).transpose() return (sift_features, labels, width, height) @classmethod def extract_dsift(cls, rawfilename, winsize, nbins): """read_ppm(rawfilename, filename) Read in raw pixel data from rawfilename (.ppm). Create a histogram around each pixel to become the feature vector for that obsevation (pixel). Pickle the result and save it to filename. Note: does NOT update object fields. Follow this with a call to readin(). 
""" im = Image.open(rawfilename) (width, height) = im.size # To be removed in the future # Pick up all windows, stepping by half of the window size labels = [] halfwin = int(winsize/2) for y in range(halfwin, height-halfwin, int(halfwin/2)): for x in range(halfwin, width-halfwin, int(halfwin/2)): labels += ['(%d,%d)' % (y,x)] mlab.bb_dsift(N.array(im), winsize, 'temp.mat') sift_features = scipy.io.loadmat('temp.mat') sift_features = sift_features['d_'] return (sift_features, labels, width, height) @classmethod def extract_hist(cls, rawfilename, winsize, nbins): # This function extracts the histogram features from the image im = Image.open(rawfilename) (width, height) = im.size npixels = width * height pix = scipy.array(im) # Generate one feature vector (histogram) per pixel #winsize = 20 # for test.pgm #winsize = 0 # for RGB halfwin = int(winsize/2) bins = scipy.linspace(0, 255, nbins) # Only use windows that are fully populated mywidth = width-winsize myheight = height-winsize #data = scipy.zeros((nbins-1, mywidth * myheight)) #data = scipy.zeros((3*winsize*winsize, mywidth * myheight)) data = [] labels = [] # Pick up all windows, stepping by half of the window size for y in range(halfwin, height-halfwin, int(halfwin/2)): for x in range(halfwin, width-halfwin, int(halfwin/2)): # Read in data in row-major order ind = (y-halfwin)*mywidth + (x-halfwin) #data[:,ind] = \ # scipy.histogram(pix[y-halfwin:y+halfwin, # x-halfwin:x+halfwin], # bins)[0] # Just RGB #data[:,ind] = pix[y,x] # RGB window #data[:,ind] = pix[y-halfwin:y+halfwin,x-halfwin:x+halfwin].flat hist_features = TCData.extract_hist_subimg(pix[y-halfwin:y+halfwin,x-halfwin:x+halfwin]) if data == []: data = hist_features.reshape(-1,1) else: data = scipy.concatenate((data, hist_features.reshape(-1,1)),1) labels += ['(%d,%d)' % (y,x)] return (data, labels, width, height) @staticmethod def extract_hist_subimg(sub_image): hist_bins = range(0,260,1) hist_features = N.histogram(sub_image.ravel(), hist_bins)[0] 
return hist_features def si_read_ppm(self, rawfilename, filename): # This function reads the ppm/jpg file and extracts the features if the # features pkl file doesn't exist. It is also compatible for extension # of the feauture vector and doesn't compute the already computed features new_feature_string = [] updated_feature = 0 data = N.array([], dtype=int) if os.path.exists(filename): pkl_f = open(filename, 'r') (data, labels, feature_string, width, height, winsize, nbins)= pickle.load(pkl_f) self.winsize = winsize self.nbins = nbins new_feature_string = list(feature_string) pkl_f.close() if not new_feature_string.count('sift'): updated_feature = 1 (sift_features, labels, width, height) = self.extract_sift(rawfilename, self.winsize, self.nbins) if data.size: data = scipy.concatenate((data.transpose(), sift_features.transpose()), 1).transpose() else: data = sift_features new_feature_string.append('sift') if updated_feature: outf = open(filename, 'w') pickle.dump((data, labels, new_feature_string, width, height, self.winsize, self.nbins),outf) outf.close() print 'Saved data to %s.' % filename return (data, labels, new_feature_string, width, height, self.winsize, self.nbins) def read_ppm(self, rawfilename, filename): # This function reads the ppm/jpg file and extracts the features if the # features pkl file doesn't exist. 
It is also compatible for extension # of the feauture vector and doesn't compute the already computed features new_feature_string = [] updated_feature = 0 data = N.array([], dtype=int) if os.path.exists(filename): pkl_f = open(filename, 'r') (data, labels, feature_string, width, height, winsize, nbins)= pickle.load(pkl_f) self.winsize = winsize self.nbins = nbins new_feature_string = list(feature_string) pkl_f.close() if not new_feature_string.count('dsift'): updated_feature = 1 (sift_features, labels, width, height) = self.extract_dsift(rawfilename, self.winsize, self.nbins) if data.size: data = scipy.concatenate((data.transpose(), sift_features.transpose()), 1).transpose() else: data = sift_features new_feature_string.append('dsift') if not new_feature_string.count('histogram'): updated_feature = 1 (hist_features, labels, width, height) = self.extract_hist(rawfilename, self.winsize, self.nbins) hist_features = hist_features/(self.winsize) if data.size: data = scipy.concatenate((data.transpose(), hist_features.transpose()), 1).transpose() else: data = hist_features new_feature_string.append('histogram') ''' if not new_feature_string.count('position'): updated_feature = 1 position_features = [] for label in labels: (y,x) = map(int, label.strip('()').split(',')) position_features.append([x,y]) position_features = N.array(position_features) if data.size: data = scipy.concatenate((data.transpose(), position_features), 1).transpose() else: data = position_features new_feature_string.append('position') ''' if updated_feature: outf = open(filename, 'w') pickle.dump((data, labels, new_feature_string, width, height, self.winsize, self.nbins),outf) outf.close() print 'Saved data to %s.' % filename return (data, labels, new_feature_string, width, height, self.winsize, self.nbins) def extend(self, extracted_features): # This method reads the pkl files in a folder and adds them to the # existing data for processing in the TCData class. 
(data, labels, feature_string, width, height, winsize, nbins) = extracted_features npixels = width * height xlabel = 'Grayscale intensity' ylabel = 'Probability' xvals = scipy.arange(self.data.shape[0]).reshape(-1,1) self.data = N.concatenate((self.data, data),axis=1) self.width = N.append(self.width, width) self.height = N.append(self.height, height) self.xvals = N.append(self.xvals, xvals) self.labels.extend(labels) self.img_label_split.extend([len(self.labels)]) self.data_split.extend([self.data.shape[1]]) def compute_score(self, img_idx, y, x, mask): " Compute the score for deck or met with idx " qtrwin = self.winsize/2 if mask==0: mask_file = self.datafiles[img_idx].split('.')[0] + '.jpg' elif mask==1: mask_file = self.datafiles[img_idx].split('.')[0] + '.msk.jpg' else: mask_file = self.datafiles[img_idx].split('.')[0] + '.shadow.jpg' selections_pad = N.zeros((self.height[img_idx] + self.winsize, self.width[img_idx] + self.winsize)) mask_img = cv2.imread(mask_file, 0) selections_pad[qtrwin:self.height[img_idx]+qtrwin, qtrwin:self.width[img_idx]+qtrwin] = mask_img csel_mask = selections_pad[y:y+self.winsize, x:x+self.winsize] # Matches are pixels with intensity 255, so divide by this # to get number of matching pixels. 
return (csel_mask.sum()/255) def save_rec(self, reconst_features, ind, orig_features, k): img_idx = N.where(self.img_label_split > ind)[0][0] - 1 (y,x) = map(int, self.labels[ind].strip('()').split(',')) outdir = os.path.join('results', self.name) figfile = os.path.join(outdir, '%s/%s-priority-k-%d-%d.png' % (self.name, k, img_idx)) if figfile in self.rec_features: self.rec_features[figfile].append(reconst_features) self.orig_features[figfile].append(orig_features) self.loc[figfile].append([x,y]) else: self.rec_features[figfile]= [reconst_features] self.orig_features[figfile]= [orig_features] self.loc[figfile] = [[x,y]] def plot_item(self, m, ind, x, r, k, label): """plot_item(self, m, ind, x, r, k, label) Plot selection m (index ind, data in x) and its reconstruction r, with k and label to annotate the plot. """ img_idx = N.where(self.img_label_split > ind)[0][0] - 1 img_data_file = self.datafiles[img_idx] rand_ind = random.randint(0, self.img_label_split[-1]) rand_idx = N.where(self.img_label_split > rand_ind)[0][0] - 1 if x == [] or r == []: print "Error: No data in x and/or r." 
return # im = Image.fromarray(x.reshape(self.winsize, self.winsize, 3)) outdir = os.path.join('results', self.name) if not os.path.exists(outdir): os.mkdir(outdir) # figfile = '%s/%s-sel-%d-k-%d.pdf' % (outdir, self.name, m, k) # im.save(figfile) # print 'Wrote plot to %s' % figfile # record the selections in order, at their x,y coords # subtract selection number from n so first sels have high values mywidth = self.width[img_idx] - self.winsize myheight = self.height[img_idx] - self.winsize # set all unselected items to a value 1 less than the latest (y,x) = map(int, label.strip('()').split(',')) qtrwin = self.winsize/2 if y < qtrwin: y = qtrwin if x < qtrwin: x = qtrwin if y + qtrwin > mywidth: y = mywidth - qtrwin if x + qtrwin > mywidth: x = mywidth - qtrwin im = cv2.imread(img_data_file,0) im1 = cv2.equalizeHist(im) im1 = cv2.medianBlur(im1,5) # Selection matrix manipulation #self.selections[ind/mywidth, ind%myheight] = priority self.priority = self.priority + 1 self.selections[img_idx][y-qtrwin:y+qtrwin, x-qtrwin:x+qtrwin] = self.priority self.select_rect[img_idx][self.priority] = ((x-qtrwin, y-qtrwin), (x+qtrwin, y+qtrwin)) figfile = os.path.join(outdir, '%s-priority-k-%d-%d.pdf' % (self.name, k, img_idx)) figfile_jpg = os.path.join(outdir, '%s-priority-k-%d-%d.png' % (outdir, self.name, k, img_idx)) (img_masked, cmap, num_classes)= self.color_mask_img(im1, im, self.selections[img_idx], self.select_rect[img_idx], self.priority, figfile, 0, 0) # Saving the masked image separately img_disp = plt.imshow(img_masked) plt.axis('off') plt.savefig(figfile_jpg, bbox_inches='tight') self.zoom_window[len(self.score)] = im[y-qtrwin:y+qtrwin, x-qtrwin:x+qtrwin] # Deck mask score = self.compute_score(img_idx, y, x, 0) * 100.0 / self.winsize / self.winsize print 'Deck score: %.2f%%' % score self.score.append(score) # Meteorite mask met_score = self.compute_score(img_idx, y, x, 1) * 100.0 / self.winsize / self.winsize print 'Meteorite score: %.2f%%' % met_score 
self.met_score.append(met_score) # Shadow mask score = self.compute_score(img_idx, y, x, 2) self.shadow_score.append(score) # zoom pictures (left_top, bottom_right) = ((x-qtrwin, y-qtrwin), (x+qtrwin, y+qtrwin)) zoom_file = os.path.join(outdir, '%d.png' % (self.priority-1)) f, (ax1, ax2) = plt.subplots(1,2) ax1.imshow(img_masked) ax1.set_title('Original Image with selected block') ax1.axis('off') ax2.imshow(im[y-qtrwin:y+qtrwin,x-qtrwin:x+qtrwin], cmap = cm.Greys_r) ax2.set_title('Selected Block (Filtered)') ax2.axis('off') plt.savefig(zoom_file, bbox_inches='tight') print 'writing selection to %s/sel-%d.png' % (outdir, self.priority-1) scipy.misc.imsave(os.path.join(outdir, 'sel-%d.png' % (self.priority-1)), im[y-qtrwin:y+qtrwin,x-qtrwin:x+qtrwin]) # rand choices (y,x) = map(int, self.labels[rand_ind].strip('()').split(',')) score = self.compute_score(rand_idx, y, x, 0) self.rand_score.append(score) met_score = self.compute_score(rand_idx, y, x, 1) self.met_rand_score.append(met_score) score = self.compute_score(rand_idx, y, x, 2) self.shadow_rand_score.append(score) def plot_score(self, outdir): # Summary scoring print 'Avg deck score: %.2f%%' % N.mean(self.score) print 'Avg meteorite score: %.2f%%' % N.mean(self.met_score) # Deck scoring technique pylab.clf() pylab.scatter(range(0,len(self.score)),self.score) pylab.xlabel('Iterations') pylab.ylabel('Score') pylab.title('Deck score') figfile = os.path.join(outdir, 'deck_score.png') pylab.savefig(figfile, bbox_inches='tight') pylab.clf() # Deck scoring technique pylab.scatter(range(0,len(self.score)),self.met_score) pylab.xlabel('Iterations') pylab.ylabel('Score') pylab.title('Meteorite Score') figfile = os.path.join(outdir, 'met_score.png') pylab.savefig(figfile, bbox_inches='tight') pylab.clf() # Deck scoring technique pylab.scatter(range(0,len(self.score)),self.rand_score) pylab.xlabel('Iterations') pylab.ylabel('Score') pylab.title('Random Deck Score') figfile = os.path.join(outdir, 'deck_rand_score.png') 
pylab.savefig(figfile, bbox_inches='tight') pylab.clf() # Deck scoring technique pylab.clf() pylab.scatter(range(0,len(self.score)),self.met_rand_score) pylab.xlabel('Iterations') pylab.ylabel('Score') pylab.title('Random Meteorite Score') figfile = os.path.join(outdir, 'met_rand_score.png') pylab.savefig(figfile, bbox_inches='tight') # Deck scoring technique pylab.clf() pylab.scatter(range(0,len(self.score)),self.shadow_score) pylab.xlabel('Iterations') pylab.ylabel('Score') pylab.title('Shadow overlap Score') figfile = os.path.join(outdir, 'shadow_score.png') pylab.savefig(figfile, bbox_inches='tight') # Deck scoring technique pylab.clf() pylab.scatter(range(0,len(self.met_score)),self.shadow_rand_score) pylab.xlabel('Iterations') pylab.ylabel('Score') pylab.title('Random Shadow overlap Score') figfile = os.path.join(outdir, 'shadow_rand_score.png') pylab.savefig(figfile, bbox_inches='tight') pylab.clf() @staticmethod def color_mask_img(img, original_img, mask, rect, idx, figfile = None, show_image = 0, hist_overlay = 0): alpha = 0.6 img = img_as_float(img) rows, cols = img.shape classes = rect.keys() num_classes = len(classes) + 1 # Construct a colour image to superimpose colors = [(1.0,1.0,1.0,1.0)] colors.extend(cm.jet(N.linspace(0,1,num_classes-1)[::-1])) norm = mpl.colors.Normalize(vmin=0, vmax=num_classes - 1) cmap = mpl.colors.ListedColormap(colors) m = cm.ScalarMappable(norm=norm, cmap=cmap) color_mask = m.to_rgba(mask) color_mask = color_mask[:,:,0:3] # Construct RGB version of grey-level image img_color = N.dstack((img, img, img)) # Convert the input image and color mask to Hue Saturation Value (HSV) # colorspace img_hsv = color.rgb2hsv(img_color) ## Replace the hue and saturation of the original image ## with that of the color mask img_masked = color.hsv2rgb(img_hsv) img_masked_copy = img_masked.copy() if not hist_overlay: for i,keys in enumerate(rect): (left_top, bottom_right) = rect[keys] cv2.rectangle(img_masked, left_top, 
bottom_right,color=colors[i+1],thickness=3) else: color_mask_hsv = color.rgb2hsv(color_mask) img_hsv[..., 0] = color_mask_hsv[..., 0] img_hsv[..., 1] = color_mask_hsv[..., 1] * alpha (left_top, bottom_right) = rect[idx] cv2.rectangle(img_masked_copy, left_top, bottom_right,color=colors[-1],thickness=3) # Width ratio is uneven because of the colorbar - image with colorbar seemed to be smaller othewise gs = gridspec.GridSpec(1, 2,width_ratios=[1.12,1]) # Display image with overlayed demud output fig = plt.figure() a = fig.add_subplot(gs[0]) a.set_title('Demud Output') img_disp = plt.imshow(img_masked, cmap = cmap, vmin=0, vmax=num_classes) plt.setp( a.get_yticklabels(), visible=False) plt.setp( a.get_xticklabels(), visible=False) divider = make_axes_locatable(plt.gca()) cax = divider.append_axes("left", "8%", pad="5%") cax = plt.colorbar(img_disp, ticks = N.linspace(0.5,num_classes-.5, num_classes), cax = cax) cax.set_ticklabels(range(0,num_classes) ) cax.ax.tick_params(labelsize=5) # Display original image as well a = fig.add_subplot(gs[1]) original_img = cv2.cvtColor(original_img, cv2.COLOR_GRAY2RGB) a.set_title('Original Image') img_disp = plt.imshow(original_img) plt.setp( a.get_yticklabels(), visible=False) plt.setp( a.get_xticklabels(), visible=False) if not (figfile is None): plt.savefig(figfile, bbox_inches='tight') print 'Wrote selection priority plot to %s' % figfile # Display the output if show_image: plt.show() plt.close('all') return (img_masked_copy, cmap, num_classes)
{ "repo_name": "wkiri/DEMUD", "path": "demud/dataset_navcam.py", "copies": "1", "size": "25050", "license": "apache-2.0", "hash": 1680935877172980700, "line_mean": 35.7841409692, "line_max": 148, "alpha_frac": 0.6268263473, "autogenerated": false, "ratio": 3.230590662883673, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.43574170101836734, "avg_score": null, "num_lines": null }
__author__ = 'anass'
"""Minimal JSON mock REST server: exposes an arbitrary JSON document through
GET/POST/PUT/DELETE routes resolved by path, persisting every mutation."""

from flask import Flask, make_response, jsonify, request
import json

from JSONtoObject import wrap
from JSONtoObject.wrapper import Wrapper
from database import Database
from utils import path_to_property, dict_to_json
from default import id_tag, SLASH
from message import BAD_REQUEST, NOT_FOUND
from ditto import d
from uuid import uuid1

app = Flask(__name__)
_database = None
_model = None


# FIX: Flask invokes registered error handlers *with the error object as an
# argument*; the previous zero-argument signatures raised TypeError when Flask
# dispatched a real 400/404.  A default value keeps the direct calls
# (``return not_found()``) below working unchanged.
@app.errorhandler(400)
def bad_request(error=None):
    """Return a JSON 400 (bad request) response."""
    return make_response(jsonify({d.error: BAD_REQUEST}), 400)


@app.errorhandler(404)
def not_found(error=None):
    """Return a JSON 404 (not found) response."""
    return make_response(jsonify({d.error: NOT_FOUND}), 404)


# GET /
# Returns your JSON
@app.route('/', methods=[d.GET])
def get_all():
    """Return the whole JSON document."""
    return jsonify(_model.to_json())


# GET /:resource
# GET /:resource/:id
# GET /:what/:ever/:you/:want
@app.route('/<path:path>', methods=[d.GET])
def get(path):
    """Resolve an arbitrary slash-separated path in the document and return it."""
    # No control in path variable, we'll just keep it simple
    # We'll add a regex
    try:
        key, value = path_to_property(_model, path.split(SLASH))
    except Exception:
        # Any resolution failure means the resource does not exist.
        return not_found()
    if isinstance(value, Wrapper):
        return json.dumps(value.to_json())
    if isinstance(value, list):
        return json.dumps([v.to_json() for v in value])
    return json.dumps({key: value})


# POST /:resource
# NEXT POST / (add table)
@app.route('/<path:path>', methods=[d.POST])
def create(path):
    """Append a new resource (with a generated id) to the addressed collection."""
    global _database
    value = path_to_property(_model, path.split(SLASH))[1]
    # Return response and save state of the database
    res = dict_to_json(request.form)
    res[id_tag] = uuid1().int
    value.append(wrap(res))
    _database.save()
    return jsonify(res), 201


# PUT /:resource/:id
@app.route('/<path:path>', methods=[d.PUT])
def update(path):
    """Replace the addressed resource with the submitted form data."""
    global _database
    # tests if the resource exists
    try:
        value = path_to_property(_model, path.split(SLASH))[1]
    except Exception:
        return not_found()
    res = dict_to_json(request.form)
    # Keep the id from the URL so the resource identity is stable.
    res[id_tag] = int(path.split(SLASH)[-1])
    # update value: swap the old wrapped object for the new one
    resources = path_to_property(_model, path.split(SLASH)[:-1])[1]
    resources.remove(value)
    resources.append(wrap(res))
    # save database into file
    _database.save()
    return jsonify(res), 201


# DELETE /:resource/:id
# DELETE /:resource
@app.route('/<path:path>', methods=[d.DELETE])
def delete(path):
    """Delete the addressed resource (attribute or collection element)."""
    global _database, _model
    # tests if the resource exists
    try:
        key, value = path_to_property(_model, path.split(SLASH))
    except Exception:
        return not_found()
    # FIX: was ``len(...) is 1`` -- identity comparison on an int is fragile;
    # use equality.
    if len(path.split(SLASH)) == 1:
        # Top-level attribute: its parent is the model itself.
        resources = _model
    else:
        resources = path_to_property(_model, path.split(SLASH)[:-1])[1]
    if isinstance(resources, Wrapper):
        resources.__delattr__(key)
    else:
        # Lists (and any other container) are handled uniformly via remove().
        resources.remove(value)
    _database.save()
    return jsonify({}), 201


def serve(json_file, tag_id=None):
    """Load the JSON file and start the Flask development server."""
    init(json_file, tag_id)
    app.run()


def init(json_file, tag_id=None):
    """Initialize the module-level database/model (optionally overriding id_tag)."""
    global _database, _model, id_tag
    if tag_id:
        id_tag = tag_id
    _database = Database(json_file)
    _model = _database.data
{ "repo_name": "lahlali/JSONMock.py", "path": "jsonmock/server.py", "copies": "2", "size": "3277", "license": "mit", "hash": 7507502070611609000, "line_mean": 22.4071428571, "line_max": 71, "alpha_frac": 0.6368629844, "autogenerated": false, "ratio": 3.420668058455115, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5057531042855115, "avg_score": null, "num_lines": null }
__author__ = "Anatolij Zubow, Piotr Gawlowicz"
__copyright__ = "Copyright (c) 2015, Technische Universitat Berlin"
__version__ = "0.1.0"
__email__ = "{zubow, gawlowicz}@tkn.tu-berlin.de"


class UniFlexException(Exception):
    '''
    Base class for all exceptions.

    Subclasses set a class-level ``message`` template using ``%(name)s``
    placeholders; keyword arguments passed at raise time fill them in.
    '''
    message = 'An unknown exception'

    def __init__(self, msg=None, **kwargs):
        self.kwargs = kwargs
        template = self.message if msg is None else msg
        try:
            text = template % kwargs
        except Exception:
            # Formatting failed (e.g. a placeholder key is missing) --
            # fall back to the raw class-level message.
            text = self.message
        super(UniFlexException, self).__init__(text)


class AgentNotAvailable(UniFlexException):
    message = 'agent %(id)s not available'


class InvalidArgumentException(UniFlexException):
    message = 'function %(func_name)s called with wrong arguments'


class UnsupportedFunctionException(UniFlexException):
    message = ("function %(func_name)s is not supported"
               "by connector_module %(conn_module)s")


class SchedulingFunctionCallsInThePastException(UniFlexException):
    message = 'function %(func_name)s was scheduled in the past for execution'


class FunctionExecutionFailedException(UniFlexException):
    message = ("function %(func_name)s was not correctly executed;"
               " error msg: %(err_msg)s")


# Short alias kept for backward compatibility.
FunctionExecutionFailed = FunctionExecutionFailedException
{ "repo_name": "uniflex/uniflex", "path": "uniflex/core/exceptions.py", "copies": "1", "size": "1353", "license": "mit", "hash": 356736939890069060, "line_mean": 27.1875, "line_max": 78, "alpha_frac": 0.6711012565, "autogenerated": false, "ratio": 3.727272727272727, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4898373983772727, "avg_score": null, "num_lines": null }
__author__ = 'anderson'
# -*- coding: utf-8 -*-
from threading import Thread, Condition
from datetime import datetime
from santos.exceptions import TaskException
import logging

log = logging.getLogger(__name__)


class ThreadSchedule:
    """Registry of scheduled jobs; supports pausing, resuming and removing them."""

    __jobs = []  # jobs that will be executed

    def pause_job(self, job_name):
        """Pause the job with the given name (it stays registered but idle)."""
        log.debug("Job name %s" % job_name)
        log.debug(self.__jobs)
        for j in self.__jobs:
            if job_name == j.name:
                j.pause()  # the job is blocked and waits to be notified
                #del self.__jobs[idx]
                break

    def remove_job(self, job_name):
        """Stop the job with the given name and drop it from the registry."""
        log.debug("Job name %s" % job_name)
        for idx, job in enumerate(self.__jobs):
            if job_name == job.name:
                job._stop()
                del self.__jobs[idx]
                break

    def resume_job(self, job_name):
        """Resume a previously paused job."""
        for job in self.__jobs:
            if job_name == job.name:
                job.paused = False
                break

    def add_job(self, func, id, **parameters):
        """Create, register and start a Job.

        ``parameters['kwargs']`` holds all the arguments for the function that
        will be executed; the remaining keyword parameters configure timing.
        """
        # It would be good to check that the id was in fact provided, since
        # this parameter should really be mandatory.
        job = Job(func, id, **parameters)
        self.__jobs.append(job)
        job.start()

    def __len__(self):
        return len(self.__jobs)


class Job(Thread):
    """
    Accepted parameters: seconds, minutes, hour, time_of_the_day,
    day_of_the_week, day_of_the_month

    Description:
    The ``seconds`` parameter runs the function repeatedly at the given
    frequency in seconds, e.g. seconds="20" runs it every 20 seconds.

    The ``minutes`` parameter runs the function repeatedly at the given
    frequency in minutes, e.g. minutes="20" runs it every 20 minutes.

    The ``hour`` parameter runs the function repeatedly at the given
    frequency in hours, e.g. hour="2" runs it every 2 hours.

    Note: these three parameters cannot be combined, neither with each other
    nor with the two below.

    The ``time_of_the_day`` parameter runs the function every day at a fixed
    time, given in the format hh:mm:ss (hh: 0..23, mm: 0..59, ss: 0..59),
    e.g. time_of_the_day="14:15:00" runs it every day at 14:15.

    The ``day_of_the_week`` parameter runs the function on the given weekday.
    Possible values are Su (Sunday), M (Monday), Tu (Tuesday), W (Wednesday),
    Th (Thursday), F (Friday), Sa (Saturday). It must be combined with
    time_of_the_day to pick the hour, minute and second of that weekday,
    e.g. day_of_the_week="W" time_of_the_day="22:00:00" runs it every
    Wednesday at 22:00.
    *****************************************
    """
    # Weekday codes mapped to datetime.weekday() numbers (Monday == 0).
    days = {"M": 0, "Tu": 1, "W": 2, "Th": 3, "F": 4, "Sa": 5, "Su": 6}

    def __init__(self, func, id, **arguments):
        Thread.__init__(self)
        try:
            self.name = id or ""
            self.args_function = arguments["kwargs"] or None  # arguments for the function to be executed
            if self.args_function:
                del arguments["kwargs"]  # removing the target function's arguments
            self.arguments_map = arguments  # timing parameters only
            self.function = func
            self.condict = Condition()  # will control blocking/unblocking of the job
        except Exception as e:
            log.debug("Erro {}".format(e.__str__()))
        log.debug("name:{}, args_functio:{},args_map:{}, func:{}".format(self.name, self.args_function,
                                                                         self.arguments_map, self.function))

    def run(self):
        """Main loop: sleep until the next scheduled time, then call the function."""
        try:
            log.debug("JOB RUNNING")
            import time
            self.execute = True
            self.paused = False
            while self.execute:
                if not self.paused:
                    interval = self.calculateInterval()
                    time.sleep(interval)
                    self.function(**self.args_function)
        except TaskException as t:
            log.debug(t)

    def pause(self):
        """Mark the job paused; the run loop skips work until resumed."""
        log.debug("PAUSE")
        self.paused = True

    def _stop(self):
        """Ask the run loop to terminate after the current iteration."""
        log.debug("STOP")
        self.execute = False

    def calculateInterval(self):
        """
        Determine the number of seconds until the next task execution.

        When the scheduling parameter is time_of_the_day, the auxCalculate
        helper computes that delay; invalid parameter combinations raise
        TaskException.
        """
        if "day_of_the_week" in self.arguments_map:
            if "hour" in self.arguments_map or "minutes" in self.arguments_map or "seconds" in self.arguments_map:
                raise TaskException("Parametros extras que não combinam")
            if "time_of_the_day" in self.arguments_map:
                return self.calculateDayOfTheWeek(self.arguments_map["day_of_the_week"],
                                                  self.arguments_map["time_of_the_day"])
            else:
                raise TaskException("Parâmetro time_of_the_day não está presente")
        elif "time_of_the_day" in self.arguments_map:
            if "hour" in self.arguments_map or "minutes" in self.arguments_map or "seconds" in self.arguments_map:
                raise TaskException("Parametros extras que não combinam")
            return self.auxCalculate(self.arguments_map["time_of_the_day"])[0]
        elif "hour" in self.arguments_map:
            if "seconds" in self.arguments_map or "minutes" in self.arguments_map:
                raise TaskException("Parametros extras que não combinam")
            return int(self.arguments_map["hour"]) * 3600
        elif "minutes" in self.arguments_map:
            if "seconds" in self.arguments_map:
                raise TaskException("Parametros extras que não combinam")
            else:
                return int(self.arguments_map["minutes"]) * 60
        elif "seconds" in self.arguments_map:
            log.debug("seconds")
            return int(self.arguments_map["seconds"])
        else:
            raise TaskException("Parâmetro(s): %r inválidos" % self.arguments_map)

    def calculateDayOfTheWeek(self, day_of_the_week, time_of_the_day):
        """Seconds until the requested weekday + time-of-day combination."""
        entrada = day_of_the_week
        weekday = datetime.now().weekday()
        dif = self.days[entrada] - weekday
        sleep, diference = self.auxCalculate(time_of_the_day)
        if self.days[entrada] == weekday:
            if diference > 0:
                return sleep
            else:
                return sleep + (6 * (24*3600))  # 24 hours converted to seconds
        elif self.days[entrada] > weekday:
            if diference > 0:
                return sleep + (dif * (24*3600))
            else:
                # If the target is already the next day, just return sleep:
                # the time until the other day's hour is already accounted for.
                if dif == 1:
                    return sleep
                else:
                    return sleep + ((dif-1) * (24*3600))  # 24 hours converted to seconds
        else:
            # number of days of difference
            resp = 7 - abs(dif)
            if diference > 0:
                return sleep + (resp * (24*3600))
            else:
                # If the target is already the next day, just return sleep:
                # the time until the other day's hour is already accounted for.
                if resp == 1:
                    return sleep
                else:
                    return sleep + ((resp-1) * (24*3600))  # 24 hours converted to seconds

    def auxCalculate(self, time_of_the_day):
        """
        Return (sleep_time, diference): the number of seconds to wait so the
        task always runs at the chosen time of day, and the raw difference
        between the desired time and now (negative if already past today).
        """
        try:
            times = [3600, 60, 1]
            one_day_has = '24:00:00'.split(":")
            time_day = sum([a*b for a, b in zip(times, [int(i) for i in one_day_has])])
            aux_time = time_of_the_day.split(":")
            time_want = sum([a*b for a, b in zip(times, [int(i) for i in aux_time])])
            # Convert the current time to seconds since midnight.
            hjf = datetime.now().strftime("%H:%M:%S").split(":")
            now = sum([a*b for a, b in zip(times, [int(i) for i in hjf])])
            # Difference between the desired time and the current time, in seconds.
            diference = time_want - now
            sleep_time = None
            if diference <= 0:
                # Will only be executed on the next day.
                sleep_time = time_day - (diference * (-1))
            else:
                # Will still be executed today.
                sleep_time = diference
        except TaskException as t:
            log.debug(t)
        return sleep_time, diference
{ "repo_name": "anderson89marques/Santos", "path": "santos/santos.py", "copies": "1", "size": "9252", "license": "mit", "hash": -8460936553679018000, "line_mean": 37.6455696203, "line_max": 139, "alpha_frac": 0.5692290893, "autogenerated": false, "ratio": 3.474203338391502, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9527900704046421, "avg_score": 0.0031063447290160394, "num_lines": 237 }
__author__ = 'andersonpaac'
# Python 2 script: parses a CSV of sensor samples and estimates steps taken
# from accelerometer y-axis swings over fixed time windows.
import numpy as np

filename="/Users/andersonpaac/Downloads/question_2.csv"
#PLEASE INSERT FULL PATH TO FILENAME HERE

# Parsed sensor samples and detection state (module-level globals).
timestamp=[]
acc_data=[]
gyro_data=[]
mag_data=[]
light_data=[]
running = []      # flattened list of (start, end) timestamp pairs of activity
threshold=1.4     # min y-axis peak-to-peak amplitude to count a window as movement
timediff=1000     # window length -- presumably milliseconds; TODO confirm units
ooind=10          # stride between records in the flat CSV value list

def parser():
    """Read the CSV, build DataClass samples into acc_data, then run calc()."""
    fd = open(filename)
    data = fd.read()
    raw_data = data.split(",")
    raw_data = raw_data[10:-1]
    # Values may carry a trailing newline + next value; keep the part after it.
    for i in xrange(len(raw_data)):
        a = raw_data[i].split("\n")
        if len(a)>1:
            raw_data[i] = a[1]
        else:
            raw_data[i] = a[0]
    for i in range(len(raw_data)):
        #print raw_data[i]
        raw_data[i] = float(raw_data[i])
    # Each record is ooind values: timestamp then x/y/z accelerometer first.
    ts_ind =0
    a_ind = 1
    notdone=1
    while(notdone):
        acc_data.append(DataClass(raw_data[ts_ind],raw_data[a_ind],raw_data[a_ind+1],raw_data[a_ind+2]))
        ts_ind = ts_ind + ooind
        a_ind = a_ind + ooind
        if(ts_ind>=len(raw_data)-1):
            notdone=0
    calc()

def calc():
    """Detect movement windows and print an estimated step count."""
    #isRunning=0
    sum=0
    unset=1
    idx=0
    somearr=[]  # y-axis samples collected for the current window
    print acc_data[2].get()
    t_lval = acc_data[0].gett()+timediff  # end timestamp of the current window
    while unset:
        if(acc_data[idx].gett()<=t_lval):
            somearr.append(acc_data[idx].gety())
        if(acc_data[idx].gett()>=t_lval):
            # Peak-to-peak amplitude of y within the window.
            distance=max(somearr)-min(somearr)
            #print distance
            if (distance>threshold):# && (somearr[26] > ):
                #if(isRunning==0):
                running.append(t_lval-timediff)#acc_data[idx_lval-timediff].gett())
                running.append(t_lval)#acc_data[idx_lval].gett())
            somearr=[]
            t_lval += timediff
        idx=idx+1
        if(idx >= len(acc_data)):
            unset=0
    # Ensure running holds complete (start, end) pairs.
    if len(running) % 2 != 0:
        running.append(acc_data[len(acc_data)-1].gett())
    print len(running)
    print running
    # Sum the duration of all active intervals.
    notdone=1
    idx=0
    while notdone:
        sum=sum+(running[idx+1]-running[idx])
        idx=idx+2
        if(idx>=len(running)):
            notdone=0
    print sum
    print sum/1000
    # 1.667 steps per second of activity -- empirical constant; TODO confirm.
    steps = (sum*1.667)/1000
    print "MOVED :"+str(steps)+" steps"

class DataClass:
    """One timestamped 3-axis sensor sample."""
    def __init__(self, t , x , y, z ):
        self.sensor_x = x
        self.sensor_y = y
        self.sensor_z = z
        self.sensor_t = t
    def getx(self):
        return self.sensor_x
    def gety(self):
        return self.sensor_y
    def getz(self):
        return self.sensor_z
    def gett(self):
        return self.sensor_t
    def get(self):
        # Full sample as a (t, x, y, z) tuple.
        return self.sensor_t,self.sensor_x,self.sensor_y,self.sensor_z

parser()
{ "repo_name": "andersonpaac/AndroidStepCounter", "path": "postprocessor_step_calc.py", "copies": "1", "size": "2568", "license": "mit", "hash": -7363705221920111000, "line_mean": 20.9487179487, "line_max": 104, "alpha_frac": 0.5385514019, "autogenerated": false, "ratio": 3.0318772136953953, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.8940384593817616, "avg_score": 0.026008804355555724, "num_lines": 117 }
__author__ = "Andrea Biancini, geduldig"
__date__ = "January 3, 2014"
__license__ = "MIT"

from .constants import *
import base64
import requests

# Subdomain and path of the oAuth2 token endpoint; combined with PROTOCOL and
# DOMAIN from .constants to build the full URL.
OAUTH2_SUBDOMAIN = 'api'
OAUTH2_ENDPOINT = 'oauth2/token'


class BearerAuth(requests.auth.AuthBase):

    """Request bearer access token for oAuth2 authentication.

    :param consumer_key: Twitter application consumer key
    :param consumer_secret: Twitter application consumer secret
    :param proxies: Dictionary of proxy URLs (see documentation for python-requests).
    """

    def __init__(self, consumer_key, consumer_secret, proxies=None):
        self._consumer_key = consumer_key
        self._consumer_secret = consumer_secret
        self.proxies = proxies
        # Fetch the token eagerly so failures surface at construction time.
        self._bearer_token = self._get_access_token()

    def _get_access_token(self):
        """Exchange the consumer key/secret for an application-only bearer token."""
        token_url = '%s://%s.%s/%s' % (PROTOCOL, OAUTH2_SUBDOMAIN, DOMAIN, OAUTH2_ENDPOINT)
        auth = self._consumer_key + ':' + self._consumer_secret
        # Credentials are sent base64-encoded via HTTP Basic auth, per the
        # oAuth2 client-credentials grant.
        b64_bearer_token_creds = base64.b64encode(auth.encode('utf8'))
        params = {'grant_type': 'client_credentials'}
        headers = {}
        headers['User-Agent'] = USER_AGENT
        headers['Authorization'] = 'Basic ' + b64_bearer_token_creds.decode('utf8')
        headers['Content-Type'] = 'application/x-www-form-urlencoded;charset=UTF-8'
        try:
            response = requests.post(
                token_url,
                params=params,
                headers=headers,
                proxies=self.proxies)
            data = response.json()
            return data['access_token']
        except Exception as e:
            raise Exception('Error requesting bearer access token: %s' % e)

    def __call__(self, r):
        """requests auth hook: attach the bearer token to the outgoing request."""
        auth_list = [
            self._consumer_key,
            self._consumer_secret,
            self._bearer_token]
        if all(auth_list):
            r.headers['Authorization'] = "Bearer %s" % self._bearer_token
            return r
        else:
            raise Exception('Not enough keys passed to Bearer token manager.')
{ "repo_name": "mpvoss/RickAndMortyWeatherTweets", "path": "env/lib/python3.5/site-packages/TwitterAPI/BearerAuth.py", "copies": "1", "size": "2166", "license": "mit", "hash": 2968697241083509000, "line_mean": 33.935483871, "line_max": 85, "alpha_frac": 0.5747922438, "autogenerated": false, "ratio": 4.11787072243346, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.000571571464294645, "num_lines": 62 }
__author__ = "Andrea Biancini, Jonas Geduldig"
__date__ = "January 3, 2014"
__license__ = "MIT"

import base64

from .constants import *

import requests


class BearerAuth(requests.auth.AuthBase):

    """Request bearer access token for oAuth2 authentication.

    :param consumer_key: Twitter application consumer key
    :param consumer_secret: Twitter application consumer secret
    :param proxies: Dictionary of proxy URLs (see documentation for python-requests).
    """

    def __init__(self, consumer_key, consumer_secret, proxies=None):
        self._consumer_key = consumer_key
        self._consumer_secret = consumer_secret
        self.proxies = proxies
        # Fetch the bearer token immediately so authentication problems
        # surface at construction time.
        self._bearer_token = self._get_access_token()

    def _get_access_token(self):
        """Exchange the consumer key/secret for an application-only bearer token."""
        endpoint = '%s://%s.%s/%s' % (PROTOCOL, REST_SUBDOMAIN,
                                      DOMAIN, OAUTH2_TOKEN_ENDPOINT)
        # oAuth2 client-credentials grant: key:secret, base64-encoded,
        # presented as HTTP Basic credentials.
        raw_creds = (self._consumer_key + ':' + self._consumer_secret).encode('utf8')
        encoded_creds = base64.b64encode(raw_creds)
        request_headers = {
            'User-Agent': USER_AGENT,
            'Authorization': 'Basic ' + encoded_creds.decode('utf8'),
            'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8',
        }
        try:
            reply = requests.post(endpoint,
                                  params={'grant_type': 'client_credentials'},
                                  headers=request_headers,
                                  proxies=self.proxies)
            return reply.json()['access_token']
        except Exception as e:
            raise Exception(
                'Error while requesting bearer access token: %s' % e)

    def __call__(self, r):
        """requests auth hook: attach the bearer token to the outgoing request."""
        if self._consumer_key and self._consumer_secret and self._bearer_token:
            r.headers['Authorization'] = "Bearer %s" % self._bearer_token
            return r
        raise Exception('Not enough keys passed to Bearer token manager.')
{ "repo_name": "Innova4D/twitter-stream-expression", "path": "TwitterAPI/BearerAuth.py", "copies": "1", "size": "2181", "license": "mit", "hash": -4202436073291924000, "line_mean": 34.7540983607, "line_max": 85, "alpha_frac": 0.5570839065, "autogenerated": false, "ratio": 4.243190661478599, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5300274567978599, "avg_score": null, "num_lines": null }
__author__ = "Andrea Biancini, Jonas Geduldig"
__date__ = "January 3, 2014"
__license__ = "MIT"

from .constants import *
import base64
import requests

# Subdomain and path of the oAuth2 token endpoint; combined with PROTOCOL and
# DOMAIN from .constants to build the full URL.
OAUTH2_SUBDOMAIN = 'api'
OAUTH2_ENDPOINT = 'oauth2/token'


class BearerAuth(requests.auth.AuthBase):

    """Request bearer access token for oAuth2 authentication.

    :param consumer_key: Twitter application consumer key
    :param consumer_secret: Twitter application consumer secret
    :param proxies: Dictionary of proxy URLs (see documentation for python-requests).
    """

    def __init__(self, consumer_key, consumer_secret, proxies=None):
        self._consumer_key = consumer_key
        self._consumer_secret = consumer_secret
        self.proxies = proxies
        # Fetch the token eagerly so failures surface at construction time.
        self._bearer_token = self._get_access_token()

    def _get_access_token(self):
        """Exchange the consumer key/secret for an application-only bearer token."""
        token_url = '%s://%s.%s/%s' % (PROTOCOL, OAUTH2_SUBDOMAIN, DOMAIN, OAUTH2_ENDPOINT)
        auth = self._consumer_key + ':' + self._consumer_secret
        # Credentials are sent base64-encoded via HTTP Basic auth, per the
        # oAuth2 client-credentials grant.
        b64_bearer_token_creds = base64.b64encode(auth.encode('utf8'))
        params = {'grant_type': 'client_credentials'}
        headers = {}
        headers['User-Agent'] = USER_AGENT
        headers['Authorization'] = 'Basic ' + b64_bearer_token_creds.decode('utf8')
        headers['Content-Type'] = 'application/x-www-form-urlencoded;charset=UTF-8'
        try:
            response = requests.post(
                token_url,
                params=params,
                headers=headers,
                proxies=self.proxies)
            data = response.json()
            return data['access_token']
        except Exception as e:
            raise Exception('Error requesting bearer access token: %s' % e)

    def __call__(self, r):
        """requests auth hook: attach the bearer token to the outgoing request."""
        auth_list = [
            self._consumer_key,
            self._consumer_secret,
            self._bearer_token]
        if all(auth_list):
            r.headers['Authorization'] = "Bearer %s" % self._bearer_token
            return r
        else:
            raise Exception('Not enough keys passed to Bearer token manager.')
{ "repo_name": "rosudrag/Freemium-winner", "path": "VirtualEnvironment/Lib/site-packages/TwitterAPI/BearerAuth.py", "copies": "2", "size": "2172", "license": "mit", "hash": 597086162918719700, "line_mean": 34.0322580645, "line_max": 85, "alpha_frac": 0.5755064457, "autogenerated": false, "ratio": 4.10586011342155, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.568136655912155, "avg_score": null, "num_lines": null }
__author__ = "Andrea Biancini"
__date__ = "October 2, 2013"

from backend import TwitterApiCall, BackendChooser, BackendError


class DownloadTweetsStream(TwitterApiCall):
  """Downloads tweets via the Twitter streaming API and stores them in the backend."""

  def __init__(self, engine_config, language, auth_type):
    super(DownloadTweetsStream, self).__init__(engine_config, language, auth_type)
    self.backend = BackendChooser.GetBackend(self.logger)

  def getMechanism(self):
    """Identifier of the download mechanism implemented by this engine."""
    return 'stream'

  def ProcessTweets(self):
    """Consume the filtered stream and insert every matching tweet.

    Tweets are filtered by the configured bounding boxes (self.filters) and,
    when set, by language. Backend insert failures are logged and do not stop
    the stream.
    """
    squares = self.filters
    lang = self.language

    self.logger.info('Executing Twitter API calls')
    params = {'locations': ','.join(squares)}
    r = self.api.request('statuses/filter', params)

    for item in r.get_iterator():
      # FIX: the stream also delivers control messages (delete/limit notices)
      # that carry no 'lang' key; item['lang'] raised KeyError on them.
      # .get() returns None for those, so they are skipped whenever a
      # language filter is active.
      if lang and item.get('lang') != lang:
        continue
      self.logger.debug(item['text'])
      sql_vals = self.FromTweetToSQLVals(item, True, True)
      if not sql_vals:
        continue

      try:
        self.backend.InsertTweetIntoDb(sql_vals)
      except BackendError as be:
        self.logger.error("Error inserting tweet in the backend: %s" % be)
{ "repo_name": "biancini/TwitterAnalyzer", "path": "TwitterDownloader/TwitterEngine/stream.py", "copies": "1", "size": "1034", "license": "apache-2.0", "hash": 3757926393761762300, "line_mean": 31.3125, "line_max": 82, "alpha_frac": 0.6808510638, "autogenerated": false, "ratio": 3.640845070422535, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.972520691042749, "avg_score": 0.01929784475900907, "num_lines": 32 }
__author__ = "Andrea Biancini"
__date__ = "October 2, 2013"

import json
import time
import threading
import logging

from backend import TwitterApiCall, BackendChooser, BackendError
from lastcallbackend import LastcallBackendChooser


class DownloadTweetsREST(TwitterApiCall):
  """Downloads tweets through the REST search API, paginating with
  max_id/since_id and persisting progress in a 'lastcall' backend so runs
  can resume where they stopped.

  NOTE: control flow deliberately uses bare ``raise Exception()`` to break
  out of the download loop -- do not "clean these up" without re-checking
  PartialProcessTweets.
  """
  bulk = True
  logger = None

  def __init__(self, engine_config, language, auth_type):
    super(DownloadTweetsREST, self).__init__(engine_config, language, auth_type)
    self.logger = logging.getLogger('engine-%s' % engine_config['name'])
    self.backend = BackendChooser.GetBackend(engine_config)
    self.lastcall_backend = LastcallBackendChooser.GetBackend(engine_config)
    self.bulk = engine_config['bulk']

  def getMechanism(self):
    """Identifier of the download mechanism implemented by this engine."""
    return 'rest'

  def GetCurrentLimit(self):
    """Return the remaining search/tweets rate limit, or 0 on error."""
    try:
      limits = self.GetRateLimits()['resources']['search']['/search/tweets']
      return int(limits['remaining'])
    except BackendError as be:
      self.logger.error('Error while retrieving current limit: %s' % be)
      return 0

  def GetNextCreds(self, ratelimit=0):
    """Rotate credential sets until one with remaining rate limit is found."""
    try:
      while ratelimit <= 2:
        self.logger.info('Using another set of credentials because reached limit.')
        self.InitializeTwitterApi()
        ratelimit = self.GetCurrentLimit()
        self.logger.debug('New limit for this set of credentials: %d' % ratelimit)
      return ratelimit
    except Exception as e:
      self.logger.error('Reached ratelimit.')
      raise e

  def BulkInsert(self, statuses):
    """Insert all statuses in one backend bulk call; raises on backend error."""
    vals = []
    for s in statuses:
      vals.append(self.FromTweetToVals(s, False, False))
    try:
      return self.backend.BulkInsertTweetIntoDb(vals)
    except BackendError as be:
      self.logger.error('Backend error during bulk insert: %s' % be)
      raise Exception()

  def SingleInsert(self, statuses):
    """Insert statuses one by one.

    Returns [inserted, max_tweetid, min_tweetid]; re-raises only if no tweet
    at all was inserted (both ids still None).
    """
    max_tweetid = None
    min_tweetid = None
    inserted = 0
    for s in statuses:
      vals = self.FromTweetToVals(s, False, False)
      try:
        newins = self.backend.InsertTweetIntoDb(vals)
        inserted += newins
        if max_tweetid is None or max_tweetid < long(vals['id']):
          max_tweetid = long(vals['id'])
        if min_tweetid is None or min_tweetid > long(vals['id']):
          min_tweetid = long(vals['id'])
      except BackendError as be:
        self.logger.error('Backend error during insert: %s' % be)
        if max_tweetid is None or min_tweetid is None:
          raise be
    return [inserted, max_tweetid, min_tweetid]

  def ManagingCallError(self, jsonresp, last_errcode, ratelimit):
    """Inspect an API response for errors.

    Returns [must_continue, last_errcode, ratelimit]; error code 88 triggers
    credential rotation, other codes are retried once after 5s. Raises when
    the response has neither 'statuses' nor a retryable error.
    """
    must_continue = False
    if 'statuses' in jsonresp:
      return [must_continue, last_errcode, ratelimit]
    elif 'errors' in jsonresp:
      if type(jsonresp['errors']).__name__ == 'list':
        errors = jsonresp['errors']
      else:
        errors = [jsonresp['errors']]
      for error in errors:
        if 'code' in error and error['code'] == 88:
          # 88 = rate limit exceeded: switch credentials and retry.
          ratelimit = self.GetNextCreds()
          must_continue = True
          return [must_continue, last_errcode, ratelimit]
        if error['code'] != last_errcode:
          last_errcode = error['code']
          self.logger.warning('Got error from API, retrying in 5 seconds: %s' % jsonresp)
          time.sleep(5)
          must_continue = True
          return [must_continue, last_errcode, ratelimit]
    self.logger.error('Call did not return expected results: %s' % jsonresp)
    raise Exception()

  def ExecuteCall(self, params, max_id, since_id):
    """Perform one search/tweets call; returns [ratelimit, parsed JSON]."""
    params['max_id'] = max_id
    params['since_id'] = since_id
    if max_id is not None and since_id is not None:
      if max_id < since_id:
        raise Exception("Wrong max and min id")
    try:
      response = self.api.request('search/tweets', params)
      ratelimit = response.headers['x-rate-limit-remaining']
      jsonresp = json.loads(response.text)
      return [ratelimit, jsonresp]
    except Exception as e:
      self.logger.error('Error during API call: %s.' % e)
      raise e

  def ProcessCallResults(self, jsonresp):
    """Store the statuses of one response; returns [inserted, max_id, min_id].

    Raises (to end the download loop) when the response is empty or when not
    every status could be inserted.
    """
    max_tweetid = None
    min_tweetid = None
    inserted = 0
    statuses = jsonresp['statuses']
    if len(statuses) is 0:
      self.logger.info('API returned no tweet.')
      raise Exception()
    if self.bulk:
      [newins, max_tweetid, min_tweetid] = self.BulkInsert(statuses)
    else:
      [newins, max_tweetid, min_tweetid] = self.SingleInsert(statuses)
    inserted += newins
    if newins != len(statuses):
      self.logger.error("Error inserted %d tweets instead of %d."
                        % (newins, len(statuses)))
      raise Exception()
    return [inserted, max_tweetid, min_tweetid]

  def PartialProcessTweets(self, db_initialization, params, max_id, since_id):
    """Run repeated search calls for one (max_id, since_id) window.

    Returns the [max_id, since_id] pair left when the loop exits; exceptions
    from the helpers are the normal loop-termination mechanism.
    """
    calls = 0
    inserted = 0
    updateAfterFirstCall = max_id is None
    ratelimit = self.GetCurrentLimit()
    last_errcode = None

    self.logger.info('Executing Twitter API calls with max_id = %s and since_id = %s' % (max_id, since_id))
    max_tweetid = since_id

    while TwitterApiCall.continuing():
      try:
        if ratelimit <= 2:
          ratelimit = self.GetNextCreds(ratelimit)
        [ratelimit, jsonresp] = self.ExecuteCall(params, max_id, since_id)
        [must_continue, last_errcode, ratelimit] = self.ManagingCallError(jsonresp, last_errcode, ratelimit)
        if must_continue:
          continue
        else:
          last_errcode = None

        [newinserted, max_tweetid, min_tweetid] = self.ProcessCallResults(jsonresp)
        # On the very first successful call of a fresh window, record the
        # newest tweet id so a later run knows where this one started.
        if updateAfterFirstCall and calls == 0 and max_tweetid is not None:
          self.UpdateLastCallAfterCallExecution(None, None, max_tweetid)

        calls += 1
        inserted += newinserted
        [max_id, since_id] = self.UpdateCallIds(max_id, since_id, max_tweetid, min_tweetid)

        if db_initialization:
          self.logger.info('Performing only one call to initialize DB.')
          raise Exception()
        if max_id is None:
          raise Exception()
      except Exception as e:
        if updateAfterFirstCall and calls == 0 and max_id is not None:
          self.UpdateLastCallAfterCallExecution(max_id, max_tweetid, since_id)
        self.logger.info('Exiting download cycle %s' % e)
        break

    self.logger.info('Total number of calls executed: \t%d.' % calls)
    self.logger.info('Total number of tweets inserted:\t%d.' % inserted)
    return [max_id, since_id]

  def UpdateCallIds(self, max_id, since_id, max_tweetid, min_tweetid):
    """Advance the pagination window; [None, None] means the window is done."""
    if max_tweetid < since_id or max_tweetid == min_tweetid:
      self.logger.info('The call obtained all tweets, stopping the loop.')
      return [None, None]
    return [min_tweetid, since_id]

  #def RaescueLastcall(self):
  #  try:
  #    call_ids = self.lastcall_backend.GetLastCallIds(self.engine_name, False)
  #    if len(call_ids) == 0:
  #      self.logger.warn("No lastcall in database, rescuing engine.")
  #      max_tweetid = self.backend.GetMaxId()
  #      self.logger.debug("Found max_tweetid = %s in database." % max_tweetid)
  #      self.lastcall_backend.InsertLastCallIds(self.engine_name, None, max_tweetid)
  #  except Exception as e:
  #    self.logger.error("Error in rescuing last call: %s" % e)

  def RescueLastcall(self, max_id, since_id):
    """Re-create a lastcall record if none exists, so the engine can resume."""
    max_tweetid = max_id if max_id is not None else since_id
    if max_tweetid is None:
      return

    try:
      call_ids = self.lastcall_backend.GetLastCallIds(self.engine_name, False)
      if len(call_ids) == 0:
        self.logger.warn("No lastcall in database, rescuing engine.")
        self.lastcall_backend.InsertLastCallIds(self.engine_name, None, max_tweetid)
    except Exception as e:
      self.logger.error("Error in rescuing last call: %s" % e)

  def UpdateLastCallAfterCallExecution(self, orig_max_id, max_id, since_id):
    """Persist the pagination state reached by the last run."""
    self.logger.info("Updating lastcall with values orig_max_id = %s, max_id = %s, since_id = %s."
                     % (orig_max_id, max_id, since_id))
    if since_id is None and max_id is None:
      self.logger.debug("Performed all calls, no insert in lastcall needed.")
      return

    if since_id is None:
      # Keep the smaller of the two max ids so no gap is skipped.
      if orig_max_id is None or max_id < orig_max_id:
        min_max_id = max_id
      else:
        min_max_id = orig_max_id
      self.lastcall_backend.InsertLastCallIds(self.engine_name, None, min_max_id)
    else:
      if max_id is not None and orig_max_id is not None and max_id > orig_max_id:
        self.logger.warning("Attention, the gap seems to be widening! Old max_id = %s new max_id = %s."
                            % (orig_max_id, max_id))
      self.lastcall_backend.InsertLastCallIds(self.engine_name, max_id, since_id)

  def RunCallEngine(self, params, call_id, db_initialization):
    """Worker entry point: process one saved (max_id, since_id) window."""
    orig_max_id = None
    # orig_since_id = None
    try:
      max_id = call_id['max_id']
      since_id = call_id['since_id']
      orig_max_id = max_id
      # orig_since_id = since_id
      [max_id, since_id] = self.PartialProcessTweets(db_initialization, params, max_id, since_id)
    except Exception as e:
      self.logger.error("Exception during RunCallEngine: %s" % e)
    finally:
      if not db_initialization:
        self.UpdateLastCallAfterCallExecution(orig_max_id, max_id, since_id)
        #self.RescueLastcall()
        self.RescueLastcall(max_id, since_id)

  def ProcessTweets(self, initialize=False):
    """Spawn one worker thread per pending lastcall window.

    With initialize=True and no pending windows, a single bootstrap window
    is created to seed the database.
    """
    try:
      call_ids = self.lastcall_backend.GetLastCallIds(self.engine_name, True)
      self.logger.debug('Obtained call_ids = %s' % call_ids)
    except Exception as be:
      self.logger.error('Error while checking last call state: %s' % be)

    lat = self.filters[0]
    lng = self.filters[1]
    radius = self.filters[2]
    count = 100  # Number of tweets to retrieve (max. 100)

    params = {
      'geocode': ','.join(map(str, (lat, lng, radius))),
      'count': count,
      'lang': self.language,
      'result_type': 'recent',
      'max_id': None,
      'since_id': None
    }

    first_call = False
    if len(call_ids) == 0:
      if initialize:
        first_call = True
        call_ids = [{ 'max_id': None, 'since_id': None}]
      else:
        self.logger.info("Engine colliding with other executions. Exiting.")
        return

    for call_id in call_ids:
      threading.Thread(target=self.RunCallEngine, args=[params, call_id, first_call]).start()
{ "repo_name": "biancini/TwitterAnalyzer", "path": "TwitterDownloader/TwitterEngine/rest.py", "copies": "1", "size": "10319", "license": "apache-2.0", "hash": -2028924356896617000, "line_mean": 35.5921985816, "line_max": 133, "alpha_frac": 0.639887586, "autogenerated": false, "ratio": 3.470904809956273, "config_test": true, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.46107923959562735, "avg_score": null, "num_lines": null }
__author__ = "Andrea Biancini" __date__ = "October 2, 2013" import logging class BackendChooser(object): @staticmethod def GetBackend(engine_config): # from mysqlbackend import MySQLBackend # backend = MySQLBackend() from elasticsearchbackend import ElasticSearchBackend backend = ElasticSearchBackend(engine_config) return backend class Backend(object): logger = None def __init__(self, engine_config): self.logger = logging.getLogger('engine-%s' % engine_config['name']) def BulkInsertTweetIntoDb(self, vals): raise NotImplementedError def InsertTweetIntoDb(self, sql_vals): raise NotImplementedError def GetLastCallIds(self): raise NotImplementedError def UpdateLastCallIds(self, max_id=None, since_id=None): raise NotImplementedError def GetAllTweetCoordinates(self): raise NotImplementedError def UpdateCoordinates(self, location, lat, lng): raise NotImplementedError def GetLocations(self): raise NotImplementedError def InsertFrenchDepartments(self, vals): raise NotImplementedError class BackendError(Exception): pass
{ "repo_name": "biancini/TwitterAnalyzer", "path": "TwitterDownloader/TwitterEngine/lastcallbackend/lastcallbackend.py", "copies": "1", "size": "1131", "license": "apache-2.0", "hash": -9219222756119823000, "line_mean": 23.5869565217, "line_max": 72, "alpha_frac": 0.7391688771, "autogenerated": false, "ratio": 4.235955056179775, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5475123933279775, "avg_score": null, "num_lines": null }
__author__ = "Andrea Biancini" __date__ = "October 2, 2013" import logging class BackendChooser(object): @staticmethod def GetBackend(logger): # from mysqlbackend import MySQLBackend # backend = MySQLBackend(logger) from elasticsearchbackend import ElasticSearchBackend backend = ElasticSearchBackend(logger) return backend class Backend(object): logger = None def __init__(self, engine_config): self.logger = logging.getLogger('engine-%s' % engine_config['name']) def BulkInsertTweetIntoDb(self, vals): raise NotImplementedError def InsertTweetIntoDb(self, sql_vals): raise NotImplementedError def DeleteTweetFromDb(self, tweet_id): raise NotImplementedError def GetUSAKmls(self): raise NotImplementedError def GetFrenchKmls(self): raise NotImplementedError def GetMaxId(self): raise NotImplementedError def RemoveOldTweets(self, max_date): raise NotImplementedError def GetAllTweetCoordinates(self): raise NotImplementedError def UpdateCoordinates(self, location, lat, lng): raise NotImplementedError def GetLocations(self): raise NotImplementedError def InsertUSAStates(self, vals): raise NotImplementedError def InsertFrenchDepartments(self, vals): raise NotImplementedError class BackendError(Exception): pass
{ "repo_name": "biancini/TwitterAnalyzer", "path": "TwitterDownloader/TwitterEngine/backend/backend.py", "copies": "1", "size": "1352", "license": "apache-2.0", "hash": -283629175175723970, "line_mean": 22.3103448276, "line_max": 72, "alpha_frac": 0.7389053254, "autogenerated": false, "ratio": 4.305732484076433, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5544637809476434, "avg_score": null, "num_lines": null }
__author__ = "Andrea Biancini" __date__ = "October 2, 2013" import MySQLdb import threading from backend import Backend, LastcallBackendError from ..secrets import dbhost, dbuser, dbpass, dbname def synchronized(func): func.__lock__ = threading.Lock() def synced_func(*args, **kws): with func.__lock__: return func(*args, **kws) return synced_func class MySQLBackend(Backend): con = None cur = None def __init__(self): try: self.con = MySQLdb.connect(host=dbhost, user=dbuser, passwd=dbpass, db=dbname, charset='utf8') self.logger.info("Connected to MySQL db %s:%s." % (dbhost, dbname)) self.cur = self.con.cursor() except Exception as e: raise LastcallBackendError("Error connecting to MySQL db %s:%s: %s" % (dbhost, dbname, e)) def __del__(self): if self.con: self.con.close() @synchronized def GetLastCallIds(self, engine_name, pop=False): try: self.cur.execute("SELECT `key`, `value` from lastcall WHERE `enginename` = '%s'" % engine_name) rows = self.cur.fetchall() ids = [None, None, None] for row in rows: if row[0] == 'max_id': ids[0] = row[1] elif row[0] == 'since_id': ids[1] = row[1] elif row[0] == 'top_id': ids[2] = row[1] if pop: self.DeleteLastCallId(engine_name, row[0]) return ids except Exception as e: raise LastcallBackendError("Error while retrieving last call ids from DB: %s" % e) @synchronized def DeleteLastCallId(self, engine_name, lastcall_id): self.logger.info("Deleting lastcall id = %s." % (lastcall_id)) try: sql = "DELETE FROM lastcall WHERE `engine_name` = '%s' AND id = %s" % (engine_name, lastcall_id) self.cur.execute(sql) self.con.commit() except Exception as e: raise LastcallBackendError("Error while deleting lastcall id %s from SQLite: %s" % (lastcall_id, e)) @synchronized def InsertLastCallIds(self, engine_name, max_id=None, since_id=None): if since_id is None: self.logger.error("Wrong parameters in lastcall update, since_id cannot be None.") return self.logger.info("Updating lastcall with values max_id = %s and since_id = %s." 
% (max_id, since_id)) try: self.cur.execute('''SELECT MAX(since_id) FROM lastcall WHERE engine_name = ? AND max_id IS NULL;''', (engine_name,)) row = self.cur.fetchone() if max_id is not None or row[0] is None: self.logger.debug("Inserting lastcall since no clashing lastcall already present into DB") self.cur.execute('''INSERT INTO lastcall (engine_name, max_id, since_id) VALUES (?, ?, ?);''', (engine_name, max_id, since_id)) self.con.commit() elif row[0] < since_id: self.logger.debug("Updating lastcall overwriting clashing lastcall already present into DB") self.cur.execute('''UPDATE lastcall SET since_id = ? WHERE engine_name = ? AND since_id = ? AND max_id IS NULL;''', (since_id, engine_name, row[0])) self.con.commit() else: self.logger.warn("Doing nothing.. database already updated...") except Exception as e: raise LastcallBackendError("Error while inserting lastcall ids into SQLite: %s" % e)
{ "repo_name": "biancini/TwitterAnalyzer", "path": "TwitterDownloader/TwitterEngine/lastcallbackend/mysqlbackend.py", "copies": "1", "size": "3363", "license": "apache-2.0", "hash": 980189935953313500, "line_mean": 36.3777777778, "line_max": 156, "alpha_frac": 0.6146297948, "autogenerated": false, "ratio": 3.5141065830721003, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.46287363778721, "avg_score": null, "num_lines": null }
__author__ = "Andrea Biancini" __date__ = "October 2, 2013" import MySQLdb from backend import Backend, BackendError from ..secrets import dbhost, dbuser, dbpass, dbname class MySQLBackend(Backend): con = None cur = None def __init__(self, engine_config): Backend.__init__(self, engine_config) try: self.con = MySQLdb.connect(host=dbhost, user=dbuser, passwd=dbpass, db=dbname, charset='utf8') self.logger.info("Connected to MySQL db %s:%s." % (dbhost, dbname)) self.cur = self.con.cursor() except Exception as e: raise BackendError("Error connecting to MySQL db %s:%s: %s" % (dbhost, dbname, e)) def __del__(self): if self.con: self.con.close() def BulkInsertTweetIntoDb(self, vals): num_inserted = 0 top_id = None try: for val in vals: num_inserted += self.InsertTweetIntoDb(val) if top_id is None or top_id < long(val['id']): top_id = long(val['id']) except BackendError as e: self.logger.error("Bulk insert not ok for tweet %s: %s " % (val['id'], e)) return (num_inserted, top_id) def InsertTweetIntoDb(self, vals): if vals is None: return 0 try: text = vals['text'].encode(encoding='ascii', errors='ignore').decode(encoding='ascii', errors='ignore') sql_vals = (vals['id'], vals['created_at'], text.replace('\\', '\\\\').replace('\'', '\\\''), vals['userid'], vals['location'].replace('\\', '\\\\').replace('\'', '\\\''), vals['latitude'], vals['longitude'], vals['num_friends']) sql = 'INSERT INTO tweets (`tweetid`, `timestamp`, `text`, `userid`, `user_location`, `latitude`, `longitude`, `num_friends`) ' sql += 'VALUES (%s, \'%s\', \'%s\', \'%s\', \'%s\', %s, %s, %s, %s, %s)' % sql_vals self.cur.execute(sql) self.con.commit() return 1 except Exception as e: code, _msg = e if code == 1062: self.con.rollback() raise BackendError("Tried to insert a tweet already present in the DB: %s" % vals[0]) else: self.logger.error("Exception while inserting tweet %s: %s" % (vals[0], e)) self.con.rollback() return 0 def GetUSAKmls(self): try: self.cur.execute("SELECT name, geometry FROM 
usa_states") rows = self.cur.fetchall() kmls = [] for row in rows: kmls.append((row[0], row[1])); return kmls except Exception as e: raise BackendError("Error while retrieving USA kmls from DB: %s" % e) def GetFrenchKmls(self): try: self.cur.execute("SELECT NOM_REG, KML FROM french_deps") rows = self.cur.fetchall() kmls = [] for row in rows: kmls.append((row[0], row[1])); return kmls except Exception as e: raise BackendError("Error while retrieving French kmls from DB: %s" % e) def GetMaxId(self): self.logger.info("Retrieving max tweet id from database.") try: self.cur.execute("SELECT max(`tweetid`) FROM tweets") row = self.cur.fetchone() return long(row[0]) except Exception as e: raise BackendError("Error while retrieving max tweet id from DB: %s" % e) def GetAllTweetCoordinates(self): try: # self.cur.execute("SELECT `timestamp`, `latitude`, `longitude` FROM tweets ORDER BY `timestamp` LIMIT 100") self.cur.execute("SELECT `timestamp`, `latitude`, `longitude` FROM tweets ORDER BY `timestamp`") rows = self.cur.fetchall() tweets = [] for row in rows: tweets.append([row[0], row[1], row[2]]); return tweets except Exception as e: raise BackendError("Error while retrieving tweet coordinates from DB: %s" % e) def GetLocations(self): try: self.cur.execute("SELECT user_location, COUNT(*) AS `number` FROM tweets WHERE latitude IS NULL GROUP BY user_location ORDER BY number DESC") rows = self.cur.fetchall() locations = [] for row in rows: locations.append(row[0]); return locations except Exception as e: raise BackendError("Error while retrieving locations from DB: %s" % e) def UpdateCoordinates(self, location, lat, lng): self.logger.info("Updating coordinate for location %s: [%s, %s]." 
% (location, lat, lng)) try: self.cur.execute("UPDATE tweets SET latitude = %s, longitude = %s WHERE user_location = '%s'" % (lat, lng, location.replace('\\', '\\\\').replace('\'', '\\\''))) self.con.commit() except Exception as e: raise BackendError("Error while updating coordinates for location into DB: %s" % e) def InsertUSAStates(self, vals): self.logger.info("Inserting row for %s (%s)." % (vals[0], vals[1])) field_list = 'id, name, geometry' try: sql = "INSERT INTO usa_states (%s) " % field_list sql += "VALUES ('%s','%s','%s')" % vals self.logger.debug(sql) self.cur.execute(sql) self.con.commit() except Exception as e: raise BackendError("Error while inserting USA State into DB: %s" % e) def InsertFrenchDepartments(self, vals): self.logger.info("Inserting row for %s, %s." % (vals[2], vals[4])) field_list = 'ID_GEOFLA,CODE_DEPT,NOM_DEPT,CODE_CHF,NOM_CHF,CODE_REG,NOM_REG,KML' try: sql = "INSERT INTO french_deps (%s) " % field_list sql += "VALUES (%d,'%s','%s','%s','%s','%s','%s','%s')" % vals self.logger.debug(sql) self.cur.execute(sql) self.con.commit() except Exception as e: raise BackendError("Error while inserting French department into DB: %s" % e)
{ "repo_name": "biancini/TwitterAnalyzer", "path": "TwitterDownloader/TwitterEngine/backend/mysqlbackend.py", "copies": "1", "size": "5792", "license": "apache-2.0", "hash": 808818195329621000, "line_mean": 33.2721893491, "line_max": 167, "alpha_frac": 0.5816643646, "autogenerated": false, "ratio": 3.570900123304562, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9552752114566674, "avg_score": 0.01996247466757763, "num_lines": 169 }
__author__ = "Andrea Biancini" __date__ = "October 2, 2013" import os from PIL import Image, ImageDraw, ImageFont # from backend import TwitterApiCall,MySQLBackend from backend import ElasticSearchBackend class DrawMap(): color = (255, 0, 0) lower_left = [41.0, -5.5] top_right = [51.6, 10.0] def __init__(self, auth_type): super(DrawMap, self).__init__(auth_type) self.backend = ElasticSearchBackend() def GetXY(self, size, lat, lng): x = size[0] * (lng - self.lower_left[1]) / (self.top_right[1] - self.lower_left[1]) y = size[1] * (lat - self.top_right[0]) / (self.lower_left[0] - self.top_right[0]) return [int(x), int(y)] def DrawPoint(self, draw, x, y): radius = 5 draw.ellipse((x - radius / 2, y - radius / 2, x + radius / 2, y + radius / 2), fill=self.color) del draw def ProduceImages(self): root_path = os.path.abspath(os.path.join(__file__, '..')) interval = 30 advancement = 0 imgnum = 1 persistence = 10 show = [] for i in range(0, persistence): show.append([]) tweets = self.backend.GetAllTweetCoordinates() if tweets is None: return start_time = tweets[0][0] end_time = tweets[-1][0] font = ImageFont.truetype("%s/../utilites/freesansbold.ttf" % root_path, 16) print "Creating images of France for all tweets in DB." print "Will be creating %d images." 
% ((end_time - start_time).seconds / interval) for tweet in tweets: if tweet[1] is None or tweet[2] is None: continue if tweet[2] < self.lower_left[0] or tweet[2] > self.top_right[0]: continue if tweet[1] < self.lower_left[1] or tweet[1] > self.top_right[1]: continue delta_secs = (tweet[0] - start_time).seconds if delta_secs <= interval: show[persistence - 1].append([tweet[2], tweet[1]]) else: img = Image.open("%s/../utilites/france.png" % root_path) draw = ImageDraw.Draw(img) allpoints = [item for sublist in show for item in sublist] for point in allpoints: [x, y] = self.GetXY(img.size, point[0], point[1]) self.DrawPoint(draw, x, y) draw.text((0, 0), tweet[0].strftime("%d/%m/%Y %H:%M"), self.color, font=font) img.save("%s/../map/france%05d.png" % (root_path, imgnum)) del draw advancement += 1 if advancement >= interval: print "Last image saved is number: %05d." % imgnum advancement = 0 imgnum += 1 start_time = tweet[0] for i in range(1, persistence): show[i - 1] = show[i] show[persistence - 1] = []
{ "repo_name": "biancini/TwitterAnalyzer", "path": "AnalysisTools/TwitterEngine/drawmap.py", "copies": "1", "size": "2613", "license": "apache-2.0", "hash": -358184455512217100, "line_mean": 29.0344827586, "line_max": 99, "alpha_frac": 0.5935706085, "autogenerated": false, "ratio": 3.1033254156769594, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.41968960241769593, "avg_score": null, "num_lines": null }
__author__ = "Andrea Biancini" __date__ = "October 2, 2013" import pprint import sys import json import logging import os root_path = os.path.abspath(os.path.join(__file__, '..', '..')) lib_path = os.path.join(root_path, 'lib') sys.path.insert(0, lib_path) from datetime import datetime from TwitterAPI import TwitterAPI from geopy import geocoders from shapely.geometry import shape, Point from shapely.geometry.collection import GeometryCollection from ..secrets import consumer_key, consumer_secret, access_token_key, access_token_secret class TwitterApiCall(object): continuerun = True @staticmethod def stop_run(): TwitterApiCall.continuerun = False @staticmethod def continuing(): return TwitterApiCall.continuerun engine_name = None language = None filters = None lock_file_download = None api = None backend = None auth_type = None initial_apiid = -1 apiid = -1 logger = None def __init__(self, engine_config, language, locking, auth_type='oAuth2'): self.engine_name = engine_config['name'] self.apiid = engine_config['apikey'] self.language = language self.filters = engine_config['filters'] self.auth_type = auth_type self.logger = logging.getLogger('engine-%s' % engine_config['name']) self.InitializeTwitterApi() def GetEngineName(self): return self.engine_name def IsLocking(self): return self.lock_file_download is not None def SetLockFileDownload(self, lock_file_download): self.lock_file_download = lock_file_download def GetLockFileDownload(self): return self.lock_file_download def getMechanism(self): raise NotImplementedError def InitializeTwitterApi(self): self.apiid += 1 self.apiid %= len(consumer_key) if self.apiid == self.initial_apiid: self.logger.warning("Tried to use every application key, no more available.") raise Exception() if self.initial_apiid == -1: self.initial_apiid = self.apiid self.logger.info("Initializing engine with consumer_key = %s" % consumer_key[self.apiid]) if self.auth_type == 'oAuth2': self.api = 
TwitterAPI(consumer_key=consumer_key[self.apiid], consumer_secret=consumer_secret[self.apiid], auth_type=self.auth_type) else: self.api = TwitterAPI(consumer_key=consumer_key[self.apiid], consumer_secret=consumer_secret[self.apiid], auth_type=self.auth_type, access_token_key=access_token_key[self.apiid], access_token_secret=access_token_secret[self.apiid]) def ProcessTweets(self): raise NotImplementedError def GetRateLimits(self): params = {} response = self.api.request('application/rate_limit_status', params) return json.loads(response.text) def PrintRateLimit(self): pp = pprint.PrettyPrinter(depth=6) pp.pprint(self.GetRateLimits()) def Geolocate(self, location): # g = geocoders.GoogleV3(google_clientid, google_secret) # g = geocoders.MapQuest(api_key=mapquest_appid) g = geocoders.GeoNames() try: for _place, (lat, lng) in g.geocode(location, exactly_one=False): self.logger.debug("Computed coordinates for %s: %s, %s." % (location, lat, lng)) coordinates = [str(lat), str(lng)] except Exception as e: self.logger.error("Error while geocoding: %s" % e) coordinates = ['NULL', 'NULL'] return coordinates def CheckPointInKml(self, kmls, lat, lng): p = Point(lng, lat) found = False for (name, kml_txt) in kmls: kml = eval(kml_txt) if 'geometry' in kml: kml_json = json.loads(json.dumps(kml['geometry'])) found = shape(kml_json).contains(p) if found: return name elif 'geometries' in kml: kml_jsons = json.loads(json.dumps(kml['geometries'])) for kml_json in kml_jsons: if shape(kml_json).contains(p): return name return None def FromTweetToVals(self, tweet, geolocate=True, exclude_out=True): date_object = datetime.strptime(tweet['created_at'], '%a %b %d %H:%M:%S +0000 %Y') # text = tweet['text'].encode(encoding='ascii', errors='ignore').decode(encoding='ascii', errors='ignore') text = tweet['text'] location = tweet['user']['location'] coordinates = ['NULL', 'NULL'] try: if tweet['coordinates'] and tweet['coordinates']['type'] == 'Point': c = tweet['coordinates']['coordinates'] 
coordinates[0] = c[1] coordinates[1] = c[0] elif tweet['place'] and tweet['place']['bounding_box']: kml_json = json.loads(json.dumps(tweet['place']['bounding_box'])) geom = shape(kml_json).centroid if type(geom) == GeometryCollection: if len(list(geom.geoms)) >= 1: coordinates = [list(geom.geoms)[0].y, list(geom.geoms)[0].x] elif type(geom) == Point: coordinates = [geom.y, geom.x] else: self.logger.warning("Tweet place is of unknown type: %s." % type(geom)) except Exception as e: self.logger.error("Error while parsing coordinates: %s" % e) if (coordinates[0] == 'NULL' or coordinates[1] == 'NULL') and geolocate: coordinates = self.Geolocate(location) kmls = None if exclude_out: if coordinates[0] == 'NULL' or coordinates[1] == 'NULL': return None kmls = self.backend.GetKmls() if kmls and not self.CheckPointInKml(kmls, float(coordinates[0]), float(coordinates[1])): return None ret_vals = {} ret_vals['id'] = tweet['id'] ret_vals['created_at'] = date_object.strftime('%Y-%m-%d %H:%M:%S') ret_vals['text'] = text ret_vals['userid'] = tweet['user']['id'] # ret_vals['hashtags'] = ', '.join([h['text'] for h in tweet['entities']['hashtags']]) ret_vals['location'] = location ret_vals['latitude'] = coordinates[0] ret_vals['longitude'] = coordinates[1] ret_vals['num_friends'] = tweet['user']['friends_count'] return ret_vals
{ "repo_name": "biancini/TwitterAnalyzer", "path": "TwitterDownloader/TwitterEngine/backend/twitterapi.py", "copies": "1", "size": "6089", "license": "apache-2.0", "hash": -3997585497951330000, "line_mean": 32.0923913043, "line_max": 110, "alpha_frac": 0.635408113, "autogenerated": false, "ratio": 3.5421756835369402, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9522401333776471, "avg_score": 0.031036492552093682, "num_lines": 184 }
__author__ = "Andrea Biancini" __date__ = "October 2, 2013" import requests import json from datetime import datetime from backend import Backend, BackendError from ..secrets import es_server class ElasticSearchBackend(Backend): def GetUSAKmls(self): print("Retrieving all USA states") try: start = 0 pagesize = 10 last = None rows = [] while True: data = { 'query' : { 'match_all' : { } }, 'from' : start, 'size' : pagesize } data_json = json.dumps(data, indent=2) host = "%s/twitter/usa_states/_search" % es_server req = requests.get(host, data=data_json) ret = json.loads(req.content) for hit in ret['hits']['hits']: curhit = [] if 'name' in hit['_source'] and 'geometry' in hit['_source']: curhit.append(hit['_source']['name'].replace('\\\'', '\'')) curhit.append(hit['_source']['geometry'].replace('\\\'', '\'')) rows.append(curhit) last = ret['hits']['total'] start += pagesize if start > last: break return rows except Exception as e: raise BackendError("Error while retrieving USA kmls from ElasticSearch: %s" % e) def GetFrenchKmls(self): print("Retrieving all French departments") try: start = 0 pagesize = 10 last = None rows = [] while True: data = { 'query' : { 'match_all' : { } }, 'from' : start, 'size' : pagesize } data_json = json.dumps(data, indent=2) host = "%s/twitter/french_depts/_search" % es_server req = requests.get(host, data=data_json) ret = json.loads(req.content) for hit in ret['hits']['hits']: curhit = [] if 'NOM_REG' in hit['_source'] and 'KML' in hit['_source']: curhit.append(hit['_source']['NOM_REG'].replace('\\\'', '\'')) curhit.append(hit['_source']['KML'].replace('\\\'', '\'')) rows.append(curhit) last = ret['hits']['total'] start += pagesize if start > last: break return rows except Exception as e: raise BackendError("Error while retrieving French kmls from ElasticSearch: %s" % e) def GetAllTweetCoordinates(self): try: start = 0 pagesize = 10 last = None tweets = [] while True: data = { 'query' : { 'match_all' : { } }, 'from' : start, 'size' : 
pagesize, 'fields' : ['coordinates', 'created_at'], 'sort' : [ { 'created_at' : 'asc' } ] } data_json = json.dumps(data, indent=2) host = "%s/twitter/tweets/_search" % es_server req = requests.get(host, data=data_json) ret = json.loads(req.content) for hit in ret['hits']['hits']: curhit = [] if 'created_at' in hit['fields'] and 'coordinates' in hit['fields']: created_at = datetime.strptime(hit['fields']['created_at'], '%Y-%m-%d %H:%M:%S') curhit.append(created_at) coordinates = hit['fields']['coordinates'] if ',' in coordinates: curhit.append(coordinates.split(',')[1]) curhit.append(coordinates.split(',')[0]) else: curhit.append(None) curhit.append(None) tweets.append(curhit) print("new google.maps.LatLng(%s, %s)," % (curhit[2], curhit[1])) last = ret['hits']['total'] start += pagesize if start > last: break return tweets except Exception as e: raise BackendError("Error while retrieving tweet coordinates from ElasticSearch: %s" % e) def GetLocations(self): try: data = { 'size' : 0, 'facets' : { 'locations': { 'terms' : { 'field' : 'location', 'size' : 20 }, 'global': True } } } data_json = json.dumps(data, indent=2) host = "%s/twitter/tweets/_search" % es_server req = requests.get(host, data=data_json) ret = json.loads(req.content) locations = [] for hit in ret['facets']['locations']['terms']: locations.append(hit['term']) return locations except Exception as e: raise BackendError("Error while retrieving locations from ElasticSearch: %s" % e) def _GetTweetsIdForLocation(self, location): try: start = 0 pagesize = 10 last = None rows = [] while True: data = { 'query' : { "term": { "location" : location } }, 'fields' : [ ], 'from' : start, 'size' : pagesize } data_json = json.dumps(data, indent=2) host = "%s/twitter/tweets/_search" % es_server req = requests.get(host, data=data_json) ret = json.loads(req.content) for hit in ret['hits']['hits']: if '_id' in hit: rows.append(hit['_id']) last = ret['hits']['total'] start += pagesize if start > last: break return rows 
except Exception as e: raise BackendError("Error while retrieving kmls from ElasticSearch: %s" % e) def UpdateCoordinates(self, location, lat, lng): print("Updating coordinate for location %s: [%s, %s]." % (location, lat, lng)) try: tweetids = self._GetTweetsIdForLocation(location) for tweetid in tweetids: data = { 'script' : 'ctx._source.coordinates = newcoords', 'params' : { 'newcoords' : "%s,%s" % (lat, lng) } } data_json = json.dumps(data, indent=2) host = "%s/twitter/tweets/%s/_update" % (es_server, tweetid) req = requests.post(host, data=data_json) ret = json.loads(req.content) if not ret["ok"]: raise Exception("Insert not ok") except Exception as e: raise BackendError("Error while updating coordinates for location into ElasticSearch: %s" % e) def InsertUSAStates(self, vals): print("Inserting row for %s (%s)." % (vals[0], vals[1])) try: data = { 'name' : vals[0], 'id' : vals[1], 'geometry' : vals[2] } data_json = json.dumps(data, indent=2) host = "%s/twitter/usa_states/%s" % (es_server, vals[0]) req = requests.put(host, data=data_json) ret = json.loads(req.content) if not ret["ok"]: raise BackendError("Insert not ok") except Exception as e: raise BackendError("Error while inserting USA State into ElasticSearch: %s" % e) def InsertFrenchDepartments(self, vals): print("Inserting row for %s, %s." % (vals[2], vals[6])) try: data = { 'ID_GEOFLA' : vals[0], 'CODE_DEPT' : vals[1], 'NOM_DEPT' : vals[2], 'CODE_CHF' : vals[3], 'NOM_CHF' : vals[4], 'CODE_REG' : vals[5], 'NOM_REG' : vals[6], 'KML' : vals[7] } data_json = json.dumps(data, indent=2) host = "%s/twitter/french_depts/%s" % (es_server, vals[0]) req = requests.put(host, data=data_json) ret = json.loads(req.content) if not ret["ok"]: raise BackendError("Insert not ok") except Exception as e: raise BackendError("Error while inserting French department into ElasticSearch: %s" % e)
{ "repo_name": "biancini/TwitterAnalyzer", "path": "AnalysisTools/TwitterEngine/backend/elasticsearchbackend.py", "copies": "1", "size": "7354", "license": "apache-2.0", "hash": -192580703727061920, "line_mean": 33.6886792453, "line_max": 112, "alpha_frac": 0.5380745173, "autogenerated": false, "ratio": 3.6155358898721732, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9530182450069171, "avg_score": 0.02468559142060056, "num_lines": 212 }
__author__ = "Andrea Biancini" __date__ = "October 2, 2013" import threading import sqlite3 from ..secrets import sqlite_db_path from backend import Backend, LastcallBackendError def synchronized(func): func.__lock__ = threading.Lock() def synced_func(*args, **kws): with func.__lock__: return func(*args, **kws) return synced_func class SQLiteBackend(Backend): def __init__(self, engine_config): super(SQLiteBackend, self).__init__(engine_config) def _GetLastCallDb(self): db = sqlite3.connect('%s/lastcall.db' % sqlite_db_path) cursor = db.cursor() cursor.execute('''SELECT name FROM sqlite_master WHERE type='table' AND name='lastcall';''') if cursor.fetchone() is None: cursor.execute('''CREATE TABLE lastcall (id INTEGER PRIMARY KEY AUTOINCREMENT, engine_name TEXT, max_id LONG, since_id LONG NOT NULL, UNIQUE (engine_name, max_id, since_id) ON CONFLICT REPLACE)''') db.commit() return db @synchronized def GetLastCallIds(self, engine_name, pop=False): db = None cursor = None try: db = self._GetLastCallDb() cursor = db.cursor() cursor.execute('''SELECT id, max_id, since_id FROM lastcall WHERE engine_name = ?;''', (engine_name,)) all_rows = cursor.fetchall() ids = [] for row in all_rows: new_id = {} new_id['id'] = row[0] new_id['max_id'] = row[1] new_id['since_id'] = row[2] ids.append(new_id) if pop: self.logger.info('Deleting lastcall id = %s.' % row[0]) cursor.execute('''DELETE FROM lastcall WHERE engine_name = ? AND id = ?''', (engine_name, row[0])) db.commit() return ids except Exception as e: if cursor is not None: db.rollback() raise LastcallBackendError("Error while retrieving last call ids from SQLite: %s" % e) finally: if db is not None: db.close() @synchronized def DeleteLastCallId(self, engine_name, lastcall_id): self.logger.info("Deleting lastcall id = %s." % (lastcall_id)) db = None cursor = None try: db = self._GetLastCallDb() cursor = db.cursor() cursor.execute('''DELETE FROM lastcall WHERE engine_name = ? 
AND id = ?''', (engine_name, lastcall_id)) db.commit() except Exception as e: if cursor is not None: db.rollback() raise LastcallBackendError("Error while deleting lastcall id %s from SQLite: %s" % (lastcall_id, e)) finally: if db is not None: db.close() @synchronized def InsertLastCallIds(self, engine_name, max_id=None, since_id=None): self.logger.info("Updating lastcall with values max_id = %s and since_id = %s." % (max_id, since_id)) if since_id is None: self.logger.error("Wrong parameters in lastcall update, since_id cannot be None.") return if max_id is not None and max_id <= since_id: self.logger.error("Wrong parameters in lastcall update, max_id smaller than since_id: %s, %s." % (max_id, since_id)) return db = None cursor = None try: db = self._GetLastCallDb() cursor = db.cursor() cursor.execute('''SELECT MAX(since_id) FROM lastcall WHERE engine_name = ? AND max_id IS NULL;''', (engine_name,)) row = cursor.fetchone() if row[0] is None: self.logger.debug("No lastcall with max_id = None found in DB.") else: self.logger.debug("Lastcall with max_id = None and since_id = %s found in DB." % row[0]) if max_id is not None or row[0] is None: self.logger.debug("Inserting lastcall since no clashing lastcall already present into DB") cursor.execute('''INSERT INTO lastcall (engine_name, max_id, since_id) VALUES (?, ?, ?);''', (engine_name, max_id, since_id)) db.commit() elif row[0] < since_id: self.logger.debug("Updating lastcall overwriting clashing lastcall already present into DB") cursor.execute('''UPDATE lastcall SET since_id = ? WHERE engine_name = ? AND since_id = ? AND max_id IS NULL;''', (since_id, engine_name, row[0])) db.commit() else: self.logger.info("Doing nothing.. database already updated...") db.commit() except Exception as e: if cursor is not None: db.rollback() raise LastcallBackendError("Error while inserting lastcall ids into SQLite: %s" % e) finally: if db is not None: db.close()
{ "repo_name": "biancini/TwitterAnalyzer", "path": "TwitterDownloader/TwitterEngine/lastcallbackend/sqllitebackend.py", "copies": "1", "size": "4394", "license": "apache-2.0", "hash": -9119506546307594000, "line_mean": 35.3140495868, "line_max": 203, "alpha_frac": 0.630860264, "autogenerated": false, "ratio": 3.532154340836013, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.46630146048360127, "avg_score": null, "num_lines": null }
__author__ = "Andrea Fioraldi" __copyright__ = "Copyright 2017, Andrea Fioraldi" __license__ = "MIT" __email__ = "andreafioraldi@gmail.com" import idaapi import subprocess import idc import os import threading pwd = os.path.dirname(__file__) def startView(buf): view = subprocess.Popen( [os.path.join(pwd, "IdaGrabStringsView.exe")], stdout=subprocess.PIPE, stdin=subprocess.PIPE, shell=True ) view.communicate(input=buf) def startViewGetPosition(): pos = idc.ScreenEA() view = subprocess.Popen( [os.path.join(pwd, "IdaGrabStringsView.exe"), "0x"+hex(pos)], stdout=subprocess.PIPE, stdin=subprocess.PIPE ) output = view.communicate()[0] lines = output.split("\n") return (int(lines[0]), int(lines[1])) def fromPositionThread(): pos, length = startViewGetPosition() idaapi.msg("IdaGrabStrings: getted position = "+hex(pos)+" "+str(length)+"\n") buf = idc.GetManyBytes(pos, length, False) startView(buf) def fromPosition(): thread = threading.Thread(target=fromPositionThread, args=tuple()) thread.deamon = True thread.start() def fromSelection(): sel = idaapi.read_selection() buf = idc.GetManyBytes(sel[1], sel[2] - sel[1], False) thread = threading.Thread(target=startView, args=(buf, )) thread.deamon = True thread.start() MENU_PATH = 'Edit/Other' class IdaGrabStringsPlugin(idaapi.plugin_t): flags = idaapi.PLUGIN_KEEP comment = "" help = "IdaGrabStrings: Grab strings from a bytes buffer in IDA" wanted_name = "IDA Grab Strings" wanted_hotkey = "Alt-8" def init(self): r = idaapi.add_menu_item(MENU_PATH, 'IdaGrabStrings - From position', '', 1, fromPosition, tuple()) if r is None: idaapi.msg("IdaGrabStrings: add menu failed!\n") return idaapi.PLUGIN_SKIP r = idaapi.add_menu_item(MENU_PATH, 'IdaGrabStrings - From selection', '', 1, fromSelection, tuple()) if r is None: idaapi.msg("IdaGrabStrings: add menu failed!\n") return idaapi.PLUGIN_SKIP idaapi.msg("IdaGrabStrings: initialized\n") return idaapi.PLUGIN_KEEP def run(self, arg): pass def term(self): idaapi.msg("IdaGrabStrings: 
terminated\n") def PLUGIN_ENTRY(): return IdaGrabStringsPlugin()
{ "repo_name": "andreafioraldi/IdaGrabStrings", "path": "IdaGrabStrings.py", "copies": "1", "size": "2383", "license": "mit", "hash": -3822972179079327000, "line_mean": 27.7108433735, "line_max": 109, "alpha_frac": 0.6374318086, "autogenerated": false, "ratio": 3.2643835616438355, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9307518710843388, "avg_score": 0.01885933188008961, "num_lines": 83 }
__author__ = "Andrea Gavana <andrea.gavana@gmail.com>" __date__ = "31 March 2009" import wx import auibook from aui_constants import * _ = wx.GetTranslation #----------------------------------------------------------------------------- # AuiMDIParentFrame #----------------------------------------------------------------------------- class AuiMDIParentFrame(wx.Frame): def __init__(self, parent, id=wx.ID_ANY, title="", pos=wx.DefaultPosition, size=wx.DefaultSize, style=wx.DEFAULT_FRAME_STYLE|wx.VSCROLL|wx.HSCROLL, name="AuiMDIParentFrame"): wx.Frame.__init__(self, parent, id, title, pos, size, style, name=name) self.Init() self.Bind(wx.EVT_MENU, self.DoHandleMenu, id=wx.ID_ANY) # this style can be used to prevent a window from having the standard MDI # "Window" menu if not style & wx.FRAME_NO_WINDOW_MENU: self._pWindowMenu = wx.Menu() self._pWindowMenu.Append(wxWINDOWCLOSE, _("Cl&ose")) self._pWindowMenu.Append(wxWINDOWCLOSEALL, _("Close All")) self._pWindowMenu.AppendSeparator() self._pWindowMenu.Append(wxWINDOWNEXT, _("&Next")) self._pWindowMenu.Append(wxWINDOWPREV, _("&Previous")) self._pClientWindow = self.OnCreateClient() def SetArtProvider(self, provider): if self._pClientWindow: self._pClientWindow.SetArtProvider(provider) def GetArtProvider(self): if not self._pClientWindow: return None return self._pClientWindow.GetArtProvider() def GetNotebook(self): return self._pClientWindow def SetWindowMenu(self, pMenu): # Replace the window menu from the currently loaded menu bar. pMenuBar = self.GetMenuBar() if self._pWindowMenu: self.RemoveWindowMenu(pMenuBar) del self._pWindowMenu self._pWindowMenu = None if pMenu: self._pWindowMenu = pMenu self.AddWindowMenu(pMenuBar) def GetWindowMenu(self): return self._pWindowMenu def SetMenuBar(self, pMenuBar): # Remove the Window menu from the old menu bar self.RemoveWindowMenu(self.GetMenuBar()) # Add the Window menu to the new menu bar. 
self.AddWindowMenu(pMenuBar) wx.Frame.SetMenuBar(self, pMenuBar) def SetChildMenuBar(self, pChild): if not pChild: # No Child, set Our menu bar back. if self._pMyMenuBar: self.SetMenuBar(self._pMyMenuBar) else: self.SetMenuBar(self.GetMenuBar()) # Make sure we know our menu bar is in use self._pMyMenuBar = None else: if pChild.GetMenuBar() == None: return # Do we need to save the current bar? if self._pMyMenuBar == None: self._pMyMenuBar = self.GetMenuBar() self.SetMenuBar(pChild.GetMenuBar()) def ProcessEvent(self, event): # stops the same event being processed repeatedly if self._pLastEvt == event: return False self._pLastEvt = event # let the active child (if any) process the event first. res = False if self._pActiveChild and event.IsCommandEvent() and \ event.GetEventObject() != self._pClientWindow and \ event.GetEventType() not in [wx.wxEVT_ACTIVATE, wx.wxEVT_SET_FOCUS, wx.wxEVT_KILL_FOCUS, wx.wxEVT_CHILD_FOCUS, wx.wxEVT_COMMAND_SET_FOCUS, wx.wxEVT_COMMAND_KILL_FOCUS]: res = self._pActiveChild.GetEventHandler().ProcessEvent(event) if not res: # if the event was not handled this frame will handle it, # which is why we need the protection code at the beginning # of this method res = self.GetEventHandler().ProcessEvent(event) self._pLastEvt = None return res def GetActiveChild(self): return self._pActiveChild def SetActiveChild(self, pChildFrame): self._pActiveChild = pChildFrame def GetClientWindow(self): return self._pClientWindow def OnCreateClient(self): return AuiMDIClientWindow(self) def ActivateNext(self): if self._pClientWindow and self._pClientWindow.GetSelection() != wx.NOT_FOUND: active = self._pClientWindow.GetSelection() + 1 if active >= self._pClientWindow.GetPageCount(): active = 0 self._pClientWindow.SetSelection(active) def ActivatePrevious(self): if self._pClientWindow and self._pClientWindow.GetSelection() != wx.NOT_FOUND: active = self._pClientWindow.GetSelection() - 1 if active < 0: active = self._pClientWindow.GetPageCount() - 1 
self._pClientWindow.SetSelection(active) def Init(self): self._pLastEvt = None self._pClientWindow = None self._pActiveChild = None self._pWindowMenu = None self._pMyMenuBar = None def RemoveWindowMenu(self, pMenuBar): if pMenuBar and self._pWindowMenu: # Remove old window menu pos = pMenuBar.FindMenu(_("&Window")) if pos != wx.NOT_FOUND: pMenuBar.Remove(pos) def AddWindowMenu(self, pMenuBar): if pMenuBar and self._pWindowMenu: pos = pMenuBar.FindMenu(wx.GetStockLabel(wx.ID_HELP, wx.STOCK_NOFLAGS)) if pos == wx.NOT_FOUND: pMenuBar.Append(self._pWindowMenu, _("&Window")) else: pMenuBar.Insert(pos, self._pWindowMenu, _("&Window")) def DoHandleMenu(self, event): evId = event.GetId() if evId == wxWINDOWCLOSE: if self._pActiveChild: self._pActiveChild.Close() elif evId == wxWINDOWCLOSEALL: while self._pActiveChild: if not self._pActiveChild.Close(): return # failure elif evId == wxWINDOWNEXT: self.ActivateNext() elif evId == wxWINDOWPREV: self.ActivatePrevious() else: event.Skip() def Tile(self, orient=wx.HORIZONTAL): client_window = self.GetClientWindow() if not client_window: raise Exception("Missing MDI Client Window") cur_idx = client_window.GetSelection() if cur_idx == -1: return if orient == wx.VERTICAL: client_window.Split(cur_idx, wx.LEFT) elif orient == wx.HORIZONTAL: client_window.Split(cur_idx, wx.TOP) #----------------------------------------------------------------------------- # AuiMDIChildFrame #----------------------------------------------------------------------------- class AuiMDIChildFrame(wx.PyPanel): def __init__(self, parent, id=wx.ID_ANY, title="", pos=wx.DefaultPosition, size=wx.DefaultSize, style=wx.DEFAULT_FRAME_STYLE, name="AuiMDIChildFrame"): pClientWindow = parent.GetClientWindow() if pClientWindow is None: raise Exception("Missing MDI client window.") self.Init() # see comment in constructor if style & wx.MINIMIZE: self._activate_on_create = False cli_size = pClientWindow.GetClientSize() # create the window off-screen to prevent flicker 
wx.PyPanel.__init__(self, pClientWindow, id, wx.Point(cli_size.x+1, cli_size.y+1), size, wx.NO_BORDER, name=name) self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM) self.Show(False) self.SetMDIParentFrame(parent) # this is the currently active child parent.SetActiveChild(self) self._title = title pClientWindow.AddPage(self, title, self._activate_on_create) pClientWindow.Refresh() self.Bind(wx.EVT_MENU_HIGHLIGHT_ALL, self.OnMenuHighlight) self.Bind(wx.EVT_ACTIVATE, self.OnActivate) self.Bind(wx.EVT_CLOSE, self.OnCloseWindow) def Init(self): # There are two ways to create an tabbed mdi child fram without # making it the active document. Either Show(False) can be called # before Create() (as is customary on some ports with wxFrame-type # windows), or wx.MINIMIZE can be passed in the style flags. Note that # AuiMDIChildFrame is not really derived from wxFrame, as MDIChildFrame # is, but those are the expected symantics. No style flag is passed # onto the panel underneath. self._activate_on_create = True self._pMDIParentFrame = None self._pMenuBar = None self._mdi_currect = None self._mdi_newrect = wx.Rect() self._icon = None self._icon_bundle = None def Destroy(self): pParentFrame = self.GetMDIParentFrame() if not pParentFrame: raise Exception("Missing MDI Parent Frame") pClientWindow = pParentFrame.GetClientWindow() if not pClientWindow: raise Exception("Missing MDI Client Window") if pParentFrame.GetActiveChild() == self: # deactivate ourself event = wx.ActivateEvent(wx.wxEVT_ACTIVATE, False, self.GetId()) event.SetEventObject(self) self.GetEventHandler().ProcessEvent(event) pParentFrame.SetActiveChild(None) pParentFrame.SetChildMenuBar(None) for pos in xrange(pClientWindow.GetPageCount()): if pClientWindow.GetPage(pos) == self: return pClientWindow.DeletePage(pos) return False def SetMenuBar(self, menu_bar): pOldMenuBar = self._pMenuBar self._pMenuBar = menu_bar if self._pMenuBar: pParentFrame = self.GetMDIParentFrame() if not pParentFrame: raise Exception("Missing MDI 
Parent Frame") self._pMenuBar.Reparent(pParentFrame) if pParentFrame.GetActiveChild() == self: # replace current menu bars if pOldMenuBar: pParentFrame.SetChildMenuBar(None) pParentFrame.SetChildMenuBar(self) def GetMenuBar(self): return self._pMenuBar def SetTitle(self, title): self._title = title pParentFrame = self.GetMDIParentFrame() if not pParentFrame: raise Exception("Missing MDI Parent Frame") pClientWindow = pParentFrame.GetClientWindow() if pClientWindow is not None: for pos in xrange(pClientWindow.GetPageCount()): if pClientWindow.GetPage(pos) == self: pClientWindow.SetPageText(pos, self._title) break def GetTitle(self): return self._title def SetIcons(self, icons): # get icon with the system icon size self.SetIcon(icons.GetIcon(-1)) self._icon_bundle = icons def GetIcons(self): return self._icon_bundle def SetIcon(self, icon): pParentFrame = self.GetMDIParentFrame() if not pParentFrame: raise Exception("Missing MDI Parent Frame") self._icon = icon bmp = wx.BitmapFromIcon(self._icon) pClientWindow = pParentFrame.GetClientWindow() if pClientWindow is not None: idx = pClientWindow.GetPageIndex(self) if idx != -1: pClientWindow.SetPageBitmap(idx, bmp) def GetIcon(self): return self._icon def Activate(self): pParentFrame = self.GetMDIParentFrame() if not pParentFrame: raise Exception("Missing MDI Parent Frame") pClientWindow = pParentFrame.GetClientWindow() if pClientWindow is not None: for pos in xrange(pClientWindow.GetPageCount()): if pClientWindow.GetPage(pos) == self: pClientWindow.SetSelection(pos) break def OnMenuHighlight(self, event): if self._pMDIParentFrame: # we don't have any help text for this item, # but may be the MDI frame does? 
self._pMDIParentFrame.OnMenuHighlight(event) def OnActivate(self, event): # do nothing pass def OnCloseWindow(self, event): pParentFrame = self.GetMDIParentFrame() if pParentFrame: if pParentFrame.GetActiveChild() == self: pParentFrame.SetActiveChild(None) pParentFrame.SetChildMenuBar(None) pClientWindow = pParentFrame.GetClientWindow() idx = pClientWindow.GetPageIndex(self) if idx != wx.NOT_FOUND: pClientWindow.RemovePage(idx) self.Destroy() def SetMDIParentFrame(self, parentFrame): self._pMDIParentFrame = parentFrame def GetMDIParentFrame(self): return self._pMDIParentFrame def CreateStatusBar(self, number=1, style=1, winid=1, name=""): return None def GetStatusBar(self): return None def SetStatusText(self, text, number=0): pass def SetStatusWidths(self, widths_field): pass # no toolbar bars def CreateToolBar(self, style=1, winid=-1, name=""): return None def GetToolBar(self): return None # no maximize etc def Maximize(self, maximize=True): pass def Restore(self): pass def Iconize(self, iconize=True): pass def IsMaximized(self): return True def IsIconized(self): return False def ShowFullScreen(self, show=True, style=0): return False def IsFullScreen(self): return False def IsTopLevel(self): return False # renamed from Show(). 
def ActivateOnCreate(self, activate_on_create): self._activate_on_create = activate_on_create return True def Show(self, show=True): wx.PyPanel.Show(self, show) def ApplyMDIChildFrameRect(self): if self._mdi_currect != self._mdi_newrect: self.SetDimensions(*self._mdi_newrect) self._mdi_currect = wx.Rect(*self._mdi_newrect) #----------------------------------------------------------------------------- # AuiMDIClientWindow #----------------------------------------------------------------------------- class AuiMDIClientWindow(auibook.AuiNotebook): def __init__(self, parent, agwStyle=0): auibook.AuiNotebook.__init__(self, parent, wx.ID_ANY, wx.Point(0, 0), wx.Size(100, 100), agwStyle=AUI_NB_DEFAULT_STYLE|wx.NO_BORDER) caption_icon_size = wx.Size(wx.SystemSettings.GetMetric(wx.SYS_SMALLICON_X), wx.SystemSettings.GetMetric(wx.SYS_SMALLICON_Y)) self.SetUniformBitmapSize(caption_icon_size) bkcolour = wx.SystemSettings.GetColour(wx.SYS_COLOUR_APPWORKSPACE) self.SetOwnBackgroundColour(bkcolour) self._mgr.GetArtProvider().SetColour(AUI_DOCKART_BACKGROUND_COLOUR, bkcolour) self.Bind(auibook.EVT_AUINOTEBOOK_PAGE_CHANGED, self.OnPageChanged) self.Bind(auibook.EVT_AUINOTEBOOK_PAGE_CLOSE, self.OnPageClose) self.Bind(wx.EVT_SIZE, self.OnSize) def SetSelection(self, nPage): return auibook.AuiNotebook.SetSelection(self, nPage) def PageChanged(self, old_selection, new_selection): # don't do anything if the page doesn't actually change if old_selection == new_selection: return # notify old active child that it has been deactivated if old_selection != -1 and old_selection < self.GetPageCount(): old_child = self.GetPage(old_selection) if not old_child: raise Exception("AuiMDIClientWindow.PageChanged - null page pointer") event = wx.ActivateEvent(wx.wxEVT_ACTIVATE, False, old_child.GetId()) event.SetEventObject(old_child) old_child.GetEventHandler().ProcessEvent(event) # notify new active child that it has been activated if new_selection != -1: active_child = self.GetPage(new_selection) 
if not active_child: raise Exception("AuiMDIClientWindow.PageChanged - null page pointer") event = wx.ActivateEvent(wx.wxEVT_ACTIVATE, True, active_child.GetId()) event.SetEventObject(active_child) active_child.GetEventHandler().ProcessEvent(event) if active_child.GetMDIParentFrame(): active_child.GetMDIParentFrame().SetActiveChild(active_child) active_child.GetMDIParentFrame().SetChildMenuBar(active_child) def OnPageClose(self, event): wnd = self.GetPage(event.GetSelection()) wnd.Close() # regardless of the result of wnd.Close(), we've # already taken care of the close operations, so # suppress further processing event.Veto() def OnPageChanged(self, event): self.PageChanged(event.GetOldSelection(), event.GetSelection()) def OnSize(self, event): auibook.AuiNotebook.OnSize(self, event) for pos in xrange(self.GetPageCount()): self.GetPage(pos).ApplyMDIChildFrameRect()
{ "repo_name": "nyov/dmide", "path": "core/agw/aui/tabmdi.py", "copies": "1", "size": "17529", "license": "bsd-3-clause", "hash": -5952654894197622000, "line_mean": 25.3198198198, "line_max": 97, "alpha_frac": 0.5881681784, "autogenerated": false, "ratio": 4.020412844036697, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5108581022436697, "avg_score": null, "num_lines": null }
__author__ = "Andrea Gavana <andrea.gavana@gmail.com>" __date__ = "31 March 2009" import wx import auibook from aui_constants import * _ = wx.GetTranslation #----------------------------------------------------------------------------- # AuiMDIParentFrame #----------------------------------------------------------------------------- class AuiMDIParentFrame(wx.Frame): def __init__(self, parent, id=wx.ID_ANY, title="", pos=wx.DefaultPosition, size=wx.DefaultSize, style=wx.DEFAULT_FRAME_STYLE|wx.VSCROLL|wx.HSCROLL, name="AuiMDIParentFrame"): wx.Frame.__init__(self, parent, id, title, pos, size, style, name=name) self.Init() self.Bind(wx.EVT_MENU, self.DoHandleMenu, id=wx.ID_ANY) # this style can be used to prevent a window from having the standard MDI # "Window" menu if not style & wx.FRAME_NO_WINDOW_MENU: self._pWindowMenu = wx.Menu() self._pWindowMenu.Append(wxWINDOWCLOSE, _("Cl&ose")) self._pWindowMenu.Append(wxWINDOWCLOSEALL, _("Close All")) self._pWindowMenu.AppendSeparator() self._pWindowMenu.Append(wxWINDOWNEXT, _("&Next")) self._pWindowMenu.Append(wxWINDOWPREV, _("&Previous")) self._pClientWindow = self.OnCreateClient() def SetArtProvider(self, provider): if self._pClientWindow: self._pClientWindow.SetArtProvider(provider) def GetArtProvider(self): if not self._pClientWindow: return None return self._pClientWindow.GetArtProvider() def GetNotebook(self): return self._pClientWindow def SetWindowMenu(self, pMenu): # Replace the window menu from the currently loaded menu bar. pMenuBar = self.GetMenuBar() if self._pWindowMenu: self.RemoveWindowMenu(pMenuBar) del self._pWindowMenu self._pWindowMenu = None if pMenu: self._pWindowMenu = pMenu self.AddWindowMenu(pMenuBar) def GetWindowMenu(self): return self._pWindowMenu def SetMenuBar(self, pMenuBar): # Remove the Window menu from the old menu bar self.RemoveWindowMenu(self.GetMenuBar()) # Add the Window menu to the new menu bar. 
self.AddWindowMenu(pMenuBar) wx.Frame.SetMenuBar(self, pMenuBar) def SetChildMenuBar(self, pChild): if not pChild: # No Child, set Our menu bar back. if self._pMyMenuBar: self.SetMenuBar(self._pMyMenuBar) else: self.SetMenuBar(self.GetMenuBar()) # Make sure we know our menu bar is in use self._pMyMenuBar = None else: if pChild.GetMenuBar() == None: return # Do we need to save the current bar? if self._pMyMenuBar == None: self._pMyMenuBar = self.GetMenuBar() self.SetMenuBar(pChild.GetMenuBar()) def ProcessEvent(self, event): # stops the same event being processed repeatedly if self._pLastEvt == event: return False self._pLastEvt = event # let the active child (if any) process the event first. res = False if self._pActiveChild and event.IsCommandEvent() and \ event.GetEventObject() != self._pClientWindow and \ event.GetEventType() not in [wx.wxEVT_ACTIVATE, wx.wxEVT_SET_FOCUS, wx.wxEVT_KILL_FOCUS, wx.wxEVT_CHILD_FOCUS, wx.wxEVT_COMMAND_SET_FOCUS, wx.wxEVT_COMMAND_KILL_FOCUS]: res = self._pActiveChild.GetEventHandler().ProcessEvent(event) if not res: # if the event was not handled this frame will handle it, # which is why we need the protection code at the beginning # of this method res = self.GetEventHandler().ProcessEvent(event) self._pLastEvt = None return res def GetActiveChild(self): return self._pActiveChild def SetActiveChild(self, pChildFrame): self._pActiveChild = pChildFrame def GetClientWindow(self): return self._pClientWindow def OnCreateClient(self): return AuiMDIClientWindow(self) def ActivateNext(self): if self._pClientWindow and self._pClientWindow.GetSelection() != wx.NOT_FOUND: active = self._pClientWindow.GetSelection() + 1 if active >= self._pClientWindow.GetPageCount(): active = 0 self._pClientWindow.SetSelection(active) def ActivatePrevious(self): if self._pClientWindow and self._pClientWindow.GetSelection() != wx.NOT_FOUND: active = self._pClientWindow.GetSelection() - 1 if active < 0: active = self._pClientWindow.GetPageCount() - 1 
self._pClientWindow.SetSelection(active) def Init(self): self._pLastEvt = None self._pClientWindow = None self._pActiveChild = None self._pWindowMenu = None self._pMyMenuBar = None def RemoveWindowMenu(self, pMenuBar): if pMenuBar and self._pWindowMenu: # Remove old window menu pos = pMenuBar.FindMenu(_("&Window")) if pos != wx.NOT_FOUND: pMenuBar.Remove(pos) def AddWindowMenu(self, pMenuBar): if pMenuBar and self._pWindowMenu: pos = pMenuBar.FindMenu(wx.GetStockLabel(wx.ID_HELP, wx.STOCK_NOFLAGS)) if pos == wx.NOT_FOUND: pMenuBar.Append(self._pWindowMenu, _("&Window")) else: pMenuBar.Insert(pos, self._pWindowMenu, _("&Window")) def DoHandleMenu(self, event): evId = event.GetId() if evId == wxWINDOWCLOSE: if self._pActiveChild: self._pActiveChild.Close() elif evId == wxWINDOWCLOSEALL: while self._pActiveChild: if not self._pActiveChild.Close(): return # failure elif evId == wxWINDOWNEXT: self.ActivateNext() elif evId == wxWINDOWPREV: self.ActivatePrevious() else: event.Skip() def Tile(self, orient=wx.HORIZONTAL): client_window = self.GetClientWindow() if not client_window: raise Exception("Missing MDI Client Window") cur_idx = client_window.GetSelection() if cur_idx == -1: return if orient == wx.VERTICAL: client_window.Split(cur_idx, wx.LEFT) elif orient == wx.HORIZONTAL: client_window.Split(cur_idx, wx.TOP) #----------------------------------------------------------------------------- # AuiMDIChildFrame #----------------------------------------------------------------------------- class AuiMDIChildFrame(wx.PyPanel): def __init__(self, parent, id=wx.ID_ANY, title="", pos=wx.DefaultPosition, size=wx.DefaultSize, style=wx.DEFAULT_FRAME_STYLE, name="AuiMDIChildFrame"): pClientWindow = parent.GetClientWindow() if pClientWindow is None: raise Exception("Missing MDI client window.") self.Init() # see comment in constructor if style & wx.MINIMIZE: self._activate_on_create = False cli_size = pClientWindow.GetClientSize() # create the window off-screen to prevent flicker 
wx.PyPanel.__init__(self, pClientWindow, id, wx.Point(cli_size.x+1, cli_size.y+1), size, wx.NO_BORDER, name=name) self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM) self.Show(False) self.SetMDIParentFrame(parent) # this is the currently active child parent.SetActiveChild(self) self._title = title pClientWindow.AddPage(self, title, self._activate_on_create) pClientWindow.Refresh() self.Bind(wx.EVT_MENU_HIGHLIGHT_ALL, self.OnMenuHighlight) self.Bind(wx.EVT_ACTIVATE, self.OnActivate) self.Bind(wx.EVT_CLOSE, self.OnCloseWindow) def Init(self): # There are two ways to create an tabbed mdi child fram without # making it the active document. Either Show(False) can be called # before Create() (as is customary on some ports with wxFrame-type # windows), or wx.MINIMIZE can be passed in the style flags. Note that # AuiMDIChildFrame is not really derived from wxFrame, as MDIChildFrame # is, but those are the expected symantics. No style flag is passed # onto the panel underneath. self._activate_on_create = True self._pMDIParentFrame = None self._pMenuBar = None self._mdi_currect = None self._mdi_newrect = wx.Rect() self._icon = None self._icon_bundle = None def Destroy(self): pParentFrame = self.GetMDIParentFrame() if not pParentFrame: raise Exception("Missing MDI Parent Frame") pClientWindow = pParentFrame.GetClientWindow() if not pClientWindow: raise Exception("Missing MDI Client Window") if pParentFrame.GetActiveChild() == self: # deactivate ourself event = wx.ActivateEvent(wx.wxEVT_ACTIVATE, False, self.GetId()) event.SetEventObject(self) self.GetEventHandler().ProcessEvent(event) pParentFrame.SetActiveChild(None) pParentFrame.SetChildMenuBar(None) for pos in xrange(pClientWindow.GetPageCount()): if pClientWindow.GetPage(pos) == self: return pClientWindow.DeletePage(pos) return False def SetMenuBar(self, menu_bar): pOldMenuBar = self._pMenuBar self._pMenuBar = menu_bar if self._pMenuBar: pParentFrame = self.GetMDIParentFrame() if not pParentFrame: raise Exception("Missing MDI 
Parent Frame") self._pMenuBar.Reparent(pParentFrame) if pParentFrame.GetActiveChild() == self: # replace current menu bars if pOldMenuBar: pParentFrame.SetChildMenuBar(None) pParentFrame.SetChildMenuBar(self) def GetMenuBar(self): return self._pMenuBar def SetTitle(self, title): self._title = title pParentFrame = self.GetMDIParentFrame() if not pParentFrame: raise Exception("Missing MDI Parent Frame") pClientWindow = pParentFrame.GetClientWindow() if pClientWindow is not None: for pos in xrange(pClientWindow.GetPageCount()): if pClientWindow.GetPage(pos) == self: pClientWindow.SetPageText(pos, self._title) break def GetTitle(self): return self._title def SetIcons(self, icons): # get icon with the system icon size self.SetIcon(icons.GetIcon(-1)) self._icon_bundle = icons def GetIcons(self): return self._icon_bundle def SetIcon(self, icon): pParentFrame = self.GetMDIParentFrame() if not pParentFrame: raise Exception("Missing MDI Parent Frame") self._icon = icon bmp = wx.BitmapFromIcon(self._icon) pClientWindow = pParentFrame.GetClientWindow() if pClientWindow is not None: idx = pClientWindow.GetPageIndex(self) if idx != -1: pClientWindow.SetPageBitmap(idx, bmp) def GetIcon(self): return self._icon def Activate(self): pParentFrame = self.GetMDIParentFrame() if not pParentFrame: raise Exception("Missing MDI Parent Frame") pClientWindow = pParentFrame.GetClientWindow() if pClientWindow is not None: for pos in xrange(pClientWindow.GetPageCount()): if pClientWindow.GetPage(pos) == self: pClientWindow.SetSelection(pos) break def OnMenuHighlight(self, event): if self._pMDIParentFrame: # we don't have any help text for this item, # but may be the MDI frame does? 
self._pMDIParentFrame.OnMenuHighlight(event) def OnActivate(self, event): # do nothing pass def OnCloseWindow(self, event): pParentFrame = self.GetMDIParentFrame() if pParentFrame: if pParentFrame.GetActiveChild() == self: pParentFrame.SetActiveChild(None) pParentFrame.SetChildMenuBar(None) pClientWindow = pParentFrame.GetClientWindow() idx = pClientWindow.GetPageIndex(self) if idx != wx.NOT_FOUND: pClientWindow.RemovePage(idx) self.Destroy() def SetMDIParentFrame(self, parentFrame): self._pMDIParentFrame = parentFrame def GetMDIParentFrame(self): return self._pMDIParentFrame def CreateStatusBar(self, number=1, style=1, winid=1, name=""): return None def GetStatusBar(self): return None def SetStatusText(self, text, number=0): pass def SetStatusWidths(self, widths_field): pass # no toolbar bars def CreateToolBar(self, style=1, winid=-1, name=""): return None def GetToolBar(self): return None # no maximize etc def Maximize(self, maximize=True): pass def Restore(self): pass def Iconize(self, iconize=True): pass def IsMaximized(self): return True def IsIconized(self): return False def ShowFullScreen(self, show=True, style=0): return False def IsFullScreen(self): return False def IsTopLevel(self): return False # renamed from Show(). 
def ActivateOnCreate(self, activate_on_create): self._activate_on_create = activate_on_create return True def Show(self, show=True): wx.PyPanel.Show(self, show) def ApplyMDIChildFrameRect(self): if self._mdi_currect != self._mdi_newrect: self.SetDimensions(*self._mdi_newrect) self._mdi_currect = wx.Rect(*self._mdi_newrect) #----------------------------------------------------------------------------- # AuiMDIClientWindow #----------------------------------------------------------------------------- class AuiMDIClientWindow(auibook.AuiNotebook): def __init__(self, parent, agwStyle=0): auibook.AuiNotebook.__init__(self, parent, wx.ID_ANY, wx.Point(0, 0), wx.Size(100, 100), agwStyle=AUI_NB_DEFAULT_STYLE|wx.NO_BORDER) caption_icon_size = wx.Size(wx.SystemSettings.GetMetric(wx.SYS_SMALLICON_X), wx.SystemSettings.GetMetric(wx.SYS_SMALLICON_Y)) self.SetUniformBitmapSize(caption_icon_size) bkcolour = wx.SystemSettings.GetColour(wx.SYS_COLOUR_APPWORKSPACE) self.SetOwnBackgroundColour(bkcolour) self._mgr.GetArtProvider().SetColour(AUI_DOCKART_BACKGROUND_COLOUR, bkcolour) self.Bind(auibook.EVT_AUINOTEBOOK_PAGE_CHANGED, self.OnPageChanged) self.Bind(auibook.EVT_AUINOTEBOOK_PAGE_CLOSE, self.OnPageClose) self.Bind(wx.EVT_SIZE, self.OnSize) def SetSelection(self, nPage): return auibook.AuiNotebook.SetSelection(self, nPage) def PageChanged(self, old_selection, new_selection): # don't do anything if the page doesn't actually change if old_selection == new_selection: return # notify old active child that it has been deactivated if old_selection != -1 and old_selection < self.GetPageCount(): old_child = self.GetPage(old_selection) if not old_child: raise Exception("AuiMDIClientWindow.PageChanged - null page pointer") event = wx.ActivateEvent(wx.wxEVT_ACTIVATE, False, old_child.GetId()) event.SetEventObject(old_child) old_child.GetEventHandler().ProcessEvent(event) # notify new active child that it has been activated if new_selection != -1: active_child = self.GetPage(new_selection) 
if not active_child: raise Exception("AuiMDIClientWindow.PageChanged - null page pointer") event = wx.ActivateEvent(wx.wxEVT_ACTIVATE, True, active_child.GetId()) event.SetEventObject(active_child) active_child.GetEventHandler().ProcessEvent(event) if active_child.GetMDIParentFrame(): active_child.GetMDIParentFrame().SetActiveChild(active_child) active_child.GetMDIParentFrame().SetChildMenuBar(active_child) def OnPageClose(self, event): wnd = self.GetPage(event.GetSelection()) wnd.Close() # regardless of the result of wnd.Close(), we've # already taken care of the close operations, so # suppress further processing event.Veto() def OnPageChanged(self, event): self.PageChanged(event.GetOldSelection(), event.GetSelection()) def OnSize(self, event): auibook.AuiNotebook.OnSize(self, event) for pos in xrange(self.GetPageCount()): self.GetPage(pos).ApplyMDIChildFrameRect()
{ "repo_name": "ktan2020/legacy-automation", "path": "win/Lib/site-packages/wx-3.0-msw/wx/tools/Editra/src/extern/aui/tabmdi.py", "copies": "2", "size": "18715", "license": "mit", "hash": 7571448684468398000, "line_mean": 26.1006006006, "line_max": 97, "alpha_frac": 0.550895004, "autogenerated": false, "ratio": 4.228422955264347, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.014868281756502023, "num_lines": 666 }
__author__ = 'Andrea' from functions import encode get_bin = lambda x: x >= 0 and str(bin(x))[2:] or "-" + str(bin(x))[3:] class Pitch(object): step = None alter = None def __init__(self, pitch): # pitch contructor if pitch is not None: self.step = pitch.find('step') self.octave = pitch.find('octave') self.alter = pitch.find('alter') # returns true if is a pause, false elsewhere @property def is_pause(self): return self.step is None # returns true if the pitch is not altered, false elsewhere @property def not_alter(self): return self.alter is None class Note(object): def __init__(self, note=None, division=4096): self.pitch = Pitch(note.find('pitch')) self.duration = note.find('duration') self.type_ = note.find('type') self.division = division # returns true if is a pause, false elsewhere @property def is_pause(self): return self.pitch.is_pause # allows to change the division value def set_division(self, div): self.division = div # encode the note def encode(self, kv): return encode(self.pitch, self, kv)
{ "repo_name": "AndreaDellera/Tesi", "path": "music-rnn/modules/classes.py", "copies": "1", "size": "1214", "license": "apache-2.0", "hash": -8366686274255318000, "line_mean": 25.9777777778, "line_max": 71, "alpha_frac": 0.6021416804, "autogenerated": false, "ratio": 3.7353846153846155, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9830427530352517, "avg_score": 0.001419753086419753, "num_lines": 45 }
__author__ = 'Andrea'

import xml.etree.ElementTree as ET
import glob

from modules.functions import create_binary_dataset, create_int_dataset
from modules.myBackProp import myBackpropTrainer
from modules.classes import Note
from modules.functions import create_network, train_network, binary_to_int_note
from pybrain.structure import SigmoidLayer, LSTMLayer, LinearLayer, GaussianLayer, SoftmaxLayer, TanhLayer
from pybrain.tools.xml.networkwriter import NetworkWriter

# database for different inputs


def main():
    """Train a (recurrent) PyBrain network on the notes extracted from every
    MusicXML file under ../files/train and persist the trained weights to
    'weights.xml'.
    """
    # number of pitches in every note normalized by max value
    division = 1024
    # hash table for encoding the duration (ticks -> 4-bit code)
    kv = {'4096': '0000', '3072': '0001', '2048': '0010', '1536': '0011', '1024': '0100',
          '768': '0101', '512': '0110', '384': '0111', '256': '1000', '192': '1001',
          '128': '1010', '64': '1011'}
    # list of all train files
    files = glob.glob("../files/train/*.xml")
    coded_notes = []
    # extracting all the notes
    for file in files:
        print "\nfile: " + file
        tree = ET.parse(file)
        # notes = [Note(note, division, step_time) for note in tree.findall('.//note')]
        notes = [Note(note, division) for note in tree.findall('.//note')]
        for note in notes:
            coded_notes.append(note.encode(kv))

    # binary=True -> 11-bit note encoding; otherwise 3-int encoding
    binary = True
    # number of consecutive notes fed to the net as one input window
    input_notes = 1
    if binary:
        # creating the datasets
        n_input = input_notes * 11
        n_output = 11
        dataset_notes = create_binary_dataset(n_input, n_output, coded_notes)
    else:
        n_input = input_notes * 3
        n_output = 3
        coded_notes = binary_to_int_note(coded_notes)
        dataset_notes = create_int_dataset(n_input, n_output, coded_notes)

    rec = True
    # creation of the datasets based on the number of input
    # if not rec:
    #     from modules.functions import unique_dataset
    #     dataset_notes = unique_dataset(dataset_notes)

    del files, coded_notes
    # creating the network
    # without hidden layers the network does not works properly
    # rnn = buildNetwork(n_input, 5, n_output, recurrent=rec, outclass=SigmoidLayer, hiddenclass=LSTMLayer, bias=False)
    # NOTE(review): both branches pick SigmoidLayer as the output class; only
    # the hidden-layer class actually differs -- confirm this is intended.
    if rec:
        hc = LSTMLayer
        oc = SigmoidLayer
    else:
        hc = SigmoidLayer
        oc = SigmoidLayer
    rnn = create_network(n_input, 20, n_output, recurrent=rec, outclass=oc, hiddenclass=hc, bias=False)

    # if verbose == True then print "Total error:", MSE / ponderation
    trainer = myBackpropTrainer(rnn, learningrate=0.1, momentum=0.3, verbose=False, batchlearning=True, recurrent=rec)

    print "start training"
    # k-fold cross validation: at most 10 folds, capped by the dataset size
    n = 10
    if n > dataset_notes.getLength():
        n = dataset_notes.getLength()
    train_network(trainer, dataset_notes, k_fold=n, bold_driver=True, maxEpochs=100)
    print "end training"
    # save the network state
    NetworkWriter.writeToFile(rnn, 'weights.xml')
    rnn.reset()

if __name__ == "__main__":
    main()
{ "repo_name": "AndreaDellera/Tesi", "path": "music-rnn/train_rnn.py", "copies": "1", "size": "2985", "license": "apache-2.0", "hash": -6692384231381265000, "line_mean": 32.5393258427, "line_max": 119, "alpha_frac": 0.642881072, "autogenerated": false, "ratio": 3.5663082437275984, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9702746128783604, "avg_score": 0.0012886373887989174, "num_lines": 89 }
__author__ = 'Andrea'

import xml.etree.ElementTree as ET
import glob
from pybrain.tools.xml.networkreader import NetworkReader
from modules.classes import Note
from modules.buildXML import create_music_xml
from modules.functions import decode, binary_to_int_note, int_to_binary_note
import random


def main():
    """Load the weights trained by train_rnn, seed the network with the notes
    of the files in ../files/toGenerate, generate 150 new notes and write
    them out as MusicXML ('output.xml').
    """
    division = 1024
    # load the network trained in train_rnn
    rnn = NetworkReader.readFrom('weights.xml')
    files = glob.glob("../files/toGenerate/*.xml")
    input_notes = ()
    # hash table for encoding the duration
    dur_en_kv = {'4096': '0000', '3072': '0011', '2048': '0010', '1536': '0101', '1024': '0100',
                 '768': '0111', '512': '0110', '384': '1001', '256': '1000', '192': '1011',
                 '128': '1010', '64': '1100'}
    # hash table for decoding the step value
    # NOTE(review): rests decode to the Italian 'pausa' here, while
    # buildXML.create_music_xml compares against 'pause' -- confirm which
    # spelling modules.functions.decode actually emits.
    step_kv = {'0000': 'A', '0001': 'A#', '0010': 'B', '0011': 'C', '0100': 'C#', '0101': 'D',
               '0110': 'D#', '0111': 'E', '1000': 'F', '1001': 'F#', '1010': 'G', '1011': 'G#',
               '1100': 'pausa', '1101': 'pausa', '1110': 'pausa', '1111': 'pausa'}
    # hash table for decoding the duration (out-of-range codes clamp to 64)
    dur_dec_kv = {'0000': '4096', '0001': '3072', '0010': '2048', '0011': '1536', '0100': '1024',
                  '0101': '768', '0110': '512', '0111': '384', '1000': '256', '1001': '192',
                  '1010': '128', '1011': '64', '1100': '64', '1101': '64', '1110': '64', '1111': '64'}

    binary = True
    # window size in notes (11 bits per note in binary mode, 3 ints otherwise)
    if binary:
        n = rnn.indim / 11
    else:
        n = rnn.indim / 3

    # creating the input for the net
    i = 0
    input_notes = []
    # extracting all the notes
    # generating new notes
    if binary:
        i = 0
        for file in files:
            print "\nfile: " + file
            tree = ET.parse(file)
            notes = [Note(note, division) for note in tree.findall('.//note')]
            for note in notes:
                # only take enough seed notes to fill one input window
                if i < rnn.indim / 11:
                    i += 1
                else:
                    break
                tmp = note.encode(dur_en_kv)
                for x in range(11):
                    input_notes += (tmp[x],)

        for i in range(150):
            # takes the last rnn.indim notes to activate the network
            otp = rnn.activate(input_notes[(i * 11):(rnn.indim + 11 * i):1])
            assert len(otp) == 11
            for x in range(len(otp)):
                # clamp each output bit to 0/1
                otp[x] = abs(round(otp[x]))
                # if otp[x] > 1:
                #     otp[x] = 1
                # if otp[x] < 0:
                #     otp[x] = 0
            # append the generated note so it feeds the next activation
            for x in range(len(otp)):
                input_notes += (str(otp[x])[0],)

    dec_notes = []
    for i in range(0, len(input_notes), 11):
        # NOTE(review): for i > 0 this slice spans (i+1)*11 - i elements, i.e.
        # more than one 11-bit note -- presumably decode only reads the first
        # 11 values; confirm (input_notes[i:i + 11] looks like the intent).
        note = decode(input_notes[i:((i + 1) * 11):1], step_kv, dur_dec_kv)
        dec_notes.append(note)

    # XML
    create_music_xml(dec_notes, division, 'output.xml')

if __name__ == "__main__":
    main()
{ "repo_name": "AndreaDellera/Tesi", "path": "music-rnn/use_rnn.py", "copies": "1", "size": "2958", "license": "apache-2.0", "hash": 7579350858764074000, "line_mean": 33, "line_max": 119, "alpha_frac": 0.4847870183, "autogenerated": false, "ratio": 3.297658862876254, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.42824458811762545, "avg_score": null, "num_lines": null }
__author__ = 'Andrea'

import xml.etree.ElementTree as ET


def indent(elem, level=0):
    """Pretty-print helper: recursively insert newline/indentation text so
    the serialized XML is human readable (one space per nesting level).

    :param elem: element to indent in place
    :param level: current nesting depth
    """
    i = "\n" + level * " "
    if len(elem):
        if not elem.text or not elem.text.strip():
            elem.text = i + " "
        if not elem.tail or not elem.tail.strip():
            elem.tail = i
        # NOTE: the loop variable deliberately rebinds 'elem'; after the loop
        # it names the LAST child, whose tail is dedented back to this level.
        for elem in elem:
            indent(elem, level + 1)
        if not elem.tail or not elem.tail.strip():
            elem.tail = i
    else:
        if level and (not elem.tail or not elem.tail.strip()):
            elem.tail = i


def create_music_xml(dec_notes, division=1024, name='output.xml'):
    """Render decoded notes as a MusicXML 2.0 partwise score file.

    :param dec_notes: sequence of decoded notes; indexed as
                      (octave, step, duration) -- assumes the layout produced
                      by modules.functions.decode, TODO confirm
    :param division: MusicXML <divisions> value (ticks per quarter note)
    :param name: output file path
    """
    tree = ET.parse('../files/toGenerate/EmptyXML.xml')
    part = tree.find('.//part')
    i = 0
    n_measure = 1
    # note-type names keyed by duration in ticks (dotted values map to the
    # same base type; the <dot> element is added separately below)
    type_names = {4096: 'whole', 3072: 'half', 2048: 'half', 1536: 'quarter', 1024: 'quarter',
                  768: 'eighth', 512: 'eighth', 384: '16th', 256: '16th', 192: '32th',
                  128: '32th', 96: '64th', 32: '64th', 48: '64th', 64: '64th'}
    while i < len(dec_notes):
        # structure of the measure
        measure = ET.SubElement(part, 'measure', {'number': str(n_measure)})
        # BUG FIX: the old code tested n_measure AFTER incrementing it, so the
        # <divisions> element was never written; capture the flag first.
        first_measure = n_measure == 1
        n_measure += 1
        # <attributes>
        att = ET.SubElement(measure, 'attributes')
        if first_measure:
            # <divisions>1024</divisions>
            # BUG FIX: tag was 'division', which is not a valid MusicXML element.
            tmp = ET.SubElement(att, 'divisions')
            tmp.text = str(division)
        # <key><fifths>0</fifths><mode>major</mode></key>
        key = ET.SubElement(att, 'key')
        tmp = ET.SubElement(key, 'fifths')
        tmp.text = str(0)
        tmp = ET.SubElement(key, 'mode')
        tmp.text = "major"
        # <time><beats>4</beats><beat-type>4</beat-type></time>  (4/4)
        time = ET.SubElement(att, 'time')
        tmp = ET.SubElement(time, 'beats')
        tmp.text = str(4)
        tmp = ET.SubElement(time, 'beat-type')
        tmp.text = str(4)
        # <staves>1</staves>  (number of staves, i.e. a single staff)
        tmp = ET.SubElement(att, 'staves')
        tmp.text = str(1)
        # treble clef: <clef><sign>G</sign><line>2</line></clef>
        clef = ET.SubElement(att, 'clef')
        tmp = ET.SubElement(clef, 'sign')
        tmp.text = "G"
        tmp = ET.SubElement(clef, 'line')
        tmp.text = str(2)
        # </attributes>

        totDuration = 0
        # Fill the measure (4/4 -> division * 4 ticks).
        # NOTE(review): notes are NOT split at the bar line, so a long note
        # can overflow its measure (the commented-out splitting logic was
        # removed from the original as dead code).
        while totDuration < division * 4 and i < len(dec_notes):
            # <note>
            note = ET.SubElement(measure, 'note')
            pitch = ET.SubElement(note, 'pitch')
            if dec_notes[i][1] != 'pause':
                # pitched note: <step>/<alter>/<octave>
                tmp = ET.SubElement(pitch, 'step')
                tmp.text = dec_notes[i][1][1]
                tmp = ET.SubElement(pitch, 'alter')
                if dec_notes[i][1][0]:
                    tmp.text = "1"
                tmp = ET.SubElement(pitch, 'octave')
                tmp.text = str(dec_notes[i][0])
            else:
                # rest
                ET.SubElement(note, 'rest')
            # <duration>...</duration>
            tmp = ET.SubElement(note, 'duration')
            tmp.text = str(dec_notes[i][2])
            # <voice>1</voice>
            tmp = ET.SubElement(note, 'voice')
            tmp.text = '1'
            # <type>...</type> plus <accidental>sharp</accidental> for sharps
            tmp = ET.SubElement(note, 'type')
            tmp.text = type_names[dec_notes[i][2]]
            # NOTE(review): this truthiness test also fires for rests if the
            # step value is the string 'pausa'/'pause' -- confirm decode's
            # representation before relying on the <accidental> output.
            if dec_notes[i][1][0]:
                tmp = ET.SubElement(note, 'accidental')
                tmp.text = "sharp"
            # dotted durations get an explicit <dot/>
            if dec_notes[i][2] in (3072, 1536, 768, 384, 192):
                ET.SubElement(note, 'dot')
            # </note>
            totDuration += dec_notes[i][2]
            i += 1

    indent(tree.getroot())
    with open(name, 'w') as f:
        f.write('<?xml version="1.0" encoding="UTF-8" ?>\n'
                '<!DOCTYPE score-partwise PUBLIC "-//Recordare//DTD MusicXML 2.0 Partwise//EN" "musicxml20/partwise.dtd">\n')
        tree.write(f, encoding='utf-8', method="xml")
{ "repo_name": "AndreaDellera/Tesi", "path": "music-rnn/modules/buildXML.py", "copies": "1", "size": "5299", "license": "apache-2.0", "hash": -8131470667182445000, "line_mean": 35.7986111111, "line_max": 125, "alpha_frac": 0.4614078128, "autogenerated": false, "ratio": 3.3474415666456094, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4308849379445609, "avg_score": null, "num_lines": null }
__author__ = 'Andrean'
from models import BaseModel
from models.contractor import Contractor
from bson.objectid import ObjectId
import defs
import schedule
import datetime
import time


class DataItem(BaseModel, defs.StoppableThread):
    """A scheduled data-collection job: periodically executes its Contractor
    and ships the collected data through the agent's client channel."""

    # collection name used by BaseModel persistence
    StorageName = 'data_items'

    def __init__(self, item):
        BaseModel.__init__(self, item)
        defs.StoppableThread.__init__(self)
        # each DataItem owns a private scheduler so items tick independently
        self._scheduler = schedule.Scheduler()
        job = self._scheduler.every(self.schedule['interval'])
        job.unit = self.schedule['unit']
        # optional fixed time-of-day; only the first entry is honored
        if len(self.schedule['at']) > 0:
            job.at(self.schedule['at'][0])
        job.do(self.run_job)

    """ data_item
        id: object_id,
        name: str,
        data_type: str,
        contractor: object_id,
        entity: object_id,
        schedule: {'interval': num, 'unit': str, 'at':[timestr]}
    """

    def run(self):
        # Thread body: poll the scheduler until the stop event is set.
        # NOTE(review): _stop_event is presumably provided by
        # defs.StoppableThread -- confirm.
        while not self._stop_event.is_set():
            self._scheduler.run_pending()
            time.sleep(0.1)

    def run_job(self):
        # Invoked by the scheduler on every tick: run the contractor and
        # forward its result.
        contractor = self.contractor
        if contractor is None:
            self.send_error("Contractor {0} was not found".format(self._item.get('contractor')))
            return
        data = contractor.exec()
        self.send(data, contractor.hash)

    def send(self, data, hash):
        # Wrap the payload in the chunk format expected by the server.
        # NOTE(review): self._client is never assigned in this class --
        # presumably injected by a base class; confirm.
        chunk = [dict(
            data_list=[dict(data=data,timestamp=datetime.datetime.now())],
            data_item=self.id,
            hash=hash
        )]
        self._client.send_data(chunk)

    def send_error(self, error):
        # TODO: error reporting is not implemented yet
        pass

    @property
    def id(self):
        return self._item.get('id')

    @property
    def schedule(self):
        # raw schedule dict: {'interval': num, 'unit': str, 'at': [timestr]}
        return self._item.get('schedule')

    @property
    def contractor(self):
        # Resolve the referenced Contractor; None when the stored id is not
        # a valid ObjectId.
        _id = self._item.get('contractor')
        if not isinstance(_id, ObjectId):
            return None
        return Contractor.find(_id)
{ "repo_name": "Andrean/lemon.apple", "path": "agent/models/data_item.py", "copies": "1", "size": "1935", "license": "mit", "hash": 8324499501989275000, "line_mean": 25.5068493151, "line_max": 96, "alpha_frac": 0.5684754522, "autogenerated": false, "ratio": 3.824110671936759, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4892586124136759, "avg_score": null, "num_lines": null }
__author__ = 'Andrean'
from modules.base import BaseServerModule
import core
import uuid
import bson.objectid
import models.components
import defs.cmd
import datetime


class Manager(BaseServerModule):
    """Server-side registry module.

    Exposes the model classes (agents, entities, contractors, data items),
    lazily loading their cached instances, and makes sure the storage
    indexes declared by each schema exist.
    """

    def __init__(self, _core):
        super().__init__(_core, 'Manager')
        self._storage = None
        self._agents = None
        self._entities = None

    def start(self):
        self._storage = core.Instance.Storage
        self.ensure_index()

    def stop(self):
        pass

    def ensure_index(self):
        """(Re)create the storage indexes declared by each model schema."""
        self._logger.info('Start indexing of data')
        indexed_models = (
            models.components.Agent,
            models.components.Entity,
            models.components.Contractor,
        )
        for model_cls in indexed_models:
            model_cls.ensure_index_schema()
        self._logger.info('Indexing is over')

    @property
    def agents(self):
        # Lazily populate the in-memory cache before handing out the class.
        agent_cls = models.components.Agent
        if agent_cls.Instances is None:
            agent_cls.load_instances()
        return agent_cls

    @property
    def entities(self):
        entity_cls = models.components.Entity
        if entity_cls.Instances is None:
            entity_cls.load_instances()
        return entity_cls

    @property
    def contractors(self):
        # contractors are deliberately not cached in memory
        return models.components.Contractor

    @property
    def data_items(self):
        item_cls = models.components.DataItem
        if item_cls.Instances is None:
            item_cls.load_instances()
        return item_cls
{ "repo_name": "Andrean/lemon.apple", "path": "server/modules/manager.py", "copies": "1", "size": "1487", "license": "mit", "hash": -5484495433502313000, "line_mean": 26.0545454545, "line_max": 58, "alpha_frac": 0.6583725622, "autogenerated": false, "ratio": 4.285302593659942, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5443675155859942, "avg_score": null, "num_lines": null }
__author__ = 'Andrean'
from modules.base import BaseServerModule
import logging

# module variable: the process-wide Core singleton, set by Core.__init__
Instance = None


class Core(object):
    '''
        Core class. It keeps all working instances of Lemon server
    '''

    def __init__(self, config=None):
        # BUG FIX: 'Config' and 'modules' used to be mutable CLASS attributes,
        # silently shared by every Core instance; they are now per-instance.
        self.Config = config
        self.modules = {}
        self._logger = logging.getLogger('main.Core')
        global Instance     # used for global access for Core
        Instance = self

    def add(self, module):
        """Instantiate a module class and register it under its Name."""
        instance = module(self)
        assert isinstance(instance, BaseServerModule)
        self.modules[instance.Name] = instance

    def start(self):
        """Start every registered module."""
        self._logger.info('Starting modules')
        for module in self.modules.values():
            self._logger.debug('Starting module {}'.format(module.Name))
            module.start()

    def stop(self):
        """Stop every registered module."""
        self._logger.info('Stopping modules')
        for module in self.modules.values():
            self._logger.debug('Stopping module {}'.format(module.Name))
            module.stop()

    @property
    def Storage(self):
        return self.modules.get('Storage')

    @property
    def Server(self):
        return self.modules.get('Server')

    @property
    def Manager(self):
        return self.modules.get('Manager')
{ "repo_name": "Andrean/lemon.apple", "path": "server/core.py", "copies": "1", "size": "1302", "license": "mit", "hash": 5122647046047314000, "line_mean": 25.04, "line_max": 72, "alpha_frac": 0.6152073733, "autogenerated": false, "ratio": 4.325581395348837, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0, "num_lines": 50 }
__author__ = 'Andrean'
from modules.base import BaseServerModule
import router


######################################################################
#   Server
#   Run all registered Listeners
######################################################################
class Server(BaseServerModule):
    """HTTP front end: runs two independent listeners, one for agents and
    one for the web interface, each with its own router."""

    def __init__(self, _core):
        super().__init__(_core, 'Server')
        agent_router = router.AgentRouter()
        web_router = router.WebRouter()
        self.agent_listener = Listener(HTTPRequestHandler, agent_router)
        # endpoint config format: "host:port"
        endpoint = self._config.get('agent_interface',{}).get('listenerEndpoint')
        address = endpoint.split(':')
        self.agent_listener.set_endpoint(tuple([address[0], int(address[1])]))
        self.web_listener = Listener(HTTPRequestHandler, web_router)
        endpoint = self._config.get('web_interface',{}).get('listenerEndpoint')
        address = endpoint.split(':')
        self.web_listener.set_endpoint(tuple([address[0], int(address[1])]))
        self._logger.info("Server initiated")

    def start(self):
        # Each Listener is a Thread; start() returns immediately.
        self.agent_listener.start()
        self._logger.info('Agent Listener started')
        self.web_listener.start()
        self._logger.info('Web Listener started')

    def stop(self):
        self.agent_listener.stop()
        self._logger.info('Agent Listener was stopped')
        self.web_listener.stop()
        self._logger.info('Web Listener was stopped')


######################################################################
#   HTTPListener
#   Class for listen and handle http requests
######################################################################
import http.server
from socketserver import ThreadingMixIn
import threading

# NOTE(review): declared but never used -- finish_request below hard-codes a
# 30 second socket timeout instead; confirm which value is intended.
SOCKET_TIMEOUT = 60


class ThreadingHTTPServer(ThreadingMixIn, http.server.HTTPServer):
    # One handler thread per request; a per-socket timeout keeps a stuck
    # client from pinning a thread forever.
    def finish_request(self, request, client_address):
        request.settimeout(30)
        http.server.HTTPServer.finish_request(self, request, client_address)


class Listener(threading.Thread):
    """One HTTP endpoint: runs a ThreadingHTTPServer in its own thread and
    dispatches every request through the given router."""

    def __init__(self, handler_class, router_instance):
        self._handler = handler_class
        self._router = router_instance
        self._httpd = None
        self._endpoint = None
        super().__init__()

    def run(self):
        # Thread body: load the route table, then serve until stop().
        self._router.load()
        self.listen()

    def set_endpoint(self, server_address):
        # server_address: (host, port) tuple
        self._endpoint = server_address

    def listen(self):
        self._httpd = ThreadingHTTPServer(self._endpoint, self._handler)
        self._httpd.daemon_threads = True
        # handler instances reach the router through the server object
        self._httpd.request_router = self._router
        self._httpd.serve_forever()

    def stop(self):
        if self._httpd is not None:
            self._httpd.shutdown()
            self._httpd.socket.close()


######################################################################
#   HTTPRequestHandler
#   Class for handling one request
######################################################################
class HTTPRequestHandler(http.server.BaseHTTPRequestHandler):
    """Thin handler: every verb is forwarded to the server's request router."""

    def __init__(self, request, client_address, server):
        self.protocol_version = 'HTTP/1.1'
        super().__init__(request, client_address, server)

    def do_GET(self):
        router = self.server.request_router
        router.dispatch(self, 'GET', self.path)

    def do_POST(self):
        router = self.server.request_router
        router.dispatch(self, 'POST', self.path)

    def do_PUT(self):
        router = self.server.request_router
        router.dispatch(self, 'PUT', self.path)

    def do_HEAD(self):
        router = self.server.request_router
        router.dispatch(self, 'HEAD', self.path)

    def do_DELETE(self):
        router = self.server.request_router
        router.dispatch(self, 'DELETE', self.path)
{ "repo_name": "Andrean/lemon.apple", "path": "server/modules/server.py", "copies": "1", "size": "3737", "license": "mit", "hash": 7150717911953201000, "line_mean": 32.9818181818, "line_max": 81, "alpha_frac": 0.5726518598, "autogenerated": false, "ratio": 4.386150234741784, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.0026668723951545916, "num_lines": 110 }
__author__ = 'Andrean'
from modules import BaseAgentModule
import bson.json_util
import datetime
import json
import threading
import traceback
import time
import commands
import os
import sys
import schedule
import subprocess
import defs.cmd
import queue
import core
import defs
import defs.scheduler
import hashlib

# NOTE(review): this constant (note the trailing lowercase 'l' typo) is never
# used -- the loop below hard-codes time.sleep(1), matching the comment.
COMMANDS_POLL_INTERVAl = 5  # every 1 second try to get commands


class Manager(BaseAgentModule):
    """Agent-side command manager: polls the server for pending commands,
    hands them to the CommandManager and reports finished statuses back."""

    Name = "Manager"

    def __init__(self, core):
        super().__init__(core)
        self._client = self._core.Client
        self._stop = threading.Event()
        self._commands_handler = None
        self.commandManager = None

    def start(self):
        self._logger.info('Starting commands timer')
        self._commands_handler = threading.Thread(target=self._handle_commands)
        self.commandManager = commands.CommandManager()
        self.commandManager.start()
        self._commands_handler.start()

    def stop(self):
        self._logger.info('Stopping commands timer...')
        self._stop.set()
        self.commandManager.stop()

    def _handle_commands(self):
        # Poll loop; runs in its own thread until stop() sets the event.
        while not self._stop.is_set():
            # say to client 'GET commands' request
            # and wait for response in queue
            # NOTE(review): if isAlive is a Thread-style METHOD, this tests a
            # bound method object (always truthy) and the guard never fires --
            # should probably be self.commandManager.isAlive(); confirm what
            # commands.CommandManager exposes.
            if not self.commandManager.isAlive:
                time.sleep(1)
                continue
            try:
                cmds = self._client.get_commands()
                self.commandManager.handle(cmds)
                status = self.commandManager.status(1)
                if len(status) > 0:
                    response = self._client.send_commands(status)
                    if response is None:
                        self._logger.error("Server did not answered on 'send_commands'")
            except ValueError as e:
                # server returned something we could not parse
                self._logger.error('Unknown response from server: {0}'.format(e))
                time.sleep(1)
            except Exception as err:
                # keep the poll loop alive on any unexpected failure
                self._logger.exception(err)
                time.sleep(1)
{ "repo_name": "Andrean/lemon.apple", "path": "agent/modules/managers.py", "copies": "1", "size": "2014", "license": "mit", "hash": -1856222869657222100, "line_mean": 27.3661971831, "line_max": 88, "alpha_frac": 0.6027805362, "autogenerated": false, "ratio": 4.436123348017621, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.014854663056442244, "num_lines": 71 }
__author__ = 'Andrean'
from modules import BaseAgentModule
import http.client
import defs.errors
from queue import Queue, Empty, Full
import threading
import re
import time
import json
import bson.json_util

# NOTE(review): module-level lock, never used -- each channel carries its own
# per-channel mutex instead (assigned in Client.start).
ClientLock = threading.Lock()


def ParseBody(response):
    """Read and attach the parsed body to an http.client response object.

    Depending on Content-Type, sets response.text/.data (text/plain),
    response.json (application/json) or response.data (octet-stream) and
    returns the response. NOTE(review): any other content type falls
    through and returns None -- confirm that is intended.
    """
    # get content type. If not found use default "text/plain" with encoding "utf-8"
    content_type = response.getheader('Content-Type','text/plain; charset=utf-8')
    # if Content-Length is not found - do not read body
    content_length = int(response.getheader('Content-Length', 0))
    match = re.match(r"([\w/-]+);\s*charset=([\w-]+)", content_type)
    content_charset = 'utf-8'
    if match:
        content_charset = match.group(2)
        content_type = match.group(1)
    body = response.read(content_length)
    if content_type == 'text/plain':
        response.text = str(body, content_charset)
        response.data = body
        return response
    if content_type == 'application/json':
        # bson.json_util.object_hook restores ObjectId/datetime values
        response.json = json.loads(str(body, content_charset), object_hook=bson.json_util.object_hook)
        return response
    if content_type == 'application/octet-stream':
        response.data = body
        return response


class Client(BaseAgentModule):
    """HTTP client module of the agent.

    Keeps two persistent HTTP connections to the server: a command channel
    (synchronous request/response, guarded by a mutex) and a data channel
    fed by a background thread that drains an internal queue.
    """

    Name = "Client"
    PackageSize = 4
    DefaultCharset='utf8'

    def __init__(self, core):
        super().__init__(core)
        self._command_channel = None
        self._data_channel = None
        self._queue = Queue(50)
        self._data_queue = Queue(500)
        self._stop = threading.Event()

    def start(self):
        # config value format: "host:port"
        endpoint = self._config.get('server')
        if endpoint is None:
            raise defs.errors.LemonConfigurationError("Parameter: {0} not found".format('server'))
        host, port = endpoint.split(':')
        self._command_channel = http.client.HTTPConnection(host, int(port), 30)
        self._command_channel.name = 'Command Channel'
        self._command_channel.mutex = threading.Lock()
        self._data_channel = http.client.HTTPConnection(host, int(port), 30)
        self._data_channel.name = 'Data Channel'
        self._data_channel.mutex = threading.Lock()
        # connect_channel returns the number of attempts left; 0 means failure
        if self.connect_channel(self._command_channel) == 0:
            raise ConnectionError("Cannot connect Command Channel")
        if self.connect_channel(self._data_channel) == 0:
            raise ConnectionError("Cannot connect Data Channel")
        self.data_processor = threading.Thread(
            target=self.process_data,
            name="DATA PROCESSOR"
        )
        self.data_processor.start()

    def process_data(self):
        # Background loop: drain the data queue and POST each chunk to /data.
        # Only exits when the queue stays empty AND stop() has been called.
        while True:
            try:
                item = self._data_queue.get(timeout=10)
                response = self._request(self._data_channel, 'POST', '/data', item)
                if response is not None:
                    print(response)  # NOTE(review): leftover debug output
                #check response status. it cannot be not 200
                # todo: read answer of server and resend failed data items
            except Empty:
                if self._stop.is_set():
                    break
        self._data_channel.close()
        self._logger.info("Data Channel stopped successfully")

    def stop(self):
        self._logger.info("Stopping HTTP Connections...")
        self._stop.set()
        self._command_channel.close()
        # the data thread can wait up to its 10s queue timeout before noticing
        self.data_processor.join(timeout=60)
        if self.data_processor.is_alive():
            self._logger.warn('Data Channel thread is still alive. Try to kill them')
            raise RuntimeError
        self._logger.info('HTTP Client was stopped successfully')

    def send_commands(self, commands):
        # Synchronous POST of command statuses; returns parsed JSON or None.
        with self._command_channel.mutex:
            response = self._request(self._command_channel, 'POST', '/commands', commands)
            if response is None:
                return None
            return response.json

    def send_data(self, data):
        # Enqueue a data chunk for the background sender (blocks up to 50s
        # when the queue is full).
        self._data_queue.put(data, timeout=50)

    def send_error(self, error):
        # TODO: error reporting is not implemented yet
        pass

    def get_commands(self):
        print(time.time())  # NOTE(review): leftover debug output
        with self._command_channel.mutex:
            response = self._request(self._command_channel, 'GET', '/commands')
            if response is None:
                return None
            return response.json

    def _request(self, channel, method, url, obj=None):
        """Serialize obj (if any) as JSON, send the request on the given
        channel and return the parsed response, or None on failure (the
        channel is reconnected as a side effect)."""
        headers = dict()
        headers['Lemon-Agent-ID'] = str(self._core.Storage.agent_id)
        body = None
        try:
            if obj is not None:
                # bson.json_util.default serializes ObjectId/datetime values
                body = bytes(json.dumps(obj, default=bson.json_util.default), self.DefaultCharset)
                headers['Content-Length'] = len(body)
                headers['Content-Type'] = 'application/json; charset={0}'.format(self.DefaultCharset)
            channel.request(method, url, body, headers)
            res = channel.getresponse()
            return ParseBody(res)
        except ConnectionError as err:
            self._logger.exception(err)
            self.connect_channel(channel)
        except http.client.HTTPException as err:
            self._logger.exception(err)
            self.connect_channel(channel)
        return None

    def connect_channel(self, channel):
        """(Re)connect a channel, retrying up to 100 times with a 10s pause.

        :return: remaining attempts; 0 signals total failure to the caller.
        """
        self._logger.info('Connecting channel "{0}"...'.format(channel.name))
        attempts = 100
        while attempts > 0:
            self._logger.debug('Reconnection attempt: {0}'.format(100 -attempts))
            try:
                channel.close()
                channel.connect()
                break
            except Exception as e:
                self._logger.exception(e)
                attempts -= 1
                time.sleep(10)
        if attempts > 0:
            self._logger.info('Successfully connected to {0}({1}:{2})'.format(channel.name, channel.host, channel.port))
        return attempts
{ "repo_name": "Andrean/lemon.apple", "path": "agent/modules/client.py", "copies": "1", "size": "5795", "license": "mit", "hash": -8900827460323041000, "line_mean": 36.6298701299, "line_max": 120, "alpha_frac": 0.5972389991, "autogenerated": false, "ratio": 4.2299270072992705, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.008242144742773263, "num_lines": 154 }
__author__ = 'Andrean'
from modules import BaseAgentModule
import os
import socket
import shelve
import uuid


class Storage(BaseAgentModule):
    """Persistent key/value store for the agent, backed by a shelve file.

    On first start it generates and persists a stable agent id (UUID3 of
    the host's fully-qualified domain name).
    """

    Name = "Storage"

    def __init__(self, core):
        super().__init__(core)

    def start(self):
        self._logger.info('Load Database on {0}'.format(self._config['data_path']))
        data_path = self._config['data_path']
        os.makedirs(os.path.dirname(data_path), exist_ok=True)
        # 'c' flag: open for read/write, creating the file if missing
        with shelve.open(data_path, 'c') as db:
            if 'agent_id' not in db:
                self._logger.info("Storage File not found. Creating new on {0}".format(data_path))
                self._logger.info("Generating new AGENT ID")
                # deterministic per host: uuid3 over the FQDN of this machine
                agent_id = uuid.uuid3(uuid.NAMESPACE_DNS, socket.getfqdn(socket.gethostname()))
                self._logger.info("AGENT ID is {0}".format(agent_id))
                db['agent_id'] = agent_id
        self._db = data_path
        self.agent_id = self.get('agent_id')
        self._logger.info("Database is loaded")

    def stop(self):
        self._logger.info('Stopped')

    def get(self, key):
        # Opens the shelf read-only on every call; the 'r' flag raises if the
        # file does not exist, so start() must have run first.
        with shelve.open(self._db, 'r') as db:
            return db.get(key)

    def set(self, key, value):
        # Open-write-close per call: simple and crash-safe, not fast.
        with shelve.open(self._db) as db:
            db[key] = value
{ "repo_name": "Andrean/lemon.apple", "path": "agent/modules/storage.py", "copies": "1", "size": "1281", "license": "mit", "hash": 4856130358764286000, "line_mean": 30.243902439, "line_max": 98, "alpha_frac": 0.5807962529, "autogenerated": false, "ratio": 3.578212290502793, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9529292392345179, "avg_score": 0.025943230211522898, "num_lines": 41 }
__author__ = 'Andrean'
from threading import Thread, Event


class IntervalTimer(Thread):
    """Repeatedly invoke *function* every *interval* seconds.

    The call is repeated *iteration* times (a negative value means forever)
    or until cancel() is invoked:

        t = IntervalTimer(30.0, f, args=None, kwargs=None)
        t.start()
        t.cancel()     # stop the timer's action if it's still waiting
    """

    def __init__(self, interval, function, iteration=-1, args=None, kwargs=None):
        super().__init__()
        self.interval = interval
        self.function = function
        self.iteration = iteration
        self.args = [] if args is None else args
        self.kwargs = {} if kwargs is None else kwargs
        self.finished = Event()

    def cancel(self):
        """Stop the timer if it hasn't finished yet."""
        self.finished.set()

    def run(self):
        # Sleep, fire, repeat -- the Event doubles as interruptible sleep
        # and as the cancellation flag.
        while True:
            self.finished.wait(self.interval)
            stop_now = self.finished.is_set() or self.iteration == 0
            if stop_now:
                break
            self.function(*self.args, **self.kwargs)
            if self.iteration > 0:
                self.iteration -= 1
{ "repo_name": "Andrean/lemon.apple", "path": "agent/defs/scheduler.py", "copies": "1", "size": "1068", "license": "mit", "hash": -8381013808918403000, "line_mean": 29.5428571429, "line_max": 81, "alpha_frac": 0.5664794007, "autogenerated": false, "ratio": 4.254980079681275, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5321459480381274, "avg_score": null, "num_lines": null }
__author__ = 'Andrean'
from types import *


def binary_search(arr, x, comparator=None, strict=True, low=0, high=None):
    """Binary search over a sorted list.

    :param arr: sorted list of elements
    :param x: searched element (compared through ``comparator`` if given)
    :param comparator: optional three-way compare ``f(arr_el, el) -> 1|0|-1``;
                       any callable is accepted
    :param strict: when True, return -1 if ``x`` is absent; when False, return
                   the position of the nearest element on the left, -0.5 when
                   ``x`` sorts before everything, or 0.5 when the search ran
                   past the right end
                   (NOTE(review): 0.5 is a constant sentinel, not
                   ``len(arr) - 0.5`` -- confirm callers expect this)
    :param low: lower bound of the searched slice (inclusive)
    :param high: upper bound (exclusive); defaults to ``len(arr)``
    :return: index of the element, or the non-strict fallback described above
    """
    def cmp(arr_el, el):
        if arr_el > el:
            return 1
        if arr_el < el:
            return -1
        return 0
    # BUG FIX: was isinstance(comparator, FunctionType), which silently
    # ignored non-function callables (functools.partial, bound methods, ...)
    if callable(comparator):
        cmp = comparator
    if high is None:
        high = len(arr)
    while low < high:
        mid = (low + high)//2
        test = cmp(arr[mid], x)
        if test == -1:
            low = mid+1
        elif test == 1:
            high = mid
        else:
            return mid
    if strict is not True:
        if high == 0:
            return -0.5
        if low >= len(arr):
            return 0.5
        return low-1
    return -1
{ "repo_name": "Andrean/lemon.apple", "path": "agent/defs/search.py", "copies": "2", "size": "1092", "license": "mit", "hash": 2459837236213403000, "line_mean": 27.0256410256, "line_max": 107, "alpha_frac": 0.5512820513, "autogenerated": false, "ratio": 3.9565217391304346, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.002770114115949948, "num_lines": 39 }
__author__ = 'Andrean'
import controllers.web as webController

#####################################################################################
#   Routes for routing request from WEB-Server as web-interface
#   Each entry is [HTTP method, path regex, handler]; the regex tail allows
#   an optional query string after the resource path.
#####################################################################################
ROUTES = [
    # --- entity CRUD ---
    [ 'GET', r'^/entities[?=%&_\-\+\w\.,]*$', webController.entity_manager['get_entities'] ],
    [ 'PUT', r'^/entities[?=%&_\-\+\w\.,]*$', webController.entity_manager['add_entity'] ],
    [ 'POST', r'^/entities[?=%&_\-\+\w\.,]*$', webController.entity_manager['modify_entity'] ],
    [ 'DELETE', r'^/entities[?=%&_\-\+\w\.,]*$', webController.entity_manager['del_entity'] ],
    # --- agents (read-only) ---
    [ 'GET', r'^/agents[?=%&_\-\+\w,\.]*$', webController.agents.get_agents ],
    # --- contractor CRUD (no modify) ---
    [ 'GET', r'^/contractors[?=%&_\-\+\w\.,]*$', webController.contractors.get ],
    [ 'PUT', r'^/contractors[?=%&_\-\+\w\.,]*$', webController.contractors.add ],
    [ 'DELETE', r'^/contractors[?=%&_\-\+\w\.,]*$', webController.contractors.remove ],
    # --- data items ---
    [ 'GET', r'^/data/items[?=%&_\-\+\w\.,]*$', webController.data_items.get ],
    [ 'POST', r'^/data/items[?=%&_\-\+\w\.,]*$', webController.data_items.set ],
    [ 'DELETE', r'^/data/items[?=%&_\-\+\w\.,]*$', webController.data_items.remove ],
    # --- data chunks (read-only) ---
    [ 'GET', r'^/data/chunk[?=%&_\-\+\w\.,]*$', webController.data_chunks.get ],
    [ 'GET', r'^/data/chunk/count[?=%&_\-\+\w\.,]*$', webController.data_chunks.count ],
    # --- command dispatch (GET and POST share one handler) ---
    [ 'GET', r'^/commands/send[?=%&_\-\+\w\.,]*$', webController.commands.send_to ],
    [ 'POST', r'^/commands/send[?=%&_\-\+\w\.,]*$', webController.commands.send_to ],
    [ 'GET', r'^/agents/commands[?=%&_\-\+\w\.,]*$', webController.agents.get_agent_commands ]
]
{ "repo_name": "Andrean/lemon.apple", "path": "server/routes/web_interface.py", "copies": "1", "size": "1900", "license": "mit", "hash": 7973573752059263000, "line_mean": 72.0769230769, "line_max": 98, "alpha_frac": 0.4384210526, "autogenerated": false, "ratio": 3.125, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.40634210526, "avg_score": null, "num_lines": null }
__author__ = 'Andrean'
import copy
import bson
import bson.objectid
import bson.dbref
import core


class BaseModel(object):
    """Base class for persisted models backed by a MongoDB collection.

    Subclasses set Schema (a BaseSchema subclass) and Collection (the
    collection name) and may cache all documents in memory via
    load_instances().
    """

    Schema = None               # BaseSchema subclass describing the document
    Collection = None           # MongoDB collection name
    Instances = None            # optional in-memory cache of model instances
    virtual = None              # per-instance non-persisted fields
    _index_objectId = None      # ObjectId -> position in Instances

    def __init__(self, item=None):
        self.virtual = {}
        self._dbref = {}
        self._collection = self.__class__.Collection
        self.schema_instance = self.Schema()
        self._raw = self.schema_instance.init_data()
        self._data = self._raw
        self._id = None
        # a dict is treated as an already-fetched document; anything else
        # (usually an ObjectId or None) is loaded from storage
        if type(item) is dict:
            self._id = item.get('_id')
            self.load_from(item)
        else:
            self.load(item)

    @classmethod
    def list_instances(cls, filter=None):
        """Return all cached instances, or query storage when no cache exists."""
        # BUG FIX: the default used to be the mutable literal {}; it was only
        # ever read, but a None sentinel is the safe idiom.
        if filter is None:
            filter = {}
        if cls.Instances is not None:
            return cls.Instances
        conn = cls.get_connection()
        return [cls(x) for x in conn[cls.Collection].find(filter)]

    @classmethod
    def findById(cls, obj_id):
        """Look an instance up by ObjectId; returns None when not found."""
        obj_id = bson.objectid.ObjectId(obj_id)
        try:
            if cls._index_objectId is not None:
                # a miss makes .get() return None -> TypeError -> None below
                return cls.Instances[cls._index_objectId.get(obj_id)]
            else:
                conn = cls.get_connection()
                item = conn[cls.Collection].find_one(obj_id)
                if item is not None:
                    return cls(item)
                return None
        except Exception:
            # BUG FIX: was a bare 'except:' which also swallowed SystemExit /
            # KeyboardInterrupt
            return None

    @classmethod
    def load_instances(cls):
        """(Re)build the in-memory cache and the ObjectId index."""
        conn = cls.get_connection()
        cls.Instances = []
        cls._index_objectId = {}
        pointer = 0
        for item in conn[cls.Collection].find({}):
            model = cls(item)
            cls.Instances.append(model)
            cls._index_objectId[model.id] = pointer
            pointer += 1
        cls.create_indexes()

    @classmethod
    def create_indexes(cls):
        # hook for subclasses that maintain extra in-memory indexes
        pass

    @staticmethod
    def get_connection():
        return core.Instance.Storage.connection

    def __getitem__(self, key):
        # virtual (non-persisted) fields shadow document fields
        if key in self.virtual:
            return self.virtual.get(key)
        return self._data.get(key)

    def __setitem__(self, key, value):
        # a virtual key is updated AND, when schema-valid, also written to
        # the document fields below
        if key in self.virtual:
            self.virtual[key] = value
        if self.schema_instance.is_valid(key, value):
            self._raw[key] = value
            self._data[key] = value
        else:
            raise TypeError("'{0}' is not valid type for key '{1}' in schema".format(value, key))

    def __str__(self):
        return str(self._data)

    @classmethod
    def get_data(cls):
        """Return raw document dicts for every instance."""
        if cls.Instances is not None:
            return [x.data for x in cls.Instances]
        conn = cls.get_connection()
        return [x for x in conn[cls.Collection].find({})]

    @property
    def data(self):
        return self._data

    @property
    def raw(self):
        return self._raw

    @property
    def DbRef(self):
        return self._dbref

    @property
    def id(self):
        return self._id

    def load_from(self, item):
        # NOTE(review): _raw and _data alias the SAME dict after this call --
        # confirm that is intended (they start as the same object in __init__
        # too).
        self._raw = item
        self._data = item

    def load(self, _id=None):
        """Dereference and load the document for _id (or the stored id)."""
        db = self.get_connection()
        if _id is not None:
            self._id = _id
        if self._id is not None:
            self._dbref = bson.dbref.DBRef(self._collection, self._id)
            self._raw = db.dereference(self._dbref)
            self._data = self._raw

    def save(self):
        """Persist the raw document; assigns _id/_dbref on first insert."""
        db = self.get_connection()
        to_save = self._raw
        _id = db[self._collection].save(to_save)
        if type(_id) is bson.ObjectId:
            self._id = _id
            self._dbref = bson.dbref.DBRef(self._collection, self._id)
        # self.load_instances()
        return self

    def remove(self):
        """Delete the document from storage and rebuild the instance cache."""
        db = self.get_connection()
        if self._id is not None:
            db[self._collection].remove({'_id': self._id})
        self.load_instances()

    def populate(self, *fields):
        """Return a deep copy of the data with the named reference fields
        resolved into full documents (plus all virtual fields)."""
        # returns populated DATA!!
        data = copy.deepcopy(self._data)
        if len(fields) > 0:
            for k in fields:
                data = self._populate(data, self.schema_instance._schema, k)
        for k, v in self.virtual.items():
            data[k] = v.dict()
        return data

    def _populate(self, data, schema, field=None):
        """
        :rtype : returns populated data from parameter "data"
        :param data: data, which need to be populated
        :param schema: current schema of data
        :param field: field, which need to be populated
        """
        if type(schema) is list:
            if type(data) is not list:
                raise AttributeError
            for i, v in enumerate(data):
                data[i] = self._populate(v, schema[0], field)
            return data
        if type(schema) is not dict:
            return data
        if field is not None:
            v = schema.get(field)
            data[field] = self._populate(data[field], v)
            return data
        _lemon_field = schema.get('_lemon_field')
        if _lemon_field is not True:
            # plain sub-document: recurse into every declared key
            for k, v in schema.items():
                data[k] = self._populate(data[k],v)
            return data
        ref = schema.get('ref')
        if ref is not None:
            # reference field: replace the stored id with the full document
            model_instance = ref({'_id':data})
            model_instance.load()
            return model_instance.data
        return data

    def find(self, query):
        """Yield model instances matching a raw MongoDB query."""
        con = self.get_connection()
        for v in con[self._collection].find(query):
            yield self.__class__(v)

    @classmethod
    def ensure_index_schema(cls):
        cls.Schema.ensure_index(cls)


class BaseSchema(object):
    """Declarative schema: subclasses define _schema, a nested dict whose
    leaf fields are dicts marked with '_lemon_field': True (carrying 'type',
    'default', 'unique', 'index', 'ref', ...)."""

    _schema = None

    def __init__(self):
        self._instance = None

    @classmethod
    def setup(cls):
        cls.setup_schema()

    @classmethod
    def setup_schema(cls):
        raise NotImplementedError

    def init_data(self):
        """Build an empty document matching the schema (defaults applied)."""
        schema = copy.deepcopy(self._schema)
        return self._init_data(schema)

    def _init_data(self, schema):
        # walk the (already deep-copied) schema, replacing field descriptors
        # with their default (or None) and list descriptors with []
        for k, v in schema.items():
            if type(v) == dict:
                _lemon_field = v.get('_lemon_field')
                if _lemon_field is not None:
                    schema[k] = None
                    _default = v.get('default')
                    _type = v.get('type')
                    if _default is not None and _type is not None:
                        assert(isinstance(_default, _type))
                        schema[k] = _default
                else:
                    schema[k] = self._init_data(v)
            elif type(v) == list:
                schema[k] = []
            else:
                schema[k] = None
        return schema

    def is_valid(self, key, value):
        """True when value is acceptable for top-level field 'key'."""
        item = self._schema.get(key)
        if item is None:
            return False
        if value is None:
            return True
        if type(value) == type(item):
            return True
        if type(value) == item:
            return True
        if type(item) is dict:
            is_lemon_field = item.get('_lemon_field')
            if is_lemon_field is True:
                type_key = item.get('type')
                if isinstance(value, type_key):
                    return True
                return False
        if isinstance(value, item):
            return True
        return False

    @classmethod
    def ensure_index(cls, model):
        """Create the unique/plain storage indexes declared in the schema."""
        assert(issubclass(model, BaseModel))
        connection = model.get_connection()
        collection = model.Collection
        all_indexes = cls._ensure_index(cls._schema)

        def get_index_fields(index):
            # flatten the nested marker structure produced by _ensure_index
            # into dotted field paths, split by uniqueness
            if index is None:
                return None, None
            # BUG FIX: these two comparisons used 'is', i.e. identity against
            # a string literal -- it only worked through CPython interning
            # (and is a SyntaxWarning on modern interpreters)
            if index == 'unique':
                return True, None
            if index == 'index':
                return None, True
            uniques = []
            indexes = []
            for k, v in index.items():
                result_u, result_i = get_index_fields(v)
                if type(result_u) is list and len(result_u) > 0:
                    for field in result_u:
                        uniques.append(k+'.'+field)
                if result_u is True:
                    uniques.append(k)
                if type(result_i) is list and len(result_i) > 0:
                    for field in result_i:
                        indexes.append(k+'.'+field)
                if result_i is True:
                    indexes.append(k)
            return uniques, indexes

        if type(all_indexes) is dict:
            uniques, indexes = get_index_fields(all_indexes)
            for index in uniques:
                connection[collection].ensure_index(index,unique=True)
            for index in indexes:
                connection[collection].ensure_index(index)

    @classmethod
    def _ensure_index(cls, schema):
        # map the schema onto a parallel structure of 'unique'/'index'
        # markers (or None) used by ensure_index above
        if type(schema) is dict:
            _lemon_field = schema.get('_lemon_field')
            if _lemon_field is True:
                unique = schema.get('unique')
                if unique is True:
                    return 'unique'
                index = schema.get('index')
                if index is True:
                    return 'index'
                return None
            indexes = {}
            for k, v in schema.items():
                indexes[k] = cls._ensure_index(v)
            return indexes
        if type(schema) is list:
            return cls._ensure_index(schema[0])
        return None


class ItemNotFoundException(Exception):
    """Raised when a requested model item does not exist."""
    pass
{ "repo_name": "Andrean/lemon.apple", "path": "server/models/base.py", "copies": "1", "size": "9436", "license": "mit", "hash": -5046275190196652000, "line_mean": 29.2467948718, "line_max": 97, "alpha_frac": 0.5162144977, "autogenerated": false, "ratio": 4.177069499778663, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.002262860137907511, "num_lines": 312 }
__author__ = 'Andrean' import core from bson.objectid import ObjectId class BaseModel(object): """ Base class for classes that use Storage for save themselfs """ StorageName = "base" Instances = {} Core = core.Core def __init__(self, item=None): self._item = dict(id=ObjectId()) if item is not None: self._item = item @property def _storage(self): return self.Core.Instance.Storage @property def _client(self): return self.Core.Instance.Client @classmethod def load_all(cls): items = cls._storage.get(cls.StorageName) if items is not None: for x in items: cls.Instances[x['id']] = cls(x) @classmethod def find(cls, _id): return cls.Instances.get(_id) def save(self): self.Instances[self.id] = self self._storage.set(self.StorageName, [x._item for x in self.Instances.values()]) def delete(self): if self.id in self.Instances: self.Instances.pop(self.id) self._storage.set(self.StorageName, [x._item for x in self.Instances.values()]) @property def id(self): return self._item.get('id')
{ "repo_name": "Andrean/lemon.apple", "path": "agent/models/__init__.py", "copies": "1", "size": "1225", "license": "mit", "hash": 8618749887143154000, "line_mean": 23.0392156863, "line_max": 91, "alpha_frac": 0.587755102, "autogenerated": false, "ratio": 3.746177370030581, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4833932472030581, "avg_score": null, "num_lines": null }
__author__ = 'Andrean' import core import datetime from defs.cmd import CommandStatusEnum as CmdStatus import defs.request @defs.request.prepare_agent_request def get(req, res): manager = core.Instance.Manager agent = manager.agents.findByAgentId(req.agent_id) if agent is None: # agent not found. Add them to list agent = manager.agents.add_new(req.agent_id, req.client_address[0]) agent['_sysinfo']['last_connect'] = datetime.datetime.now() agent.save() cmd_list = agent.commands.find(CmdStatus.present) cmd_list_dict = [] for cmd in cmd_list: cmd_list_dict.append(cmd.to_dict()) res.send_json(cmd_list_dict) for cmd in cmd_list: agent.commands[cmd.id].status = CmdStatus.submit @defs.request.prepare_agent_request def send(req, res): manager = core.Instance.Manager agent = manager.agents.findByAgentId(req.agent_id) if agent is None: res.send_json({}, code=404) return if req.json is not None: from_commands = req.json for command in from_commands: if agent.commands[command['id']] is not None: agent.commands[command['id']].response = command['response'] agent.commands[command['id']].status = command['status'] res.send_json({})
{ "repo_name": "Andrean/lemon.apple", "path": "server/controllers/agent_controllers/commands.py", "copies": "1", "size": "1306", "license": "mit", "hash": -8379827172997960000, "line_mean": 31.65, "line_max": 75, "alpha_frac": 0.6546707504, "autogenerated": false, "ratio": 3.5392953929539295, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4693966143353929, "avg_score": null, "num_lines": null }
__author__ = 'Andrean'

import core
import defs.errors
import datetime

############################################################################
#  GET data REQUEST
#
#  query params:
#       data_item   : data_items object_id string.
#       from        : datetime string, returns data from that timestamp
#       to          : datetime string, returns data by that timestamp
#       last        : int, returns last <last> data records
#       chunk:      : int, directly access to chunk num <chunk> of <data_item>
#       from_num    : int, request records from <from_num> position
#       end_num     : int, request records to <end_num> position
#
#  returns:
#       [{'data_item': <data_item>, 'data': [ [data, timestamp], ... ]}]
#  or
#       {'error': message}
#  or
#       {'error': {'code':int, 'message': str}
############################################################################


def _parse_timestamp(query, key):
    """Return query[key] parsed from a unix-timestamp string, or None."""
    raw = query.get(key, [None])[0]
    if raw is None:
        return None
    return datetime.datetime.fromtimestamp(float(raw))


def _collect(manager, data_items, fetch):
    """Build the response payload: one {data_item, data} dict per requested id.

    `fetch(item)` must yield [data, timestamp] records for a DataItem.
    """
    result = []
    for data_item_oid in data_items:
        data_item = manager.data_items.findById(data_item_oid)
        result.append(dict(data_item=data_item_oid,
                           data=[x for x in fetch(data_item)]))
    return result


def get(req, res):
    """Serve data records; branch priority: last > chunk > num-range > time-range."""
    data_items = req.query.get('data_item')
    if data_items is None:
        raise defs.errors.LemonAttributeError('"data_item" is empty')
    from_time = _parse_timestamp(req.query, 'from')
    to_time = _parse_timestamp(req.query, 'to')
    from_num = req.query.get('from_num', [None])[0]
    to_num = req.query.get('to_num', [None])[0]
    if from_num is not None:
        from_num = int(from_num)
    if to_num is not None:
        to_num = int(to_num)
    last = req.query.get('last', [None])[0]
    chunk_num = req.query.get('chunk', [None])[0]
    manager = core.Instance.Manager
    if last is not None:
        last = int(last)
        res.send_json(_collect(manager, data_items,
                               lambda item: item.get_last(last, chunk_num)))
        return
    if chunk_num is not None:
        # Whole-chunk access: ask for up to one full chunk of records.
        size = manager.data_items.MaxChunkSize
        res.send_json(_collect(manager, data_items,
                               lambda item: item.get_last(size, chunk_num)))
        return
    if from_num is not None or to_num is not None:
        res.send_json(_collect(manager, data_items,
                               lambda item: item.get_data_by_num(from_num=from_num,
                                                                 to_num=to_num)))
        return
    res.send_json(_collect(manager, data_items,
                           lambda item: item.get_data(_from=from_time,
                                                      _to=to_time)))


############################################################################
#  HTTP GET data/chunk/count REQUEST
#
#  query params:
#       data_item:  data_items object_id string
#       from     :  datetime string, count from <from> timestamp
#       to       :  datetime string, count to <to> timestamp
#  returns:
#       count of defined data records of <data_item>
#       int
#  or
#       [{'data_item': <data_item>, 'count': count}]
############################################################################
def count(req, res):
    """Count data records per item; a single item returns the bare count."""
    data_items = req.query.get('data_item')
    if data_items is None:
        raise defs.errors.LemonAttributeError('"data_item" is empty')
    from_time = _parse_timestamp(req.query, 'from')
    to_time = _parse_timestamp(req.query, 'to')
    manager = core.Instance.Manager
    result = []
    for data_item_oid in data_items:
        data_item = manager.data_items.findById(data_item_oid)
        result.append(dict(data_item=data_item_oid,
                           count=data_item.count_data(_from=from_time,
                                                      _to=to_time)))
    if len(result) == 1:
        res.send_content(result[0]['count'])
    else:
        res.send_json(result)
{ "repo_name": "Andrean/lemon.apple", "path": "server/controllers/web_controllers/data/chunk.py", "copies": "1", "size": "4444", "license": "mit", "hash": -3822373151258991600, "line_mean": 40.1481481481, "line_max": 109, "alpha_frac": 0.5661566157, "autogenerated": false, "ratio": 3.4745895230648944, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9538903357673632, "avg_score": 0.00036855621825233794, "num_lines": 108 }
__author__ = 'Andrean' import core import defs.errors def get(req, res): entities_id_list = req.query.get('entity_id') names = req.query.get('name') populate = req.query.get('populate',[]) manager = core.Instance.Manager if entities_id_list is not None: entities_id = [x.id for x in manager.entities if x['entity_id'] in entities_id_list] items = manager.data_items.list_instances({'entity': {'$in': entities_id}}) if names is not None: items = [x for x in items if x['name'] in names] res.send_json([x.populate(*populate) for x in items]) return if names is not None: res.send_json([x.populate(*populate) for x in manager.data_items.list_instances({'name': {'$in': names}})]) return res.send_json([x.populate(*populate) for x in manager.data_items.list_instances()]) def set(req, res): if req.json is None: raise defs.errors.LemonAttributeError('empty body') _id = req.json.get('_id') manager = core.Instance.Manager if _id is not None: item = manager.data_items.findById(_id) else: item = manager.data_items.add_new(**(req.json)) if item is None: raise defs.errors.LemonException("data_item not exists") for k, v in req.json.items(): if k is not '_id': item[k] = v res.send_json(item.id) def remove(req, res): _ids = req.query.get('id') removed = [] if _ids is not None: manager = core.Instance.Manager for x in _ids: item = manager.data_items.findById(x) if item is None: removed.append({'id': x, 'status': "not found"}) break item.remove() removed.append({'id': x, 'status': "not found"}) res.send_json(removed)
{ "repo_name": "Andrean/lemon.apple", "path": "server/controllers/web_controllers/data/items.py", "copies": "1", "size": "1815", "license": "mit", "hash": -2798767439631429600, "line_mean": 32.6111111111, "line_max": 115, "alpha_frac": 0.5878787879, "autogenerated": false, "ratio": 3.4903846153846154, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9572210713578793, "avg_score": 0.0012105379411645632, "num_lines": 54 }
__author__ = 'Andrean' import core import defs.errors def get(req, res): names = req.query.get('name') short = req.query.get('short', ["0"])[0] contractors = [] manager = core.Instance.Manager if names is not None: contractors.extend(manager.contractors.list_instances({'name': { '$in': names}})) else: contractors.extend(manager.contractors.list_instances()) if short == "1": res.send_json([x.short_view for x in contractors]) return res.send_json([x.data for x in contractors]) def add(req, res): if req.json is not None: name = req.json.get('name') if name is None: raise defs.errors.LemonAttributeError('Missing "NAME"') data = req.json.get('data') if data is None: raise defs.errors.LemonAttributeError('Missing "DATA"') manager = core.Instance.Manager contractor = manager.contractors.add_new(name, data) res.send_json({'name': contractor['name']}) return raise defs.errors.LemonAttributeError('body is not JSON') def remove(req, res): names = req.query.get('name') if names is None: raise defs.errors.LemonAttributeError('name is not found') manager = core.Instance.Manager removed = {} for name in names: removed[name] = None contractor = manager.contractors.findByName(name) if contractor is not None: contractor.remove() removed[name] = True res.send_json(removed)
{ "repo_name": "Andrean/lemon.apple", "path": "server/controllers/web_controllers/contractors.py", "copies": "1", "size": "1520", "license": "mit", "hash": -5668473774393873000, "line_mean": 30.6875, "line_max": 89, "alpha_frac": 0.6203947368, "autogenerated": false, "ratio": 3.7438423645320196, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.48642371013320196, "avg_score": null, "num_lines": null }
__author__ = 'Andrean' import core def select_properties(obj, properties): if properties is None: return obj if type(obj) is list: return [select_properties(x, properties) for x in obj] if type(obj) is dict: new_obj = {} for p in properties: if p in obj: new_obj[p] = obj[p] return new_obj return obj ################################################################ # # # """ # Agents management controllers: # get_agents: get list of agents by filter # filter: # tag # """ ################################################################ def get_agents(req, res): tags = req.query.get('tag') populate = req.query.get('populate', []) properties = req.query.get('property') manager = core.Instance.Manager if tags is not None: agents = manager.agents.findByTag(tags) else: agents = manager.agents.list_instances() res.send_json(select_properties([x.populate(*populate) for x in agents], properties)) def get_agent_commands(req, res): tags = req.query.get('tag') status = req.query.get('status', [None])[0] manager = core.Instance.Manager if tags is not None: agents = manager.agents.findByTag(tags) else: agents = manager.agents.list_instances() res.send_json([x.commands.dict(status) for x in agents])
{ "repo_name": "Andrean/lemon.apple", "path": "server/controllers/web_controllers/agents.py", "copies": "1", "size": "1417", "license": "mit", "hash": 6515910810929958000, "line_mean": 27.34, "line_max": 89, "alpha_frac": 0.5455187015, "autogenerated": false, "ratio": 3.9035812672176307, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.994798885760652, "avg_score": 0.00022222222222222223, "num_lines": 50 }
__author__ = 'Andrean'

import core

################################################################
#
#
# """
# Entities management controllers:
#   add:    add new entity to database
#   remove: remove entity from database
#   get:    get list of entities by filter
#       filter:
#           tags
#   modify: modify one entity
# """
################################################################

"""
HTTP GET entities REQUEST CONTROLLER
query params:
    :parameter  agent_id
    :parameter  entity_id
    :parameter  tag
    :parameter  populate. What fields must been populated
:return entities list
"""
def get(req, res):
    # Branch priority: explicit entity ids > agent ids > tags > all entities.
    agent_ids = req.query.get('agent_id')
    entity_ids = req.query.get('entity_id')
    tags = req.query.get('tag')
    populate = req.query.get('populate', [])
    manager = core.Instance.Manager
    if entity_ids is not None:
        entities = []
        for entity_id in entity_ids:
            entity = manager.entities.findByEntityId(entity_id)
            if entity is not None:
                entities.append(entity)
        res.send_json([x.populate(*populate) for x in entities])
        return
    if agent_ids is not None:
        entities = []
        for agent_id in agent_ids:
            agent = manager.agents.findByAgentId(agent_id)
            if agent is not None:
                entities.extend(agent.entities)
        res.send_json([x.populate(*populate) for x in entities])
        return
    if tags is not None:
        entities = []
        for agent in manager.agents.findByTag(tags):
            entities.extend(agent.entities)
        res.send_json([x.populate(*populate) for x in entities])
        return
    res.send_json([x.populate(*populate)
                   for x in manager.entities.list_instances()])


"""
HTTP PUT new entity REQUEST CONTROLLER
query params:
    :parameter  name
    :parameter  description
    :parameter  agent_id
:return entity_id of new entity
"""
def add(req, res):
    agent_id = req.query.get('agent_id', [None])[0]
    name = req.query.get('name', [None])[0]
    description = req.query.get('description', [None])[0]
    manager = core.Instance.Manager
    result = manager.entities.add_new(agent_id, name, description)
    if result is not None:
        res.send_json({'entity_id': result['entity_id']})
        return
    res.send_content('', code=401)


"""
HTTP POST modify entity REQUEST CONTROLLER
body params:
    :parameter  agent_id
    :parameter  name
    :parameter  description
:return HTTP STATUS
"""
def modify(req, res):
    if req.json is not None:
        applied = {}    # result is dict of applied items
        manager = core.Instance.Manager
        for modify_args in req.json:
            entity_id = modify_args.get('entity_id')
            entity = manager.entities.findByEntityId(entity_id)
            if entity is not None:
                applied[entity_id] = {}
                name = modify_args.get('name')
                description = modify_args.get('description')
                agent_id = modify_args.get('agent_id')
                if name is not None:
                    entity['info']['name'] = name
                    applied[entity_id]['name'] = True
                if description is not None:
                    entity['info']['description'] = description
                    # BUG FIX: record this change under 'description' —
                    # it previously overwrote the 'name' flag (copy-paste).
                    applied[entity_id]['description'] = True
                if agent_id is not None:
                    agent = manager.agents.findByAgentId(agent_id)
                    if agent is not None:
                        entity.set_agent(agent)
                        applied[entity_id]['agent_id'] = True
                entity.save()
        res.send_json(applied)
        return
    res.send_content("request error", code=401)


"""
HTTP DELETE delete entity REQUEST CONTROLLER
body params:
    :parameter entity_id
:return HTTP STATUS
"""
def remove(req, res):
    entities = req.query.get('entity_id', [])
    manager = core.Instance.Manager
    removed = {}
    for entity_id in entities:
        entity = manager.entities.findByEntityId(entity_id)
        removed[entity_id] = None
        if entity is not None:
            entity.remove()
            removed[entity_id] = True
    res.send_json(removed)

################################################################
{ "repo_name": "Andrean/lemon.apple", "path": "server/controllers/web_controllers/entitiies.py", "copies": "1", "size": "4339", "license": "mit", "hash": -3356265618147779000, "line_mean": 32.3846153846, "line_max": 85, "alpha_frac": 0.5561189214, "autogenerated": false, "ratio": 4.2373046875, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.0078025711283820215, "num_lines": 130 }
__author__ = 'Andrean'

import datetime
import defs.cmd
import uuid
import hashlib
from defs.search import binary_search
from bson.objectid import ObjectId
from bson.binary import Binary
from models.base import BaseModel, BaseSchema


def SetupSchema():
    """Initialize every model schema (called once at module import, below)."""
    AgentSchema.setup()
    EntitySchema.setup()
    DataItemSchema.setup()
    DataChunkSchema.setup()
    ContractorSchema.setup()
    TriggerSchema.setup()


def Le(**kwargs):
    """Mark a schema leaf as a lemon field definition (see schema docs below)."""
    kwargs['_lemon_field'] = True
    return kwargs

#######################################################################
# describe ModelSchemas
#
# System fields:
#   _lemon_field:   if True shows up that dict field is system definition of field
#   type:           Class reference. Shows type of that field
#   ref:            Reference to another Model
#   unique:         Ensures index in MongoDB with UNIQUE: TRUE
#   index:          if True Ensures index in MongoDB on that field in PyMongo.ASCENDING order
#   default:        default value of field
#
#######################################################################


class AgentSchema(BaseSchema):
    @classmethod
    def setup_schema(cls):
        cls._schema = {
            'agent_id': uuid.UUID,
            'name': str,
            'tags': [str],
            'entities': [Le(type=ObjectId, ref=Entity)],
            '_sysinfo': {
                'network_address': str,
                'last_connect': datetime.datetime,
                'added_at': datetime.datetime
            }
        }


class EntitySchema(BaseSchema):
    @classmethod
    def setup_schema(cls):
        cls._schema = {
            'entity_id': uuid.UUID,
            'agent': Le(type=ObjectId, ref=Agent),
            'info': {
                'name': Le(type=str, unique=True),
                'description': str,
                '_added_at': datetime.datetime,
                '_last_check': datetime.datetime,
                'status': str
            },
            'data_items': [
                Le(type=ObjectId, ref=DataItem)
            ]
        }


class DataItemSchema(BaseSchema):
    @classmethod
    def setup_schema(cls):
        cls._schema = {
            'name': str,
            'entity': Le(type=ObjectId, ref=Entity),
            'data_type': str,
            'contractor': Le(type=ObjectId, ref=Contractor),
            'trigger': Le(type=ObjectId, ref=Trigger),
            'data': [Le(type=ObjectId, ref=DataChunk, index=True)]
        }


# Better decision is storing Data in PostgreSQL DB
# todo: use postgresql for this data schema
class DataChunkSchema(BaseSchema):
    @classmethod
    def setup_schema(cls):
        cls._schema = {
            'data_item': Le(type=ObjectId, ref=DataItem, index=True),
            'num': Le(type=int, default=0),
            'size': Le(type=int, default=0),
            'chunk': [
                {
                    'data': dict,
                    'timestamp': datetime.datetime
                }
            ],
            '_firstTimestamp': datetime.datetime,
            '_endTimestamp': datetime.datetime
        }


class ContractorSchema(BaseSchema):
    @classmethod
    def setup_schema(cls):
        cls._schema = {
            'name': Le(type=str, unique=True),
            'data': Binary,     # binary object
            '_type': str,
            '_hash': Le(type=str, unique=True)
        }


class TriggerSchema(BaseSchema):
    @classmethod
    def setup_schema(cls):
        cls._schema = {
            'name': Le(type=str, unique=True),
            'trigger': str
        }

#######################################################################
#######################################################################
# describe models
#
#######################################################################


#######################################################################
#  Model "Agent"
#######################################################################
class Agent(BaseModel):
    """Monitored agent; carries tags, linked entities and a virtual
    (non-persisted) command queue."""
    Schema = AgentSchema
    Collection = 'agents'
    _index_agentid = None   # agent_id -> position in Instances
    _index_tags = None      # tag -> list of positions in Instances

    def __init__(self, item=None):
        super().__init__(item)
        self.virtual['commands'] = defs.cmd.Commands()

    @property
    def commands(self):
        return self.virtual['commands']

    @classmethod
    def add_new(cls, agent_id, client_host):
        """Create, timestamp and persist a new agent record."""
        agent = cls()
        agent['agent_id'] = uuid.UUID(agent_id)
        agent['tags'].append(str(agent['agent_id']))
        agent['_sysinfo']['network_address'] = client_host
        agent['_sysinfo']['added_at'] = datetime.datetime.now()
        agent['_sysinfo']['last_connect'] = datetime.datetime.now()
        return agent.save()

    @classmethod
    def add_command(cls, cmd, tags, args=None):
        """Queue a command on every agent matching any of `tags`."""
        # FIX: avoid a mutable default argument (was args=[]).
        command = defs.cmd.Command(cmd, tags, [] if args is None else args)
        for agent in cls.findByTag(tags):
            agent.commands.add(command)

    @classmethod
    def create_indexes(cls):
        """Rebuild the in-memory agent_id / tag lookup indexes."""
        if cls.Instances is not None:
            cls._index_tags = {}
            cls._index_agentid = {}
            # BUG FIX: the position counter was never incremented, so every
            # agent mapped to index 0; enumerate yields the real position.
            for pointer, agent in enumerate(cls.Instances):
                cls._index_agentid[agent['agent_id']] = pointer
                for tag in agent['tags']:
                    cls._index_tags.setdefault(tag, []).append(pointer)

    @classmethod
    def findByAgentId(cls, agent_id):
        """Return the agent with this UUID (cache first, then DB), or None."""
        if type(agent_id) is not uuid.UUID:
            agent_id = uuid.UUID(agent_id)
        try:
            if cls._index_agentid is not None:
                return cls.Instances[cls._index_agentid[agent_id]]
            conn = cls.get_connection()
            item = conn[cls.Collection].find_one({'agent_id': agent_id})
            if item is not None:
                return cls(item)
        except Exception:
            # Best-effort lookup: a missing index entry or DB error means
            # "not found" to callers.
            pass
        return None

    @classmethod
    def findByTag(cls, tags):
        """Return all agents carrying at least one of `tags` (deduplicated)."""
        try:
            if cls._index_tags is not None:
                agents = []
                pointers = []
                for tag in tags:
                    index = cls._index_tags.get(tag)
                    if index is not None:
                        for pointer in index:
                            if pointer not in pointers:
                                pointers.append(pointer)
                for pointer in pointers:
                    agents.append(cls.Instances[pointer])
                return agents
            conn = cls.get_connection()
            return [cls(agent)
                    for agent in conn[cls.Collection].find({'tags': {'$in': tags}})]
        except Exception:
            pass
        return []

    @property
    def entities(self):
        return [Entity.findById(e) for e in self['entities']]

    def add_entity(self, entity):
        """Link an Entity (instance or ObjectId) to this agent, once."""
        _id = None
        if isinstance(entity, Entity):
            _id = entity.id
        if type(entity) is ObjectId:
            _id = entity
        if _id is not None and _id not in self['entities']:
            self['entities'].append(_id)
            self.save()

    def del_entity(self, entity):
        """Unlink an Entity (instance or ObjectId) from this agent."""
        # FIX: initialize _id so an unexpected argument type does not raise
        # NameError (mirrors add_entity).
        _id = None
        if isinstance(entity, Entity):
            _id = entity.id
        if type(entity) is ObjectId:
            _id = entity
        if _id is not None and _id in self['entities']:
            self['entities'].remove(_id)
            self.save()


#######################################################################
#  Model "Entity"
#######################################################################
class Entity(BaseModel):
    """A monitored object owned by one agent; holds its data items."""
    Schema = EntitySchema
    Collection = 'entities'
    _index_entity_id = None     # entity_id -> position in Instances

    @property
    def agent(self):
        if self['agent'] is not None:
            return Agent.findById(self['agent'])
        return None

    def set_agent(self, agent):
        """Re-assign this entity to `agent`, detaching it from the old one."""
        _id = None
        if isinstance(agent, Agent):
            _id = agent.id
        if type(agent) == ObjectId:
            _id = agent
        last_agent_id = self['agent']
        if last_agent_id is not None:
            agent = Agent.findById(last_agent_id)
            if agent is not None:
                agent.del_entity(self)
        self['agent'] = _id
        agent = Agent.findById(_id)
        if agent is not None:
            agent.add_entity(self)
        self.save()

    @classmethod
    def add_new(cls, agent_id, name, description):
        """Create and persist a new entity attached to the given agent."""
        entity = cls()
        entity['entity_id'] = uuid.uuid4()
        entity['info']['_added_at'] = datetime.datetime.now()
        entity['info']['name'] = str(name)
        entity['info']['description'] = str(description)
        entity['info']['status'] = 'unknown'
        # todo: use trigger to field 'agent' for correct assignment agent
        entity.save()   # save to get ObjectId
        entity.set_agent(Agent.findByAgentId(agent_id))
        entity.save()
        return entity

    @classmethod
    def create_indexes(cls):
        """Rebuild the in-memory entity_id lookup index."""
        if cls.Instances is not None:
            cls._index_entity_id = {}
            # BUG FIX: the position counter was never incremented, so every
            # entity mapped to index 0; enumerate yields the real position.
            for pointer, e in enumerate(cls.Instances):
                cls._index_entity_id[e['entity_id']] = pointer

    @classmethod
    def findByEntityId(cls, entity_id):
        """Return the entity with this UUID (cache first, then DB), or None."""
        if type(entity_id) is not uuid.UUID:
            entity_id = uuid.UUID(entity_id)
        try:
            if cls._index_entity_id is not None:
                return cls.Instances[cls._index_entity_id[entity_id]]
            conn = cls.get_connection()
            item = conn[cls.Collection].find_one({'entity_id': entity_id})
            if item is not None:
                return cls(item)
        except Exception:
            pass
        return None

    def remove(self):
        """Detach from the owning agent, then delete the document."""
        agent = self.agent
        if agent is not None:
            agent.del_entity(self)
        super().remove()

    def addDataItem(self, item):
        """Attach a DataItem (instance, or a dict describing a new one)."""
        if isinstance(item, DataItem):
            if item.id not in self['data_items']:
                self['data_items'].append(item.id)
                item['entity'] = self.id
                item.save()
                self.save()
            return item
        if type(item) is dict:
            item = DataItem.add_new(item['name'], item['data_type'],
                                    self, item['contractor'])
            self.save()
            return item
        return None

    def delDataItem(self, item, full_delete=False):
        """Detach a DataItem; with full_delete=True also delete its document."""
        _id = None
        if isinstance(item, ObjectId):
            _id = item
        if isinstance(item, DataItem):
            _id = item.id
        if _id is not None:
            if _id in self['data_items']:
                self['data_items'].remove(_id)
                item = DataItem.findById(_id)
                item['entity'] = None
                item.save()
                self.save()
                if full_delete is True:
                    item.remove()


class DataItem(BaseModel):
    """A time series belonging to an entity, stored as a list of DataChunks."""
    Schema = DataItemSchema
    Collection = 'data_items'
    MaxChunkSize = 256      # max size of DataChunk document field 'chunk'

    @classmethod
    def add_new(cls, name, data_type, entity, contractor):
        """Create a data item and register it on its entity."""
        data_item = cls()
        data_item['name'] = name
        data_item['data_type'] = data_type
        if isinstance(contractor, Contractor):
            data_item['contractor'] = contractor.id
        elif isinstance(contractor, ObjectId):
            data_item['contractor'] = contractor
        else:
            raise TypeError("contractor is not Contractor or ObjectId")
        e = None
        if isinstance(entity, BaseModel):
            data_item['entity'] = entity.id
            e = Entity.findById(entity.id)
        if isinstance(entity, ObjectId):
            data_item['entity'] = entity
            e = Entity.findById(entity)
        data_item.save()
        if e is not None:
            e.addDataItem(data_item)
        return data_item

    def remove(self):
        """Detach from the owning entity, then delete the document."""
        if self['entity'] is not None:
            Entity.findById(self['entity']).delDataItem(self)
        super().remove()

    def verify_hash(self, hash):
        """Check `hash` against the contractor's stored payload hash."""
        assert(isinstance(self['contractor'], ObjectId))
        conn = self.get_connection()
        value = conn[Contractor.Collection].find_one(self['contractor'],
                                                     fields=['_hash'])
        assert(value is not None)
        return hash == str(value.get('_hash'))

    def add_data(self, data, timestamp):
        """Append one record, opening a new chunk when the tail chunk is full."""
        if len(self['data']) == 0:
            data_chunk = DataChunk.add_new(self.id, 0)
            data_chunk['_firstTimestamp'] = timestamp
            self['data'].append(data_chunk.id)
            self.save()
        else:
            data_chunk = DataChunk.findById(self['data'][-1])
            assert(isinstance(data_chunk, DataChunk))
            if data_chunk['size'] >= self.MaxChunkSize:
                num = data_chunk['num']
                data_chunk = DataChunk.add_new(self.id, num + 1)
                data_chunk['_firstTimestamp'] = timestamp
                self['data'].append(data_chunk.id)
                self.save()
        data_chunk.insert(data, timestamp)

    def count_data(self, **kwargs):
        """Count records in the time range given by _from / _to kwargs."""
        pos = self.__getelements_pos(**kwargs)
        if pos is None:
            return 0
        # FIX: use the configured chunk size instead of a hard-coded 256 so
        # the count stays consistent with position arithmetic elsewhere.
        count = (pos[1][0] - pos[0][0]) * self.MaxChunkSize + pos[1][1] - pos[0][1]
        return count

    def get_data_by_num(self, from_num, to_num):
        """Yield records by absolute record positions [from_num, to_num].
        NOTE(review): callers may pass None for either bound (see the chunk
        controller) — that currently raises TypeError here; confirm intent."""
        if from_num >= to_num:
            return []
        from_index_2 = from_num // self.MaxChunkSize
        from_index_1 = from_num - from_index_2 * self.MaxChunkSize
        to_index_2 = to_num // self.MaxChunkSize
        to_index_1 = to_num - to_index_2 * self.MaxChunkSize
        return self.__get_by_pos([[from_index_2, from_index_1],
                                  [to_index_2, to_index_1]])

    def get_last(self, last, chunk_num=None):
        """Yield up to `last` most-recent [data, timestamp] records, optionally
        restricted to one chunk."""
        counter = 0
        if chunk_num is not None:
            chunk_num = int(chunk_num)
            if chunk_num >= len(self['data']):
                return
            data_chunk = DataChunk(self['data'][chunk_num])
            for record in reversed(data_chunk['chunk']):
                if counter < last:
                    yield [record['data'], record['timestamp']]
                    counter += 1
                else:
                    return
            return
        for item in reversed(self['data']):
            if counter >= last:
                break
            data_chunk = DataChunk(item)
            for record in reversed(data_chunk['chunk']):
                if counter < last:
                    yield [record['data'], record['timestamp']]
                    counter += 1

    def __get_by_pos(self, pos):
        """Yield [data, timestamp] for the inclusive position range `pos`
        ([[first_chunk, first_record], [last_chunk, last_record]])."""
        if pos is None:
            return []
        try:
            chunk = DataChunk(self['data'][pos[0][0]])
            if pos[0][0] == pos[1][0]:
                for i, item in enumerate(chunk['chunk']):
                    if i < pos[0][1] or i > pos[1][1]:
                        continue
                    yield [item['data'], item['timestamp']]
            else:
                for i, item in enumerate(chunk['chunk']):
                    if i < pos[0][1]:
                        continue
                    yield [item['data'], item['timestamp']]
                # BUG FIX: iterate all middle chunks — range(a+1, b-1)
                # silently skipped chunk b-1.
                for i in range(pos[0][0] + 1, pos[1][0]):
                    chunk = DataChunk(self['data'][i])
                    for item in chunk['chunk']:
                        yield [item['data'], item['timestamp']]
                # BUG FIX: the final chunk is addressed by its chunk position
                # pos[1][0], not by the in-chunk record index pos[1][1].
                chunk = DataChunk(self['data'][pos[1][0]])
                for i, item in enumerate(chunk['chunk']):
                    if i > pos[1][1]:
                        break
                    yield [item['data'], item['timestamp']]
        except IndexError:
            return []

    def get_data(self, **kwargs):
        """Yield records in the time range given by _from / _to kwargs."""
        pos = self.__getelements_pos(**kwargs)
        return self.__get_by_pos(pos)

    def __getelements_pos(self, **kwargs):
        """Translate a [_from, _to] time range into inclusive chunk/record
        positions via binary search; None when the range is empty."""
        if len(self['data']) == 0:
            return None
        from_time = kwargs.get('_from')
        if from_time is None:
            from_time = datetime.datetime.min
        to_time = kwargs.get('_to')
        if to_time is None:
            to_time = datetime.datetime.max
        assert(isinstance(from_time, datetime.datetime))
        assert(isinstance(to_time, datetime.datetime))
        if from_time > to_time:
            return None
        from_oid_pos = binary_search(self['data'], from_time,
                                     self.__comparator_chunks, strict=False)
        to_oid_pos = binary_search(self['data'], to_time,
                                   self.__comparator_chunks, strict=False)
        # +-0.5 are binary_search's out-of-range sentinels:
        # last timestamp in data is less than from, or first is greater than to.
        if from_oid_pos == 0.5 or to_oid_pos == -0.5:
            return None
        if from_oid_pos == -0.5:
            from_oid_pos = 0
        if to_oid_pos == 0.5:
            to_oid_pos = len(self['data']) - 1
        chunk_from = DataChunk(self['data'][from_oid_pos])
        from_num = binary_search(chunk_from['chunk'], from_time,
                                 self.__comparator_data_in_chunk, strict=False)
        if from_num < 0:
            from_num = 0
        chunk_to = DataChunk(self['data'][to_oid_pos])
        to_num = binary_search(chunk_to['chunk'], to_time,
                               self.__comparator_data_in_chunk, strict=False)
        if to_num == 0.5:
            to_num = chunk_to['size'] - 1
        return [[from_oid_pos, from_num], [to_oid_pos, to_num]]

    @staticmethod
    def __comparator_chunks(chunk_oid, timestamp):
        chunk = DataChunk(chunk_oid)
        if chunk['_firstTimestamp'] > timestamp:
            return 1
        if chunk['_endTimestamp'] < timestamp:
            return -1
        return 0

    @staticmethod
    def __comparator_data_in_chunk(el, timestamp):
        if el['timestamp'] > timestamp:
            return 1
        if el['timestamp'] < timestamp:
            return -1
        return 0


class DataChunk(BaseModel):
    """One bounded slice of a DataItem's records; field access is lazy —
    __getitem__/__setitem__ hit MongoDB directly once an _id exists."""
    Schema = DataChunkSchema
    Collection = 'data'

    def __getitem__(self, item):
        if self._id is None:
            return None
        conn = self.get_connection()
        value = conn[self.Collection].find_one(self._id, fields=[item])
        return value.get(item)

    def __setitem__(self, key, value):
        if self._id is None:
            self._data[key] = value
            return
        conn = self.get_connection()
        conn[self.Collection].update({'_id': self._id}, {'$set': {key: value}})

    @property
    def data(self):
        self.force_load()
        return self._data

    def load(self, _id=None):
        # Deliberately lazy: only remember the id; force_load fetches.
        self._id = _id

    def force_load(self, _id=None):
        super().load(_id)

    @classmethod
    def add_new(cls, data_item, num):
        """Create and persist an empty chunk number `num` for `data_item`."""
        chunk = cls()
        chunk.force_load()
        if isinstance(data_item, DataItem):
            chunk['data_item'] = data_item.id
        elif isinstance(data_item, ObjectId):
            chunk['data_item'] = data_item
        else:
            raise TypeError("{0} is not ObjectId or DataItem".format(type(data_item)))
        chunk['num'] = num
        chunk.save()
        return chunk

    def insert(self, data, timestamp):
        """Atomically append one record and bump size / end timestamp."""
        assert(isinstance(timestamp, datetime.datetime))
        conn = self.get_connection()
        conn[self.Collection].update(
            {'_id': self.id},
            {
                '$inc': {'size': 1},
                '$push': {'chunk': {'data': data, 'timestamp': timestamp}},
                '$set': {'_endTimestamp': timestamp}
            }
        )


class Contractor(BaseModel):
    """Named binary payload (collector script) identified by an MD5 hash."""
    Schema = ContractorSchema
    Collection = 'contractors'
    _index_name = None

    @classmethod
    def add_new(cls, name, binary_data):
        contractor = cls()
        contractor['name'] = name
        contractor['data'] = Binary(binary_data)
        contractor['_type'] = "py"
        contractor['_hash'] = cls.get_md5(contractor['data'])
        contractor.save()
        return contractor

    @staticmethod
    def get_md5(data):
        md5 = hashlib.md5()
        md5.update(data)
        return md5.hexdigest()

    @property
    def short_view(self):
        """Document copy with the binary payload replaced by its size."""
        data = self._data.copy()
        data['size'] = len(self['data'])
        data['data'] = None
        return data

    @classmethod
    def findByName(cls, name):
        try:
            assert(isinstance(name, str))
            conn = cls.get_connection()
            item = conn[cls.Collection].find_one({'name': name})
            if item is not None:
                return cls(item)
        except Exception:
            pass
        return None


class Trigger(BaseModel):
    Schema = TriggerSchema
    Collection = 'triggers'

#######################################################################

#######################################################################
# after describe of models we must
# setup schemas
#######################################################################
SetupSchema()
{ "repo_name": "Andrean/lemon.apple", "path": "server/models/components.py", "copies": "1", "size": "20639", "license": "mit", "hash": 300086939539760830, "line_mean": 31.4512578616, "line_max": 111, "alpha_frac": 0.5086002229, "autogenerated": false, "ratio": 4.1711802748585285, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.001100377041264949, "num_lines": 636 }
__author__ = 'Andrean'

import defs.cmd
import threading
import traceback
import sys
import logging
import time

from commands.routes import Routes


class CommandManager(threading.Thread):
    """Background thread that accepts raw commands and dispatches each new
    one to a CommandHandler running on its own daemon thread."""

    def __init__(self):
        super().__init__()
        self.mutex = threading.Lock()
        self._stop_event = threading.Event()
        self.command_ready = threading.Event()
        self._new_commands = threading.Event()
        self._commands = []
        self.router = CommandRouter()

    def run(self):
        # Poll (1s granularity) for newly submitted commands until stop().
        self.router.load(Routes)
        while not self._stop_event.is_set():
            self._new_commands.wait(1)
            if self._new_commands.is_set():
                with self.mutex:
                    for cmd in self._commands:
                        # Only commands nobody has picked up yet.
                        if cmd.status == defs.cmd.CommandStatusEnum.present:
                            CommandHandler(cmd, self)
                    self._new_commands.clear()

    def status(self, timeout=None):
        """Wait up to *timeout* seconds for a command to finish, then return
        every queued command as a dict.  Finished (completed/error) commands
        are dropped from the queue after being reported once."""
        self.command_ready.wait(timeout)
        with self.mutex:
            result = self._commands
            if self.command_ready.is_set():
                terminal = (defs.cmd.CommandStatusEnum.completed,
                            defs.cmd.CommandStatusEnum.error)
                # Keep only commands that are still in flight.
                self._commands = [x for x in self._commands
                                  if x.status not in terminal]
                self.command_ready.clear()
            return [x.to_dict() for x in result]

    def handle(self, commands):
        """Queue a list of raw command dicts for execution.

        Raises:
            ValueError: when *commands* is not a list.
        """
        # isinstance instead of type() ==: accepts list subclasses too.
        if isinstance(commands, list):
            if commands:
                with self.mutex:
                    self._commands.extend(defs.cmd.Command(x) for x in commands)
                    self._new_commands.set()
        else:
            raise ValueError("Unknown type of commands")

    def stop(self):
        self._stop_event.set()


class CommandHandler(object):
    """Executes a single command on a daemon thread and records its status
    back onto the Command object (guarded by the manager's mutex)."""

    def __init__(self, command_obj, manager):
        self.command = command_obj
        self.manager = manager
        self.router = self.manager.router
        t = threading.Thread(target=self.do, daemon=True)
        t.start()

    def do(self):
        self.set_pending(dict(percent=0, msg='Command started'))
        self.router.dispatch(self, self.command.cmd)
        self.set_completed()

    def set_status(self, status, msg):
        with self.manager.mutex:
            self.command.response = msg
            self.command.status = status

    def set_pending(self, msg=""):
        self.set_status(defs.cmd.CommandStatusEnum.pending, msg)

    def set_error(self, msg=""):
        self.set_status(defs.cmd.CommandStatusEnum.error, msg)
        self.manager.command_ready.set()

    def set_completed(self, msg=""):
        # Do not clobber an error status already set by the dispatched action.
        if self.command.status == defs.cmd.CommandStatusEnum.pending:
            self.set_status(defs.cmd.CommandStatusEnum.completed, msg)
            self.manager.command_ready.set()


class CommandRouter(object):
    """Maps command names to callables; routes are (name, action) pairs."""

    def __init__(self):
        self._logger = logging.getLogger('main.' + self.__class__.__name__)
        self._routes = []

    def dispatch(self, handler, command):
        try:
            for route in self._routes:
                if route[0] == command:
                    route[1](handler)
                    return
        except Exception:
            # Join the traceback lines into one string (format_exception
            # returns a list; the response field expects a message).
            handler.set_error(''.join(traceback.format_exception(*sys.exc_info())))
            return
        handler.set_error('Unhandled command: {0}'.format(command))

    def load(self, routes):
        self._routes.extend(routes)
{ "repo_name": "Andrean/lemon.apple", "path": "agent/commands/__init__.py", "copies": "1", "size": "3483", "license": "mit", "hash": 2135814522165558000, "line_mean": 29.2869565217, "line_max": 121, "alpha_frac": 0.5661785817, "autogenerated": false, "ratio": 4.146428571428571, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5212607153128571, "avg_score": null, "num_lines": null }
__author__ = 'Andrean' import enum from datetime import datetime from uuid import uuid4 import copy class BaseCommands(enum.Enum): get_info = "_.get_info" class CommandStatusEnum(enum.IntEnum): error = -1 present = 0 submit = 1 pending = 2 completed = 3 class Command(object): def __init__(self, cmd=None, tags=[], args=[]): if type(cmd) is dict: self.from_dict(cmd) if type(cmd) is str: self.id = uuid4() self.cmd = cmd self.tags = tags self.args = args self.time = datetime.now() self.status = CommandStatusEnum.present self.response = None def from_dict(self, item): self.id = copy.deepcopy(item['id']) self.cmd = copy.deepcopy(item['cmd']) self.tags = copy.deepcopy(item['tags']) self.args = copy.deepcopy(item['args']) self.time = copy.deepcopy(item['time']) self.status = copy.deepcopy(item['status']) self.response = copy.deepcopy(item['response']) def to_dict(self): _dict = { 'id': self.id, 'cmd': self.cmd, 'tags': self.tags, 'time': self.time, 'args': self.args, 'status': self.status, 'response': self.response } return _dict class Commands(object): """ Keeps commands dictionary: { 'command_id': Command } """ def __init__(self): self.commands = {} def __getitem__(self, cmd_id): return self.commands.get(cmd_id) def __setitem__(self, cmd_id, cmd): self.commands[cmd_id] = cmd def __delitem__(self, cmd_id): self.delete(cmd_id) def add(self, command): cmd_dict = command.to_dict() self_command = Command() self_command.from_dict(cmd_dict) self.commands[command.id] = self_command def find(self, status=None): return [cmd for cmd in self.commands.values() if cmd.status == status] def delete(self, cmd_id): self.commands.pop(cmd_id)
{ "repo_name": "Andrean/lemon.apple", "path": "agent/defs/cmd.py", "copies": "1", "size": "2126", "license": "mit", "hash": 8942529644034486000, "line_mean": 23.7325581395, "line_max": 78, "alpha_frac": 0.5432737535, "autogenerated": false, "ratio": 3.736379613356766, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4779653366856766, "avg_score": null, "num_lines": null }
__author__ = 'Andrean' import logging from modules import BaseAgentModule class Core(object): ''' Core class. It keeps all working instances of Lemon agent Core is Singleton ''' Instance = None Config = {} # core components modules = {} def __new__(cls, *args, **kwargs): if cls.Instance is None: cls.Instance = super(Core, cls).__new__(cls) return cls.Instance def __init__(self, config=None): self.Config = config self._logger = logging.getLogger('main.Core') self._order = [] def add(self, module): instance = module(self) assert isinstance(instance, BaseAgentModule) self.modules[instance.Name] = instance self._order.append(instance.Name) def start(self): self._logger.info('Starting modules') for module_name in self._order: self._logger.debug('Starting module {0}'.format(module_name)) self.modules[module_name].start() def stop(self): self._logger.info('Stopping modules') for module in self.modules.values(): self._logger.debug('Stopping module {}'.format(module.Name)) module.stop() @property def Storage(self): return self.modules.get('Storage') @property def Client(self): return self.modules.get('Client') @property def Manager(self): return self.modules.get('Manager')
{ "repo_name": "Andrean/lemon.apple", "path": "agent/core.py", "copies": "1", "size": "1457", "license": "mit", "hash": -7054676136920901000, "line_mean": 25.5090909091, "line_max": 73, "alpha_frac": 0.5998627316, "autogenerated": false, "ratio": 4.162857142857143, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.0004329004329004329, "num_lines": 55 }
__author__ = 'Andrean'

import logging
from urllib.parse import urlsplit
from urllib.parse import parse_qs
import json
import types
import re
import traceback
import sys

import pymongo.errors
import bson.json_util

import defs.errors as errors
import controllers.base as BaseController
import routes.web_interface as web_interface
import routes.agent_interface as agent_interface

#####################################################################################
# Functions for make request and response references
#####################################################################################


def MakeRequest(requestHandler, path):
    """Attach the parsed query string of *path* onto the handler as .query."""
    requestHandler.query = parse_qs((urlsplit(path)).query)
    return requestHandler


def send_content(self, content, code=200, headers=None):
    """Send *content* as an HTTP response, encoded as UTF-8.

    Caller-supplied headers take precedence; Content-Type/Content-Length are
    only generated when missing.
    """
    if headers is None:
        headers = {}
    self.send_response(code)
    if not isinstance(content, str):
        content = str(content)
    # Encode once: Content-Length must count the encoded bytes, not the
    # characters (the old len(content) was wrong for non-ASCII payloads).
    payload = bytes(content, 'utf-8')
    if 'Content-Type' not in headers:
        self.send_header('Content-Type', 'text/plain;charset=utf-8')
    if 'Content-Length' not in headers:
        self.send_header('Content-Length', len(payload))
    for header, value in headers.items():
        self.send_header(header, value)
    self.end_headers()
    self.wfile.write(payload)


def send_json(self, content, code=200, headers=None):
    """Serialize *content* with bson-aware JSON encoding and send it."""
    if headers is None:
        # Fresh dict per call — the old default {} was mutated below and
        # shared across calls.
        headers = {}
    # Fixed typo: the old check looked up 'Content_Type' (underscore) and so
    # always overwrote a caller-supplied Content-Type header.
    if 'Content-Type' not in headers:
        headers['Content-Type'] = 'application/json; charset=utf8'
    self.send_content(
        json.dumps(content, default=bson.json_util.default),
        code,
        headers
    )


def MakeResponse(requestHandler):
    """Bind the send helpers onto the handler and parse the request body."""
    requestHandler.send_content = types.MethodType(send_content, requestHandler)
    requestHandler.send_json = types.MethodType(send_json, requestHandler)
    requestHandler.data = None
    requestHandler.text = None
    requestHandler.json = None
    requestHandler = ParseBody(requestHandler)
    return requestHandler


def ParseBody(req):
    """Decode the request body into req.text / req.json / req.data depending
    on the Content-Type header.  Always returns *req*."""
    # Default content type when the header is absent.
    content_type = req.headers.get('Content-Type', 'text/plain; charset=utf-8')
    # If Content-Length is not found - do not read body.
    content_length = int(req.headers.get('Content-Length', 0))
    match = re.match(r"([\w/-]+);\s*charset=([\w-]+)", content_type)
    content_charset = 'utf-8'
    if match:
        content_charset = match.group(2)
        content_type = match.group(1)
    body = req.rfile.read(content_length)
    if content_type == 'text/plain':
        req.text = str(body, content_charset)
        req.data = body
        return req
    if content_type == 'application/json':
        req.json = json.loads(str(body, content_charset),
                              object_hook=bson.json_util.object_hook)
        return req
    if content_type == 'application/octet-stream':
        req.data = body
        return req
    # Unrecognized content type: body stays raw in req.rfile's past; the old
    # code fell off the end and returned None, breaking MakeResponse.
    return req

#####################################################################################


def _attach_senders(handler):
    """(Re)bind the send helpers; the failing action may have replaced them."""
    handler.send_content = types.MethodType(send_content, handler)
    handler.send_json = types.MethodType(send_json, handler)


class Router(object):
    """Dispatches (method, path) pairs to registered actions.

    Routes are tried in registration order; the catch-all 404 rules added by
    load() therefore match last.
    """

    def __init__(self, name):
        self.Name = name
        self.Methods = ['GET', 'POST', 'PUT', 'HEAD', 'DELETE']
        self._logger = logging.getLogger('main.' + self.Name)
        self._routes = []

    def dispatch(self, handler, method, path):
        try:
            for rule in self._routes:
                if rule['method'] == method and re.search(rule['pattern'], path):
                    rule['action'](
                        MakeRequest(handler, path),
                        MakeResponse(handler)
                    )
                    return
        except errors.BaseLemonException as e:
            self._logger.exception(e)
            _attach_senders(handler)
            handler.send_json({'error': e.message})
        except pymongo.errors.PyMongoError as e:
            self._logger.exception(e)
            _attach_senders(handler)
            handler.send_json({'error': {'code': e.code, 'message': e.details['err']}})
        except Exception:
            # Narrowed from a bare except: so SystemExit/KeyboardInterrupt
            # still propagate.
            self._logger.error('{0}\n{1}'.format(
                self.Name, ''.join(traceback.format_exception(*sys.exc_info()))))
            # HTTP 500 Handler
            BaseController.get_500(handler)

    def add_route(self, method, url_pattern, action):
        self._routes.append(
            {'pattern': url_pattern, 'action': action, 'method': method}
        )

    def load(self, routes):
        self._logger.debug('Loading routes')
        for rule in routes:
            try:
                self.add_route(*rule)
            except Exception as e:
                self._logger.exception(e)
        # after them load plugins routes
        # after all routes add rule HTTP 404 for .* path
        for method in self.Methods:
            self.add_route(method, r'.*', BaseController.get_404)


class AgentRouter(Router):
    def __init__(self):
        super().__init__('AGENT ROUTER')

    def load(self):
        super().load(agent_interface.ROUTES)


class WebRouter(Router):
    def __init__(self):
        super().__init__('WEB ROUTER')

    def load(self):
        super().load(web_interface.ROUTES)
{ "repo_name": "Andrean/lemon.apple", "path": "server/router.py", "copies": "1", "size": "5187", "license": "mit", "hash": -7815196142279639000, "line_mean": 35.7943262411, "line_max": 116, "alpha_frac": 0.5984191247, "autogenerated": false, "ratio": 4.065047021943574, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.005153797452224834, "num_lines": 141 }
__author__ = 'Andrean' import os import yaml import logging.config class Config(object): ''' Class keeps all configuration of lemon server Has methods for loading configuration ''' Storage = {} Server = {} Manager = {} root = {} def __init__(self, file = None): self.file = file self.loggingFile = None def Load(self, filePath = None): if filePath is not None: self.file = filePath self.root = yaml.load(open(self.file)) self.Storage = self.root.get('STORAGE', {}) self.Server = self.root.get('SERVER', {}) def LoadLogging(self, loggingFilePath): self.loggingFile = loggingFilePath file = yaml.load(open(self.loggingFile)) for item in file['handlers'].values(): if item.__contains__('filename'): os.makedirs(os.path.dirname(item['filename']), exist_ok = True) logging.config.dictConfig(file) def GetSection(self, name): return self.root.get(name, {})
{ "repo_name": "Andrean/lemon.apple", "path": "server/config.py", "copies": "1", "size": "1037", "license": "mit", "hash": 5385112532223675000, "line_mean": 27.0540540541, "line_max": 79, "alpha_frac": 0.5911282546, "autogenerated": false, "ratio": 3.9884615384615385, "config_test": true, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5079589793061539, "avg_score": null, "num_lines": null }
__author__ = 'Andrean' import pymongo import pymongo.errors from modules.base import BaseServerModule class Storage(BaseServerModule): def __init__(self, _core): super().__init__(_core, 'Storage') self._logger.info("Created") self._client = None self._connection = None def start(self): self.connect() def connect(self): self._logger.debug("Connecting to database") database = self._config.get('database', {}) uri = database.get("uri") host = database.get('host','localhost') port = database.get('port',27017) db = database.get('db') try: if uri is not None: self._client = pymongo.MongoClient(uri) else: self._client = pymongo.MongoClient(host, port) except pymongo.errors.ConnectionFailure as e: self._logger.error("Cannot connect to mongodb", e) return if db is None: self._logger.error('[config error] parameter "db" not found') try: self._connection = self._client[db] except pymongo.errors.InvalidName: self._logger.error("Invalid Database name {0}".format(db)) return self._logger.info('Storage successfully connected to db "{0}"'.format(db)) return True def stop(self): pass @property def connection(self): if self._client is not None and self._client.alive(): return self._connection self._logger.info('Connection error to database. Try to reconnect') if self.connect(): return self._connection
{ "repo_name": "Andrean/lemon.apple", "path": "server/modules/storage.py", "copies": "1", "size": "1663", "license": "mit", "hash": -8601493709911638000, "line_mean": 30.9807692308, "line_max": 82, "alpha_frac": 0.5814792544, "autogenerated": false, "ratio": 4.387862796833773, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.001672963081095611, "num_lines": 52 }
__author__ = 'Andrean' import yaml import logging import logging.config import os class Config(object): ''' Class keeps all configuration of lemon agent Has methods for loading configuration ''' Storage = {} Client = {} Manager = {} root = {} def __init__(self, file = None): self.file = file self.loggingFile = None def Load(self, filePath = None): if filePath is not None: self.file = filePath self.root = yaml.load(open(self.file)) self.Storage = self.root.get('storage', {}) self.Client = self.root.get('client', {}) def LoadLogging(self, loggingFilePath): self.loggingFile = loggingFilePath file = yaml.load(open(self.loggingFile)) for item in file['handlers'].values(): if item.__contains__('filename'): os.makedirs(os.path.dirname(item['filename']), exist_ok = True) logging.config.dictConfig(file) def GetSection(self, name): return self.root.get(name, {})
{ "repo_name": "Andrean/lemon.apple", "path": "agent/config.py", "copies": "1", "size": "1053", "license": "mit", "hash": 8271679026193397000, "line_mean": 26, "line_max": 79, "alpha_frac": 0.5935422602, "autogenerated": false, "ratio": 3.9886363636363638, "config_test": true, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.006932384873561345, "num_lines": 39 }
__author__ = 'Andrean'

from models import BaseModel
import os
import subprocess
import hashlib
import json
import bson.json_util
import traceback
import sys


class Contractor(BaseModel):
    """Executable plugin ("contractor") persisted through BaseModel.

    The binary payload is written into ``Directory`` on save and later run as
    a subprocess; the process is expected to print a JSON document on stdout.
    """

    StorageName = 'contractors'
    Directory = './contractors'

    def __init__(self, item=None):
        super().__init__(item)
        self._item['hash'] = None
        self._set_exec()
        os.makedirs(self.Directory, exist_ok=True)

    def save(self):
        """Persist the record, then write the payload to disk and drop the
        in-memory copy of the bytes."""
        super().save()
        self._item['path'] = os.path.join(
            os.path.abspath(self.Directory),
            "{0}.{1}.{2}".format(str(self.id), self.name, self.type)
        )
        if 'data' in self._item:
            with open(self.path, 'wb') as f:
                f.write(self._item.get('data'))
            self._item.pop('data')

    def delete(self):
        if self.id in self.Instances:
            super().delete()
        if os.path.exists(self.path):
            os.remove(self.path)

    def kill(self):
        """
        Kill all running instances of that contractor
        :return:
        """
        pass

    def exec(self, _args=[]):
        """
        Process returns JSON string. Contractor returns object of this JSON
        :param _args:
        :return: dict object (with an 'error' key on failure), or None when
                 the executable file does not exist
        """
        if not os.path.exists(self.path):
            return
        process = self._exec(_args)
        try:
            outs, err = process.communicate(timeout=20)
        except subprocess.TimeoutExpired:
            process.kill()
            _, _ = process.communicate()
            return dict(error="timeout expired")
        except Exception:
            return dict(error=''.join(traceback.format_exception(*sys.exc_info())))
        if process.returncode != 0:
            return dict(error="return code: {0}".format(process.returncode), msg=outs)
        try:
            # outs is already text (universal_newlines=True); json.loads no
            # longer accepts an 'encoding' argument (removed in Python 3.9).
            result = json.loads(outs.strip(),
                                object_hook=bson.json_util.object_hook)
        except Exception:
            result = dict(error="wrong format")
        # Plain return — the old 'return' inside finally silently swallowed
        # any exception raised above it.
        return result

    def _set_exec(self):
        # Only python contractors are wired up; any other type keeps the
        # NotImplementedError stub below.
        if self.type == 'py':
            self._exec = self._exec_python

    def _exec(self, _args):
        raise NotImplementedError("Exec method not implemented")

    def _exec_python(self, _args):
        """Launch the contractor with the current interpreter; stdout and
        stderr are merged and returned as text."""
        args = [sys.executable, self.path]
        args.extend(_args)
        process = subprocess.Popen(
            args=args,
            universal_newlines=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
        )
        return process

    @property
    def id(self):
        return self._item.get('id')

    @property
    def name(self):
        return self._item.get('name')

    @property
    def path(self):
        return self._item.get('path')

    @property
    def type(self):
        return self._item.get('type')

    @property
    def args(self):
        return self._item.get('args')

    @property
    def hash(self):
        # Recomputed from the on-disk payload on every access.
        with open(self.path, 'rb') as f:
            self._item['hash'] = hashlib.md5(f.read()).hexdigest()
        return self._item.get('hash')
{ "repo_name": "Andrean/lemon.apple", "path": "agent/models/contractor.py", "copies": "1", "size": "3151", "license": "mit", "hash": -5193423621334552000, "line_mean": 25.2583333333, "line_max": 120, "alpha_frac": 0.5483973342, "autogenerated": false, "ratio": 4.055341055341056, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5103738389541056, "avg_score": null, "num_lines": null }
import random
import os
import numpy as np
import math
import importlib.util

# Prefer the Pillow package ("PIL"); fall back to the ancient top-level PIL
# modules.  The original probe used the "imp" module, which was removed in
# Python 3.12 — importlib.util.find_spec is the supported replacement.
found = importlib.util.find_spec('PIL') is not None
if found:
    from PIL import Image
    from PIL import ImageDraw
    from PIL import ImageFont
else:
    import Image
    import ImageDraw
    import ImageFont

debug = 0          # module-wide verbosity for the debug prints below
float_min = 1e-300

from sys import platform as _platform


def get_default_font():
    """return the default font of different operator system, highly recommend
    you prepare the fonts youself instead of using this function"""
    if _platform == "linux" or _platform == "linux2":
        # linux, sorry, i dont know much
        FONT_PATH = '/usr/share/fonts/truetype/droid/DroidSansMono.ttf'
    elif _platform == "darwin":
        # OS X
        FONT_PATH = "/Library/Fonts/hei.ttf"
    elif _platform == "win32":
        # Windows...
        FONT_PATH = "c:/windows/fonts/msyh.ttf"
    else:
        # Was an UnboundLocalError on any other platform; fail explicitly.
        raise RuntimeError("no default font known for platform %s" % _platform)
    return FONT_PATH


def draw_word_cloud(words, width=800, height=600, output_file_name=None,
                    font_path=None):
    """
    Generate the word cloud.

    Parameters
    ----------
    words : array of tuples
        A tuple contains the word and its weight. The weight must be greater
        than 0 and can be any positive number.
    width : int (default=800)
        Width of the canvas.
    height : int (default=600)
        Height of the canvas.
    output_file_name : string (default=None)
        Path of the output figure.  When None, the image is shown instead
        (PIL Image.show semantics).
    font_path : string
        Font path to the font that will be used (OTF or TTF).  When None,
        get_default_font() is used.
    """
    best_score = 0
    best_elements = None
    # Try several font scales and keep the layout with the best score.
    for font_scale in [0.1, 0.5, 1, 2, 5, 7, 10, 15, 20, 30, 50]:
        elements, score, fill_rate, show_rate = fit_words(
            words, width=width, height=height, margin=2, scale=font_scale)
        if debug >= 1:
            print('scale:', font_scale, 'score:', score,
                  'show_rate:', show_rate, 'fille_rate:', fill_rate)
        if score > best_score:
            best_elements, best_score = elements, score
        # NOTE(review): breaking on score == 0.0 looks intentional (nothing
        # fits at this scale or larger) — verify against the original design.
        if score == 0.0:
            break
    draw(best_elements, output_file_name, width=width, height=height, scale=1)


def random_color_func(word, font_size, position, orientation):
    """Random hue at 80% saturation / 50% lightness; word metrics ignored."""
    return "hsl(%d" % random.randint(0, 255) + ", 80%, 50%)"


def select_orintation(font_size, font_path, canvas_size, word, margin,
                      draw, font):
    """"choice the orintation for each word"""
    width, height = canvas_size
    draw.setfont(font)
    nontransposed_box_size = draw.textsize(word)
    transposed_font = ImageFont.TransposedFont(font, orientation=Image.ROTATE_90)
    draw.setfont(transposed_font)
    transposed_box_size = draw.textsize(word)
    box_size = None
    orientation = None
    # If one orientation cannot fit on the canvas at all, force the other.
    if not check_in_bound((width, height),
                          (transposed_box_size[1] + margin,
                           transposed_box_size[0] + margin)):
        box_size = nontransposed_box_size
        orientation = None
    elif not check_in_bound((width, height),
                            (nontransposed_box_size[1] + margin,
                             nontransposed_box_size[0] + margin)):
        box_size = transposed_box_size
        orientation = Image.ROTATE_90
    if debug >= 1:
        print('trans:', transposed_box_size,
              'nontrans:', nontransposed_box_size, orientation, box_size)
    # transpose font optionally: 9:1 bias toward horizontal text
    if box_size is None:
        box_size, orientation = random.choice(
            [(nontransposed_box_size, None)] * 9
            + [(transposed_box_size, Image.ROTATE_90)])
    return box_size, orientation


def fit_words(words, font_path=None, width=80, height=40, margin=2,
              prefer_horiz=0.90, scale=5, file_name=None):
    """Generate the positions for words.

    Parameters
    ----------
    words : array of tuples
        A tuple contains the word and its frequency.
    font_path : string
        Font path to the font that will be used (OTF or TTF).
    width : int (default=80)
        Width of the canvas.
    height : int (default=40)
        Height of the canvas.
    margin : int (default=2)
        Pixels kept free around each word box.
    prefer_horiz : float (default=0.90)
        The ratio of times to try horizontal fitting as opposed to vertical.
        (Currently unused; orientation is chosen in select_orintation.)
    scale : int (default=5)
        Scales the font size in case the font is too small.
    file_name : unused, kept for interface compatibility.

    Returns
    -------
    (elements, score, fill_rate, show_rate) where elements is an iterable of
    ((word, weight), font_size, position, orientation) tuples.
    """
    if len(words) <= 0:
        # Was a print() followed by a ZeroDivisionError; raise explicitly.
        raise ValueError(
            "We need at least 1 word to plot a word cloud, got %d." % len(words))
    if font_path is None:
        font_path = get_default_font()
    if not os.path.exists(font_path):
        raise ValueError("The font %s does not exist." % font_path)
    # create image
    img_grey = Image.new("L", (width, height))
    draw = ImageDraw.Draw(img_grey)
    valid_words, font_sizes, positions, orientations = [], [], [], []
    # normalize the weights so they sum to 1
    sum_weight = sum(weight for word, weight in words)
    words = [(word, weight * 1.0 / sum_weight) for word, weight in words]
    # start drawing grey image, biggest weights first
    for word, weight in sorted(words, key=lambda x: x[1], reverse=True):
        # font size proportional to weight, canvas height and user scale
        integral = np.asarray(img_grey)
        font_size = int((weight * height * scale))
        font = ImageFont.truetype(font_path, font_size)
        box_size, orientation = select_orintation(
            font_size, font_path, (width, height), word, margin, draw, font)
        # find possible places using integral image:
        result = query_integral_image(
            integral, (box_size[0] + margin, box_size[1] + margin))
        if result is None:
            break
        if debug >= 1:
            print('font_size', font_size, word, weight,
                  'orientation:', orientation, 'pos:', result,
                  'box_size:', box_size)
        x, y = np.array(result) + margin // 2
        # need to reset the font for the chosen orientation
        transposed_font = ImageFont.TransposedFont(font, orientation=orientation)
        draw.setfont(transposed_font)
        draw.text((y, x), word, fill="white")
        # store the information
        valid_words.append((word, weight))
        positions.append((x, y))
        orientations.append(orientation)
        font_sizes.append(font_size)
    fill_rate = 1.0 * (integral != 0).sum() / (integral.shape[0] * integral.shape[1])
    show_rate = len(valid_words) * 1.0 / len(words)
    score = show_rate * fill_rate
    if debug >= 3:
        print(list(zip(valid_words, font_sizes, positions, orientations)))
        print('size:', len(valid_words), 'all:', len(words))
    if debug >= 1:
        print('integral sum:', (integral != 0).sum(),
              'show_rate:', show_rate, 'fille_rate:', fill_rate,
              'score:', score)
    return (zip(valid_words, font_sizes, positions, orientations),
            score, fill_rate, show_rate)


def draw(elements, file_name=None, font_path=None, width=80, height=40,
         scale=1, color_func=random_color_func):
    """Render a layout produced by fit_words onto an RGB canvas.

    Saves to *file_name* when given, otherwise calls Image.show().
    """
    if font_path is None:
        font_path = get_default_font()
    img = Image.new("RGB", (width, height))
    draw = ImageDraw.Draw(img)
    for (word, weight), font_size, position, orientation in elements:
        font = ImageFont.truetype(font_path, font_size)
        transposed_font = ImageFont.TransposedFont(font, orientation=orientation)
        draw.setfont(transposed_font)
        color = random_color_func(word, font_size * scale, position, orientation)
        pos = (position[1], position[0])
        draw.text(pos, word, fill=color)
    if file_name is not None:
        img.save(file_name)
    else:
        img.show()
    if debug >= 3:
        a = np.asarray(img)
        for i in range(a.shape[0]):
            for j in range(a.shape[1]):
                print(1 if a[i, j].any() else 0, end=' ')
            print()


def collision_detect(integral_image, pos, box_size):
    """True when a box of *box_size* at *pos* overlaps drawn pixels or the
    canvas border."""
    height, width = integral_image.shape
    x, y = pos
    box_width, box_height = box_size
    # out of the bound
    if x + box_height >= height or y + box_width >= width:
        return True
    if integral_image[x: x + box_height, y: y + box_width].any():
        return True
    return False


def get_spiral_function(size):
    """Archimedean spiral scaled to the canvas aspect ratio."""
    width, height = size
    e = width * 1.0 / height
    return lambda t: (t * 1.0 * math.cos(t), e * t * math.sin(t))


def euclid_distance(pos1, pos2):
    return math.sqrt((pos1[0] - pos2[0]) ** 2 + (pos1[1] - pos2[1]) ** 2)


def check_in_bound(size, pos_current):
    """check the pos_current in the bound or not"""
    pos_x, pos_y = pos_current[0], pos_current[1]
    width, height = size
    if pos_x >= 0 and pos_x < height and pos_y >= 0 and pos_y < width:
        return True
    return False


def query_integral_image(integral_image, box_size):
    """Walk a spiral from a random start point looking for a free slot for a
    box of *box_size*; returns (x, y) or None when nothing fits."""
    height = integral_image.shape[0]
    width = integral_image.shape[1]
    box_width, box_height = box_size
    spiral = get_spiral_function((width, height))
    delta = random.choice([1, -1])
    t = 0
    # random start point, biased toward the center of the free area
    pos_begin_x, pos_begin_y = \
        int((height - box_height) * random.uniform(0.25, 0.75)), \
        int((width - box_width) * random.uniform(0.25, 0.75))
    max_distance = euclid_distance((height, width), (0, 0))
    while True:
        # next point on the spiral, rounded to pixel coordinates
        pos_x, pos_y = spiral(t)
        pos_x, pos_y = int(pos_x + pos_begin_x + 0.5), int(pos_y + pos_begin_y + 0.5)
        t += delta
        # stop once the spiral has clearly left the canvas
        if euclid_distance((pos_x, pos_y), (pos_begin_x, pos_begin_y)) >= max_distance:
            break
        if not check_in_bound((width, height), (pos_x, pos_y)):
            continue
        if not collision_detect(integral_image, (pos_x, pos_y), box_size):
            if debug >= 3:
                for i in range(integral_image.shape[0]):
                    for j in range(integral_image.shape[1]):
                        print(1 if integral_image[i, j] != 0 else 0, end=' ')
                    print()
            return pos_x, pos_y
    return None
{ "repo_name": "knightwu/easy_word_cloud", "path": "easywordcloud/layout_cloud.py", "copies": "1", "size": "10818", "license": "mit", "hash": 5978409888694708000, "line_mean": 32.80625, "line_max": 124, "alpha_frac": 0.6112035496, "autogenerated": false, "ratio": 3.468419365181148, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4579622914781148, "avg_score": null, "num_lines": null }
import os import re from .layout_cloud import * STOPWORDS = set([x.strip() for x in open(os.path.join(os.path.dirname(__file__), 'stopwords')).read().split('\n')]) def process_text(text, max_features=200, stopwords=None): """Splits a long text into words, eliminates the stopwords and returns (words, counts) which is necessary for make_wordcloud(). Parameters ---------- text : string The text to be processed. max_features : number (default=200) The maximum number of words. stopwords : set of strings The words that will be eliminated. Notes ----- There are better ways to do word tokenization, but I don't want to include all those things. """ if stopwords is None: stopwords = STOPWORDS d = {} for word in re.findall(r"\w[\w']*", text): word_lower = word.lower() if word_lower in stopwords: continue # Look in lowercase dict. if word_lower in d: d2 = d[word_lower] else: d2 = {} d[word_lower] = d2 # Look in any case dict. if word in d2: d2[word] += 1 else: d2[word] = 1 d3 = {} for d2 in d.values(): # Get the most popular case. first = sorted(d2.items(), key=lambda x: x[1], reverse=True)[0][0] d3[first] = sum(d2.values()) words = sorted(d3.items(), key=lambda x: x[1], reverse=True) words = words[:max_features] maximum = float(max(d3.values())) for i, (word, count) in enumerate(words): words[i] = word, count/maximum return words
{ "repo_name": "knightwu/easy_word_cloud", "path": "easywordcloud/__init__.py", "copies": "1", "size": "1886", "license": "mit", "hash": 3344796932879659000, "line_mean": 25.5633802817, "line_max": 88, "alpha_frac": 0.5572640509, "autogenerated": false, "ratio": 3.6339113680154145, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4691175418915414, "avg_score": null, "num_lines": null }
from random import Random import os import re import sys import numpy as np from operator import itemgetter from PIL import Image from PIL import ImageDraw from PIL import ImageFont from .query_integral_image import query_integral_image item1 = itemgetter(1) FONT_PATH = os.environ.get("FONT_PATH", "/usr/share/fonts/truetype/droid/DroidSansMono.ttf") STOPWORDS = set([x.strip() for x in open(os.path.join(os.path.dirname(__file__), 'stopwords')).read().split('\n')]) def random_color_func(word, font_size, position, orientation, random_state=None): """Random hue color generation. Default coloring method. This just picks a random hue with value 80% and lumination 50%. Parameters ---------- word, font_size, position, orientation : ignored. random_state : random.Random object or None, (default=None) If a random object is given, this is used for generating random numbers. """ if random_state is None: random_state = Random() return "hsl(%d, 80%%, 50%%)" % random_state.randint(0, 255) class WordCloud(object): """Word cloud object for generating and drawing. Parameters ---------- font_path : string Font path to the font that will be used (OTF or TTF). Defaults to DroidSansMono path on a Linux machine. If you are on another OS or don't have this font, you need to adjust this path. width : int (default=400) Width of the canvas. height : int (default=200) Height of the canvas. ranks_only : boolean (default=False) Only use the rank of the words, not the actual counts. prefer_horizontal : float (default=0.90) The ratio of times to try horizontal fitting as opposed to vertical. mask : nd-array or None (default=None) If not None, gives a binary mask on where to draw words. All zero entries will be considered "free" to draw on, while all non-zero entries will be deemed occupied. If mask is not None, width and height will be ignored and the shape of mask will be used instead. max_words : number (default=200) The maximum number of words. 
stopwords : set of strings The words that will be eliminated. background_color : color value (default="black") Background color for the word cloud image. max_font_size : int or None (default=None) Maximum font size for the largest word. If None, height of the image is used. Attributes ---------- words_ : list of tuples (string, float) Word tokens with associated frequency. layout_ : list of tuples (string, int, (int, int), int, color)) Encodes the fitted word cloud. Encodes for each word the string, font size, position, orientation and color. """ def __init__(self, font_path=None, width=400, height=200, margin=5, ranks_only=False, prefer_horizontal=0.9, mask=None, scale=1, color_func=random_color_func, max_words=200, stopwords=None, random_state=None, background_color='black', max_font_size=None): if stopwords is None: stopwords = STOPWORDS if font_path is None: font_path = FONT_PATH self.font_path = font_path self.width = width self.height = height self.margin = margin self.ranks_only = ranks_only self.prefer_horizontal = prefer_horizontal self.mask = mask self.scale = scale self.color_func = color_func self.max_words = max_words self.stopwords = stopwords if isinstance(random_state, int): random_state = Random(random_state) self.random_state = random_state self.background_color = background_color if max_font_size is None: max_font_size = height self.max_font_size = max_font_size def fit_words(self, words): """Generate the positions for words. Parameters ---------- words : array of tuples A tuple contains the word and its frequency. Returns ------- layout_ : list of tuples (string, int, (int, int), int, color)) Encodes the fitted word cloud. Encodes for each word the string, font size, position, orientation and color. Notes ----- Larger canvases with make the code significantly slower. If you need a large word cloud, run this function with a lower canvas size, and draw it with a larger scale. In the current form it actually just uses the rank of the counts, i.e. 
the relative differences don't matter. Play with setting the font_size in the main loop for different styles. """ if self.random_state is not None: random_state = self.random_state else: random_state = Random() if self.mask is not None: width = self.mask.shape[1] height = self.mask.shape[0] # the order of the cumsum's is important for speed ?! integral = np.cumsum(np.cumsum(self.mask, axis=1), axis=0).astype(np.uint32) else: height, width = self.height, self.width integral = np.zeros((height, width), dtype=np.uint32) # create image img_grey = Image.new("L", (width, height)) draw = ImageDraw.Draw(img_grey) img_array = np.asarray(img_grey) font_sizes, positions, orientations, colors = [], [], [], [] font_size = self.max_font_size # start drawing grey image for word, count in words: # alternative way to set the font size if not self.ranks_only: font_size = min(font_size, int(100 * np.log(count + 100))) while True: # try to find a position font = ImageFont.truetype(self.font_path, font_size) # transpose font optionally if random_state.random() < self.prefer_horizontal: orientation = None else: orientation = Image.ROTATE_90 transposed_font = ImageFont.TransposedFont(font, orientation=orientation) draw.setfont(transposed_font) # get size of resulting text box_size = draw.textsize(word) # find possible places using integral image: result = query_integral_image(integral, box_size[1] + self.margin, box_size[0] + self.margin, random_state) if result is not None or font_size == 0: break # if we didn't find a place, make font smaller font_size -= 1 if font_size == 0: # we were unable to draw any more break x, y = np.array(result) + self.margin // 2 # actually draw the text draw.text((y, x), word, fill="white") positions.append((x, y)) orientations.append(orientation) font_sizes.append(font_size) colors.append(self.color_func(word, font_size, (x, y), orientation, random_state=random_state)) # recompute integral image if self.mask is None: img_array = np.asarray(img_grey) else: 
img_array = np.asarray(img_grey) + self.mask # recompute bottom right # the order of the cumsum's is important for speed ?! partial_integral = np.cumsum(np.cumsum(img_array[x:, y:], axis=1), axis=0) # paste recomputed part into old image # if x or y is zero it is a bit annoying if x > 0: if y > 0: partial_integral += (integral[x - 1, y:] - integral[x - 1, y - 1]) else: partial_integral += integral[x - 1, y:] if y > 0: partial_integral += integral[x:, y - 1][:, np.newaxis] integral[x:, y:] = partial_integral self.layout_ = list(zip(words, font_sizes, positions, orientations, colors)) return self.layout_ def process_text(self, text): """Splits a long text into words, eliminates the stopwords. Parameters ---------- text : string The text to be processed. Returns ------- words : list of tuples (string, float) Word tokens with associated frequency. Notes ----- There are better ways to do word tokenization, but I don't want to include all those things. """ d = {} flags = re.UNICODE if sys.version < '3' and \ type(text) is unicode else 0 for word in re.findall(r"\w[\w']*", text, flags=flags): if word.isdigit(): continue word_lower = word.lower() if word_lower in self.stopwords: continue # Look in lowercase dict. if word_lower in d: d2 = d[word_lower] else: d2 = {} d[word_lower] = d2 # Look in any case dict. d2[word] = d2.get(word, 0) + 1 d3 = {} for d2 in d.values(): # Get the most popular case. 
first = max(d2.items(), key=item1)[0] d3[first] = sum(d2.values()) # merge plurals into the singular count (simple cases only) for key in list(d3.keys()): if key.endswith('s'): key_singular = key[:-1] if key_singular in d3: val_plural = d3[key] val_singular = d3[key_singular] d3[key_singular] = val_singular + val_plural del d3[key] words = sorted(d3.items(), key=item1, reverse=True) words = words[:self.max_words] if len(d3.values()) > 0: # check in case there are no words maximum = float(max(d3.values())) else: maximum = 1.0 # avoid by-zero division for i, (word, count) in enumerate(words): words[i] = word, count / maximum self.words_ = words return words def generate(self, text): """Generate wordcloud from text. Calls process_text and fit_words. Returns ------- self """ self.process_text(text) self.fit_words(self.words_) return self def _check_generated(self): """Check if layout_ was computed, otherwise raise error.""" if not hasattr(self, "layout_"): raise ValueError("WordCloud has not been calculated, call generate first.") def to_image(self): self._check_generated() if self.mask is not None: width = self.mask.shape[1] height = self.mask.shape[0] else: height, width = self.height, self.width img = Image.new("RGB", (width * self.scale, height * self.scale), self.background_color) draw = ImageDraw.Draw(img) for (word, count), font_size, position, orientation, color in self.layout_: font = ImageFont.truetype(self.font_path, font_size * self.scale) transposed_font = ImageFont.TransposedFont(font, orientation=orientation) draw.setfont(transposed_font) pos = (position[1] * self.scale, position[0] * self.scale) draw.text(pos, word, fill=color) return img def recolor(self, random_state=None, color_func=None): """Recolor existing layout. Applying a new coloring is much faster than generating the whole wordcloud. Parameters ---------- random_state : RandomState, int, or None, default=None If not None, a fixed random state is used. 
If an int is given, this is used as seed for a random.Random state. color_func : function or None, default=None Function to generate new color from word count, font size, position and orientation. If None, self.color_func is used. Returns ------- self """ if isinstance(random_state, int): random_state = Random(random_state) self._check_generated() if color_func is None: color_func = self.color_func self.layout_ = [(word, font_size, position, orientation, color_func(word, font_size, position, orientation, random_state)) for word, font_size, position, orientation, _ in self.layout_] return self def to_file(self, filename): """Export to image file. Parameters ---------- filename : string Location to write to. Returns ------- self """ img = self.to_image() img.save(filename) return self def to_array(self): """Convert to numpy array. Returns ------- image : nd-array size (width, height, 3) Word cloud image as numpy matrix. """ return np.array(self.to_image()) def __array__(self): """Convert to numpy array. Returns ------- image : nd-array size (width, height, 3) Word cloud image as numpy matrix. """ return self.to_array() def to_html(self): raise NotImplementedError("FIXME!!!")
{ "repo_name": "Nespa32/sm_project", "path": "wordcloud_gen/wordcloud_package/wordcloud/wordcloud.py", "copies": "1", "size": "14076", "license": "mit", "hash": 1665463907156999000, "line_mean": 33.4156479218, "line_max": 96, "alpha_frac": 0.5522875817, "autogenerated": false, "ratio": 4.27452171272396, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.0007818817681248919, "num_lines": 409 }
import random import os import sys import re import numpy as np from operator import itemgetter from PIL import Image from PIL import ImageDraw from PIL import ImageFont from query_integral_image import query_integral_image item1 = itemgetter(1) FONT_PATH = "/usr/share/fonts/truetype/droid/DroidSansMono.ttf" STOPWORDS = set([x.strip() for x in open(os.path.join(os.path.dirname(__file__), 'stopwords')).read().split('\n')]) def fit_words(words, font_path=None, width=400, height=200, margin=5, ranks_only=False, prefer_horiz=0.90): """Generate the positions for words. Parameters ---------- words : array of tuples A tuple contains the word and its frequency. font_path : string Font path to the font that will be used (OTF or TTF). Defaults to DroidSansMono path, but you might not have it. width : int (default=400) Width of the canvas. height : int (default=200) Height of the canvas. ranks_only : boolean (default=False) Only use the rank of the words, not the actual counts. prefer_horiz : float (default=0.90) The ratio of times to try horizontal fitting as opposed to vertical. Notes ----- Larger canvases with make the code significantly slower. If you need a large word cloud, run this function with a lower canvas size, and draw it with a larger scale. In the current form it actually just uses the rank of the counts, i.e. the relative differences don't matter. Play with setting the font_size in the main loop for different styles. """ if len(words) <= 0: print("We need at least 1 word to plot a word cloud, got %d." % len(words)) if font_path is None: font_path = FONT_PATH if not os.path.exists(font_path): raise ValueError("The font %s does not exist." 
% font_path) # create image img_grey = Image.new("L", (width, height)) draw = ImageDraw.Draw(img_grey) integral = np.zeros((height, width), dtype=np.uint32) img_array = np.asarray(img_grey) font_sizes, positions, orientations = [], [], [] # intitiallize font size "large enough" font_size = height # start drawing grey image for word, count in words: # alternative way to set the font size if not ranks_only: font_size = min(font_size, int(100 * np.log(count + 100))) while True: # try to find a position font = ImageFont.truetype(font_path, font_size) # transpose font optionally if random.random() < prefer_horiz: orientation = None else: orientation = Image.ROTATE_90 transposed_font = ImageFont.TransposedFont(font, orientation=orientation) draw.setfont(transposed_font) # get size of resulting text box_size = draw.textsize(word) # find possible places using integral image: result = query_integral_image(integral, box_size[1] + margin, box_size[0] + margin) if result is not None or font_size == 0: break # if we didn't find a place, make font smaller font_size -= 1 if font_size == 0: # we were unable to draw any more break x, y = np.array(result) + margin // 2 # actually draw the text draw.text((y, x), word, fill="white") positions.append((x, y)) orientations.append(orientation) font_sizes.append(font_size) # recompute integral image img_array = np.asarray(img_grey) # recompute bottom right # the order of the cumsum's is important for speed ?! 
partial_integral = np.cumsum(np.cumsum(img_array[x:, y:], axis=1), axis=0) # paste recomputed part into old image # if x or y is zero it is a bit annoying if x > 0: if y > 0: partial_integral += (integral[x - 1, y:] - integral[x - 1, y - 1]) else: partial_integral += integral[x - 1, y:] if y > 0: partial_integral += integral[x:, y - 1][:, np.newaxis] integral[x:, y:] = partial_integral return zip(words, font_sizes, positions, orientations) def random_color_func(word, font_size, position, orientation): return "hsl(%d, 80%%, 50%%)" % random.randint(0, 255) def draw(elements, file_name, font_path=None, width=400, height=200, scale=1, color_func=random_color_func): if font_path is None: font_path = FONT_PATH img = Image.new("RGB", (width * scale, height * scale)) draw = ImageDraw.Draw(img) for (word, count), font_size, position, orientation in elements: font = ImageFont.truetype(font_path, font_size * scale) transposed_font = ImageFont.TransposedFont(font, orientation=orientation) draw.setfont(transposed_font) color = color_func(word, font_size, position, orientation) pos = (position[1] * scale, position[0] * scale) draw.text(pos, word, fill=color) img.save(file_name) def process_text(text, max_features=200, stopwords=None): """Splits a long text into words, eliminates the stopwords and returns (words, counts) which is necessary for make_wordcloud(). Parameters ---------- text : string The text to be processed. max_features : number (default=200) The maximum number of words. stopwords : set of strings The words that will be eliminated. Notes ----- There are better ways to do word tokenization, but I don't want to include all those things. """ if stopwords is None: stopwords = STOPWORDS d = {} flags = re.UNICODE if type(text) is unicode else 0 for word in re.findall(r"\w[\w']*", text, flags=flags): if word.isdigit(): continue word_lower = word.lower() if word_lower in stopwords: continue # Look in lowercase dict. 
if word_lower in d: d2 = d[word_lower] else: d2 = {} d[word_lower] = d2 # Look in any case dict. d2[word] = d2.get(word, 0) + 1 d3 = {} for d2 in d.values(): # Get the most popular case. first = max(d2.iteritems(), key=item1)[0] d3[first] = sum(d2.values()) # merge plurals into the singular count (simple cases only) for key in d3.keys(): if key.endswith('s'): key_singular = key[:-1] if key_singular in d3: val_plural = d3[key] val_singular = d3[key_singular] d3[key_singular] = val_singular + val_plural del d3[key] words = sorted(d3.iteritems(), key=item1, reverse=True) words = words[:max_features] maximum = float(max(d3.values())) for i, (word, count) in enumerate(words): words[i] = word, count/maximum return words
{ "repo_name": "0x0all/word_cloud", "path": "wordcloud/__init__.py", "copies": "1", "size": "7470", "license": "mit", "hash": -4406113394378494000, "line_mean": 32.2, "line_max": 80, "alpha_frac": 0.577643909, "autogenerated": false, "ratio": 3.8825363825363826, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.49601802915363824, "avg_score": null, "num_lines": null }
import random import os import sys import re import numpy as np from PIL import Image from PIL import ImageDraw from PIL import ImageFont from query_integral_image import query_integral_image FONT_PATH = "/Library/Fonts/Krungthep.ttf" STOPWORDS = set([x.strip() for x in open(os.path.join(os.path.dirname(__file__), 'stopwords')).read().split('\n')]) def fit_words(words, font_path=None, width=400, height=200, margin=5, ranks_only=False, prefer_horiz=0.90): """Generate the positions for words. Parameters ---------- words : array of tuples A tuple contains the word and its frequency. font_path : string Font path to the font that will be used (OTF or TTF). Defaults to DroidSansMono path, but you might not have it. width : int (default=400) Width of the canvas. height : int (default=200) Height of the canvas. ranks_only : boolean (default=False) Only use the rank of the words, not the actual counts. prefer_horiz : float (default=0.90) The ratio of times to try horizontal fitting as opposed to vertical. Notes ----- Larger canvases with make the code significantly slower. If you need a large word cloud, run this function with a lower canvas size, and draw it with a larger scale. In the current form it actually just uses the rank of the counts, i.e. the relative differences don't matter. Play with setting the font_size in the main loop for different styles. """ if len(words) <= 0: print("We need at least 1 word to plot a word cloud, got %d." % len(words)) if font_path is None: font_path = FONT_PATH if not os.path.exists(font_path): raise ValueError("The font %s does not exist." 
% font_path) # create image img_grey = Image.new("L", (width, height)) draw = ImageDraw.Draw(img_grey) integral = np.zeros((height, width), dtype=np.uint32) img_array = np.asarray(img_grey) font_sizes, positions, orientations = [], [], [] # intitiallize font size "large enough" font_size = height # start drawing grey image for word, count in words: # alternative way to set the font size if not ranks_only: font_size = min(font_size, int(100 * np.log(count + 100))) while True: # try to find a position font = ImageFont.truetype(font_path, font_size) # transpose font optionally if random.random() < prefer_horiz: orientation = None else: orientation = Image.ROTATE_90 transposed_font = ImageFont.TransposedFont(font, orientation=orientation) draw.setfont(transposed_font) # get size of resulting text box_size = draw.textsize(word) # find possible places using integral image: result = query_integral_image(integral, box_size[1] + margin, box_size[0] + margin) if result is not None or font_size == 0: break # if we didn't find a place, make font smaller font_size -= 1 if font_size == 0: # we were unable to draw any more break x, y = np.array(result) + margin // 2 # actually draw the text draw.text((y, x), word, fill="white") positions.append((x, y)) orientations.append(orientation) font_sizes.append(font_size) # recompute integral image img_array = np.asarray(img_grey) # recompute bottom right # the order of the cumsum's is important for speed ?! 
partial_integral = np.cumsum(np.cumsum(img_array[x:, y:], axis=1), axis=0) # paste recomputed part into old image # if x or y is zero it is a bit annoying if x > 0: if y > 0: partial_integral += (integral[x - 1, y:] - integral[x - 1, y - 1]) else: partial_integral += integral[x - 1, y:] if y > 0: partial_integral += integral[x:, y - 1][:, np.newaxis] integral[x:, y:] = partial_integral return zip(words, font_sizes, positions, orientations) def random_color_func(word, font_size, position, orientation): return "hsl(%d" % random.randint(0, 255) + ", 80%, 50%)" def draw(elements, file_name, font_path=None, width=400, height=200, scale=1, color_func=random_color_func): if font_path is None: font_path = FONT_PATH img = Image.new("RGB", (width * scale, height * scale)) draw = ImageDraw.Draw(img) for (word, count), font_size, position, orientation in elements: font = ImageFont.truetype(font_path, font_size * scale) transposed_font = ImageFont.TransposedFont(font, orientation=orientation) draw.setfont(transposed_font) color = color_func(word, font_size, position, orientation) pos = (position[1] * scale, position[0] * scale) draw.text(pos, word, fill=color) img.save(file_name) def process_text(text, max_features=200, stopwords=None): #from nltk.stem.lancaster import LancasterStemmer #st = LancasterStemmer() from nltk.stem.wordnet import WordNetLemmatizer lmtzr = WordNetLemmatizer() """Splits a long text into words, eliminates the stopwords and returns (words, counts) which is necessary for make_wordcloud(). Parameters ---------- text : string The text to be processed. max_features : number (default=200) The maximum number of words. stopwords : set of strings The words that will be eliminated. Notes ----- There are better ways to do word tokenization, but I don't want to include all those things. 
""" if stopwords is None: stopwords = STOPWORDS d = {} for word in re.findall(r"\w[\w']*", text): word_lower = word.lower() #word_lower = st.stem(word_lower) word_lower = lmtzr.lemmatize(word_lower) if word_lower in stopwords: continue # Look in lowercase dict. if d.has_key(word_lower): d2 = d[word_lower] else: d2 = {} d[word_lower] = d2 # Look in any case dict. if d2.has_key(word): d2[word] += 1 else: d2[word] = 1 d3 = {} for d2 in d.values(): # Get the most popular case. first = sorted(d2.iteritems(), key=lambda x: x[1], reverse=True)[0][0] d3[first] = sum(d2.values()) words = sorted(d3.iteritems(), key=lambda x: x[1], reverse=True) words = words[:max_features] maximum = float(max(d3.values())) for i, (word, count) in enumerate(words): words[i] = word, count/maximum return words
{ "repo_name": "OculusCam/word_cloud-master", "path": "wordcloud/__init__.py", "copies": "1", "size": "7275", "license": "mit", "hash": -4389913618703979000, "line_mean": 32.5253456221, "line_max": 80, "alpha_frac": 0.5802061856, "autogenerated": false, "ratio": 3.835002635740643, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4915208821340643, "avg_score": null, "num_lines": null }
import random,os from PIL import Image from PIL import ImageDraw from PIL import ImageFont import numpy as np from query_integral_image import query_integral_image # FONT_PATH = "C:/Python33/Lib/site-packages/matplotlib/mpl-data/fonts/ttf/vera.ttf" FONT_PATH = "C:/Python35/Lib/site-packages/pytagcloud-0.3.5-py3.3.egg/pytagcloud/fonts/DroidSans.ttf" def make_wordcloud(words, counts, fname=None, font_path=None, width=400, height=200, margin=5, ranks_only=False, backgroundweight=255): """Build word cloud using word counts, store in image. Parameters ---------- words : numpy array of strings Words that will be drawn in the image. counts : numpy array of word counts Word counts or weighting of words. Determines the size of the word in the final image. Will be normalized to lie between zero and one. font_path : string Font path to the font that will be used. Defaults to DroidSansMono path. fname : sting Output filename. Extension determins image type (written with PIL). width : int (default=400) Width of the word cloud image. height : int (default=200) Height of the word cloud image. ranks_only : boolean (default=False) Only use the rank of the words, not the actual counts. backgroundweight : int (default=255) Weight that the background of the wordcloud is multiplied by. Applies in cases where there are more than 2 dimensions which charecterize the cloud; in our case it is the logged number of community population whose tweets resulted in the cloud. Notes ----- Larger Images with make the code significantly slower. If you need a large image, you can try running the algorithm at a lower resolution and then drawing the result at the desired resolution. In the current form it actually just uses the rank of the counts, i.e. the relative differences don't matter. Play with setting the font_size in the main loop vor differnt styles. Colors are used completely at random. Currently the colors are sampled from HSV space with a fixed S and V. 
Adjusting the percentages at the very end gives differnt color ranges. Obviously you can also set all at random - haven't tried that. """ if len(counts) <= 0: print("We need at least 1 word to plot a word cloud, got %d." % len(counts)) if font_path is None: font_path = FONT_PATH if not os.path.exists(font_path): raise ValueError("The provided font %s does not exist." % font_path) # normalize counts counts=[float(i/max(counts)) for i in counts] # sort words by counts inds = np.argsort(counts)[::-1] counts = [counts[i] for i in inds] words = [words[i] for i in inds] # create image img_grey = Image.new("L", (width, height)) draw = ImageDraw.Draw(img_grey) integral = np.zeros((height, width), dtype=np.uint32) img_array = np.asarray(img_grey) font_sizes, positions, orientations = [], [], [] # intitiallize font size "large enough" font_size = 1000 # start drawing grey image for word, count in zip(words, counts): # alternative way to set the font size if not ranks_only: font_size = min(font_size, int(100 * np.log(count + 100))) while True: # try to find a position font = ImageFont.truetype(font_path, font_size, encoding = 'unic') # transpose font optionally orientation = random.choice([None, Image.ROTATE_90]) transposed_font = ImageFont.TransposedFont(font, orientation=orientation) draw.setfont(transposed_font) # get size of resulting text box_size = draw.textsize(word) # find possible places using integral image: result = query_integral_image(integral, box_size[1] + margin, box_size[0] + margin) if result is not None or font_size == 0: break # if we didn't find a place, make font smaller font_size -= 1 if font_size == 0: # we were unable to draw any more break x, y = np.array(result) + margin // 2 # actually draw the text draw.text((y, x), word, fill="white") positions.append((x, y)) orientations.append(orientation) font_sizes.append(font_size) # recompute integral image img_array = np.asarray(img_grey) # recompute bottom right # the order of the cumsum's is important 
for speed ?! partial_integral = np.cumsum(np.cumsum(img_array[x:, y:], axis=1), axis=0) # paste recomputed part into old image # if x or y is zero it is a bit annoying if x > 0: if y > 0: partial_integral += (integral[x - 1, y:] - integral[x - 1, y - 1]) else: partial_integral += integral[x - 1, y:] if y > 0: partial_integral += integral[x:, y - 1][:, np.newaxis] integral[x:, y:] = partial_integral # redraw in color img = Image.new("RGB", (width, height), (backgroundweight,backgroundweight,backgroundweight)) draw = ImageDraw.Draw(img) everything = zip(words, font_sizes, positions, orientations) for word, font_size, position, orientation in everything: font = ImageFont.truetype(font_path, font_size) # transpose font optionally transposed_font = ImageFont.TransposedFont(font, orientation=orientation) draw.setfont(transposed_font) draw.text((position[1], position[0]), word, #fill = "red") fill="hsl(%d" % random.randint(0, 50) + ", 80%, 50%)") #img.show() try: img.save(fname) except: pass return img if __name__ == "__main__": x=['qqqqq','wwww','eeee','rrrr','ddddd','hhnhhhh'] co=[1,2,3,4,5,6] from wordcloud import make_wordcloud make_wordcloud(x,co,'wordy.jpg')
{ "repo_name": "socialsensor/community-evolution-analysis", "path": "python/wordcloud.py", "copies": "1", "size": "6355", "license": "apache-2.0", "hash": 1853670372998237200, "line_mean": 35.9476744186, "line_max": 101, "alpha_frac": 0.6177812746, "autogenerated": false, "ratio": 3.83756038647343, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.49553416610734297, "avg_score": null, "num_lines": null }
import random from PIL import Image from PIL import ImageDraw from PIL import ImageFont import os import numpy as np from query_integral_image import query_integral_image FONT_PATH = "/usr/share/fonts/truetype/droid/DroidSansMono.ttf" #Colors = [[[79,84,75],[0,0,95],[208,42,95],[207,18,97]]] def make_wordcloud(words, counts, fname, font_path=None, width=400, height=200, margin=5, ranks_only=False): """Build word cloud using word counts, store in image. Parameters ---------- words : numpy array of strings Words that will be drawn in the image. counts : numpy array of word counts Word counts or weighting of words. Determines the size of the word in the final image. Will be normalized to lie between zero and one. font_path : string Font path to the font that will be used. Defaults to DroidSansMono path. fname : sting Output filename. Extension determins image type (written with PIL). width : int (default=400) Width of the word cloud image. height : int (default=200) Height of the word cloud image. ranks_only : boolean (default=False) Only use the rank of the words, not the actual counts. Notes ----- Larger Images with make the code significantly slower. If you need a large image, you can try running the algorithm at a lower resolution and then drawing the result at the desired resolution. In the current form it actually just uses the rank of the counts, i.e. the relative differences don't matter. Play with setting the font_size in the main loop vor differnt styles. Colors are used completely at random. Currently the colors are sampled from HSV space with a fixed S and V. Adjusting the percentages at the very end gives differnt color ranges. Obviously you can also set all at random - haven't tried that. """ if len(counts) <= 0: print("We need at least 1 word to plot a word cloud, got %d." % len(counts)) if font_path is None: font_path = FONT_PATH #if not os.path.exists(font_path): # raise ValueError("The provided font %s does not exist." 
% font_path) # normalize counts counts = counts / float(counts.max()) # sort words by counts inds = np.argsort(counts)[::-1] counts = counts[inds] words = words[inds] # create image img_grey = Image.new("L", (width, height)) draw = ImageDraw.Draw(img_grey) integral = np.zeros((height, width), dtype=np.uint32) img_array = np.asarray(img_grey) font_sizes, positions, orientations = [], [], [] # intitiallize font size "large enough" font_size = 1000 # start drawing grey image for word, count in zip(words, counts): # alternative way to set the font size if not ranks_only: font_size = min(font_size, int(100 * np.log(count + 100))) while True: # try to find a position font = ImageFont.truetype(font_path, font_size) # transpose font optionally orientation = random.choice([None, Image.ROTATE_90]) transposed_font = ImageFont.TransposedFont(font, orientation=orientation) draw.setfont(transposed_font) # get size of resulting text box_size = draw.textsize(word) # find possible places using integral image: result = query_integral_image(integral, box_size[1] + margin, box_size[0] + margin) if result is not None or font_size == 0: break # if we didn't find a place, make font smaller font_size -= 1 if font_size == 0: # we were unable to draw any more break x, y = np.array(result) + margin // 2 # actually draw the text draw.text((y, x), word, fill="white") positions.append((x, y)) orientations.append(orientation) font_sizes.append(font_size) # recompute integral image img_array = np.asarray(img_grey) # recompute bottom right # the order of the cumsum's is important for speed ?! 
partial_integral = np.cumsum(np.cumsum(img_array[x:, y:], axis=1), axis=0) # paste recomputed part into old image # if x or y is zero it is a bit annoying if x > 0: if y > 0: partial_integral += (integral[x - 1, y:] - integral[x - 1, y - 1]) else: partial_integral += integral[x - 1, y:] if y > 0: partial_integral += integral[x:, y - 1][:, np.newaxis] integral[x:, y:] = partial_integral # redraw in color img = Image.new("RGB", (width, height)) draw = ImageDraw.Draw(img) everything = zip(words, font_sizes, positions, orientations) for word, font_size, position, orientation in everything: font = ImageFont.truetype(font_path, font_size) # transpose font optionally transposed_font = ImageFont.TransposedFont(font, orientation=orientation) draw.setfont(transposed_font) draw.text((position[1], position[0]), word, fill="hsl({}, {}%, {}%)".format(random.randint(0,255), 50, 70)) #Actually draws the image #img.show() img.save(fname) if __name__ == "__main__": import os import sys from sklearn.feature_extraction.text import CountVectorizer if "-" in sys.argv: lines = sys.stdin.readlines() sources = ['stdin'] else: sources = ([arg for arg in sys.argv[1:] if os.path.exists(arg)] or ["constitution.txt"]) lines = [] for s in sources: with open(s) as f: lines.extend(f.readlines()) text = "".join(lines) cv = CountVectorizer(min_df=1, charset_error="ignore", stop_words="english", max_features=200) counts = cv.fit_transform([text]).toarray().ravel() words = np.array(cv.get_feature_names()) # throw away some words, normalize words = words[counts > 1] counts = counts[counts > 1] output_filename = (os.path.splitext(os.path.basename(sources[0]))[0] + "_.png") counts = make_wordcloud(words, counts, output_filename)
{ "repo_name": "A-Malone/twitter-reader", "path": "wordcloud.py", "copies": "1", "size": "6650", "license": "mit", "hash": -8772611126088052000, "line_mean": 34.9459459459, "line_max": 109, "alpha_frac": 0.589924812, "autogenerated": false, "ratio": 3.9748953974895396, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.0009021311495074613, "num_lines": 185 }
import random from PIL import Image from PIL import ImageDraw from PIL import ImageFont import numpy as np from query_integral_image import query_integral_image FONT_PATH = "/usr/share/fonts/truetype/droid/DroidSansMono.ttf" def make_wordcloud(words, counts, fname, font_path=None, width=400, height=200, margin=5, ranks_only=False): """Build word cloud using word counts, store in image. Parameters ---------- words : numpy array of strings Words that will be drawn in the image. counts : numpy array of word counts Word counts or weighting of words. Determines the size of the word in the final image. Will be normalized to lie between zero and one. font_path : string Font path to the font that will be used. Defaults to DroidSansMono path. fname : sting Output filename. Extension determins image type (written with PIL). width : int (default=400) Width of the word cloud image. height : int (default=200) Height of the word cloud image. ranks_only : boolean (default=False) Only use the rank of the words, not the actual counts. Notes ----- Larger Images with make the code significantly slower. If you need a large image, you can try running the algorithm at a lower resolution and then drawing the result at the desired resolution. In the current form it actually just uses the rank of the counts, i.e. the relative differences don't matter. Play with setting the font_size in the main loop vor differnt styles. Colors are used completely at random. Currently the colors are sampled from HSV space with a fixed S and V. Adjusting the percentages at the very end gives differnt color ranges. Obviously you can also set all at random - haven't tried that. """ if len(counts) <= 0: print("We need at least 1 word to plot a word cloud, got %d." % len(counts)) if font_path is None: font_path = FONT_PATH if not os.path.exists(font_path): raise ValueError("The provided font %s does not exist." 
% font_path) # normalize counts counts = counts / float(counts.max()) # sort words by counts inds = np.argsort(counts)[::-1] counts = counts[inds] words = words[inds] # create image img_grey = Image.new("L", (width, height)) draw = ImageDraw.Draw(img_grey) integral = np.zeros((height, width), dtype=np.uint32) img_array = np.asarray(img_grey) font_sizes, positions, orientations = [], [], [] # intitiallize font size "large enough" font_size = 1000 # start drawing grey image for word, count in zip(words, counts): # alternative way to set the font size if not ranks_only: font_size = min(font_size, int(100 * np.log(count + 100))) while True: # try to find a position font = ImageFont.truetype(font_path, font_size) # transpose font optionally orientation = random.choice([None, Image.ROTATE_90]) transposed_font = ImageFont.TransposedFont(font, orientation=orientation) draw.setfont(transposed_font) # get size of resulting text box_size = draw.textsize(word) # find possible places using integral image: result = query_integral_image(integral, box_size[1] + margin, box_size[0] + margin) if result is not None or font_size == 0: break # if we didn't find a place, make font smaller font_size -= 1 if font_size == 0: # we were unable to draw any more break x, y = np.array(result) + margin // 2 # actually draw the text draw.text((y, x), word, fill="white") positions.append((x, y)) orientations.append(orientation) font_sizes.append(font_size) # recompute integral image img_array = np.asarray(img_grey) # recompute bottom right # the order of the cumsum's is important for speed ?! 
partial_integral = np.cumsum(np.cumsum(img_array[x:, y:], axis=1), axis=0) # paste recomputed part into old image # if x or y is zero it is a bit annoying if x > 0: if y > 0: partial_integral += (integral[x - 1, y:] - integral[x - 1, y - 1]) else: partial_integral += integral[x - 1, y:] if y > 0: partial_integral += integral[x:, y - 1][:, np.newaxis] integral[x:, y:] = partial_integral # redraw in color img = Image.new("RGB", (width, height)) draw = ImageDraw.Draw(img) everything = zip(words, font_sizes, positions, orientations) for word, font_size, position, orientation in everything: font = ImageFont.truetype(font_path, font_size) # transpose font optionally transposed_font = ImageFont.TransposedFont(font, orientation=orientation) draw.setfont(transposed_font) draw.text((position[1], position[0]), word, fill="hsl(%d" % random.randint(0, 255) + ", 80%, 50%)") img.show() img.save(fname) if __name__ == "__main__": import os import sys from sklearn.feature_extraction.text import CountVectorizer if "-" in sys.argv: lines = sys.stdin.readlines() sources = ['stdin'] else: sources = ([arg for arg in sys.argv[1:] if os.path.exists(arg)] or ["constitution.txt"]) lines = [] for s in sources: with open(s) as f: lines.extend(f.readlines()) text = "".join(lines) cv = CountVectorizer(min_df=1, charset_error="ignore", stop_words="english", max_features=200) counts = cv.fit_transform([text]).toarray().ravel() words = np.array(cv.get_feature_names()) # throw away some words, normalize words = words[counts > 1] counts = counts[counts > 1] output_filename = (os.path.splitext(os.path.basename(sources[0]))[0] + "_.png") counts = make_wordcloud(words, counts, output_filename)
{ "repo_name": "wikiteams/github-gender-studies", "path": "sources/gender_checker/deprecated/wordcloud.py", "copies": "1", "size": "6536", "license": "mit", "hash": 4197254167002440700, "line_mean": 34.5217391304, "line_max": 79, "alpha_frac": 0.5904222766, "autogenerated": false, "ratio": 4.017209588199139, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0, "num_lines": 184 }
from __future__ import division import warnings from random import Random import os import re import sys import colorsys import numpy as np from operator import itemgetter from PIL import Image from PIL import ImageColor from PIL import ImageDraw from PIL import ImageFont from .query_integral_image import query_integral_image from .tokenization import unigrams_and_bigrams, process_tokens item1 = itemgetter(1) FONT_PATH = os.environ.get("FONT_PATH", os.path.join(os.path.dirname(__file__), "DroidSansMono.ttf")) STOPWORDS = set([x.strip() for x in open( os.path.join(os.path.dirname(__file__), 'stopwords')).read().split('\n')]) class IntegralOccupancyMap(object): def __init__(self, height, width, mask): self.height = height self.width = width if mask is not None: # the order of the cumsum's is important for speed ?! self.integral = np.cumsum(np.cumsum(255 * mask, axis=1), axis=0).astype(np.uint32) else: self.integral = np.zeros((height, width), dtype=np.uint32) def sample_position(self, size_x, size_y, random_state): return query_integral_image(self.integral, size_x, size_y, random_state) def update(self, img_array, pos_x, pos_y): partial_integral = np.cumsum(np.cumsum(img_array[pos_x:, pos_y:], axis=1), axis=0) # paste recomputed part into old image # if x or y is zero it is a bit annoying if pos_x > 0: if pos_y > 0: partial_integral += (self.integral[pos_x - 1, pos_y:] - self.integral[pos_x - 1, pos_y - 1]) else: partial_integral += self.integral[pos_x - 1, pos_y:] if pos_y > 0: partial_integral += self.integral[pos_x:, pos_y - 1][:, np.newaxis] self.integral[pos_x:, pos_y:] = partial_integral def random_color_func(word=None, font_size=None, position=None, orientation=None, font_path=None, random_state=None): """Random hue color generation. Default coloring method. This just picks a random hue with value 80% and lumination 50%. Parameters ---------- word, font_size, position, orientation : ignored. 
random_state : random.Random object or None, (default=None) If a random object is given, this is used for generating random numbers. """ if random_state is None: random_state = Random() return "hsl(%d, 80%%, 50%%)" % random_state.randint(0, 255) class colormap_color_func(object): """Color func created from matplotlib colormap. Parameters ---------- colormap : string or matplotlib colormap Colormap to sample from Example ------- >>> WordCloud(color_func=colormap_color_func("magma")) """ def __init__(self, colormap): import matplotlib.pyplot as plt self.colormap = plt.cm.get_cmap(colormap) def __call__(self, word, font_size, position, orientation, random_state=None, **kwargs): if random_state is None: random_state = Random() r, g, b, _ = 255 * np.array(self.colormap(random_state.uniform(0, 1))) return "rgb({:.0f}, {:.0f}, {:.0f})".format(r, g, b) def get_single_color_func(color): """Create a color function which returns a single hue and saturation with. different values (HSV). Accepted values are color strings as usable by PIL/Pillow. >>> color_func1 = get_single_color_func('deepskyblue') >>> color_func2 = get_single_color_func('#00b4d2') """ old_r, old_g, old_b = ImageColor.getrgb(color) rgb_max = 255. h, s, v = colorsys.rgb_to_hsv(old_r / rgb_max, old_g / rgb_max, old_b / rgb_max) def single_color_func(word=None, font_size=None, position=None, orientation=None, font_path=None, random_state=None): """Random color generation. Additional coloring method. It picks a random value with hue and saturation based on the color given to the generating function. Parameters ---------- word, font_size, position, orientation : ignored. random_state : random.Random object or None, (default=None) If a random object is given, this is used for generating random numbers. 
""" if random_state is None: random_state = Random() r, g, b = colorsys.hsv_to_rgb(h, s, random_state.uniform(0.2, 1)) return 'rgb({:.0f}, {:.0f}, {:.0f})'.format(r * rgb_max, g * rgb_max, b * rgb_max) return single_color_func class WordCloud(object): """Word cloud object for generating and drawing. Parameters ---------- font_path : string Font path to the font that will be used (OTF or TTF). Defaults to DroidSansMono path on a Linux machine. If you are on another OS or don't have this font, you need to adjust this path. width : int (default=400) Width of the canvas. height : int (default=200) Height of the canvas. prefer_horizontal : float (default=0.90) The ratio of times to try horizontal fitting as opposed to vertical. mask : nd-array or None (default=None) If not None, gives a binary mask on where to draw words. If mask is not None, width and height will be ignored and the shape of mask will be used instead. All white (#FF or #FFFFFF) entries will be considerd "masked out" while other entries will be free to draw on. [This changed in the most recent version!] scale : float (default=1) Scaling between computation and drawing. For large word-cloud images, using scale instead of larger canvas size is significantly faster, but might lead to a coarser fit for the words. min_font_size : int (default=4) Smallest font size to use. Will stop when there is no more room in this size. font_step : int (default=1) Step size for the font. font_step > 1 might speed up computation but give a worse fit. max_words : number (default=200) The maximum number of words. stopwords : set of strings The words that will be eliminated. background_color : color value (default="black") Background color for the word cloud image. max_font_size : int or None (default=None) Maximum font size for the largest word. If None, height of the image is used. mode : string (default="RGB") Transparent background will be generated when mode is "RGBA" and background_color is None. 
relative_scaling : float (default=.5) Importance of relative word frequencies for font-size. With relative_scaling=0, only word-ranks are considered. With relative_scaling=1, a word that is twice as frequent will have twice the size. If you want to consider the word frequencies and not only their rank, relative_scaling around .5 often looks good. .. versionchanged: 2.0 Default is now 0.5. color_func : callable, default=None Callable with parameters word, font_size, position, orientation, font_path, random_state that returns a PIL color for each word. Overwrites "colormap". See colormap for specifying a matplotlib colormap instead. regexp : string or None (optional) Regular expression to split the input text into tokens in process_text. If None is specified, ``r"\w[\w']+"`` is used. collocations : bool, default=True Whether to include collocations (bigrams) of two words. .. versionadded: 2.0 colormap : string or matplotlib colormap, default="viridis" Matplotlib colormap to randomly draw colors from for each word. Ignored if "color_func" is specified. .. versionadded: 2.0 normalize_plurals : bool, default=True Whether to remove trailing 's' from words. If True and a word appears with and without a trailing 's', the one with trailing 's' is removed and its counts are added to the version without trailing 's' -- unless the word ends with 'ss'. Attributes ---------- ``words_`` : dict of string to float Word tokens with associated frequency. .. versionchanged: 2.0 ``words_`` is now a dictionary ``layout_`` : list of tuples (string, int, (int, int), int, color)) Encodes the fitted word cloud. Encodes for each word the string, font size, position, orientation and color. Notes ----- Larger canvases with make the code significantly slower. If you need a large word cloud, try a lower canvas size, and set the scale parameter. 
The algorithm might give more weight to the ranking of the words than their actual frequencies, depending on the ``max_font_size`` and the scaling heuristic. """ def __init__(self, font_path=None, width=400, height=200, margin=2, ranks_only=None, prefer_horizontal=.9, mask=None, scale=1, color_func=None, max_words=200, min_font_size=4, stopwords=None, random_state=None, background_color='black', max_font_size=None, font_step=1, mode="RGB", relative_scaling=.5, regexp=None, collocations=True, colormap=None, normalize_plurals=True): if font_path is None: font_path = FONT_PATH if color_func is None and colormap is None: # we need a color map import matplotlib version = matplotlib.__version__ if version[0] < "2" and version[2] < "5": colormap = "hsv" else: colormap = "viridis" self.colormap = colormap self.collocations = collocations self.font_path = font_path self.width = width self.height = height self.margin = margin self.prefer_horizontal = prefer_horizontal self.mask = mask self.scale = scale self.color_func = color_func or colormap_color_func(colormap) self.max_words = max_words self.stopwords = stopwords if stopwords is not None else STOPWORDS self.min_font_size = min_font_size self.font_step = font_step self.regexp = regexp if isinstance(random_state, int): random_state = Random(random_state) self.random_state = random_state self.background_color = background_color self.max_font_size = max_font_size self.mode = mode if relative_scaling < 0 or relative_scaling > 1: raise ValueError("relative_scaling needs to be " "between 0 and 1, got %f." % relative_scaling) self.relative_scaling = relative_scaling if ranks_only is not None: warnings.warn("ranks_only is deprecated and will be removed as" " it had no effect. Look into relative_scaling.", DeprecationWarning) self.normalize_plurals = normalize_plurals def fit_words(self, frequencies): """Create a word_cloud from words and frequencies. Alias to generate_from_frequencies. 
Parameters ---------- frequencies : array of tuples A tuple contains the word and its frequency. Returns ------- self """ return self.generate_from_frequencies(frequencies) def generate_from_frequencies(self, frequencies, max_font_size=None): """Create a word_cloud from words and frequencies. Parameters ---------- frequencies : dict from string to float A contains words and associated frequency. max_font_size : int Use this font-size instead of self.max_font_size Returns ------- self """ # make sure frequencies are sorted and normalized frequencies = sorted(frequencies.items(), key=item1, reverse=True) frequencies = frequencies[:self.max_words] # largest entry will be 1 max_frequency = float(frequencies[0][1]) frequencies = [(word, freq / max_frequency) for word, freq in frequencies] if self.random_state is not None: random_state = self.random_state else: random_state = Random() if len(frequencies) <= 0: print("We need at least 1 word to plot a word cloud, got %d." % len(frequencies)) if self.mask is not None: mask = self.mask width = mask.shape[1] height = mask.shape[0] if mask.dtype.kind == 'f': warnings.warn("mask image should be unsigned byte between 0" " and 255. Got a float array") if mask.ndim == 2: boolean_mask = mask == 255 elif mask.ndim == 3: # if all channels are white, mask out boolean_mask = np.all(mask[:, :, :3] == 255, axis=-1) else: raise ValueError("Got mask of invalid shape: %s" % str(mask.shape)) else: boolean_mask = None height, width = self.height, self.width occupancy = IntegralOccupancyMap(height, width, boolean_mask) # create image img_grey = Image.new("L", (width, height)) draw = ImageDraw.Draw(img_grey) img_array = np.asarray(img_grey) font_sizes, positions, orientations, colors = [], [], [], [] last_freq = 1. 
if max_font_size is None: # if not provided use default font_size max_font_size = self.max_font_size if max_font_size is None: # figure out a good font size by trying to draw with # just the first two words if len(frequencies) == 1: # we only have one word. We make it big! font_size = self.height else: self.generate_from_frequencies(dict(frequencies[:2]), max_font_size=self.height) # find font sizes sizes = [x[1] for x in self.layout_] font_size = 2 * sizes[0] * sizes[1] / (sizes[0] + sizes[1]) else: font_size = max_font_size # we set self.words_ here because we called generate_from_frequencies # above... hurray for good design? self.words_ = dict(frequencies) # start drawing grey image for word, freq in frequencies: # select the font size rs = self.relative_scaling if rs != 0: font_size = int(round((rs * (freq / float(last_freq)) + (1 - rs)) * font_size)) if random_state.random() < self.prefer_horizontal: orientation = None else: orientation = Image.ROTATE_90 tried_other_orientation = False while True: # try to find a position font = ImageFont.truetype(self.font_path, font_size) # transpose font optionally transposed_font = ImageFont.TransposedFont( font, orientation=orientation) # get size of resulting text box_size = draw.textsize(word, font=transposed_font) # find possible places using integral image: result = occupancy.sample_position(box_size[1] + self.margin, box_size[0] + self.margin, random_state) if result is not None or font_size < self.min_font_size: # either we found a place or font-size went too small break # if we didn't find a place, make font smaller if tried_other_orientation is False: orientation = (Image.ROTATE_90 if orientation is None else Image.ROTATE_90) tried_other_orientation = True else: font_size -= self.font_step orientation = None if font_size < self.min_font_size: # we were unable to draw any more break x, y = np.array(result) + self.margin // 2 # actually draw the text draw.text((y, x), word, fill="white", font=transposed_font) 
positions.append((x, y)) orientations.append(orientation) font_sizes.append(font_size) colors.append(self.color_func(word, font_size=font_size, position=(x, y), orientation=orientation, random_state=random_state, font_path=self.font_path)) # recompute integral image if self.mask is None: img_array = np.asarray(img_grey) else: img_array = np.asarray(img_grey) + boolean_mask # recompute bottom right # the order of the cumsum's is important for speed ?! occupancy.update(img_array, x, y) last_freq = freq self.layout_ = list(zip(frequencies, font_sizes, positions, orientations, colors)) return self def process_text(self, text): """Splits a long text into words, eliminates the stopwords. Parameters ---------- text : string The text to be processed. Returns ------- words : dict (string, int) Word tokens with associated frequency. ..versionchanged:: 1.2.2 Changed return type from list of tuples to dict. Notes ----- There are better ways to do word tokenization, but I don't want to include all those things. """ stopwords = set(map(str.lower, self.stopwords)) flags = (re.UNICODE if sys.version < '3' and type(text) is unicode else 0) regexp = self.regexp if self.regexp is not None else r"\w[\w']+" words = re.findall(regexp, text, flags) # remove stopwords words = [word for word in words if word.lower() not in stopwords] # remove 's words = [word[:-2] if word.lower().endswith("'s") else word for word in words] # remove numbers words = [word for word in words if not word.isdigit()] if self.collocations: word_counts = unigrams_and_bigrams(words, self.normalize_plurals) else: word_counts, _ = process_tokens(words, self.normalize_plurals) return word_counts def generate_from_text(self, text): """Generate wordcloud from text. Calls process_text and generate_from_frequencies. ..versionchanged:: 1.2.2 Argument of generate_from_frequencies() is not return of process_text() any more. 
Returns ------- self """ words = self.process_text(text) self.generate_from_frequencies(words) return self def generate(self, text): """Generate wordcloud from text. Alias to generate_from_text. Calls process_text and generate_from_frequencies. Returns ------- self """ return self.generate_from_text(text) def _check_generated(self): """Check if ``layout_`` was computed, otherwise raise error.""" if not hasattr(self, "layout_"): raise ValueError("WordCloud has not been calculated, call generate" " first.") def to_image(self): self._check_generated() if self.mask is not None: width = self.mask.shape[1] height = self.mask.shape[0] else: height, width = self.height, self.width img = Image.new(self.mode, (int(width * self.scale), int(height * self.scale)), self.background_color) draw = ImageDraw.Draw(img) for (word, count), font_size, position, orientation, color in self.layout_: font = ImageFont.truetype(self.font_path, int(font_size * self.scale)) transposed_font = ImageFont.TransposedFont( font, orientation=orientation) pos = (int(position[1] * self.scale), int(position[0] * self.scale)) draw.text(pos, word.replace('_',' '), fill=color, font=transposed_font) return img def recolor(self, random_state=None, color_func=None, colormap=None): """Recolor existing layout. Applying a new coloring is much faster than generating the whole wordcloud. Parameters ---------- random_state : RandomState, int, or None, default=None If not None, a fixed random state is used. If an int is given, this is used as seed for a random.Random state. color_func : function or None, default=None Function to generate new color from word count, font size, position and orientation. If None, self.color_func is used. colormap : string or matplotlib colormap, default=None Use this colormap to generate new colors. Ignored if color_func is specified. If None, self.color_func (or self.color_map) is used. 
Returns ------- self """ if isinstance(random_state, int): random_state = Random(random_state) self._check_generated() if color_func is None: if colormap is None: color_func = self.color_func else: color_func = colormap_color_func(colormap) self.layout_ = [(word_freq, font_size, position, orientation, color_func(word=word_freq[0], font_size=font_size, position=position, orientation=orientation, random_state=random_state, font_path=self.font_path)) for word_freq, font_size, position, orientation, _ in self.layout_] return self def to_file(self, filename): """Export to image file. Parameters ---------- filename : string Location to write to. Returns ------- self """ img = self.to_image() img.save(filename) return self def to_array(self): """Convert to numpy array. Returns ------- image : nd-array size (width, height, 3) Word cloud image as numpy matrix. """ return np.array(self.to_image()) def __array__(self): """Convert to numpy array. Returns ------- image : nd-array size (width, height, 3) Word cloud image as numpy matrix. """ return self.to_array() def to_html(self): raise NotImplementedError("FIXME!!!")
{ "repo_name": "Fuzzwah/word_cloud", "path": "wordcloud/wordcloud.py", "copies": "1", "size": "24003", "license": "mit", "hash": -2489741705355119600, "line_mean": 35.3131618759, "line_max": 83, "alpha_frac": 0.5633462484, "autogenerated": false, "ratio": 4.318639798488665, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5381986046888665, "avg_score": null, "num_lines": null }
from __future__ import division import warnings from random import Random import os import re import sys import colorsys import numpy as np import csv from operator import itemgetter from PIL import Image from PIL import ImageColor from PIL import ImageDraw from PIL import ImageFont from .query_integral_image import query_integral_image from .tokenization import unigrams_and_bigrams, process_tokens item1 = itemgetter(1) FONT_PATH = os.environ.get("FONT_PATH", os.path.join(os.path.dirname(__file__), "DroidSansMono.ttf")) STOPWORDS = set([x.strip() for x in open( os.path.join(os.path.dirname(__file__), 'stopwords')).read().split('\n')]) class IntegralOccupancyMap(object): def __init__(self, height, width, mask): self.height = height self.width = width if mask is not None: # the order of the cumsum's is important for speed ?! self.integral = np.cumsum(np.cumsum(255 * mask, axis=1), axis=0).astype(np.uint32) else: self.integral = np.zeros((height, width), dtype=np.uint32) def sample_position(self, size_x, size_y, random_state): return query_integral_image(self.integral, size_x, size_y, random_state) def update(self, img_array, pos_x, pos_y): partial_integral = np.cumsum(np.cumsum(img_array[pos_x:, pos_y:], axis=1), axis=0) # paste recomputed part into old image # if x or y is zero it is a bit annoying if pos_x > 0: if pos_y > 0: partial_integral += (self.integral[pos_x - 1, pos_y:] - self.integral[pos_x - 1, pos_y - 1]) else: partial_integral += self.integral[pos_x - 1, pos_y:] if pos_y > 0: partial_integral += self.integral[pos_x:, pos_y - 1][:, np.newaxis] self.integral[pos_x:, pos_y:] = partial_integral def random_color_func(word=None, font_size=None, position=None, orientation=None, font_path=None, random_state=None): """Random hue color generation. Default coloring method. This just picks a random hue with value 80% and lumination 50%. Parameters ---------- word, font_size, position, orientation : ignored. 
random_state : random.Random object or None, (default=None) If a random object is given, this is used for generating random numbers. """ if random_state is None: random_state = Random() return "hsl(%d, 80%%, 50%%)" % random_state.randint(0, 255) class colormap_color_func(object): """Color func created from matplotlib colormap. Parameters ---------- colormap : string or matplotlib colormap Colormap to sample from Example ------- >>> WordCloud(color_func=colormap_color_func("magma")) """ def __init__(self, colormap): import matplotlib.pyplot as plt self.colormap = plt.cm.get_cmap(colormap) def __call__(self, word, font_size, position, orientation, random_state=None, **kwargs): if random_state is None: random_state = Random() r, g, b, _ = 255 * np.array(self.colormap(random_state.uniform(0, 1))) return "rgb({:.0f}, {:.0f}, {:.0f})".format(r, g, b) def get_single_color_func(color): """Create a color function which returns a single hue and saturation with. different values (HSV). Accepted values are color strings as usable by PIL/Pillow. >>> color_func1 = get_single_color_func('deepskyblue') >>> color_func2 = get_single_color_func('#00b4d2') """ old_r, old_g, old_b = ImageColor.getrgb(color) rgb_max = 255. h, s, v = colorsys.rgb_to_hsv(old_r / rgb_max, old_g / rgb_max, old_b / rgb_max) def single_color_func(word=None, font_size=None, position=None, orientation=None, font_path=None, random_state=None): """Random color generation. Additional coloring method. It picks a random value with hue and saturation based on the color given to the generating function. Parameters ---------- word, font_size, position, orientation : ignored. random_state : random.Random object or None, (default=None) If a random object is given, this is used for generating random numbers. 
""" if random_state is None: random_state = Random() r, g, b = colorsys.hsv_to_rgb(h, s, random_state.uniform(0.2, 1)) return 'rgb({:.0f}, {:.0f}, {:.0f})'.format(r * rgb_max, g * rgb_max, b * rgb_max) return single_color_func class WordCloud(object): """Word cloud object for generating and drawing. Parameters ---------- font_path : string Font path to the font that will be used (OTF or TTF). Defaults to DroidSansMono path on a Linux machine. If you are on another OS or don't have this font, you need to adjust this path. width : int (default=400) Width of the canvas. height : int (default=200) Height of the canvas. prefer_horizontal : float (default=0.90) The ratio of times to try horizontal fitting as opposed to vertical. If prefer_horizontal < 1, the algorithm will try rotating the word if it doesn't fit. (There is currently no built-in way to get only vertical words.) mask : nd-array or None (default=None) If not None, gives a binary mask on where to draw words. If mask is not None, width and height will be ignored and the shape of mask will be used instead. All white (#FF or #FFFFFF) entries will be considerd "masked out" while other entries will be free to draw on. [This changed in the most recent version!] scale : float (default=1) Scaling between computation and drawing. For large word-cloud images, using scale instead of larger canvas size is significantly faster, but might lead to a coarser fit for the words. min_font_size : int (default=4) Smallest font size to use. Will stop when there is no more room in this size. font_step : int (default=1) Step size for the font. font_step > 1 might speed up computation but give a worse fit. max_words : number (default=200) The maximum number of words. stopwords : set of strings or None The words that will be eliminated. If None, the build-in STOPWORDS list will be used. background_color : color value (default="black") Background color for the word cloud image. 
max_font_size : int or None (default=None) Maximum font size for the largest word. If None, height of the image is used. mode : string (default="RGB") Transparent background will be generated when mode is "RGBA" and background_color is None. relative_scaling : float (default=.5) Importance of relative word frequencies for font-size. With relative_scaling=0, only word-ranks are considered. With relative_scaling=1, a word that is twice as frequent will have twice the size. If you want to consider the word frequencies and not only their rank, relative_scaling around .5 often looks good. .. versionchanged: 2.0 Default is now 0.5. color_func : callable, default=None Callable with parameters word, font_size, position, orientation, font_path, random_state that returns a PIL color for each word. Overwrites "colormap". See colormap for specifying a matplotlib colormap instead. regexp : string or None (optional) Regular expression to split the input text into tokens in process_text. If None is specified, ``r"\w[\w']+"`` is used. collocations : bool, default=True Whether to include collocations (bigrams) of two words. .. versionadded: 2.0 colormap : string or matplotlib colormap, default="viridis" Matplotlib colormap to randomly draw colors from for each word. Ignored if "color_func" is specified. .. versionadded: 2.0 normalize_plurals : bool, default=True Whether to remove trailing 's' from words. If True and a word appears with and without a trailing 's', the one with trailing 's' is removed and its counts are added to the version without trailing 's' -- unless the word ends with 'ss'. weightedWords : bool, default=False True to indicate the text provided to WordCloud.generate() is a set of (word , weight) pairs each per line. Attributes ---------- ``words_`` : dict of string to float Word tokens with associated frequency. .. versionchanged: 2.0 ``words_`` is now a dictionary ``layout_`` : list of tuples (string, int, (int, int), int, color)) Encodes the fitted word cloud. 
Encodes for each word the string, font size, position, orientation and color. Notes ----- Larger canvases with make the code significantly slower. If you need a large word cloud, try a lower canvas size, and set the scale parameter. The algorithm might give more weight to the ranking of the words than their actual frequencies, depending on the ``max_font_size`` and the scaling heuristic. """ def __init__(self, font_path=None, width=400, height=200, margin=2, ranks_only=None, prefer_horizontal=.9, mask=None, scale=1, color_func=None, max_words=200, min_font_size=4, stopwords=None, random_state=None, background_color='black', max_font_size=None, font_step=1, mode="RGB", relative_scaling=.5, regexp=None, collocations=True, colormap=None, normalize_plurals=True , weightedwords = False): if font_path is None: font_path = FONT_PATH if color_func is None and colormap is None: # we need a color map import matplotlib version = matplotlib.__version__ if version[0] < "2" and version[2] < "5": colormap = "hsv" else: colormap = "viridis" self.colormap = colormap self.collocations = collocations self.font_path = font_path self.width = width self.height = height self.margin = margin self.prefer_horizontal = prefer_horizontal self.mask = mask self.scale = scale self.color_func = color_func or colormap_color_func(colormap) self.max_words = max_words self.stopwords = stopwords if stopwords is not None else STOPWORDS self.min_font_size = min_font_size self.font_step = font_step self.regexp = regexp self.weightedwords = weightedwords if isinstance(random_state, int): random_state = Random(random_state) self.random_state = random_state self.background_color = background_color self.max_font_size = max_font_size self.mode = mode if relative_scaling < 0 or relative_scaling > 1: raise ValueError("relative_scaling needs to be " "between 0 and 1, got %f." 
% relative_scaling) self.relative_scaling = relative_scaling if ranks_only is not None: warnings.warn("ranks_only is deprecated and will be removed as" " it had no effect. Look into relative_scaling.", DeprecationWarning) self.normalize_plurals = normalize_plurals def fit_words(self, frequencies): """Create a word_cloud from words and frequencies. Alias to generate_from_frequencies. Parameters ---------- frequencies : array of tuples A tuple contains the word and its frequency. Returns ------- self """ return self.generate_from_frequencies(frequencies) def generate_from_frequencies(self, frequencies, max_font_size=None): """Create a word_cloud from words and frequencies. Parameters ---------- frequencies : dict from string to float A contains words and associated frequency. max_font_size : int Use this font-size instead of self.max_font_size Returns ------- self """ # make sure frequencies are sorted and normalized frequencies = sorted(frequencies.items(), key=item1, reverse=True) print frequencies frequencies = frequencies[:self.max_words] # largest entry will be 1 max_frequency = float(frequencies[0][1]) frequencies = [(word, freq / max_frequency) for word, freq in frequencies] if self.random_state is not None: random_state = self.random_state else: random_state = Random() if len(frequencies) <= 0: print("We need at least 1 word to plot a word cloud, got %d." % len(frequencies)) if self.mask is not None: mask = self.mask width = mask.shape[1] height = mask.shape[0] if mask.dtype.kind == 'f': warnings.warn("mask image should be unsigned byte between 0" " and 255. 
Got a float array") if mask.ndim == 2: boolean_mask = mask == 255 elif mask.ndim == 3: # if all channels are white, mask out boolean_mask = np.all(mask[:, :, :3] == 255, axis=-1) else: raise ValueError("Got mask of invalid shape: %s" % str(mask.shape)) else: boolean_mask = None height, width = self.height, self.width occupancy = IntegralOccupancyMap(height, width, boolean_mask) # create image img_grey = Image.new("L", (width, height)) draw = ImageDraw.Draw(img_grey) img_array = np.asarray(img_grey) font_sizes, positions, orientations, colors = [], [], [], [] last_freq = 1. if max_font_size is None: # if not provided use default font_size max_font_size = self.max_font_size if max_font_size is None: # figure out a good font size by trying to draw with # just the first two words if len(frequencies) == 1: # we only have one word. We make it big! font_size = self.height else: self.generate_from_frequencies(dict(frequencies[:2]), max_font_size=self.height) # find font sizes sizes = [x[1] for x in self.layout_] font_size = 2 * sizes[0] * sizes[1] / (sizes[0] + sizes[1]) else: font_size = max_font_size # we set self.words_ here because we called generate_from_frequencies # above... hurray for good design? 
self.words_ = dict(frequencies) # start drawing grey image for word, freq in frequencies: # select the font size rs = self.relative_scaling if rs != 0: font_size = int(round((rs * (freq / float(last_freq)) + (1 - rs)) * font_size)) if random_state.random() < self.prefer_horizontal: orientation = None else: orientation = Image.ROTATE_90 tried_other_orientation = False while True: # try to find a position font = ImageFont.truetype(self.font_path, font_size) # transpose font optionally transposed_font = ImageFont.TransposedFont( font, orientation=orientation) # get size of resulting text box_size = draw.textsize(word, font=transposed_font) # find possible places using integral image: result = occupancy.sample_position(box_size[1] + self.margin, box_size[0] + self.margin, random_state) if result is not None or font_size < self.min_font_size: # either we found a place or font-size went too small break # if we didn't find a place, make font smaller # but first try to rotate! if not tried_other_orientation and self.prefer_horizontal < 1: orientation = (Image.ROTATE_90 if orientation is None else Image.ROTATE_90) tried_other_orientation = True else: font_size -= self.font_step orientation = None if font_size < self.min_font_size: # we were unable to draw any more break x, y = np.array(result) + self.margin // 2 # actually draw the text draw.text((y, x), word, fill="white", font=transposed_font) positions.append((x, y)) orientations.append(orientation) font_sizes.append(font_size) colors.append(self.color_func(word, font_size=font_size, position=(x, y), orientation=orientation, random_state=random_state, font_path=self.font_path)) # recompute integral image if self.mask is None: img_array = np.asarray(img_grey) else: img_array = np.asarray(img_grey) + boolean_mask # recompute bottom right # the order of the cumsum's is important for speed ?! 
occupancy.update(img_array, x, y) last_freq = freq self.layout_ = list(zip(frequencies, font_sizes, positions, orientations, colors)) return self def process_text(self, text): """Splits a long text into words, eliminates the stopwords. Parameters ---------- text : string The text to be processed. Returns ------- words : dict (string, int) Word tokens with associated frequency. ..versionchanged:: 1.2.2 Changed return type from list of tuples to dict. Notes ----- There are better ways to do word tokenization, but I don't want to include all those things. """ stopwords = set(map(str.lower, self.stopwords)) flags = (re.UNICODE if sys.version < '3' and type(text) is unicode else 0) regexp = self.regexp if self.regexp is not None else r"\w[\w']+" words = re.findall(regexp, text, flags) # remove stopwords words = [word for word in words if word.lower() not in stopwords] # remove 's words = [word[:-2] if word.lower().endswith("'s") else word for word in words] # remove numbers words = [word for word in words if not word.isdigit()] if self.collocations: word_counts = unigrams_and_bigrams(words, self.normalize_plurals) else: word_counts, _ = process_tokens(words, self.normalize_plurals) return word_counts def generate_from_text(self, text): """Generate wordcloud from text. Calls process_text and generate_from_frequencies. ..versionchanged:: 1.2.2 Argument of generate_from_frequencies() is not return of process_text() any more. Returns ------- self """ words = dict() if not self.weightedwords : words = self.process_text(text) else : for row in csv.reader (text.split('\n')): if row: words[row[0]] = float(row[1]) self.generate_from_frequencies(words) return self def generate(self, text): """Generate wordcloud from text. Alias to generate_from_text. Calls process_text and generate_from_frequencies. 
Returns ------- self """ return self.generate_from_text(text) def _check_generated(self): """Check if ``layout_`` was computed, otherwise raise error.""" if not hasattr(self, "layout_"): raise ValueError("WordCloud has not been calculated, call generate" " first.") def to_image(self): self._check_generated() if self.mask is not None: width = self.mask.shape[1] height = self.mask.shape[0] else: height, width = self.height, self.width img = Image.new(self.mode, (int(width * self.scale), int(height * self.scale)), self.background_color) draw = ImageDraw.Draw(img) for (word, count), font_size, position, orientation, color in self.layout_: font = ImageFont.truetype(self.font_path, int(font_size * self.scale)) transposed_font = ImageFont.TransposedFont( font, orientation=orientation) pos = (int(position[1] * self.scale), int(position[0] * self.scale)) draw.text(pos, word, fill=color, font=transposed_font) return img def recolor(self, random_state=None, color_func=None, colormap=None): """Recolor existing layout. Applying a new coloring is much faster than generating the whole wordcloud. Parameters ---------- random_state : RandomState, int, or None, default=None If not None, a fixed random state is used. If an int is given, this is used as seed for a random.Random state. color_func : function or None, default=None Function to generate new color from word count, font size, position and orientation. If None, self.color_func is used. colormap : string or matplotlib colormap, default=None Use this colormap to generate new colors. Ignored if color_func is specified. If None, self.color_func (or self.color_map) is used. 
Returns ------- self """ if isinstance(random_state, int): random_state = Random(random_state) self._check_generated() if color_func is None: if colormap is None: color_func = self.color_func else: color_func = colormap_color_func(colormap) self.layout_ = [(word_freq, font_size, position, orientation, color_func(word=word_freq[0], font_size=font_size, position=position, orientation=orientation, random_state=random_state, font_path=self.font_path)) for word_freq, font_size, position, orientation, _ in self.layout_] return self def to_file(self, filename): """Export to image file. Parameters ---------- filename : string Location to write to. Returns ------- self """ img = self.to_image() img.save(filename) return self def to_array(self): """Convert to numpy array. Returns ------- image : nd-array size (width, height, 3) Word cloud image as numpy matrix. """ return np.array(self.to_image()) def __array__(self): """Convert to numpy array. Returns ------- image : nd-array size (width, height, 3) Word cloud image as numpy matrix. """ return self.to_array() def to_html(self): raise NotImplementedError("FIXME!!!")
{ "repo_name": "mohammadKhalifa/word_cloud", "path": "wordcloud/wordcloud.py", "copies": "1", "size": "24776", "license": "mit", "hash": 1474414987696064800, "line_mean": 35.4889543446, "line_max": 83, "alpha_frac": 0.5644171779, "autogenerated": false, "ratio": 4.3246639902251705, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.0006222663316428657, "num_lines": 679 }
import warnings from random import Random import os import re import sys import colorsys import numpy as np from operator import itemgetter from PIL import Image from PIL import ImageColor from PIL import ImageDraw from PIL import ImageFont from .query_integral_image import query_integral_image item1 = itemgetter(1) FONT_PATH = os.environ.get("FONT_PATH", os.path.join(os.path.dirname(__file__), "DroidSansMono.ttf")) STOPWORDS = set([x.strip() for x in open(os.path.join(os.path.dirname(__file__), 'stopwords')).read().split('\n')]) class IntegralOccupancyMap(object): def __init__(self, height, width, mask): self.height = height self.width = width if mask is not None: # the order of the cumsum's is important for speed ?! self.integral = np.cumsum(np.cumsum(255 * mask, axis=1), axis=0).astype(np.uint32) else: self.integral = np.zeros((height, width), dtype=np.uint32) def sample_position(self, size_x, size_y, random_state): return query_integral_image(self.integral, size_x, size_y, random_state) def update(self, img_array, pos_x, pos_y): partial_integral = np.cumsum(np.cumsum(img_array[pos_x:, pos_y:], axis=1), axis=0) # paste recomputed part into old image # if x or y is zero it is a bit annoying if pos_x > 0: if pos_y > 0: partial_integral += (self.integral[pos_x - 1, pos_y:] - self.integral[pos_x - 1, pos_y - 1]) else: partial_integral += self.integral[pos_x - 1, pos_y:] if pos_y > 0: partial_integral += self.integral[pos_x:, pos_y - 1][:, np.newaxis] self.integral[pos_x:, pos_y:] = partial_integral def random_color_func(word=None, font_size=None, position=None, orientation=None, font_path=None, random_state=None): """Random hue color generation. Default coloring method. This just picks a random hue with value 80% and lumination 50%. Parameters ---------- word, font_size, position, orientation : ignored. random_state : random.Random object or None, (default=None) If a random object is given, this is used for generating random numbers. 
""" if random_state is None: random_state = Random() return "hsl(%d, 80%%, 50%%)" % random_state.randint(0, 255) def get_single_color_func(color): """Create a color function which returns a single hue and saturation with. different values (HSV). Accepted values are color strings as usable by PIL/Pillow. >>> color_func1 = get_single_color_func('deepskyblue') >>> color_func2 = get_single_color_func('#00b4d2') """ old_r, old_g, old_b = ImageColor.getrgb(color) rgb_max = 255. h, s, v = colorsys.rgb_to_hsv(old_r/rgb_max, old_g/rgb_max, old_b/rgb_max) def single_color_func(word=None, font_size=None, position=None, orientation=None, font_path=None, random_state=None): """Random color generation. Additional coloring method. It picks a random value with hue and saturation based on the color given to the generating function. Parameters ---------- word, font_size, position, orientation : ignored. random_state : random.Random object or None, (default=None) If a random object is given, this is used for generating random numbers. """ if random_state is None: random_state = Random() r, g, b = colorsys.hsv_to_rgb(h, s, random_state.uniform(0.2, 1)) return 'rgb({:.0f}, {:.0f}, {:.0f})'.format(r * rgb_max, g * rgb_max, b * rgb_max) return single_color_func class WordCloud(object): """Word cloud object for generating and drawing. Parameters ---------- font_path : string Font path to the font that will be used (OTF or TTF). Defaults to DroidSansMono path on a Linux machine. If you are on another OS or don't have this font, you need to adjust this path. width : int (default=400) Width of the canvas. height : int (default=200) Height of the canvas. prefer_horizontal : float (default=0.90) The ratio of times to try horizontal fitting as opposed to vertical. mask : nd-array or None (default=None) If not None, gives a binary mask on where to draw words. If mask is not None, width and height will be ignored and the shape of mask will be used instead. 
All white (#FF or #FFFFFF) entries will be considerd "masked out" while other entries will be free to draw on. [This changed in the most recent version!] scale : float (default=1) Scaling between computation and drawing. For large word-cloud images, using scale instead of larger canvas size is significantly faster, but might lead to a coarser fit for the words. min_font_size : int (default=4) Smallest font size to use. Will stop when there is no more room in this size. font_step : int (default=1) Step size for the font. font_step > 1 might speed up computation but give a worse fit. max_words : number (default=200) The maximum number of words. stopwords : set of strings The words that will be eliminated. background_color : color value (default="black") Background color for the word cloud image. max_font_size : int or None (default=None) Maximum font size for the largest word. If None, height of the image is used. mode : string (default="RGB") Transparent background will be generated when mode is "RGBA" and background_color is None. relative_scaling : float (default=0) Importance of relative word frequencies for font-size. With relative_scaling=0, only word-ranks are considered. With relative_scaling=1, a word that is twice as frequent will have twice the size. If you want to consider the word frequencies and not only their rank, relative_scaling around .5 often looks good. Attributes ---------- ``words_``: list of tuples (string, float) Word tokens with associated frequency. ``layout_`` : list of tuples (string, int, (int, int), int, color)) Encodes the fitted word cloud. Encodes for each word the string, font size, position, orientation and color. Notes ----- Larger canvases with make the code significantly slower. If you need a large word cloud, try a lower canvas size, and set the scale parameter. The algorithm might give more weight to the ranking of the words than their actual frequencies, depending on the ``max_font_size`` and the scaling heuristic. 
""" def __init__(self, font_path=None, width=400, height=200, margin=2, ranks_only=None, prefer_horizontal=0.9, mask=None, scale=1, color_func=random_color_func, max_words=200, min_font_size=4, stopwords=None, random_state=None, background_color='black', max_font_size=None, font_step=1, mode="RGB", relative_scaling=0): if stopwords is None: stopwords = STOPWORDS if font_path is None: font_path = FONT_PATH self.font_path = font_path self.width = width self.height = height self.margin = margin self.prefer_horizontal = prefer_horizontal self.mask = mask self.scale = scale self.color_func = color_func self.max_words = max_words self.stopwords = stopwords self.min_font_size = min_font_size self.font_step = font_step if isinstance(random_state, int): random_state = Random(random_state) self.random_state = random_state self.background_color = background_color if max_font_size is None: max_font_size = height self.max_font_size = max_font_size self.mode = mode if relative_scaling < 0 or relative_scaling > 1: raise ValueError("relative_scaling needs to be between 0 and 1, got %f." % relative_scaling) self.relative_scaling = relative_scaling if ranks_only is not None: warnings.warn("ranks_only is deprecated and will be removed as" " it had no effect. Look into relative_scaling.", DeprecationWarning) def fit_words(self, frequencies): """Create a word_cloud from words and frequencies. Alias to generate_from_frequencies. Parameters ---------- frequencies : array of tuples A tuple contains the word and its frequency. Returns ------- self """ return self.generate_from_frequencies(frequencies) def generate_from_frequencies(self, frequencies): """Create a word_cloud from words and frequencies. Parameters ---------- frequencies : array of tuples A tuple contains the word and its frequency. 
Returns ------- self """ # make sure frequencies are sorted and normalized frequencies = sorted(frequencies, key=item1, reverse=True) frequencies = frequencies[:self.max_words] # largest entry will be 1 max_frequency = float(frequencies[0][1]) frequencies = [ (word, freq / max_frequency) for word, freq in frequencies ] self.words_ = frequencies if self.random_state is not None: random_state = self.random_state else: random_state = Random() if len(frequencies) <= 0: print("We need at least 1 word to plot a word cloud, got %d." % len(frequencies)) if self.mask is not None: mask = self.mask width = mask.shape[1] height = mask.shape[0] if mask.dtype.kind == 'f': warnings.warn("mask image should be unsigned byte between 0 and" " 255. Got a float array") if mask.ndim == 2: boolean_mask = mask == 255 elif mask.ndim == 3: # if all channels are white, mask out boolean_mask = np.all(mask[:, :, :3] == 255, axis=-1) else: raise ValueError("Got mask of invalid shape: %s" % str(mask.shape)) else: boolean_mask = None height, width = self.height, self.width occupancy = IntegralOccupancyMap(height, width, boolean_mask) # create image img_grey = Image.new("L", (width, height)) draw = ImageDraw.Draw(img_grey) img_array = np.asarray(img_grey) font_sizes, positions, orientations, colors = [], [], [], [] font_size = self.max_font_size last_freq = 1. 
# start drawing grey image for word, freq in frequencies: # select the font size rs = self.relative_scaling if rs != 0: font_size = int(round((rs * (freq / float(last_freq)) + (1 - rs)) * font_size)) while True: # try to find a position font = ImageFont.truetype(self.font_path, font_size) # transpose font optionally if random_state.random() < self.prefer_horizontal: orientation = None else: orientation = Image.ROTATE_90 transposed_font = ImageFont.TransposedFont(font, orientation=orientation) # get size of resulting text box_size = draw.textsize(word, font=transposed_font) # find possible places using integral image: result = occupancy.sample_position(box_size[1] + self.margin, box_size[0] + self.margin, random_state) if result is not None or font_size == 0: break # if we didn't find a place, make font smaller font_size -= self.font_step if font_size < self.min_font_size: # we were unable to draw any more break x, y = np.array(result) + self.margin // 2 # actually draw the text draw.text((y, x), word, fill="white", font=transposed_font) positions.append((x, y)) orientations.append(orientation) font_sizes.append(font_size) colors.append(self.color_func(word, font_size=font_size, position=(x, y), orientation=orientation, random_state=random_state, font_path=self.font_path)) # recompute integral image if self.mask is None: img_array = np.asarray(img_grey) else: img_array = np.asarray(img_grey) + boolean_mask # recompute bottom right # the order of the cumsum's is important for speed ?! occupancy.update(img_array, x, y) last_freq = freq self.layout_ = list(zip(frequencies, font_sizes, positions, orientations, colors)) return self def process_text(self, text): """Splits a long text into words, eliminates the stopwords. Parameters ---------- text : string The text to be processed. Returns ------- words : list of tuples (string, float) Word tokens with associated frequency. 
Notes ----- There are better ways to do word tokenization, but I don't want to include all those things. """ d = {} flags = (re.UNICODE if sys.version < '3' and type(text) is unicode else 0) for word in re.findall(r"\w[\w']+", text, flags=flags): if word.isdigit(): continue word_lower = word.lower() if word_lower in self.stopwords: continue # Look in lowercase dict. try: d2 = d[word_lower] except KeyError: d2 = {} d[word_lower] = d2 # Look in any case dict. d2[word] = d2.get(word, 0) + 1 # merge plurals into the singular count (simple cases only) for key in list(d.keys()): if key.endswith('s'): key_singular = key[:-1] if key_singular in d: dict_plural = d[key] dict_singular = d[key_singular] for word, count in dict_plural.items(): singular = word[:-1] dict_singular[singular] = dict_singular.get(singular, 0) + count del d[key] d3 = {} for d2 in d.values(): # Get the most popular case. first = max(d2.items(), key=item1)[0] d3[first] = sum(d2.values()) return d3.items() def generate_from_text(self, text): """Generate wordcloud from text. Calls process_text and generate_from_frequencies. Returns ------- self """ words = self.process_text(text) self.generate_from_frequencies(words) return self def generate(self, text): """Generate wordcloud from text. Alias to generate_from_text. Calls process_text and generate_from_frequencies. 
Returns ------- self """ return self.generate_from_text(text) def _check_generated(self): """Check if ``layout_`` was computed, otherwise raise error.""" if not hasattr(self, "layout_"): raise ValueError("WordCloud has not been calculated, call generate first.") def to_image(self): self._check_generated() if self.mask is not None: width = self.mask.shape[1] height = self.mask.shape[0] else: height, width = self.height, self.width img = Image.new(self.mode, (int(width * self.scale), int(height * self.scale)), self.background_color) draw = ImageDraw.Draw(img) for (word, count), font_size, position, orientation, color in self.layout_: font = ImageFont.truetype(self.font_path, int(font_size * self.scale)) transposed_font = ImageFont.TransposedFont(font, orientation=orientation) pos = (int(position[1] * self.scale), int(position[0] * self.scale)) draw.text(pos, word, fill=color, font=transposed_font) return img def recolor(self, random_state=None, color_func=None): """Recolor existing layout. Applying a new coloring is much faster than generating the whole wordcloud. Parameters ---------- random_state : RandomState, int, or None, default=None If not None, a fixed random state is used. If an int is given, this is used as seed for a random.Random state. color_func : function or None, default=None Function to generate new color from word count, font size, position and orientation. If None, self.color_func is used. Returns ------- self """ if isinstance(random_state, int): random_state = Random(random_state) self._check_generated() if color_func is None: color_func = self.color_func self.layout_ = [(word_freq, font_size, position, orientation, color_func(word=word_freq[0], font_size=font_size, position=position, orientation=orientation, random_state=random_state, font_path=self.font_path)) for word_freq, font_size, position, orientation, _ in self.layout_] return self def to_file(self, filename): """Export to image file. 
Parameters ---------- filename : string Location to write to. Returns ------- self """ img = self.to_image() img.save(filename) return self def to_array(self): """Convert to numpy array. Returns ------- image : nd-array size (width, height, 3) Word cloud image as numpy matrix. """ return np.array(self.to_image()) def __array__(self): """Convert to numpy array. Returns ------- image : nd-array size (width, height, 3) Word cloud image as numpy matrix. """ return self.to_array() def to_html(self): raise NotImplementedError("FIXME!!!")
{ "repo_name": "gfarrenkopf/debateScraper", "path": "wordcloud/wordcloud.py", "copies": "1", "size": "19566", "license": "apache-2.0", "hash": 8954719847517168000, "line_mean": 34.9669117647, "line_max": 95, "alpha_frac": 0.5607686804, "autogenerated": false, "ratio": 4.259033521985198, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5319802202385198, "avg_score": null, "num_lines": null }
import warnings from random import Random import os import re import sys import numpy as np from operator import itemgetter from PIL import Image from PIL import ImageDraw from PIL import ImageFont from .query_integral_image import query_integral_image item1 = itemgetter(1) FONT_PATH = os.environ.get("FONT_PATH", "/usr/share/fonts/truetype/droid/DroidSansMono.ttf") STOPWORDS = set([x.strip() for x in open(os.path.join(os.path.dirname(__file__), 'stopwords')).read().split('\n')]) def random_color_func(word=None, font_size=None, position=None, orientation=None, font_path=None, random_state=None): """Random hue color generation. Default coloring method. This just picks a random hue with value 80% and lumination 50%. Parameters ---------- word, font_size, position, orientation : ignored. random_state : random.Random object or None, (default=None) If a random object is given, this is used for generating random numbers. """ if random_state is None: random_state = Random() return "hsl(%d, 80%%, 50%%)" % random_state.randint(0, 255) class WordCloud(object): """Word cloud object for generating and drawing. Parameters ---------- font_path : string Font path to the font that will be used (OTF or TTF). Defaults to DroidSansMono path on a Linux machine. If you are on another OS or don't have this font, you need to adjust this path. width : int (default=400) Width of the canvas. height : int (default=200) Height of the canvas. ranks_only : boolean (default=False) Only use the rank of the words, not the actual counts. prefer_horizontal : float (default=0.90) The ratio of times to try horizontal fitting as opposed to vertical. mask : nd-array or None (default=None) If not None, gives a binary mask on where to draw words. If mask is not None, width and height will be ignored and the shape of mask will be used instead. All white (#FF or #FFFFFF) entries will be considerd "masked out" while other entries will be free to draw on. [This changed in the most recent version!] 
scale : float (default=1) Scaling between computation and drawing. For large word-cloud images, using scale instead of larger canvas size is significantly faster, but might lead to a coarser fit for the words. max_words : number (default=200) The maximum number of words. stopwords : set of strings The words that will be eliminated. background_color : color value (default="black") Background color for the word cloud image. max_font_size : int or None (default=None) Maximum font size for the largest word. If None, height of the image is used. Attributes ---------- ``words_``: list of tuples (string, float) Word tokens with associated frequency. ``layout_`` : list of tuples (string, int, (int, int), int, color)) Encodes the fitted word cloud. Encodes for each word the string, font size, position, orientation and color. Notes ----- Larger canvases with make the code significantly slower. If you need a large word cloud, try a lower canvas size, and set the scale parameter. The algorithm might give more weight to the ranking of the words than their actual frequencies, depending on the ``max_font_size`` and the scaling heuristic. 
""" def __init__(self, font_path=None, width=400, height=200, margin=5, ranks_only=False, prefer_horizontal=0.9, mask=None, scale=1, color_func=random_color_func, max_words=200, stopwords=None, random_state=None, background_color='black', max_font_size=None): if stopwords is None: stopwords = STOPWORDS if font_path is None: font_path = FONT_PATH self.font_path = font_path self.width = width self.height = height self.margin = margin self.ranks_only = ranks_only self.prefer_horizontal = prefer_horizontal self.mask = mask self.scale = scale self.color_func = color_func self.max_words = max_words self.stopwords = stopwords if isinstance(random_state, int): random_state = Random(random_state) self.random_state = random_state self.background_color = background_color if max_font_size is None: max_font_size = height self.max_font_size = max_font_size def fit_words(self, frequencies): """Create a word_cloud from words and frequencies. Alias to generate_from_frequencies. Parameters ---------- frequencies : array of tuples A tuple contains the word and its frequency. Returns ------- self """ return self.generate_from_frequencies(frequencies) def generate_from_frequencies(self, frequencies): """Create a word_cloud from words and frequencies. Parameters ---------- frequencies : array of tuples A tuple contains the word and its frequency. Returns ------- self """ if self.random_state is not None: random_state = self.random_state else: random_state = Random() if len(frequencies) <= 0: print("We need at least 1 word to plot a word cloud, got %d." % len(frequencies)) if self.mask is not None: mask = self.mask width = mask.shape[1] height = mask.shape[0] if mask.dtype.kind == 'f': warnings.warn("mask image should be unsigned byte between 0 and" " 255. 
Got a float array") if mask.ndim == 2: boolean_mask = mask == 255 elif mask.ndim == 3: # "OR" the color channels boolean_mask = np.sum(mask[:, :, :3] == 255, axis=-1) else: raise ValueError("Got mask of invalid shape: %s" % str(mask.shape)) # the order of the cumsum's is important for speed ?! integral = np.cumsum(np.cumsum(boolean_mask * 255, axis=1), axis=0).astype(np.uint32) else: height, width = self.height, self.width integral = np.zeros((height, width), dtype=np.uint32) # create image img_grey = Image.new("L", (width, height)) draw = ImageDraw.Draw(img_grey) img_array = np.asarray(img_grey) font_sizes, positions, orientations, colors = [], [], [], [] font_size = self.max_font_size # start drawing grey image for word, count in frequencies: # alternative way to set the font size if not self.ranks_only: font_size = min(font_size, int(100 * np.log(count + 100))) while True: # try to find a position font = ImageFont.truetype(self.font_path, font_size) # transpose font optionally if random_state.random() < self.prefer_horizontal: orientation = None else: orientation = Image.ROTATE_90 transposed_font = ImageFont.TransposedFont(font, orientation=orientation) draw.setfont(transposed_font) # get size of resulting text box_size = draw.textsize(word) # find possible places using integral image: result = query_integral_image(integral, box_size[1] + self.margin, box_size[0] + self.margin, random_state) if result is not None or font_size == 0: break # if we didn't find a place, make font smaller font_size -= 1 if font_size == 0: # we were unable to draw any more break x, y = np.array(result) + self.margin // 2 # actually draw the text draw.text((y, x), word, fill="white") positions.append((x, y)) orientations.append(orientation) font_sizes.append(font_size) colors.append(self.color_func(word, font_size=font_size, position=(x, y), orientation=orientation, random_state=random_state, font_path=self.font_path)) # recompute integral image if self.mask is None: img_array = 
np.asarray(img_grey) else: img_array = np.asarray(img_grey) + boolean_mask # recompute bottom right # the order of the cumsum's is important for speed ?! partial_integral = np.cumsum(np.cumsum(img_array[x:, y:], axis=1), axis=0) # paste recomputed part into old image # if x or y is zero it is a bit annoying if x > 0: if y > 0: partial_integral += (integral[x - 1, y:] - integral[x - 1, y - 1]) else: partial_integral += integral[x - 1, y:] if y > 0: partial_integral += integral[x:, y - 1][:, np.newaxis] integral[x:, y:] = partial_integral self.layout_ = list(zip(frequencies, font_sizes, positions, orientations, colors)) return self def process_text(self, text): """Splits a long text into words, eliminates the stopwords. Parameters ---------- text : string The text to be processed. Returns ------- words : list of tuples (string, float) Word tokens with associated frequency. Notes ----- There are better ways to do word tokenization, but I don't want to include all those things. """ d = {} flags = (re.UNICODE if sys.version < '3' and type(text) is unicode else 0) for word in re.findall(r"\w[\w']*", text, flags=flags): if word.isdigit(): continue word_lower = word.lower() if word_lower in self.stopwords: continue # Look in lowercase dict. if word_lower in d: d2 = d[word_lower] else: d2 = {} d[word_lower] = d2 # Look in any case dict. d2[word] = d2.get(word, 0) + 1 d3 = {} for d2 in d.values(): # Get the most popular case. 
first = max(d2.items(), key=item1)[0] d3[first] = sum(d2.values()) # merge plurals into the singular count (simple cases only) for key in list(d3.keys()): if key.endswith('s'): key_singular = key[:-1] if key_singular in d3: val_plural = d3[key] val_singular = d3[key_singular] d3[key_singular] = val_singular + val_plural del d3[key] words = sorted(d3.items(), key=item1, reverse=True) words = words[:self.max_words] maximum = float(max(d3.values())) for i, (word, count) in enumerate(words): words[i] = word, count / maximum self.words_ = words return words def generate_from_text(self, text): """Generate wordcloud from text. Calls process_text and fit_words. Returns ------- self """ self.process_text(text) self.fit_words(self.words_) return self def generate(self, text): """Generate wordcloud from text. Alias to generate_from_text. Calls process_text and fit_words. Returns ------- self """ return self.generate_from_text(text) def _check_generated(self): """Check if ``layout_`` was computed, otherwise raise error.""" if not hasattr(self, "layout_"): raise ValueError("WordCloud has not been calculated, call generate first.") def to_image(self): self._check_generated() if self.mask is not None: width = self.mask.shape[1] height = self.mask.shape[0] else: height, width = self.height, self.width img = Image.new("RGB", (width * self.scale, height * self.scale), self.background_color) draw = ImageDraw.Draw(img) for (word, count), font_size, position, orientation, color in self.layout_: font = ImageFont.truetype(self.font_path, font_size * self.scale) transposed_font = ImageFont.TransposedFont(font, orientation=orientation) draw.setfont(transposed_font) pos = (position[1] * self.scale, position[0] * self.scale) draw.text(pos, word, fill=color) return img def recolor(self, random_state=None, color_func=None): """Recolor existing layout. Applying a new coloring is much faster than generating the whole wordcloud. 
Parameters ---------- random_state : RandomState, int, or None, default=None If not None, a fixed random state is used. If an int is given, this is used as seed for a random.Random state. color_func : function or None, default=None Function to generate new color from word count, font size, position and orientation. If None, self.color_func is used. Returns ------- self """ if isinstance(random_state, int): random_state = Random(random_state) self._check_generated() if color_func is None: color_func = self.color_func self.layout_ = [(word_freq, font_size, position, orientation, color_func(word=word_freq[0], font_size=font_size, position=position, orientation=orientation, random_state=random_state, font_path=self.font_path)) for word_freq, font_size, position, orientation, _ in self.layout_] return self def to_file(self, filename): """Export to image file. Parameters ---------- filename : string Location to write to. Returns ------- self """ img = self.to_image() img.save(filename) return self def to_array(self): """Convert to numpy array. Returns ------- image : nd-array size (width, height, 3) Word cloud image as numpy matrix. """ return np.array(self.to_image()) def __array__(self): """Convert to numpy array. Returns ------- image : nd-array size (width, height, 3) Word cloud image as numpy matrix. """ return self.to_array() def to_html(self): raise NotImplementedError("FIXME!!!")
{ "repo_name": "staticor/word_cloud", "path": "wordcloud/wordcloud.py", "copies": "1", "size": "15751", "license": "mit", "hash": 2479185870301739500, "line_mean": 33.3159041394, "line_max": 97, "alpha_frac": 0.5474573043, "autogenerated": false, "ratio": 4.340314136125654, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.5387771440425654, "avg_score": null, "num_lines": null }
import warnings from random import Random import os import re import sys import colorsys import numpy as np from operator import itemgetter from PIL import Image from PIL import ImageColor from PIL import ImageDraw from PIL import ImageFont from .query_integral_image import query_integral_image item1 = itemgetter(1) FONT_PATH = os.environ.get("FONT_PATH", os.path.join(os.path.dirname(__file__), "DroidSansMono.ttf")) STOPWORDS = set([x.strip() for x in open(os.path.join(os.path.dirname(__file__), 'stopwords')).read().split('\n')]) class IntegralOccupancyMap(object): def __init__(self, height, width, mask): self.height = height self.width = width if mask is not None: # the order of the cumsum's is important for speed ?! self.integral = np.cumsum(np.cumsum(255 * mask, axis=1), axis=0).astype(np.uint32) else: self.integral = np.zeros((height, width), dtype=np.uint32) def sample_position(self, size_x, size_y, random_state): return query_integral_image(self.integral, size_x, size_y, random_state) def update(self, img_array, pos_x, pos_y): partial_integral = np.cumsum(np.cumsum(img_array[pos_x:, pos_y:], axis=1), axis=0) # paste recomputed part into old image # if x or y is zero it is a bit annoying if pos_x > 0: if pos_y > 0: partial_integral += (self.integral[pos_x - 1, pos_y:] - self.integral[pos_x - 1, pos_y - 1]) else: partial_integral += self.integral[pos_x - 1, pos_y:] if pos_y > 0: partial_integral += self.integral[pos_x:, pos_y - 1][:, np.newaxis] self.integral[pos_x:, pos_y:] = partial_integral def random_color_func(word=None, font_size=None, position=None, rank=None, hue=0, orientation=None, font_path=None, random_state=None): """Random hue color generation. Default coloring method. This just picks a random hue with value 80% and lumination 50%. Parameters ---------- word, font_size, position, orientation : ignored. random_state : random.Random object or None, (default=None) If a random object is given, this is used for generating random numbers. 
""" # print("color_func: ",word,rank,"\n") if random_state is None: random_state = Random() # return "hsl(%d, 80%%, 50%%)" % random_state.randint(0, 255) # print("hsl(0, %d%%, 50%%)\n" % (30+60*rank)) return "hsl(%d, 80%%, %d%%)" % (hue,(30+60*rank)) def get_single_color_func(color): """Create a color function which returns a single hue and saturation with. different values (HSV). Accepted values are color strings as usable by PIL/Pillow. >>> color_func1 = get_single_color_func('deepskyblue') >>> color_func2 = get_single_color_func('#00b4d2') """ old_r, old_g, old_b = ImageColor.getrgb(color) rgb_max = 255. h, s, v = colorsys.rgb_to_hsv(old_r/rgb_max, old_g/rgb_max, old_b/rgb_max) def single_color_func(word=None, font_size=None, position=None, rank=None, orientation=None, font_path=None, random_state=None): """Random color generation. Additional coloring method. It picks a random value with hue and saturation based on the color given to the generating function. Parameters ---------- word, font_size, position, orientation : ignored. random_state : random.Random object or None, (default=None) If a random object is given, this is used for generating random numbers. """ r, g, b = colorsys.hsv_to_rgb(h, s, (0.30 + 0.60*rank)) return 'rgb({:.0f}, {:.0f}, {:.0f})'.format(r * rgb_max, g * rgb_max, b * rgb_max) return single_color_func class WordCloud(object): """Word cloud object for generating and drawing. Parameters ---------- font_path : string Font path to the font that will be used (OTF or TTF). Defaults to DroidSansMono path on a Linux machine. If you are on another OS or don't have this font, you need to adjust this path. width : int (default=400) Width of the canvas. hue : int (default=0) Hue for standard color map. height : int (default=200) Height of the canvas. prefer_horizontal : float (default=0.90) The ratio of times to try horizontal fitting as opposed to vertical. 
mask : nd-array or None (default=None) If not None, gives a binary mask on where to draw words. If mask is not None, width and height will be ignored and the shape of mask will be used instead. All white (#FF or #FFFFFF) entries will be considerd "masked out" while other entries will be free to draw on. [This changed in the most recent version!] scale : float (default=1) Scaling between computation and drawing. For large word-cloud images, using scale instead of larger canvas size is significantly faster, but might lead to a coarser fit for the words. min_font_size : int (default=4) Smallest font size to use. Will stop when there is no more room in this size. font_step : int (default=1) Step size for the font. font_step > 1 might speed up computation but give a worse fit. max_words : number (default=200) The maximum number of words. stopwords : set of strings The words that will be eliminated. background_color : color value (default="black") Background color for the word cloud image. max_font_size : int or None (default=None) Maximum font size for the largest word. If None, height of the image is used. mode : string (default="RGB") Transparent background will be generated when mode is "RGBA" and background_color is None. relative_scaling : float (default=0) Importance of relative word frequencies for font-size. With relative_scaling=0, only word-ranks are considered. With relative_scaling=1, a word that is twice as frequent will have twice the size. If you want to consider the word frequencies and not only their rank, relative_scaling around .5 often looks good. Attributes ---------- ``words_``: list of tuples (string, float) Word tokens with associated frequency. ``layout_`` : list of tuples (string, int, (int, int), int, color)) Encodes the fitted word cloud. Encodes for each word the string, font size, position, orientation and color. Notes ----- Larger canvases with make the code significantly slower. 
If you need a large word cloud, try a lower canvas size, and set the scale parameter. The algorithm might give more weight to the ranking of the words than their actual frequencies, depending on the ``max_font_size`` and the scaling heuristic. """ def __init__(self, font_path=None, width=400, hue=20, height=200, margin=2, ranks_only=None, prefer_horizontal=0.9, mask=None, scale=1, color_func=random_color_func, max_words=200, min_font_size=4, stopwords=None, random_state=None, background_color='black', max_font_size=None, font_step=1, mode="RGB", relative_scaling=0): if stopwords is None: stopwords = STOPWORDS if font_path is None: font_path = FONT_PATH if background_color.startswith('hue='): hue = background_color hue = re.sub("hue=","",hue) hue = int(hue) background_color = re.sub("hue=[0-9]+","",background_color) background_color = re.sub("^,","",background_color) if background_color is '': background_color = 'black' self.font_path = font_path self.width = width self.height = height self.margin = margin self.prefer_horizontal = prefer_horizontal self.mask = mask self.scale = scale self.hue = hue self.color_func = color_func self.max_words = max_words self.stopwords = stopwords self.min_font_size = min_font_size self.font_step = font_step if isinstance(random_state, int): random_state = Random(random_state) self.random_state = random_state self.background_color = background_color if max_font_size is None: max_font_size = height self.max_font_size = max_font_size self.mode = mode if relative_scaling < 0 or relative_scaling > 1: raise ValueError("relative_scaling needs to be between 0 and 1, got %f." % relative_scaling) self.relative_scaling = relative_scaling if ranks_only is not None: warnings.warn("ranks_only is deprecated and will be removed as" " it had no effect. Look into relative_scaling.", DeprecationWarning) def fit_words(self, frequencies): """Create a word_cloud from words and frequencies. Alias to generate_from_frequencies. 
Parameters ---------- frequencies : array of tuples A tuple contains the word and its frequency. Returns ------- self """ return self.generate_from_frequencies(frequencies) def generate_from_frequencies(self, frequencies): """Create a word_cloud from words and frequencies. Parameters ---------- frequencies : array of tuples A tuple contains the word and its frequency. Returns ------- self """ # make sure frequencies are sorted and normalized frequencies = sorted(frequencies, key=item1, reverse=True) frequencies = frequencies[:self.max_words] # largest entry will be 1 max_frequency = float(frequencies[0][1]) # for word, freq, rank in frequencies: # print("I: ", word, freq, rank) frequencies = [ (word, (float(freq)/max_frequency), float(rank)) for word, freq, rank in frequencies ] self.words_ = frequencies if self.random_state is not None: random_state = self.random_state else: random_state = Random() if len(frequencies) <= 0: print("We need at least 1 word to plot a word cloud, got %d." % len(frequencies)) if self.mask is not None: mask = self.mask width = mask.shape[1] height = mask.shape[0] if mask.dtype.kind == 'f': warnings.warn("mask image should be unsigned byte between 0 and" " 255. Got a float array") if mask.ndim == 2: boolean_mask = mask == 255 elif mask.ndim == 3: # if all channels are white, mask out boolean_mask = np.all(mask[:, :, :3] == 255, axis=-1) else: raise ValueError("Got mask of invalid shape: %s" % str(mask.shape)) else: boolean_mask = None height, width = self.height, self.width occupancy = IntegralOccupancyMap(height, width, boolean_mask) # create image img_grey = Image.new("L", (width, height)) draw = ImageDraw.Draw(img_grey) img_array = np.asarray(img_grey) font_sizes, positions, orientations, colors = [], [], [], [] font_size = self.max_font_size last_freq = 1. 
# start drawing grey image for word, freq, rank in frequencies: # select the font size rs = self.relative_scaling if rs != 0: font_size = int(round((rs * (freq / float(last_freq)) + (1 - rs)) * font_size)) while True: # try to find a position font = ImageFont.truetype(self.font_path, font_size) # transpose font optionally if random_state.random() < self.prefer_horizontal: orientation = None else: orientation = Image.ROTATE_90 transposed_font = ImageFont.TransposedFont(font, orientation=orientation) # get size of resulting text box_size = draw.textsize(word, font=transposed_font) # find possible places using integral image: result = occupancy.sample_position(box_size[1] + self.margin, box_size[0] + self.margin, random_state) if result is not None or font_size == 0: break # if we didn't find a place, make font smaller font_size -= self.font_step if font_size < self.min_font_size: # we were unable to draw any more break x, y = np.array(result) + self.margin // 2 # actually draw the text draw.text((y, x), word, fill="white", font=transposed_font) positions.append((x, y)) orientations.append(orientation) font_sizes.append(font_size) colors.append(self.color_func(word, font_size=font_size, position=(x, y), orientation=orientation, random_state=random_state, rank=rank, hue=self.hue, font_path=self.font_path)) # recompute integral image if self.mask is None: img_array = np.asarray(img_grey) else: img_array = np.asarray(img_grey) + boolean_mask # recompute bottom right # the order of the cumsum's is important for speed ?! occupancy.update(img_array, x, y) last_freq = freq self.layout_ = list(zip(frequencies, font_sizes, positions, orientations, colors)) return self def process_text_data(self, text): """Text input is triples (word,freq,rank) in text formatted as 'W1,F1,R1 W2,F2,R2' Parameters ---------- text : string The text to be processed. Returns ------- words : list of tuples (string, float, float) Word tokens with associated frequency, rank. 
""" words = text.split() freqs = [] for word in words: aa = word.split(",") freqs.append((aa[0],float(aa[1]),float(aa[2]))) return freqs def generate_from_text(self, text): """Generate wordcloud from text. Calls process_text and generate_from_frequencies. Returns ------- self """ words = self.process_text_data(text) self.generate_from_frequencies(words) return self def generate(self, text): """Generate wordcloud from text. Alias to generate_from_text. Calls generate_from_frequencies. Returns ------- self """ return self.generate_from_text(text) def _check_generated(self): """Check if ``layout_`` was computed, otherwise raise error.""" if not hasattr(self, "layout_"): raise ValueError("WordCloud has not been calculated, call generate first.") def to_image(self): self._check_generated() if self.mask is not None: width = self.mask.shape[1] height = self.mask.shape[0] else: height, width = self.height, self.width img = Image.new(self.mode, (int(width * self.scale), int(height * self.scale)), self.background_color) draw = ImageDraw.Draw(img) for (word, count, rank), font_size, position, orientation, color in self.layout_: font = ImageFont.truetype(self.font_path, int(font_size * self.scale)) transposed_font = ImageFont.TransposedFont(font, orientation=orientation) pos = (int(position[1] * self.scale), int(position[0] * self.scale)) draw.text(pos, word, fill=color, font=transposed_font) return img def recolor(self, random_state=None, color_func=None): """Recolor existing layout. Applying a new coloring is much faster than generating the whole wordcloud. Parameters ---------- random_state : RandomState, int, or None, default=None If not None, a fixed random state is used. If an int is given, this is used as seed for a random.Random state. color_func : function or None, default=None Function to generate new color from word count, font size, position and orientation. If None, self.color_func is used. 
Returns ------- self """ if isinstance(random_state, int): random_state = Random(random_state) self._check_generated() if color_func is None: color_func = self.color_func self.layout_ = [(word_freq, font_size, position, orientation, color_func(word=word_freq[0], font_size=font_size, position=position, orientation=orientation, random_state=random_state, rank=word_freq[2], font_path=self.font_path)) for word_freq, font_size, position, orientation, _ in self.layout_] return self def to_file(self, filename): """Export to image file. Parameters ---------- filename : string Location to write to. Returns ------- self """ img = self.to_image() img.save(filename) return self def to_array(self): """Convert to numpy array. Returns ------- image : nd-array size (width, height, 3) Word cloud image as numpy matrix. """ return np.array(self.to_image()) def __array__(self): """Convert to numpy array. Returns ------- image : nd-array size (width, height, 3) Word cloud image as numpy matrix. """ return self.to_array() def to_html(self): raise NotImplementedError("FIXME!!!")
{ "repo_name": "wbuntine/topic-models", "path": "HCA/scripts/wordcloud.py", "copies": "1", "size": "19288", "license": "mpl-2.0", "hash": -1363906104636043000, "line_mean": 35.6692015209, "line_max": 110, "alpha_frac": 0.56268146, "autogenerated": false, "ratio": 4.196692776327241, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.001884815213561661, "num_lines": 526 }
__author__ = 'Andreas Krohn (andreas.krohn@haw-hamburg.de)' import logging import pycares import select import traceback class PycaDns(object): """ >>> w = PycaDns() >>> w.ptr('8.8.8.8') >>> w.query_a('heise.de') >>> w.query_aaaa('heise.de') >>> w.query_a('time1.google.com') >>> w.query_aaaa('time1.google.com') >>> w.run() >>> print(sorted(w.results())) [('8.8.8.8', ['google-public-dns-a.google.com']), ('heise.de',\ ['193.99.144.80', '2a02:2e0:3fe:1001:302::']), ('time1.google.com',\ ['2001:4860:4802:32::f', '216.239.32.15'])] >>> w.query_ns('heise.de') >>> w.run() >>> print(sorted(w.results(True))) [('8.8.8.8', ['google-public-dns-a.google.com']), ('heise.de',\ ['193.99.144.80', '2a02:2e0:3fe:1001:302::', 'ns.heise.de',\ 'ns.plusline.de', 'ns.pop-hannover.de', 'ns.s.plusline.de',\ 'ns2.pop-hannover.net']), ('time1.google.com',\ ['2001:4860:4802:32::f', '216.239.32.15'])] >>> w.results() [] """ # TODO: what about this? # Traceback (most recent call last): # File "./src/pycadns.py", line 109, in _poll # self._channel.process_fd(read_fd, write_fd) # UnicodeDecodeError: 'utf-8' codec can't decode byte 0xe4 in # ..position 14: invalid continuation byte ARES_ENODATA = 1 ARES_EFORMERR = 2 ARES_ESERVFAIL = 3 ARES_ENOTFOUND = 4 ARES_ENOTIMP = 5 ARES_EREFUSED = 6 ARES_EBADQUERY = 7 ARES_EBADNAME = 8 ARES_EBADFAMILY = 9 ARES_EBADRESP = 10 ARES_ECONNREFUSED = 11 ARES_ETIMEOUT = 12 ARES_EOF = 13 ARES_EFILE = 14 ARES_ENOMEM = 15 ARES_EDESTRUCTION = 16 ARES_EBADSTR = 17 ARES_ECANCELLED = 24 def __init__(self, timeout: float=5, tries: int=4): self._channel = pycares.Channel(timeout=timeout, tries=tries) self._fd_map = {} self._queries = [] self._done = [] self._results = {} self._errors = set() def run(self): chan = self._channel while True: try: read_fds, write_fds = chan.getsock() if not read_fds and not write_fds: break timeout = chan.timeout() if not timeout: chan.process_fd(pycares.ARES_SOCKET_BAD, pycares.ARES_SOCKET_BAD) continue rlist, wlist, xlist = 
select.select(read_fds, write_fds, [], timeout) for fd in rlist: chan.process_fd(fd, pycares.ARES_SOCKET_BAD) for fd in wlist: chan.process_fd(pycares.ARES_SOCKET_BAD, fd) except: logging.error('Failure in pycares.run()\n%s', traceback.format_exc()) def ptr(self, ipaddress: str, callback=None): self._query(pycares.reverse_address(ipaddress), ipaddress, pycares.QUERY_TYPE_PTR, 'PTR%', callback) def ptrs(self, ipaddresses: list, callback=None): for i in ipaddresses: self.ptr(i, callback) def query_a(self, name: str, callback=None): self._query(name + '.' if name[-1] != '.' else name, name, pycares.QUERY_TYPE_A, 'A%', callback) def query_aaaa(self, name: str, callback=None): self._query(name + '.' if name[-1] != '.' else name, name, pycares.QUERY_TYPE_AAAA, 'AAAA%', callback) def query_ns(self, name: str, callback=None): self._query(name, name, pycares.QUERY_TYPE_NS, 'NS%', callback) def _query(self, name: str, original_name: str, query_type: int, query_prefix: str, callback=None): key = query_prefix + original_name if key in self._done: if callback: callback(original_name, self._results[original_name], None) return if key in self._queries: return self._queries.append(key) def context_callback(result, error): if not error and result: if original_name not in self._results: self._results[original_name] = sorted(result) else: self._results[original_name] = sorted( self._results[original_name] + result) self._done.append(key) self._queries.remove(key) if error: self._errors.add(error) if callback: callback(original_name, result, error) try: self._channel.query(name, query_type, context_callback) except: logging.error('Query: %s, Type: %d\n%s', name, query_type, traceback.format_exc()) raise def results(self, clear: bool=False) -> list: result = list(self._results.items()) if clear: self._results.clear() return result def errors(self, clear: bool=False) -> list: result = self._errors.copy() if clear: self._errors.clear() return result if __name__ == '__main__': import 
doctest doctest.testmod()
{ "repo_name": "hamburger1984/pycadns", "path": "src/pycadns.py", "copies": "1", "size": "5232", "license": "mit", "hash": 3015746169699168000, "line_mean": 32.3248407643, "line_max": 76, "alpha_frac": 0.5303899083, "autogenerated": false, "ratio": 3.4694960212201593, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9491454563343116, "avg_score": 0.0016862732354088132, "num_lines": 157 }
__author__ = 'Andreas M. Wahl'

import logging
import subprocess

import yaml
from pymongo import MongoClient

import configurator.util.util as util

logging.basicConfig(level=logging.WARN)
LOG = logging.getLogger(__name__)


class PersistenceManager:
    """Persists simulation results either to YAML files or to a MongoDB database."""

    def __init__(self, config):
        """
        :param config: dictionary containing database configuration information
                       (expects at least the key "db_name")
        """
        self.config = config
        # Database handle; stays None until start_db()/initialise_database_client() succeeds.
        self.db = None

    # file persistence:

    def persist_to_file(self, results, file_path):
        """
        Persist simulation results to a file in YAML format.

        :param results: simulation results to be persisted
        :param file_path: file to which the simulation results are written
        """
        # open() instead of the Python-2-only file() builtin; the context manager
        # guarantees the handle is closed even if yaml.dump raises.
        with open(file_path, 'w') as stream:
            yaml.dump(util.sanitize_results(results), stream)

    def retrieve_from_file(self, file_path):
        """
        Retrieve simulation results from a file in YAML format.

        :param file_path: file from which the simulation results are retrieved
        :return: retrieved simulation results
        """
        # NOTE(review): bare yaml.load can instantiate arbitrary Python objects; only
        # use this on trusted files produced by persist_to_file (yaml.safe_load would
        # be the safe choice if no custom tags are required).
        with open(file_path, 'r') as stream:
            return yaml.load(stream)

    # database persistence:

    def start_db(self):
        """Spawn a local mongod process, wait until it accepts connections, then connect."""
        # TODO(review): the mongod binary and data path are hard-coded to a developer
        # machine; they should come from self.config.
        command = "/home/amw/uni/m2etis/m2etis/dependencies/persistence/mongodb/bin/mongod --dbpath /home/amw/uni/m2etis/m2etis/dependencies/persistence/data"
        args = command.split(" ")
        mongod = subprocess.Popen(args, stdout=subprocess.PIPE)
        # Block until mongod reports that it is ready to accept connections.
        for line in iter(mongod.stdout.readline, ''):
            if "[initandlisten] waiting for connections on port" in line:
                break
        try:
            client = MongoClient()
            self.db = client[self.config["db_name"]]
        except Exception as ex:
            LOG.warning(str(ex))
            self.db = None

    def shutdown_db(self):
        """Terminate every running mongod process on this machine (coarse-grained)."""
        subprocess.Popen(["killall", "mongod"])

    def initialise_database_client(self):
        """Connect to an already running MongoDB instance on the default host/port."""
        try:
            client = MongoClient()
            self.db = client[self.config["db_name"]]
        except Exception as ex:
            LOG.warning(str(ex))
            self.db = None

    def persist_to_database(self, collection, results):
        """
        Persist simulation results to the database configured in the class constructor.

        :param collection: name of the target collection
        :param results: simulation results to be persisted
        """
        if self.db is not None:
            # NOTE(review): Collection.insert is deprecated in modern pymongo
            # (insert_one/insert_many); kept for compatibility with the pinned driver.
            self.db[collection].insert(results)  # util.sanitize_results(results))
        else:
            LOG.warning("No database connection specified. Aborting current database operation.")

    def retrieve_from_database(self, collection, query, projection, sort):
        """
        Retrieve simulation results from the database configured in the class constructor.

        :param collection: name of the collection to query
        :param query: dictionary containing keys to be used for querying the database
            for suitable documents
        :param projection: dictionary with simulation result fields to be included in the
            query output. field value of "1" means include, "0" means omit the field
        :param sort: list of (field, direction) tuples determining the order of the output;
            direction "1" means sort ascending, "-1" means sort descending
        :return: cursor over the retrieved simulation results, or an empty list when
            no database connection is available
        """
        if self.db is not None:
            return self.db[collection].find(query, projection).sort(sort)
        else:
            LOG.warning("No database connection specified. Aborting current database operation.")
            return []

    def clear_database(self, collection):
        """
        Delete all entries from the given collection of the configured database.

        :param collection: name of the collection to clear
        """
        if self.db is not None:
            self.db[collection].remove()
        else:
            LOG.warning("No database connection specified. Aborting current database operation.")

    # TODO: currently intensionally not working
    def retrieve_from_database_corresponding_results(self, tag, x_param, y_param):
        """
        Retrieve two-dimensional corresponding and sorted simulation results from the
        database configured in the class constructor.

        :param tag: tag of the simulation results to be retrieved
        :param x_param: name of the x parameter ("category.item" dotted notation)
        :param y_param: name of the y parameter ("category.item" dotted notation)
        :return: tuple of two lists containing the x and y values of the retrieved results
        """
        if self.db is not None:
            cursor = self.retrieve_from_database(self.config["collection_name"], {"tag": tag},
                                                 {"_id": 0, x_param: 1, y_param: 1},
                                                 [(x_param, 1)])
            x_list = []
            y_list = []
            for doc in cursor:
                x_category = x_param.split('.')[0]
                x_item = x_param.split('.')[1]
                x_list.append(doc[x_category][x_item])
                y_category = y_param.split('.')[0]
                y_item = y_param.split('.')[1]
                y_list.append(doc[y_category][y_item])
            return x_list, y_list
        else:
            LOG.warning("No database connection specified. Aborting current database operation.")
            return [], []
{ "repo_name": "ClockworkOrigins/m2etis", "path": "configurator/configurator/persistence/PersistenceManager.py", "copies": "1", "size": "5140", "license": "apache-2.0", "hash": 6067373990857754000, "line_mean": 38.8527131783, "line_max": 209, "alpha_frac": 0.6278210117, "autogenerated": false, "ratio": 4.454072790294627, "config_test": true, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.0026660873444305923, "num_lines": 129 }
__author__ = 'Andreas M. Wahl'

import matplotlib.pyplot as plt
import configurator.util.util as util
from matplotlib import cm
from mpl_toolkits.mplot3d import *
import numpy as np


class Plotter:
    """Renders 2D and 3D plots of simulation results from memory, YAML files or MongoDB."""

    def __init__(self, persistence=None):
        """
        :param persistence: PersistenceManager used to fetch results from files/database
        """
        self.persistence = persistence

    def plot2d_from_memory(self, results, x_param, y_param, output_filename, title="", x_label="", y_label=""):
        """
        Plot two-dimensional simulation results from a python data structure in memory.

        :param results: simulation results to be plotted
        :param x_param: parameter of the simulation results to be used on the x axis
        :param y_param: parameter of the simulation results to be used on the y axis
        :param output_filename: file to write the plot to
        :param title: title of the plot
        :param x_label: name of the plot x axis
        :param y_label: name of the plot y axis
        """
        params = self._get_corresponding_results(util.sanitize_results(results), x_param, y_param)
        self._plot2d(util.strip_unit(params[0]), util.strip_unit(params[1]),
                     output_filename, title, x_label, y_label)

    def plot2d_from_file(self, input_filename, x_param, y_param, output_filename, title="",
                         x_label="", y_label="", c_label=""):
        """
        Plot two-dimensional simulation results from a file in YAML format.

        :param input_filename: file containing the simulation results to be plotted
        :param x_param: parameter of the simulation results to be used on the x axis
        :param y_param: parameter of the simulation results to be used on the y axis
        :param output_filename: file to write the plot to
        :param title: title of the plot
        :param x_label: name of the plot x axis
        :param y_label: name of the plot y axis
        :param c_label: legend label for the plotted curve
        """
        results = self.persistence.retrieve_from_file(input_filename)
        params = self._get_corresponding_results(results, x_param, y_param)
        self._plot2d(util.strip_unit(params[0]), util.strip_unit(params[1]),
                     output_filename, title, x_label, y_label, c_label)

    def plot2d_from_database(self, tag, x_param, y_param, filter, output_filename, title="",
                             x_label="", y_label=""):
        """
        Plot two-dimensional simulation results from a database.

        :param tag: custom tag of the simulation results to be plotted
        :param x_param: parameter of the simulation results to be used on the x axis
        :param y_param: parameter of the simulation results to be used on the y axis
        :param filter: dict with additional query criteria merged into the tag query
        :param output_filename: file to write the plot to
        :param title: title of the plot
        :param x_label: name of the plot x axis
        :param y_label: name of the plot y axis
        """
        # NOTE(review): dict(a.items() + b.items()) requires Python 2 -- on Python 3
        # dict_items objects do not support '+'.
        results = self.persistence.retrieve_from_database(
            self.persistence.config["collection_name"],
            dict({"tag": tag}.items() + filter.items()),
            {"_id": 0, x_param: 1, y_param: 1},
            [(x_param, 1)])
        xs, ys = self._get_corresponding_results(results, x_param, y_param)
        # Debug output of the raw series (print() form works on both Python 2 and 3).
        print(xs)
        print(ys)
        self._plot2d(util.strip_unit(xs), util.strip_unit(ys), output_filename, title, x_label, y_label)

    def plot3d_from_database(self, tag, x_param, y_param, z_param, filter, output_filename,
                             title="", x_label="", y_label="", z_label=""):
        """Plot three-dimensional simulation results retrieved from the database."""
        results = self.persistence.retrieve_from_database(
            self.persistence.config["collection_name"],
            dict({"tag": tag}.items() + filter.items()),
            {"_id": 0, x_param: 1, y_param: 1, z_param: 1},
            [(x_param, 1), (y_param, 1)])
        xs, ys, zs = self._get_corresponding_results_3d(results, x_param, y_param, z_param)
        self._plot3d(util.strip_unit(xs), util.strip_unit(ys), util.strip_unit(zs),
                     output_filename, title, x_label, y_label, z_label)

    def _plot3d(self, xs, ys, zs, filename, title="", x_label="", y_label="", z_label="", c_label=""):
        """Render a 3D surface + scatter plot and save it to *filename*."""
        fig = plt.figure()
        # NOTE(review): gca(projection='3d') is deprecated in recent matplotlib
        # releases (use fig.add_subplot(projection='3d') there).
        ax = fig.gca(projection='3d')  # to work in 3d
        # Legacy 'hold' flag; assigning a string to the module attribute is a no-op
        # on modern matplotlib, kept for behavioural parity.
        plt.hold = 'True'

        xs, ys = np.meshgrid(xs, ys)
        ax.plot_surface(xs, ys, zs, cmap=cm.hot)
        ax.scatter(xs, ys, zs)

        ax.set_xlabel(x_label)
        ax.set_ylabel(y_label)
        ax.set_zlabel(z_label)
        #plt.close()
        plt.savefig(filename)

    def _plot2d(self, xs, ys, filename, title="", x_label="", y_label="", c_label=""):
        """
        Plot two-dimensional data to a file.

        :param xs: list of x values
        :param ys: list of y values
        :param filename: file to write the plot to
        :param title: title of the plot
        :param x_label: name of the plot x axis
        :param y_label: name of the plot y axis
        :param c_label: legend label for the curve
        """
        plt.plot(xs, ys, label=c_label)
        plt.xlabel(x_label)
        plt.ylabel(y_label)
        plt.suptitle(title)
        plt.savefig(filename)
        #plt.legend()
        plt.close()
        # NOTE(review): show() after close() displays nothing; kept as in the original.
        plt.show()

    def _close(self):
        """Close the current matplotlib figure."""
        plt.close()

    def _get_corresponding_results(self, results, x_param, y_param):
        """Extract ('category.item') x/y value pairs from *results*, co-sorted by x."""
        resultList = [[] for _ in range(0, 2)]
        for result in results:
            for position, parameter in enumerate([x_param, y_param]):
                category = parameter.split('.')[0]
                item = parameter.split('.')[1]
                resultList[position].append(float(util.strip_unit([result[category][item]])[0]))
        #print resultList
        return zip(*sorted(zip(*resultList)))

    def _get_corresponding_results_3d(self, results, x_param, y_param, z_param):
        """Extract ('category.item') x/y/z value triples from *results*, co-sorted."""
        resultList = [[] for _ in range(0, 3)]
        for result in results:
            for position, parameter in enumerate([x_param, y_param, z_param]):
                #print position, parameter
                category = parameter.split('.')[0]
                item = parameter.split('.')[1]
                resultList[position].append(float(util.strip_unit([result[category][item]])[0]))
        return zip(*sorted(zip(*resultList)))
{ "repo_name": "ClockworkOrigins/m2etis", "path": "configurator/configurator/visualization/Plotter.py", "copies": "1", "size": "6177", "license": "apache-2.0", "hash": 5376084422401489000, "line_mean": 41.8958333333, "line_max": 138, "alpha_frac": 0.5918730775, "autogenerated": false, "ratio": 3.7572992700729926, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4849172347572992, "avg_score": null, "num_lines": null }
__author__ = 'andreasveit'
__version__ = '1.1'
# Interface for accessing the COCO-Text dataset.

# COCO-Text is a large dataset designed for text detection and recognition.
# This is a Python API that assists in loading, parsing and visualizing the
# annotations. The format of the COCO-Text annotations is also described on
# the project website http://vision.cornell.edu/se3/coco-text/. In addition to this API, please download both
# the COCO images and annotations.
# This dataset is based on Microsoft COCO. Please visit http://mscoco.org/
# for more information on COCO, including for the image data, object annotatins
# and caption annotations.

# An alternative to using the API is to load the annotations directly
# into Python dictionary:
# with open(annotation_filename) as json_file:
#     coco_text = json.load(json_file)
# Using the API provides additional utility functions.

# The following API functions are defined:
#  COCO_Text  - COCO-Text api class that loads COCO annotations and prepare data structures.
#  getAnnIds  - Get ann ids that satisfy given filter conditions.
#  getImgIds  - Get img ids that satisfy given filter conditions.
#  loadAnns   - Load anns with the specified ids.
#  loadImgs   - Load imgs with the specified ids.
#  showAnns   - Display the specified annotations.
#  loadRes    - Load algorithm results and create API for accessing them.
# Throughout the API "ann"=annotation, "cat"=category, and "img"=image.

# COCO-Text Toolbox.        Version 1.1
# Data and paper available at:  http://vision.cornell.edu/se3/coco-text/
# Code based on Microsoft COCO Toolbox Version 1.0 by Piotr Dollar and Tsung-Yi Lin
# extended and adapted by Andreas Veit, 2016.
# Licensed under the Simplified BSD License [see bsd.txt]

import json
import datetime
import matplotlib.pyplot as plt
from matplotlib.collections import PatchCollection
from matplotlib.patches import Rectangle
import numpy as np
import copy
import os


class COCO_Text:
    def __init__(self, annotation_file=None):
        """
        Constructor of COCO-Text helper class for reading and visualizing annotations.
        :param annotation_file (str): location of annotation file
        :return:
        """
        # load dataset
        self.dataset = {}
        self.anns = {}
        self.imgToAnns = {}
        self.catToImgs = {}
        self.imgs = {}
        self.cats = {}
        self.val = []
        self.train = []
        if annotation_file is not None:
            # NOTE(review): assert is stripped under -O; kept for interface parity
            # (callers may rely on AssertionError).
            assert os.path.isfile(annotation_file), "file does not exist"
            print('loading annotations into memory...')
            time_t = datetime.datetime.utcnow()
            dataset = json.load(open(annotation_file, 'r'))
            print(datetime.datetime.utcnow() - time_t)
            self.dataset = dataset
            self.createIndex()

    def createIndex(self):
        """Build integer-keyed lookup tables from the raw JSON dataset."""
        # create index
        print('creating index...')
        self.imgToAnns = {int(cocoid): self.dataset['imgToAnns'][cocoid] for cocoid in self.dataset['imgToAnns']}
        self.imgs = {int(cocoid): self.dataset['imgs'][cocoid] for cocoid in self.dataset['imgs']}
        self.anns = {int(annid): self.dataset['anns'][annid] for annid in self.dataset['anns']}
        self.cats = self.dataset['cats']
        self.val = [int(cocoid) for cocoid in self.dataset['imgs'] if self.dataset['imgs'][cocoid]['set'] == 'val']
        self.train = [int(cocoid) for cocoid in self.dataset['imgs'] if self.dataset['imgs'][cocoid]['set'] == 'train']
        print('index created!')

    def info(self):
        """
        Print information about the annotation file.
        :return:
        """
        for key, value in self.dataset['info'].items():
            print('%s: %s'%(key, value))

    def filtering(self, filterDict, criteria):
        """Return the keys of *filterDict* whose values satisfy every criterion callable."""
        return [key for key in filterDict if all(criterion(filterDict[key]) for criterion in criteria)]

    def getAnnByCat(self, properties):
        """
        Get ann ids that satisfy given properties
        :param properties (list of tuples of the form [(category type, category)]
               e.g., [('readability','readable')]
               : get anns for given categories - anns have to satisfy all given property tuples
        :return: ids (int array)       : integer array of ann ids
        """
        # x=a, y=b default-argument binding avoids the late-binding-closure pitfall.
        return self.filtering(self.anns, [lambda d, x=a, y=b: d[x] == y for (a, b) in properties])

    def getAnnIds(self, imgIds=[], catIds=[], areaRng=[]):
        """
        Get ann ids that satisfy given filter conditions. default skips that filter
        :param imgIds  (int array)     : get anns for given imgs
               catIds  (list of tuples of the form [(category type, category)]
               e.g., [('readability','readable')]
               : get anns for given cats
               areaRng (float array)   : get anns for given area range (e.g. [0 inf])
        :return: ids (int array)       : integer array of ann ids
        """
        imgIds = imgIds if type(imgIds) == list else [imgIds]
        catIds = catIds if type(catIds) == list else [catIds]

        if len(imgIds) == len(catIds) == len(areaRng) == 0:
            anns = self.anns.keys()
        else:
            if not len(imgIds) == 0:
                # Flatten the per-image annotation id lists.
                anns = sum([self.imgToAnns[imgId] for imgId in imgIds if imgId in self.imgToAnns], [])
            else:
                anns = self.anns.keys()
            anns = anns if len(catIds) == 0 else list(set(anns).intersection(set(self.getAnnByCat(catIds))))
            anns = anns if len(areaRng) == 0 else [ann for ann in anns if
                                                   self.anns[ann]['area'] > areaRng[0] and
                                                   self.anns[ann]['area'] < areaRng[1]]
        return anns

    def getImgIds(self, imgIds=[], catIds=[]):
        '''
        Get img ids that satisfy given filter conditions.
        :param imgIds (int array) : get imgs for given ids
        :param catIds (int array) : get imgs with all given cats
        :return: ids (int array)  : integer array of img ids
        '''
        imgIds = imgIds if type(imgIds) == list else [imgIds]
        catIds = catIds if type(catIds) == list else [catIds]

        if len(imgIds) == len(catIds) == 0:
            ids = self.imgs.keys()
        else:
            ids = set(imgIds)
            if not len(catIds) == 0:
                ids = ids.intersection(set([self.anns[annid]['image_id'] for annid in self.getAnnByCat(catIds)]))
        return list(ids)

    def loadAnns(self, ids=[]):
        """
        Load anns with the specified ids.
        :param ids (int array)       : integer ids specifying anns
        :return: anns (object array) : loaded ann objects
        """
        if type(ids) == list:
            return [self.anns[id] for id in ids]
        elif type(ids) == int:
            return [self.anns[ids]]

    def loadImgs(self, ids=[]):
        """
        Load anns with the specified ids.
        :param ids (int array)       : integer ids specifying img
        :return: imgs (object array) : loaded img objects
        """
        if type(ids) == list:
            return [self.imgs[id] for id in ids]
        elif type(ids) == int:
            return [self.imgs[ids]]

    def showAnns(self, anns):
        """
        Display the specified annotations.
        :param anns (array of object): annotations to display
        :return: None
        """
        if len(anns) == 0:
            return 0
        ax = plt.gca()
        rectangles = []
        color = []
        for ann in anns:
            # Random per-annotation color.
            c = np.random.random((1, 3)).tolist()[0]
            left, top, width, height = ann['bbox']
            rectangles.append(Rectangle([left, top], width, height, alpha=0.4))
            color.append(c)
            if 'utf8_string' in ann.keys():
                ax.annotate(ann['utf8_string'], (left, top - 4), color=c)
        p = PatchCollection(rectangles, facecolors=color, edgecolors=(0, 0, 0, 1),
                            linewidths=3, alpha=0.4)
        ax.add_collection(p)

    def loadRes(self, resFile):
        """
        Load result file and return a result api object.
        :param resFile (str)     : file name of result file
        :return: res (obj)       : result api object
        """
        res = COCO_Text()
        # NOTE(review): iterating self.dataset['imgs'] (a dict) yields its keys, so this
        # stores a list of image ids, not image records -- confirm intent before changing.
        res.dataset['imgs'] = [img for img in self.dataset['imgs']]

        print('Loading and preparing results...     ')
        time_t = datetime.datetime.utcnow()
        if type(resFile) == str:
            anns = json.load(open(resFile))
        else:
            anns = resFile
        assert type(anns) == list, 'results is not an array of objects'
        annsImgIds = [int(ann['image_id']) for ann in anns]
        if set(annsImgIds) != (set(annsImgIds) & set(self.getImgIds())):
            print('Results do not correspond to current coco set')
            print('skipping ', str(len(set(annsImgIds)) - len(set(annsImgIds) & set(self.getImgIds()))), ' images')
            annsImgIds = list(set(annsImgIds) & set(self.getImgIds()))
        res.imgToAnns = {cocoid: [] for cocoid in annsImgIds}
        res.imgs = {cocoid: self.imgs[cocoid] for cocoid in annsImgIds}
        assert anns[0]['bbox'] != [], 'results have incorrect format'
        for id, ann in enumerate(anns):
            if ann['image_id'] not in annsImgIds:
                continue
            bb = ann['bbox']
            ann['area'] = bb[2]*bb[3]
            ann['id'] = id
            res.anns[id] = ann
            res.imgToAnns[ann['image_id']].append(id)
        print('DONE (t=%0.2fs)'%((datetime.datetime.utcnow() - time_t).total_seconds()))
        return res
{ "repo_name": "NehaTelhan/CompVisionFinalProj", "path": "coco_text.py", "copies": "1", "size": "9765", "license": "mit", "hash": 7490864213190402000, "line_mean": 41.9864864865, "line_max": 150, "alpha_frac": 0.5853558628, "autogenerated": false, "ratio": 3.675197591268348, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9743258583221148, "avg_score": 0.0034589741694398077, "num_lines": 222 }
__author__ = 'andreasveit'
__version__ = '1.3'
# Interface for evaluating with the COCO-Text dataset.

# COCO-Text is a large dataset designed for text detection and recognition.
# This is a Python API that assists in evaluating text detection and recognition results
# on COCO-Text. The format of the COCO-Text annotations is described on
# the project website http://vision.cornell.edu/se3/coco-text/. In addition to this evaluation API, please download
# the COCO-Text tool API, both the COCO images and annotations.
# This dataset is based on Microsoft COCO. Please visit http://mscoco.org/
# for more information on COCO, including for the image data, object annotatins
# and caption annotations.

# The following functions are defined:
#  getDetections          - Compute TP, FN and FP
#  evaluateAttribute      - Evaluates accuracy for classifying text attributes
#  evaluateTranscription  - Evaluates accuracy of transcriptions
#  area, intersect, iou_score, decode, inter - small helper functions
#  printDetailedResults   - Prints detailed results as reported in COCO-Text paper

# COCO-Text Evaluation Toolbox.     Version 1.3
# Data, Data API and paper available at:  http://vision.cornell.edu/se3/coco-text/
# Code written by Andreas Veit, 2016.
# Licensed under the Simplified BSD License [see bsd.txt]

import editdistance
import copy
import re


# Compute detections
def getDetections(groundtruth, evaluation, imgIds=None, annIds=[], detection_threshold=0.5):
    """
    Match detections against ground truth: a box is a match iff the
    intersection over union score is >= detection_threshold.

    Params
    ------
    Input dicts have the format of annotation dictionaries.
    Returns a dict with 'true_positives', 'false_negatives' and 'false_positives'
    lists of {gt_id: ..., eval_id: ...} records.
    """
    # results are lists of dicts {gt_id: xxx, eval_id: yyy}
    detectRes = {}
    detectRes['true_positives'] = []
    detectRes['false_negatives'] = []
    detectRes['false_positives'] = []

    # the default is set to evaluate on the validation set
    if imgIds is None:
        imgIds = groundtruth.val
    imgIds = imgIds if len(imgIds) > 0 else inter(groundtruth.imgToAnns.keys(), evaluation.imgToAnns.keys())

    for cocoid in imgIds:
        gt_bboxes = groundtruth.imgToAnns[cocoid] if cocoid in groundtruth.imgToAnns else []
        # copy so matched boxes can be removed without mutating the evaluation object
        eval_bboxes = copy.copy(evaluation.imgToAnns[cocoid]) if cocoid in evaluation.imgToAnns else []
        for gt_box_id in gt_bboxes:
            gt_box = groundtruth.anns[gt_box_id]['bbox']
            max_iou = 0.0
            match = None
            # greedy match: best-overlapping unmatched detection wins
            for eval_box_id in eval_bboxes:
                eval_box = evaluation.anns[eval_box_id]['bbox']
                iou = iou_score(gt_box, eval_box)
                if iou >= detection_threshold and iou > max_iou:
                    max_iou = iou
                    match = eval_box_id
            if match is not None:
                detectRes['true_positives'].append({'gt_id': gt_box_id, 'eval_id': match})
                eval_bboxes.remove(match)
            else:
                detectRes['false_negatives'].append({'gt_id': gt_box_id})
        # every unmatched detection left over is a false positive
        if len(eval_bboxes) > 0:
            detectRes['false_positives'].extend([{'eval_id': eval_box_id} for eval_box_id in eval_bboxes])
    return detectRes


def evaluateAttribute(groundtruth, evaluation, resultDict, attributes):
    '''
    Evaluate per-attribute classification accuracy over the true positives.

    Input:
    groundtruth_Dict: dict, AnnFile format
    evalDict: dict, AnnFile format
    resultDict: dict, output from getDetections
    attributes : list of strings, attribute categories
    -----
    Output: dict mapping attribute name -> {'attribute', 'correct', 'incorrect', 'accuracy'}
    '''
    assert 'utf8_string' not in attributes, 'there is a separate function for utf8_string'
    res = {}
    for attribute in attributes:
        correct = []
        incorrect = []
        for detection in resultDict['true_positives']:
            gt_val = groundtruth.anns[detection['gt_id']][attribute]
            eval_val = evaluation.anns[detection['eval_id']][attribute]
            if gt_val == eval_val:
                correct.append(detection)
            else:
                # 'na' ground-truth values are excluded from the denominator
                if gt_val != 'na':
                    incorrect.append(detection)
        # NOTE(review): raises ZeroDivisionError when there are no scored detections.
        res[attribute] = {'attribute': attribute, 'correct': len(correct),
                          'incorrect': len(incorrect),
                          'accuracy': len(correct)*1.0/len(correct+incorrect)}
    return res


def evaluateEndToEnd(groundtruth, evaluation, imgIds=None, annIds=[], detection_threshold=0.5):
    """
    End-to-end evaluation: detection matching (IoU >= detection_threshold,
    preferring exact transcription matches) followed by transcription scoring
    at edit distance 0 ('exact') and <=1 ('distance1').

    Params
    ------
    Input dicts have the format of annotation dictionaries.
    """
    # results are lists of dicts {gt_id: xxx, eval_id: yyy}
    detectRes = {}
    detectRes['true_positives'] = []
    detectRes['false_negatives'] = []
    detectRes['false_positives'] = []

    # the default is set to evaluate on the validation set
    if imgIds is None:
        imgIds = groundtruth.val
    imgIds = imgIds if len(imgIds) > 0 else inter(groundtruth.imgToAnns.keys(), evaluation.imgToAnns.keys())

    for cocoid in imgIds:
        gt_bboxes = groundtruth.imgToAnns[cocoid] if cocoid in groundtruth.imgToAnns else []
        eval_bboxes = copy.copy(evaluation.imgToAnns[cocoid]) if cocoid in evaluation.imgToAnns else []
        for gt_box_id in gt_bboxes:
            gt_box = groundtruth.anns[gt_box_id]['bbox']
            # ground-truth boxes without a transcription are skipped entirely
            if 'utf8_string' not in groundtruth.anns[gt_box_id]:
                continue
            gt_val = decode(groundtruth.anns[gt_box_id]['utf8_string'])
            max_iou = 0.0
            match = None
            for eval_box_id in eval_bboxes:
                eval_box = evaluation.anns[eval_box_id]['bbox']
                iou = iou_score(gt_box, eval_box)
                if iou >= detection_threshold and iou > max_iou:
                    max_iou = iou
                    match = eval_box_id
                    # stop early on an exact transcription match
                    if 'utf8_string' in evaluation.anns[eval_box_id]:
                        eval_val = decode(evaluation.anns[eval_box_id]['utf8_string'])
                        if editdistance.eval(gt_val, eval_val) == 0:
                            break
            if match is not None:
                detectRes['true_positives'].append({'gt_id': gt_box_id, 'eval_id': match})
                eval_bboxes.remove(match)
            else:
                detectRes['false_negatives'].append({'gt_id': gt_box_id})
        if len(eval_bboxes) > 0:
            detectRes['false_positives'].extend([{'eval_id': eval_box_id} for eval_box_id in eval_bboxes])

    resultDict = detectRes
    res = {}
    for setting, threshold in zip(['exact', 'distance1'], [0, 1]):
        correct = []
        incorrect = []
        ignore = []
        for detection in resultDict['true_positives']:
            if 'utf8_string' not in groundtruth.anns[detection['gt_id']]:
                ignore.append(detection)
                continue
            gt_val = decode(groundtruth.anns[detection['gt_id']]['utf8_string'])
            # short ground-truth strings (<3 chars) are ignored by convention
            if len(gt_val) < 3:
                ignore.append(detection)
                continue
            if 'utf8_string' not in evaluation.anns[detection['eval_id']]:
                incorrect.append(detection)
                continue
            eval_val = decode(evaluation.anns[detection['eval_id']]['utf8_string'])
            detection['gt_string'] = gt_val
            detection['eval_string'] = eval_val
            if editdistance.eval(gt_val, eval_val) <= threshold:
                correct.append(detection)
            else:
                incorrect.append(detection)
        # NOTE(review): raises ZeroDivisionError when every detection is ignored.
        res[setting] = {'setting': setting, 'correct': correct, 'incorrect': incorrect,
                        'ignore': ignore,
                        'accuracy': len(correct)*1.0/len(correct+incorrect)}
    return res


def area(bbox):
    """Area of a [left, top, width, height] box."""
    return bbox[2] * 1.0 * bbox[3]  # width * height


def intersect(bboxA, bboxB):
    """Return a new bounding box that contains the intersection of
    'self' and 'other', or None if there is no intersection
    """
    new_top = max(bboxA[1], bboxB[1])
    new_left = max(bboxA[0], bboxB[0])
    new_right = min(bboxA[0]+bboxA[2], bboxB[0]+bboxB[2])
    new_bottom = min(bboxA[1]+bboxA[3], bboxB[1]+bboxB[3])
    if new_top < new_bottom and new_left < new_right:
        return [new_left, new_top, new_right - new_left, new_bottom - new_top]
    return None


def iou_score(bboxA, bboxB):
    """Returns the Intersection-over-Union score, defined as the area of
    the intersection divided by the intersection over the union of
    the two bounding boxes. This measure is symmetric.
    """
    if intersect(bboxA, bboxB):
        intersection_area = area(intersect(bboxA, bboxB))
    else:
        intersection_area = 0
    union_area = area(bboxA) + area(bboxB) - intersection_area
    if union_area > 0:
        return float(intersection_area) / float(union_area)
    else:
        return 0


def decode(trans):
    """Normalize a transcription: drop non-ASCII, collapse newlines,
    keep only [a-zA-Z0-9!?@_-+*:&/ .] and lowercase."""
    # BUGFIX: the original did str.encode(...) and then bytes.replace('\n', ' '),
    # which raises TypeError on Python 3 (str arguments on a bytes object).
    # Decoding back to str keeps the subsequent str operations valid on both versions.
    trans = trans.encode("ascii", 'ignore').decode("ascii")
    trans = trans.replace('\n', ' ')
    trans2 = re.sub('[^a-zA-Z0-9!?@\_\-\+\*\:\&\/ \.]', '', trans)
    return trans2.lower()


def inter(list1, list2):
    """Intersection of two iterables, as a list."""
    return list(set(list1).intersection(set(list2)))


def printDetailedResults(c_text, detection_results, transcription_results, name):
    """Print the per-category recall/precision/transcription breakdown used in the
    COCO-Text paper for one method *name*."""
    print(name)
    # detected coco-text annids
    found = [x['gt_id'] for x in detection_results['true_positives']]
    n_found = [x['gt_id'] for x in detection_results['false_negatives']]
    fp = [x['eval_id'] for x in detection_results['false_positives']]

    # annotation-id pools per legibility / language / class combination
    leg_eng_mp = c_text.getAnnIds(imgIds=[], catIds=[('legibility','legible'),('language','english'),('class','machine printed')], areaRng=[])
    leg_eng_hw = c_text.getAnnIds(imgIds=[], catIds=[('legibility','legible'),('language','english'),('class','handwritten')], areaRng=[])
    leg_mp = c_text.getAnnIds(imgIds=[], catIds=[('legibility','legible'),('class','machine printed')], areaRng=[])
    ileg_mp = c_text.getAnnIds(imgIds=[], catIds=[('legibility','illegible'),('class','machine printed')], areaRng=[])
    leg_hw = c_text.getAnnIds(imgIds=[], catIds=[('legibility','legible'),('class','handwritten')], areaRng=[])
    ileg_hw = c_text.getAnnIds(imgIds=[], catIds=[('legibility','illegible'),('class','handwritten')], areaRng=[])
    leg_ot = c_text.getAnnIds(imgIds=[], catIds=[('legibility','legible'),('class','others')], areaRng=[])
    ileg_ot = c_text.getAnnIds(imgIds=[], catIds=[('legibility','illegible'),('class','others')], areaRng=[])

    # Detection
    print()
    print("Detection")
    print("Recall")
    if (len(inter(found+n_found, leg_mp))) > 0:
        lm = "%.2f"%(100*len(inter(found, leg_mp))*1.0/(len(inter(found+n_found, leg_mp))))
    else:
        lm = 0
    print('legible & machine printed: ', lm)
    if (len(inter(found+n_found, leg_hw))) > 0:
        lh = "%.2f"%(100*len(inter(found, leg_hw))*1.0/(len(inter(found+n_found, leg_hw))))
    else:
        lh = 0
    print('legible & handwritten: ', lh)
    if (len(inter(found+n_found, leg_ot))) > 0:
        lo = "%.2f"%(100*len(inter(found, leg_ot))*1.0/(len(inter(found+n_found, leg_ot))))
    else:
        lo = 0
    # print 'legible & others: ', lo
    if (len(inter(found+n_found, leg_mp+leg_hw))) > 0:
        lto = "%.2f"%(100*len(inter(found, leg_mp+leg_hw))*1.0/(len(inter(found+n_found, leg_mp+leg_hw))))
    else:
        lto = 0
    print('legible overall: ', lto)
    if (len(inter(found+n_found, ileg_mp))) > 0:
        ilm = "%.2f"%(100*len(inter(found, ileg_mp))*1.0/(len(inter(found+n_found, ileg_mp))))
    else:
        ilm = 0
    print('illegible & machine printed: ', ilm)
    if (len(inter(found+n_found, ileg_hw))) > 0:
        ilh = "%.2f"%(100*len(inter(found, ileg_hw))*1.0/(len(inter(found+n_found, ileg_hw))))
    else:
        ilh = 0
    print('illegible & handwritten: ', ilh)
    if (len(inter(found+n_found, ileg_ot))) > 0:
        ilo = "%.2f"%(100*len(inter(found, ileg_ot))*1.0/(len(inter(found+n_found, ileg_ot))))
    else:
        ilo = 0
    # print 'illegible & others: ', ilo
    if (len(inter(found+n_found, ileg_mp+ileg_hw))) > 0:
        ilto = "%.2f"%(100*len(inter(found, ileg_mp+ileg_hw))*1.0/(len(inter(found+n_found, ileg_mp+ileg_hw))))
    else:
        ilto = 0
    print('illegible overall: ', ilto)
    #total = "%.1f"%(100*len(found)*1.0/(len(found)+len(n_found)))
    # NOTE(review): raises ZeroDivisionError if no ground-truth ids fall in these pools.
    t_recall = 100*len(found)*1.0/(len(inter(found+n_found, leg_mp+leg_hw+ileg_mp+ileg_hw)))
    total = "%.1f"%(t_recall)
    print('total recall: ', total)

    print("Precision")
    t_precision = 100*len(found)*1.0/(len(found+fp))
    precision = "%.2f"%(t_precision)
    print('total precision: ', precision)

    print("f-score")
    f_score = "%.2f"%(2 * t_recall * t_precision / (t_recall + t_precision)) if (t_recall + t_precision) > 0 else 0
    print('f-score localization: ', f_score)

    print()
    print("Transcription")
    transAcc = "%.2f"%(100*transcription_results['exact']['accuracy'])
    transAcc1 = "%.2f"%(100*transcription_results['distance1']['accuracy'])
    print('accuracy for exact matches: ', transAcc)
    print('accuracy for matches with edit distance<=1: ', transAcc1)

    print()
    print('End-to-end')
    # expected TP/FP/FN after weighting detections by transcription accuracy
    TP_new = len(inter(found, leg_eng_mp+leg_eng_hw)) * transcription_results['exact']['accuracy']
    FP_new = len(fp) + len(inter(found, leg_eng_mp+leg_eng_hw))*(1-transcription_results['exact']['accuracy'])
    FN_new = len(inter(n_found, leg_eng_mp+leg_eng_hw)) + len(inter(found, leg_eng_mp+leg_eng_hw))*(1-transcription_results['exact']['accuracy'])
    t_recall_new = 100 * TP_new / (TP_new + FN_new)
    t_precision_new = 100 * TP_new / (TP_new + FP_new) if (TP_new + FP_new) > 0 else 0
    fscore = "%.2f"%(2 * t_recall_new * t_precision_new / (t_recall_new + t_precision_new)) if (t_recall_new + t_precision_new) > 0 else 0
    recall_new = "%.2f"%(t_recall_new)
    precision_new = "%.2f"%(t_precision_new)
    print('recall: ', recall_new)
    print('precision: ', precision_new)
    print('End-to-end f-score: ', fscore)
    print()
    #print lm, ' & ', lh, ' & ', lto, ' & ', ilm, ' & ', ilh, ' & ', ilto, '&', total, ' & ', precision, ' & ', transAcc, ' & ', transAcc1, ' & ', fscore
    print(lm, ' & ', lh, ' & ', ilm, ' & ', ilh, '&', total, ' & ', precision, ' & ', f_score, ' & ', transAcc, ' & ', recall_new, ' & ', precision_new, ' & ', fscore)
    print()
{ "repo_name": "NehaTelhan/CompVisionFinalProj", "path": "coco_evaluation.py", "copies": "1", "size": "13162", "license": "mit", "hash": 8150403622984231000, "line_mean": 35.3920454545, "line_max": 174, "alpha_frac": 0.6511168515, "autogenerated": false, "ratio": 2.884505807582731, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.40356226590827304, "avg_score": null, "num_lines": null }
__author__ = 'Andrei'

import numpy as np
from chiffatools.linalg_routines import rm_nans
from scipy.stats import t, norm
from scipy.spatial.distance import pdist, squareform
from matplotlib import pyplot as plt
import os

# Dilution series: 0 plus nine two-fold steps, scaled so the top dose is 1.
drug_c_array = np.array([0]+[2**_i for _i in range(0, 9)])*0.5**8


def safe_dir_create(path):
    """Create *path* (including parents) if it does not exist yet."""
    if not os.path.isdir(path):
        os.makedirs(path)


def index(myset):
    """Map each element of *myset* to its position, as a dict."""
    return dict((elt, i) for i, elt in enumerate(myset))


def broadcast(subline):
    """Reshape a 30-element sequence into a (10, 3) array (10 doses x 3 replicates).

    :raises Exception: when *subline* does not have exactly 30 items
    """
    if len(subline) != 30:
        print(subline)
        raise Exception('wrong number of items in subline')
    else:
        arr = np.array(subline)
        return arr.reshape((10, 3))


def make_comparator(percentile_5_range):
    """Build a three-way comparator that treats differences within
    sqrt(2) * percentile_5_range as a tie (returns 0), else +/-1."""
    st = np.sqrt(2)

    def compare(val1, val2):
        if val1 - val2 > st * percentile_5_range:
            return 1
        if val1 - val2 < -st * percentile_5_range:
            return -1
        else:
            return 0

    return compare


def lgi(lst, index_list):
    """
    List get indexes: recovers indexes in the list in the provided index list
    and returns the result in the form of an array

    :param lst: source list
    :param index_list: indexes to pick
    :return: numpy array of the picked elements
    """
    return np.array([lst[i_] for i_ in index_list])


def p_stabilize(array, percentile):
    """Winsorize *array* in place at the given percentile (both tails)."""
    p_low = np.percentile(rm_nans(array), percentile)
    p_high = np.percentile(rm_nans(array), 100-percentile)
    array[array < p_low] = p_low
    array[array > p_high] = p_high
    return array


def get_boundary_correction(TF, background_std):
    """Clamp TF values that fall below the 95% detection boundary
    (1.96 * background_std) to a surviving-fraction estimate. Mutates TF."""
    def surviving_fraction(_float):
        return np.ceil(norm.sf(0, _float, background_std)*background_std*1.96)

    surviving_fraction = np.vectorize(surviving_fraction)

    violating_TF_mask = TF < background_std*1.96
    if np.any(violating_TF_mask):
        TF[violating_TF_mask] = surviving_fraction(TF[violating_TF_mask])
    return TF


def get_relative_growth(raw_values, initial, std):
    """Return (fold_growth, sigmas, nc_sigmas): growth over the initial reading
    and the raw/growth values expressed in units of the tool noise *std*."""
    fold_growth = raw_values - initial[:, :, np.newaxis]
    sigmas = fold_growth / std
    nc_sigmas = raw_values / std
    return fold_growth, sigmas, nc_sigmas


def get_t_distro_outlier_bound_estimation(array, background_std):
    """Half-width of the 95% t-confidence interval around the mean of *array*,
    with the tool noise folded into the scale."""
    narray = rm_nans(array)

    low, up = t.interval(0.95, narray.shape[0]-1, np.mean(narray),
                         np.sqrt(np.var(narray)+background_std**2))
    up, low = (up-np.mean(narray), np.mean(narray)-low)

    return max(up, low)


def clean_tri_replicates(points, std_of_tools):
    """
    Deletes an element inside the triplicates if one of them is strongly
    outlying compared to the others.

    :param points: length-3 array of replicate readings (modified in place)
    :param std_of_tools: measurement noise of the reading instrument
    :return: *points*, with the outlier (if any) replaced by NaN
    """
    if all(np.isnan(points)):
        # early termination if all points are nan
        return points
    arr_of_interest = pdist(points[:, np.newaxis])
    _min, _max = (np.min(arr_of_interest), np.max(arr_of_interest))
    containment = t.interval(0.95, 1, scale=_min/2)[1]
    if _max > containment:
        # pairwise distances are ordered (0,1),(0,2),(1,2): the point absent from
        # the closest pair is the outlier candidate
        outlier = 2 - np.argmin(arr_of_interest)
        msk = np.array([True, True, True])
        msk[outlier] = False
        _mean, _std = (np.mean(points[msk]), np.std(points[msk]))
        containment_2 = t.interval(0.95, 1, loc=_mean, scale=np.sqrt(_std**2+std_of_tools**2))
        if points[outlier] > containment_2[1] or points[outlier] < containment_2[0]:
            points[outlier] = np.nan
    return points


def C0_correction(value_set):
    """Normalize each plane of *value_set* by the mean of its zero-concentration
    column. Mutates and returns *value_set*."""
    for i in range(0, value_set.shape[0]):
        # NOTE(review): this tests the whole stack for all-NaN, not plane i;
        # possibly value_set[i] was intended -- confirm before changing.
        if not np.all(np.isnan(value_set)):
            value_set[i, :, :] /= np.nanmean(value_set[i, 0, :])
    return value_set


def compute_stats(values, concentrations, background_std, clean=True):
    """Per-concentration mean / error / std / degrees of freedom.

    :param values: readings, shaped (repeats, wells, replicates)
    :param concentrations: per-well concentrations (mutated in place: values
        within 5% of each other are merged into one level)
    :param background_std: instrument noise folded into the std estimate
    :param clean: unused here; kept for interface compatibility
    :return: (means, errs, stds, freedom_degs, unique_values)
    """
    def preprocess_concentrations():
        # merge concentration levels closer than 5% (log-scale) of each other
        u_concentrations = np.unique(concentrations)[1:]
        re_concentrations = np.log(u_concentrations)
        _5_p = np.log(1.05)
        backbone = squareform(pdist(re_concentrations[:, np.newaxis]))
        msk = np.array((backbone < _5_p).nonzero()).T
        collapse = []
        for i, j in msk.tolist():
            if i > j:
                collapse.append((u_concentrations[i], u_concentrations[j]))
        for c1, c2 in collapse:
            concentrations[concentrations == c2] = c1

    preprocess_concentrations()
    unique_values = np.unique(concentrations)

    means = np.zeros_like(unique_values)
    errs = np.zeros_like(unique_values)
    stds = np.zeros_like(unique_values)
    freedom_degs = np.zeros_like(unique_values)

    for i, val in enumerate(unique_values):
        mask = concentrations == val
        vals = rm_nans(values[:, mask, :])
        means[i] = np.mean(vals)
        stds[i] = np.sqrt(np.std(vals)**2 + background_std**2)
        freedom_degs[i] = np.max((vals.shape[0] - 1, 1))
        # errs[i] = stds[i]/np.sqrt(freedom_degs[i])
        errs[i] = get_t_distro_outlier_bound_estimation(vals, background_std)/freedom_degs[i]

    return means, errs, stds, freedom_degs, unique_values


def block_fusion(arg_arr):
    """Stack the arrays in *arg_arr* block-diagonally, padding off-diagonal
    blocks with NaN."""
    expansion_factor = len(arg_arr)
    ghost = np.empty_like(arg_arr[0])
    ghost.fill(np.nan)
    new_arg_arr = []
    for i, arr in enumerate(arg_arr):
        payload = []
        for j in range(0, i):
            payload.append(ghost)
        payload.append(arr)
        for j in range(i, expansion_factor-1):
            payload.append(ghost)
        new_arg_arr.append(np.vstack(tuple(payload)))
    return np.hstack(new_arg_arr)


def calculate_information(means, errs):
    """Signal estimate for a dose-response curve: total start-to-end drop in
    sigma units, penalized by non-monotonic excursions at either end."""
    def inner_comparison(idx1, idx2):
        return (means[idx1] - means[idx2]) / np.sqrt(errs[idx1]**2 + errs[idx2]**2)

    total_delta = inner_comparison(0, -1)
    high_start = inner_comparison(np.argmax(means), 0)
    low_finish = inner_comparison(-1, np.argmin(means))
    # print 'delta: %s, start: %s, finish: %s, total: %s' % (total_delta, high_start, low_finish,
    #                                                        total_delta - high_start - low_finish)
    return total_delta - high_start - low_finish


def estimate_differences(mean_diff_array):
    """Column-wise sample standard deviation (ddof=1), ignoring NaNs."""
    mean_diff_array = np.array(mean_diff_array)
    return np.nanstd(mean_diff_array, axis=0, ddof=1)


def correct_plates(plate_stack, concentrations, std_of_tools, replicate_cleaning=True, filter_level=np.nan,
                   info_threshold=3, bang_threshold=20):
    """
    Performs the correction of the plates status

    :param plate:
    :param concentrations:
    :param std_of_tools:
    :return:
    """
    re_plate_stack = []
    means_stack = []
    errs_stack = []
    unique_concs_stack = []
    ghost = np.empty_like(plate_stack[0, :, :])
    ghost.fill(np.nan)

    # removal of outliers in triplicates have to be performed first because they affect stds
    if replicate_cleaning:
        np.apply_along_axis(clean_tri_replicates, 2, plate_stack, std_of_tools)

    for i in range(0, plate_stack.shape[0]):
        plate = plate_stack[i, :, :][np.newaxis, :, :]  # the plates are not assembled yet.
        means, errs, stds, freedom_degs, unique_concs = compute_stats(plate, concentrations, std_of_tools)
        flat_ghost = np.empty_like(means)
        flat_ghost.fill(np.nan)
        msk = np.logical_not(np.isnan(means))
        # in this specific case, nan removal is required for info calculation to be properly implemented
        # if not np.isnan(filter_level):
        #     msk = np.logical_and(msk, errs > filter_level)
        # # this was desabled because the masking breaks the indexing routines further down the road
        re_means = means[msk]
        re_stds = stds[msk]
        re_unique_concs = unique_concs[msk]
        for i, conc in enumerate(concentrations):
            # this clears sets that were filtered out due to excessive noise.
            if conc not in unique_concs:
                plate[:, i, :] = np.nan
        info = calculate_information(re_means, re_stds)
        bang = np.max(re_means)/std_of_tools
        unique_concs_stack.append(unique_concs)
        if info > info_threshold and bang > bang_threshold:
            re_plate_stack.append(plate[0, :, :])
            means_stack.append(means)
            errs_stack.append(errs)
        else:
            re_plate_stack.append(ghost)
            means_stack.append(flat_ghost)
            errs_stack.append(errs)
            # this fragment fails in case we try to filter out additional points from the plot.
Hence the off switch above re_plate_stack = np.array(re_plate_stack) means_stack = np.array(means_stack) errs_stack = np.array(errs_stack) unique_concs_stack = np.array(unique_concs_stack) return re_plate_stack, means_stack, errs_stack, unique_concs_stack def clean_nans(stake_of_interest, dims=3): mask = [] for i in range(0, stake_of_interest.shape[0]): if dims == 3: mask.append(np.all(np.isnan(stake_of_interest[i, :, :]))) if dims == 2: mask.append(np.all(np.isnan(stake_of_interest[i, :]))) mask = np.logical_not(np.array(mask)) return mask def retrieve_normalization_factor(T0_median_array): redux_function = lambda x: rm_nans(x)[0] retour = np.apply_along_axis(redux_function, 1, T0_median_array) return retour def type_map(xy): x, y = tuple(str(xy)) x, y = (int(x), int(y)) x_str = ['_', 'raw', 'normalized', 'collapsed'][x] y_str = ['plate c0-', 'plate t0-'][y] if x < 2: return x_str if x > 1: return y_str+x_str def normalize(plate_stack, means_stack, errs_stack, std_of_tools, normalization_vector): if normalization_vector is None: raise Exception('Normalization vector supplied is empty. 
Make sure your your parameters are of form xy, x=1/2/3, y=0/1') plate_stack /= normalization_vector[:, np.newaxis, np.newaxis] means_stack /= normalization_vector[:, np.newaxis] errs_stack /= normalization_vector[:, np.newaxis] std_of_tools = std_of_tools / normalization_vector return plate_stack, means_stack, errs_stack, std_of_tools def combine(plate_stack, concentrations, std_of_tools_vector): std_of_tools = np.max(std_of_tools_vector) means, errs, stds, freedom_degs, unique_concs = compute_stats(plate_stack, concentrations, std_of_tools) return means, errs, unique_concs def logistic_regression(TF, T0, concentrations, background_std): def get_1p_bounds(mean, std, dof): return t.interval(0.99, dof, mean, std) mask = concentrations == 0.0 vals_at_0 = rm_nans(TF[:, mask, :]) max_capacity = get_1p_bounds(np.mean(vals_at_0), np.sqrt(np.var(vals_at_0) + background_std**2), vals_at_0.shape[0])[1]*1.05 compensation_T0 = -np.log2(max_capacity/T0-1)[:, :, np.newaxis] compensation_TF = -np.log2(max_capacity/TF-1) alphas = compensation_TF - compensation_T0 return alphas
{ "repo_name": "chiffa/Pharmacosensitivity_growth_assays", "path": "src/supporting_functions.py", "copies": "1", "size": "10773", "license": "bsd-3-clause", "hash": 4521170662711522300, "line_mean": 30.6852941176, "line_max": 128, "alpha_frac": 0.6231319038, "autogenerated": false, "ratio": 3.1592375366568914, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4282369440456891, "avg_score": null, "num_lines": null }
class TekUsbtmc:
    """Minimal driver for a Tektronix arbitrary-function generator over the Linux
    usbtmc character device (SCPI commands written/read as lines).

    Waveform samples are 14-bit unsigned values (0..16382); device-side indices
    are 1-based.
    """

    # Valid user-waveform trace names on the instrument.
    USBTMC_USR_WAVEFORM_NAME = ['USER1', 'USER2', 'USER3', 'USER4']

    def __init__(self, device="/dev/usbtmc0"):
        # Unbuffered read/write so each SCPI command goes to the device immediately.
        self.usbtmc = open(device, mode = "r+", buffering=0)
        self.device = device
        self.id = self.get_id()
        print('Connected to: %s' % self.id)

    def close(self):
        """Close the underlying usbtmc device file."""
        self.usbtmc.close()

    def write(self, string):
        """Send a raw SCPI command/data string to the device."""
        self.usbtmc.write(string)
        #time.sleep(0.01)

    def read(self):
        """Read one response line from the device, stripped of whitespace."""
        return self.usbtmc.readline().strip()

    def get_id(self):
        """Query and return the instrument identification string (*IDN?)."""
        self.write('*IDN?')
        return self.read()

    def get_value(self, index):
        """Read one sample of the edit memory at 1-based `index`.

        Returns the integer sample, or -1 on a bad index or read failure.
        """
        if index < 1:
            print 'Index should starts with 1!'
            return -1
        self.write('DATA:DATA:VALue? EMEMory,' + str(index))
        try:
            st = self.read()
        except:
            return -1
        return int(st)

    def upload_data(self, data, usr_wav_name, verify = True):
        """Upload a waveform into edit memory and copy it to a USER trace.

        :param data: sequence of 2..131072 samples, each in 0..16382
        :param usr_wav_name: target trace name: USER1, USER2, USER3 or USER4
        :param verify: when True, read every sample back and compare
        """
        #trace name: USER1 USER2 USER3, USER4
        if not any(True for name in self.USBTMC_USR_WAVEFORM_NAME if name == usr_wav_name):
            print 'Wrong USER waveform name. Possible options:'
            print self.USBTMC_USR_WAVEFORM_NAME
            return
        length = len(data)
        # check if length of the input data is appropriate (2--131072)
        if (length < 2) or (length > 131072):
            print 'The length of the input data is out of bonds!'
            print 'The length should be between 2 and 131072 (now it is ' + str(length) + ').'
            return
        # check if data is in the required interval (0--16382)
        for x in data:
            if (x < 0) or (x > 16382):
                print 'Input data out of range!'
                print 'input data shoult contain values between 0 and 16382.'
                return
        # prepare the header of the command
        # IEEE 488.2 definite-length block: '#6' + six-digit byte count (2 bytes/sample)
        #header = 'DATA:DATA EMEMory,#' + str(len(str(length*2))) + str(length*2)
        header = 'DATA:DATA EMEMory,#6' + '{:06d}'.format(length*2)
        # prepare the binary data (big-endian 16-bit samples)
        binary_data = ''
        for i in range(0,length):
            binary_data += chr(( (data[i] >> 8 ) & 0x000000FF ))  # high byte first
            binary_data += chr( data[i] & 0x000000FF )  # then low byte
        #binary_data = struct.pack('>' + 'h'*len(data), *data)
        # transfer data to device
        print 'Transfering data...'
        self.write( header + binary_data)
        # copy data to trace
        self.write('DATA:COPY '+ usr_wav_name + ',EMEMory')
        # verify the integrity by reading every sample back (slow but safe)
        if verify:
            print 'Verifying data...'
            for i in range(0,length):
                val = self.get_value(i+1)
                if val != data[i]:
                    print 'Value at #' + str(i+1) + ' is incorrect! To fix this, try to resend the data.'
                    return
            print 'Data transferred successfully.'
        else:
            print 'Data transferred, but not verified'

    def set_value(self, index, data):
        """Write a single sample into edit memory at 1-based `index` and read it
        back to confirm the transfer."""
        if index < 1:
            print '\n\rIndex starts with 1!\n\r'
            return
        if (data < 0) or (data > 16382):
            print '\n\rInput data out of range!'
            print 'input data shoult contain values between 0 and 16382.\n\r'
            return
        self.write('DATA:DATA:VALue EMEMory,' + str(index) + ',' + str(data))
        val = self.get_value(index)
        if val != data:
            print 'Transfer failed!'
        else:
            print 'Value transfered sucessfully.'
{ "repo_name": "duke-87/tekusbtmc", "path": "tekusbtmc.py", "copies": "1", "size": "3784", "license": "mit", "hash": -6909145907106680000, "line_mean": 31.6206896552, "line_max": 105, "alpha_frac": 0.5103065539, "autogenerated": false, "ratio": 3.8890030832476876, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.48993096371476874, "avg_score": null, "num_lines": null }
# PyQt5 frontend for a robot controller: shows joypad axes, speed, heading and
# connection state, updated from a background thread reading a shared data dict.
__author__ = 'Andrej Frank'
__version__ = '1.0.0'

import sys
from PyQt5 import QtWidgets, uic
from threading import Thread
from time import sleep

import libraries.icon_rc as icon_rc


class Frontend(QtWidgets.QMainWindow):
    """Main window; widgets are defined in the Qt Designer file Controller.ui."""

    def __init__(self):
        QtWidgets.QMainWindow.__init__(self)

        # Load the Qt Designer layout
        self.__ui = uic.loadUi("config/Controller.ui", self)

        # Wire up the X slider and set its min/max range
        self.__ui.xSlider.valueChanged[int].connect(self.set_slider_x)
        self.__ui.xSlider.setMinimum(-100)
        self.__ui.xSlider.setMaximum(100)

        # Wire up the Y slider and set its min/max range
        self.__ui.ySlider.valueChanged[int].connect(self.set_slider_y)
        self.__ui.ySlider.setMinimum(-100)
        self.__ui.ySlider.setMaximum(100)

        # Speed slider (0..100 %)
        self.__ui.speed.valueChanged[int].connect(self.set_speed_bar)
        self.__ui.speed.setMinimum(0)
        self.__ui.speed.setMaximum(100)

        # Show the GUI
        self.show()

    # Set the X slider position (mirrors the joypad X axis)
    def set_slider_x(self, value):
        self.__ui.xSlider.setValue(value)

    # Set the Y slider position (mirrors the joypad Y axis)
    def set_slider_y(self, value):
        self.__ui.ySlider.setValue(value)

    # Set the speed indicator value
    def set_speed_bar(self, value):
        self.__ui.speed.setValue(value)

    # Set the heading dial value
    def set_direction(self, value):
        self.__ui.directionDial.setValue(value)

    # Reflect the controller connection state as a colored label
    def set_connected(self, value):
        if value:
            self.__ui.controller_connected.setText("Controller connected")
            self.__ui.controller_connected.setStyleSheet("background-color : green")
        else:
            self.__ui.controller_connected.setText("Controller disconnected")
            self.__ui.controller_connected.setStyleSheet("background-color : red")

    # Check the radio button matching the current drive mode; assumes the
    # .ui file defines a widget named after each mode string.
    def set_move_type(self, mtype):
        button = getattr(self.__ui, mtype)
        button.setChecked(True)


def value_setter(main_window, data, logger, config, app, run_event):
    """
    Background loop that pushes values from the shared `data` dict into the
    frontend widgets until `run_event` is cleared, then shuts the app down.

    NOTE(review): widgets are touched directly from this worker thread;
    Qt expects GUI updates on the main thread — confirm this is safe here.

    :param main_window: the Frontend instance to update
    :param data: shared dict with "axis" (x/y/angle/speed) and "ctrl"
        (connected/mode) sub-dicts — schema inferred from usage, confirm
    :param logger: project logger with a .log(msg, level) interface
    :param config: unused here; kept for the module worker signature
    :param app: the QApplication, closed when the loop exits
    :param run_event: threading.Event controlling the loop lifetime
    """
    connected = False
    connected_old = False
    move_type = None

    while run_event.is_set():
        if "axis" in data and "x" in data["axis"] and "y" in data["axis"]:
            # Axis values arrive normalized to [-1, 1]; sliders expect [-100, 100].
            main_window.set_slider_x(data["axis"]["x"] * 100)
            main_window.set_slider_y(data["axis"]["y"] * 100)
            # Dial expects 0..360, angle arrives as -180..180.
            main_window.set_direction(data["axis"]["angle"] + 180)
            main_window.set_speed_bar(data["axis"]["speed"] * 100)

            # Only repaint the connection label when the state actually changed
            connected = data["ctrl"]["connected"]
            if connected != connected_old:
                main_window.set_connected(data["ctrl"]["connected"])
                connected_old = data["ctrl"]["connected"]

            # Likewise for the drive-mode radio buttons
            if move_type != data["ctrl"]["mode"]:
                move_type = data["ctrl"]["mode"]
                main_window.set_move_type(move_type)

            logger.log(data, "debug")
        sleep(0.1)

    app.closeAllWindows()
    app.quit()


# worker function that is called by master process
def worker(data, logger, config, run_event):
    # start QApplication with empty list as parameter (needs list!)
    app = QtWidgets.QApplication([])

    # spawn Frontend-object
    main_window = Frontend()

    # starting thread which will set values into the frontend
    t = Thread(target=value_setter, args=[main_window, data, logger, config, app, run_event])
    t.start()

    # start main frontend event loop (blocks until quit)
    app.exec()
{ "repo_name": "vibe-x/robotic", "path": "modules/Frontend.py", "copies": "1", "size": "3502", "license": "apache-2.0", "hash": 4691362124590617000, "line_mean": 31.4166666667, "line_max": 93, "alpha_frac": 0.6183947444, "autogenerated": false, "ratio": 3.7403846153846154, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4858779359784615, "avg_score": null, "num_lines": null }
__author__ = "Andre Merzky, Mark Santcroos" __copyright__ = "Copyright 2015, The SAGA Project" __license__ = "MIT" '''This examples shows how to use the saga.Filesystem API with the Globus Online file adaptor. If something doesn't work as expected, try to set SAGA_VERBOSE=3 in your environment before you run the script in order to get some debug output. If you think you have encountered a defect, please report it at: https://github.com/saga-project/saga-python/issues ''' import sys import os import saga def main(): try: ctx = saga.Context("x509") ctx.user_proxy = '/Users/mark/proj/myproxy/xsede.x509' session = saga.Session() session.add_context(ctx) source = "go://marksant#netbook/Users/mark/tmp/go/" #destination = "go://xsede#stampede/~/tmp/" #destination = "go://gridftp.stampede.tacc.xsede.org/~/tmp/" destination = "go://oasis-dm.sdsc.xsede.org/~/tmp/" #destination = "go://ncsa#BlueWaters/~/tmp/" filename = "my_file" # open home directory on a remote machine source_dir = saga.filesystem.Directory(source) # copy .bash_history to /tmp/ on the local machine source_dir.copy(filename, destination) # list 'm*' in local /tmp/ directory dest_dir = saga.filesystem.Directory(destination) for entry in dest_dir.list(pattern='%s*' % filename[0]): print entry dest_file = saga.filesystem.File(os.path.join(destination, filename)) assert dest_file.is_file() == True assert dest_file.is_link() == False assert dest_file.is_dir() == False print 'Size: %d' % dest_file.get_size() return 0 except saga.SagaException as ex: # Catch all saga exceptions print "An exception occured: (%s) %s " % (ex.type, (str(ex))) # Trace back the exception. That can be helpful for debugging. print " \n*** Backtrace:\n %s" % ex.traceback return -1 if __name__ == "__main__": sys.exit(main())
{ "repo_name": "luis-rr/saga-python", "path": "examples/files/go_file_copy.py", "copies": "2", "size": "2066", "license": "mit", "hash": -1590165464214849300, "line_mean": 31.7936507937, "line_max": 77, "alpha_frac": 0.6214908035, "autogenerated": false, "ratio": 3.3758169934640523, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4997307796964052, "avg_score": null, "num_lines": null }
__author__ = "Andre Merzky, Ole Weidner, Mark Santcroos" __copyright__ = "Copyright 2012-2015, The SAGA Project" __license__ = "MIT" """ PBSPro job adaptor implementation """ import threading import saga.url as surl import saga.utils.pty_shell as sups import saga.adaptors.base import saga.adaptors.cpi.job from saga.job.constants import * import re import os import time import threading from cgi import parse_qs SYNC_CALL = saga.adaptors.cpi.decorators.SYNC_CALL ASYNC_CALL = saga.adaptors.cpi.decorators.ASYNC_CALL SYNC_WAIT_UPDATE_INTERVAL = 1 # seconds MONITOR_UPDATE_INTERVAL = 60 # seconds # -------------------------------------------------------------------- # class _job_state_monitor(threading.Thread): """ thread that periodically monitors job states """ def __init__(self, job_service): self.logger = job_service._logger self.js = job_service self._stop = threading.Event() super(_job_state_monitor, self).__init__() self.setDaemon(True) def stop(self): self._stop.set() def run(self): # we stop the monitoring thread when we see the same error 3 times in # a row... error_type_count = dict() while not self._stop.is_set (): try: # FIXME: do bulk updates here! we don't want to pull information # job by job. that would be too inefficient! 
jobs = self.js.jobs for job_id in jobs.keys() : job_info = jobs[job_id] # we only need to monitor jobs that are not in a # terminal state, so we can skip the ones that are # either done, failed or canceled if job_info['state'] not in [saga.job.DONE, saga.job.FAILED, saga.job.CANCELED] : # Store the current state since the current state # variable is updated when _job_get_info is called pre_update_state = job_info['state'] new_job_info = self.js._job_get_info(job_id, reconnect=False) self.logger.info ("Job monitoring thread updating Job " "%s (old state: %s, new state: %s)" % (job_id, pre_update_state, new_job_info['state'])) # fire job state callback if 'state' has changed if new_job_info['state'] != pre_update_state: job_obj = job_info['obj'] job_obj._attributes_i_set('state', new_job_info['state'], job_obj._UP, True) # update job info jobs[job_id] = new_job_info except Exception as e: import traceback traceback.print_exc () self.logger.warning("Exception caught in job monitoring thread: %s" % e) # check if we see the same error again and again error_type = str(e) if error_type not in error_type_count : error_type_count = dict() error_type_count[error_type] = 1 else : error_type_count[error_type] += 1 if error_type_count[error_type] >= 3 : self.logger.error("too many monitoring errors -- stopping job monitoring thread") return finally : time.sleep (MONITOR_UPDATE_INTERVAL) # -------------------------------------------------------------------- # def log_error_and_raise(message, exception, logger): """ logs an 'error' message and subsequently throws an exception """ logger.error(message) raise exception(message) # -------------------------------------------------------------------- # def _pbs_to_saga_jobstate(pbsjs, logger=None): """ translates a pbs one-letter state to saga """ # 'C' Torque "Job is completed after having run." # 'F' PBS Pro "Job is finished." # 'H' PBS Pro and TORQUE "Job is held." 
# 'Q' PBS Pro and TORQUE "Job is queued(, eligible to run or routed.) # 'S' PBS Pro and TORQUE "Job is suspended." # 'W' PBS Pro and TORQUE "Job is waiting for its execution time to be reached." # 'R' PBS Pro and TORQUE "Job is running." # 'E' PBS Pro and TORQUE "Job is exiting after having run" # 'T' PBS Pro and TORQUE "Job is being moved to new location." # 'X' PBS Pro "Subjob has completed execution or has been deleted." ret = None if pbsjs == 'C': ret = saga.job.DONE elif pbsjs == 'F': ret = saga.job.DONE elif pbsjs == 'H': ret = saga.job.PENDING elif pbsjs == 'Q': ret = saga.job.PENDING elif pbsjs == 'S': ret = saga.job.PENDING elif pbsjs == 'W': ret = saga.job.PENDING elif pbsjs == 'R': ret = saga.job.RUNNING elif pbsjs == 'E': ret = saga.job.RUNNING elif pbsjs == 'T': ret = saga.job.RUNNING elif pbsjs == 'X': ret = saga.job.CANCELED else : ret = saga.job.UNKNOWN logger.debug('check state: %s', pbsjs) logger.debug('use state: %s', ret) return ret # -------------------------------------------------------------------- # def _pbscript_generator(url, logger, jd, ppn, gres, pbs_version, is_cray=False, queue=None, ): """ generates a PBS Pro script from a SAGA job description """ pbs_params = str() exec_n_args = str() exec_n_args += 'export SAGA_PPN=%d\n' % ppn if jd.executable: exec_n_args += "%s " % (jd.executable) if jd.arguments: for arg in jd.arguments: exec_n_args += "%s " % (arg) if jd.name: pbs_params += "#PBS -N %s \n" % jd.name if (is_cray is "") or not('Version: 4.2.7' in pbs_version): # qsub on Cray systems complains about the -V option: # Warning: # Your job uses the -V option, which requests that all of your # current shell environment settings (9913 bytes) be exported to # it. This is not recommended, as it causes problems for the # batch environment in some cases. 
pbs_params += "#PBS -V \n" if jd.environment: pbs_params += "#PBS -v %s\n" % \ ','.join (["%s=%s" % (k,v) for k,v in jd.environment.iteritems()]) # apparently this doesn't work with older PBS installations # if jd.working_directory: # pbs_params += "#PBS -d %s \n" % jd.working_directory # a workaround is to do an explicit 'cd' if jd.working_directory: workdir_directives = 'export PBS_O_WORKDIR=%s \n' % jd.working_directory workdir_directives += 'mkdir -p %s\n' % jd.working_directory workdir_directives += 'cd %s\n' % jd.working_directory else: workdir_directives = '' if jd.output: # if working directory is set, we want stdout to end up in # the working directory as well, unless it containes a specific # path name. if jd.working_directory: if os.path.isabs(jd.output): pbs_params += "#PBS -o %s \n" % jd.output else: # user provided a relative path for STDOUT. in this case # we prepend the workind directory path before passing # it on to PBS pbs_params += "#PBS -o %s/%s \n" % (jd.working_directory, jd.output) else: pbs_params += "#PBS -o %s \n" % jd.output if jd.error: # if working directory is set, we want stderr to end up in # the working directory as well, unless it contains a specific # path name. if jd.working_directory: if os.path.isabs(jd.error): pbs_params += "#PBS -e %s \n" % jd.error else: # user provided a realtive path for STDERR. 
in this case # we prepend the workind directory path before passing # it on to PBS pbs_params += "#PBS -e %s/%s \n" % (jd.working_directory, jd.error) else: pbs_params += "#PBS -e %s \n" % jd.error if jd.wall_time_limit: hours = jd.wall_time_limit / 60 minutes = jd.wall_time_limit % 60 pbs_params += "#PBS -l walltime=%s:%s:00 \n" \ % (str(hours), str(minutes)) if jd.queue and queue: pbs_params += "#PBS -q %s \n" % queue elif jd.queue and not queue: pbs_params += "#PBS -q %s \n" % jd.queue elif queue and not jd.queue: pbs_params += "#PBS -q %s \n" % queue if jd.project: if 'PBSPro_1' in pbs_version: # On PBS Pro we set both -P(roject) and -A(accounting), # as we don't know what the admins decided, and just # pray that this doesn't create problems. pbs_params += "#PBS -P %s \n" % str(jd.project) pbs_params += "#PBS -A %s \n" % str(jd.project) else: # Torque pbs_params += "#PBS -A %s \n" % str(jd.project) if jd.job_contact: pbs_params += "#PBS -m abe \n" # if total_cpu_count is not defined, we assume 1 if not jd.total_cpu_count: jd.total_cpu_count = 1 # Request enough nodes to cater for the number of cores requested nnodes = jd.total_cpu_count / ppn if jd.total_cpu_count % ppn > 0: nnodes += 1 # We use the ncpus value for systems that need to specify ncpus as multiple of PPN ncpus = nnodes * ppn # Node properties are appended to the nodes argument in the resource_list. node_properties = [] # Parse candidate_hosts # # Currently only implemented for "bigflash" on Gordon@SDSC # https://github.com/radical-cybertools/saga-python/issues/406 # if jd.candidate_hosts: if 'BIG_FLASH' in jd.candidate_hosts: node_properties.append('bigflash') else: raise saga.NotImplemented("This type of 'candidate_hosts' not implemented: '%s'" % jd.candidate_hosts) if is_cray is not "": # Special cases for PBS/TORQUE on Cray. Different PBSes, # different flags. A complete nightmare... if 'PBSPro_10' in pbs_version: logger.info("Using Cray XT (e.g. 
Hopper) specific '#PBS -l mppwidth=xx' flags (PBSPro_10).") pbs_params += "#PBS -l mppwidth=%s \n" % jd.total_cpu_count elif 'PBSPro_12' in pbs_version: logger.info("Using Cray XT (e.g. Archer) specific '#PBS -l select=xx' flags (PBSPro_12).") pbs_params += "#PBS -l select=%d\n" % nnodes elif '4.2.6' in pbs_version: logger.info("Using Titan (Cray XP) specific '#PBS -l nodes=xx'") pbs_params += "#PBS -l nodes=%d\n" % nnodes elif '4.2.7' in pbs_version: logger.info("Using Cray XT @ NERSC (e.g. Edison) specific '#PBS -l mppwidth=xx' flags (PBSPro_10).") pbs_params += "#PBS -l mppwidth=%s \n" % jd.total_cpu_count elif 'Version: 5.' in pbs_version: logger.info("Using TORQUE 5.x notation '#PBS -l procs=XX' ") pbs_params += "#PBS -l procs=%d\n" % jd.total_cpu_count else: logger.info("Using Cray XT (e.g. Kraken, Jaguar) specific '#PBS -l size=xx' flags (TORQUE).") pbs_params += "#PBS -l size=%s\n" % jd.total_cpu_count elif 'version: 2.3.13' in pbs_version: # e.g. Blacklight # TODO: The more we add, the more it screams for a refactoring pbs_params += "#PBS -l ncpus=%d\n" % ncpus elif '4.2.7' in pbs_version: logger.info("Using Cray XT @ NERSC (e.g. 
Hopper) specific '#PBS -l mppwidth=xx' flags (PBSPro_10).") pbs_params += "#PBS -l mppwidth=%s \n" % jd.total_cpu_count elif 'PBSPro_12' in pbs_version: logger.info("Using PBSPro 12 notation '#PBS -l select=XX' ") pbs_params += "#PBS -l select=%d\n" % (nnodes) elif 'PBSPro_13' in pbs_version: logger.info("Using PBSPro 13 notation '#PBS -l select=XX' ") pbs_params += "#PBS -l select=%d\n" % (nnodes) else: # Default case, i.e, standard HPC cluster (non-Cray) # If we want just a slice of one node if jd.total_cpu_count < ppn: ppn = jd.total_cpu_count pbs_params += "#PBS -l nodes=%d:ppn=%d%s\n" % ( nnodes, ppn, ''.join([':%s' % prop for prop in node_properties])) # Process Generic Resource specification request if gres: pbs_params += "#PBS -l gres=%s\n" % gres # escape all double quotes and dollarsigns, otherwise 'echo |' # further down won't work # only escape '$' in args and exe. not in the params exec_n_args = workdir_directives + exec_n_args exec_n_args = exec_n_args.replace('$', '\\$') pbscript = "\n#!/bin/bash \n%s%s" % (pbs_params, exec_n_args) pbscript = pbscript.replace('"', '\\"') return pbscript # -------------------------------------------------------------------- # some private defs # _PTY_TIMEOUT = 2.0 # -------------------------------------------------------------------- # the adaptor name # _ADAPTOR_NAME = "saga.adaptor.pbsprojob" _ADAPTOR_SCHEMAS = ["pbspro", "pbspro+ssh", "pbspro+gsissh"] _ADAPTOR_OPTIONS = [] # -------------------------------------------------------------------- # the adaptor capabilities & supported attributes # _ADAPTOR_CAPABILITIES = { "jdes_attributes": [saga.job.NAME, saga.job.EXECUTABLE, saga.job.ARGUMENTS, saga.job.CANDIDATE_HOSTS, saga.job.ENVIRONMENT, saga.job.INPUT, saga.job.OUTPUT, saga.job.ERROR, saga.job.QUEUE, saga.job.PROJECT, saga.job.WALL_TIME_LIMIT, saga.job.WORKING_DIRECTORY, saga.job.WALL_TIME_LIMIT, saga.job.SPMD_VARIATION, # TODO: 'hot'-fix for BigJob saga.job.PROCESSES_PER_HOST, saga.job.TOTAL_CPU_COUNT], 
"job_attributes": [saga.job.EXIT_CODE, saga.job.EXECUTION_HOSTS, saga.job.CREATED, saga.job.STARTED, saga.job.FINISHED], "metrics": [saga.job.STATE], "callbacks": [saga.job.STATE], "contexts": {"ssh": "SSH public/private keypair", "x509": "GSISSH X509 proxy context", "userpass": "username/password pair (ssh)"} } # -------------------------------------------------------------------- # the adaptor documentation # _ADAPTOR_DOC = { "name": _ADAPTOR_NAME, "cfg_options": _ADAPTOR_OPTIONS, "capabilities": _ADAPTOR_CAPABILITIES, "description": """ The PBSPro adaptor allows to run and manage jobs on `PBS <http://www.pbsworks.com/>`_ controlled HPC clusters. """, "example": "examples/jobs/pbsjob.py", "schemas": {"pbspro": "connect to a local cluster", "pbspro+ssh": "connect to a remote cluster via SSH", "pbspro+gsissh": "connect to a remote cluster via GSISSH"} } # -------------------------------------------------------------------- # the adaptor info is used to register the adaptor with SAGA # _ADAPTOR_INFO = { "name" : _ADAPTOR_NAME, "version" : "v0.1", "schemas" : _ADAPTOR_SCHEMAS, "capabilities": _ADAPTOR_CAPABILITIES, "cpis": [ { "type": "saga.job.Service", "class": "PBSProJobService" }, { "type": "saga.job.Job", "class": "PBSProJob" } ] } ############################################################################### # The adaptor class class Adaptor (saga.adaptors.base.Base): """ this is the actual adaptor class, which gets loaded by SAGA (i.e. by the SAGA engine), and which registers the CPI implementation classes which provide the adaptor's functionality. 
""" # ---------------------------------------------------------------- # def __init__(self): saga.adaptors.base.Base.__init__(self, _ADAPTOR_INFO, _ADAPTOR_OPTIONS) self.id_re = re.compile('^\[(.*)\]-\[(.*?)\]$') self.opts = self.get_config (_ADAPTOR_NAME) # ---------------------------------------------------------------- # def sanity_check(self): # FIXME: also check for gsissh pass # ---------------------------------------------------------------- # def parse_id(self, id): # split the id '[rm]-[pid]' in its parts, and return them. match = self.id_re.match(id) if not match or len(match.groups()) != 2: raise saga.BadParameter("Cannot parse job id '%s'" % id) return (match.group(1), match.group(2)) ############################################################################### # class PBSProJobService (saga.adaptors.cpi.job.Service): """ implements saga.adaptors.cpi.job.Service """ # ---------------------------------------------------------------- # def __init__(self, api, adaptor): self._mt = None _cpi_base = super(PBSProJobService, self) _cpi_base.__init__(api, adaptor) self._adaptor = adaptor # ---------------------------------------------------------------- # def __del__(self): self.close() # ---------------------------------------------------------------- # def close(self): if self.mt : self.mt.stop() self.mt.join(10) # don't block forever on join() self._logger.info("Job monitoring thread stopped.") self.finalize(True) # ---------------------------------------------------------------- # def finalize(self, kill_shell=False): if kill_shell : if self.shell : self.shell.finalize (True) # ---------------------------------------------------------------- # @SYNC_CALL def init_instance(self, adaptor_state, rm_url, session): """ service instance constructor """ self.rm = rm_url self.session = session self.ppn = None self.is_cray = "" self.queue = None self.shell = None self.jobs = dict() self.gres = None # the monitoring thread - one per service instance self.mt = 
_job_state_monitor(job_service=self) self.mt.start() rm_scheme = rm_url.scheme pty_url = surl.Url(rm_url) # this adaptor supports options that can be passed via the # 'query' component of the job service URL. if rm_url.query: for key, val in parse_qs(rm_url.query).iteritems(): if key == 'queue': self.queue = val[0] elif key == 'craytype': self.is_cray = val[0] elif key == 'ppn': self.ppn = int(val[0]) elif key == 'gres': self.gres = val[0] # we need to extract the scheme for PTYShell. That's basically the # job.Service Url without the pbs+ part. We use the PTYShell to execute # pbs commands either locally or via gsissh or ssh. if rm_scheme == "pbspro": pty_url.scheme = "fork" elif rm_scheme == "pbspro+ssh": pty_url.scheme = "ssh" elif rm_scheme == "pbspro+gsissh": pty_url.scheme = "gsissh" # these are the commands that we need in order to interact with PBS. # the adaptor will try to find them during initialize(self) and bail # out in case they are note available. self._commands = {'pbsnodes': None, 'qstat': None, 'qsub': None, 'qdel': None} self.shell = sups.PTYShell(pty_url, self.session) # self.shell.set_initialize_hook(self.initialize) # self.shell.set_finalize_hook(self.finalize) self.initialize() return self.get_api() # ---------------------------------------------------------------- # def initialize(self): # check if all required pbs tools are available for cmd in self._commands.keys(): ret, out, _ = self.shell.run_sync("which %s " % cmd) if ret != 0: message = "Error finding PBS tools: %s" % out log_error_and_raise(message, saga.NoSuccess, self._logger) else: path = out.strip() # strip removes newline if cmd == 'qdel': # qdel doesn't support --version! self._commands[cmd] = {"path": path, "version": "?"} elif cmd == 'qsub': # qsub doesn't always support --version! 
self._commands[cmd] = {"path": path, "version": "?"} else: ret, out, _ = self.shell.run_sync("%s --version" % cmd) if ret != 0: message = "Error finding PBS tools: %s" % out log_error_and_raise(message, saga.NoSuccess, self._logger) else: # version is reported as: "version: x.y.z" version = out#.strip().split()[1] # add path and version to the command dictionary self._commands[cmd] = {"path": path, "version": version} self._logger.info("Found PBS tools: %s" % self._commands) # # TODO: Get rid of this, as I dont think there is any justification that Cray's are special # # let's try to figure out if we're working on a Cray machine. # naively, we assume that if we can find the 'aprun' command in the # path that we're logged in to a Cray machine. if self.is_cray == "": ret, out, _ = self.shell.run_sync('which aprun') if ret != 0: self.is_cray = "" else: self._logger.info("Host '%s' seems to be a Cray machine." \ % self.rm.host) self.is_cray = "unknowncray" else: self._logger.info("Assuming host is a Cray since 'craytype' is set to: %s" % self.is_cray) # # Get number of processes per node # if self.ppn: self._logger.debug("Using user specified 'ppn': %d" % self.ppn) return # TODO: this is quite a hack. however, it *seems* to work quite # well in practice. if any(ver in self._commands['qstat']['version'] for ver in ('PBSPro_13', 'PBSPro_12', 'PBSPro_11.3')): ret, out, _ = self.shell.run_sync('unset GREP_OPTIONS; %s -a | grep -E "resources_available.ncpus"' % \ self._commands['pbsnodes']['path']) else: ret, out, _ = self.shell.run_sync('unset GREP_OPTIONS; %s -a | grep -E "(np|pcpu|pcpus)[[:blank:]]*=" ' % \ self._commands['pbsnodes']['path']) if ret != 0: message = "Error running pbsnodes: %s" % out log_error_and_raise(message, saga.NoSuccess, self._logger) else: # this is black magic. we just assume that the highest occurrence # of a specific np is the number of processors (cores) per compute # node. 
this equals max "PPN" for job scripts ppn_list = dict() for line in out.split('\n'): np = line.split(' = ') if len(np) == 2: np_str = np[1].strip() if np_str == '<various>': continue else: np = int(np_str) if np in ppn_list: ppn_list[np] += 1 else: ppn_list[np] = 1 self.ppn = max(ppn_list, key=ppn_list.get) self._logger.debug("Found the following 'ppn' configurations: %s. " "Using %s as default ppn." % (ppn_list, self.ppn)) # ---------------------------------------------------------------- # def _job_run(self, job_obj): """ runs a job via qsub """ # get the job description jd = job_obj.get_description() # normalize working directory path if jd.working_directory : jd.working_directory = os.path.normpath (jd.working_directory) # TODO: Why would one want this? if self.queue and jd.queue: self._logger.warning("Job service was instantiated explicitly with \ 'queue=%s', but job description tries to a different queue: '%s'. Using '%s'." % (self.queue, jd.queue, self.queue)) try: # create a PBS job script from SAGA job description script = _pbscript_generator(url=self.rm, logger=self._logger, jd=jd, ppn=self.ppn, gres=self.gres, pbs_version=self._commands['qstat']['version'], is_cray=self.is_cray, queue=self.queue, ) self._logger.info("Generated PBS script: %s" % script) except Exception, ex: log_error_and_raise(str(ex), saga.BadParameter, self._logger) # try to create the working directory (if defined) # WARNING: this assumes a shared filesystem between login node and # compute nodes. if jd.working_directory: self._logger.info("Creating working directory %s" % jd.working_directory) ret, out, _ = self.shell.run_sync("mkdir -p %s" % (jd.working_directory)) if ret != 0: # something went wrong message = "Couldn't create working directory - %s" % (out) log_error_and_raise(message, saga.NoSuccess, self._logger) # Now we want to execute the script. 
This process consists of two steps: # (1) we create a temporary file with 'mktemp' and write the contents of # the generated PBS script into it # (2) we call 'qsub <tmpfile>' to submit the script to the queueing system cmdline = """SCRIPTFILE=`mktemp -t SAGA-Python-PBSProJobScript.XXXXXX` && echo "%s" > $SCRIPTFILE && %s $SCRIPTFILE && rm -f $SCRIPTFILE""" % (script, self._commands['qsub']['path']) ret, out, _ = self.shell.run_sync(cmdline) if ret != 0: # something went wrong message = "Error running job via 'qsub': %s. Commandline was: %s" \ % (out, cmdline) log_error_and_raise(message, saga.NoSuccess, self._logger) else: # parse the job id. qsub usually returns just the job id, but # sometimes there are a couple of lines of warnings before. # if that's the case, we log those as 'warnings' lines = out.split('\n') lines = filter(lambda lines: lines != '', lines) # remove empty if len(lines) > 1: self._logger.warning('qsub: %s' % ''.join(lines[:-2])) # we asssume job id is in the last line #print cmdline #print out job_id = "[%s]-[%s]" % (self.rm, lines[-1].strip().split('.')[0]) self._logger.info("Submitted PBS job with id: %s" % job_id) state = saga.job.PENDING # populate job info dict self.jobs[job_id] = {'obj' : job_obj, 'job_id' : job_id, 'state' : state, 'exec_hosts' : None, 'returncode' : None, 'create_time' : None, 'start_time' : None, 'end_time' : None, 'gone' : False } self._logger.info ("assign job id %s / %s / %s to watch list (%s)" \ % (None, job_id, job_obj, self.jobs.keys())) # set status to 'pending' and manually trigger callback job_obj._attributes_i_set('state', state, job_obj._UP, True) # return the job id return job_id # ---------------------------------------------------------------- # def _retrieve_job(self, job_id): """ see if we can get some info about a job that we don't know anything about """ # rm, pid = self._adaptor.parse_id(job_id) # # run the PBS 'qstat' command to get some infos about our job # if 'PBSPro_1' in 
self._commands['qstat']['version']: # qstat_flag = '-f' # else: # qstat_flag ='-f1' # # ret, out, _ = self.shell.run_sync("unset GREP_OPTIONS; %s %s %s | "\ # "grep -E -i '(job_state)|(exec_host)|(exit_status)|(ctime)|"\ # "(start_time)|(comp_time)|(stime)|(qtime)|(mtime)'" \ # % (self._commands['qstat']['path'], qstat_flag, pid)) # if ret != 0: # message = "Couldn't reconnect to job '%s': %s" % (job_id, out) # log_error_and_raise(message, saga.NoSuccess, self._logger) # else: # # the job seems to exist on the backend. let's gather some data # job_info = { # 'job_id': job_id, # 'state': saga.job.UNKNOWN, # 'exec_hosts': None, # 'returncode': None, # 'create_time': None, # 'start_time': None, # 'end_time': None, # 'gone': False # } # # job_info = self._parse_qstat(out, job_info) # # return job_info # ---------------------------------------------------------------- # def _job_get_info(self, job_id, reconnect): """ Get job information attributes via qstat. """ # If we don't have the job in our dictionary, we don't want it, # unless we are trying to reconnect. if not reconnect and job_id not in self.jobs: message = "Unknown job id: %s. Can't update state." % job_id log_error_and_raise(message, saga.NoSuccess, self._logger) if not reconnect: # job_info contains the info collect when _job_get_info # was called the last time job_info = self.jobs[job_id] # if the 'gone' flag is set, there's no need to query the job # state again. 
it's gone forever if job_info['gone'] is True: return job_info else: # Create a template data structure job_info = { 'job_id': job_id, 'state': saga.job.UNKNOWN, 'exec_hosts': None, 'returncode': None, 'create_time': None, 'start_time': None, 'end_time': None, 'gone': False } rm, pid = self._adaptor.parse_id(job_id) # run the PBS 'qstat' command to get some infos about our job # TODO: create a PBSPRO/TORQUE flag once if 'PBSPro_1' in self._commands['qstat']['version']: qstat_flag = '-fx' else: qstat_flag ='-f1' ret, out, _ = self.shell.run_sync("unset GREP_OPTIONS; %s %s %s | " "grep -E -i '(job_state)|(exec_host)|(exit_status)|" "(ctime)|(start_time)|(stime)|(mtime)'" % (self._commands['qstat']['path'], qstat_flag, pid)) if ret != 0: if reconnect: message = "Couldn't reconnect to job '%s': %s" % (job_id, out) log_error_and_raise(message, saga.NoSuccess, self._logger) if ("Unknown Job Id" in out): # Let's see if the last known job state was running or pending. in # that case, the job is gone now, which can either mean DONE, # or FAILED. the only thing we can do is set it to 'DONE' job_info['gone'] = True # TODO: we can also set the end time? self._logger.warning("Previously running job has disappeared. " "This probably means that the backend doesn't store " "information about finished jobs. Setting state to 'DONE'.") if job_info['state'] in [saga.job.RUNNING, saga.job.PENDING]: job_info['state'] = saga.job.DONE else: # TODO: This is an uneducated guess? job_info['state'] = saga.job.FAILED else: # something went wrong message = "Error retrieving job info via 'qstat': %s" % out log_error_and_raise(message, saga.NoSuccess, self._logger) else: # The job seems to exist on the backend. let's process some data. # TODO: make the parsing "contextual", in the sense that it takes # the state into account. # parse the egrep result. 
this should look something like this: # job_state = C # exec_host = i72/0 # exit_status = 0 results = out.split('\n') for line in results: if len(line.split('=')) == 2: key, val = line.split('=') key = key.strip() val = val.strip() # The ubiquitous job state if key in ['job_state']: # PBS Pro and TORQUE job_info['state'] = _pbs_to_saga_jobstate(val, self._logger) # Hosts where the job ran elif key in ['exec_host']: # PBS Pro and TORQUE job_info['exec_hosts'] = val.split('+') # format i73/7+i73/6+... # Exit code of the job elif key in ['exit_status', # TORQUE 'Exit_status' # PBS Pro ]: job_info['returncode'] = int(val) # Time job got created in the queue elif key in ['ctime']: # PBS Pro and TORQUE job_info['create_time'] = val # Time job started to run elif key in ['start_time', # TORQUE 'stime' # PBS Pro ]: job_info['start_time'] = val # Time job ended. # # PBS Pro doesn't have an "end time" field. # It has an "resources_used.walltime" though, # which could be added up to the start time. # We will not do that arithmetic now though. # # Alternatively, we can use mtime, as the latest # modification time will generally also be the end time. # # TORQUE has an "comp_time" (completion? time) field, # that is generally the same as mtime at the finish. # # For the time being we will use mtime as end time for # both TORQUE and PBS Pro. 
# if key in ['mtime']: # PBS Pro and TORQUE job_info['end_time'] = val # PBSPRO state does not indicate error or success -- we derive that from # the exit code if job_info['returncode'] not in [None, 0]: job_info['state'] = saga.job.FAILED # return the updated job info return job_info def _parse_qstat(self, haystack, job_info): # return the new job info dict return job_info # ---------------------------------------------------------------- # def _job_get_state(self, job_id): """ get the job's state """ return self.jobs[job_id]['state'] # ---------------------------------------------------------------- # def _job_get_exit_code(self, job_id): """ get the job's exit code """ ret = self.jobs[job_id]['returncode'] # FIXME: 'None' should cause an exception if ret == None : return None else : return int(ret) # ---------------------------------------------------------------- # def _job_get_execution_hosts(self, job_id): """ get the job's exit code """ return self.jobs[job_id]['exec_hosts'] # ---------------------------------------------------------------- # def _job_get_create_time(self, job_id): """ get the job's creation time """ return self.jobs[job_id]['create_time'] # ---------------------------------------------------------------- # def _job_get_start_time(self, job_id): """ get the job's start time """ return self.jobs[job_id]['start_time'] # ---------------------------------------------------------------- # def _job_get_end_time(self, job_id): """ get the job's end time """ return self.jobs[job_id]['end_time'] # ---------------------------------------------------------------- # def _job_cancel(self, job_id): """ cancel the job via 'qdel' """ rm, pid = self._adaptor.parse_id(job_id) ret, out, _ = self.shell.run_sync("%s %s\n" \ % (self._commands['qdel']['path'], pid)) if ret != 0: message = "Error canceling job via 'qdel': %s" % out log_error_and_raise(message, saga.NoSuccess, self._logger) # assume the job was succesfully canceled self.jobs[job_id]['state'] = 
saga.job.CANCELED # ---------------------------------------------------------------- # def _job_wait(self, job_id, timeout): """ wait for the job to finish or fail """ time_start = time.time() time_now = time_start rm, pid = self._adaptor.parse_id(job_id) while True: state = self.jobs[job_id]['state'] # this gets updated in the bg. if state == saga.job.DONE or \ state == saga.job.FAILED or \ state == saga.job.CANCELED: return True # avoid busy poll time.sleep(SYNC_WAIT_UPDATE_INTERVAL) # check if we hit timeout if timeout >= 0: time_now = time.time() if time_now - time_start > timeout: return False # ---------------------------------------------------------------- # @SYNC_CALL def create_job(self, jd): """ implements saga.adaptors.cpi.job.Service.get_url() """ # this dict is passed on to the job adaptor class -- use it to pass any # state information you need there. adaptor_state = {"job_service": self, "job_description": jd, "job_schema": self.rm.schema, "reconnect": False } # create and return a new job object return saga.job.Job(_adaptor=self._adaptor, _adaptor_state=adaptor_state) # ---------------------------------------------------------------- # @SYNC_CALL def get_job(self, job_id): """ Implements saga.adaptors.cpi.job.Service.get_job() Re-create job instance from a job-id. """ # If we already have the job info, we just pass the current info. if job_id in self.jobs : return self.jobs[job_id]['obj'] # Try to get some initial information about this job (again) job_info = self._job_get_info(job_id, reconnect=True) # this dict is passed on to the job adaptor class -- use it to pass any # state information you need there. adaptor_state = {"job_service": self, # TODO: fill job description "job_description": saga.job.Description(), "job_schema": self.rm.schema, "reconnect": True, "reconnect_jobid": job_id } job_obj = saga.job.Job(_adaptor=self._adaptor, _adaptor_state=adaptor_state) # throw it into our job dictionary. 
job_info['obj'] = job_obj self.jobs[job_id] = job_info return job_obj # ---------------------------------------------------------------- # @SYNC_CALL def get_url(self): """ implements saga.adaptors.cpi.job.Service.get_url() """ return self.rm # ---------------------------------------------------------------- # @SYNC_CALL def list(self): """ implements saga.adaptors.cpi.job.Service.list() """ ids = [] ret, out, _ = self.shell.run_sync("unset GREP_OPTIONS; %s | grep `whoami`" % self._commands['qstat']['path']) if ret != 0 and len(out) > 0: message = "failed to list jobs via 'qstat': %s" % out log_error_and_raise(message, saga.NoSuccess, self._logger) elif ret != 0 and len(out) == 0: # qstat | grep `` exits with 1 if the list is empty pass else: for line in out.split("\n"): # output looks like this: # 112059.svc.uc.futuregrid testjob oweidner 0 Q batch # 112061.svc.uc.futuregrid testjob oweidner 0 Q batch if len(line.split()) > 1: job_id = "[%s]-[%s]" % (self.rm, line.split()[0].split('.')[0]) ids.append(str(job_id)) return ids # # ---------------------------------------------------------------- # # # def container_run (self, jobs) : # self._logger.debug ("container run: %s" % str(jobs)) # # TODO: this is not optimized yet # for job in jobs: # job.run () # # # # ---------------------------------------------------------------- # # # def container_wait (self, jobs, mode, timeout) : # self._logger.debug ("container wait: %s" % str(jobs)) # # TODO: this is not optimized yet # for job in jobs: # job.wait () # # # # ---------------------------------------------------------------- # # # def container_cancel (self, jobs, timeout) : # self._logger.debug ("container cancel: %s" % str(jobs)) # raise saga.NoSuccess ("Not Implemented"); ############################################################################### # class PBSProJob (saga.adaptors.cpi.job.Job): """ implements saga.adaptors.cpi.job.Job """ def __init__(self, api, adaptor): # initialize parent class _cpi_base = 
super(PBSProJob, self) _cpi_base.__init__(api, adaptor) def _get_impl(self): return self @SYNC_CALL def init_instance(self, job_info): """ implements saga.adaptors.cpi.job.Job.init_instance() """ # init_instance is called for every new saga.job.Job object # that is created self.jd = job_info["job_description"] self.js = job_info["job_service"] if job_info['reconnect'] is True: self._id = job_info['reconnect_jobid'] self._name = self.jd.get(saga.job.NAME) self._started = True else: self._id = None self._name = self.jd.get(saga.job.NAME) self._started = False return self.get_api() # ---------------------------------------------------------------- # @SYNC_CALL def get_state(self): """ implements saga.adaptors.cpi.job.Job.get_state() """ if self._started is False: return saga.job.NEW return self.js._job_get_state(job_id=self._id) # ---------------------------------------------------------------- # @SYNC_CALL def wait(self, timeout): """ implements saga.adaptors.cpi.job.Job.wait() """ if self._started is False: log_error_and_raise("Can't wait for job that hasn't been started", saga.IncorrectState, self._logger) else: self.js._job_wait(job_id=self._id, timeout=timeout) # ---------------------------------------------------------------- # @SYNC_CALL def cancel(self, timeout): """ implements saga.adaptors.cpi.job.Job.cancel() """ if self._started is False: log_error_and_raise("Can't wait for job that hasn't been started", saga.IncorrectState, self._logger) else: self.js._job_cancel(self._id) # ---------------------------------------------------------------- # @SYNC_CALL def run(self): """ implements saga.adaptors.cpi.job.Job.run() """ self._id = self.js._job_run(self._api()) self._started = True # ---------------------------------------------------------------- # @SYNC_CALL def get_service_url(self): """ implements saga.adaptors.cpi.job.Job.get_service_url() """ return self.js.rm # ---------------------------------------------------------------- # @SYNC_CALL def 
get_id(self): """ implements saga.adaptors.cpi.job.Job.get_id() """ return self._id # ---------------------------------------------------------------- # @SYNC_CALL def get_name (self): """ Implements saga.adaptors.cpi.job.Job.get_name() """ return self._name # ---------------------------------------------------------------- # @SYNC_CALL def get_exit_code(self): """ implements saga.adaptors.cpi.job.Job.get_exit_code() """ if self._started is False: return None else: return self.js._job_get_exit_code(self._id) # ---------------------------------------------------------------- # @SYNC_CALL def get_created(self): """ implements saga.adaptors.cpi.job.Job.get_created() """ if self._started is False: return None else: return self.js._job_get_create_time(self._id) # ---------------------------------------------------------------- # @SYNC_CALL def get_started(self): """ implements saga.adaptors.cpi.job.Job.get_started() """ if self._started is False: return None else: return self.js._job_get_start_time(self._id) # ---------------------------------------------------------------- # @SYNC_CALL def get_finished(self): """ implements saga.adaptors.cpi.job.Job.get_finished() """ if self._started is False: return None else: return self.js._job_get_end_time(self._id) # ---------------------------------------------------------------- # @SYNC_CALL def get_execution_hosts(self): """ implements saga.adaptors.cpi.job.Job.get_execution_hosts() """ if self._started is False: return None else: return self.js._job_get_execution_hosts(self._id) # ---------------------------------------------------------------- # @SYNC_CALL def get_description(self): """ implements saga.adaptors.cpi.job.Job.get_execution_hosts() """ return self.jd
{ "repo_name": "telamonian/saga-python", "path": "src/saga/adaptors/pbspro/pbsprojob.py", "copies": "1", "size": "48804", "license": "mit", "hash": 1279092145485413400, "line_mean": 36.2549618321, "line_max": 192, "alpha_frac": 0.4738546021, "autogenerated": false, "ratio": 3.941209723007349, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4915064325107349, "avg_score": null, "num_lines": null }
__author__ = "Andre Merzky, Ole Weidner" __copyright__ = "Copyright 2012-2013, The SAGA Project" __license__ = "MIT" # -*- coding: utf-8 -*- # # SAGA documentation build configuration file, created by # sphinx-quickstart on Mon Dec 3 21:55:42 2012. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath('../../')) print sys.path # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.pngmath', 'sphinx.ext.mathjax', 'sphinx.ext.ifconfig', 'sphinx.ext.viewcode'] [extensions] todo_include_todos=True # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' rst_epilog = """ .. |param_ttype| replace:: Type of operation. Default (None) is synchronous. .. |rtype_ttype| replace:: :class:`saga.Task` if the operation is asynchronous. .. |not_implemented| replace:: CURRENTLY NOT IMPLEMENTED / SUPPORTED """ # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. 
project = u'SAGA' copyright = u'2012, The SAGA Project' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version - get this directly from SAGA ! import saga version = saga.version # The full version, including alpha/beta/rc tags. release = saga.version # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ["_themes"] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. #html_theme = 'default' html_theme = "armstrong" html_theme_path = ["_themes", ] # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. 
html_theme_options = { "collapsiblesidebar" : "true", } # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = 'images/logo.jpg' # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 
html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'SAGAdoc' # -- Options for LaTeX output -------------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'SAGA.tex', u'SAGA-Python Documentation', u'The SAGA Project', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = 'images/logo.jpg' # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. latex_show_pagerefs = True # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'saga', u'SAGA-Python Documentation', [u'The SAGA Project'], 1) ] # If true, show URL addresses after external links. 
#man_show_urls = False # -- Options for Texinfo output ------------------------------------------------ # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'SAGA', u'SAGA-Python Documentation', u'The SAGA Project', 'SAGA', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # Example configuration for intersphinx: refer to the Python standard library. intersphinx_mapping = {'http://docs.python.org/': None} autodoc_member_order = 'bysource' autodoc_default_flags = ['members', 'undoc-members', 'show-inheritance']
{ "repo_name": "mehdisadeghi/saga-python", "path": "docs/source/conf.py", "copies": "2", "size": "8775", "license": "mit", "hash": 3204425639367200000, "line_mean": 31.1428571429, "line_max": 215, "alpha_frac": 0.704957265, "autogenerated": false, "ratio": 3.666945256999582, "config_test": true, "has_no_keywords": false, "few_assignments": false, "quality_score": 1, "avg_score": 0.011845166863116116, "num_lines": 273 }
__author__ = 'Andre' import codecs import time import logging import sys import os from bs4 import BeautifulSoup import progressbar as pb sys.path.append(os.path.abspath(os.path.dirname(__file__) + '../..')) from text.corpus import Corpus from text.document import Document from text.sentence import Sentence class AIMedCorpus(Corpus): def __init__(self, corpusdir, **kwargs): super(AIMedCorpus, self).__init__(corpusdir, **kwargs) def load_corpus(self, corenlpserver, process=True): trainfiles = [self.path + '/' + f for f in os.listdir(self.path)] total = len(trainfiles) widgets = [pb.Percentage(), ' ', pb.Bar(), ' ', pb.AdaptiveETA(), ' ', pb.Timer()] pbar = pb.ProgressBar(widgets=widgets, maxval=total, redirect_stdout=True).start() time_per_abs = [] for current, f in enumerate(trainfiles): #logging.debug('%s:%s/%s', f, current + 1, total) print '{}:{}/{}'.format(f, current + 1, total) did = f t = time.time() with open(f, 'r') as f: article = "<Article>" + f.read() + "</Article>" soup = BeautifulSoup(article, 'xml') #doc = soup.find_all("article") title = soup.ArticleTitle.get_text() abstract = soup.AbstractText.get_text() doc_text = title + " " + abstract newdoc = Document(doc_text, process=False, did=did) newdoc.sentence_tokenize("biomedical") newdoc.process_document(corenlpserver, "biomedical") #logging.info(len(newdoc.sentences)) self.documents[newdoc.did] = newdoc abs_time = time.time() - t time_per_abs.append(abs_time) logging.debug("%s sentences, %ss processing time" % (len(newdoc.sentences), abs_time)) pbar.update(current) pbar.finish() abs_avg = sum(time_per_abs)*1.0/len(time_per_abs) logging.info("average time per abstract: %ss" % abs_avg) def load_annotations(self, ann_dir, etype, ptype): trainfiles = [ann_dir + '/' + f for f in os.listdir(self.path)] total = len(trainfiles) widgets = [pb.Percentage(), ' ', pb.Bar(), ' ', pb.ETA(), ' ', pb.Timer()] pbar = pb.ProgressBar(widgets=widgets, maxval=total, redirect_stdout=True).start() time_per_abs = [] for 
current, f in enumerate(trainfiles): # logging.debug('%s:%s/%s', f, current + 1, total) print '{}:{}/{}'.format(f, current + 1, total) did = f with open(f, 'r') as f: article = "<Article>" + f.read() + "</Article>" soup = BeautifulSoup(article, 'xml') title = soup.ArticleTitle abstract = soup.AbstractText title_text = title.get_text() abstract_text = abstract.get_text() abs_offset = len(title.get_text()) + 1 title_entities = title.find_all("prot", recursive=False) abs_entities = abstract.find_all("prot", recursive=False) lastindex = 0 for ei, e in enumerate(title_entities): estart = title_text.find(e.text, lastindex) eend = estart + len(e.text) etext = title_text[estart:eend] #print etext, estart, eend, self.documents[did].text this_sentence = self.documents[did].find_sentence_containing(estart, eend, chemdner=False) eid = this_sentence.tag_entity(estart, eend, "protein", text=e.text) if eid is None: print "did not add this entity: {}".format(e.text) # print e.text lastindex = estart lastindex = 0 for ei, e in enumerate(abs_entities): estart = abstract_text.find(e.text, lastindex) eend = estart + len(e.text) etext = self.documents[did].text[estart:eend] # logging.info("{} - {}".format(lastindex, e.text)) #logging.info(estart) #logging.info("{} + {} {}: {}-{}: {}".format(abstract_text.find(e.text, lastindex), abs_offset, e.text, estart, # eend, "-".join([str(s.offset) for s in self.documents[did].sentences]))) #logging.info(abstract_text) this_sentence = self.documents[did].find_sentence_containing(estart + abs_offset, eend + abs_offset, chemdner=False) eid = this_sentence.tag_entity(estart + abs_offset - this_sentence.offset , eend + abs_offset - this_sentence.offset, "protein", text=e.text) if eid is None: print "did not add this entity: {}".format(e.text) # print e.text lastindex = estart #for s in all_entities: # print s, len(all_entities[s]) def get_genia_gold_ann_set(goldann, etype): gold_offsets = set() soup = BeautifulSoup(codecs.open(goldann, 'r', "utf-8"), 
'html.parser') docs = soup.find_all("article") all_entities = {} for doc in docs: did = "GENIA" + doc.articleinfo.bibliomisc.text.split(":")[1] title = doc.title.sentence.get_text() doc_text = title + " " doc_offset = 0 sentences = doc.abstract.find_all("sentence") for si, s in enumerate(sentences): stext = s.get_text() sentities = s.find_all("cons", recursive=False) lastindex = 0 for ei, e in enumerate(sentities): estart = stext.find(e.text, lastindex) + doc_offset # relative to document eend = estart + len(e.text) sem = e.get("sem") if sem.startswith("("): #TODO: Deal with overlapping entities continue entity_type = sem.split("_")[0] if etype == "all" or type_match.get(entity_type) == etype: gold_offsets.add((did, estart, eend, e.text)) # etext = doc_text[estart:eend] # logging.info("gold annotation: {}".format(e.text)) doc_text += stext + " " doc_offset = len(doc_text) return gold_offsets, None def main(): logging.basicConfig(level=logging.DEBUG) c = GeniaCorpus(sys.argv[1]) c.load_corpus("") c.load_annotations(sys.argv[1]) if __name__ == "__main__": main()
{ "repo_name": "AndreLamurias/IBEnt", "path": "src/reader/aimed_corpus.py", "copies": "1", "size": "6527", "license": "mit", "hash": 873119241827266700, "line_mean": 44.6503496503, "line_max": 133, "alpha_frac": 0.543741382, "autogenerated": false, "ratio": 3.7212086659064996, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.47649500479065, "avg_score": null, "num_lines": null }
__author__ = 'andre' from cv.cv import CVUtil from ocr.functions import call_tesseract from patterns import Validator, file_get_contents, clean_dir from os import listdir from os.path import isfile, join, splitext import argparse import json def main(): parser = argparse.ArgumentParser(description='Computer Visual module.') parser.add_argument('-f', '--file', metavar='image', type=str, required=True, help='Input file') parser.add_argument('-o', '--out', metavar='directory', type=str, required=True, help='Valid output directory') args = parser.parse_args() path = args.out clean_dir(path) cvu = CVUtil(args.file, path, False) cvu.optimized_close() cvu.cutter() onlyfiles = [f for f in listdir(path) if isfile(join(path, f))] val = Validator() result = {'cnpj': None, 'data': None, 'coo': None, 'total': None} for fname in onlyfiles: (name, extension) = splitext(fname) if extension == ".png": call_tesseract(join(path, name), extension) content = file_get_contents(join(path, name+".txt")) for key, value in val.validate(content): result[key] = value print json.dumps(result) if __name__ == "__main__": main()
{ "repo_name": "ocr-doacao/cvocr", "path": "cvocr.py", "copies": "1", "size": "1248", "license": "apache-2.0", "hash": 4022691744927606000, "line_mean": 32.7567567568, "line_max": 115, "alpha_frac": 0.6426282051, "autogenerated": false, "ratio": 3.565714285714286, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9676059781337996, "avg_score": 0.006456541895258181, "num_lines": 37 }
__author__ = 'andre'

from datetime import datetime


def bin_search(lst, value):
    """Return the index where ``value`` should be inserted into ``lst``.

    ``lst`` is a list of cells sorted ascending by ``cell[0]``.  The returned
    position keeps that ordering, with ties placed after existing equal keys
    (``bisect.bisect_right`` semantics).

    BUG FIX: the original returned the index of the last element <= value
    instead of the slot *after* it (e.g. bin_search([[1],[3]], 2) gave 0),
    which broke the sort order and the running sums maintained by bin_add.
    """
    if not lst or value < lst[0][0]:
        return 0
    if value > lst[-1][0]:
        return len(lst)
    # Invariant: lst[lo][0] <= value and value < lst[hi][0] (hi may be len).
    lo, hi = 0, len(lst)
    while hi - lo > 1:
        mid = (lo + hi) // 2
        if lst[mid][0] > value:
            hi = mid
        else:
            lo = mid
    # value belongs between lst[lo] and lst[hi], i.e. at index hi.
    return hi


def bin_add(lst, cell):
    """Insert ``cell`` (``[key, amount]``) into the key-sorted list ``lst``.

    Appends the running total as ``cell[2]`` before inserting, then refreshes
    the running totals of every cell after the insertion point so that
    ``lst[i][2] == sum(lst[j][1] for j <= i)`` always holds.
    """
    pos = bin_search(lst, cell[0])
    if pos == 0:
        cell.append(cell[1])
    else:
        cell.append(lst[pos - 1][2] + cell[1])
    lst.insert(pos, cell)
    # Every cell after the insertion point gains cell[1]; recompute in one pass.
    for i in range(pos + 1, len(lst)):
        lst[i][2] = lst[i - 1][2] + lst[i][1]


def main():
    """Read transactions and print the deepest debt ever reached.

    Each of the ``n`` input lines is ``"<credit> <day>.<month> <hour>:<minute>"``.
    Transactions are kept sorted by timestamp with running balances; the answer
    is the most negative balance reached after a debit, or 0 if none.
    """
    n = int(raw_input())
    sums = []
    for _ in range(n):
        (credit, day, hour) = raw_input().split()
        credit = long(credit)
        (day, month) = map(int, day.split('.'))
        (hour, minute) = map(int, hour.split(':'))
        # Fixed dummy year: only the relative ordering of timestamps matters.
        dt = datetime(year=2042, day=day, month=month, hour=hour, minute=minute)
        bin_add(sums, [dt, credit])
    divida = 0
    # Scan backwards in time for the balance right after the last debit.
    for cr in sums[::-1]:
        if cr[1] < 0:
            divida = cr[2]
            break
    print(divida if divida < 0 else 0)


if __name__ == "__main__":
    main()
{ "repo_name": "andredalton/bcc", "path": "2015/MAC0327/Desafios 1/p18.py", "copies": "2", "size": "1198", "license": "apache-2.0", "hash": -2750056946407635000, "line_mean": 22.4901960784, "line_max": 80, "alpha_frac": 0.4716193656, "autogenerated": false, "ratio": 3.0100502512562812, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.9449452408985478, "avg_score": 0.006443441574160528, "num_lines": 51 }
__author__ = 'andre' import cv2 import numpy as np from matplotlib.pyplot import imshow, show def adaptive_threshold(image_gray, blur=True, verbose=False): if verbose: print "Thresholding" if blur: img = cv2.medianBlur(image_gray, 3) img = cv2.fastNlMeansDenoising(img, None, 10, 7, 21) return cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2) def angle(image_gray, nlines, alpha, verbose=False): if verbose: print "Get angle" width = image_gray.shape[1] edges = cv2.Canny(image_gray, 50, 150, apertureSize=3) angle = None begin = 0 end = width while angle is None: middle = (begin + end)/2 lst_angle = [] lines = cv2.HoughLines(edges, 1, np.pi/180, middle) try: for rho, theta in lines[0]: theta = 180*theta/np.pi if theta < alpha: lst_angle.append(theta) elif theta > 180 - alpha: lst_angle.append(-(180 - theta)) if len(lst_angle) == nlines or end - begin == 1: angle = np.average(lst_angle) elif len(lst_angle) < nlines: end = middle elif len(lst_angle) > nlines: begin = middle except TypeError: end = middle return angle def matrix_rotation(width, height, alpha): return cv2.getRotationMatrix2D((width/2, height/2), alpha, 1) def rotate(alpha, image, verbose=False): if verbose: print "Rotate image" height = image.shape[0] width = image.shape[1] mtz_rotation = matrix_rotation(width, height, alpha) return cv2.warpAffine(image, mtz_rotation, (width, height)) def horizontal_close(image_bin, lenght=None, verbose=False): height, width = image_bin.shape if verbose: print "Making close" if lenght is None: lenght = width/30 cv2.bitwise_not(image_bin, image_bin) kernel = np.ones((1, int(lenght)), np.uint8) image_bin = cv2.morphologyEx(image_bin, cv2.MORPH_CLOSE, kernel) cv2.bitwise_not(image_bin, image_bin) return image_bin def crop(up_left, down_right, image): crop = image[up_left[1]:down_right[1], up_left[0]:down_right[0]] return cv2.copyMakeBorder(crop, 10, 10, 10, 10, cv2.BORDER_CONSTANT, value=[255, 255, 255]) def pop_up(image): 
imshow(image,) show()
{ "repo_name": "ocr-doacao/cvocr", "path": "cv/util.py", "copies": "1", "size": "2423", "license": "apache-2.0", "hash": 7112405968890914000, "line_mean": 32.2054794521, "line_max": 100, "alpha_frac": 0.6050350805, "autogenerated": false, "ratio": 3.2611036339165547, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.4366138714416554, "avg_score": null, "num_lines": null }
import random
import math

words = ["Hola", "no", "estoy", "aqui", "Javi", "assca"]


def shuffle(list):
    """Shuffle ``list`` in place (Fisher-Yates) and return it.

    BUG FIX: the original swapped every position with an index drawn from the
    *whole* range -- the classic biased "naive shuffle", whose n^n swap
    sequences cannot map uniformly onto n! permutations.  Drawing the swap
    partner only from the not-yet-fixed prefix makes every permutation
    equally likely, which matters here because the script below measures the
    permutation distribution.
    """
    for index in range(len(list) - 1, 0, -1):
        other = random.randint(0, index)
        list[index], list[other] = list[other], list[index]
    return list


def shuffle2(list):
    """Return a new shuffled list; ``list`` itself is drained (emptied)."""
    new_list = []
    while list:
        # Extract a uniformly random remaining element each round.
        new_list.append(list.pop(random.randint(0, len(list) - 1)))
    return new_list


# Count how often each resulting permutation shows up over n shuffles.
dic = {}
n = 10000
for _ in range(n):
    words = shuffle(words)
    key = str(words)
    dic[key] = dic.get(key, 0) + 1

# Mean count per observed permutation.
total = 0
for combination in dic.keys():
    total += dic[combination]
    print (dic[combination])
mean = 1.0*total/len(dic.keys())
print ("Mean:")
print (mean)

# Standard deviation of the per-permutation counts around that mean.
# NOTE(review): divides by n (number of trials) rather than by the number of
# observed permutations -- confirm that is the intended normalization.
coeficient = 0
for combination in dic.keys():
    coeficient += (mean - dic[combination]) * (mean - dic[combination])
standard_deviation = math.sqrt(coeficient/n)
print (standard_deviation)
{ "repo_name": "asix7/RandomScripts", "path": "shuffle.py", "copies": "1", "size": "1165", "license": "mit", "hash": -7808118374284280000, "line_mean": 20.1818181818, "line_max": 69, "alpha_frac": 0.6763948498, "autogenerated": false, "ratio": 2.7411764705882353, "config_test": false, "has_no_keywords": false, "few_assignments": false, "quality_score": 0.8591995418039752, "avg_score": 0.06511518046969662, "num_lines": 55 }